vspace.cc
// https://github.com/rbehrends/vspace
#include "vspace.h"
#include "kernel/mod2.h"
#ifdef HAVE_VSPACE
#ifdef HAVE_CPP_THREADS
#include <thread>
#endif
#include <cstddef>

#if defined(__GNUC__) && (__GNUC__ < 9) && !defined(__clang__)

namespace vspace {
namespace internals {

size_t config[4]
    = { METABLOCK_SIZE, MAX_PROCESS, SEGMENT_SIZE, MAX_SEGMENTS };

VMem VMem::vmem_global;

// offsetof() only works for POD types, so we need to construct
// a portable version of it for metapage fields.

#define metapageaddr(field) \
  ((char *) &vmem.metapage->field - (char *) vmem.metapage)

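// For example, init_metapage() below constructs the allocator lock as
// FastLock(metapageaddr(allocator_lock)), i.e. the lock is identified by its
// byte offset inside the metapage; in the fallback path without
// HAVE_CPP_THREADS that offset is the file range locked through
// lock_file()/unlock_file().
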
size_t VMem::filesize() {
  struct stat stat;
  fstat(fd, &stat);
  return stat.st_size;
}

Status VMem::init(int fd) {
  this->fd = fd;
  for (int i = 0; i < MAX_SEGMENTS; i++)
    segments[i] = VSeg(NULL);
  for (int i = 0; i < MAX_PROCESS; i++) {
    int channel[2];
    if (pipe(channel) < 0) {
      for (int j = 0; j < i; j++) {
        close(channels[j].fd_read);
        close(channels[j].fd_write);
      }
      return Status(ErrOS);
    }
    channels[i].fd_read = channel[0];
    channels[i].fd_write = channel[1];
  }
  lock_metapage();
  init_metapage(filesize() == 0);
  unlock_metapage();

  return Status(ErrNone);
}

Status VMem::init() {
  FILE *fp = tmpfile();
  Status result = init(fileno(fp));
  if (!result.ok())
    return result;
  current_process = 0;
  file_handle = fp;
  metapage->process_info[0].pid = getpid();
  return Status(ErrNone);
}

Status VMem::init(const char *path) {
  int fd = open(path, O_RDWR | O_CREAT, 0600);
  if (fd < 0)
    return Status(ErrFile);
  init(fd);
  lock_metapage();
  // TODO: enter process in meta table
  unlock_metapage();
  return Status(ErrNone);
}

void VMem::deinit() {
  if (file_handle) {
    fclose(file_handle);
    file_handle = NULL;
  } else {
    close(fd);
  }
  munmap(metapage, METABLOCK_SIZE);
  metapage = NULL;
  current_process = -1;
  freelist = NULL;
  for (int i = 0; i < MAX_SEGMENTS; i++) {
    if (segments[i].base) munmap(segments[i].base, SEGMENT_SIZE);
    segments[i] = NULL;
  }
  for (int i = 0; i < MAX_PROCESS; i++) {
    close(channels[i].fd_read);
    close(channels[i].fd_write);
  }
}

void *VMem::mmap_segment(int seg) {
  lock_metapage();
  void *map = mmap(NULL, SEGMENT_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
      METABLOCK_SIZE + seg * SEGMENT_SIZE);
  if (map == MAP_FAILED) {
    // This is an "impossible to proceed from here, because system state
    // is impossible to proceed from" situation, so we abort the program.
    perror("mmap");
    abort();
  }
  unlock_metapage();
  return map;
}

void VMem::add_segment() {
  int seg = metapage->segment_count++;
  ftruncate(fd, METABLOCK_SIZE + metapage->segment_count * SEGMENT_SIZE);
  void *map_addr = mmap_segment(seg);
  segments[seg] = VSeg(map_addr);
  Block *top = block_ptr(seg * SEGMENT_SIZE);
  top->next = freelist[LOG2_SEGMENT_SIZE];
  top->prev = VADDR_NULL;
  freelist[LOG2_SEGMENT_SIZE] = seg * SEGMENT_SIZE;
}

void FastLock::lock() {
#ifdef HAVE_CPP_THREADS
  while (_lock.test_and_set()) {
  }
  bool empty = _owner < 0;
  if (empty) {
    _owner = vmem.current_process;
  } else {
    int p = vmem.current_process;
    vmem.metapage->process_info[p].next = -1;
    if (_head < 0)
      _head = p;
    else
      vmem.metapage->process_info[_tail].next = p;
    _tail = p;
  }
  _lock.clear();
  if (!empty)
    wait_signal(false);
#else
  lock_file(vmem.fd, _offset);
#endif
}

void FastLock::unlock() {
#ifdef HAVE_CPP_THREADS
  while (_lock.test_and_set()) {
  }
  _owner = _head;
  if (_owner >= 0)
    _head = vmem.metapage->process_info[_head].next;
  _lock.clear();
  if (_owner >= 0)
    send_signal(_owner, 0, false);
#else
  unlock_file(vmem.fd, _offset);
#endif
}

static void lock_allocator() {
  vmem.metapage->allocator_lock.lock();
}

static void unlock_allocator() {
  vmem.metapage->allocator_lock.unlock();
}

static void print_freelists() {
  for (int i = 0; i <= LOG2_SEGMENT_SIZE; i++) {
    vaddr_t vaddr = vmem.freelist[i];
    if (vaddr != VADDR_NULL) {
      printf("%2d: %ld", i, (long)vaddr);
      vaddr_t prev = block_ptr(vaddr)->prev;
      if (prev != VADDR_NULL) {
        printf("(%ld)", (long)prev);
      }
      assert(block_ptr(vaddr)->prev == VADDR_NULL);
      for (;;) {
        vaddr_t last_vaddr = vaddr;
        Block *block = block_ptr(vaddr);
        vaddr = block->next;
        if (vaddr == VADDR_NULL)
          break;
        printf(" -> %ld", (long)vaddr);
        vaddr_t prev = block_ptr(vaddr)->prev;
        if (prev != last_vaddr) {
          printf("(%ld)", (long)prev);
        }
      }
      printf("\n");
    }
  }
  fflush(stdout);
}

void vmem_free(vaddr_t vaddr) {
  lock_allocator();
  vaddr -= offsetof(Block, data);
  vmem.ensure_is_mapped(vaddr);
  size_t segno = vmem.segment_no(vaddr);
  VSeg seg = vmem.segment(vaddr);
  segaddr_t addr = vmem.segaddr(vaddr);
  int level = seg.block_ptr(addr)->level();
  assert(!seg.is_free(addr));
  while (level < LOG2_SEGMENT_SIZE) {
    segaddr_t buddy = find_buddy(addr, level);
    Block *block = seg.block_ptr(buddy);
    // is buddy free and at the same level?
    if (!block->is_free() || block->level() != level)
      break;
    // remove buddy from freelist.
    Block *prev = vmem.block_ptr(block->prev);
    Block *next = vmem.block_ptr(block->next);
    block->data[0] = level;
    if (prev) {
      assert(prev->next == vmem.vaddr(segno, buddy));
      prev->next = block->next;
    } else {
      // head of freelist.
      assert(vmem.freelist[level] == vmem.vaddr(segno, buddy));
      vmem.freelist[level] = block->next;
    }
    if (next) {
      assert(next->prev == vmem.vaddr(segno, buddy));
      next->prev = block->prev;
    }
    // coalesce block with buddy
    level++;
    if (buddy < addr)
      addr = buddy;
  }
  // Add coalesced block to free list
  Block *block = seg.block_ptr(addr);
  block->prev = VADDR_NULL;
  block->next = vmem.freelist[level];
  block->mark_as_free(level);
  vaddr_t blockaddr = vmem.vaddr(segno, addr);
  if (block->next != VADDR_NULL)
    vmem.block_ptr(block->next)->prev = blockaddr;
  vmem.freelist[level] = blockaddr;
  unlock_allocator();
}
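
// Sketch of the buddy scheme implemented by vmem_free()/vmem_alloc(): a block
// at `level` occupies 2^level bytes of its segment, and find_buddy(addr, level)
// names the one neighbouring block of the same size it may merge with.
// Freeing therefore walks upwards: while the buddy is itself free and still at
// the same level, the two halves are fused into a block of level+1, until the
// buddy is in use or the block spans the whole segment (LOG2_SEGMENT_SIZE).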

vaddr_t vmem_alloc(size_t size) {
  lock_allocator();
  size_t alloc_size = size + offsetof(Block, data);
  int level = find_level(alloc_size);
  int flevel = level;
  while (flevel < LOG2_SEGMENT_SIZE && vmem.freelist[flevel] == VADDR_NULL)
    flevel++;
  if (vmem.freelist[flevel] == VADDR_NULL) {
    vmem.add_segment();
  }
  while (flevel > level) {
    // get and split a block
    vaddr_t blockaddr = vmem.freelist[flevel];
    assert((blockaddr & ((1 << flevel) - 1)) == 0);
    Block *block = vmem.block_ptr(blockaddr);
    vmem.freelist[flevel] = block->next;
    if (vmem.freelist[flevel] != VADDR_NULL)
      vmem.block_ptr(vmem.freelist[flevel])->prev = VADDR_NULL;
    vaddr_t blockaddr2 = blockaddr + (1 << (flevel - 1));
    Block *block2 = vmem.block_ptr(blockaddr2);
    flevel--;
    block2->next = vmem.freelist[flevel];
    block2->prev = blockaddr;
    block->next = blockaddr2;
    block->prev = VADDR_NULL;
    // block->prev == VADDR_NULL already.
    vmem.freelist[flevel] = blockaddr;
  }
  Block *block = vmem.block_ptr(vmem.freelist[level]);
  vaddr_t vaddr = vmem.freelist[level];
#if defined(__GNUC__) && (__GNUC__>11)
  vaddr_t result = vaddr + (sizeof(vaddr_t)*2);
#else
  vaddr_t result = vaddr + offsetof(Block, data);
#endif
  vmem.freelist[level] = block->next;
  if (block->next != VADDR_NULL)
    vmem.block_ptr(block->next)->prev = VADDR_NULL;
  block->mark_as_allocated(vaddr, level);
  unlock_allocator();
  memset(block->data, 0, size);
  return result;
}
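
// Illustrative usage sketch: the value returned by vmem_alloc() is a vaddr_t
// relative to the shared mapping rather than a raw pointer, so it remains
// meaningful in every process attached to the same backing file.
//
//   vaddr_t a = vmem_alloc(100);  // zero-initialised, rounded up to the
//                                 // next power-of-two block
//   // ... translate to a mapped pointer with the helpers in vspace.h ...
//   vmem_free(a);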

void init_flock_struct(
    struct flock &lock_info, size_t offset, size_t len, bool lock) {
  lock_info.l_start = offset;
  lock_info.l_len = len;
  lock_info.l_pid = 0;
  lock_info.l_type = lock ? F_WRLCK : F_UNLCK;
  lock_info.l_whence = SEEK_SET;
}

void lock_file(int fd, size_t offset, size_t len) {
  struct flock lock_info;
  init_flock_struct(lock_info, offset, len, true);
  fcntl(fd, F_SETLKW, &lock_info);
}

void unlock_file(int fd, size_t offset, size_t len) {
  struct flock lock_info;
  init_flock_struct(lock_info, offset, len, false);
  fcntl(fd, F_SETLKW, &lock_info);
}
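
// These helpers rely on POSIX advisory record locks: F_SETLKW blocks until
// the byte range [offset, offset + len) of the backing file can be
// write-locked (F_WRLCK), and releasing is just another F_SETLKW with
// F_UNLCK.  Because the locks live in the file itself, they synchronise all
// cooperating processes attached to the same mapping.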

void lock_metapage() {
  lock_file(vmem.fd, 0);
}

void unlock_metapage() {
  unlock_file(vmem.fd, 0);
}

void init_metapage(bool create) {
  if (create)
    ftruncate(vmem.fd, METABLOCK_SIZE);
  vmem.metapage = (MetaPage *) mmap(
      NULL, METABLOCK_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, vmem.fd, 0);
  if (create) {
    memcpy(vmem.metapage->config_header, config, sizeof(config));
    for (int i = 0; i <= LOG2_SEGMENT_SIZE; i++) {
      vmem.metapage->freelist[i] = VADDR_NULL;
    }
    vmem.metapage->segment_count = 0;
    vmem.metapage->allocator_lock = FastLock(metapageaddr(allocator_lock));
  } else {
    // the stored configuration must match ours.
    assert(memcmp(vmem.metapage->config_header, config, sizeof(config)) == 0);
  }
}

static void lock_process(int processno) {
  lock_file(vmem.fd,
      metapageaddr(process_info)
          + sizeof(ProcessInfo) * vmem.current_process);
}

static void unlock_process(int processno) {
  unlock_file(vmem.fd,
      metapageaddr(process_info)
          + sizeof(ProcessInfo) * vmem.current_process);
}

static ProcessInfo &process_info(int processno) {
  return vmem.metapage->process_info[processno];
}

bool send_signal(int processno, ipc_signal_t sig, bool lock) {
  if (lock)
    lock_process(processno);
  if (process_info(processno).sigstate != Waiting) {
    unlock_process(processno);
    return false;
  }
  if (processno == vmem.current_process) {
    process_info(processno).sigstate = Accepted;
    process_info(processno).signal = sig;
  } else {
    process_info(processno).sigstate = Pending;
    process_info(processno).signal = sig;
    int fd = vmem.channels[processno].fd_write;
    char buf[1] = { 0 };
    while (write(fd, buf, 1) != 1) {
    }
  }
  if (lock)
    unlock_process(processno);
  return true;
}

ipc_signal_t check_signal(bool resume, bool lock) {
  ipc_signal_t result;
  if (lock)
    lock_process(vmem.current_process);
  SignalState sigstate = process_info(vmem.current_process).sigstate;
  switch (sigstate) {
    case Waiting:
    case Pending: {
      int fd = vmem.channels[vmem.current_process].fd_read;
      char buf[1];
      if (lock && sigstate == Waiting) {
        unlock_process(vmem.current_process);
        while (read(fd, buf, 1) != 1) {
        }
        lock_process(vmem.current_process);
      } else {
        while (read(fd, buf, 1) != 1) {
        }
      }
      result = process_info(vmem.current_process).signal;
      process_info(vmem.current_process).sigstate
          = resume ? Waiting : Accepted;
      if (lock)
        unlock_process(vmem.current_process);
      break;
    }
    case Accepted:
      result = process_info(vmem.current_process).signal;
      if (resume)
        process_info(vmem.current_process).sigstate = Waiting;
      if (lock)
        unlock_process(vmem.current_process);
      break;
  }
  return result;
}

void accept_signals() {
  lock_process(vmem.current_process);
  process_info(vmem.current_process).sigstate = Waiting;
  unlock_process(vmem.current_process);
}

ipc_signal_t wait_signal(bool lock) {
  return check_signal(true, lock);
}
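
// Signalling protocol in a nutshell: each process slot owns a pipe
// (channels[i]) plus a sigstate/signal pair in the metapage.  send_signal()
// refuses (returns false) unless the target is Waiting; otherwise it flips a
// remote target to Pending (or straight to Accepted when signalling itself),
// stores the payload and writes one byte into the target's pipe.
// check_signal()/wait_signal() read that byte, pick up the payload and either
// go back to Waiting (resume) or stay Accepted.  The per-slot file locks from
// lock_process()/unlock_process() keep these transitions atomic across
// processes.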

} // namespace internals

pid_t fork_process() {
  using namespace internals;
  lock_metapage();
  for (int p = 0; p < MAX_PROCESS; p++) {
    if (vmem.metapage->process_info[p].pid == 0) {
      pid_t pid = fork();
      if (pid < 0) {
        // error
        return -1;
      } else if (pid == 0) {
        // child process
        int parent = vmem.current_process;
        vmem.current_process = p;
        lock_metapage();
        vmem.metapage->process_info[p].pid = getpid();
        unlock_metapage();
        send_signal(parent);
      } else {
        // parent process
        unlock_metapage();
        wait_signal();
        // child has unlocked metapage, so we don't need to.
      }
      return pid;
    }
  }
  unlock_metapage();
  return -1;
}
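
// fork_process() hands each child one of the MAX_PROCESS slots in the
// metapage.  After fork() the child takes slot p as its current_process,
// grabs the metapage lock to publish its pid there, and then signals the
// parent, which sits in wait_signal() until the slot is ready.  A return
// value of -1 means fork() failed or every slot is already in use.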

void Semaphore::post() {
  int wakeup = -1;
  internals::ipc_signal_t sig = 0;
  _lock.lock();
  if (_head == _tail) {
    _value++;
  } else {
    // don't increment value, as we'll pass that on to the next process.
    wakeup = _waiting[_head];
    sig = _signals[_head];
    next(_head);
  }
  _lock.unlock();
  if (wakeup >= 0) {
    internals::send_signal(wakeup, sig);
  }
}
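
// The semaphore keeps its waiters in the fixed-size ring buffer
// _waiting/_signals (indices advanced with next(), _head == _tail meaning
// "empty").  post() therefore either bumps _value when nobody is queued, or
// forwards the token directly to the first waiter by signalling it, so the
// count is never incremented and immediately decremented again.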

bool Semaphore::try_wait() {
  bool result = false;
  _lock.lock();
  if (_value > 0) {
    _value--;
    result = true;
  }
  _lock.unlock();
  return result;
}

void Semaphore::wait() {
  _lock.lock();
  if (_value > 0) {
    _value--;
    _lock.unlock();
    return;
  }
  _waiting[_tail] = internals::vmem.current_process;
  _signals[_tail] = 0;
  next(_tail);
  _lock.unlock();
  internals::wait_signal();
}

bool Semaphore::start_wait(internals::ipc_signal_t sig) {
  _lock.lock();
  if (_value > 0) {
    if (internals::send_signal(internals::vmem.current_process, sig))
      _value--;
    _lock.unlock();
    return false;
  }
  _waiting[_tail] = internals::vmem.current_process;
  _signals[_tail] = sig;
  next(_tail);
  _lock.unlock();
  return true;
}

bool Semaphore::stop_wait() {
  bool result = false;
  _lock.lock();
  for (int i = _head; i != _tail; next(i)) {
    if (_waiting[i] == internals::vmem.current_process) {
      int last = i;
      next(i);
      while (i != _tail) {
        _waiting[last] = _waiting[i];
        _signals[last] = _signals[i];
        last = i;
        next(i);
      }
      _tail = last;
      result = true;
      break;
    }
  }
  _lock.unlock();
  return result;
}

void EventSet::add(Event *event) {
  event->_next = NULL;
  if (_head == NULL) {
    _head = _tail = event;
  } else {
    _tail->_next = event;
    _tail = event;
  }
}

int EventSet::wait() {
  size_t n = 0;
  for (Event *event = _head; event; event = event->_next) {
    if (!event->start_listen((int) (n++))) {
      break;
    }
  }
  internals::ipc_signal_t result = internals::wait_signal();
  for (Event *event = _head; event; event = event->_next) {
    event->stop_listen();
  }

  return (int) result;
}

} // namespace vspace
#else // gcc>9
#include <cstdlib>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/stat.h>


namespace vspace {
namespace internals {

size_t config[4]
    = { METABLOCK_SIZE, MAX_PROCESS, SEGMENT_SIZE, MAX_SEGMENTS };

VMem VMem::vmem_global;

// offsetof() only works for POD types, so we need to construct
// a portable version of it for metapage fields.

#define metapageaddr(field) \
  ((char *) &vmem.metapage->field - (char *) vmem.metapage)

size_t VMem::filesize() {
  struct stat stat;
  fstat(fd, &stat);
  return stat.st_size;
}

Status VMem::init(int fd) {
  this->fd = fd;
  for (int i = 0; i < MAX_SEGMENTS; i++)
    segments[i] = VSeg(NULL);
  for (int i = 0; i < MAX_PROCESS; i++) {
    int channel[2];
    if (pipe(channel) < 0) {
      for (int j = 0; j < i; j++) {
        close(channels[j].fd_read);
        close(channels[j].fd_write);
      }
      return Status(ErrOS);
    }
    channels[i].fd_read = channel[0];
    channels[i].fd_write = channel[1];
  }
  lock_metapage();
  init_metapage(filesize() == 0);
  unlock_metapage();

  return Status(ErrNone);
}

Status VMem::init() {
  FILE *fp = tmpfile();
  Status result = init(fileno(fp));
  if (!result.ok())
    return result;
  current_process = 0;
  file_handle = fp;
  metapage->process_info[0].pid = getpid();
  return Status(ErrNone);
}

Status VMem::init(const char *path) {
  int fd = open(path, O_RDWR | O_CREAT, 0600);
  if (fd < 0)
    return Status(ErrFile);
  init(fd);
  lock_metapage();
  // TODO: enter process in meta table
  unlock_metapage();
  return Status(ErrNone);
}

void VMem::deinit() {
  if (file_handle) {
    fclose(file_handle);
    file_handle = NULL;
  } else {
    close(fd);
  }
  munmap(metapage, METABLOCK_SIZE);
  metapage = NULL;
  current_process = -1;
  freelist = NULL;
  for (int i = 0; i < MAX_SEGMENTS; i++) {
    if (!segments[i].is_free())
      munmap(segments[i].base, SEGMENT_SIZE);
    segments[i] = VSeg(NULL);
  }
  for (int i = 0; i < MAX_PROCESS; i++) {
    close(channels[i].fd_read);
    close(channels[i].fd_write);
  }
}

void *VMem::mmap_segment(int seg) {
  lock_metapage();
  void *map = mmap(NULL, SEGMENT_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
      METABLOCK_SIZE + seg * SEGMENT_SIZE);
  if (map == MAP_FAILED) {
    // This is an "impossible to proceed from here, because system state
    // is impossible to proceed from" situation, so we abort the program.
    perror("mmap");
    abort();
  }
  unlock_metapage();
  return map;
}

void VMem::add_segment() {
  int seg = metapage->segment_count++;
  ftruncate(fd, METABLOCK_SIZE + metapage->segment_count * SEGMENT_SIZE);
  void *map_addr = mmap_segment(seg);
  segments[seg] = VSeg(map_addr);
  Block *top = block_ptr(seg * SEGMENT_SIZE);
  top->next = freelist[LOG2_SEGMENT_SIZE];
  top->prev = VADDR_NULL;
  freelist[LOG2_SEGMENT_SIZE] = seg * SEGMENT_SIZE;
}

void FastLock::lock() {
#ifdef HAVE_CPP_THREADS
  while (_lock.test_and_set()) {
  }
  bool empty = _owner < 0;
  if (empty) {
    _owner = vmem.current_process;
  } else {
    int p = vmem.current_process;
    vmem.metapage->process_info[p].next = -1;
    if (_head < 0)
      _head = p;
    else
      vmem.metapage->process_info[_tail].next = p;
    _tail = p;
  }
  _lock.clear();
  if (!empty)
    wait_signal(false);
#else
  lock_file(vmem.fd, _offset);
#endif
}

void FastLock::unlock() {
#ifdef HAVE_CPP_THREADS
  while (_lock.test_and_set()) {
  }
  _owner = _head;
  if (_owner >= 0)
    _head = vmem.metapage->process_info[_head].next;
  _lock.clear();
  if (_owner >= 0)
    send_signal(_owner, 0, false);
#else
  unlock_file(vmem.fd, _offset);
#endif
}

static void lock_allocator() {
  vmem.metapage->allocator_lock.lock();
}

static void unlock_allocator() {
  vmem.metapage->allocator_lock.unlock();
}

static void print_freelists() {
  for (int i = 0; i <= LOG2_SEGMENT_SIZE; i++) {
    vaddr_t vaddr = vmem.freelist[i];
    if (vaddr != VADDR_NULL) {
      std::printf("%2d: %ld", i, (long)vaddr);
      vaddr_t prev = block_ptr(vaddr)->prev;
      if (prev != VADDR_NULL) {
        std::printf("(%ld)", (long)prev);
      }
      assert(block_ptr(vaddr)->prev == VADDR_NULL);
      for (;;) {
        vaddr_t last_vaddr = vaddr;
        Block *block = block_ptr(vaddr);
        vaddr = block->next;
        if (vaddr == VADDR_NULL)
          break;
        std::printf(" -> %ld", (long)vaddr);
        vaddr_t prev = block_ptr(vaddr)->prev;
        if (prev != last_vaddr) {
          std::printf("(%ld)", (long)prev);
        }
      }
      std::printf("\n");
    }
  }
  std::fflush(stdout);
}

void vmem_free(vaddr_t vaddr) {
  lock_allocator();
#if defined(__GNUC__) && (__GNUC__>11)
  vaddr -= (sizeof(vaddr_t)*2);
#else
  vaddr -= offsetof(Block, data);
#endif
  vmem.ensure_is_mapped(vaddr);
  size_t segno = vmem.segment_no(vaddr);
  VSeg seg = vmem.segment(vaddr);
  segaddr_t addr = vmem.segaddr(vaddr);
  int level = seg.block_ptr(addr)->level();
  assert(!seg.is_free(addr));
  while (level < LOG2_SEGMENT_SIZE) {
    segaddr_t buddy = find_buddy(addr, level);
    Block *block = seg.block_ptr(buddy);
    // is buddy free and at the same level?
    if (!block->is_free() || block->level() != level)
      break;
    // remove buddy from freelist.
    Block *prev = vmem.block_ptr(block->prev);
    Block *next = vmem.block_ptr(block->next);
    block->data[0] = level;
    if (prev) {
      assert(prev->next == vmem.vaddr(segno, buddy));
      prev->next = block->next;
    } else {
      // head of freelist.
      assert(vmem.freelist[level] == vmem.vaddr(segno, buddy));
      vmem.freelist[level] = block->next;
    }
    if (next) {
      assert(next->prev == vmem.vaddr(segno, buddy));
      next->prev = block->prev;
    }
    // coalesce block with buddy
    level++;
    if (buddy < addr)
      addr = buddy;
  }
  // Add coalesced block to free list
  Block *block = seg.block_ptr(addr);
  block->prev = VADDR_NULL;
  block->next = vmem.freelist[level];
  block->mark_as_free(level);
  vaddr_t blockaddr = vmem.vaddr(segno, addr);
  if (block->next != VADDR_NULL)
    vmem.block_ptr(block->next)->prev = blockaddr;
  vmem.freelist[level] = blockaddr;
  unlock_allocator();
}

vaddr_t vmem_alloc(size_t size) {
  lock_allocator();
#if defined(__GNUC__) && (__GNUC__>11)
  size_t alloc_size = size + (sizeof(vaddr_t)*2);
#else
  size_t alloc_size = size + offsetof(Block, data);
#endif
  int level = find_level(alloc_size);
  int flevel = level;
  while (flevel < LOG2_SEGMENT_SIZE && vmem.freelist[flevel] == VADDR_NULL)
    flevel++;
  if (vmem.freelist[flevel] == VADDR_NULL) {
    vmem.add_segment();
  }
  while (flevel > level) {
    // get and split a block
    vaddr_t blockaddr = vmem.freelist[flevel];
    assert((blockaddr & ((1 << flevel) - 1)) == 0);
    Block *block = vmem.block_ptr(blockaddr);
    vmem.freelist[flevel] = block->next;
    if (vmem.freelist[flevel] != VADDR_NULL)
      vmem.block_ptr(vmem.freelist[flevel])->prev = VADDR_NULL;
    vaddr_t blockaddr2 = blockaddr + (1 << (flevel - 1));
    Block *block2 = vmem.block_ptr(blockaddr2);
    flevel--;
    block2->next = vmem.freelist[flevel];
    block2->prev = blockaddr;
    block->next = blockaddr2;
    block->prev = VADDR_NULL;
    // block->prev == VADDR_NULL already.
    vmem.freelist[flevel] = blockaddr;
  }
  Block *block = vmem.block_ptr(vmem.freelist[level]);
  vaddr_t vaddr = vmem.freelist[level];
#if defined(__GNUC__) && (__GNUC__>11)
  vaddr_t result = vaddr + (sizeof(vaddr_t)*2);
#else
  vaddr_t result = vaddr + offsetof(Block, data);
#endif
  vmem.freelist[level] = block->next;
  if (block->next != VADDR_NULL)
    vmem.block_ptr(block->next)->prev = VADDR_NULL;
  block->mark_as_allocated(vaddr, level);
  unlock_allocator();
  memset(block->data, 0, size);
  return result;
}
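
// Note on the __GNUC__ > 11 branches above: offsetof(Block, data) is replaced
// by sizeof(vaddr_t) * 2, which assumes data comes right after the two
// vaddr_t links (prev, next) at the start of Block.  This is presumably done
// to sidestep the -Winvalid-offsetof diagnostics newer GCC emits for
// offsetof on non-standard-layout types; the computed offset is intended to
// be the same either way.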

void init_flock_struct(
    struct flock &lock_info, size_t offset, size_t len, bool lock) {
  lock_info.l_start = offset;
  lock_info.l_len = len;
  lock_info.l_pid = 0;
  lock_info.l_type = lock ? F_WRLCK : F_UNLCK;
  lock_info.l_whence = SEEK_SET;
}

void lock_file(int fd, size_t offset, size_t len) {
  struct flock lock_info;
  init_flock_struct(lock_info, offset, len, true);
  fcntl(fd, F_SETLKW, &lock_info);
}

void unlock_file(int fd, size_t offset, size_t len) {
  struct flock lock_info;
  init_flock_struct(lock_info, offset, len, false);
  fcntl(fd, F_SETLKW, &lock_info);
}

void lock_metapage() {
  lock_file(vmem.fd, 0);
}

void unlock_metapage() {
  unlock_file(vmem.fd, 0);
}

void init_metapage(bool create) {
  if (create)
    ftruncate(vmem.fd, METABLOCK_SIZE);
  vmem.metapage = (MetaPage *) mmap(
      NULL, METABLOCK_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, vmem.fd, 0);
  if (create) {
    std::memcpy(vmem.metapage->config_header, config, sizeof(config));
    for (int i = 0; i <= LOG2_SEGMENT_SIZE; i++) {
      vmem.metapage->freelist[i] = VADDR_NULL;
    }
    vmem.metapage->segment_count = 0;
    vmem.metapage->allocator_lock = FastLock(metapageaddr(allocator_lock));
  } else {
    // the stored configuration must match ours.
    assert(std::memcmp(vmem.metapage->config_header, config,
               sizeof(config)) == 0);
  }
}

static void lock_process(int processno) {
  lock_file(vmem.fd,
      metapageaddr(process_info)
          + sizeof(ProcessInfo) * vmem.current_process);
}

static void unlock_process(int processno) {
  unlock_file(vmem.fd,
      metapageaddr(process_info)
          + sizeof(ProcessInfo) * vmem.current_process);
}

static ProcessInfo &process_info(int processno) {
  return vmem.metapage->process_info[processno];
}

bool send_signal(int processno, ipc_signal_t sig, bool lock) {
  if (lock)
    lock_process(processno);
  if (process_info(processno).sigstate != Waiting) {
    unlock_process(processno);
    return false;
  }
  if (processno == vmem.current_process) {
    process_info(processno).sigstate = Accepted;
    process_info(processno).signal = sig;
  } else {
    process_info(processno).sigstate = Pending;
    process_info(processno).signal = sig;
    int fd = vmem.channels[processno].fd_write;
    char buf[1] = { 0 };
    while (write(fd, buf, 1) != 1) {
    }
  }
  if (lock)
    unlock_process(processno);
  return true;
}

ipc_signal_t check_signal(bool resume, bool lock) {
  ipc_signal_t result;
  if (lock)
    lock_process(vmem.current_process);
  SignalState sigstate = process_info(vmem.current_process).sigstate;
  switch (sigstate) {
    case Waiting:
    case Pending: {
      int fd = vmem.channels[vmem.current_process].fd_read;
      char buf[1];
      if (lock && sigstate == Waiting) {
        unlock_process(vmem.current_process);
        while (read(fd, buf, 1) != 1) {
        }
        lock_process(vmem.current_process);
      } else {
        while (read(fd, buf, 1) != 1) {
        }
      }
      result = process_info(vmem.current_process).signal;
      process_info(vmem.current_process).sigstate
          = resume ? Waiting : Accepted;
      if (lock)
        unlock_process(vmem.current_process);
      break;
    }
    case Accepted:
      result = process_info(vmem.current_process).signal;
      if (resume)
        process_info(vmem.current_process).sigstate = Waiting;
      if (lock)
        unlock_process(vmem.current_process);
      break;
  }
  return result;
}

void accept_signals() {
  lock_process(vmem.current_process);
  process_info(vmem.current_process).sigstate = Waiting;
  unlock_process(vmem.current_process);
}

ipc_signal_t wait_signal(bool lock) {
  return check_signal(true, lock);
}

} // namespace internals

pid_t fork_process() {
  using namespace internals;
  lock_metapage();
  for (int p = 0; p < MAX_PROCESS; p++) {
    if (vmem.metapage->process_info[p].pid == 0) {
      pid_t pid = fork();
      if (pid < 0) {
        // error
        return -1;
      } else if (pid == 0) {
        // child process
        int parent = vmem.current_process;
        vmem.current_process = p;
        lock_metapage();
        vmem.metapage->process_info[p].pid = getpid();
        unlock_metapage();
        send_signal(parent);
      } else {
        // parent process
        unlock_metapage();
        wait_signal();
        // child has unlocked metapage, so we don't need to.
      }
      return pid;
    }
  }
  unlock_metapage();
  return -1;
}

void Semaphore::post() {
  int wakeup = -1;
  internals::ipc_signal_t sig = 0;
  _lock.lock();
  if (_head == _tail) {
    _value++;
  } else {
    // don't increment value, as we'll pass that on to the next process.
    wakeup = _waiting[_head];
    sig = _signals[_head];
    next(_head);
  }
  _lock.unlock();
  if (wakeup >= 0) {
    internals::send_signal(wakeup, sig);
  }
}

bool Semaphore::try_wait() {
  bool result = false;
  _lock.lock();
  if (_value > 0) {
    _value--;
    result = true;
  }
  _lock.unlock();
  return result;
}

void Semaphore::wait() {
  _lock.lock();
  if (_value > 0) {
    _value--;
    _lock.unlock();
    return;
  }
  _waiting[_tail] = internals::vmem.current_process;
  _signals[_tail] = 0;
  next(_tail);
  _lock.unlock();
  internals::wait_signal();
}

bool Semaphore::start_wait(internals::ipc_signal_t sig) {
  _lock.lock();
  if (_value > 0) {
    if (internals::send_signal(internals::vmem.current_process, sig))
      _value--;
    _lock.unlock();
    return false;
  }
  _waiting[_tail] = internals::vmem.current_process;
  _signals[_tail] = sig;
  next(_tail);
  _lock.unlock();
  return true;
}

bool Semaphore::stop_wait() {
  bool result = false;
  _lock.lock();
  for (int i = _head; i != _tail; next(i)) {
    if (_waiting[i] == internals::vmem.current_process) {
      int last = i;
      next(i);
      while (i != _tail) {
        _waiting[last] = _waiting[i];
        _signals[last] = _signals[i];
        last = i;
        next(i);
      }
      _tail = last;
      result = true;
      break;
    }
  }
  _lock.unlock();
  return result;
}

void EventSet::add(Event *event) {
  event->_next = NULL;
  if (_head == NULL) {
    _head = _tail = event;
  } else {
    _tail->_next = event;
    _tail = event;
  }
}

int EventSet::wait() {
  size_t n = 0;
  for (Event *event = _head; event; event = event->_next) {
    if (!event->start_listen((int) (n++))) {
      break;
    }
  }
  internals::ipc_signal_t result = internals::wait_signal();
  for (Event *event = _head; event; event = event->_next) {
    event->stop_listen();
  }

  return (int) result;
}

} // namespace vspace
#endif
#endif