vspace.cc
1 // https://github.com/rbehrends/vspace
2 #include "vspace.h"
3 #include "kernel/mod2.h"
4 #ifdef HAVE_VSPACE
5 #ifdef HAVE_CPP_THREADS
6 #include <thread>
7 #endif
8 
9 #if defined(__GNUC__) && (__GNUC__<9) &&!defined(__clang__)
10 
11 namespace vspace {
12 namespace internals {
13 
14 size_t config[4]
15     = {METABLOCK_SIZE, MAX_PROCESS, SEGMENT_SIZE, MAX_SEGMENTS};
16 
17 VMem VMem::vmem_global;
18 
19 // offsetof() only works for POD types, so we need to construct
20 // a portable version of it for metapage fields.
21 
22 #define metapageaddr(field) \
23  ((char *) &vmem.metapage->field - (char *) vmem.metapage)
24 
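
The metapageaddr() macro above computes a field's byte offset by pointer subtraction because offsetof() is only guaranteed for standard-layout (POD) types. A minimal, self-contained sketch of the same idiom, using a hypothetical DemoPage struct rather than the real MetaPage:

#include <cstdio>

// Hypothetical stand-in for MetaPage; the real layout lives in vspace.h.
struct DemoPage {
  unsigned long config_header[4];
  int allocator_lock_word;
};

static DemoPage demo_instance;
static DemoPage *demo_page = &demo_instance;

// Same pointer-difference trick as metapageaddr(field).
#define demo_pageaddr(field) \
  ((char *) &demo_page->field - (char *) demo_page)

int main() {
  // Prints the byte offset of allocator_lock_word inside DemoPage,
  // e.g. for use as a file-lock offset.
  std::printf("offset: %ld\n", (long) demo_pageaddr(allocator_lock_word));
  return 0;
}
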
25 size_t VMem::filesize() {
26  struct stat stat;
27  fstat(fd, &stat);
28  return stat.st_size;
29 }
30 
31 Status VMem::init(int fd) {
32  this->fd = fd;
33  for (int i = 0; i < MAX_SEGMENTS; i++)
34  segments[i] = VSeg(NULL);
35  for (int i = 0; i < MAX_PROCESS; i++) {
36  int channel[2];
37  if (pipe(channel) < 0) {
38  for (int j = 0; j < i; j++) {
39  close(channels[j].fd_read);
40  close(channels[j].fd_write);
41  }
42  return Status(ErrOS);
43  }
44  channels[i].fd_read = channel[0];
45  channels[i].fd_write = channel[1];
46  }
47  lock_metapage();
48  init_metapage(filesize() == 0);
49  unlock_metapage();
50  freelist = metapage->freelist;
51  return Status(ErrNone);
52 }
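
VMem::init(int fd) creates one pipe pair per possible process and, if pipe() fails part-way, closes the pairs that were already created before reporting ErrOS. The same create-or-roll-back pattern in isolation (kProcs is a stand-in for MAX_PROCESS):

#include <cstdio>
#include <unistd.h>

const int kProcs = 4;  // stand-in for MAX_PROCESS

struct Channel { int fd_read, fd_write; };

// Returns true on success; on failure, closes everything opened so far
// so no descriptors leak.
bool open_channels(Channel (&chan)[kProcs]) {
  for (int i = 0; i < kProcs; i++) {
    int p[2];
    if (pipe(p) < 0) {
      for (int j = 0; j < i; j++) {
        close(chan[j].fd_read);
        close(chan[j].fd_write);
      }
      return false;
    }
    chan[i].fd_read = p[0];
    chan[i].fd_write = p[1];
  }
  return true;
}

int main() {
  Channel chan[kProcs];
  std::puts(open_channels(chan) ? "channels ready" : "pipe() failed");
  return 0;
}
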
53 
54 Status VMem::init() {
55  FILE *fp = tmpfile();
56  Status result = init(fileno(fp));
57  if (!result.ok())
58  return result;
59  current_process = 0;
60  file_handle = fp;
61  metapage->process_info[0].pid = getpid();
62  return Status(ErrNone);
63 }
64 
65 Status VMem::init(const char *path) {
66  int fd = open(path, O_RDWR | O_CREAT, 0600);
67  if (fd < 0)
68  return Status(ErrFile);
69  init(fd);
70  lock_metapage();
71  // TODO: enter process in meta table
72  unlock_metapage();
73  return Status(ErrNone);
74 }
75 
76 void VMem::deinit() {
77  if (file_handle) {
78  fclose(file_handle);
79  file_handle = NULL;
80  } else {
81  close(fd);
82  }
83  munmap(metapage, METABLOCK_SIZE);
84  metapage = NULL;
85  current_process = -1;
86  freelist = NULL;
87  for (int i = 0; i < MAX_SEGMENTS; i++) {
88  if (segments[i].base) munmap(segments[i].base, SEGMENT_SIZE);
89  segments[i] = NULL;
90  }
91  for (int i = 0; i < MAX_PROCESS; i++) {
92  close(channels[i].fd_read);
93  close(channels[i].fd_write);
94  }
95 }
96 
97 void *VMem::mmap_segment(int seg) {
98  lock_metapage();
99  void *map = mmap(NULL, SEGMENT_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
100  METABLOCK_SIZE + seg * SEGMENT_SIZE);
101  if (map == MAP_FAILED) {
102  // If the mapping fails, the shared state cannot be set up and there
103  // is no way to proceed, so we abort the program.
104  perror("mmap");
105  abort();
106  }
107  unlock_metapage();
108  return map;
109 }
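
mmap_segment maps one SEGMENT_SIZE slice of the backing file at offset METABLOCK_SIZE + seg * SEGMENT_SIZE with MAP_SHARED, so every process that maps the same slice sees the same bytes. A reduced sketch of the same call pattern, with made-up sizes and file name instead of the real constants:

#include <cstdio>
#include <cstring>
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main() {
  const size_t kMetaSize = 4096;    // stand-in for METABLOCK_SIZE (page aligned)
  const size_t kSegSize = 1 << 20;  // stand-in for SEGMENT_SIZE
  int fd = open("demo_vspace.bin", O_RDWR | O_CREAT, 0600);
  if (fd < 0) { std::perror("open"); return 1; }
  // Grow the file so the segment offset is backed by real storage.
  if (ftruncate(fd, kMetaSize + kSegSize) < 0) { std::perror("ftruncate"); return 1; }
  // MAP_SHARED makes writes visible to any other process mapping the
  // same file range -- the property the allocator relies on.
  void *seg = mmap(NULL, kSegSize, PROT_READ | PROT_WRITE, MAP_SHARED,
                   fd, (off_t) kMetaSize);
  if (seg == MAP_FAILED) { std::perror("mmap"); return 1; }
  std::memcpy(seg, "hello", 6);     // written through to the file
  munmap(seg, kSegSize);
  close(fd);
  return 0;
}
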
110 
111 void VMem::add_segment() {
112  int seg = metapage->segment_count++;
113  ftruncate(fd, METABLOCK_SIZE + metapage->segment_count * SEGMENT_SIZE);
114  void *map_addr = mmap_segment(seg);
115  segments[seg] = VSeg(map_addr);
116  Block *top = block_ptr(seg * SEGMENT_SIZE);
117  top->next = freelist[LOG2_SEGMENT_SIZE];
118  top->prev = VADDR_NULL;
119  freelist[LOG2_SEGMENT_SIZE] = seg * SEGMENT_SIZE;
120 }
121 
122 void FastLock::lock() {
123 #ifdef HAVE_CPP_THREADS
124  while (_lock.test_and_set()) {
125  }
126  bool empty = _owner < 0;
127  if (empty) {
128  _owner = vmem.current_process;
129  } else {
130  int p = vmem.current_process;
131  vmem.metapage->process_info[p].next = -1;
132  if (_head < 0)
133  _head = p;
134  else
135  vmem.metapage->process_info[_tail].next = p;
136  _tail = p;
137  }
138  _lock.clear();
139  if (!empty)
140  wait_signal(false);
141 #else
142  lock_file(vmem.fd, _offset);
143 #endif
144 }
145 
146 void FastLock::unlock() {
147 #ifdef HAVE_CPP_THREADS
148  while (_lock.test_and_set()) {
149  }
150  _owner = _head;
151  if (_owner >= 0)
152  _head = vmem.metapage->process_info[_head].next;
153  _lock.clear();
154  if (_owner >= 0)
155  send_signal(_owner, 0, false);
156 #else
157  unlock_file(vmem.fd, _offset);
158 #endif
159 }
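
In the HAVE_CPP_THREADS path, FastLock guards its waiter queue with a std::atomic_flag spin lock and then parks the caller via wait_signal(); unlock() hands the lock to the first queued process and wakes it with send_signal(). A minimal sketch of just the atomic_flag guard, using threads instead of processes and no wait queue:

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

static std::atomic_flag guard = ATOMIC_FLAG_INIT;
static long counter = 0;

void add_many(int n) {
  for (int i = 0; i < n; i++) {
    while (guard.test_and_set()) {  // spin until we own the flag
    }
    counter++;                      // critical section
    guard.clear();                  // release
  }
}

int main() {
  std::vector<std::thread> workers;
  for (int t = 0; t < 4; t++)
    workers.emplace_back(add_many, 100000);
  for (auto &w : workers)
    w.join();
  std::printf("counter = %ld (expected 400000)\n", counter);
  return 0;
}
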
160 
161 static void lock_allocator() {
162  vmem.metapage->allocator_lock.lock();
163 }
164 
165 static void unlock_allocator() {
166  vmem.metapage->allocator_lock.unlock();
167 }
168 
169 static void print_freelists() {
170  for (int i = 0; i <= LOG2_SEGMENT_SIZE; i++) {
171  vaddr_t vaddr = vmem.freelist[i];
172  if (vaddr != VADDR_NULL) {
173  printf("%2d: %ld", i, vaddr);
174  vaddr_t prev = block_ptr(vaddr)->prev;
175  if (prev != VADDR_NULL) {
176  printf("(%ld)", prev);
177  }
178  assert(block_ptr(vaddr)->prev == VADDR_NULL);
179  for (;;) {
180  vaddr_t last_vaddr = vaddr;
181  Block *block = block_ptr(vaddr);
182  vaddr = block->next;
183  if (vaddr == VADDR_NULL)
184  break;
185  printf(" -> %ld", vaddr);
186  vaddr_t prev = block_ptr(vaddr)->prev;
187  if (prev != last_vaddr) {
188  printf("(%ld)", prev);
189  }
190  }
191  printf("\n");
192  }
193  }
194  fflush(stdout);
195 }
196 
197 void vmem_free(vaddr_t vaddr) {
198  lock_allocator();
199  vaddr -= offsetof(Block, data);
200  vmem.ensure_is_mapped(vaddr);
201  size_t segno = vmem.segment_no(vaddr);
202  VSeg seg = vmem.segment(vaddr);
203  segaddr_t addr = vmem.segaddr(vaddr);
204  int level = seg.block_ptr(addr)->level();
205  assert(!seg.is_free(addr));
206  while (level < LOG2_SEGMENT_SIZE) {
207  segaddr_t buddy = find_buddy(addr, level);
208  Block *block = seg.block_ptr(buddy);
209  // is buddy free and at the same level?
210  if (!block->is_free() || block->level() != level)
211  break;
212  // remove buddy from freelist.
213  Block *prev = vmem.block_ptr(block->prev);
214  Block *next = vmem.block_ptr(block->next);
215  block->data[0] = level;
216  if (prev) {
217  assert(prev->next == vmem.vaddr(segno, buddy));
218  prev->next = block->next;
219  } else {
220  // head of freelist.
221  assert(vmem.freelist[level] == vmem.vaddr(segno, buddy));
222  vmem.freelist[level] = block->next;
223  }
224  if (next) {
225  assert(next->prev == vmem.vaddr(segno, buddy));
226  next->prev = block->prev;
227  }
228  // coalesce block with buddy
229  level++;
230  if (buddy < addr)
231  addr = buddy;
232  }
233  // Add coalesced block to free list
234  Block *block = seg.block_ptr(addr);
235  block->prev = VADDR_NULL;
236  block->next = vmem.freelist[level];
237  block->mark_as_free(level);
238  vaddr_t blockaddr = vmem.vaddr(segno, addr);
239  if (block->next != VADDR_NULL)
240  vmem.block_ptr(block->next)->prev = blockaddr;
241  vmem.freelist[level] = blockaddr;
242  unlock_allocator();
243 }
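
vmem_free keeps merging the freed block with its buddy while the buddy is itself free and of the same size class, then pushes the coalesced block onto the freelist of the final level. In a classic buddy allocator the buddy of a block at segment offset addr on level k is addr with bit k flipped; assuming find_buddy() follows that convention, a short worked example of the coalescing walk:

#include <cstdio>

// Assumed buddy rule: flip bit `level` of the segment-relative address.
// (The real rule lives in find_buddy() in vspace.h.)
static size_t buddy_of(size_t addr, int level) {
  return addr ^ ((size_t) 1 << level);
}

int main() {
  // A block of 2^4 = 16 bytes at offset 48: its level-4 buddy is
  // 48 ^ 16 = 32; if that block is free, they merge into a 32-byte block
  // at offset 32 (the lower of the two addresses), and so on up the levels.
  size_t addr = 48;
  for (int level = 4; level <= 6; level++) {
    std::printf("level %d: block %zu, buddy %zu\n",
                level, addr, buddy_of(addr, level));
    if (buddy_of(addr, level) < addr)
      addr = buddy_of(addr, level);  // coalesced block starts at the lower address
  }
  return 0;
}
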
244 
245 vaddr_t vmem_alloc(size_t size) {
246  lock_allocator();
247  size_t alloc_size = size + offsetof(Block, data);
248  int level = find_level(alloc_size);
249  int flevel = level;
250  while (flevel < LOG2_SEGMENT_SIZE && vmem.freelist[flevel] == VADDR_NULL)
251  flevel++;
252  if (vmem.freelist[flevel] == VADDR_NULL) {
253  vmem.add_segment();
254  }
256  while (flevel > level) {
257  // get and split a block
258  vaddr_t blockaddr = vmem.freelist[flevel];
259  assert((blockaddr & ((1 << flevel) - 1)) == 0);
260  Block *block = vmem.block_ptr(blockaddr);
261  vmem.freelist[flevel] = block->next;
262  if (vmem.freelist[flevel] != VADDR_NULL)
263  vmem.block_ptr(vmem.freelist[flevel])->prev = VADDR_NULL;
264  vaddr_t blockaddr2 = blockaddr + (1 << (flevel - 1));
265  Block *block2 = vmem.block_ptr(blockaddr2);
266  flevel--;
267  block2->next = vmem.freelist[flevel];
268  block2->prev = blockaddr;
269  block->next = blockaddr2;
270  block->prev = VADDR_NULL;
271  // block->prev == VADDR_NULL already.
272  vmem.freelist[flevel] = blockaddr;
273  }
275  Block *block = vmem.block_ptr(vmem.freelist[level]);
276  vaddr_t vaddr = vmem.freelist[level];
277  vaddr_t result = vaddr + offsetof(Block, data);
278  vmem.freelist[level] = block->next;
279  if (block->next != VADDR_NULL)
280  vmem.block_ptr(block->next)->prev = VADDR_NULL;
281  block->mark_as_allocated(vaddr, level);
282  unlock_allocator();
283  memset(block->data, 0, size);
284  return result;
285 }
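
vmem_alloc rounds the request (payload plus the Block header) up to a power-of-two size class and, if that class's freelist is empty, repeatedly splits the next larger free block. A small sketch of the rounding step, assuming find_level() picks the smallest level whose block size holds alloc_size (the 8-byte minimum below is made up for illustration):

#include <cstdio>

// Assumed behaviour of find_level(): smallest k with (1 << k) >= size,
// clamped below by some minimum block size (8 bytes here).
static int demo_find_level(size_t size) {
  int level = 3;                    // 2^3 = 8-byte minimum block
  while (((size_t) 1 << level) < size)
    level++;
  return level;
}

int main() {
  const size_t header = 16;         // stand-in for offsetof(Block, data)
  size_t requests[] = { 1, 24, 100, 4000 };
  for (size_t r : requests) {
    int level = demo_find_level(r + header);
    std::printf("request %zu -> level %d (block of %zu bytes)\n",
                r, level, (size_t) 1 << level);
  }
  return 0;
}
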
286 
287 void init_flock_struct(
288  struct flock &lock_info, size_t offset, size_t len, bool lock) {
289  lock_info.l_start = offset;
290  lock_info.l_len = len;
291  lock_info.l_pid = 0;
292  lock_info.l_type = lock ? F_WRLCK : F_UNLCK;
293  lock_info.l_whence = SEEK_SET;
294 }
295 
296 void lock_file(int fd, size_t offset, size_t len) {
297  struct flock lock_info;
298  init_flock_struct(lock_info, offset, len, true);
299  fcntl(fd, F_SETLKW, &lock_info);
300 }
301 
302 void unlock_file(int fd, size_t offset, size_t len) {
303  struct flock lock_info;
304  init_flock_struct(lock_info, offset, len, false);
305  fcntl(fd, F_SETLKW, &lock_info);
306 }
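
lock_file and unlock_file use POSIX record locks (fcntl with F_SETLKW), which lock a byte range of the backing file and are therefore shared between all processes that have the file open. A standalone example that locks and releases the first byte of a scratch file:

#include <cstdio>
#include <fcntl.h>
#include <unistd.h>

static void set_range_lock(int fd, bool lock) {
  struct flock info;
  info.l_start = 0;          // offset of the locked range
  info.l_len = 1;            // lock a single byte
  info.l_pid = 0;
  info.l_type = lock ? F_WRLCK : F_UNLCK;
  info.l_whence = SEEK_SET;
  // F_SETLKW blocks until the lock can be acquired.
  fcntl(fd, F_SETLKW, &info);
}

int main() {
  int fd = open("demo_lock.bin", O_RDWR | O_CREAT, 0600);
  if (fd < 0) { std::perror("open"); return 1; }
  set_range_lock(fd, true);
  std::puts("byte 0 locked; another process would block here");
  set_range_lock(fd, false);
  close(fd);
  return 0;
}
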
307 
308 void lock_metapage() {
309  lock_file(vmem.fd, 0);
310 }
311 
312 void unlock_metapage() {
313  unlock_file(vmem.fd, 0);
314 }
315 
316 void init_metapage(bool create) {
317  if (create)
318  ftruncate(vmem.fd, METABLOCK_SIZE);
319  vmem.metapage = (MetaPage *) mmap(
320  NULL, METABLOCK_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, vmem.fd, 0);
321  if (create) {
322  memcpy(vmem.metapage->config_header, config, sizeof(config));
323  for (int i = 0; i <= LOG2_SEGMENT_SIZE; i++) {
324  vmem.metapage->freelist[i] = VADDR_NULL;
325  }
326  vmem.metapage->segment_count = 0;
327  vmem.metapage->allocator_lock = FastLock(metapageaddr(allocator_lock));
328  } else {
329  assert(memcmp(vmem.metapage->config_header, config, sizeof(config)) == 0);
330  }
331 }
332 
333 static void lock_process(int processno) {
334  lock_file(vmem.fd,
335  metapageaddr(process_info)
336  + sizeof(ProcessInfo) * vmem.current_process);
337 }
338 
339 static void unlock_process(int processno) {
340  unlock_file(vmem.fd,
341  metapageaddr(process_info)
342  + sizeof(ProcessInfo) * vmem.current_process);
343 }
344 
345 static ProcessInfo &process_info(int processno) {
346  return vmem.metapage->process_info[processno];
347 }
348 
349 bool send_signal(int processno, ipc_signal_t sig, bool lock) {
350  if (lock)
351  lock_process(processno);
352  if (process_info(processno).sigstate != Waiting) {
353  unlock_process(processno);
354  return false;
355  }
356  if (processno == vmem.current_process) {
357  process_info(processno).sigstate = Accepted;
358  process_info(processno).signal = sig;
359  } else {
360  process_info(processno).sigstate = Pending;
361  process_info(processno).signal = sig;
362  int fd = vmem.channels[processno].fd_write;
363  char buf[1] = { 0 };
364  while (write(fd, buf, 1) != 1) {
365  }
366  }
367  if (lock)
368  unlock_process(processno);
369  return true;
370 }
371 
372 ipc_signal_t check_signal(bool resume, bool lock) {
373  ipc_signal_t result;
374  if (lock)
375  lock_process(vmem.current_process);
376  SignalState sigstate = process_info(vmem.current_process).sigstate;
377  switch (sigstate) {
378  case Waiting:
379  case Pending: {
380  int fd = vmem.channels[vmem.current_process].fd_read;
381  char buf[1];
382  if (lock && sigstate == Waiting) {
383  unlock_process(vmem.current_process);
384  while (read(fd, buf, 1) != 1) {
385  }
386  lock_process(vmem.current_process);
387  } else {
388  while (read(fd, buf, 1) != 1) {
389  }
390  }
391  result = process_info(vmem.current_process).signal;
392  process_info(vmem.current_process).sigstate
393  = resume ? Waiting : Accepted;
394  if (lock)
395  unlock_process(vmem.current_process);
396  break;
397  }
398  case Accepted:
399  result = process_info(vmem.current_process).signal;
400  if (resume)
401  process_info(vmem.current_process).sigstate = Waiting;
402  if (lock)
403  unlock_process(vmem.current_process);
404  break;
405  }
406  return result;
407 }
408 
409 void accept_signals() {
410  lock_process(vmem.current_process);
411  process_info(vmem.current_process).sigstate = Waiting;
412  unlock_process(vmem.current_process);
413 }
414 
415 ipc_signal_t wait_signal(bool lock) {
416  return check_signal(true, lock);
417 }
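
Together, send_signal and check_signal implement a small cross-process wakeup: the sender records the signal value in the metapage and writes a single byte into the receiver's pipe, and the receiver blocks in read() until that byte arrives. The same rendezvous in isolation, with a plain pipe and fork:

#include <cstdio>
#include <sys/wait.h>
#include <unistd.h>

int main() {
  int channel[2];
  if (pipe(channel) < 0) { std::perror("pipe"); return 1; }
  pid_t pid = fork();
  if (pid == 0) {
    // Child: block until the parent "signals" us with one byte.
    char buf[1];
    while (read(channel[0], buf, 1) != 1) {
    }
    std::puts("child: woke up");
    _exit(0);
  }
  // Parent: do some work, then wake the child.
  sleep(1);
  char buf[1] = { 0 };
  while (write(channel[1], buf, 1) != 1) {
  }
  waitpid(pid, NULL, 0);
  return 0;
}
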
418 
419 } // namespace internals
420 
421 pid_t fork_process() {
422  using namespace internals;
423  lock_metapage();
424  for (int p = 0; p < MAX_PROCESS; p++) {
425  if (vmem.metapage->process_info[p].pid == 0) {
426  pid_t pid = fork();
427  if (pid < 0) {
428  // error
429  return -1;
430  } else if (pid == 0) {
431  // child process
432  int parent = vmem.current_process;
433  vmem.current_process = p;
434  lock_metapage();
435  vmem.metapage->process_info[p].pid = getpid();
436  unlock_metapage();
437  send_signal(parent);
438  } else {
439  // parent process
440  unlock_metapage();
441  wait_signal();
442  // child has unlocked metapage, so we don't need to.
443  }
444  return pid;
445  }
446  }
447  unlock_metapage();
448  return -1;
449 }
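
fork_process picks a free slot in the metapage process table, forks, and synchronizes parent and child through the metapage lock and a signal, so both sides return only once the child has registered its pid. A hedged usage sketch; it assumes calling internals::vmem.init() directly is an acceptable way to set up a temporary shared area in a toy program (vspace.h may provide a higher-level entry point):

#include <cstdio>
#include <sys/wait.h>
#include <unistd.h>
#include "vspace.h"

int main() {
  using namespace vspace;
  internals::vmem.init();        // map a temporary backing file (tmpfile())
  pid_t pid = fork_process();    // 0 in the child, the child's pid in the parent
  if (pid == 0) {
    std::printf("child: registered in the shared process table\n");
    _exit(0);
  } else if (pid > 0) {
    waitpid(pid, NULL, 0);       // parent: wait for the child to finish
  } else {
    std::printf("fork failed or no free process slot\n");
  }
  return 0;
}
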
450 
451 void Semaphore::post() {
452  int wakeup = -1;
453  internals::ipc_signal_t sig = 0;
454  _lock.lock();
455  if (_head == _tail) {
456  _value++;
457  } else {
458  // don't increment value, as we'll pass that on to the next process.
459  wakeup = _waiting[_head];
460  sig = _signals[_head];
461  next(_head);
462  }
463  _lock.unlock();
464  if (wakeup >= 0) {
465  internals::send_signal(wakeup, sig);
466  }
467 }
468 
469 bool Semaphore::try_wait() {
470  bool result = false;
471  _lock.lock();
472  if (_value > 0) {
473  _value--;
474  result = true;
475  }
476  _lock.unlock();
477  return result;
478 }
479 
480 void Semaphore::wait() {
481  _lock.lock();
482  if (_value > 0) {
483  _value--;
484  _lock.unlock();
485  return;
486  }
487  _waiting[_tail] = internals::vmem.current_process;
488  _signals[_tail] = 0;
489  next(_tail);
490  _lock.unlock();
491  internals::wait_signal();
492 }
493 
494 bool Semaphore::start_wait(internals::ipc_signal_t sig) {
495  _lock.lock();
496  if (_value > 0) {
497  if (internals::send_signal(internals::vmem.current_process, sig))
498  _value--;
499  _lock.unlock();
500  return false;
501  }
502  _waiting[_tail] = internals::vmem.current_process;
503  _signals[_tail] = sig;
504  next(_tail);
505  _lock.unlock();
506  return true;
507 }
508 
509 bool Semaphore::stop_wait() {
510  bool result = false;
511  _lock.lock();
512  for (int i = _head; i != _tail; next(i)) {
513  if (_waiting[i] == internals::vmem.current_process) {
514  int last = i;
515  next(i);
516  while (i != _tail) {
517  _waiting[last] = _waiting[i];
518  _signals[last] = _signals[i];
519  last = i;
520  next(i);
521  }
522  _tail = last;
523  result = true;
524  break;
525  }
526  }
527  _lock.unlock();
528  return result;
529 }
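
Semaphore keeps its waiters in a fixed-size ring indexed by _head and _tail: wait() and start_wait() append the calling process, post() pops the oldest waiter and signals it, and stop_wait() compacts the ring when a waiter gives up. A self-contained analogue of that ring, assuming next() simply advances an index modulo the queue capacity as its uses above suggest:

#include <cstdio>

// Self-contained analogue of the Semaphore wait queue: a fixed-size ring
// of process numbers indexed by head/tail.
const int kSlots = 8;               // stand-in for MAX_PROCESS + 1
int waiting[kSlots];
int head = 0, tail = 0;

void next(int &index) {
  index = (index + 1) % kSlots;
}

void enqueue(int proc) {            // what wait()/start_wait() do
  waiting[tail] = proc;
  next(tail);
}

int dequeue() {                     // what post() does when someone waits
  if (head == tail)
    return -1;                      // empty: post() just increments _value
  int proc = waiting[head];
  next(head);
  return proc;
}

int main() {
  enqueue(3);
  enqueue(5);
  std::printf("wake %d, then %d, then %d\n", dequeue(), dequeue(), dequeue());
  return 0;
}
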
530 
531 void EventSet::add(Event *event) {
532  event->_next = NULL;
533  if (_head == NULL) {
534  _head = _tail = event;
535  } else {
536  _tail->_next = event;
537  _tail = event;
538  }
539 }
540 
541 int EventSet::wait() {
542  size_t n = 0;
543  for (Event *event = _head; event; event = event->_next) {
544  if (!event->start_listen((int) (n++))) {
545  break;
546  }
547  }
548  internals::ipc_signal_t result = internals::check_signal();
549  for (Event *event = _head; event; event = event->_next) {
550  event->stop_listen();
551  }
552  internals::accept_signals();
553  return (int) result;
554 }
555 
556 } // namespace vspace
557 #else
558 #include <cstdlib>
559 #include <unistd.h>
560 #include <sys/mman.h>
561 #include <sys/stat.h>
562 
563 
564 namespace vspace {
565 namespace internals {
566 
567 size_t config[4]
568     = {METABLOCK_SIZE, MAX_PROCESS, SEGMENT_SIZE, MAX_SEGMENTS};
569 
570 VMem VMem::vmem_global;
571 
572 // offsetof() only works for POD types, so we need to construct
573 // a portable version of it for metapage fields.
574 
575 #define metapageaddr(field) \
576  ((char *) &vmem.metapage->field - (char *) vmem.metapage)
577 
578 size_t VMem::filesize() {
579  struct stat stat;
580  fstat(fd, &stat);
581  return stat.st_size;
582 }
583 
584 Status VMem::init(int fd) {
585  this->fd = fd;
586  for (int i = 0; i < MAX_SEGMENTS; i++)
587  segments[i] = VSeg(NULL);
588  for (int i = 0; i < MAX_PROCESS; i++) {
589  int channel[2];
590  if (pipe(channel) < 0) {
591  for (int j = 0; j < i; j++) {
592  close(channels[j].fd_read);
593  close(channels[j].fd_write);
594  }
595  return Status(ErrOS);
596  }
597  channels[i].fd_read = channel[0];
598  channels[i].fd_write = channel[1];
599  }
600  lock_metapage();
601  init_metapage(filesize() == 0);
602  unlock_metapage();
603  freelist = metapage->freelist;
604  return Status(ErrNone);
605 }
606 
607 Status VMem::init() {
608  FILE *fp = tmpfile();
609  Status result = init(fileno(fp));
610  if (!result.ok())
611  return result;
612  current_process = 0;
613  file_handle = fp;
614  metapage->process_info[0].pid = getpid();
615  return Status(ErrNone);
616 }
617 
618 Status VMem::init(const char *path) {
619  int fd = open(path, O_RDWR | O_CREAT, 0600);
620  if (fd < 0)
621  return Status(ErrFile);
622  init(fd);
623  lock_metapage();
624  // TODO: enter process in meta table
625  unlock_metapage();
626  return Status(ErrNone);
627 }
628 
629 void VMem::deinit() {
630  if (file_handle) {
631  fclose(file_handle);
632  file_handle = NULL;
633  } else {
634  close(fd);
635  }
636  munmap(metapage, METABLOCK_SIZE);
637  metapage = NULL;
638  current_process = -1;
639  freelist = NULL;
640  for (int i = 0; i < MAX_SEGMENTS; i++) {
641  if (!segments[i].is_free())
642  munmap(segments[i].base, SEGMENT_SIZE);
643  segments[i] = VSeg(NULL);
644  }
645  for (int i = 0; i < MAX_PROCESS; i++) {
646  close(channels[i].fd_read);
647  close(channels[i].fd_write);
648  }
649 }
650 
651 void *VMem::mmap_segment(int seg) {
652  lock_metapage();
653  void *map = mmap(NULL, SEGMENT_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
654  METABLOCK_SIZE + seg * SEGMENT_SIZE);
655  if (map == MAP_FAILED) {
656  // If the mapping fails, the shared state cannot be set up and there
657  // is no way to proceed, so we abort the program.
658  perror("mmap");
659  abort();
660  }
661  unlock_metapage();
662  return map;
663 }
664 
665 void VMem::add_segment() {
666  int seg = metapage->segment_count++;
667  ftruncate(fd, METABLOCK_SIZE + metapage->segment_count * SEGMENT_SIZE);
668  void *map_addr = mmap_segment(seg);
669  segments[seg] = VSeg(map_addr);
670  Block *top = block_ptr(seg * SEGMENT_SIZE);
671  top->next = freelist[LOG2_SEGMENT_SIZE];
672  top->prev = VADDR_NULL;
673  freelist[LOG2_SEGMENT_SIZE] = seg * SEGMENT_SIZE;
674 }
675 
676 void FastLock::lock() {
677 #ifdef HAVE_CPP_THREADS
678  while (_lock.test_and_set()) {
679  }
680  bool empty = _owner < 0;
681  if (empty) {
682  _owner = vmem.current_process;
683  } else {
684  int p = vmem.current_process;
685  vmem.metapage->process_info[p].next = -1;
686  if (_head < 0)
687  _head = p;
688  else
689  vmem.metapage->process_info[_tail].next = p;
690  _tail = p;
691  }
692  _lock.clear();
693  if (!empty)
694  wait_signal(false);
695 #else
696  lock_file(vmem.fd, _offset);
697 #endif
698 }
699 
700 void FastLock::unlock() {
701 #ifdef HAVE_CPP_THREADS
702  while (_lock.test_and_set()) {
703  }
704  _owner = _head;
705  if (_owner >= 0)
706  _head = vmem.metapage->process_info[_head].next;
707  _lock.clear();
708  if (_owner >= 0)
709  send_signal(_owner, 0, false);
710 #else
711  unlock_file(vmem.fd, _offset);
712 #endif
713 }
714 
715 static void lock_allocator() {
716  vmem.metapage->allocator_lock.lock();
717 }
718 
719 static void unlock_allocator() {
720  vmem.metapage->allocator_lock.unlock();
721 }
722 
723 static void print_freelists() {
724  for (int i = 0; i <= LOG2_SEGMENT_SIZE; i++) {
725  vaddr_t vaddr = vmem.freelist[i];
726  if (vaddr != VADDR_NULL) {
727  std::printf("%2d: %ld", i, vaddr);
728  vaddr_t prev = block_ptr(vaddr)->prev;
729  if (prev != VADDR_NULL) {
730  std::printf("(%ld)", prev);
731  }
732  assert(block_ptr(vaddr)->prev == VADDR_NULL);
733  for (;;) {
734  vaddr_t last_vaddr = vaddr;
735  Block *block = block_ptr(vaddr);
736  vaddr = block->next;
737  if (vaddr == VADDR_NULL)
738  break;
739  std::printf(" -> %ld", vaddr);
740  vaddr_t prev = block_ptr(vaddr)->prev;
741  if (prev != last_vaddr) {
742  std::printf("(%ld)", prev);
743  }
744  }
745  std::printf("\n");
746  }
747  }
748  std::fflush(stdout);
749 }
750 
751 void vmem_free(vaddr_t vaddr) {
752  lock_allocator();
753  vaddr -= offsetof(Block, data);
754  vmem.ensure_is_mapped(vaddr);
755  size_t segno = vmem.segment_no(vaddr);
756  VSeg seg = vmem.segment(vaddr);
757  segaddr_t addr = vmem.segaddr(vaddr);
758  int level = seg.block_ptr(addr)->level();
759  assert(!seg.is_free(addr));
760  while (level < LOG2_SEGMENT_SIZE) {
761  segaddr_t buddy = find_buddy(addr, level);
762  Block *block = seg.block_ptr(buddy);
763  // is buddy free and at the same level?
764  if (!block->is_free() || block->level() != level)
765  break;
766  // remove buddy from freelist.
767  Block *prev = vmem.block_ptr(block->prev);
768  Block *next = vmem.block_ptr(block->next);
769  block->data[0] = level;
770  if (prev) {
771  assert(prev->next == vmem.vaddr(segno, buddy));
772  prev->next = block->next;
773  } else {
774  // head of freelist.
775  assert(vmem.freelist[level] == vmem.vaddr(segno, buddy));
776  vmem.freelist[level] = block->next;
777  }
778  if (next) {
779  assert(next->prev == vmem.vaddr(segno, buddy));
780  next->prev = block->prev;
781  }
782  // coalesce block with buddy
783  level++;
784  if (buddy < addr)
785  addr = buddy;
786  }
787  // Add coalesced block to free list
788  Block *block = seg.block_ptr(addr);
789  block->prev = VADDR_NULL;
790  block->next = vmem.freelist[level];
791  block->mark_as_free(level);
792  vaddr_t blockaddr = vmem.vaddr(segno, addr);
793  if (block->next != VADDR_NULL)
794  vmem.block_ptr(block->next)->prev = blockaddr;
795  vmem.freelist[level] = blockaddr;
796  unlock_allocator();
797 }
798 
799 vaddr_t vmem_alloc(size_t size) {
800  lock_allocator();
801  size_t alloc_size = size + offsetof(Block, data);
802  int level = find_level(alloc_size);
803  int flevel = level;
804  while (flevel < LOG2_SEGMENT_SIZE && vmem.freelist[flevel] == VADDR_NULL)
805  flevel++;
806  if (vmem.freelist[flevel] == VADDR_NULL) {
807  vmem.add_segment();
808  }
810  while (flevel > level) {
811  // get and split a block
812  vaddr_t blockaddr = vmem.freelist[flevel];
813  assert((blockaddr & ((1 << flevel) - 1)) == 0);
814  Block *block = vmem.block_ptr(blockaddr);
815  vmem.freelist[flevel] = block->next;
816  if (vmem.freelist[flevel] != VADDR_NULL)
817  vmem.block_ptr(vmem.freelist[flevel])->prev = VADDR_NULL;
818  vaddr_t blockaddr2 = blockaddr + (1 << (flevel - 1));
819  Block *block2 = vmem.block_ptr(blockaddr2);
820  flevel--;
821  block2->next = vmem.freelist[flevel];
822  block2->prev = blockaddr;
823  block->next = blockaddr2;
824  block->prev = VADDR_NULL;
825  // block->prev == VADDR_NULL already.
826  vmem.freelist[flevel] = blockaddr;
827  }
829  Block *block = vmem.block_ptr(vmem.freelist[level]);
830  vaddr_t vaddr = vmem.freelist[level];
831  vaddr_t result = vaddr + offsetof(Block, data);
832  vmem.freelist[level] = block->next;
833  if (block->next != VADDR_NULL)
834  vmem.block_ptr(block->next)->prev = VADDR_NULL;
835  block->mark_as_allocated(vaddr, level);
836  unlock_allocator();
837  memset(block->data, 0, size);
838  return result;
839 }
840 
841 void init_flock_struct(
842  struct flock &lock_info, size_t offset, size_t len, bool lock) {
843  lock_info.l_start = offset;
844  lock_info.l_len = len;
845  lock_info.l_pid = 0;
846  lock_info.l_type = lock ? F_WRLCK : F_UNLCK;
847  lock_info.l_whence = SEEK_SET;
848 }
849 
850 void lock_file(int fd, size_t offset, size_t len) {
851  struct flock lock_info;
852  init_flock_struct(lock_info, offset, len, true);
853  fcntl(fd, F_SETLKW, &lock_info);
854 }
855 
856 void unlock_file(int fd, size_t offset, size_t len) {
857  struct flock lock_info;
858  init_flock_struct(lock_info, offset, len, false);
859  fcntl(fd, F_SETLKW, &lock_info);
860 }
861 
862 void lock_metapage() {
863  lock_file(vmem.fd, 0);
864 }
865 
866 void unlock_metapage() {
867  unlock_file(vmem.fd, 0);
868 }
869 
870 void init_metapage(bool create) {
871  if (create)
872  ftruncate(vmem.fd, METABLOCK_SIZE);
873  vmem.metapage = (MetaPage *) mmap(
874  NULL, METABLOCK_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, vmem.fd, 0);
875  if (create) {
876  std::memcpy(vmem.metapage->config_header, config, sizeof(config));
877  for (int i = 0; i <= LOG2_SEGMENT_SIZE; i++) {
878  vmem.metapage->freelist[i] = VADDR_NULL;
879  }
880  vmem.metapage->segment_count = 0;
881  vmem.metapage->allocator_lock = FastLock(metapageaddr(allocator_lock));
882  } else {
883  assert(std::memcmp(vmem.metapage->config_header, config,
884  sizeof(config)) == 0);
885  }
886 }
887 
888 static void lock_process(int processno) {
889  lock_file(vmem.fd,
890  metapageaddr(process_info)
891  + sizeof(ProcessInfo) * vmem.current_process);
892 }
893 
894 static void unlock_process(int processno) {
895  unlock_file(vmem.fd,
896  metapageaddr(process_info)
897  + sizeof(ProcessInfo) * vmem.current_process);
898 }
899 
900 static ProcessInfo &process_info(int processno) {
901  return vmem.metapage->process_info[processno];
902 }
903 
904 bool send_signal(int processno, ipc_signal_t sig, bool lock) {
905  if (lock)
906  lock_process(processno);
907  if (process_info(processno).sigstate != Waiting) {
908  unlock_process(processno);
909  return false;
910  }
911  if (processno == vmem.current_process) {
912  process_info(processno).sigstate = Accepted;
913  process_info(processno).signal = sig;
914  } else {
915  process_info(processno).sigstate = Pending;
916  process_info(processno).signal = sig;
917  int fd = vmem.channels[processno].fd_write;
918  char buf[1] = { 0 };
919  while (write(fd, buf, 1) != 1) {
920  }
921  }
922  if (lock)
923  unlock_process(processno);
924  return true;
925 }
926 
927 ipc_signal_t check_signal(bool resume, bool lock) {
928  ipc_signal_t result;
929  if (lock)
930  lock_process(vmem.current_process);
931  SignalState sigstate = process_info(vmem.current_process).sigstate;
932  switch (sigstate) {
933  case Waiting:
934  case Pending: {
935  int fd = vmem.channels[vmem.current_process].fd_read;
936  char buf[1];
937  if (lock && sigstate == Waiting) {
938  unlock_process(vmem.current_process);
939  while (read(fd, buf, 1) != 1) {
940  }
941  lock_process(vmem.current_process);
942  } else {
943  while (read(fd, buf, 1) != 1) {
944  }
945  }
946  result = process_info(vmem.current_process).signal;
947  process_info(vmem.current_process).sigstate
948  = resume ? Waiting : Accepted;
949  if (lock)
950  unlock_process(vmem.current_process);
951  break;
952  }
953  case Accepted:
954  result = process_info(vmem.current_process).signal;
955  if (resume)
956  process_info(vmem.current_process).sigstate = Waiting;
957  if (lock)
958  unlock_process(vmem.current_process);
959  break;
960  }
961  return result;
962 }
963 
964 void accept_signals() {
965  lock_process(vmem.current_process);
966  process_info(vmem.current_process).sigstate = Waiting;
967  unlock_process(vmem.current_process);
968 }
969 
970 ipc_signal_t wait_signal(bool lock) {
971  return check_signal(true, lock);
972 }
973 
974 } // namespace internals
975 
976 pid_t fork_process() {
977  using namespace internals;
978  lock_metapage();
979  for (int p = 0; p < MAX_PROCESS; p++) {
980  if (vmem.metapage->process_info[p].pid == 0) {
981  pid_t pid = fork();
982  if (pid < 0) {
983  // error
984  return -1;
985  } else if (pid == 0) {
986  // child process
987  int parent = vmem.current_process;
988  vmem.current_process = p;
989  lock_metapage();
990  vmem.metapage->process_info[p].pid = getpid();
991  unlock_metapage();
992  send_signal(parent);
993  } else {
994  // parent process
995  unlock_metapage();
996  wait_signal();
997  // child has unlocked metapage, so we don't need to.
998  }
999  return pid;
1000  }
1001  }
1002  unlock_metapage();
1003  return -1;
1004 }
1005 
1006 void Semaphore::post() {
1007  int wakeup = -1;
1008  internals::ipc_signal_t sig = 0;
1009  _lock.lock();
1010  if (_head == _tail) {
1011  _value++;
1012  } else {
1013  // don't increment value, as we'll pass that on to the next process.
1014  wakeup = _waiting[_head];
1015  sig = _signals[_head];
1016  next(_head);
1017  }
1018  _lock.unlock();
1019  if (wakeup >= 0) {
1020  internals::send_signal(wakeup, sig);
1021  }
1022 }
1023 
1024 bool Semaphore::try_wait() {
1025  bool result = false;
1026  _lock.lock();
1027  if (_value > 0) {
1028  _value--;
1029  result = true;
1030  }
1031  _lock.unlock();
1032  return result;
1033 }
1034 
1035 void Semaphore::wait() {
1036  _lock.lock();
1037  if (_value > 0) {
1038  _value--;
1039  _lock.unlock();
1040  return;
1041  }
1042  _waiting[_tail] = internals::vmem.current_process;
1043  _signals[_tail] = 0;
1044  next(_tail);
1045  _lock.unlock();
1046  internals::wait_signal();
1047 }
1048 
1049 bool Semaphore::start_wait(internals::ipc_signal_t sig) {
1050  _lock.lock();
1051  if (_value > 0) {
1052  if (internals::send_signal(internals::vmem.current_process, sig))
1053  _value--;
1054  _lock.unlock();
1055  return false;
1056  }
1057  _waiting[_tail] = internals::vmem.current_process;
1058  _signals[_tail] = sig;
1059  next(_tail);
1060  _lock.unlock();
1061  return true;
1062 }
1063 
1064 bool Semaphore::stop_wait() {
1065  bool result = false;
1066  _lock.lock();
1067  for (int i = _head; i != _tail; next(i)) {
1068  if (_waiting[i] == internals::vmem.current_process) {
1069  int last = i;
1070  next(i);
1071  while (i != _tail) {
1072  _waiting[last] = _waiting[i];
1073  _signals[last] = _signals[i];
1074  last = i;
1075  next(i);
1076  }
1077  _tail = last;
1078  result = true;
1079  break;
1080  }
1081  }
1082  _lock.unlock();
1083  return result;
1084 }
1085 
1086 void EventSet::add(Event *event) {
1087  event->_next = NULL;
1088  if (_head == NULL) {
1089  _head = _tail = event;
1090  } else {
1091  _tail->_next = event;
1092  _tail = event;
1093  }
1094 }
1095 
1096 int EventSet::wait() {
1097  size_t n = 0;
1098  for (Event *event = _head; event; event = event->_next) {
1099  if (!event->start_listen((int) (n++))) {
1100  break;
1101  }
1102  }
1103  internals::ipc_signal_t result = internals::check_signal();
1104  for (Event *event = _head; event; event = event->_next) {
1105  event->stop_listen();
1106  }
1107  internals::accept_signals();
1108  return (int) result;
1109 }
1110 
1111 } // namespace vspace
1112 #endif
1113 #endif