My Project
Loading...
Searching...
No Matches
vspace.cc
Go to the documentation of this file.
1// https://github.com/rbehrends/vspace
2#include "vspace.h"
3#include "kernel/mod2.h"
4#ifdef HAVE_VSPACE
5#ifdef HAVE_CPP_THREADS
6#include <thread>
7#endif
8#ifdef HAVE_POLL
9#include <poll.h>
10#endif
11#include <cstddef>
12#include "reporter/si_signals.h"
13
14#if defined(__GNUC__) && (__GNUC__<9) &&!defined(__clang__)
15
16namespace vspace {
17namespace internals {
18
19size_t config[4]
21
23
24// offsetof() only works for POD types, so we need to construct
25// a portable version of it for metapage fields.
26
27#define metapageaddr(field) \
28 ((char *) &vmem.metapage->field - (char *) vmem.metapage)
29
30size_t VMem::filesize() {
31 struct stat stat;
32 fstat(fd, &stat);
33 return stat.st_size;
34}
35
// Attach this VMem instance to the backing file descriptor `fd`:
// clear the segment table, create one signalling pipe per potential
// process, and map/initialize the shared metapage.
Status VMem::init(int fd) {
  this->fd = fd;
  for (int i = 0; i < MAX_SEGMENTS; i++)
    segments[i] = VSeg(NULL);
  for (int i = 0; i < MAX_PROCESS; i++) {
    int channel[2];
    if (pipe(channel) < 0) {
      // pipe() failed: close the pipes created so far and report an
      // OS-level error.
      for (int j = 0; j < i; j++) {
        close(channels[j].fd_read);
        close(channels[j].fd_write);
      }
      return Status(ErrOS);
    }
    channels[i].fd_read = channel[0];
    channels[i].fd_write = channel[1];
  }
  // A zero-length backing file has no metapage yet, so request creation.
  // NOTE(review): this listing drops two source lines around the call
  // below (presumably lock_metapage()/unlock_metapage()) - confirm
  // against the original vspace.cc.
  init_metapage(filesize() == 0);
  freelist = metapage->freelist;
  return Status(ErrNone);
}
58
// Initialize using an anonymous temporary file as backing store.
// NOTE(review): `fp` is leaked if init(fileno(fp)) fails; also the
// gcc>=9 copy of this function additionally sets current_process = 0
// and file_handle = fp here - those lines are missing from this
// listing, confirm against the original source.
Status VMem::init() {
  FILE *fp = tmpfile();
  Status result = init(fileno(fp));
  if (!result.ok())
    return result;
  // Slot 0 belongs to the creating process.
  metapage->process_info[0].pid = getpid();
  return Status(ErrNone);
}
69
// Initialize from a named backing file, creating it if necessary.
// NOTE(review): the Status returned by init(fd) is ignored, and this
// listing drops lines around the TODO (presumably the metapage
// lock/unlock pair) - confirm against the original source.
Status VMem::init(const char *path) {
  int fd = open(path, O_RDWR | O_CREAT, 0600);
  if (fd < 0)
    return Status(ErrFile);
  init(fd);
  // TODO: enter process in meta table
  return Status(ErrNone);
}
80
// Release all resources held by this VMem: the backing file (stream
// or raw descriptor), the metapage mapping, every mapped segment and
// the per-process signalling pipes.
void VMem::deinit() {
  if (file_handle) {
    // Backing store was a tmpfile() stream; fclose also closes fd.
    fclose(file_handle);
    // NOTE(review): the gcc>=9 copy resets file_handle here; a line
    // appears to be missing from this listing.
  } else {
    close(fd);
  }
  munmap(metapage, METABLOCK_SIZE);
  metapage = NULL;
  current_process = -1;
  freelist = NULL;
  for (int i = 0; i < MAX_SEGMENTS; i++) {
    if (segments[i].base) munmap(segments[i].base, SEGMENT_SIZE);
    segments[i] = NULL;
  }
  for (int i = 0; i < MAX_PROCESS; i++) {
    close(channels[i].fd_read);
    close(channels[i].fd_write);
  }
}
101
// Map segment `seg` of the backing file into this process's address
// space.
// NOTE(review): this listing drops the final argument line of the
// mmap() call (the file offset, METABLOCK_SIZE + seg * SEGMENT_SIZE
// in the original) - the call below is shown incomplete.
void *VMem::mmap_segment(int seg) {
  void *map = mmap(NULL, SEGMENT_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
  if (map == MAP_FAILED) {
    // This is an "impossible to proceed from here, because system state
    // is impossible to proceed from" situation, so we abort the program.
    perror("mmap");
    abort();
  }
  return map;
}
115
// Grow the backing file by one segment, map it, and initialize the
// whole new segment as a single maximal free block.
void VMem::add_segment() {
  int seg = metapage->segment_count++;
  ftruncate(fd, METABLOCK_SIZE + metapage->segment_count * SEGMENT_SIZE);
  void *map_addr = mmap_segment(seg);
  segments[seg] = VSeg(map_addr);
  Block *top = block_ptr(seg * SEGMENT_SIZE);
  top->next = freelist[LOG2_SEGMENT_SIZE];
  top->prev = VADDR_NULL;
  // NOTE(review): the line that stores this block back into
  // freelist[LOG2_SEGMENT_SIZE] is missing from this listing.
}
126
// Acquire the lock.  If nobody owns it we take it immediately;
// otherwise we append ourselves to the waiter queue (kept in the
// shared process_info table) and block until the owner signals us.
void FastLock::lock() {
#ifdef HAVE_CPP_THREADS
  // _lock is a spin flag guarding the queue fields only.
  while (_lock.test_and_set()) {
  }
  bool empty = _owner < 0;
  if (empty) {
    // NOTE(review): "_owner = vmem.current_process;" appears here in
    // the gcc>=9 copy but is missing from this listing.
  } else {
    int p = vmem.current_process;
    vmem.metapage->process_info[p].next = -1;
    if (_head < 0)
      _head = p;
    else
      vmem.metapage->process_info[_tail].next = p;
    _tail = p;
  }
  _lock.clear();
  if (!empty)
    wait_signal(false);
#else
  // NOTE(review): the non-HAVE_CPP_THREADS implementation line is
  // missing from this listing.
#endif
}
150
// Release the lock, handing ownership to the first queued waiter (if
// any) and waking it through the signalling pipe.
void FastLock::unlock() {
#ifdef HAVE_CPP_THREADS
  while (_lock.test_and_set()) {
  }
  _owner = _head;
  if (_owner >= 0)
    _head = vmem.metapage->process_info[_head].next;
  _lock.clear();
  if (_owner >= 0)
    send_signal(_owner, 0, false);
#else
  // NOTE(review): the non-HAVE_CPP_THREADS implementation line is
  // missing from this listing.
#endif
}
165
166static void lock_allocator() {
167 vmem.metapage->allocator_lock.lock();
168}
169
170static void unlock_allocator() {
171 vmem.metapage->allocator_lock.unlock();
172}
173
174static void print_freelists() {
175 for (int i = 0; i <= LOG2_SEGMENT_SIZE; i++) {
176 vaddr_t vaddr = vmem.freelist[i];
177 if (vaddr != VADDR_NULL) {
178 printf("%2d: %ld", i, (long)vaddr);
179 vaddr_t prev = block_ptr(vaddr)->prev;
180 if (prev != VADDR_NULL) {
181 printf("(%ld)", (long)prev);
182 }
183 assert(block_ptr(vaddr)->prev == VADDR_NULL);
184 for (;;) {
185 vaddr_t last_vaddr = vaddr;
186 Block *block = block_ptr(vaddr);
187 vaddr = block->next;
188 if (vaddr == VADDR_NULL)
189 break;
190 printf(" -> %ld", (long)vaddr);
191 vaddr_t prev = block_ptr(vaddr)->prev;
192 if (prev != last_vaddr) {
193 printf("(%ld)", (long)prev);
194 }
195 }
196 printf("\n");
197 }
198 }
199 fflush(stdout);
200}
201
// Return the block whose payload is at `vaddr` to the allocator,
// coalescing it with its buddy while the buddy is free and of the
// same level (classic buddy-allocator free).
// NOTE(review): this listing omits the lock_allocator()/
// unlock_allocator() pair that brackets the body in the original.
void vmem_free(vaddr_t vaddr) {
  // Step back from the payload to the block header.
  vaddr -= offsetof(Block, data);
  vmem.ensure_is_mapped(vaddr);
  size_t segno = vmem.segment_no(vaddr);
  VSeg seg = vmem.segment(vaddr);
  segaddr_t addr = vmem.segaddr(vaddr);
  int level = seg.block_ptr(addr)->level();
  assert(!seg.is_free(addr));
  while (level < LOG2_SEGMENT_SIZE) {
    segaddr_t buddy = find_buddy(addr, level);
    Block *block = seg.block_ptr(buddy);
    // is buddy free and at the same level?
    if (!block->is_free() || block->level() != level)
      break;
    // remove buddy from freelist.
    Block *prev = vmem.block_ptr(block->prev);
    Block *next = vmem.block_ptr(block->next);
    block->data[0] = level;
    if (prev) {
      assert(prev->next == vmem.vaddr(segno, buddy));
      prev->next = block->next;
    } else {
      // head of freelist.
      assert(vmem.freelist[level] == vmem.vaddr(segno, buddy));
      vmem.freelist[level] = block->next;
    }
    if (next) {
      assert(next->prev == vmem.vaddr(segno, buddy));
      next->prev = block->prev;
    }
    // coalesce block with buddy
    level++;
    if (buddy < addr)
      addr = buddy;
  }
  // Add coalesced block to free list
  Block *block = seg.block_ptr(addr);
  // NOTE(review): "block->prev = VADDR_NULL;" appears here in the
  // gcc>=9 copy but is missing from this listing.
  block->next = vmem.freelist[level];
  block->mark_as_free(level);
  vaddr_t blockaddr = vmem.vaddr(segno, addr);
  if (block->next != VADDR_NULL)
    vmem.block_ptr(block->next)->prev = blockaddr;
  vmem.freelist[level] = blockaddr;
}
249
// Allocate `size` bytes and return the virtual address of the zeroed
// payload.  Finds the smallest free block of sufficient level,
// splitting larger blocks down as needed (buddy allocation) and
// growing the arena by one segment when nothing is free.
// NOTE(review): this listing omits the lock_allocator()/
// unlock_allocator() pair that brackets the body in the original.
vaddr_t vmem_alloc(size_t size) {
  size_t alloc_size = size + offsetof(Block, data);
  int level = find_level(alloc_size);
  int flevel = level;
  while (flevel < LOG2_SEGMENT_SIZE && vmem.freelist[flevel] == VADDR_NULL)
    flevel++;
  if (vmem.freelist[flevel] == VADDR_NULL) {
    vmem.add_segment();
  }
  vmem.ensure_is_mapped(vmem.freelist[flevel]);
  while (flevel > level) {
    // get and split a block
    vaddr_t blockaddr = vmem.freelist[flevel];
    assert((blockaddr & ((1 << flevel) - 1)) == 0);
    Block *block = vmem.block_ptr(blockaddr);
    vmem.freelist[flevel] = block->next;
    if (vmem.freelist[flevel] != VADDR_NULL)
      vmem.block_ptr(vmem.freelist[flevel])->prev = VADDR_NULL;
    vaddr_t blockaddr2 = blockaddr + (1 << (flevel - 1));
    Block *block2 = vmem.block_ptr(blockaddr2);
    flevel--;
    block2->next = vmem.freelist[flevel];
    block2->prev = blockaddr;
    block->next = blockaddr2;
    block->prev = VADDR_NULL;
    // block->prev == VADDR_NULL already.
    vmem.freelist[flevel] = blockaddr;
  }
  assert(vmem.freelist[level] != VADDR_NULL);
  Block *block = vmem.block_ptr(vmem.freelist[level]);
  vaddr_t vaddr = vmem.freelist[level];
  // gcc > 11 rejects offsetof on this non-POD; sizeof(vaddr_t)*2
  // assumes `data` directly follows the two link fields - keep in
  // sync with Block's layout.
  #if defined(__GNUC__) && (__GNUC__>11)
  vaddr_t result = vaddr + (sizeof(vaddr_t)*2);
  #else
  vaddr_t result = vaddr + offsetof(Block, data);
  #endif
  vmem.freelist[level] = block->next;
  if (block->next != VADDR_NULL)
    vmem.block_ptr(block->next)->prev = VADDR_NULL;
  block->mark_as_allocated(vaddr, level);
  memset(block->data, 0, size);
  return result;
}
295
297 struct flock &lock_info, size_t offset, size_t len, bool lock) {
298 lock_info.l_start = offset;
299 lock_info.l_len = len;
300 lock_info.l_pid = 0;
301 lock_info.l_type = lock ? F_WRLCK : F_UNLCK;
302 lock_info.l_whence = SEEK_SET;
303}
304
305void lock_file(int fd, size_t offset, size_t len) {
306 struct flock lock_info;
307 init_flock_struct(lock_info, offset, len, true);
308 fcntl(fd, F_SETLKW, &lock_info);
309}
310
311void unlock_file(int fd, size_t offset, size_t len) {
312 struct flock lock_info;
313 init_flock_struct(lock_info, offset, len, false);
314 fcntl(fd, F_SETLKW, &lock_info);
315}
316
// Lock the metapage region of the backing file.
// NOTE(review): lock_file() is called with two arguments although its
// definition above takes three; presumably the declaration in
// vspace.h supplies a default length - confirm.
void lock_metapage() {
  lock_file(vmem.fd, 0);
}
320
// Release the metapage lock taken by lock_metapage().
// NOTE(review): two-argument call as in lock_metapage() - presumably
// a default length parameter; confirm against vspace.h.
void unlock_metapage() {
  unlock_file(vmem.fd, 0);
}
324
325void init_metapage(bool create) {
326 if (create)
327 ftruncate(vmem.fd, METABLOCK_SIZE);
328 vmem.metapage = (MetaPage *) mmap(
329 NULL, METABLOCK_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, vmem.fd, 0);
330 if (create) {
331 memcpy(vmem.metapage->config_header, config, sizeof(config));
332 for (int i = 0; i <= LOG2_SEGMENT_SIZE; i++) {
333 vmem.metapage->freelist[i] = VADDR_NULL;
334 }
335 vmem.metapage->segment_count = 0;
336 vmem.metapage->allocator_lock = FastLock(metapageaddr(allocator_lock));
337 } else {
338 assert(memcmp(vmem.metapage->config_header, config, sizeof(config)) != 0);
339 }
340}
341
// Lock the current process's ProcessInfo slot in the backing file.
// NOTE(review): the offset expression is truncated in this listing
// (the original starts it with metapageaddr(process_info)); also note
// `processno` is ignored in favor of vmem.current_process.
static void lock_process(int processno) {
  lock_file(vmem.fd,
      + sizeof(ProcessInfo) * vmem.current_process);
}
347
// Release the lock on the current process's ProcessInfo slot.
// NOTE(review): same truncated offset expression and ignored
// `processno` parameter as in lock_process() above.
static void unlock_process(int processno) {
  unlock_file(vmem.fd,
      + sizeof(ProcessInfo) * vmem.current_process);
}
353
354static ProcessInfo &process_info(int processno) {
355 return vmem.metapage->process_info[processno];
356}
357
358bool send_signal(int processno, ipc_signal_t sig, bool lock) {
359 if (lock)
360 lock_process(processno);
361 if (process_info(processno).sigstate != Waiting) {
362 unlock_process(processno);
363 return false;
364 }
365 if (processno == vmem.current_process) {
366 process_info(processno).sigstate = Accepted;
367 process_info(processno).signal = sig;
368 } else {
369 process_info(processno).sigstate = Pending;
370 process_info(processno).signal = sig;
371 int fd = vmem.channels[processno].fd_write;
372 char buf[1] = { 0 };
373 while (write(fd, buf, 1) != 1) {
374 }
375 }
376 if (lock)
377 unlock_process(processno);
378 return true;
379}
380
// Wait for and consume a signal directed at the current process.
// `resume` re-arms the Waiting state after the signal is taken;
// `lock` selects whether this function manages the per-process file
// lock itself.
// NOTE(review): this listing is missing the declaration of `result`
// (an ipc_signal_t local in the original) that both cases assign to.
ipc_signal_t check_signal(bool resume, bool lock) {
  if (lock)
    lock_process(vmem.current_process);
  SignalState sigstate = process_info(vmem.current_process).sigstate;
  switch (sigstate) {
    case Waiting:
    case Pending: {
      int fd = vmem.channels[vmem.current_process].fd_read;
      char buf[1];
      if (lock && sigstate == Waiting) {
        // Drop the lock while we sleep so the sender can update our
        // slot; re-acquire after the pipe delivers a byte.
        unlock_process(vmem.current_process);
        loop
        {
          #if defined(HAVE_POLL) && !defined(__APPLE__)
          // fd is restricted on OsX by ulimit "file descriptors" (256)
          pollfd pfd;
          pfd.fd = fd;
          pfd.events = POLLIN;
          int rv = poll(&pfd, 1, 500000); /* msec*/
          #else
          // fd is restricted to <=1024
          fd_set set;
          FD_ZERO(&set); /* clear the set */
          FD_SET(fd, &set); /* add our file descriptor to the set */
          struct timeval timeout;
          timeout.tv_sec = 500;
          timeout.tv_usec = 0;
          int rv = si_select(fd + 1, &set, NULL, NULL, &timeout);
          #endif
          if (rv== -1) continue; /* an error occurred */
          if (rv== 0) break; /* timeout */
          while(read(fd, buf, 1)!=1) {}
          break;
        }
        lock_process(vmem.current_process);
      } else {
        // Same wait loop, but without releasing the file lock.
        loop
        {
          #if defined(HAVE_POLL) && !defined(__APPLE__)
          // fd is restricted on OsX by ulimit "file descriptors" (256)
          pollfd pfd;
          pfd.fd = fd;
          pfd.events = POLLIN;
          int rv = poll(&pfd, 1, 500000); /* msec*/
          #else
          // fd is restricted to <=1024
          fd_set set;
          FD_ZERO(&set); /* clear the set */
          FD_SET(fd, &set); /* add our file descriptor to the set */
          struct timeval timeout;
          timeout.tv_sec = 500;
          timeout.tv_usec = 0;
          int rv = si_select(fd + 1, &set, NULL, NULL, &timeout);
          #endif
          if (rv== -1) continue; /* an error occurred */
          if (rv== 0) break; /* timeout */
          while(read(fd, buf, 1)!=1) {}
          break;
        }
      }
      result = process_info(vmem.current_process).signal;
      process_info(vmem.current_process).sigstate
          = resume ? Waiting : Accepted;
      if (lock)
        unlock_process(vmem.current_process);
      break;
    }
    case Accepted:
      // Signal already delivered locally (self-send fast path).
      result = process_info(vmem.current_process).signal;
      if (resume)
        process_info(vmem.current_process).sigstate = Waiting;
      if (lock)
        unlock_process(vmem.current_process);
      break;
  }
  return result;
}
459
460void accept_signals() {
461 lock_process(vmem.current_process);
462 process_info(vmem.current_process).sigstate = Waiting;
463 unlock_process(vmem.current_process);
464}
465
466ipc_signal_t wait_signal(bool lock) {
467 return check_signal(true, lock);
468}
469
470} // namespace internals
471
// Fork a child that takes over the first unused ProcessInfo slot.
// Returns the child's pid in the parent, 0 in the child, and -1 on
// fork() failure or when no free slot exists.
// NOTE(review): this listing omits the lock_metapage()/
// unlock_metapage() calls present in the gcc>=9 copy of this
// function; the locking protocol cannot be reviewed from this copy.
pid_t fork_process() {
  using namespace internals;
  for (int p = 0; p < MAX_PROCESS; p++) {
    if (vmem.metapage->process_info[p].pid == 0) {
      pid_t pid = fork();
      if (pid < 0) {
        // error
        return -1;
      } else if (pid == 0) {
        // child process
        int parent = vmem.current_process;
        vmem.current_process = p;
        vmem.metapage->process_info[p].pid = getpid();
        // Tell the parent we have registered ourselves.
        send_signal(parent);
      } else {
        // parent process
        wait_signal();
        // child has unlocked metapage, so we don't need to.
      }
      return pid;
    }
  }
  return -1;
}
501
// Release the semaphore.  If a process is queued, the unit is handed
// directly to it and it is woken; otherwise the counter is bumped.
// NOTE(review): the declaration/initialization of `sig` is missing
// from this listing (present in the original).
void Semaphore::post() {
  int wakeup = -1;
  _lock.lock();
  if (_head == _tail) {
    // Queue empty: nobody to hand the unit to.
    _value++;
  } else {
    // don't increment value, as we'll pass that on to the next process.
    wakeup = _waiting[_head];
    sig = _signals[_head];
    next(_head);
  }
  _lock.unlock();
  if (wakeup >= 0) {
    internals::send_signal(wakeup, sig);
  }
}
519
520bool Semaphore::try_wait() {
521 bool result = false;
522 _lock.lock();
523 if (_value > 0) {
524 _value--;
525 result = true;
526 }
527 _lock.unlock();
528 return result;
529}
530
// Blocking wait: take one unit immediately if available, otherwise
// enqueue ourselves and sleep until post() hands us a unit.
// NOTE(review): this listing is missing the line that records the
// current process in _waiting[_tail] and the trailing
// internals::wait_signal() call (both present in the gcc>=9 copy).
void Semaphore::wait() {
  _lock.lock();
  if (_value > 0) {
    _value--;
    _lock.unlock();
    return;
  }
  _signals[_tail] = 0;
  next(_tail);
  _lock.unlock();
}
544
546 _lock.lock();
547 if (_value > 0) {
548 if (internals::send_signal(internals::vmem.current_process, sig))
549 _value--;
550 _lock.unlock();
551 return false;
552 }
554 _signals[_tail] = sig;
555 next(_tail);
556 _lock.unlock();
557 return true;
558}
559
561 bool result = false;
562 _lock.lock();
563 for (int i = _head; i != _tail; next(i)) {
564 if (_waiting[i] == internals::vmem.current_process) {
565 int last = i;
566 next(i);
567 while (i != _tail) {
570 last = i;
571 next(i);
572 }
573 _tail = last;
574 result = true;
575 break;
576 }
577 }
578 _lock.unlock();
579 return result;
580}
581
582void EventSet::add(Event *event) {
583 event->_next = NULL;
584 if (_head == NULL) {
585 _head = _tail = event;
586 } else {
587 _tail->_next = event;
588 _tail = event;
589 }
590}
591
// Arm every event in the set, block until one fires, then disarm all
// of them and return the index of the event that fired.
// NOTE(review): the line between the two loops that obtains `result`
// (the actual wait) is missing from this listing.
int EventSet::wait() {
  size_t n = 0;
  for (Event *event = _head; event; event = event->_next) {
    if (!event->start_listen((int) (n++))) {
      break;
    }
  }
  for (Event *event = _head; event; event = event->_next) {
    event->stop_listen();
  }
  return (int) result;
}
606
607} // namespace vspace
608#else // gcc>9
609#include <cstdlib>
610#include <unistd.h>
611#include <sys/mman.h>
612#include <sys/stat.h>
613
614
615namespace vspace {
616namespace internals {
617
620
622
623// offsetof() only works for POD types, so we need to construct
624// a portable version of it for metapage fields.
625
626#define metapageaddr(field) \
627 ((char *) &vmem.metapage->field - (char *) vmem.metapage)
628
630 struct stat stat;
631 fstat(fd, &stat);
632 return stat.st_size;
633}
634
636 this->fd = fd;
637 for (int i = 0; i < MAX_SEGMENTS; i++)
638 segments[i] = VSeg(NULL);
639 for (int i = 0; i < MAX_PROCESS; i++) {
640 int channel[2];
641 if (pipe(channel) < 0) {
642 for (int j = 0; j < i; j++) {
643 close(channels[j].fd_read);
644 close(channels[j].fd_write);
645 }
646 return Status(ErrOS);
647 }
648 channels[i].fd_read = channel[0];
649 channels[i].fd_write = channel[1];
650 }
652 init_metapage(filesize() == 0);
654 freelist = metapage->freelist;
655 return Status(ErrNone);
656}
657
659 FILE *fp = tmpfile();
660 Status result = init(fileno(fp));
661 if (!result.ok())
662 return result;
663 current_process = 0;
664 file_handle = fp;
665 metapage->process_info[0].pid = getpid();
666 return Status(ErrNone);
667}
668
// Initialize from a named backing file, creating it if necessary
// (gcc>=9 copy).
// NOTE(review): the Status returned by init(fd) is ignored, and this
// listing drops lines around the TODO (presumably the metapage
// lock/unlock pair) - confirm against the original source.
Status VMem::init(const char *path) {
  int fd = open(path, O_RDWR | O_CREAT, 0600);
  if (fd < 0)
    return Status(ErrFile);
  init(fd);
  // TODO: enter process in meta table
  return Status(ErrNone);
}
679
681 if (file_handle) {
682 fclose(file_handle);
684 } else {
685 close(fd);
686 }
687 munmap(metapage, METABLOCK_SIZE);
688 metapage = NULL;
689 current_process = -1;
690 freelist = NULL;
691 for (int i = 0; i < MAX_SEGMENTS; i++) {
692 if (!segments[i].is_free())
693 munmap(segments[i].base, SEGMENT_SIZE);
694 segments[i] = VSeg(NULL);
695 }
696 for (int i = 0; i < MAX_PROCESS; i++) {
697 close(channels[i].fd_read);
698 close(channels[i].fd_write);
699 }
700}
701
702void *VMem::mmap_segment(int seg) {
704 void *map = mmap(NULL, SEGMENT_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
706 if (map == MAP_FAILED) {
707 // This is an "impossible to proceed from here, because system state
708 // is impossible to proceed from" situation, so we abort the program.
709 perror("mmap");
710 abort();
711 }
713 return map;
714}
715
717 int seg = metapage->segment_count++;
718 ftruncate(fd, METABLOCK_SIZE + metapage->segment_count * SEGMENT_SIZE);
719 void *map_addr = mmap_segment(seg);
720 segments[seg] = VSeg(map_addr);
721 Block *top = block_ptr(seg * SEGMENT_SIZE);
723 top->prev = VADDR_NULL;
725}
726
728#ifdef HAVE_CPP_THREADS
729 while (_lock.test_and_set()) {
730 }
731 bool empty = _owner < 0;
732 if (empty) {
733 _owner = vmem.current_process;
734 } else {
735 int p = vmem.current_process;
736 vmem.metapage->process_info[p].next = -1;
737 if (_head < 0)
738 _head = p;
739 else
740 vmem.metapage->process_info[_tail].next = p;
741 _tail = p;
742 }
743 _lock.clear();
744 if (!empty)
745 wait_signal(false);
746#else
748#endif
749}
750
752#ifdef HAVE_CPP_THREADS
753 while (_lock.test_and_set()) {
754 }
755 _owner = _head;
756 if (_owner >= 0)
757 _head = vmem.metapage->process_info[_head].next;
758 _lock.clear();
759 if (_owner >= 0)
760 send_signal(_owner, 0, false);
761#else
763#endif
764}
765
766static void lock_allocator() {
767 vmem.metapage->allocator_lock.lock();
768}
769
770static void unlock_allocator() {
771 vmem.metapage->allocator_lock.unlock();
772}
773
774static void print_freelists() {
775 for (int i = 0; i <= LOG2_SEGMENT_SIZE; i++) {
776 vaddr_t vaddr = vmem.freelist[i];
777 if (vaddr != VADDR_NULL) {
778 std::printf("%2d: %ld", i, (long)vaddr);
779 vaddr_t prev = block_ptr(vaddr)->prev;
780 if (prev != VADDR_NULL) {
781 std::printf("(%ld)", (long)prev);
782 }
783 assert(block_ptr(vaddr)->prev == VADDR_NULL);
784 for (;;) {
785 vaddr_t last_vaddr = vaddr;
786 Block *block = block_ptr(vaddr);
787 vaddr = block->next;
788 if (vaddr == VADDR_NULL)
789 break;
790 std::printf(" -> %ld", (long)vaddr);
791 vaddr_t prev = block_ptr(vaddr)->prev;
792 if (prev != last_vaddr) {
793 std::printf("(%ld)", (long)prev);
794 }
795 }
796 std::printf("\n");
797 }
798 }
799 std::fflush(stdout);
800}
801
// Return the block whose payload is at `vaddr` to the allocator,
// coalescing it with its buddy while the buddy is free and of the
// same level (classic buddy-allocator free; gcc>=9 copy).
// NOTE(review): this listing omits the lock_allocator()/
// unlock_allocator() pair that brackets the body in the original.
void vmem_free(vaddr_t vaddr) {
  // Step back from the payload to the block header.  gcc > 11 rejects
  // offsetof on this non-POD; sizeof(vaddr_t)*2 assumes `data`
  // directly follows the two link fields - keep in sync with Block.
  #if defined(__GNUC__) && (__GNUC__>11)
  vaddr -= (sizeof(vaddr_t)*2);
  #else
  vaddr -= offsetof(Block, data);
  #endif
  vmem.ensure_is_mapped(vaddr);
  size_t segno = vmem.segment_no(vaddr);
  VSeg seg = vmem.segment(vaddr);
  segaddr_t addr = vmem.segaddr(vaddr);
  int level = seg.block_ptr(addr)->level();
  assert(!seg.is_free(addr));
  while (level < LOG2_SEGMENT_SIZE) {
    segaddr_t buddy = find_buddy(addr, level);
    Block *block = seg.block_ptr(buddy);
    // is buddy free and at the same level?
    if (!block->is_free() || block->level() != level)
      break;
    // remove buddy from freelist.
    Block *prev = vmem.block_ptr(block->prev);
    Block *next = vmem.block_ptr(block->next);
    block->data[0] = level;
    if (prev) {
      assert(prev->next == vmem.vaddr(segno, buddy));
      prev->next = block->next;
    } else {
      // head of freelist.
      assert(vmem.freelist[level] == vmem.vaddr(segno, buddy));
      vmem.freelist[level] = block->next;
    }
    if (next) {
      assert(next->prev == vmem.vaddr(segno, buddy));
      next->prev = block->prev;
    }
    // coalesce block with buddy
    level++;
    if (buddy < addr)
      addr = buddy;
  }
  // Add coalesced block to free list
  Block *block = seg.block_ptr(addr);
  block->prev = VADDR_NULL;
  block->next = vmem.freelist[level];
  block->mark_as_free(level);
  vaddr_t blockaddr = vmem.vaddr(segno, addr);
  if (block->next != VADDR_NULL)
    vmem.block_ptr(block->next)->prev = blockaddr;
  vmem.freelist[level] = blockaddr;
}
853
856 #if defined(__GNUC__) && (__GNUC__>11)
857 size_t alloc_size = size + (sizeof(vaddr_t)*2);
858 #else
859 size_t alloc_size = size + offsetof(Block, data);
860 #endif
861 int level = find_level(alloc_size);
862 int flevel = level;
863 while (flevel < LOG2_SEGMENT_SIZE && vmem.freelist[flevel] == VADDR_NULL)
864 flevel++;
865 if (vmem.freelist[flevel] == VADDR_NULL) {
866 vmem.add_segment();
867 }
868 vmem.ensure_is_mapped(vmem.freelist[flevel]);
869 while (flevel > level) {
870 // get and split a block
871 vaddr_t blockaddr = vmem.freelist[flevel];
872 assert((blockaddr & ((1 << flevel) - 1)) == 0);
873 Block *block = vmem.block_ptr(blockaddr);
874 vmem.freelist[flevel] = block->next;
875 if (vmem.freelist[flevel] != VADDR_NULL)
876 vmem.block_ptr(vmem.freelist[flevel])->prev = VADDR_NULL;
877 vaddr_t blockaddr2 = blockaddr + (1 << (flevel - 1));
878 Block *block2 = vmem.block_ptr(blockaddr2);
879 flevel--;
880 block2->next = vmem.freelist[flevel];
881 block2->prev = blockaddr;
882 block->next = blockaddr2;
883 block->prev = VADDR_NULL;
884 // block->prev == VADDR_NULL already.
885 vmem.freelist[flevel] = blockaddr;
886 }
887 assert(vmem.freelist[level] != VADDR_NULL);
888 Block *block = vmem.block_ptr(vmem.freelist[level]);
889 vaddr_t vaddr = vmem.freelist[level];
890 #if defined(__GNUC__) && (__GNUC__>11)
891 vaddr_t result = vaddr + (sizeof(vaddr_t)*2);
892 #else
893 vaddr_t result = vaddr + offsetof(Block, data);
894 #endif
895 vmem.freelist[level] = block->next;
896 if (block->next != VADDR_NULL)
897 vmem.block_ptr(block->next)->prev = VADDR_NULL;
898 block->mark_as_allocated(vaddr, level);
900 memset(block->data, 0, size);
901 return result;
902}
903
905 struct flock &lock_info, size_t offset, size_t len, bool lock) {
906 lock_info.l_start = offset;
907 lock_info.l_len = len;
908 lock_info.l_pid = 0;
909 lock_info.l_type = lock ? F_WRLCK : F_UNLCK;
910 lock_info.l_whence = SEEK_SET;
911}
912
913void lock_file(int fd, size_t offset, size_t len) {
914 struct flock lock_info;
915 init_flock_struct(lock_info, offset, len, true);
916 fcntl(fd, F_SETLKW, &lock_info);
917}
918
919void unlock_file(int fd, size_t offset, size_t len) {
920 struct flock lock_info;
921 init_flock_struct(lock_info, offset, len, false);
922 fcntl(fd, F_SETLKW, &lock_info);
923}
924
926 lock_file(vmem.fd, 0);
927}
928
930 unlock_file(vmem.fd, 0);
931}
932
934 if (create)
935 ftruncate(vmem.fd, METABLOCK_SIZE);
936 vmem.metapage = (MetaPage *) mmap(
937 NULL, METABLOCK_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, vmem.fd, 0);
938 if (create) {
939 std::memcpy(vmem.metapage->config_header, config, sizeof(config));
940 for (int i = 0; i <= LOG2_SEGMENT_SIZE; i++) {
941 vmem.metapage->freelist[i] = VADDR_NULL;
942 }
943 vmem.metapage->segment_count = 0;
944 vmem.metapage->allocator_lock = FastLock(metapageaddr(allocator_lock));
945 } else {
946 assert(std::memcmp(vmem.metapage->config_header, config,
947 sizeof(config)) != 0);
948 }
949}
950
951static void lock_process(int processno) {
952 lock_file(vmem.fd,
954 + sizeof(ProcessInfo) * vmem.current_process);
955}
956
957static void unlock_process(int processno) {
958 unlock_file(vmem.fd,
960 + sizeof(ProcessInfo) * vmem.current_process);
961}
962
963static ProcessInfo &process_info(int processno) {
964 return vmem.metapage->process_info[processno];
965}
966
967bool send_signal(int processno, ipc_signal_t sig, bool lock) {
968 if (lock)
969 lock_process(processno);
970 if (process_info(processno).sigstate != Waiting) {
971 unlock_process(processno);
972 return false;
973 }
974 if (processno == vmem.current_process) {
975 process_info(processno).sigstate = Accepted;
976 process_info(processno).signal = sig;
977 } else {
978 process_info(processno).sigstate = Pending;
979 process_info(processno).signal = sig;
980 int fd = vmem.channels[processno].fd_write;
981 char buf[1] = { 0 };
982 while (write(fd, buf, 1) != 1) {
983 }
984 }
985 if (lock)
986 unlock_process(processno);
987 return true;
988}
989
990ipc_signal_t check_signal(bool resume, bool lock) {
992 if (lock)
993 lock_process(vmem.current_process);
994 SignalState sigstate = process_info(vmem.current_process).sigstate;
995 switch (sigstate) {
996 case Waiting:
997 case Pending: {
998 int fd = vmem.channels[vmem.current_process].fd_read;
999 char buf[1];
1000 if (lock && sigstate == Waiting) {
1001 unlock_process(vmem.current_process);
1002 loop
1003 {
1004 #if defined(HAVE_POLL) && !defined(__APPLE__)
1005 // fd is restricted on OsX by ulimit "file descriptors" (256)
1006 pollfd pfd;
1007 pfd.fd = fd;
1008 pfd.events = POLLIN;
1009 int rv = poll(&pfd, 1, 500000); /* msec*/
1010 #else
1011 // fd is restricted to <=1024
1012 fd_set set;
1013 FD_ZERO(&set); /* clear the set */
1014 FD_SET(fd, &set); /* add our file descriptor to the set */
1015 struct timeval timeout;
1016 timeout.tv_sec = 500;
1017 timeout.tv_usec = 0;
1018 int rv = si_select(fd + 1, &set, NULL, NULL, &timeout);
1019 #endif
1020 if (rv== -1) continue; /* an error occurred */
1021 if (rv== 0) break; /* timeout */
1022 while(read(fd, buf, 1)!=1) {}
1023 break;
1024 }
1025 lock_process(vmem.current_process);
1026 } else {
1027 loop
1028 {
1029 #if defined(HAVE_POLL) && !defined(__APPLE__)
1030 // fd is restricted on OsX by ulimit "file descriptors" (256)
1031 pollfd pfd;
1032 pfd.fd = fd;
1033 pfd.events = POLLIN;
1034 int rv = poll(&pfd, 1, 500000); /* msec*/
1035 #else
1036 // fd is restricted to <=1024
1037 fd_set set;
1038 FD_ZERO(&set); /* clear the set */
1039 FD_SET(fd, &set); /* add our file descriptor to the set */
1040 struct timeval timeout;
1041 timeout.tv_sec = 500;
1042 timeout.tv_usec = 0;
1043 int rv = si_select(fd + 1, &set, NULL, NULL, &timeout);
1044 #endif
1045 if (rv== -1) continue; /* an error occurred */
1046 if (rv== 0) break;/* timeout */
1047 while(read(fd, buf, 1)!=1) {}
1048 break;
1049 }
1050 }
1051 result = process_info(vmem.current_process).signal;
1052 process_info(vmem.current_process).sigstate
1053 = resume ? Waiting : Accepted;
1054 if (lock)
1055 unlock_process(vmem.current_process);
1056 break;
1057 }
1058 case Accepted:
1059 result = process_info(vmem.current_process).signal;
1060 if (resume)
1061 process_info(vmem.current_process).sigstate = Waiting;
1062 if (lock)
1063 unlock_process(vmem.current_process);
1064 break;
1065 }
1066 return result;
1067}
1068
1070 lock_process(vmem.current_process);
1071 process_info(vmem.current_process).sigstate = Waiting;
1072 unlock_process(vmem.current_process);
1073}
1074
1076 return check_signal(true, lock);
1077}
1078
1079} // namespace internals
1080
1082 using namespace internals;
1083 lock_metapage();
1084 for (int p = 0; p < MAX_PROCESS; p++) {
1085 if (vmem.metapage->process_info[p].pid == 0) {
1086 pid_t pid = fork();
1087 if (pid < 0) {
1088 // error
1089 return -1;
1090 } else if (pid == 0) {
1091 // child process
1092 int parent = vmem.current_process;
1093 vmem.current_process = p;
1094 lock_metapage();
1095 vmem.metapage->process_info[p].pid = getpid();
1096 unlock_metapage();
1097 send_signal(parent);
1098 } else {
1099 // parent process
1100 unlock_metapage();
1101 wait_signal();
1102 // child has unlocked metapage, so we don't need to.
1103 }
1104 return pid;
1105 }
1106 }
1107 unlock_metapage();
1108 return -1;
1109}
1110
1112 int wakeup = -1;
1114 _lock.lock();
1115 if (_head == _tail) {
1116 _value++;
1117 } else {
1118 // don't increment value, as we'll pass that on to the next process.
1119 wakeup = _waiting[_head];
1120 sig = _signals[_head];
1121 next(_head);
1122 }
1123 _lock.unlock();
1124 if (wakeup >= 0) {
1125 internals::send_signal(wakeup, sig);
1126 }
1127}
1128
1130 bool result = false;
1131 _lock.lock();
1132 if (_value > 0) {
1133 _value--;
1134 result = true;
1135 }
1136 _lock.unlock();
1137 return result;
1138}
1139
1141 _lock.lock();
1142 if (_value > 0) {
1143 _value--;
1144 _lock.unlock();
1145 return;
1146 }
1147 _waiting[_tail] = internals::vmem.current_process;
1148 _signals[_tail] = 0;
1149 next(_tail);
1150 _lock.unlock();
1152}
1153
1155 _lock.lock();
1156 if (_value > 0) {
1157 if (internals::send_signal(internals::vmem.current_process, sig))
1158 _value--;
1159 _lock.unlock();
1160 return false;
1161 }
1162 _waiting[_tail] = internals::vmem.current_process;
1163 _signals[_tail] = sig;
1164 next(_tail);
1165 _lock.unlock();
1166 return true;
1167}
1168
1170 bool result = false;
1171 _lock.lock();
1172 for (int i = _head; i != _tail; next(i)) {
1173 if (_waiting[i] == internals::vmem.current_process) {
1174 int last = i;
1175 next(i);
1176 while (i != _tail) {
1177 _waiting[last] = _waiting[i];
1178 _signals[last] = _signals[i];
1179 last = i;
1180 next(i);
1181 }
1182 _tail = last;
1183 result = true;
1184 break;
1185 }
1186 }
1187 _lock.unlock();
1188 return result;
1189}
1190
1191void EventSet::add(Event *event) {
1192 event->_next = NULL;
1193 if (_head == NULL) {
1194 _head = _tail = event;
1195 } else {
1196 _tail->_next = event;
1197 _tail = event;
1198 }
1199}
1200
1202 size_t n = 0;
1203 for (Event *event = _head; event; event = event->_next) {
1204 if (!event->start_listen((int) (n++))) {
1205 break;
1206 }
1207 }
1209 for (Event *event = _head; event; event = event->_next) {
1210 event->stop_listen();
1211 }
1213 return (int) result;
1214}
1215
1216} // namespace vspace
1217#endif
1218#endif
int size(const CanonicalForm &f, const Variable &v)
int size ( const CanonicalForm & f, const Variable & v )
Definition cf_ops.cc:600
int level(const CanonicalForm &f)
int i
Definition cfEzgcd.cc:132
int p
Definition cfModGcd.cc:4086
CanonicalForm fp
Definition cfModGcd.cc:4110
CanonicalForm map(const CanonicalForm &primElem, const Variable &alpha, const CanonicalForm &F, const Variable &beta)
map from to such that is mapped onto
void add(Event *event)
Definition vspace.cc:1191
Event * _head
Definition vspace.h:2581
Event * _tail
Definition vspace.h:2581
int _waiting[internals::MAX_PROCESS+1]
Definition vspace.h:2348
bool start_wait(internals::ipc_signal_t sig=0)
Definition vspace.cc:1154
internals::ipc_signal_t _signals[internals::MAX_PROCESS+1]
Definition vspace.h:2349
FastLock _lock
Definition vspace.h:2358
return result
int j
Definition facHensel.cc:110
STATIC_VAR poly last
Definition hdegree.cc:1137
NodeM * create()
Definition janet.cc:757
STATIC_VAR int offset
Definition janet.cc:29
ListNode * next
Definition janet.h:31
#define SEEK_SET
Definition mod2.h:115
void accept_signals()
Definition vspace.cc:1069
void unlock_metapage()
Definition vspace.cc:929
const vaddr_t VADDR_NULL
Definition vspace.h:1417
void init_flock_struct(struct flock &lock_info, size_t offset, size_t len, bool lock)
Definition vspace.cc:904
static ProcessInfo & process_info(int processno)
Definition vspace.cc:963
void lock_file(int fd, size_t offset, size_t len)
Definition vspace.cc:913
void vmem_free(vaddr_t vaddr)
Definition vspace.cc:802
Block * block_ptr(vaddr_t vaddr)
Definition vspace.h:1637
vaddr_t vmem_alloc(size_t size)
Definition vspace.cc:854
static void unlock_process(int processno)
Definition vspace.cc:957
static const size_t MAX_SEGMENTS
Definition vspace.h:1423
static const size_t SEGMENT_SIZE
Definition vspace.h:1424
static const size_t METABLOCK_SIZE
Definition vspace.h:1420
static void lock_process(int processno)
Definition vspace.cc:951
static const int LOG2_SEGMENT_SIZE
Definition vspace.h:1421
ipc_signal_t wait_signal(bool lock)
Definition vspace.cc:1075
void lock_metapage()
Definition vspace.cc:925
static const int MAX_PROCESS
Definition vspace.h:1419
static VMem & vmem
Definition vspace.h:1635
ProcessInfo process_info[MAX_PROCESS]
Definition vspace.h:1513
static void lock_allocator()
Definition vspace.cc:766
static segaddr_t find_buddy(segaddr_t addr, int level)
Definition vspace.h:1690
ipc_signal_t check_signal(bool resume, bool lock)
Definition vspace.cc:990
void init_metapage(bool create)
Definition vspace.cc:933
void unlock_file(int fd, size_t offset, size_t len)
Definition vspace.cc:919
bool send_signal(int processno, ipc_signal_t sig, bool lock)
Definition vspace.cc:967
static int find_level(size_t size)
Definition vspace.h:1681
size_t config[4]
Definition vspace.cc:619
static void unlock_allocator()
Definition vspace.cc:770
static void print_freelists()
Definition vspace.cc:774
pid_t fork_process()
Definition vspace.cc:1081
@ ErrOS
Definition vspace.h:1380
@ ErrNone
Definition vspace.h:1376
@ ErrFile
Definition vspace.h:1378
internals::Mutex FastLock
Definition vspace.h:2340
#define NULL
Definition omList.c:12
#define block
Definition scanner.cc:646
int status read
Definition si_signals.h:69
int status int fd
Definition si_signals.h:69
int status int void size_t count open
Definition si_signals.h:83
int status int void * buf
Definition si_signals.h:69
#define loop
Definition structs.h:71
std::FILE * file_handle
Definition vspace.h:1591
Block * block_ptr(vaddr_t vaddr)
Definition vspace.h:1610
void * mmap_segment(int seg)
Definition vspace.cc:702
static VMem vmem_global
Definition vspace.h:1588
VSeg segments[MAX_SEGMENTS]
Definition vspace.h:1594
ProcessChannel channels[MAX_PROCESS]
Definition vspace.h:1595
Block * block_ptr(segaddr_t addr)
Definition vspace.h:1571
#define assert(A)
Definition svd_si.h:3
#define metapageaddr(field)
Definition vspace.cc:626