My Project
Loading...
Searching...
No Matches
vspace.cc
Go to the documentation of this file.
1// https://github.com/rbehrends/vspace
2#include "vspace.h"
3#include "kernel/mod2.h"
4#ifdef HAVE_VSPACE
5#ifdef HAVE_CPP_THREADS
6#include <thread>
7#endif
8#ifdef HAVE_POLL
9#include <poll.h>
10#endif
11#include <cstddef>
12#include "reporter/si_signals.h"
13#include "resources/feFopen.h"
14#include <errno.h>
15#include <string.h>
16
17#if defined(__GNUC__) && (__GNUC__<9) &&!defined(__clang__)
18
19namespace vspace {
20namespace internals {
21
22size_t config[4]
24
26
27// offsetof() only works for POD types, so we need to construct
28// a portable version of it for metapage fields.
29
30#define metapageaddr(field) \
31 ((char *) &vmem.metapage->field - (char *) vmem.metapage)
32
33size_t VMem::filesize() {
34 struct stat stat;
35 fstat(fd, &stat);
36 return stat.st_size;
37}
38
39Status VMem::init(int fd) {
40 this->fd = fd;
41 for (int i = 0; i < MAX_SEGMENTS; i++)
42 segments[i] = VSeg(NULL);
43 for (int i = 0; i < MAX_PROCESS; i++) {
44 int channel[2];
45 if (pipe(channel) < 0) {
46 for (int j = 0; j < i; j++) {
47 close(channels[j].fd_read);
48 close(channels[j].fd_write);
49 }
50 return Status(ErrOS);
51 }
52 channels[i].fd_read = channel[0];
53 channels[i].fd_write = channel[1];
54 }
56 init_metapage(filesize() == 0);
58 freelist = metapage->freelist;
59 return Status(ErrNone);
60}
61
62Status VMem::init() {
63 FILE *fp = tmpfile();
64 Status result = init(fileno(fp));
65 if (!result.ok())
66 return result;
69 metapage->process_info[0].pid = getpid();
70 return Status(ErrNone);
71}
72
73Status VMem::init(const char *path) {
74 int fd = open(path, O_RDWR | O_CREAT, 0600);
75 if (fd < 0)
76 return Status(ErrFile);
77 init(fd);
79 // TODO: enter process in meta table
81 return Status(ErrNone);
82}
83
84void VMem::deinit() {
85 if (file_handle) {
86 fclose(file_handle);
88 } else {
89 close(fd);
90 }
91 munmap(metapage, METABLOCK_SIZE);
92 metapage = NULL;
93 current_process = -1;
94 freelist = NULL;
95 for (int i = 0; i < MAX_SEGMENTS; i++) {
96 if (segments[i].base) munmap(segments[i].base, SEGMENT_SIZE);
97 segments[i] = NULL;
98 }
99 for (int i = 0; i < MAX_PROCESS; i++) {
100 close(channels[i].fd_read);
101 close(channels[i].fd_write);
102 }
103}
104
105void *VMem::mmap_segment(int seg) {
107 void *map = mmap(NULL, SEGMENT_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
109 if (map == MAP_FAILED) {
110 // This is an "impossible to proceed from here, because system state
111 // is impossible to proceed from" situation, so we abort the program.
112 perror("mmap");
113 abort();
114 }
116 return map;
117}
118
119void VMem::add_segment() {
120 int seg = metapage->segment_count++;
121 if (ftruncate(fd, METABLOCK_SIZE + metapage->segment_count * SEGMENT_SIZE) != 0) {
122 metapage->segment_count--;
123 char err_msg[256];
124 snprintf(err_msg, sizeof(err_msg), "out of memory in vspace:add_segment: %s", strerror(errno));
125 WerrorS(err_msg);
126 return;
127 }
128 void *map_addr = mmap_segment(seg);
129 segments[seg] = VSeg(map_addr);
130 Block *top = block_ptr(seg * SEGMENT_SIZE);
131 top->next = freelist[LOG2_SEGMENT_SIZE];
132 top->prev = VADDR_NULL;
134}
135
136void FastLock::lock() {
137#ifdef HAVE_CPP_THREADS
138 while (_lock.test_and_set()) {
139 }
140 bool empty = _owner < 0;
141 if (empty) {
143 } else {
144 int p = vmem.current_process;
145 vmem.metapage->process_info[p].next = -1;
146 if (_head < 0)
147 _head = p;
148 else
149 vmem.metapage->process_info[_tail].next = p;
150 _tail = p;
151 }
152 _lock.clear();
153 if (!empty)
154 wait_signal(false);
155#else
157#endif
158}
159
// Release the fast lock: hand ownership to the first queued waiter (if
// any) and wake it via its signal pipe; otherwise mark the lock free.
void FastLock::unlock() {
#ifdef HAVE_CPP_THREADS
  while (_lock.test_and_set()) {
  }
  // Pop the head of the waiter queue; _owner < 0 means the lock is free.
  _owner = _head;
  if (_owner >= 0)
    _head = vmem.metapage->process_info[_head].next;
  _lock.clear();
  if (_owner >= 0)
    // lock=false: the new owner's ProcessInfo slot needs no file lock here.
    send_signal(_owner, 0, false);
#else
  // NOTE(review): the non-HAVE_CPP_THREADS body (original line 172) is
  // missing from this listing in both copies — confirm against upstream.
#endif
}
174
// Take the allocator lock stored in the shared metapage; serializes
// vmem_alloc/vmem_free across processes.
static void lock_allocator() {
  vmem.metapage->allocator_lock.lock();
}
178
// Release the shared allocator lock taken by lock_allocator().
static void unlock_allocator() {
  vmem.metapage->allocator_lock.unlock();
}
182
// Debugging aid: dump every non-empty freelist to stdout and sanity-check
// the doubly-linked list structure (the head's prev must be VADDR_NULL;
// each node's prev must point back at the node we arrived from). A
// mismatched prev is printed in parentheses.
static void print_freelists() {
  for (int i = 0; i <= LOG2_SEGMENT_SIZE; i++) {
    vaddr_t vaddr = vmem.freelist[i];
    if (vaddr != VADDR_NULL) {
      printf("%2d: %ld", i, (long)vaddr);
      vaddr_t prev = block_ptr(vaddr)->prev;
      if (prev != VADDR_NULL) {
        // Corrupt head: its prev should always be VADDR_NULL.
        printf("(%ld)", (long)prev);
      }
      assert(block_ptr(vaddr)->prev == VADDR_NULL);
      for (;;) {
        vaddr_t last_vaddr = vaddr;
        Block *block = block_ptr(vaddr);
        vaddr = block->next;
        if (vaddr == VADDR_NULL)
          break;
        printf(" -> %ld", (long)vaddr);
        vaddr_t prev = block_ptr(vaddr)->prev;
        if (prev != last_vaddr) {
          // Back-link does not match the node we came from.
          printf("(%ld)", (long)prev);
        }
      }
      printf("\n");
    }
  }
  fflush(stdout);
}
210
211void vmem_free(vaddr_t vaddr) {
213 vaddr -= offsetof(Block, data);
214 vmem.ensure_is_mapped(vaddr);
215 size_t segno = vmem.segment_no(vaddr);
216 VSeg seg = vmem.segment(vaddr);
217 segaddr_t addr = vmem.segaddr(vaddr);
218 int level = seg.block_ptr(addr)->level();
219 assert(!seg.is_free(addr));
220 while (level < LOG2_SEGMENT_SIZE) {
221 segaddr_t buddy = find_buddy(addr, level);
222 Block *block = seg.block_ptr(buddy);
223 // is buddy free and at the same level?
224 if (!block->is_free() || block->level() != level)
225 break;
226 // remove buddy from freelist.
227 Block *prev = vmem.block_ptr(block->prev);
228 Block *next = vmem.block_ptr(block->next);
229 block->data[0] = level;
230 if (prev) {
231 assert(prev->next == vmem.vaddr(segno, buddy));
232 prev->next = block->next;
233 } else {
234 // head of freelist.
235 assert(vmem.freelist[level] == vmem.vaddr(segno, buddy));
236 vmem.freelist[level] = block->next;
237 }
238 if (next) {
239 assert(next->prev == vmem.vaddr(segno, buddy));
240 next->prev = block->prev;
241 }
242 // coalesce block with buddy
243 level++;
244 if (buddy < addr)
245 addr = buddy;
246 }
247 // Add coalesced block to free list
248 Block *block = seg.block_ptr(addr);
250 block->next = vmem.freelist[level];
251 block->mark_as_free(level);
252 vaddr_t blockaddr = vmem.vaddr(segno, addr);
253 if (block->next != VADDR_NULL)
254 vmem.block_ptr(block->next)->prev = blockaddr;
255 vmem.freelist[level] = blockaddr;
257}
258
259vaddr_t vmem_alloc(size_t size) {
261 size_t alloc_size = size + offsetof(Block, data);
262 int level = find_level(alloc_size);
263 int flevel = level;
264 while (flevel < LOG2_SEGMENT_SIZE && vmem.freelist[flevel] == VADDR_NULL)
265 flevel++;
266 if (vmem.freelist[flevel] == VADDR_NULL) {
267 vmem.add_segment();
268 }
269 vmem.ensure_is_mapped(vmem.freelist[flevel]);
270 while (flevel > level) {
271 // get and split a block
272 vaddr_t blockaddr = vmem.freelist[flevel];
273 assert((blockaddr & ((1 << flevel) - 1)) == 0);
274 Block *block = vmem.block_ptr(blockaddr);
275 vmem.freelist[flevel] = block->next;
276 if (vmem.freelist[flevel] != VADDR_NULL)
277 vmem.block_ptr(vmem.freelist[flevel])->prev = VADDR_NULL;
278 vaddr_t blockaddr2 = blockaddr + (1 << (flevel - 1));
279 Block *block2 = vmem.block_ptr(blockaddr2);
280 flevel--;
281 block2->next = vmem.freelist[flevel];
282 block2->prev = blockaddr;
283 block->next = blockaddr2;
284 block->prev = VADDR_NULL;
285 // block->prev == VADDR_NULL already.
286 vmem.freelist[flevel] = blockaddr;
287 }
288 assert(vmem.freelist[level] != VADDR_NULL);
289 Block *block = vmem.block_ptr(vmem.freelist[level]);
290 vaddr_t vaddr = vmem.freelist[level];
291 #if defined(__GNUC__) && (__GNUC__>11)
292 vaddr_t result = vaddr + (sizeof(vaddr_t)*2);
293 #else
294 vaddr_t result = vaddr + offsetof(Block, data);
295 #endif
296 vmem.freelist[level] = block->next;
297 if (block->next != VADDR_NULL)
298 vmem.block_ptr(block->next)->prev = VADDR_NULL;
299 block->mark_as_allocated(vaddr, level);
301 memset(block->data, 0, size);
302 return result;
303}
304
306 struct flock &lock_info, size_t offset, size_t len, bool lock) {
307 lock_info.l_start = offset;
308 lock_info.l_len = len;
309 lock_info.l_pid = 0;
310 lock_info.l_type = lock ? F_WRLCK : F_UNLCK;
311 lock_info.l_whence = SEEK_SET;
312}
313
314void lock_file(int fd, size_t offset, size_t len) {
315 struct flock lock_info;
316 init_flock_struct(lock_info, offset, len, true);
317 fcntl(fd, F_SETLKW, &lock_info);
318}
319
320void unlock_file(int fd, size_t offset, size_t len) {
321 struct flock lock_info;
322 init_flock_struct(lock_info, offset, len, false);
323 fcntl(fd, F_SETLKW, &lock_info);
324}
325
// Acquire the file lock guarding the shared metapage.
// NOTE(review): lock_file is declared with (fd, offset, len) but called
// here with two arguments — presumably `len` has a default in vspace.h;
// confirm against the header.
void lock_metapage() {
  lock_file(vmem.fd, 0);
}
329
// Release the metapage file lock taken by lock_metapage().
// NOTE(review): two-argument call — see lock_file's presumed default len.
void unlock_metapage() {
  unlock_file(vmem.fd, 0);
}
333
334void init_metapage(bool create) {
335 if (create) {
336 if (ftruncate(vmem.fd, METABLOCK_SIZE) != 0) {
337 char err_msg[256];
338 snprintf(err_msg, sizeof(err_msg), "out of memory in vspace:init_metapage: %s", strerror(errno));
339 WerrorS(err_msg);
340 return;
341 }
342 }
343 vmem.metapage = (MetaPage *) mmap(
344 NULL, METABLOCK_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, vmem.fd, 0);
345 if (create) {
346 memcpy(vmem.metapage->config_header, config, sizeof(config));
347 for (int i = 0; i <= LOG2_SEGMENT_SIZE; i++) {
348 vmem.metapage->freelist[i] = VADDR_NULL;
349 }
350 vmem.metapage->segment_count = 0;
351 vmem.metapage->allocator_lock = FastLock(metapageaddr(allocator_lock));
352 } else {
353 assert(memcmp(vmem.metapage->config_header, config, sizeof(config)) != 0);
354 }
355}
356
357static void lock_process(int processno) {
358 lock_file(vmem.fd,
360 + sizeof(ProcessInfo) * vmem.current_process);
361}
362
363static void unlock_process(int processno) {
364 unlock_file(vmem.fd,
366 + sizeof(ProcessInfo) * vmem.current_process);
367}
368
// Shorthand accessor for a process's slot in the shared metapage.
static ProcessInfo &process_info(int processno) {
  return vmem.metapage->process_info[processno];
}
372
373bool send_signal(int processno, ipc_signal_t sig, bool lock) {
374 if (lock)
375 lock_process(processno);
376 if (process_info(processno).sigstate != Waiting) {
377 unlock_process(processno);
378 return false;
379 }
380 if (processno == vmem.current_process) {
381 process_info(processno).sigstate = Accepted;
382 process_info(processno).signal = sig;
383 } else {
384 process_info(processno).sigstate = Pending;
385 process_info(processno).signal = sig;
386 int fd = vmem.channels[processno].fd_write;
387 char buf[1] = { 0 };
388 while (write(fd, buf, 1) != 1) {
389 }
390 }
391 if (lock)
392 unlock_process(processno);
393 return true;
394}
395
396ipc_signal_t check_signal(bool resume, bool lock) {
398 if (lock)
399 lock_process(vmem.current_process);
400 SignalState sigstate = process_info(vmem.current_process).sigstate;
401 switch (sigstate) {
402 case Waiting:
403 case Pending: {
404 int fd = vmem.channels[vmem.current_process].fd_read;
405 char buf[1];
406 if (lock && sigstate == Waiting) {
407 unlock_process(vmem.current_process);
408 loop
409 {
410 #if defined(HAVE_POLL) && !defined(__APPLE__)
411 // fd is restricted on OsX by ulimit "file descriptors" (256)
412 pollfd pfd;
413 pfd.fd = fd;
414 pfd.events = POLLIN;
415 int rv = poll(&pfd, 1, 500000); /* msec*/
416 #else
417 // fd is restricted to <=1024
418 fd_set set;
419 FD_ZERO(&set); /* clear the set */
420 FD_SET(fd, &set); /* add our file descriptor to the set */
421 struct timeval timeout;
422 timeout.tv_sec = 500;
423 timeout.tv_usec = 0;
424 int rv = si_select(fd + 1, &set, NULL, NULL, &timeout);
425 #endif
426 if (rv== -1) continue; /* an error occurred */
427 if (rv== 0) break; /* timeout */
428 while(read(fd, buf, 1)!=1) {}
429 break;
430 }
431 lock_process(vmem.current_process);
432 } else {
433 loop
434 {
435 #if defined(HAVE_POLL) && !defined(__APPLE__)
436 // fd is restricted on OsX by ulimit "file descriptors" (256)
437 pollfd pfd;
438 pfd.fd = fd;
439 pfd.events = POLLIN;
440 int rv = poll(&pfd, 1, 500000); /* msec*/
441 #else
442 // fd is restricted to <=1024
443 fd_set set;
444 FD_ZERO(&set); /* clear the set */
445 FD_SET(fd, &set); /* add our file descriptor to the set */
446 struct timeval timeout;
447 timeout.tv_sec = 500;
448 timeout.tv_usec = 0;
449 int rv = si_select(fd + 1, &set, NULL, NULL, &timeout);
450 #endif
451 if (rv== -1) continue; /* an error occurred */
452 if (rv== 0) break; /* timeout */
453 while(read(fd, buf, 1)!=1) {}
454 break;
455 }
456 }
457 result = process_info(vmem.current_process).signal;
458 process_info(vmem.current_process).sigstate
459 = resume ? Waiting : Accepted;
460 if (lock)
461 unlock_process(vmem.current_process);
462 break;
463 }
464 case Accepted:
465 result = process_info(vmem.current_process).signal;
466 if (resume)
467 process_info(vmem.current_process).sigstate = Waiting;
468 if (lock)
469 unlock_process(vmem.current_process);
470 break;
471 }
472 return result;
473}
474
// Mark the current process as ready to receive signals (state Waiting),
// under its ProcessInfo file lock.
void accept_signals() {
  lock_process(vmem.current_process);
  process_info(vmem.current_process).sigstate = Waiting;
  unlock_process(vmem.current_process);
}
480
// Block until a signal arrives, leaving the process in the Waiting state
// afterwards (resume = true).
ipc_signal_t wait_signal(bool lock) {
  return check_signal(true, lock);
}
484
485} // namespace internals
486
487pid_t fork_process() {
488 using namespace internals;
490 for (int p = 0; p < MAX_PROCESS; p++) {
491 if (vmem.metapage->process_info[p].pid == 0) {
492 pid_t pid = fork();
493 if (pid < 0) {
494 // error
495 return -1;
496 } else if (pid == 0) {
497 // child process
498 int parent = vmem.current_process;
499 vmem.current_process = p;
501 vmem.metapage->process_info[p].pid = getpid();
503 send_signal(parent);
504 } else {
505 // parent process
507 wait_signal();
508 // child has unlocked metapage, so we don't need to.
509 }
510 return pid;
511 }
512 }
514 return -1;
515}
516
517void Semaphore::post() {
518 int wakeup = -1;
520 _lock.lock();
521 if (_head == _tail) {
522 _value++;
523 } else {
524 // don't increment value, as we'll pass that on to the next process.
525 wakeup = _waiting[_head];
526 sig = _signals[_head];
527 next(_head);
528 }
529 _lock.unlock();
530 if (wakeup >= 0) {
531 internals::send_signal(wakeup, sig);
532 }
533}
534
535bool Semaphore::try_wait() {
536 bool result = false;
537 _lock.lock();
538 if (_value > 0) {
539 _value--;
540 result = true;
541 }
542 _lock.unlock();
543 return result;
544}
545
546void Semaphore::wait() {
547 _lock.lock();
548 if (_value > 0) {
549 _value--;
550 _lock.unlock();
551 return;
552 }
554 _signals[_tail] = 0;
555 next(_tail);
556 _lock.unlock();
558}
559
561 _lock.lock();
562 if (_value > 0) {
563 if (internals::send_signal(internals::vmem.current_process, sig))
564 _value--;
565 _lock.unlock();
566 return false;
567 }
569 _signals[_tail] = sig;
570 next(_tail);
571 _lock.unlock();
572 return true;
573}
574
576 bool result = false;
577 _lock.lock();
578 for (int i = _head; i != _tail; next(i)) {
579 if (_waiting[i] == internals::vmem.current_process) {
580 int last = i;
581 next(i);
582 while (i != _tail) {
585 last = i;
586 next(i);
587 }
588 _tail = last;
589 result = true;
590 break;
591 }
592 }
593 _lock.unlock();
594 return result;
595}
596
597void EventSet::add(Event *event) {
598 event->_next = NULL;
599 if (_head == NULL) {
600 _head = _tail = event;
601 } else {
602 _tail->_next = event;
603 _tail = event;
604 }
605}
606
607int EventSet::wait() {
608 size_t n = 0;
609 for (Event *event = _head; event; event = event->_next) {
610 if (!event->start_listen((int) (n++))) {
611 break;
612 }
613 }
615 for (Event *event = _head; event; event = event->_next) {
616 event->stop_listen();
617 }
619 return (int) result;
620}
621
622} // namespace vspace
623#else // gcc>9
624#include <cstdlib>
625#include <unistd.h>
626#include <sys/mman.h>
627#include <sys/stat.h>
628
629
630namespace vspace {
631namespace internals {
632
635
637
638// offsetof() only works for POD types, so we need to construct
639// a portable version of it for metapage fields.
640
641#define metapageaddr(field) \
642 ((char *) &vmem.metapage->field - (char *) vmem.metapage)
643
645 struct stat stat;
646 fstat(fd, &stat);
647 return stat.st_size;
648}
649
651 this->fd = fd;
652 for (int i = 0; i < MAX_SEGMENTS; i++)
653 segments[i] = VSeg(NULL);
654 for (int i = 0; i < MAX_PROCESS; i++) {
655 int channel[2];
656 if (pipe(channel) < 0) {
657 for (int j = 0; j < i; j++) {
658 close(channels[j].fd_read);
659 close(channels[j].fd_write);
660 }
661 return Status(ErrOS);
662 }
663 channels[i].fd_read = channel[0];
664 channels[i].fd_write = channel[1];
665 }
667 init_metapage(filesize() == 0);
669 freelist = metapage->freelist;
670 return Status(ErrNone);
671}
672
674 FILE *fp = tmpfile();
675 Status result = init(fileno(fp));
676 if (!result.ok())
677 return result;
678 current_process = 0;
679 file_handle = fp;
680 metapage->process_info[0].pid = getpid();
681 return Status(ErrNone);
682}
683
684Status VMem::init(const char *path) {
685 int fd = open(path, O_RDWR | O_CREAT, 0600);
686 if (fd < 0)
687 return Status(ErrFile);
688 init(fd);
690 // TODO: enter process in meta table
692 return Status(ErrNone);
693}
694
696 if (file_handle) {
697 fclose(file_handle);
699 } else {
700 close(fd);
701 }
702 munmap(metapage, METABLOCK_SIZE);
703 metapage = NULL;
704 current_process = -1;
705 freelist = NULL;
706 for (int i = 0; i < MAX_SEGMENTS; i++) {
707 if (!segments[i].is_free())
708 munmap(segments[i].base, SEGMENT_SIZE);
709 segments[i] = VSeg(NULL);
710 }
711 for (int i = 0; i < MAX_PROCESS; i++) {
712 close(channels[i].fd_read);
713 close(channels[i].fd_write);
714 }
715}
716
717void *VMem::mmap_segment(int seg) {
719 void *map = mmap(NULL, SEGMENT_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
721 if (map == MAP_FAILED) {
722 // This is an "impossible to proceed from here, because system state
723 // is impossible to proceed from" situation, so we abort the program.
724 perror("mmap");
725 abort();
726 }
728 return map;
729}
730
732 int seg = metapage->segment_count++;
733 if (ftruncate(fd, METABLOCK_SIZE + metapage->segment_count * SEGMENT_SIZE) != 0) {
734 metapage->segment_count--;
735 char err_msg[256];
736 snprintf(err_msg, sizeof(err_msg), "out of memory in vspace:add_segment: %s", strerror(errno));
737 WerrorS(err_msg);
738 return;
739 }
740 void *map_addr = mmap_segment(seg);
741 segments[seg] = VSeg(map_addr);
742 Block *top = block_ptr(seg * SEGMENT_SIZE);
744 top->prev = VADDR_NULL;
746}
747
749#ifdef HAVE_CPP_THREADS
750 while (_lock.test_and_set()) {
751 }
752 bool empty = _owner < 0;
753 if (empty) {
754 _owner = vmem.current_process;
755 } else {
756 int p = vmem.current_process;
757 vmem.metapage->process_info[p].next = -1;
758 if (_head < 0)
759 _head = p;
760 else
761 vmem.metapage->process_info[_tail].next = p;
762 _tail = p;
763 }
764 _lock.clear();
765 if (!empty)
766 wait_signal(false);
767#else
769#endif
770}
771
773#ifdef HAVE_CPP_THREADS
774 while (_lock.test_and_set()) {
775 }
776 _owner = _head;
777 if (_owner >= 0)
778 _head = vmem.metapage->process_info[_head].next;
779 _lock.clear();
780 if (_owner >= 0)
781 send_signal(_owner, 0, false);
782#else
784#endif
785}
786
// Take the allocator lock stored in the shared metapage; serializes
// vmem_alloc/vmem_free across processes.
static void lock_allocator() {
  vmem.metapage->allocator_lock.lock();
}
790
// Release the shared allocator lock taken by lock_allocator().
static void unlock_allocator() {
  vmem.metapage->allocator_lock.unlock();
}
794
// Debugging aid: dump every non-empty freelist to stdout and sanity-check
// the doubly-linked list structure (the head's prev must be VADDR_NULL;
// each node's prev must point back at the node we arrived from). A
// mismatched prev is printed in parentheses.
static void print_freelists() {
  for (int i = 0; i <= LOG2_SEGMENT_SIZE; i++) {
    vaddr_t vaddr = vmem.freelist[i];
    if (vaddr != VADDR_NULL) {
      std::printf("%2d: %ld", i, (long)vaddr);
      vaddr_t prev = block_ptr(vaddr)->prev;
      if (prev != VADDR_NULL) {
        // Corrupt head: its prev should always be VADDR_NULL.
        std::printf("(%ld)", (long)prev);
      }
      assert(block_ptr(vaddr)->prev == VADDR_NULL);
      for (;;) {
        vaddr_t last_vaddr = vaddr;
        Block *block = block_ptr(vaddr);
        vaddr = block->next;
        if (vaddr == VADDR_NULL)
          break;
        std::printf(" -> %ld", (long)vaddr);
        vaddr_t prev = block_ptr(vaddr)->prev;
        if (prev != last_vaddr) {
          // Back-link does not match the node we came from.
          std::printf("(%ld)", (long)prev);
        }
      }
      std::printf("\n");
    }
  }
  std::fflush(stdout);
}
822
823void vmem_free(vaddr_t vaddr) {
825 #if defined(__GNUC__) && (__GNUC__>11)
826 vaddr -= (sizeof(vaddr_t)*2);
827 #else
828 vaddr -= offsetof(Block, data);
829 #endif
830 vmem.ensure_is_mapped(vaddr);
831 size_t segno = vmem.segment_no(vaddr);
832 VSeg seg = vmem.segment(vaddr);
833 segaddr_t addr = vmem.segaddr(vaddr);
834 int level = seg.block_ptr(addr)->level();
835 assert(!seg.is_free(addr));
836 while (level < LOG2_SEGMENT_SIZE) {
837 segaddr_t buddy = find_buddy(addr, level);
838 Block *block = seg.block_ptr(buddy);
839 // is buddy free and at the same level?
840 if (!block->is_free() || block->level() != level)
841 break;
842 // remove buddy from freelist.
843 Block *prev = vmem.block_ptr(block->prev);
844 Block *next = vmem.block_ptr(block->next);
845 block->data[0] = level;
846 if (prev) {
847 assert(prev->next == vmem.vaddr(segno, buddy));
848 prev->next = block->next;
849 } else {
850 // head of freelist.
851 assert(vmem.freelist[level] == vmem.vaddr(segno, buddy));
852 vmem.freelist[level] = block->next;
853 }
854 if (next) {
855 assert(next->prev == vmem.vaddr(segno, buddy));
856 next->prev = block->prev;
857 }
858 // coalesce block with buddy
859 level++;
860 if (buddy < addr)
861 addr = buddy;
862 }
863 // Add coalesced block to free list
864 Block *block = seg.block_ptr(addr);
865 block->prev = VADDR_NULL;
866 block->next = vmem.freelist[level];
867 block->mark_as_free(level);
868 vaddr_t blockaddr = vmem.vaddr(segno, addr);
869 if (block->next != VADDR_NULL)
870 vmem.block_ptr(block->next)->prev = blockaddr;
871 vmem.freelist[level] = blockaddr;
873}
874
877 #if defined(__GNUC__) && (__GNUC__>11)
878 size_t alloc_size = size + (sizeof(vaddr_t)*2);
879 #else
880 size_t alloc_size = size + offsetof(Block, data);
881 #endif
882 int level = find_level(alloc_size);
883 int flevel = level;
884 while (flevel < LOG2_SEGMENT_SIZE && vmem.freelist[flevel] == VADDR_NULL)
885 flevel++;
886 if (vmem.freelist[flevel] == VADDR_NULL) {
887 vmem.add_segment();
888 }
889 vmem.ensure_is_mapped(vmem.freelist[flevel]);
890 while (flevel > level) {
891 // get and split a block
892 vaddr_t blockaddr = vmem.freelist[flevel];
893 assert((blockaddr & ((1 << flevel) - 1)) == 0);
894 Block *block = vmem.block_ptr(blockaddr);
895 vmem.freelist[flevel] = block->next;
896 if (vmem.freelist[flevel] != VADDR_NULL)
897 vmem.block_ptr(vmem.freelist[flevel])->prev = VADDR_NULL;
898 vaddr_t blockaddr2 = blockaddr + (1 << (flevel - 1));
899 Block *block2 = vmem.block_ptr(blockaddr2);
900 flevel--;
901 block2->next = vmem.freelist[flevel];
902 block2->prev = blockaddr;
903 block->next = blockaddr2;
904 block->prev = VADDR_NULL;
905 // block->prev == VADDR_NULL already.
906 vmem.freelist[flevel] = blockaddr;
907 }
908 assert(vmem.freelist[level] != VADDR_NULL);
909 Block *block = vmem.block_ptr(vmem.freelist[level]);
910 vaddr_t vaddr = vmem.freelist[level];
911 #if defined(__GNUC__) && (__GNUC__>11)
912 vaddr_t result = vaddr + (sizeof(vaddr_t)*2);
913 #else
914 vaddr_t result = vaddr + offsetof(Block, data);
915 #endif
916 vmem.freelist[level] = block->next;
917 if (block->next != VADDR_NULL)
918 vmem.block_ptr(block->next)->prev = VADDR_NULL;
919 block->mark_as_allocated(vaddr, level);
921 memset(block->data, 0, size);
922 return result;
923}
924
926 struct flock &lock_info, size_t offset, size_t len, bool lock) {
927 lock_info.l_start = offset;
928 lock_info.l_len = len;
929 lock_info.l_pid = 0;
930 lock_info.l_type = lock ? F_WRLCK : F_UNLCK;
931 lock_info.l_whence = SEEK_SET;
932}
933
// Take (blocking) an exclusive advisory write lock on the byte range
// [offset, offset+len) of `fd`, via fcntl(F_SETLKW).
void lock_file(int fd, size_t offset, size_t len) {
  struct flock lock_info;
  init_flock_struct(lock_info, offset, len, true);
  fcntl(fd, F_SETLKW, &lock_info);
}
939
// Release the advisory lock on the byte range [offset, offset+len) of `fd`.
void unlock_file(int fd, size_t offset, size_t len) {
  struct flock lock_info;
  init_flock_struct(lock_info, offset, len, false);
  fcntl(fd, F_SETLKW, &lock_info);
}
945
947 lock_file(vmem.fd, 0);
948}
949
951 unlock_file(vmem.fd, 0);
952}
953
955 if (create) {
956 if (ftruncate(vmem.fd, METABLOCK_SIZE) != 0) {
957 char err_msg[256];
958 snprintf(err_msg, sizeof(err_msg), "out of memory in vspace:init_metapage: %s", strerror(errno));
959 WerrorS(err_msg);
960 return;
961 }
962 }
963 vmem.metapage = (MetaPage *) mmap(
964 NULL, METABLOCK_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, vmem.fd, 0);
965 if (create) {
966 std::memcpy(vmem.metapage->config_header, config, sizeof(config));
967 for (int i = 0; i <= LOG2_SEGMENT_SIZE; i++) {
968 vmem.metapage->freelist[i] = VADDR_NULL;
969 }
970 vmem.metapage->segment_count = 0;
971 vmem.metapage->allocator_lock = FastLock(metapageaddr(allocator_lock));
972 } else {
973 assert(std::memcmp(vmem.metapage->config_header, config,
974 sizeof(config)) != 0);
975 }
976}
977
978static void lock_process(int processno) {
979 lock_file(vmem.fd,
981 + sizeof(ProcessInfo) * vmem.current_process);
982}
983
984static void unlock_process(int processno) {
985 unlock_file(vmem.fd,
987 + sizeof(ProcessInfo) * vmem.current_process);
988}
989
// Shorthand accessor for a process's slot in the shared metapage.
static ProcessInfo &process_info(int processno) {
  return vmem.metapage->process_info[processno];
}
993
994bool send_signal(int processno, ipc_signal_t sig, bool lock) {
995 if (lock)
996 lock_process(processno);
997 if (process_info(processno).sigstate != Waiting) {
998 unlock_process(processno);
999 return false;
1000 }
1001 if (processno == vmem.current_process) {
1002 process_info(processno).sigstate = Accepted;
1003 process_info(processno).signal = sig;
1004 } else {
1005 process_info(processno).sigstate = Pending;
1006 process_info(processno).signal = sig;
1007 int fd = vmem.channels[processno].fd_write;
1008 char buf[1] = { 0 };
1009 while (write(fd, buf, 1) != 1) {
1010 }
1011 }
1012 if (lock)
1013 unlock_process(processno);
1014 return true;
1015}
1016
1017ipc_signal_t check_signal(bool resume, bool lock) {
1019 if (lock)
1020 lock_process(vmem.current_process);
1021 SignalState sigstate = process_info(vmem.current_process).sigstate;
1022 switch (sigstate) {
1023 case Waiting:
1024 case Pending: {
1025 int fd = vmem.channels[vmem.current_process].fd_read;
1026 char buf[1];
1027 if (lock && sigstate == Waiting) {
1028 unlock_process(vmem.current_process);
1029 loop
1030 {
1031 #if defined(HAVE_POLL) && !defined(__APPLE__)
1032 // fd is restricted on OsX by ulimit "file descriptors" (256)
1033 pollfd pfd;
1034 pfd.fd = fd;
1035 pfd.events = POLLIN;
1036 int rv = poll(&pfd, 1, 500000); /* msec*/
1037 #else
1038 // fd is restricted to <=1024
1039 fd_set set;
1040 FD_ZERO(&set); /* clear the set */
1041 FD_SET(fd, &set); /* add our file descriptor to the set */
1042 struct timeval timeout;
1043 timeout.tv_sec = 500;
1044 timeout.tv_usec = 0;
1045 int rv = si_select(fd + 1, &set, NULL, NULL, &timeout);
1046 #endif
1047 if (rv== -1) continue; /* an error occurred */
1048 if (rv== 0) break; /* timeout */
1049 while(read(fd, buf, 1)!=1) {}
1050 break;
1051 }
1052 lock_process(vmem.current_process);
1053 } else {
1054 loop
1055 {
1056 #if defined(HAVE_POLL) && !defined(__APPLE__)
1057 // fd is restricted on OsX by ulimit "file descriptors" (256)
1058 pollfd pfd;
1059 pfd.fd = fd;
1060 pfd.events = POLLIN;
1061 int rv = poll(&pfd, 1, 500000); /* msec*/
1062 #else
1063 // fd is restricted to <=1024
1064 fd_set set;
1065 FD_ZERO(&set); /* clear the set */
1066 FD_SET(fd, &set); /* add our file descriptor to the set */
1067 struct timeval timeout;
1068 timeout.tv_sec = 500;
1069 timeout.tv_usec = 0;
1070 int rv = si_select(fd + 1, &set, NULL, NULL, &timeout);
1071 #endif
1072 if (rv== -1) continue; /* an error occurred */
1073 if (rv== 0) break;/* timeout */
1074 while(read(fd, buf, 1)!=1) {}
1075 break;
1076 }
1077 }
1078 result = process_info(vmem.current_process).signal;
1079 process_info(vmem.current_process).sigstate
1080 = resume ? Waiting : Accepted;
1081 if (lock)
1082 unlock_process(vmem.current_process);
1083 break;
1084 }
1085 case Accepted:
1086 result = process_info(vmem.current_process).signal;
1087 if (resume)
1088 process_info(vmem.current_process).sigstate = Waiting;
1089 if (lock)
1090 unlock_process(vmem.current_process);
1091 break;
1092 }
1093 return result;
1094}
1095
1097 lock_process(vmem.current_process);
1098 process_info(vmem.current_process).sigstate = Waiting;
1099 unlock_process(vmem.current_process);
1100}
1101
1103 return check_signal(true, lock);
1104}
1105
1106} // namespace internals
1107
1109 using namespace internals;
1110 lock_metapage();
1111 for (int p = 0; p < MAX_PROCESS; p++) {
1112 if (vmem.metapage->process_info[p].pid == 0) {
1113 pid_t pid = fork();
1114 if (pid < 0) {
1115 // error
1116 return -1;
1117 } else if (pid == 0) {
1118 // child process
1119 int parent = vmem.current_process;
1120 vmem.current_process = p;
1121 lock_metapage();
1122 vmem.metapage->process_info[p].pid = getpid();
1123 unlock_metapage();
1124 send_signal(parent);
1125 } else {
1126 // parent process
1127 unlock_metapage();
1128 wait_signal();
1129 // child has unlocked metapage, so we don't need to.
1130 }
1131 return pid;
1132 }
1133 }
1134 unlock_metapage();
1135 return -1;
1136}
1137
1139 int wakeup = -1;
1141 _lock.lock();
1142 if (_head == _tail) {
1143 _value++;
1144 } else {
1145 // don't increment value, as we'll pass that on to the next process.
1146 wakeup = _waiting[_head];
1147 sig = _signals[_head];
1148 next(_head);
1149 }
1150 _lock.unlock();
1151 if (wakeup >= 0) {
1152 internals::send_signal(wakeup, sig);
1153 }
1154}
1155
1157 bool result = false;
1158 _lock.lock();
1159 if (_value > 0) {
1160 _value--;
1161 result = true;
1162 }
1163 _lock.unlock();
1164 return result;
1165}
1166
1168 _lock.lock();
1169 if (_value > 0) {
1170 _value--;
1171 _lock.unlock();
1172 return;
1173 }
1174 _waiting[_tail] = internals::vmem.current_process;
1175 _signals[_tail] = 0;
1176 next(_tail);
1177 _lock.unlock();
1179}
1180
1182 _lock.lock();
1183 if (_value > 0) {
1184 if (internals::send_signal(internals::vmem.current_process, sig))
1185 _value--;
1186 _lock.unlock();
1187 return false;
1188 }
1189 _waiting[_tail] = internals::vmem.current_process;
1190 _signals[_tail] = sig;
1191 next(_tail);
1192 _lock.unlock();
1193 return true;
1194}
1195
1197 bool result = false;
1198 _lock.lock();
1199 for (int i = _head; i != _tail; next(i)) {
1200 if (_waiting[i] == internals::vmem.current_process) {
1201 int last = i;
1202 next(i);
1203 while (i != _tail) {
1204 _waiting[last] = _waiting[i];
1205 _signals[last] = _signals[i];
1206 last = i;
1207 next(i);
1208 }
1209 _tail = last;
1210 result = true;
1211 break;
1212 }
1213 }
1214 _lock.unlock();
1215 return result;
1216}
1217
1218void EventSet::add(Event *event) {
1219 event->_next = NULL;
1220 if (_head == NULL) {
1221 _head = _tail = event;
1222 } else {
1223 _tail->_next = event;
1224 _tail = event;
1225 }
1226}
1227
1229 size_t n = 0;
1230 for (Event *event = _head; event; event = event->_next) {
1231 if (!event->start_listen((int) (n++))) {
1232 break;
1233 }
1234 }
1236 for (Event *event = _head; event; event = event->_next) {
1237 event->stop_listen();
1238 }
1240 return (int) result;
1241}
1242
1243} // namespace vspace
1244#endif
1245#endif
int size(const CanonicalForm &f, const Variable &v)
int size ( const CanonicalForm & f, const Variable & v )
Definition cf_ops.cc:600
int level(const CanonicalForm &f)
int i
Definition cfEzgcd.cc:132
int p
Definition cfModGcd.cc:4086
CanonicalForm fp
Definition cfModGcd.cc:4110
CanonicalForm map(const CanonicalForm &primElem, const Variable &alpha, const CanonicalForm &F, const Variable &beta)
map from to such that is mapped onto
void add(Event *event)
Definition vspace.cc:1218
Event * _head
Definition vspace.h:2581
Event * _tail
Definition vspace.h:2581
int _waiting[internals::MAX_PROCESS+1]
Definition vspace.h:2348
bool start_wait(internals::ipc_signal_t sig=0)
Definition vspace.cc:1181
internals::ipc_signal_t _signals[internals::MAX_PROCESS+1]
Definition vspace.h:2349
FastLock _lock
Definition vspace.h:2358
return result
int j
Definition facHensel.cc:110
void WerrorS(const char *s)
Definition feFopen.cc:24
STATIC_VAR poly last
Definition hdegree.cc:1138
NodeM * create()
Definition janet.cc:757
STATIC_VAR int offset
Definition janet.cc:29
ListNode * next
Definition janet.h:31
#define SEEK_SET
Definition mod2.h:115
void accept_signals()
Definition vspace.cc:1096
void unlock_metapage()
Definition vspace.cc:950
const vaddr_t VADDR_NULL
Definition vspace.h:1417
void init_flock_struct(struct flock &lock_info, size_t offset, size_t len, bool lock)
Definition vspace.cc:925
static ProcessInfo & process_info(int processno)
Definition vspace.cc:990
void lock_file(int fd, size_t offset, size_t len)
Definition vspace.cc:934
void vmem_free(vaddr_t vaddr)
Definition vspace.cc:823
Block * block_ptr(vaddr_t vaddr)
Definition vspace.h:1637
vaddr_t vmem_alloc(size_t size)
Definition vspace.cc:875
static void unlock_process(int processno)
Definition vspace.cc:984
static const size_t MAX_SEGMENTS
Definition vspace.h:1423
static const size_t SEGMENT_SIZE
Definition vspace.h:1424
static const size_t METABLOCK_SIZE
Definition vspace.h:1420
static void lock_process(int processno)
Definition vspace.cc:978
static const int LOG2_SEGMENT_SIZE
Definition vspace.h:1421
ipc_signal_t wait_signal(bool lock)
Definition vspace.cc:1102
void lock_metapage()
Definition vspace.cc:946
static const int MAX_PROCESS
Definition vspace.h:1419
static VMem & vmem
Definition vspace.h:1635
ProcessInfo process_info[MAX_PROCESS]
Definition vspace.h:1513
static void lock_allocator()
Definition vspace.cc:787
static segaddr_t find_buddy(segaddr_t addr, int level)
Definition vspace.h:1690
ipc_signal_t check_signal(bool resume, bool lock)
Definition vspace.cc:1017
void init_metapage(bool create)
Definition vspace.cc:954
void unlock_file(int fd, size_t offset, size_t len)
Definition vspace.cc:940
bool send_signal(int processno, ipc_signal_t sig, bool lock)
Definition vspace.cc:994
static int find_level(size_t size)
Definition vspace.h:1681
size_t config[4]
Definition vspace.cc:634
static void unlock_allocator()
Definition vspace.cc:791
static void print_freelists()
Definition vspace.cc:795
pid_t fork_process()
Definition vspace.cc:1108
@ ErrOS
Definition vspace.h:1380
@ ErrNone
Definition vspace.h:1376
@ ErrFile
Definition vspace.h:1378
internals::Mutex FastLock
Definition vspace.h:2340
#define NULL
Definition omList.c:12
#define block
Definition scanner.cc:646
int status read
Definition si_signals.h:69
int status int fd
Definition si_signals.h:69
int status int void size_t count open
Definition si_signals.h:83
int status int void * buf
Definition si_signals.h:69
#define loop
Definition structs.h:71
std::FILE * file_handle
Definition vspace.h:1591
Block * block_ptr(vaddr_t vaddr)
Definition vspace.h:1610
void * mmap_segment(int seg)
Definition vspace.cc:717
static VMem vmem_global
Definition vspace.h:1588
VSeg segments[MAX_SEGMENTS]
Definition vspace.h:1594
ProcessChannel channels[MAX_PROCESS]
Definition vspace.h:1595
Block * block_ptr(segaddr_t addr)
Definition vspace.h:1571
#define assert(A)
Definition svd_si.h:3
#define metapageaddr(field)
Definition vspace.cc:641