vspace.cc File Reference
#include "vspace.h"
#include "kernel/mod2.h"
#include <cstddef>
#include "reporter/si_signals.h"
#include <cstdlib>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/stat.h>


Namespaces

namespace  vspace
 
namespace  vspace::internals
 

Macros

#define metapageaddr(field)    ((char *) &vmem.metapage->field - (char *) vmem.metapage)
 

Functions

static void vspace::internals::lock_allocator ()
 
static void vspace::internals::unlock_allocator ()
 
static void vspace::internals::print_freelists ()
 
void vspace::internals::vmem_free (vaddr_t vaddr)
 
vaddr_t vspace::internals::vmem_alloc (size_t size)
 
void vspace::internals::init_flock_struct (struct flock &lock_info, size_t offset, size_t len, bool lock)
 
void vspace::internals::lock_file (int fd, size_t offset, size_t len)
 
void vspace::internals::unlock_file (int fd, size_t offset, size_t len)
 
void vspace::internals::lock_metapage ()
 
void vspace::internals::unlock_metapage ()
 
void vspace::internals::init_metapage (bool create)
 
static void vspace::internals::lock_process (int processno)
 
static void vspace::internals::unlock_process (int processno)
 
static ProcessInfo & vspace::internals::process_info (int processno)
 
bool vspace::internals::send_signal (int processno, ipc_signal_t sig, bool lock)
 
ipc_signal_t vspace::internals::check_signal (bool resume, bool lock)
 
void vspace::internals::accept_signals ()
 
ipc_signal_t vspace::internals::wait_signal (bool lock)
 
pid_t vspace::fork_process ()
 

Variables

size_t vspace::internals::config [4] = { METABLOCK_SIZE, MAX_PROCESS, SEGMENT_SIZE, MAX_SEGMENTS }
 

Macro Definition Documentation

◆ metapageaddr

#define metapageaddr(field)    ((char *) &vmem.metapage->field - (char *) vmem.metapage)

Definition at line 626 of file vspace.cc.
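metapageaddr(field) evaluates to the byte offset of field within the metapage, computed through the global vmem.metapage pointer in the style of offsetof. The code below uses such offsets both as fcntl record-lock positions (lock_process()) and to place the allocator lock (init_metapage()). The following sketch is not part of vspace.cc; it reproduces the same pointer arithmetic with stand-in types, since the real MetaPage and vmem are declared in vspace.h:

#include <cstdio>

// Stand-in types; the real ProcessInfo, MetaPage and vmem live in vspace.h.
struct DemoProcessInfo { int pid; };
struct DemoMetaPage {
  long config_header[4];
  long freelist[33];
  int segment_count;
  DemoProcessInfo process_info[64];
};
static struct { DemoMetaPage *metapage; } demo_vmem;

// Same arithmetic as metapageaddr(field).
#define demo_metapageaddr(field) \
  ((char *) &demo_vmem.metapage->field - (char *) demo_vmem.metapage)

int main() {
  DemoMetaPage page;
  demo_vmem.metapage = &page;
  // lock_process(p) locks the byte range starting at
  // metapageaddr(process_info) + sizeof(ProcessInfo) * p.
  std::printf("process_info starts at byte offset %ld\n",
              (long) demo_metapageaddr(process_info));
  return 0;
}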

627size_t VMem::filesize()
628 {
629 struct stat stat;
630 fstat(fd, &stat);
631 return stat.st_size;
632}
633
634Status VMem::init(int fd) {
635 this->fd = fd;
636 for (int i = 0; i < MAX_SEGMENTS; i++)
637 segments[i] = VSeg(NULL);
638 for (int i = 0; i < MAX_PROCESS; i++) {
639 int channel[2];
640 if (pipe(channel) < 0) {
641 for (int j = 0; j < i; j++) {
642 close(channels[j].fd_read);
643 close(channels[j].fd_write);
644 }
645 return Status(ErrOS);
646 }
647 channels[i].fd_read = channel[0];
648 channels[i].fd_write = channel[1];
649 }
650  lock_metapage();
651  init_metapage(filesize() == 0);
652  unlock_metapage();
653 freelist = metapage->freelist;
654 return Status(ErrNone);
655}
656
657Status VMem::init() {
658 FILE *fp = tmpfile();
659 Status result = init(fileno(fp));
660 if (!result.ok())
661 return result;
662 current_process = 0;
663 file_handle = fp;
664 metapage->process_info[0].pid = getpid();
665 return Status(ErrNone);
666}
667
668Status VMem::init(const char *path) {
669 int fd = open(path, O_RDWR | O_CREAT, 0600);
670 if (fd < 0)
671 return Status(ErrFile);
672 init(fd);
673  lock_metapage();
674  // TODO: enter process in meta table
675  unlock_metapage();
676 return Status(ErrNone);
677}
678
679void VMem::deinit() {
680 if (file_handle) {
681 fclose(file_handle);
682 file_handle = NULL;
683 } else {
684 close(fd);
685 }
686 munmap(metapage, METABLOCK_SIZE);
687 metapage = NULL;
688 current_process = -1;
689 freelist = NULL;
690 for (int i = 0; i < MAX_SEGMENTS; i++) {
691 if (!segments[i].is_free())
692 munmap(segments[i].base, SEGMENT_SIZE);
693 segments[i] = VSeg(NULL);
694 }
695 for (int i = 0; i < MAX_PROCESS; i++) {
696 close(channels[i].fd_read);
697 close(channels[i].fd_write);
698 }
699}
700
701void *VMem::mmap_segment(int seg) {
703 void *map = mmap(NULL, SEGMENT_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
704 METABLOCK_SIZE + seg * SEGMENT_SIZE);
705 if (map == MAP_FAILED) {
706    // This is an "impossible to proceed from here, because the system
707    // state cannot be recovered" situation, so we abort the program.
708 perror("mmap");
709 abort();
710 }
712 return map;
713}
714
715void VMem::add_segment() {
716 int seg = metapage->segment_count++;
717 ftruncate(fd, METABLOCK_SIZE + metapage->segment_count * SEGMENT_SIZE);
718 void *map_addr = mmap_segment(seg);
719 segments[seg] = VSeg(map_addr);
720 Block *top = block_ptr(seg * SEGMENT_SIZE);
721 top->next = freelist[LOG2_SEGMENT_SIZE];
722 top->prev = VADDR_NULL;
723 freelist[LOG2_SEGMENT_SIZE] = seg * SEGMENT_SIZE;
724}
725
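// FastLock: under HAVE_CPP_THREADS, lock() spins on an atomic flag and either
// takes ownership or appends the calling process to a wait queue threaded
// through the metapage's process_info entries, then blocks in wait_signal();
// unlock() pops the queue head and hands the lock over via send_signal().
// Without HAVE_CPP_THREADS, the lock is an fcntl record lock on the backing
// file at byte offset _offset.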
726void FastLock::lock() {
727#ifdef HAVE_CPP_THREADS
728 while (_lock.test_and_set()) {
729 }
730 bool empty = _owner < 0;
731 if (empty) {
732 _owner = vmem.current_process;
733 } else {
734 int p = vmem.current_process;
735 vmem.metapage->process_info[p].next = -1;
736 if (_head < 0)
737 _head = p;
738 else
739 vmem.metapage->process_info[_tail].next = p;
740 _tail = p;
741 }
742 _lock.clear();
743 if (!empty)
744 wait_signal(false);
745#else
746 lock_file(vmem.fd, _offset);
747#endif
748}
749
750void FastLock::unlock() {
751#ifdef HAVE_CPP_THREADS
752 while (_lock.test_and_set()) {
753 }
754 _owner = _head;
755 if (_owner >= 0)
756 _head = vmem.metapage->process_info[_head].next;
757 _lock.clear();
758 if (_owner >= 0)
759 send_signal(_owner, 0, false);
760#else
761 unlock_file(vmem.fd, _offset);
762#endif
763}
764
765static void lock_allocator() {
766 vmem.metapage->allocator_lock.lock();
767}
768
769static void unlock_allocator() {
770 vmem.metapage->allocator_lock.unlock();
771}
772
773static void print_freelists() {
774 for (int i = 0; i <= LOG2_SEGMENT_SIZE; i++) {
775 vaddr_t vaddr = vmem.freelist[i];
776 if (vaddr != VADDR_NULL) {
777 std::printf("%2d: %ld", i, (long)vaddr);
778 vaddr_t prev = block_ptr(vaddr)->prev;
779 if (prev != VADDR_NULL) {
780 std::printf("(%ld)", (long)prev);
781 }
782 assert(block_ptr(vaddr)->prev == VADDR_NULL);
783 for (;;) {
784 vaddr_t last_vaddr = vaddr;
785 Block *block = block_ptr(vaddr);
786 vaddr = block->next;
787 if (vaddr == VADDR_NULL)
788 break;
789 std::printf(" -> %ld", (long)vaddr);
790 vaddr_t prev = block_ptr(vaddr)->prev;
791 if (prev != last_vaddr) {
792 std::printf("(%ld)", (long)prev);
793 }
794 }
795 std::printf("\n");
796 }
797 }
798 std::fflush(stdout);
799}
800
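// vmem_free: buddy deallocation. Starting at the block's own level, the loop
// merges the block with its buddy for as long as the buddy is free and of the
// same size, unlinking the buddy from its freelist; the coalesced block is
// then pushed onto the freelist for the resulting level.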
801void vmem_free(vaddr_t vaddr) {
802  lock_allocator();
803 #if defined(__GNUC__) && (__GNUC__>11)
804 vaddr -= (sizeof(vaddr_t)*2);
805 #else
806 vaddr -= offsetof(Block, data);
807 #endif
808 vmem.ensure_is_mapped(vaddr);
809 size_t segno = vmem.segment_no(vaddr);
810 VSeg seg = vmem.segment(vaddr);
811 segaddr_t addr = vmem.segaddr(vaddr);
812 int level = seg.block_ptr(addr)->level();
813 assert(!seg.is_free(addr));
814 while (level < LOG2_SEGMENT_SIZE) {
815 segaddr_t buddy = find_buddy(addr, level);
816 Block *block = seg.block_ptr(buddy);
817 // is buddy free and at the same level?
818 if (!block->is_free() || block->level() != level)
819 break;
820 // remove buddy from freelist.
821 Block *prev = vmem.block_ptr(block->prev);
822 Block *next = vmem.block_ptr(block->next);
823 block->data[0] = level;
824 if (prev) {
825 assert(prev->next == vmem.vaddr(segno, buddy));
826 prev->next = block->next;
827 } else {
828 // head of freelist.
829 assert(vmem.freelist[level] == vmem.vaddr(segno, buddy));
830 vmem.freelist[level] = block->next;
831 }
832 if (next) {
833 assert(next->prev == vmem.vaddr(segno, buddy));
834 next->prev = block->prev;
835 }
836 // coalesce block with buddy
837 level++;
838 if (buddy < addr)
839 addr = buddy;
840 }
841 // Add coalesced block to free list
842 Block *block = seg.block_ptr(addr);
843 block->prev = VADDR_NULL;
844 block->next = vmem.freelist[level];
845 block->mark_as_free(level);
846 vaddr_t blockaddr = vmem.vaddr(segno, addr);
847 if (block->next != VADDR_NULL)
848 vmem.block_ptr(block->next)->prev = blockaddr;
849 vmem.freelist[level] = blockaddr;
850  unlock_allocator();
851}
852
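// vmem_alloc: buddy allocation. The request (plus the Block header) determines
// a level; the smallest non-empty freelist at or above it supplies a block
// (adding a fresh segment if none is available), which is split in halves
// until it matches the requested level. The returned vaddr points just past
// the Block header, and the usable bytes are zeroed.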
853vaddr_t vmem_alloc(size_t size) {
854  lock_allocator();
855 #if defined(__GNUC__) && (__GNUC__>11)
856 size_t alloc_size = size + (sizeof(vaddr_t)*2);
857 #else
858 size_t alloc_size = size + offsetof(Block, data);
859 #endif
860 int level = find_level(alloc_size);
861 int flevel = level;
862 while (flevel < LOG2_SEGMENT_SIZE && vmem.freelist[flevel] == VADDR_NULL)
863 flevel++;
864 if (vmem.freelist[flevel] == VADDR_NULL) {
865 vmem.add_segment();
866 }
867 vmem.ensure_is_mapped(vmem.freelist[flevel]);
868 while (flevel > level) {
869 // get and split a block
870 vaddr_t blockaddr = vmem.freelist[flevel];
871 assert((blockaddr & ((1 << flevel) - 1)) == 0);
872 Block *block = vmem.block_ptr(blockaddr);
873 vmem.freelist[flevel] = block->next;
874 if (vmem.freelist[flevel] != VADDR_NULL)
875 vmem.block_ptr(vmem.freelist[flevel])->prev = VADDR_NULL;
876 vaddr_t blockaddr2 = blockaddr + (1 << (flevel - 1));
877 Block *block2 = vmem.block_ptr(blockaddr2);
878 flevel--;
879 block2->next = vmem.freelist[flevel];
880 block2->prev = blockaddr;
881 block->next = blockaddr2;
882 block->prev = VADDR_NULL;
883 // block->prev == VADDR_NULL already.
884 vmem.freelist[flevel] = blockaddr;
885 }
886 assert(vmem.freelist[level] != VADDR_NULL);
887 Block *block = vmem.block_ptr(vmem.freelist[level]);
888 vaddr_t vaddr = vmem.freelist[level];
889 #if defined(__GNUC__) && (__GNUC__>11)
890 vaddr_t result = vaddr + (sizeof(vaddr_t)*2);
891 #else
892 vaddr_t result = vaddr + offsetof(Block, data);
893 #endif
894 vmem.freelist[level] = block->next;
895 if (block->next != VADDR_NULL)
896 vmem.block_ptr(block->next)->prev = VADDR_NULL;
897 block->mark_as_allocated(vaddr, level);
898  unlock_allocator();
899  memset(block->data, 0, size);
900 return result;
901}
902
903void init_flock_struct(
904    struct flock &lock_info, size_t offset, size_t len, bool lock) {
905 lock_info.l_start = offset;
906 lock_info.l_len = len;
907 lock_info.l_pid = 0;
908 lock_info.l_type = lock ? F_WRLCK : F_UNLCK;
909 lock_info.l_whence = SEEK_SET;
910}
911
912void lock_file(int fd, size_t offset, size_t len) {
913  struct flock lock_info;
914  init_flock_struct(lock_info, offset, len, true);
915  fcntl(fd, F_SETLKW, &lock_info);
916}
917
918void unlock_file(int fd, size_t offset, size_t len) {
919  struct flock lock_info;
920  init_flock_struct(lock_info, offset, len, false);
921  fcntl(fd, F_SETLKW, &lock_info);
922}
923
924void lock_metapage() {
925 lock_file(vmem.fd, 0);
926}
927
928void unlock_metapage() {
929 unlock_file(vmem.fd, 0);
930}
931
932void init_metapage(bool create) {
933 if (create)
934 ftruncate(vmem.fd, METABLOCK_SIZE);
935 vmem.metapage = (MetaPage *) mmap(
936 NULL, METABLOCK_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, vmem.fd, 0);
937 if (create) {
938 std::memcpy(vmem.metapage->config_header, config, sizeof(config));
939 for (int i = 0; i <= LOG2_SEGMENT_SIZE; i++) {
940 vmem.metapage->freelist[i] = VADDR_NULL;
941 }
942 vmem.metapage->segment_count = 0;
943 vmem.metapage->allocator_lock = FastLock(metapageaddr(allocator_lock));
944 } else {
945    assert(std::memcmp(vmem.metapage->config_header, config,
946        sizeof(config)) == 0); // an existing file must match our configuration
947 }
948}
949
950static void lock_process(int processno) {
951 lock_file(vmem.fd,
952 metapageaddr(process_info)
953 + sizeof(ProcessInfo) * vmem.current_process);
954}
955
956static void unlock_process(int processno) {
957 unlock_file(vmem.fd,
958 metapageaddr(process_info)
959 + sizeof(ProcessInfo) * vmem.current_process);
960}
961
962static ProcessInfo &process_info(int processno) {
963 return vmem.metapage->process_info[processno];
964}
965
966bool send_signal(int processno, ipc_signal_t sig, bool lock) {
967  if (lock)
968    lock_process(processno);
969 if (process_info(processno).sigstate != Waiting) {
971 return false;
972 }
973 if (processno == vmem.current_process) {
974 process_info(processno).sigstate = Accepted;
975 process_info(processno).signal = sig;
976 } else {
977 process_info(processno).sigstate = Pending;
978 process_info(processno).signal = sig;
979 int fd = vmem.channels[processno].fd_write;
980 char buf[1] = { 0 };
981 while (write(fd, buf, 1) != 1) {
982 }
983 }
984  if (lock)
985    unlock_process(processno);
986 return true;
987}
988
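// check_signal: inspects the calling process's signal slot in the metapage.
// While the slot is still Waiting or Pending it blocks on the per-process
// pipe, using poll() where available and si_select() otherwise (each with a
// long timeout), and consumes one byte once the sender has written; the
// recorded signal value is returned and the slot is set back to Waiting
// (resume == true) or left as Accepted.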
989ipc_signal_t check_signal(bool resume, bool lock) {
990  ipc_signal_t result;
991  if (lock)
992 lock_process(vmem.current_process);
993 SignalState sigstate = process_info(vmem.current_process).sigstate;
994 switch (sigstate) {
995 case Waiting:
996 case Pending: {
997 int fd = vmem.channels[vmem.current_process].fd_read;
998 char buf[1];
999 if (lock && sigstate == Waiting) {
1000 unlock_process(vmem.current_process);
1001 loop
1002 {
1003 #if defined(HAVE_POLL) && !defined(__APPLE__)
1004        // fd is restricted on macOS by ulimit "file descriptors" (256)
1005 pollfd pfd;
1006 pfd.fd = fd;
1007 pfd.events = POLLIN;
1008 int rv = poll(&pfd, 1, 500000); /* msec*/
1009 #else
1010 // fd is restricted to <=1024
1011 fd_set set;
1012 FD_ZERO(&set); /* clear the set */
1013 FD_SET(fd, &set); /* add our file descriptor to the set */
1014 struct timeval timeout;
1015 timeout.tv_sec = 500;
1016 timeout.tv_usec = 0;
1017 int rv = si_select(fd + 1, &set, NULL, NULL, &timeout);
1018 #endif
1019 if (rv== -1) continue; /* an error occurred */
1020 if (rv== 0) break; /* timeout */
1021 while(read(fd, buf, 1)!=1) {}
1022 break;
1023 }
1024 lock_process(vmem.current_process);
1025 } else {
1026 loop
1027 {
1028 #if defined(HAVE_POLL) && !defined(__APPLE__)
1029 // fd is restricted on OsX by ulimit "file descriptors" (256)
1030 pollfd pfd;
1031 pfd.fd = fd;
1032 pfd.events = POLLIN;
1033 int rv = poll(&pfd, 1, 500000); /* msec*/
1034 #else
1035 // fd is restricted to <=1024
1036 fd_set set;
1037 FD_ZERO(&set); /* clear the set */
1038 FD_SET(fd, &set); /* add our file descriptor to the set */
1039 struct timeval timeout;
1040 timeout.tv_sec = 500;
1041 timeout.tv_usec = 0;
1042 int rv = si_select(fd + 1, &set, NULL, NULL, &timeout);
1043 #endif
1044 if (rv== -1) continue; /* an error occurred */
1045 if (rv== 0) break;/* timeout */
1046 while(read(fd, buf, 1)!=1) {}
1047 break;
1048 }
1049 }
1050 result = process_info(vmem.current_process).signal;
1051 process_info(vmem.current_process).sigstate
1052 = resume ? Waiting : Accepted;
1053 if (lock)
1054 unlock_process(vmem.current_process);
1055 break;
1056 }
1057 case Accepted:
1058 result = process_info(vmem.current_process).signal;
1059 if (resume)
1060 process_info(vmem.current_process).sigstate = Waiting;
1061 if (lock)
1062 unlock_process(vmem.current_process);
1063 break;
1064 }
1065 return result;
1066}
1067
1068void accept_signals() {
1069 lock_process(vmem.current_process);
1070 process_info(vmem.current_process).sigstate = Waiting;
1071 unlock_process(vmem.current_process);
1072}
1073
1074ipc_signal_t wait_signal(bool lock) {
1075 return check_signal(true, lock);
1076}
1077
1078} // namespace internals
1079
1080pid_t fork_process() {
1081  using namespace internals;
1082 lock_metapage();
1083 for (int p = 0; p < MAX_PROCESS; p++) {
1084 if (vmem.metapage->process_info[p].pid == 0) {
1085 pid_t pid = fork();
1086 if (pid < 0) {
1087 // error
1088 return -1;
1089 } else if (pid == 0) {
1090 // child process
1091 int parent = vmem.current_process;
1092 vmem.current_process = p;
1093 lock_metapage();
1094 vmem.metapage->process_info[p].pid = getpid();
1095        unlock_metapage();
1096        send_signal(parent);
1097 } else {
1098 // parent process
1100 wait_signal();
1101 // child has unlocked metapage, so we don't need to.
1102 }
1103 return pid;
1104 }
1105 }
1106  unlock_metapage();
1107  return -1;
1108}
1109
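// Semaphore: a counter plus a queue of waiting process numbers, guarded by a
// FastLock. post() either increments the counter or wakes the first waiter by
// forwarding the signal it registered; wait() enqueues the calling process and
// blocks in wait_signal(); start_wait() and stop_wait() let a caller register
// for a wakeup signal without blocking and cancel that registration again.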
1110void Semaphore::post() {
1111 int wakeup = -1;
1112 internals::ipc_signal_t sig;
1113 _lock.lock();
1114 if (_head == _tail) {
1115 _value++;
1116 } else {
1117 // don't increment value, as we'll pass that on to the next process.
1118 wakeup = _waiting[_head];
1119 sig = _signals[_head];
1120 next(_head);
1121 }
1122 _lock.unlock();
1123 if (wakeup >= 0) {
1124 internals::send_signal(wakeup, sig);
1125 }
1126}
1127
1128bool Semaphore::try_wait() {
1129 bool result = false;
1130 _lock.lock();
1131 if (_value > 0) {
1132 _value--;
1133 result = true;
1134 }
1135 _lock.unlock();
1136 return result;
1137}
1138
1139void Semaphore::wait() {
1140 _lock.lock();
1141 if (_value > 0) {
1142 _value--;
1143 _lock.unlock();
1144 return;
1145 }
1146 _waiting[_tail] = internals::vmem.current_process;
1147 _signals[_tail] = 0;
1148 next(_tail);
1149 _lock.unlock();
1150 internals::wait_signal();
1151}
1152
1153bool Semaphore::start_wait(internals::ipc_signal_t sig) {
1154 _lock.lock();
1155 if (_value > 0) {
1156 if (internals::send_signal(internals::vmem.current_process, sig))
1157 _value--;
1158 _lock.unlock();
1159 return false;
1160 }
1161 _waiting[_tail] = internals::vmem.current_process;
1162 _signals[_tail] = sig;
1163 next(_tail);
1164 _lock.unlock();
1165 return true;
1166}
1167
1168bool Semaphore::stop_wait() {
1169 bool result = false;
1170 _lock.lock();
1171 for (int i = _head; i != _tail; next(i)) {
1172 if (_waiting[i] == internals::vmem.current_process) {
1173 int last = i;
1174 next(i);
1175 while (i != _tail) {
1176 _waiting[last] = _waiting[i];
1177 _signals[last] = _signals[i];
1178 last = i;
1179 next(i);
1180 }
1181 _tail = last;
1182 result = true;
1183 break;
1184 }
1185 }
1186 _lock.unlock();
1187 return result;
1188}
1189
1190void EventSet::add(Event *event) {
1191 event->_next = NULL;
1192 if (_head == NULL) {
1193 _head = _tail = event;
1194 } else {
1195 _tail->_next = event;
1196 _tail = event;
1197 }
1198}
1199
1200int EventSet::wait() {
1201 size_t n = 0;
1202 for (Event *event = _head; event; event = event->_next) {
1203 if (!event->start_listen((int) (n++))) {
1204 break;
1205 }
1206 }
1207 internals::ipc_signal_t result = internals::check_signal();
1208 for (Event *event = _head; event; event = event->_next) {
1209 event->stop_listen();
1210 }
1211 internals::accept_signals();
1212 return (int) result;
1213}
1214
1215} // namespace vspace
1216#endif
1217#endif
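As a closing illustration (not part of vspace.cc): a minimal sketch of driving the allocator through the entry points documented above. It assumes that vspace.h exposes vspace::internals::vmem, vaddr_t, vmem_alloc() and vmem_free() for direct use, and that the no-argument VMem::init() shown above (tmpfile-backed mapping) is the intended way to set up a fresh region:

#include "vspace.h"
#include <cstdio>

int main() {
  using namespace vspace::internals;
  // Back the shared region with a temporary file and set up the metapage.
  if (!vmem.init().ok())
    return 1;
  // Two small allocations; the first triggers add_segment(), since all
  // freelists start out empty.
  vaddr_t a = vmem_alloc(100);
  vaddr_t b = vmem_alloc(4000);
  std::printf("a = %ld, b = %ld\n", (long) a, (long) b);
  // Return both blocks; vmem_free() coalesces buddies back into larger blocks.
  vmem_free(b);
  vmem_free(a);
  vmem.deinit();
  return 0;
}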