/* minix/servers/ipc/shm.c - System V shared memory (shm) handling */
#include "inc.h"

/* Private shm_perm.mode flags, synchronized with NetBSD kernel values */
#define SHM_ALLOC	0x0800	/* slot is in use (SHMSEG_ALLOCATED) */

struct shm_struct {
	struct shmid_ds shmid_ds;
	vir_bytes page;
	phys_bytes vm_id;
};

static struct shm_struct shm_list[SHMMNI];
static unsigned int shm_list_nr = 0;	/* highest in-use slot number plus one */

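/*
 * An IPC identifier encodes both a slot index and a per-slot sequence number
 * (see IPCID_TO_IX and IPCID_TO_SEQ), so that an identifier becomes stale as
 * soon as its slot is reused for another segment.
 */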
static struct shm_struct *
shm_find_key(key_t key)
{
	unsigned int i;

	if (key == IPC_PRIVATE)
		return NULL;

	for (i = 0; i < shm_list_nr; i++) {
		if (!(shm_list[i].shmid_ds.shm_perm.mode & SHM_ALLOC))
			continue;
		if (shm_list[i].shmid_ds.shm_perm._key == key)
			return &shm_list[i];
	}

	return NULL;
}

static struct shm_struct *
shm_find_id(int id)
{
	struct shm_struct *shm;
	unsigned int i;

	i = IPCID_TO_IX(id);
	if (i >= shm_list_nr)
		return NULL;

	shm = &shm_list[i];
	if (!(shm->shmid_ds.shm_perm.mode & SHM_ALLOC))
		return NULL;
	if (shm->shmid_ds.shm_perm._seq != IPCID_TO_SEQ(id))
		return NULL;
	return shm;
}

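/*
 * Implementation of shmget(2): create a new shared memory segment, or look
 * up an existing segment by key.
 */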
int
do_shmget(message * m)
{
	struct shm_struct *shm;
	unsigned int i, seq;
	key_t key;
	size_t size, old_size;
	int flag;
	void *page;

	key = m->m_lc_ipc_shmget.key;
	old_size = size = m->m_lc_ipc_shmget.size;
	flag = m->m_lc_ipc_shmget.flag;

	if ((shm = shm_find_key(key)) != NULL) {
		if (!check_perm(&shm->shmid_ds.shm_perm, m->m_source, flag))
			return EACCES;
		if ((flag & IPC_CREAT) && (flag & IPC_EXCL))
			return EEXIST;
		if (size && shm->shmid_ds.shm_segsz < size)
			return EINVAL;
		i = shm - shm_list;
	} else { /* no key found */
		if (!(flag & IPC_CREAT))
			return ENOENT;
		if (size <= 0)
			return EINVAL;
		size = roundup(size, PAGE_SIZE);
		if (size <= 0)
			return EINVAL;
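		/*
		 * The size is now rounded up to a whole number of pages; the
		 * check above catches the case that the rounding overflowed
		 * to zero.  For example, with 4096-byte pages, a 100-byte
		 * request reserves one full page, while shm_segsz below
		 * still records the original 100-byte size.
		 */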

		/* Find a free entry. */
		for (i = 0; i < __arraycount(shm_list); i++)
			if (!(shm_list[i].shmid_ds.shm_perm.mode & SHM_ALLOC))
				break;
		if (i == __arraycount(shm_list))
			return ENOSPC;

		/*
		 * Allocate memory to share.  For now, we store the page
		 * reference as a numerical value so as to avoid issues with
		 * live update.  TODO: a proper solution.
		 */
		page = mmap(0, size, PROT_READ | PROT_WRITE, MAP_ANON, -1, 0);
		if (page == MAP_FAILED)
			return ENOMEM;
		memset(page, 0, size);

		/* Initialize the entry. */
		shm = &shm_list[i];
		seq = shm->shmid_ds.shm_perm._seq;
		memset(shm, 0, sizeof(*shm));

		shm->shmid_ds.shm_perm._key = key;
		shm->shmid_ds.shm_perm.cuid =
		    shm->shmid_ds.shm_perm.uid = getnuid(m->m_source);
		shm->shmid_ds.shm_perm.cgid =
		    shm->shmid_ds.shm_perm.gid = getngid(m->m_source);
		shm->shmid_ds.shm_perm.mode = SHM_ALLOC | (flag & ACCESSPERMS);
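		/*
		 * The sequence number, preserved across the memset above, is
		 * incremented (masked to 15 bits) on every reuse of the slot,
		 * so that stale identifiers are rejected by shm_find_id.
		 */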
		shm->shmid_ds.shm_perm._seq = (seq + 1) & 0x7fff;
		shm->shmid_ds.shm_segsz = old_size;
		shm->shmid_ds.shm_atime = 0;
		shm->shmid_ds.shm_dtime = 0;
		shm->shmid_ds.shm_ctime = clock_time(NULL);
		shm->shmid_ds.shm_cpid = getnpid(m->m_source);
		shm->shmid_ds.shm_lpid = 0;
		shm->shmid_ds.shm_nattch = 0;
		shm->page = (vir_bytes)page;
		shm->vm_id = vm_getphys(sef_self(), page);

		assert(i <= shm_list_nr);
		if (i == shm_list_nr)
			shm_list_nr++;
	}

	m->m_lc_ipc_shmget.retid = IXSEQ_TO_IPCID(i, shm->shmid_ds.shm_perm);
	return OK;
}

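/*
 * Implementation of shmat(2): map a shared memory segment into the calling
 * process's address space.
 */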
int
do_shmat(message * m)
{
	int id, flag, mask;
	vir_bytes addr;
	void *ret;
	struct shm_struct *shm;

	id = m->m_lc_ipc_shmat.id;
	addr = (vir_bytes)m->m_lc_ipc_shmat.addr;
	flag = m->m_lc_ipc_shmat.flag;

	if (addr % PAGE_SIZE) {
		if (flag & SHM_RND)
			addr -= addr % PAGE_SIZE;
		else
			return EINVAL;
	}

	if ((shm = shm_find_id(id)) == NULL)
		return EINVAL;

	mask = 0;
	if (flag & SHM_RDONLY)
		mask = IPC_R;
	else
		mask = IPC_R | IPC_W;
	if (!check_perm(&shm->shmid_ds.shm_perm, m->m_source, mask))
		return EACCES;

	ret = vm_remap(m->m_source, sef_self(), (void *)addr,
	    (void *)shm->page, shm->shmid_ds.shm_segsz);
	if (ret == MAP_FAILED)
		return ENOMEM;

	shm->shmid_ds.shm_atime = clock_time(NULL);
	shm->shmid_ds.shm_lpid = getnpid(m->m_source);
	/* nattch is updated lazily */

	m->m_lc_ipc_shmat.retaddr = ret;
	return OK;
}

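/*
 * Update the attachment count of each segment from VM's reference count on
 * its underlying physical region, and deallocate any segment that has been
 * marked for removal (SHM_DEST) and is no longer attached anywhere.  The
 * count is lowered by one because this server's own mapping of the segment
 * also counts as a reference.
 */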
void
update_refcount_and_destroy(void)
{
	u8_t rc;
	unsigned int i;

	for (i = 0; i < shm_list_nr; i++) {
		if (!(shm_list[i].shmid_ds.shm_perm.mode & SHM_ALLOC))
			continue;

		rc = vm_getrefcount(sef_self(), (void *)shm_list[i].page);
		if (rc == (u8_t)-1) {
			printf("IPC: can't find physical region.\n");
			continue;
		}
		shm_list[i].shmid_ds.shm_nattch = rc - 1;

		if (shm_list[i].shmid_ds.shm_nattch == 0 &&
		    (shm_list[i].shmid_ds.shm_perm.mode & SHM_DEST)) {
			munmap((void *)shm_list[i].page,
			    roundup(shm_list[i].shmid_ds.shm_segsz,
			    PAGE_SIZE));
			/* Mark the entry as free. */
			shm_list[i].shmid_ds.shm_perm.mode &= ~SHM_ALLOC;
		}
	}

	/*
	 * Now that we may have removed an arbitrary set of slots, ensure that
	 * shm_list_nr again equals the highest in-use slot number plus one.
	 */
	while (shm_list_nr > 0 &&
	    !(shm_list[shm_list_nr - 1].shmid_ds.shm_perm.mode & SHM_ALLOC))
		shm_list_nr--;
}

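/*
 * Implementation of shmdt(2): unmap a shared memory segment from the calling
 * process.  The segment is identified by translating the caller-supplied
 * address to a physical region and matching it against each segment's vm_id.
 */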
int
do_shmdt(message * m)
{
	struct shm_struct *shm;
	vir_bytes addr;
	phys_bytes vm_id;
	unsigned int i;

	addr = (vir_bytes)m->m_lc_ipc_shmdt.addr;

	if ((vm_id = vm_getphys(m->m_source, (void *)addr)) == 0)
		return EINVAL;

	for (i = 0; i < shm_list_nr; i++) {
		shm = &shm_list[i];

		if (!(shm->shmid_ds.shm_perm.mode & SHM_ALLOC))
			continue;

		if (shm->vm_id == vm_id) {
			shm->shmid_ds.shm_atime = clock_time(NULL);
			shm->shmid_ds.shm_lpid = getnpid(m->m_source);
			/* nattch is updated lazily */

			vm_unmap(m->m_source, (void *)addr);
			break;
		}
	}

	if (i == shm_list_nr)
		printf("IPC: do_shmdt: ID %lu not found\n", vm_id);

	update_refcount_and_destroy();

	return OK;
}

/*
 * Fill a shminfo structure with actual information.
 */
static void
fill_shminfo(struct shminfo * sinfo)
{

	memset(sinfo, 0, sizeof(*sinfo));

	sinfo->shmmax = (unsigned long)-1;
	sinfo->shmmin = 1;
	sinfo->shmmni = __arraycount(shm_list);
	sinfo->shmseg = (unsigned long)-1;
	sinfo->shmall = (unsigned long)-1;
}

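/*
 * Implementation of shmctl(2), including the IPC_INFO, SHM_INFO, and
 * SHM_STAT commands used by ipcs(1).
 */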
int
do_shmctl(message * m)
{
	struct shmid_ds tmp_ds;
	struct shm_struct *shm;
	struct shminfo sinfo;
	struct shm_info s_info;
	vir_bytes buf;
	unsigned int i;
	uid_t uid;
	int r, id, cmd;

	id = m->m_lc_ipc_shmctl.id;
	cmd = m->m_lc_ipc_shmctl.cmd;
	buf = (vir_bytes)m->m_lc_ipc_shmctl.buf;

	/*
	 * For stat calls, make sure that all information is up-to-date.
	 * Since this may free the slot, do this before mapping from ID to
	 * slot below.
	 */
	if (cmd == IPC_STAT || cmd == SHM_STAT)
		update_refcount_and_destroy();

	switch (cmd) {
	case IPC_INFO:
	case SHM_INFO:
		shm = NULL;
		break;
	case SHM_STAT:
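		/*
		 * For SHM_STAT, the given identifier is a raw slot index
		 * rather than a full IPC identifier; the matching IPC
		 * identifier is returned through the result field below.
		 */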
		if (id < 0 || (unsigned int)id >= shm_list_nr)
			return EINVAL;
		shm = &shm_list[id];
		if (!(shm->shmid_ds.shm_perm.mode & SHM_ALLOC))
			return EINVAL;
		break;
	default:
		if ((shm = shm_find_id(id)) == NULL)
			return EINVAL;
		break;
	}

	switch (cmd) {
	case IPC_STAT:
	case SHM_STAT:
		/* Check whether the caller has read permission. */
		if (!check_perm(&shm->shmid_ds.shm_perm, m->m_source, IPC_R))
			return EACCES;
		if ((r = sys_datacopy(SELF, (vir_bytes)&shm->shmid_ds,
		    m->m_source, buf, sizeof(shm->shmid_ds))) != OK)
			return r;
		if (cmd == SHM_STAT)
			m->m_lc_ipc_shmctl.ret =
			    IXSEQ_TO_IPCID(id, shm->shmid_ds.shm_perm);
		break;
	case IPC_SET:
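		/*
		 * Only the segment's owner, its creator, or the superuser
		 * may change its ownership and permission bits.
		 */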
		uid = getnuid(m->m_source);
		if (uid != shm->shmid_ds.shm_perm.cuid &&
		    uid != shm->shmid_ds.shm_perm.uid && uid != 0)
			return EPERM;
		if ((r = sys_datacopy(m->m_source, buf, SELF,
		    (vir_bytes)&tmp_ds, sizeof(tmp_ds))) != OK)
			return r;
		shm->shmid_ds.shm_perm.uid = tmp_ds.shm_perm.uid;
		shm->shmid_ds.shm_perm.gid = tmp_ds.shm_perm.gid;
		shm->shmid_ds.shm_perm.mode &= ~ACCESSPERMS;
		shm->shmid_ds.shm_perm.mode |=
		    tmp_ds.shm_perm.mode & ACCESSPERMS;
		shm->shmid_ds.shm_ctime = clock_time(NULL);
		break;
	case IPC_RMID:
		uid = getnuid(m->m_source);
		if (uid != shm->shmid_ds.shm_perm.cuid &&
		    uid != shm->shmid_ds.shm_perm.uid && uid != 0)
			return EPERM;
		shm->shmid_ds.shm_perm.mode |= SHM_DEST;
		/* Destroy if possible. */
		update_refcount_and_destroy();
		break;
	case IPC_INFO:
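		/*
		 * The result value is the highest slot number currently in
		 * use, which ipcs(1) can use as an upper bound for
		 * subsequent SHM_STAT calls.
		 */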
		fill_shminfo(&sinfo);
		if ((r = sys_datacopy(SELF, (vir_bytes)&sinfo, m->m_source,
		    buf, sizeof(sinfo))) != OK)
			return r;
		if (shm_list_nr > 0)
			m->m_lc_ipc_shmctl.ret = shm_list_nr - 1;
		else
			m->m_lc_ipc_shmctl.ret = 0;
		break;
	case SHM_INFO:
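		/*
		 * Usage totals are reported in pages.  Since segments are
		 * never swapped out, the resident size equals the total
		 * size.
		 */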
		memset(&s_info, 0, sizeof(s_info));
		s_info.used_ids = shm_list_nr;
		s_info.shm_tot = 0;
		for (i = 0; i < shm_list_nr; i++)
			s_info.shm_tot +=
			    shm_list[i].shmid_ds.shm_segsz / PAGE_SIZE;
		s_info.shm_rss = s_info.shm_tot;
		s_info.shm_swp = 0;
		s_info.swap_attempts = 0;
		s_info.swap_successes = 0;
		if ((r = sys_datacopy(SELF, (vir_bytes)&s_info, m->m_source,
		    buf, sizeof(s_info))) != OK)
			return r;
		if (shm_list_nr > 0)
			m->m_lc_ipc_shmctl.ret = shm_list_nr - 1;
		else
			m->m_lc_ipc_shmctl.ret = 0;
		break;
	default:
		return EINVAL;
	}

	return OK;
}

/*
 * Return shared memory information for a remote MIB call on the sysvipc_info
 * node in the kern.ipc subtree.  The particular semantics of this call are
 * tightly coupled to the implementation of the ipcs(1) userland utility.
 */
ssize_t
get_shm_mib_info(struct rmib_oldp * oldp)
{
	struct shm_sysctl_info shmsi;
	struct shmid_ds *shmds;
	unsigned int i;
	ssize_t r, off;

	off = 0;

	fill_shminfo(&shmsi.shminfo);

	/*
	 * As a hackish exception, the requested size may imply that just
	 * general information is to be returned, without throwing an ENOMEM
	 * error because there is no space for full output.
	 */
	if (rmib_getoldlen(oldp) == sizeof(shmsi.shminfo))
		return rmib_copyout(oldp, 0, &shmsi.shminfo,
		    sizeof(shmsi.shminfo));

	/*
	 * ipcs(1) blindly expects the returned array to be of size
	 * shminfo.shmmni, using the SHMSEG_ALLOCATED (aka SHM_ALLOC) mode
	 * flag to see whether each entry is valid.  If we return a smaller
	 * size, ipcs(1) will access arbitrary memory.
	 */
	assert(shmsi.shminfo.shmmni > 0);

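	/*
	 * A NULL oldp means that the caller is merely asking for the size of
	 * the data: one shm_sysctl_info structure, extended to hold shmmni
	 * 'shmids' elements in total.
	 */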
	if (oldp == NULL)
		return sizeof(shmsi) + sizeof(shmsi.shmids[0]) *
		    (shmsi.shminfo.shmmni - 1);

	/*
	 * Copy out entries one by one.  For the first entry, copy out the
	 * entire "shmsi" structure.  For subsequent entries, reuse the single
	 * embedded 'shmids' element of "shmsi" and copy out only that element.
	 */
	for (i = 0; i < shmsi.shminfo.shmmni; i++) {
		shmds = &shm_list[i].shmid_ds;

		memset(&shmsi.shmids[0], 0, sizeof(shmsi.shmids[0]));
		if (i < shm_list_nr && (shmds->shm_perm.mode & SHM_ALLOC)) {
			prepare_mib_perm(&shmsi.shmids[0].shm_perm,
			    &shmds->shm_perm);
			shmsi.shmids[0].shm_segsz = shmds->shm_segsz;
			shmsi.shmids[0].shm_lpid = shmds->shm_lpid;
			shmsi.shmids[0].shm_cpid = shmds->shm_cpid;
			shmsi.shmids[0].shm_atime = shmds->shm_atime;
			shmsi.shmids[0].shm_dtime = shmds->shm_dtime;
			shmsi.shmids[0].shm_ctime = shmds->shm_ctime;
			shmsi.shmids[0].shm_nattch = shmds->shm_nattch;
		}

		if (off == 0)
			r = rmib_copyout(oldp, off, &shmsi, sizeof(shmsi));
		else
			r = rmib_copyout(oldp, off, &shmsi.shmids[0],
			    sizeof(shmsi.shmids[0]));

		if (r < 0)
			return r;
		off += r;
	}

	return off;
}

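/*
 * Debugging aid, currently compiled out: dump the key, IPC identifier, and
 * page address of every allocated segment.
 */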
#if 0
static void
list_shm_ds(void)
{
	unsigned int i;

	printf("key\tid\tpage\n");
	for (i = 0; i < shm_list_nr; i++) {
		if (!(shm_list[i].shmid_ds.shm_perm.mode & SHM_ALLOC))
			continue;
		printf("%ld\t%d\t%lx\n",
		    shm_list[i].shmid_ds.shm_perm._key,
		    IXSEQ_TO_IPCID(i, shm_list[i].shmid_ds.shm_perm),
		    shm_list[i].page);
	}
}
#endif

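/*
 * Return TRUE iff no shared memory segments are currently allocated.  This
 * presumably lets the IPC server determine whether it holds any state at all.
 */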
int
is_shm_nil(void)
{

	return (shm_list_nr == 0);
}