/*	$OpenBSD: sysv_shm.c,v 1.47 2007/05/29 10:44:28 sturm Exp $	*/
/*	$NetBSD: sysv_shm.c,v 1.50 1998/10/21 22:24:29 tron Exp $	*/

/*
 * Copyright (c) 2002 Todd C. Miller <Todd.Miller@courtesan.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * Sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F39502-99-1-0512.
 */
/*
 * Copyright (c) 1994 Adam Glass and Charles M. Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles M.
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/shm.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/time.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/pool.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/stat.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

extern struct shminfo shminfo;
struct shmid_ds **shmsegs;	/* linear mapping of shmid -> shmseg */
struct pool shm_pool;
unsigned short *shmseqs;	/* array of shm sequence numbers */

struct shmid_ds *shm_find_segment_by_shmid(int);

/*
 * Provides the following externally accessible functions:
 *
 * shminit(void);				   initialization
 * shmexit(struct vmspace *)			   cleanup
 * shmfork(struct vmspace *, struct vmspace *)	   fork handling
 * shmsys(arg1, arg2, arg3, arg4);	shm{at,ctl,dt,get}(arg2, arg3, arg4)
 *
 * Structures:
 * shmsegs (an array of 'struct shmid_ds *')
 * per proc 'struct shmmap_head' with an array of 'struct shmmap_state'
 */

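/*
 * Illustrative userland usage of the interface implemented here (a
 * minimal sketch, not part of this file; error handling omitted):
 *
 *	#include <sys/types.h>
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	void *p = shmat(id, NULL, 0);	-- map the segment read/write
 *	((char *)p)[0] = 'x';		-- visible to other attachers
 *	shmdt(p);			-- unmap it again
 *	shmctl(id, IPC_RMID, NULL);	-- destroyed after the last detach
 */
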
#define SHMSEG_REMOVED		0x0200	/* can't overlap ACCESSPERMS */
#define SHMSEG_RMLINGER		0x0400

int shm_last_free, shm_nused, shm_committed;

struct shm_handle {
	struct uvm_object *shm_object;
};

struct shmmap_state {
	vaddr_t va;
	int shmid;
};

struct shmmap_head {
	int shmseg;
	struct shmmap_state state[1];
};

int shm_find_segment_by_key(key_t);
void shm_deallocate_segment(struct shmid_ds *);
int shm_delete_mapping(struct vmspace *, struct shmmap_state *);
int shmget_existing(struct proc *, struct sys_shmget_args *,
    int, int, register_t *);
int shmget_allocate_segment(struct proc *, struct sys_shmget_args *,
    int, register_t *);

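/*
 * Look up a segment by its IPC key.  Returns the index into shmsegs[],
 * or -1 if no segment with that key exists.
 */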
int
shm_find_segment_by_key(key_t key)
{
	struct shmid_ds *shmseg;
	int i;

	for (i = 0; i < shminfo.shmmni; i++) {
		shmseg = shmsegs[i];
		if (shmseg != NULL && shmseg->shm_perm.key == key)
			return (i);
	}
	return (-1);
}

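/*
 * Translate a shmid into a segment pointer.  Validates the index and
 * sequence number encoded in the id and rejects segments that have been
 * marked removed, unless they linger via SHMSEG_RMLINGER.
 */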
struct shmid_ds *
shm_find_segment_by_shmid(int shmid)
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni ||
	    (shmseg = shmsegs[segnum]) == NULL ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return (NULL);
	if ((shmseg->shm_perm.mode & (SHMSEG_REMOVED|SHMSEG_RMLINGER)) == SHMSEG_REMOVED)
		return (NULL);
	return (shmseg);
}

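/*
 * Release a segment's resources: drop the reference on the backing
 * anonymous uvm object, return the shmid_ds to the pool, and update
 * the global committed-page and in-use counters.
 */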
void
shm_deallocate_segment(struct shmid_ds *shmseg)
{
	struct shm_handle *shm_handle;
	size_t size;

	shm_handle = shmseg->shm_internal;
	size = round_page(shmseg->shm_segsz);
	uao_detach(shm_handle->shm_object);
	pool_put(&shm_pool, shmseg);
	shm_committed -= btoc(size);
	shm_nused--;
}

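/*
 * Detach one mapping from a process: unmap the attach address, mark the
 * per-process slot free, update the detach time and attach count, and
 * free the segment if it was removed and this was the last attachment.
 */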
int
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
{
	struct shmid_ds *shmseg;
	int segnum;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni ||
	    (shmseg = shmsegs[segnum]) == NULL)
		return (EINVAL);
	size = round_page(shmseg->shm_segsz);
	uvm_deallocate(&vm->vm_map, shmmap_s->va, size);
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time_second;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
		shmsegs[shm_last_free] = NULL;
	}
	return (0);
}

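/*
 * shmdt(2): find the caller's mapping whose attach address matches
 * shmaddr and delete it.
 */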
int
sys_shmdt(struct proc *p, void *v, register_t *retval)
{
	struct sys_shmdt_args /* {
		syscallarg(const void *) shmaddr;
	} */ *uap = v;
	struct shmmap_head *shmmap_h;
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_h = (struct shmmap_head *)p->p_vmspace->vm_shm;
	if (shmmap_h == NULL)
		return (EINVAL);

	for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
	    i++, shmmap_s++)
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vaddr_t)SCARG(uap, shmaddr))
			break;
	if (i == shmmap_h->shmseg)
		return (EINVAL);
	return (shm_delete_mapping(p->p_vmspace, shmmap_s));
}

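/*
 * shmat(2): attach a segment to the calling process.  Allocates the
 * per-process attachment table on first use, checks IPC permissions,
 * picks or validates the attach address (SHM_RND rounds down to an
 * SHMLBA boundary), maps the segment's uvm object into the process
 * map and records the attachment.
 */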
int
sys_shmat(struct proc *p, void *v, register_t *retval)
{
	struct sys_shmat_args /* {
		syscallarg(int) shmid;
		syscallarg(const void *) shmaddr;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int error, i, flags;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shmmap_head *shmmap_h;
	struct shmmap_state *shmmap_s;
	struct shm_handle *shm_handle;
	vaddr_t attach_va;
	vm_prot_t prot;
	vsize_t size;

	shmmap_h = (struct shmmap_head *)p->p_vmspace->vm_shm;
	if (shmmap_h == NULL) {
		size = sizeof(int) +
		    shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_h = malloc(size, M_SHM, M_WAITOK);
		shmmap_h->shmseg = shminfo.shmseg;
		for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
		    i++, shmmap_s++)
			shmmap_s->shmid = -1;
		p->p_vmspace->vm_shm = (caddr_t)shmmap_h;
	}
	shmseg = shm_find_segment_by_shmid(SCARG(uap, shmid));
	if (shmseg == NULL)
		return (EINVAL);
	error = ipcperm(cred, &shmseg->shm_perm,
	    (SCARG(uap, shmflg) & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return (error);
	for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shmmap_h->shmseg)
		return (EMFILE);
	size = round_page(shmseg->shm_segsz);
	prot = VM_PROT_READ;
	if ((SCARG(uap, shmflg) & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (SCARG(uap, shmaddr)) {
		flags |= MAP_FIXED;
		if (SCARG(uap, shmflg) & SHM_RND)
			attach_va =
			    (vaddr_t)SCARG(uap, shmaddr) & ~(SHMLBA-1);
		else if (((vaddr_t)SCARG(uap, shmaddr) & (SHMLBA-1)) == 0)
			attach_va = (vaddr_t)SCARG(uap, shmaddr);
		else
			return (EINVAL);
	} else {
		/* This is just a hint to uvm_map() about where to put it. */
		attach_va = uvm_map_hint(p, prot);
	}
	shm_handle = shmseg->shm_internal;
	uao_reference(shm_handle->shm_object);
	error = uvm_map(&p->p_vmspace->vm_map, &attach_va, size,
	    shm_handle->shm_object, 0, 0, UVM_MAPFLAG(prot, prot,
	    UVM_INH_SHARE, UVM_ADV_RANDOM, 0));
	if (error) {
		uao_detach(shm_handle->shm_object);
		return (error);
	}

	shmmap_s->va = attach_va;
	shmmap_s->shmid = SCARG(uap, shmid);
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time_second;
	shmseg->shm_nattch++;
	*retval = attach_va;
	return (0);
}

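/*
 * shmctl(2): thin wrapper around shmctl1() using the kernel's copyin()
 * and copyout() for the user-space shmid_ds buffer.
 */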
int
sys_shmctl(struct proc *p, void *v, register_t *retval)
{
	struct sys_shmctl_args /* {
		syscallarg(int) shmid;
		syscallarg(int) cmd;
		syscallarg(struct shmid_ds *) buf;
	} */ *uap = v;

	return (shmctl1(p, SCARG(uap, shmid), SCARG(uap, cmd),
	    (caddr_t)SCARG(uap, buf), copyin, copyout));
}

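/*
 * Common shmctl() handler.  The copy functions are passed as arguments so
 * callers (e.g. emulation code) can substitute their own.  IPC_STAT,
 * IPC_SET and IPC_RMID are implemented; SHM_LOCK and SHM_UNLOCK are
 * rejected with EINVAL.
 */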
int
shmctl1(struct proc *p, int shmid, int cmd, caddr_t buf,
    int (*ds_copyin)(const void *, void *, size_t),
    int (*ds_copyout)(const void *, void *, size_t))
{
	struct ucred *cred = p->p_ucred;
	struct shmid_ds inbuf, *shmseg;
	int error;

	shmseg = shm_find_segment_by_shmid(shmid);
	if (shmseg == NULL)
		return (EINVAL);
	switch (cmd) {
	case IPC_STAT:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_R)) != 0)
			return (error);
		error = ds_copyout(shmseg, buf, sizeof(inbuf));
		if (error)
			return (error);
		break;
	case IPC_SET:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return (error);
		error = ds_copyin(buf, &inbuf, sizeof(inbuf));
		if (error)
			return (error);
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time_second;
		break;
	case IPC_RMID:
		if ((error = ipcperm(cred, &shmseg->shm_perm, IPC_M)) != 0)
			return (error);
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(shmid);
			shmsegs[shm_last_free] = NULL;
		}
		break;
	case SHM_LOCK:
	case SHM_UNLOCK:
	default:
		return (EINVAL);
	}
	return (0);
}

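/*
 * shmget(2) on an existing key: check permissions, make sure the existing
 * segment is large enough for the request, and fail with EEXIST if the
 * caller demanded exclusive creation.
 */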
int
shmget_existing(struct proc *p,
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap,
	int mode, int segnum, register_t *retval)
{
	struct shmid_ds *shmseg;
	struct ucred *cred = p->p_ucred;
	int error;

	shmseg = shmsegs[segnum];	/* We assume the segnum is valid */
	if ((error = ipcperm(cred, &shmseg->shm_perm, mode)) != 0)
		return (error);
	if (SCARG(uap, size) && SCARG(uap, size) > shmseg->shm_segsz)
		return (EINVAL);
	if ((SCARG(uap, shmflg) & (IPC_CREAT | IPC_EXCL)) ==
	    (IPC_CREAT | IPC_EXCL))
		return (EEXIST);
	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return (0);
}

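/*
 * shmget(2) for a new segment: validate the requested size against
 * shmmin/shmmax, enforce the shmmni and shmall limits, allocate a
 * shmid_ds (re-checking the key if the allocation had to sleep), pick a
 * free slot in shmsegs[], create the backing anonymous uvm object and
 * initialize the segment.
 */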
int
shmget_allocate_segment(struct proc *p,
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap,
	int mode, register_t *retval)
{
	size_t size;
	key_t key;
	int segnum;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;
	int error = 0;

	if (SCARG(uap, size) < shminfo.shmmin ||
	    SCARG(uap, size) > shminfo.shmmax)
		return (EINVAL);
	if (shm_nused >= shminfo.shmmni)	/* any shmids left? */
		return (ENOSPC);
	size = round_page(SCARG(uap, size));
	if (shm_committed + btoc(size) > shminfo.shmall)
		return (ENOMEM);
	shm_nused++;
	shm_committed += btoc(size);

	/*
	 * If a key has been specified and we had to wait for memory
	 * to be freed up we need to verify that no one has allocated
	 * the key we want in the meantime.  Yes, this is ugly.
	 */
	key = SCARG(uap, key);
	shmseg = pool_get(&shm_pool, key == IPC_PRIVATE ? PR_WAITOK : 0);
	if (shmseg == NULL) {
		shmseg = pool_get(&shm_pool, PR_WAITOK);
		if (shm_find_segment_by_key(key) != -1) {
			pool_put(&shm_pool, shmseg);
			shm_nused--;
			shm_committed -= btoc(size);
			return (EAGAIN);
		}
	}

	/* XXX - hash shmids instead */
	if (shm_last_free < 0) {
		for (segnum = 0; segnum < shminfo.shmmni && shmsegs[segnum];
		    segnum++)
			;
		if (segnum == shminfo.shmmni)
			panic("shmseg free count inconsistent");
	} else {
		segnum = shm_last_free;
		if (++shm_last_free >= shminfo.shmmni || shmsegs[shm_last_free])
			shm_last_free = -1;
	}
	shmsegs[segnum] = shmseg;

	shm_handle = (struct shm_handle *)((caddr_t)shmseg + sizeof(*shmseg));
	shm_handle->shm_object = uao_create(size, 0);

	shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid;
	shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid;
	shmseg->shm_perm.mode = (mode & (ACCESSPERMS|SHMSEG_RMLINGER));
	shmseg->shm_perm.seq = shmseqs[segnum] = (shmseqs[segnum] + 1) & 0x7fff;
	shmseg->shm_perm.key = key;
	shmseg->shm_segsz = SCARG(uap, size);
	shmseg->shm_cpid = p->p_pid;
	shmseg->shm_lpid = shmseg->shm_nattch = 0;
	shmseg->shm_atime = shmseg->shm_dtime = 0;
	shmseg->shm_ctime = time_second;
	shmseg->shm_internal = shm_handle;

	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return (error);
}

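/*
 * shmget(2): for IPC_PRIVATE always create a new segment; otherwise look
 * the key up and either return the existing segment or create one if
 * IPC_CREAT was given.  EAGAIN from the allocator means the key appeared
 * while we slept, so the lookup is retried.
 */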
int
sys_shmget(struct proc *p, void *v, register_t *retval)
{
	struct sys_shmget_args /* {
		syscallarg(key_t) key;
		syscallarg(size_t) size;
		syscallarg(int) shmflg;
	} */ *uap = v;
	int segnum, mode, error;

	mode = SCARG(uap, shmflg) & ACCESSPERMS;
	if (SCARG(uap, shmflg) & _SHM_RMLINGER)
		mode |= SHMSEG_RMLINGER;

	if (SCARG(uap, key) != IPC_PRIVATE) {
	again:
		segnum = shm_find_segment_by_key(SCARG(uap, key));
		if (segnum >= 0)
			return (shmget_existing(p, uap, mode, segnum, retval));
		if ((SCARG(uap, shmflg) & IPC_CREAT) == 0)
			return (ENOENT);
	}
	error = shmget_allocate_segment(p, uap, mode, retval);
	if (error == EAGAIN)
		goto again;
	return (error);
}

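/*
 * Fork handling: duplicate the parent's attachment table into the child
 * and bump the attach count on every segment that is still mapped.
 */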
void
shmfork(struct vmspace *vm1, struct vmspace *vm2)
{
	struct shmmap_head *shmmap_h;
	struct shmmap_state *shmmap_s;
	struct shmid_ds *shmseg;
	size_t size;
	int i;

	if (vm1->vm_shm == NULL) {
		vm2->vm_shm = NULL;
		return;
	}

	shmmap_h = (struct shmmap_head *)vm1->vm_shm;
	size = sizeof(int) + shmmap_h->shmseg * sizeof(struct shmmap_state);
	vm2->vm_shm = malloc(size, M_SHM, M_WAITOK);
	bcopy(vm1->vm_shm, vm2->vm_shm, size);
	for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
	    i++, shmmap_s++) {
		if (shmmap_s->shmid != -1 &&
		    (shmseg = shmsegs[IPCID_TO_IX(shmmap_s->shmid)]) != NULL)
			shmseg->shm_nattch++;
	}
}

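/*
 * Cleanup on vmspace teardown: detach every remaining mapping and free
 * the per-process attachment table.
 */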
void
shmexit(struct vmspace *vm)
{
	struct shmmap_head *shmmap_h;
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_h = (struct shmmap_head *)vm->vm_shm;
	if (shmmap_h == NULL)
		return;
	for (i = 0, shmmap_s = shmmap_h->state; i < shmmap_h->shmseg;
	    i++, shmmap_s++)
		if (shmmap_s->shmid != -1)
			shm_delete_mapping(vm, shmmap_s);
	free(vm->vm_shm, M_SHM);
	vm->vm_shm = NULL;
}

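/*
 * One-time initialization: set up the shmid_ds pool, allocate and zero
 * the shmsegs and shmseqs arrays, and convert shmmax from pages to bytes.
 */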
void
shminit(void)
{

	pool_init(&shm_pool, sizeof(struct shmid_ds) +
	    sizeof(struct shm_handle), 0, 0, 0, "shmpl",
	    &pool_allocator_nointr);
	shmsegs = malloc(shminfo.shmmni * sizeof(struct shmid_ds *),
	    M_SHM, M_WAITOK);
	bzero(shmsegs, shminfo.shmmni * sizeof(struct shmid_ds *));
	shmseqs = malloc(shminfo.shmmni * sizeof(unsigned short),
	    M_SHM, M_WAITOK);
	bzero(shmseqs, shminfo.shmmni * sizeof(unsigned short));

	shminfo.shmmax *= PAGE_SIZE;	/* actually in pages */
	shm_last_free = 0;
	shm_nused = 0;
	shm_committed = 0;
}

/*
 * Userland access to struct shminfo.
 */
int
sysctl_sysvshm(int *name, u_int namelen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int error, val;
	struct shmid_ds **newsegs;
	unsigned short *newseqs;

	if (namelen != 2) {
		switch (name[0]) {
		case KERN_SHMINFO_SHMMAX:
		case KERN_SHMINFO_SHMMIN:
		case KERN_SHMINFO_SHMMNI:
		case KERN_SHMINFO_SHMSEG:
		case KERN_SHMINFO_SHMALL:
			break;
		default:
			return (ENOTDIR);	/* overloaded */
		}
	}

	switch (name[0]) {
	case KERN_SHMINFO_SHMMAX:
		if ((error = sysctl_int(oldp, oldlenp, newp, newlen,
		    &shminfo.shmmax)) || newp == NULL)
			return (error);

		/* If new shmmax > shmall, crank shmall */
		if (btoc(round_page(shminfo.shmmax)) > shminfo.shmall)
			shminfo.shmall = btoc(round_page(shminfo.shmmax));
		return (0);
	case KERN_SHMINFO_SHMMIN:
		val = shminfo.shmmin;
		if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &val)) ||
		    val == shminfo.shmmin)
			return (error);
		if (val <= 0)
			return (EINVAL);	/* shmmin must be >= 1 */
		shminfo.shmmin = val;
		return (0);
	case KERN_SHMINFO_SHMMNI:
		val = shminfo.shmmni;
		if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &val)) ||
		    val == shminfo.shmmni)
			return (error);

		if (val < shminfo.shmmni || val > 0xffff)
			return (EINVAL);

		/* Expand shmsegs and shmseqs arrays */
		newsegs = malloc(val * sizeof(struct shmid_ds *),
		    M_SHM, M_WAITOK);
		bcopy(shmsegs, newsegs,
		    shminfo.shmmni * sizeof(struct shmid_ds *));
		bzero(newsegs + shminfo.shmmni,
		    (val - shminfo.shmmni) * sizeof(struct shmid_ds *));
		free(shmsegs, M_SHM);
		shmsegs = newsegs;
		newseqs = malloc(val * sizeof(unsigned short), M_SHM, M_WAITOK);
		bcopy(shmseqs, newseqs,
		    shminfo.shmmni * sizeof(unsigned short));
		bzero(newseqs + shminfo.shmmni,
		    (val - shminfo.shmmni) * sizeof(unsigned short));
		free(shmseqs, M_SHM);
		shmseqs = newseqs;
		shminfo.shmmni = val;
		return (0);
	case KERN_SHMINFO_SHMSEG:
		val = shminfo.shmseg;
		if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &val)) ||
		    val == shminfo.shmseg)
			return (error);
		if (val <= 0)
			return (EINVAL);	/* shmseg must be >= 1 */
		shminfo.shmseg = val;
		return (0);
	case KERN_SHMINFO_SHMALL:
		val = shminfo.shmall;
		if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &val)) ||
		    val == shminfo.shmall)
			return (error);
		if (val < shminfo.shmall)
			return (EINVAL);	/* can't decrease shmall */
		shminfo.shmall = val;
		return (0);
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}