- 论坛徽章:
- 1
|
头文件
/*
 * linux/ipc/util.h
 * Copyright (C) 1999 Christoph Rohland
 *
 * ipc helper functions (c) 1999 Manfred Spraul <manfreds@colorfullife.com>
 */

/* Largest value representable in an unsigned short. */
#define USHRT_MAX 0xffff
/* A user-visible IPC id encodes slot index + seq*SEQ_MULTIPLIER. */
#define SEQ_MULTIPLIER	(IPCMNI)

/* One-time init entry points for the three SysV IPC subsystems. */
void sem_init (void);
void msg_init (void);
void shm_init (void);
- 14
- 15 struct ipc_ids {
- 16 int size;
- 17 int in_use;
- 18 int max_id;
- 19 unsigned short seq;
- 20 unsigned short seq_max;
- 21 struct semaphore sem;
- 22 spinlock_t ary;
- 23 struct ipc_id* entries;
- 24 };
- 25
- 26 struct ipc_id {
- 27 struct kern_ipc_perm* p;
- 28 };
- 29
- 30
- 31 void __init ipc_init_ids(struct ipc_ids* ids, int size);
- 32
- 33 /* must be called with ids->;sem acquired.*/
- 34 int ipc_findkey(struct ipc_ids* ids, key_t key);
- 35 int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size);
- 36
- 37 /* must be called with both locks acquired. */
- 38 struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id);
- 39
- 40 int ipcperms (struct kern_ipc_perm *ipcp, short flg);
- 41
- 42 /* for rare, potentially huge allocations.
- 43 * both function can sleep
- 44 */
- 45 void* ipc_alloc(int size);
- 46 void ipc_free(void* ptr, int size);
- 47
- 48 extern inline void ipc_lockall(struct ipc_ids* ids)
- 49 {
- 50 spin_lock(&ids->;ary);
- 51 }
- 52
- 53 extern inline struct kern_ipc_perm* ipc_get(struct ipc_ids* ids, int id)
- 54 {
- 55 struct kern_ipc_perm* out;
- 56 int lid = id % SEQ_MULTIPLIER;
- 57 if(lid >;= ids->;size)
- 58 return NULL;
- 59
- 60 out = ids->;entries[lid].p;
- 61 return out;
- 62 }
- 63
- 64 extern inline void ipc_unlockall(struct ipc_ids* ids)
- 65 {
- 66 spin_unlock(&ids->;ary);
- 67 }
- 68 extern inline struct kern_ipc_perm* ipc_lock(struct ipc_ids* ids, int id)
- 69 {
- 70 struct kern_ipc_perm* out;
- 71 int lid = id % SEQ_MULTIPLIER;
- 72 if(lid >;= ids->;size)
- 73 return NULL;
- 74
- 75 spin_lock(&ids->;ary);
- 76 out = ids->;entries[lid].p;
- 77 if(out==NULL)
- 78 spin_unlock(&ids->;ary);
- 79 return out;
- 80 }
- 81
- 82 extern inline void ipc_unlock(struct ipc_ids* ids, int id)
- 83 {
- 84 spin_unlock(&ids->;ary);
- 85 }
- 86
- 87 extern inline int ipc_buildid(struct ipc_ids* ids, int id, int seq)
- 88 {
- 89 return SEQ_MULTIPLIER*seq + id;
- 90 }
- 91
- 92 extern inline int ipc_checkid(struct ipc_ids* ids, struct kern_ipc_perm* ipcp, int uid)
- 93 {
- 94 if(uid/SEQ_MULTIPLIER != ipcp->;seq)
- 95 return 1;
- 96 return 0;
- 97 }
- 98
void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out);
void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out);

#if defined(__ia64__) || defined(__hppa__)
/* On IA-64 and PA-RISC, we always use the "64-bit version" of the IPC structures. */
# define ipc_parse_version(cmd) IPC_64
#else
int ipc_parse_version (int *cmd);
#endif
- 108
复制代码
shm.c
- 1 /*
- 2 * linux/ipc/shm.c
- 3 * Copyright (C) 1992, 1993 Krishna Balasubramanian
- 4 * Many improvements/fixes by Bruno Haible.
- 5 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
- 6 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
- 7 *
- 8 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>;
- 9 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>;
- 10 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>;
- 11 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>;
- 12 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>;
- 13 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>;
- 14 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>;
- 15 *
- 16 */
- 17
- 18 #include <linux/config.h>;
- 19 #include <linux/slab.h>;
- 20 #include <linux/shm.h>;
- 21 #include <linux/init.h>;
- 22 #include <linux/file.h>;
- 23 #include <linux/mman.h>;
- 24 #include <linux/proc_fs.h>;
- 25 #include <asm/uaccess.h>;
- 26
- 27 #include "util.h"
- 28
- 29 struct shmid_kernel /* private to the kernel */
- 30 {
- 31 struct kern_ipc_perm shm_perm;
- 32 struct file * shm_file;
- 33 int id;
- 34 unsigned long shm_nattch;
- 35 unsigned long shm_segsz;
- 36 time_t shm_atim;
- 37 time_t shm_dtim;
- 38 time_t shm_ctim;
- 39 pid_t shm_cprid;
- 40 pid_t shm_lprid;
- 41 };
- 42
- 43 #define shm_flags shm_perm.mode
- 44
- 45 static struct file_operations shm_file_operations;
- 46 static struct vm_operations_struct shm_vm_ops;
- 47
- 48 static struct ipc_ids shm_ids;
- 49
- 50 #define shm_lock(id) ((struct shmid_kernel*)ipc_lock(&shm_ids,id))
- 51 #define shm_unlock(id) ipc_unlock(&shm_ids,id)
- 52 #define shm_lockall() ipc_lockall(&shm_ids)
- 53 #define shm_unlockall() ipc_unlockall(&shm_ids)
- 54 #define shm_get(id) ((struct shmid_kernel*)ipc_get(&shm_ids,id))
- 55 #define shm_buildid(id, seq) \
- 56 ipc_buildid(&shm_ids, id, seq)
- 57
- 58 static int newseg (key_t key, int shmflg, size_t size);
- 59 static void shm_open (struct vm_area_struct *shmd);
- 60 static void shm_close (struct vm_area_struct *shmd);
- 61 #ifdef CONFIG_PROC_FS
- 62 static int sysvipc_shm_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data);
- 63 #endif
- 64
- 65 size_t shm_ctlmax = SHMMAX;
- 66 size_t shm_ctlall = SHMALL;
- 67 int shm_ctlmni = SHMMNI;
- 68
- 69 static int shm_tot; /* total number of shared memory pages */
- 70
- 71 void __init shm_init (void)
- 72 {
- 73 ipc_init_ids(&shm_ids, 1);
- 74 #ifdef CONFIG_PROC_FS
- 75 create_proc_read_entry("sysvipc/shm", 0, 0, sysvipc_shm_read_proc, NULL);
- 76 #endif
- 77 }
- 78
- 79 static inline int shm_checkid(struct shmid_kernel *s, int id)
- 80 {
- 81 if (ipc_checkid(&shm_ids,&s->;shm_perm,id))
- 82 return -EIDRM;
- 83 return 0;
- 84 }
- 85
- 86 static inline struct shmid_kernel *shm_rmid(int id)
- 87 {
- 88 return (struct shmid_kernel *)ipc_rmid(&shm_ids,id);
- 89 }
- 90
- 91 static inline int shm_addid(struct shmid_kernel *shp)
- 92 {
- 93 return ipc_addid(&shm_ids, &shp->;shm_perm, shm_ctlmni+1);
- 94 }
- 95
- 96
- 97
- 98 static inline void shm_inc (int id) {
- 99 struct shmid_kernel *shp;
- 100
- 101 if(!(shp = shm_lock(id)))
- 102 BUG();
- 103 shp->;shm_atim = CURRENT_TIME;
- 104 shp->;shm_lprid = current->;pid;
- 105 shp->;shm_nattch++;
- 106 shm_unlock(id);
- 107 }
- 108
- 109 /* This is called by fork, once for every shm attach. */
- 110 static void shm_open (struct vm_area_struct *shmd)
- 111 {
- 112 shm_inc (shmd->;vm_file->;f_dentry->;d_inode->;i_ino);
- 113 }
- 114
- 115 /*
- 116 * shm_destroy - free the struct shmid_kernel
- 117 *
- 118 * @shp: struct to free
- 119 *
- 120 * It has to be called with shp and shm_ids.sem locked,
- 121 * but returns with shp unlocked and freed.
- 122 */
- 123 static void shm_destroy (struct shmid_kernel *shp)
- 124 {
- 125 shm_tot -= (shp->;shm_segsz + PAGE_SIZE - 1) >;>; PAGE_SHIFT;
- 126 shm_rmid (shp->;id);
- 127 shm_unlock(shp->;id);
- 128 shmem_lock(shp->;shm_file, 0);
- 129 fput (shp->;shm_file);
- 130 kfree (shp);
- 131 }
- 132
- 133 /*
- 134 * remove the attach descriptor shmd.
- 135 * free memory for segment if it is marked destroyed.
- 136 * The descriptor has already been removed from the current->;mm->;mmap list
- 137 * and will later be kfree()d.
- 138 */
- 139 static void shm_close (struct vm_area_struct *shmd)
- 140 {
- 141 struct file * file = shmd->;vm_file;
- 142 int id = file->;f_dentry->;d_inode->;i_ino;
- 143 struct shmid_kernel *shp;
- 144
- 145 down (&shm_ids.sem);
- 146 /* remove from the list of attaches of the shm segment */
- 147 if(!(shp = shm_lock(id)))
- 148 BUG();
- 149 shp->;shm_lprid = current->;pid;
- 150 shp->;shm_dtim = CURRENT_TIME;
- 151 shp->;shm_nattch--;
- 152 if(shp->;shm_nattch == 0 &&
- 153 shp->;shm_flags & SHM_DEST)
- 154 shm_destroy (shp);
- 155 else
- 156 shm_unlock(id);
- 157 up (&shm_ids.sem);
- 158 }
- 159
- 160 static int shm_mmap(struct file * file, struct vm_area_struct * vma)
- 161 {
- 162 UPDATE_ATIME(file->;f_dentry->;d_inode);
- 163 vma->;vm_ops = &shm_vm_ops;
- 164 shm_inc(file->;f_dentry->;d_inode->;i_ino);
- 165 return 0;
- 166 }
- 167
- 168 static struct file_operations shm_file_operations = {
- 169 mmap: shm_mmap
- 170 };
- 171
- 172 static struct vm_operations_struct shm_vm_ops = {
- 173 open: shm_open, /* callback for a new vm-area open */
- 174 close: shm_close, /* callback for when the vm-area is released */
- 175 nopage: shmem_nopage,
- 176 };
- 177
- 178 static int newseg (key_t key, int shmflg, size_t size)
- 179 {
- 180 int error;
- 181 struct shmid_kernel *shp;
- 182 int numpages = (size + PAGE_SIZE -1) >;>; PAGE_SHIFT;
- 183 struct file * file;
- 184 char name[13];
- 185 int id;
- 186
- 187 if (size < SHMMIN || size >; shm_ctlmax)
- 188 return -EINVAL;
- 189
- 190 if (shm_tot + numpages >;= shm_ctlall)
- 191 return -ENOSPC;
- 192
- 193 shp = (struct shmid_kernel *) kmalloc (sizeof (*shp), GFP_USER);
- 194 if (!shp)
- 195 return -ENOMEM;
- 196 sprintf (name, "SYSV%08x", key);
- 197 file = shmem_file_setup(name, size);
- 198 error = PTR_ERR(file);
- 199 if (IS_ERR(file))
- 200 goto no_file;
- 201
- 202 error = -ENOSPC;
- 203 id = shm_addid(shp);
- 204 if(id == -1)
- 205 goto no_id;
- 206 shp->;shm_perm.key = key;
- 207 shp->;shm_flags = (shmflg & S_IRWXUGO);
- 208 shp->;shm_cprid = current->;pid;
- 209 shp->;shm_lprid = 0;
- 210 shp->;shm_atim = shp->;shm_dtim = 0;
- 211 shp->;shm_ctim = CURRENT_TIME;
- 212 shp->;shm_segsz = size;
- 213 shp->;shm_nattch = 0;
- 214 shp->;id = shm_buildid(id,shp->;shm_perm.seq);
- 215 shp->;shm_file = file;
- 216 file->;f_dentry->;d_inode->;i_ino = shp->;id;
- 217 file->;f_op = &shm_file_operations;
- 218 shm_tot += numpages;
- 219 shm_unlock (id);
- 220 return shp->;id;
- 221
- 222 no_id:
- 223 fput(file);
- 224 no_file:
- 225 kfree(shp);
- 226 return error;
- 227 }
- 228
- 229 asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
- 230 {
- 231 struct shmid_kernel *shp;
- 232 int err, id = 0;
- 233
- 234 down(&shm_ids.sem);
- 235 if (key == IPC_PRIVATE) {
- 236 err = newseg(key, shmflg, size);
- 237 } else if ((id = ipc_findkey(&shm_ids, key)) == -1) {
- 238 if (!(shmflg & IPC_CREAT))
- 239 err = -ENOENT;
- 240 else
- 241 err = newseg(key, shmflg, size);
- 242 } else if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL)) {
- 243 err = -EEXIST;
- 244 } else {
- 245 shp = shm_lock(id);
- 246 if(shp==NULL)
- 247 BUG();
- 248 if (shp->;shm_segsz < size)
- 249 err = -EINVAL;
- 250 else if (ipcperms(&shp->;shm_perm, shmflg))
- 251 err = -EACCES;
- 252 else
- 253 err = shm_buildid(id, shp->;shm_perm.seq);
- 254 shm_unlock(id);
- 255 }
- 256 up(&shm_ids.sem);
- 257 return err;
- 258 }
- 259
- 260 static inline unsigned long copy_shmid_to_user(void *buf, struct shmid64_ds *in, int version)
- 261 {
- 262 switch(version) {
- 263 case IPC_64:
- 264 return copy_to_user(buf, in, sizeof(*in));
- 265 case IPC_OLD:
- 266 {
- 267 struct shmid_ds out;
- 268
- 269 ipc64_perm_to_ipc_perm(&in->;shm_perm, &out.shm_perm);
- 270 out.shm_segsz = in->;shm_segsz;
- 271 out.shm_atime = in->;shm_atime;
- 272 out.shm_dtime = in->;shm_dtime;
- 273 out.shm_ctime = in->;shm_ctime;
- 274 out.shm_cpid = in->;shm_cpid;
- 275 out.shm_lpid = in->;shm_lpid;
- 276 out.shm_nattch = in->;shm_nattch;
- 277
- 278 return copy_to_user(buf, &out, sizeof(out));
- 279 }
- 280 default:
- 281 return -EINVAL;
- 282 }
- 283 }
- 284
- 285 struct shm_setbuf {
- 286 uid_t uid;
- 287 gid_t gid;
- 288 mode_t mode;
- 289 };
- 290
- 291 static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void *buf, int version)
- 292 {
- 293 switch(version) {
- 294 case IPC_64:
- 295 {
- 296 struct shmid64_ds tbuf;
- 297
- 298 if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
- 299 return -EFAULT;
- 300
- 301 out->;uid = tbuf.shm_perm.uid;
- 302 out->;gid = tbuf.shm_perm.gid;
- 303 out->;mode = tbuf.shm_flags;
- 304
- 305 return 0;
- 306 }
- 307 case IPC_OLD:
- 308 {
- 309 struct shmid_ds tbuf_old;
- 310
- 311 if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
- 312 return -EFAULT;
- 313
- 314 out->;uid = tbuf_old.shm_perm.uid;
- 315 out->;gid = tbuf_old.shm_perm.gid;
- 316 out->;mode = tbuf_old.shm_flags;
- 317
- 318 return 0;
- 319 }
- 320 default:
- 321 return -EINVAL;
- 322 }
- 323 }
- 324
- 325 static inline unsigned long copy_shminfo_to_user(void *buf, struct shminfo64 *in, int version)
- 326 {
- 327 switch(version) {
- 328 case IPC_64:
- 329 return copy_to_user(buf, in, sizeof(*in));
- 330 case IPC_OLD:
- 331 {
- 332 struct shminfo out;
- 333
- 334 if(in->;shmmax >; INT_MAX)
- 335 out.shmmax = INT_MAX;
- 336 else
- 337 out.shmmax = (int)in->;shmmax;
- 338
- 339 out.shmmin = in->;shmmin;
- 340 out.shmmni = in->;shmmni;
- 341 out.shmseg = in->;shmseg;
- 342 out.shmall = in->;shmall;
- 343
- 344 return copy_to_user(buf, &out, sizeof(out));
- 345 }
- 346 default:
- 347 return -EINVAL;
- 348 }
- 349 }
- 350
- 351 static void shm_get_stat (unsigned long *rss, unsigned long *swp)
- 352 {
- 353 struct shmem_inode_info *info;
- 354 int i;
- 355
- 356 *rss = 0;
- 357 *swp = 0;
- 358
- 359 for(i = 0; i <= shm_ids.max_id; i++) {
- 360 struct shmid_kernel* shp;
- 361 struct inode * inode;
- 362
- 363 shp = shm_get(i);
- 364 if(shp == NULL)
- 365 continue;
- 366 inode = shp->;shm_file->;f_dentry->;d_inode;
- 367 info = SHMEM_I(inode);
- 368 spin_lock (&info->;lock);
- 369 *rss += inode->;i_mapping->;nrpages;
- 370 *swp += info->;swapped;
- 371 spin_unlock (&info->;lock);
- 372 }
- 373 }
- 374
- 375 asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
- 376 {
- 377 struct shm_setbuf setbuf;
- 378 struct shmid_kernel *shp;
- 379 int err, version;
- 380
- 381 if (cmd < 0 || shmid < 0)
- 382 return -EINVAL;
- 383
- 384 version = ipc_parse_version(&cmd);
- 385
- 386 switch (cmd) { /* replace with proc interface ? */
- 387 case IPC_INFO:
- 388 {
- 389 struct shminfo64 shminfo;
- 390
- 391 memset(&shminfo,0,sizeof(shminfo));
- 392 shminfo.shmmni = shminfo.shmseg = shm_ctlmni;
- 393 shminfo.shmmax = shm_ctlmax;
- 394 shminfo.shmall = shm_ctlall;
- 395
- 396 shminfo.shmmin = SHMMIN;
- 397 if(copy_shminfo_to_user (buf, &shminfo, version))
- 398 return -EFAULT;
- 399 /* reading a integer is always atomic */
- 400 err= shm_ids.max_id;
- 401 if(err<0)
- 402 err = 0;
- 403 return err;
- 404 }
- 405 case SHM_INFO:
- 406 {
- 407 struct shm_info shm_info;
- 408
- 409 memset(&shm_info,0,sizeof(shm_info));
- 410 down(&shm_ids.sem);
- 411 shm_lockall();
- 412 shm_info.used_ids = shm_ids.in_use;
- 413 shm_get_stat (&shm_info.shm_rss, &shm_info.shm_swp);
- 414 shm_info.shm_tot = shm_tot;
- 415 shm_info.swap_attempts = 0;
- 416 shm_info.swap_successes = 0;
- 417 err = shm_ids.max_id;
- 418 shm_unlockall();
- 419 up(&shm_ids.sem);
- 420 if(copy_to_user (buf, &shm_info, sizeof(shm_info)))
- 421 return -EFAULT;
- 422
- 423 return err < 0 ? 0 : err;
- 424 }
- 425 case SHM_STAT:
- 426 case IPC_STAT:
- 427 {
- 428 struct shmid64_ds tbuf;
- 429 int result;
- 430 memset(&tbuf, 0, sizeof(tbuf));
- 431 shp = shm_lock(shmid);
- 432 if(shp==NULL)
- 433 return -EINVAL;
- 434 if(cmd==SHM_STAT) {
- 435 err = -EINVAL;
- 436 if (shmid >; shm_ids.max_id)
- 437 goto out_unlock;
- 438 result = shm_buildid(shmid, shp->;shm_perm.seq);
- 439 } else {
- 440 err = shm_checkid(shp,shmid);
- 441 if(err)
- 442 goto out_unlock;
- 443 result = 0;
- 444 }
- 445 err=-EACCES;
- 446 if (ipcperms (&shp->;shm_perm, S_IRUGO))
- 447 goto out_unlock;
- 448 kernel_to_ipc64_perm(&shp->;shm_perm, &tbuf.shm_perm);
- 449 tbuf.shm_segsz = shp->;shm_segsz;
- 450 tbuf.shm_atime = shp->;shm_atim;
- 451 tbuf.shm_dtime = shp->;shm_dtim;
- 452 tbuf.shm_ctime = shp->;shm_ctim;
- 453 tbuf.shm_cpid = shp->;shm_cprid;
- 454 tbuf.shm_lpid = shp->;shm_lprid;
- 455 tbuf.shm_nattch = shp->;shm_nattch;
- 456 shm_unlock(shmid);
- 457 if(copy_shmid_to_user (buf, &tbuf, version))
- 458 return -EFAULT;
- 459 return result;
- 460 }
- 461 case SHM_LOCK:
- 462 case SHM_UNLOCK:
- 463 {
- 464 /* Allow superuser to lock segment in memory */
- 465 /* Should the pages be faulted in here or leave it to user? */
- 466 /* need to determine interaction with current->;swappable */
- 467 if (!capable(CAP_IPC_LOCK))
- 468 return -EPERM;
- 469
- 470 shp = shm_lock(shmid);
- 471 if(shp==NULL)
- 472 return -EINVAL;
- 473 err = shm_checkid(shp,shmid);
- 474 if(err)
- 475 goto out_unlock;
- 476 if(cmd==SHM_LOCK) {
- 477 shmem_lock(shp->;shm_file, 1);
- 478 shp->;shm_flags |= SHM_LOCKED;
- 479 } else {
- 480 shmem_lock(shp->;shm_file, 0);
- 481 shp->;shm_flags &= ~SHM_LOCKED;
- 482 }
- 483 shm_unlock(shmid);
- 484 return err;
- 485 }
- 486 case IPC_RMID:
- 487 {
- 488 /*
- 489 * We cannot simply remove the file. The SVID states
- 490 * that the block remains until the last person
- 491 * detaches from it, then is deleted. A shmat() on
- 492 * an RMID segment is legal in older Linux and if
- 493 * we change it apps break...
- 494 *
- 495 * Instead we set a destroyed flag, and then blow
- 496 * the name away when the usage hits zero.
- 497 */
- 498 down(&shm_ids.sem);
- 499 shp = shm_lock(shmid);
- 500 err = -EINVAL;
- 501 if (shp == NULL)
- 502 goto out_up;
- 503 err = shm_checkid(shp, shmid);
- 504 if(err)
- 505 goto out_unlock_up;
- 506 if (current->;euid != shp->;shm_perm.uid &&
- 507 current->;euid != shp->;shm_perm.cuid &&
- 508 !capable(CAP_SYS_ADMIN)) {
- 509 err=-EPERM;
- 510 goto out_unlock_up;
- 511 }
- 512 if (shp->;shm_nattch){
- 513 shp->;shm_flags |= SHM_DEST;
- 514 /* Do not find it any more */
- 515 shp->;shm_perm.key = IPC_PRIVATE;
- 516 shm_unlock(shmid);
- 517 } else
- 518 shm_destroy (shp);
- 519 up(&shm_ids.sem);
- 520 return err;
- 521 }
- 522
- 523 case IPC_SET:
- 524 {
- 525 if(copy_shmid_from_user (&setbuf, buf, version))
- 526 return -EFAULT;
- 527 down(&shm_ids.sem);
- 528 shp = shm_lock(shmid);
- 529 err=-EINVAL;
- 530 if(shp==NULL)
- 531 goto out_up;
- 532 err = shm_checkid(shp,shmid);
- 533 if(err)
- 534 goto out_unlock_up;
- 535 err=-EPERM;
- 536 if (current->;euid != shp->;shm_perm.uid &&
- 537 current->;euid != shp->;shm_perm.cuid &&
- 538 !capable(CAP_SYS_ADMIN)) {
- 539 goto out_unlock_up;
- 540 }
- 541
- 542 shp->;shm_perm.uid = setbuf.uid;
- 543 shp->;shm_perm.gid = setbuf.gid;
- 544 shp->;shm_flags = (shp->;shm_flags & ~S_IRWXUGO)
- 545 | (setbuf.mode & S_IRWXUGO);
- 546 shp->;shm_ctim = CURRENT_TIME;
- 547 break;
- 548 }
- 549
- 550 default:
- 551 return -EINVAL;
- 552 }
- 553
- 554 err = 0;
- 555 out_unlock_up:
- 556 shm_unlock(shmid);
- 557 out_up:
- 558 up(&shm_ids.sem);
- 559 return err;
- 560 out_unlock:
- 561 shm_unlock(shmid);
- 562 return err;
- 563 }
- 564
- 565 /*
- 566 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
- 567 */
- 568 asmlinkage long sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr)
- 569 {
- 570 struct shmid_kernel *shp;
- 571 unsigned long addr;
- 572 unsigned long size;
- 573 struct file * file;
- 574 int err;
- 575 unsigned long flags;
- 576 unsigned long prot;
- 577 unsigned long o_flags;
- 578 int acc_mode;
- 579 void *user_addr;
- 580
- 581 if (shmid < 0)
- 582 return -EINVAL;
- 583
- 584 if ((addr = (ulong)shmaddr)) {
- 585 if (addr & (SHMLBA-1)) {
- 586 if (shmflg & SHM_RND)
- 587 addr &= ~(SHMLBA-1); /* round down */
- 588 else
- 589 return -EINVAL;
- 590 }
- 591 flags = MAP_SHARED | MAP_FIXED;
- 592 } else {
- 593 if ((shmflg & SHM_REMAP))
- 594 return -EINVAL;
- 595
- 596 flags = MAP_SHARED;
- 597 }
- 598
- 599 if (shmflg & SHM_RDONLY) {
- 600 prot = PROT_READ;
- 601 o_flags = O_RDONLY;
- 602 acc_mode = S_IRUGO;
- 603 } else {
- 604 prot = PROT_READ | PROT_WRITE;
- 605 o_flags = O_RDWR;
- 606 acc_mode = S_IRUGO | S_IWUGO;
- 607 }
- 608
- 609 /*
- 610 * We cannot rely on the fs check since SYSV IPC does have an
- 611 * additional creator id...
- 612 */
- 613 shp = shm_lock(shmid);
- 614 if(shp == NULL)
- 615 return -EINVAL;
- 616 err = shm_checkid(shp,shmid);
- 617 if (err) {
- 618 shm_unlock(shmid);
- 619 return err;
- 620 }
- 621 if (ipcperms(&shp->;shm_perm, acc_mode)) {
- 622 shm_unlock(shmid);
- 623 return -EACCES;
- 624 }
- 625 file = shp->;shm_file;
- 626 size = file->;f_dentry->;d_inode->;i_size;
- 627 shp->;shm_nattch++;
- 628 shm_unlock(shmid);
- 629
- 630 down_write(¤t->;mm->;mmap_sem);
- 631 if (addr && !(shmflg & SHM_REMAP)) {
- 632 user_addr = ERR_PTR(-EINVAL);
- 633 if (find_vma_intersection(current->;mm, addr, addr + size))
- 634 goto invalid;
- 635 /*
- 636 * If shm segment goes below stack, make sure there is some
- 637 * space left for the stack to grow (at least 4 pages).
- 638 */
- 639 if (addr < current->;mm->;start_stack &&
- 640 addr >; current->;mm->;start_stack - size - PAGE_SIZE * 5)
- 641 goto invalid;
- 642 }
- 643
- 644 user_addr = (void*) do_mmap (file, addr, size, prot, flags, 0);
- 645
- 646 invalid:
- 647 up_write(¤t->;mm->;mmap_sem);
- 648
- 649 down (&shm_ids.sem);
- 650 if(!(shp = shm_lock(shmid)))
- 651 BUG();
- 652 shp->;shm_nattch--;
- 653 if(shp->;shm_nattch == 0 &&
- 654 shp->;shm_flags & SHM_DEST)
- 655 shm_destroy (shp);
- 656 else
- 657 shm_unlock(shmid);
- 658 up (&shm_ids.sem);
- 659
- 660 *raddr = (unsigned long) user_addr;
- 661 err = 0;
- 662 if (IS_ERR(user_addr))
- 663 err = PTR_ERR(user_addr);
- 664 return err;
- 665
- 666 }
- 667
- 668 /*
- 669 * detach and kill segment if marked destroyed.
- 670 * The work is done in shm_close.
- 671 */
- 672 asmlinkage long sys_shmdt (char *shmaddr)
- 673 {
- 674 struct mm_struct *mm = current->;mm;
- 675 struct vm_area_struct *shmd, *shmdnext;
- 676 int retval = -EINVAL;
- 677
- 678 down_write(&mm->;mmap_sem);
- 679 for (shmd = mm->;mmap; shmd; shmd = shmdnext) {
- 680 shmdnext = shmd->;vm_next;
- 681 if (shmd->;vm_ops == &shm_vm_ops
- 682 && shmd->;vm_start - (shmd->;vm_pgoff << PAGE_SHIFT) == (ulong) shmaddr) {
- 683 do_munmap(mm, shmd->;vm_start, shmd->;vm_end - shmd->;vm_start);
- 684 retval = 0;
- 685 }
- 686 }
- 687 up_write(&mm->;mmap_sem);
- 688 return retval;
- 689 }
- 690
#ifdef CONFIG_PROC_FS
/*
 * Generate /proc/sysvipc/shm: a header line plus one line per live
 * segment, using the classic read_proc offset/length windowing protocol.
 */
static int sysvipc_shm_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
{
	off_t pos = 0;
	off_t begin = 0;
	int i, len = 0;

	down(&shm_ids.sem);
	len += sprintf(buffer, " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime\n");

	for(i = 0; i <= shm_ids.max_id; i++) {
		struct shmid_kernel* shp;

		shp = shm_lock(i);
		if(shp!=NULL) {
#define SMALL_STRING "%10d %10d %4o %10u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
#define BIG_STRING "%10d %10d %4o %21u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
			char *format;

			/* wider size column when size_t does not fit an int */
			if (sizeof(size_t) <= sizeof(int))
				format = SMALL_STRING;
			else
				format = BIG_STRING;
			len += sprintf(buffer + len, format,
				shp->shm_perm.key,
				shm_buildid(i, shp->shm_perm.seq),
				shp->shm_flags,
				shp->shm_segsz,
				shp->shm_cprid,
				shp->shm_lprid,
				shp->shm_nattch,
				shp->shm_perm.uid,
				shp->shm_perm.gid,
				shp->shm_perm.cuid,
				shp->shm_perm.cgid,
				shp->shm_atim,
				shp->shm_dtim,
				shp->shm_ctim);
			shm_unlock(i);

			pos += len;
			if(pos < offset) {
				len = 0;	/* not yet inside the window */
				begin = pos;
			}
			if(pos > offset + length)
				goto done;
		}
	}
	*eof = 1;
done:
	up(&shm_ids.sem);
	*start = buffer + (offset - begin);
	len -= (offset - begin);
	if(len > length)
		len = length;
	if(len < 0)
		len = 0;
	return len;
}
#endif
- 752
复制代码
解释
sys_shmget()
The entire call to sys_shmget() is protected by the global shared memory semaphore.
In the case where a new shared memory segment must be created, the newseg() function is called to create and initialize a new shared memory segment. The ID of the new segment is returned to the caller.
In the case where a key value is provided for an existing shared memory segment, the corresponding index in the shared memory descriptors array is looked up, and the parameters and permissions of the caller are verified before returning the shared memory segment ID. The look up operation and verification are performed while the global shared memory spinlock is held.
sys_shmctl()
IPC_INFO
A temporary shminfo64 buffer is loaded with system-wide shared memory parameters and is copied out to user space for access by the calling application.
SHM_INFO
The global shared memory semaphore and the global shared memory spinlock are held while gathering system-wide statistical information for shared memory. The shm_get_stat() function is called to calculate both the number of shared memory pages that are resident in memory and the number of shared memory pages that are swapped out. Other statistics include the total number of shared memory pages and the number of shared memory segments in use. The counts of swap_attempts and swap_successes are hard-coded to zero. These statistics are stored in a temporary shm_info buffer and copied out to user space for the calling application.
SHM_STAT, IPC_STAT
For SHM_STAT and IPC_STAT, a temporary buffer of type struct shmid64_ds is initialized, and the global shared memory spinlock is locked.
For the SHM_STAT case, the shared memory segment ID parameter is expected to be a straight index (i.e. 0 to n where n is the number of shared memory IDs in the system). After validating the index, ipc_buildid() is called (via shm_buildid()) to convert the index into a shared memory ID. In the passing case of SHM_STAT, the shared memory ID will be the return value. Note that this is an undocumented feature, but is maintained for the ipcs(8) program.
For the IPC_STAT case, the shared memory segment ID parameter is expected to be an ID that was generated by a call to shmget(). The ID is validated before proceeding. In the passing case of IPC_STAT, 0 will be the return value.
For both SHM_STAT and IPC_STAT, the access permissions of the caller are verified. The desired statistics are loaded into the temporary buffer and then copied out to the calling application.
SHM_LOCK, SHM_UNLOCK
After validating access permissions, the global shared memory spinlock is locked, and the shared memory segment ID is validated. For both SHM_LOCK and SHM_UNLOCK, shmem_lock() is called to perform the function. The parameters for shmem_lock() identify the function to be performed.
IPC_RMID
During IPC_RMID the global shared memory semaphore and the global shared memory spinlock are held throughout this function. The Shared Memory ID is validated, and then if there are no current attachments, shm_destroy() is called to destroy the shared memory segment. Otherwise, the SHM_DEST flag is set to mark it for destruction, and the IPC_PRIVATE flag is set to prevent other processes from being able to reference the shared memory ID.
IPC_SET
After validating the shared memory segment ID and the user access permissions, the uid, gid, and mode flags of the shared memory segment are updated with the user data. The shm_ctime field is also updated. These changes are made while holding the global shared memory semaphore and the global shared memory spinlock.
sys_shmat()
sys_shmat() takes as parameters a shared memory segment ID, an address at which the shared memory segment should be attached (shmaddr), and flags which will be described below.
If shmaddr is non-zero, and the SHM_RND flag is specified, then shmaddr is rounded down to a multiple of SHMLBA. If shmaddr is not a multiple of SHMLBA and SHM_RND is not specified, then EINVAL is returned.
The access permissions of the caller are validated and the shm_nattch field for the shared memory segment is incremented. Note that this increment guarantees that the attachment count is non-zero and prevents the shared memory segment from being destroyed during the process of attaching to the segment. These operations are performed while holding the global shared memory spinlock.
The do_mmap() function is called to create a virtual memory mapping to the shared memory segment pages. This is done while holding the mmap_sem semaphore of the current task. The MAP_SHARED flag is passed to do_mmap(). If an address was provided by the caller, then the MAP_FIXED flag is also passed to do_mmap(). Otherwise, do_mmap() will select the virtual address at which to map the shared memory segment.
NOTE shm_inc() will be invoked within the do_mmap() function call via the shm_file_operations structure. This function is called to set the PID, to set the current time, and to increment the number of attachments to this shared memory segment.
After the call to do_mmap(), the global shared memory semaphore and the global shared memory spinlock are both obtained. The attachment count is then decremented. Thus, the net change to the attachment count is 1 for a call to shmat() because of the call to shm_inc(). If, after decrementing the attachment count, the resulting count is found to be zero, and if the segment is marked for destruction (SHM_DEST), then shm_destroy() is called to release the shared memory segment resources.
Finally, the virtual address at which the shared memory is mapped is returned to the caller at the user specified address. If an error code had been returned by do_mmap(), then this failure code is passed on as the return value for the system call.
sys_shmdt()
The global shared memory semaphore is held while performing sys_shmdt(). The mm_struct of the current process is searched for the vm_area_struct associated with the shared memory address. When it is found, do_munmap() is called to undo the virtual address mapping for the shared memory segment.
Note also that do_munmap() performs a call-back to shm_close(), which performs the shared-memory book keeping functions, and releases the shared memory segment resources if there are no other attachments.
sys_shmdt() returns 0 when at least one matching attachment was unmapped, or -EINVAL when no attachment at the given address was found.
Shared Memory Support Structures
struct shminfo64
--------------------------------------------------------------------------------
struct shminfo64 {
unsigned long shmmax;
unsigned long shmmin;
unsigned long shmmni;
unsigned long shmseg;
unsigned long shmall;
unsigned long __unused1;
unsigned long __unused2;
unsigned long __unused3;
unsigned long __unused4;
};
--------------------------------------------------------------------------------
struct shm_info
--------------------------------------------------------------------------------
struct shm_info {
int used_ids;
unsigned long shm_tot; /* total allocated shm */
unsigned long shm_rss; /* total resident shm */
unsigned long shm_swp; /* total swapped shm */
unsigned long swap_attempts;
unsigned long swap_successes;
};
--------------------------------------------------------------------------------
struct shmid_kernel
--------------------------------------------------------------------------------
struct shmid_kernel /* private to the kernel */
{
struct kern_ipc_perm shm_perm;
struct file * shm_file;
int id;
unsigned long shm_nattch;
unsigned long shm_segsz;
time_t shm_atim;
time_t shm_dtim;
time_t shm_ctim;
pid_t shm_cprid;
pid_t shm_lprid;
};
--------------------------------------------------------------------------------
struct shmid64_ds
--------------------------------------------------------------------------------
struct shmid64_ds {
struct ipc64_perm shm_perm; /* operation perms */
size_t shm_segsz; /* size of segment (bytes) */
__kernel_time_t shm_atime; /* last attach time */
unsigned long __unused1;
__kernel_time_t shm_dtime; /* last detach time */
unsigned long __unused2;
__kernel_time_t shm_ctime; /* last change time */
unsigned long __unused3;
__kernel_pid_t shm_cpid; /* pid of creator */
__kernel_pid_t shm_lpid; /* pid of last operator */
unsigned long shm_nattch; /* no. of current attaches */
unsigned long __unused4;
unsigned long __unused5;
};
--------------------------------------------------------------------------------
struct shmem_inode_info
--------------------------------------------------------------------------------
/*
 * Per-inode bookkeeping for a tmpfs/shmem file: where each page of the
 * file currently lives in swap, plus the mlock state used by shmem_lock().
 */
struct shmem_inode_info {
spinlock_t lock;		/* protects this structure; taken by shm_get_stat() */
unsigned long max_index;
swp_entry_t i_direct[SHMEM_NR_DIRECT]; /* for the first blocks */
swp_entry_t **i_indirect; /* doubly indirect blocks */
unsigned long swapped;		/* number of pages currently swapped out */
int locked; /* into memory */
struct list_head list;
};
--------------------------------------------------------------------------------
Shared Memory Support Functions
newseg()
The newseg() function is called when a new shared memory segment needs to be created. It acts on three parameters for the new segment: the key, the flag, and the size. After validating that the size of the shared memory segment to be created is between SHMMIN and SHMMAX and that the total number of shared memory segments does not exceed SHMALL, it allocates a new shared memory segment descriptor. The shmem_file_setup() function is invoked later to create an unlinked file of type tmpfs. The returned file pointer is saved in the shm_file field of the associated shared memory segment descriptor. The file's size is set to be the same as the size of the segment. The new shared memory segment descriptor is initialized and inserted into the global IPC shared memory descriptors array. The shared memory segment ID is created by shm_buildid() (via ipc_buildid()). This segment ID is saved in the id field of the shared memory segment descriptor, as well as in the i_ino field of the associated inode. In addition, the address of the shared memory operations defined in structure shm_file_operations is stored in the associated file. The value of the global variable shm_tot, which indicates the total number of shared memory segments system wide, is also increased to reflect this change. On success, the segment ID is returned to the caller application.
shm_get_stat()
shm_get_stat() cycles through all of the shared memory structures, and calculates the total number of memory pages in use by shared memory and the total number of shared memory pages that are swapped out. There is a file structure and an inode structure for each shared memory segment. Since the required data is obtained via the inode, the spinlock for each inode structure that is accessed is locked and unlocked in sequence.
shmem_lock()
shmem_lock() receives as parameters a pointer to the shared memory segment descriptor and a flag indicating lock vs. unlock. The locking state of the shared memory segment is stored in an associated inode. This state is compared with the desired locking state; shmem_lock() simply returns if they match.
While holding the semaphore of the associated inode, the locking state of the inode is set. The following list of items occur for each page in the shared memory segment:
find_lock_page() is called to lock the page (setting PG_locked) and to increment the reference count of the page. Incrementing the reference count assures that the shared memory segment remains locked in memory throughout this operation.
If the desired state is locked, then PG_locked is cleared, but the reference count remains incremented.
If the desired state is unlocked, then the reference count is decremented twice: once for the current reference, and once for the existing reference which caused the page to remain locked in memory. Then PG_locked is cleared.
shm_destroy()
During shm_destroy() the total number of shared memory pages is adjusted to account for the removal of the shared memory segment. ipc_rmid() is called (via shm_rmid()) to remove the Shared Memory ID. shmem_lock is called to unlock the shared memory pages, effectively decrementing the reference counts to zero for each page. fput() is called to decrement the usage counter f_count for the associated file object, and if necessary, to release the file object resources. kfree() is called to free the shared memory segment descriptor.
shm_inc()
shm_inc() sets the PID, sets the current time, and increments the number of attachments for the given shared memory segment. These operations are performed while holding the global shared memory spinlock.
shm_close()
shm_close() updates the shm_lprid and the shm_dtim fields and decrements the number of attached shared memory segments. If there are no other attachments to the shared memory segment, then shm_destroy() is called to release the shared memory segment resources. These operations are all performed while holding both the global shared memory semaphore and the global shared memory spinlock.
shmem_file_setup()
The function shmem_file_setup() sets up an unlinked file living in the tmpfs file system with the given name and size. If there are enough system memory resources for this file, it creates a new dentry under the mount root of tmpfs, and allocates a new file descriptor and a new inode object of tmpfs type. Then it associates the new dentry object with the new inode object by calling d_instantiate() and saves the address of the dentry object in the file descriptor. The i_size field of the inode object is set to be the file size and the i_nlink field is set to be 0 in order to mark the inode unlinked. Also, shmem_file_setup() stores the address of the shmem_file_operations structure in the f_op field, and initializes f_mode and f_vfsmnt fields of the file descriptor properly. The function shmem_truncate() is called to complete the initialization of the inode object. On success, shmem_file_setup() returns the new file descriptor.
5.4 Linux IPC Primitives
Generic Linux IPC Primitives used with Semaphores, Messages, and Shared Memory
The semaphores, messages, and shared memory mechanisms of Linux are built on a set of common primitives. These primitives are described in the sections below.
ipc_alloc()
If the memory allocation is greater than PAGE_SIZE, then vmalloc() is used to allocate memory. Otherwise, kmalloc() is called with GFP_KERNEL to allocate the memory.
ipc_addid()
When a new semaphore set, message queue, or shared memory segment is added, ipc_addid() first calls grow_ary() to insure that the size of the corresponding descriptor array is sufficiently large for the system maximum. The array of descriptors is searched for the first unused element. If an unused element is found, the count of descriptors which are in use is incremented. The kern_ipc_perm structure for the new resource descriptor is then initialized, and the array index for the new descriptor is returned. When ipc_addid() succeeds, it returns with the global spinlock for the given IPC type locked.
ipc_rmid()
ipc_rmid() removes the IPC descriptor from the global descriptor array of the IPC type, updates the count of IDs which are in use, and adjusts the maximum ID in the corresponding descriptor array if necessary. A pointer to the IPC descriptor associated with the given IPC ID is returned.
ipc_buildid()
ipc_buildid() creates a unique ID to be associated with each descriptor within a given IPC type. This ID is created at the time a new IPC element is added (e.g. a new shared memory segment or a new semaphore set). The IPC ID converts easily into the corresponding descriptor array index. Each IPC type maintains a sequence number which is incremented each time a descriptor is added. An ID is created by multiplying the sequence number with SEQ_MULTIPLIER and adding the product to the descriptor array index. The sequence number used in creating a particular IPC ID is then stored in the corresponding descriptor. The existence of the sequence number makes it possible to detect the use of a stale IPC ID.
ipc_checkid()
ipc_checkid() divides the given IPC ID by the SEQ_MULTIPLIER and compares the quotient with the seq value saved in the corresponding descriptor. If they are equal, then the IPC ID is considered to be valid and 1 is returned. Otherwise, 0 is returned.
grow_ary()
grow_ary() handles the possibility that the maximum (tunable) number of IDs for a given IPC type can be dynamically changed. It enforces the current maximum limit so that it is no greater than the permanent system limit (IPCMNI) and adjusts it down if necessary. It also insures that the existing descriptor array is large enough. If the existing array size is sufficiently large, then the current maximum limit is returned. Otherwise, a new larger array is allocated, the old array is copied into the new array, and the old array is freed. The corresponding global spinlock is held when updating the descriptor array for the given IPC type.
ipc_findkey()
ipc_findkey() searches through the descriptor array of the specified ipc_ids object, and searches for the specified key. Once found, the index of the corresponding descriptor is returned. If the key is not found, then -1 is returned.
ipcperms()
ipcperms() checks the user, group, and other permissions for access to the IPC resources. It returns 0 if permission is granted and -1 otherwise.
ipc_lock()
ipc_lock() takes an IPC ID as one of its parameters. It locks the global spinlock for the given IPC type, and returns a pointer to the descriptor corresponding to the specified IPC ID.
ipc_unlock()
ipc_unlock() releases the global spinlock for the indicated IPC type.
ipc_lockall()
ipc_lockall() locks the global spinlock for the given IPC mechanism (i.e. shared memory, semaphores, and messaging).
ipc_unlockall()
ipc_unlockall() unlocks the global spinlock for the given IPC mechanism (i.e. shared memory, semaphores, and messaging).
ipc_get()
ipc_get() takes a pointer to a particular IPC type (i.e. shared memory, semaphores, or message queues) and a descriptor ID, and returns a pointer to the corresponding IPC descriptor. Note that although the descriptors for each IPC type are of different data types, the common kern_ipc_perm structure type is embedded as the first entity in every case. The ipc_get() function returns this common data type. The expected model is that ipc_get() is called through a wrapper function (e.g. shm_get()) which casts the data type to the correct descriptor data type.
ipc_parse_version()
ipc_parse_version() removes the IPC_64 flag from the command if it is present and returns either IPC_64 or IPC_OLD.
Generic IPC Structures used with Semaphores, Messages, and Shared Memory
The semaphores, messages, and shared memory mechanisms all make use of the following common structures:
struct kern_ipc_perm
Each of the IPC descriptors has a data object of this type as the first element. This makes it possible to access any descriptor from any of the generic IPC functions using a pointer of this data type.
--------------------------------------------------------------------------------
/* used by in-kernel data structures */
struct kern_ipc_perm {
key_t key;
uid_t uid;
gid_t gid;
uid_t cuid;
gid_t cgid;
mode_t mode;
unsigned long seq;
};
--------------------------------------------------------------------------------
struct ipc_ids
The ipc_ids structure describes the common data for semaphores, message queues, and shared memory. There are three global instances of this data structure-- semid_ds, msgid_ds and shmid_ds-- for semaphores, messages and shared memory respectively. In each instance, the sem semaphore is used to protect access to the structure. The entries field points to an IPC descriptor array, and the ary spinlock protects access to this array. The seq field is a global sequence number which will be incremented when a new IPC resource is created.
--------------------------------------------------------------------------------
/*
 * Per-IPC-type (semaphores, messages, shared memory) bookkeeping: one
 * global instance exists for each mechanism.  'sem' serializes access to
 * the structure itself; 'ary' protects the 'entries' descriptor array.
 */
struct ipc_ids {
int size;			/* current capacity of 'entries'; grown by grow_ary() */
int in_use;			/* number of descriptors in use (ipc_addid()/ipc_rmid()) */
int max_id;			/* highest ID in use; adjusted by ipc_rmid() */
unsigned short seq;		/* global sequence number, bumped on each create */
unsigned short seq_max;		/* upper bound for 'seq' before it wraps */
struct semaphore sem;		/* protects this structure */
spinlock_t ary;			/* protects the 'entries' array */
struct ipc_id* entries;		/* dynamically allocated descriptor array */
};
--------------------------------------------------------------------------------
struct ipc_id
An array of struct ipc_id exists in each instance of the ipc_ids structure. The array is dynamically allocated and may be replaced with a larger array by grow_ary() as required. The array is sometimes referred to as the descriptor array, since the kern_ipc_perm data type is used as the common descriptor data type by the IPC generic functions.
--------------------------------------------------------------------------------
/*
 * One slot of an ipc_ids descriptor array.  The array is dynamically
 * allocated and may be replaced by a larger one in grow_ary().
 */
struct ipc_id {
struct kern_ipc_perm* p;	/* descriptor in this slot; NULL when unused (see ipc_lock()) */
};