Lines Matching defs:shp
88 #define shm_unlock(shp) ipc_unlock(&(shp)->shm_perm)
93 static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
109 * Called with shm_ids.rwsem (writer) and the shp structure locked.
114 struct shmid_kernel *shp;
116 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
117 WARN_ON(ns != shp->ns);
119 if (shp->shm_nattch) {
120 shp->shm_perm.mode |= SHM_DEST;
122 ipc_set_key_private(&shm_ids(ns), &shp->shm_perm);
123 shm_unlock(shp);
125 shm_destroy(ns, shp);
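The fragment above (lines 109-125) is the IPC_RMID path: if the segment still has attachments, it is only marked SHM_DEST and its key is made private; only otherwise is it destroyed immediately. A minimal user-space sketch of the observable effect, assuming a Linux system with SysV shm available (the key value 0x1234 is an arbitrary demo choice):

    /* Observe IPC_RMID on an attached segment: the segment survives for the
     * current attacher, but the key is made private, so a new shmget() with
     * the same key creates a fresh, different segment. */
    #include <stdio.h>
    #include <sys/ipc.h>
    #include <sys/shm.h>

    int main(void)
    {
        key_t key = 0x1234;                    /* arbitrary demo key */
        int old_id = shmget(key, 4096, IPC_CREAT | 0600);
        void *p = shmat(old_id, NULL, 0);      /* shm_nattch becomes 1 */

        if (old_id < 0 || p == (void *)-1) { perror("setup"); return 1; }

        /* Mark for destruction: with attachments present this only sets
         * SHM_DEST and detaches the key from the segment. */
        shmctl(old_id, IPC_RMID, NULL);

        int new_id = shmget(key, 4096, IPC_CREAT | 0600);
        printf("old id %d, new id for same key %d\n", old_id, new_id);

        shmdt(p);                              /* last detach frees old_id */
        shmctl(new_id, IPC_RMID, NULL);        /* clean up the fresh one  */
        return 0;
    }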
227 struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel, shm_perm);
228 security_shm_free(&shp->shm_perm);
229 kvfree(shp);
233 * It has to be called with shp locked.
236 static inline void shm_clist_rm(struct shmid_kernel *shp)
247 if (!list_empty(&shp->shm_clist)) {
249 * shp->shm_creator is guaranteed to be valid *only*
250 * if shp->shm_clist is not empty.
252 creator = shp->shm_creator;
259 list_del_init(&shp->shm_clist);
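Lines 233-259 lean on a common kernel idiom: entries are removed with list_del_init() so that a later list_empty(&entry) check doubles as "is this shp still on its creator's list", which in turn gates whether shm_creator may be dereferenced. A toy user-space re-implementation of that idiom (the list type below is illustrative, not the kernel's <linux/list.h>):

    /* Illustrative circular doubly-linked list showing why list_del_init()
     * lets list_empty(&entry) answer "is this entry still linked?". */
    #include <stdio.h>
    #include <stdbool.h>

    struct node { struct node *prev, *next; };

    static void node_init(struct node *n)           { n->prev = n->next = n; }
    static bool node_unlinked(const struct node *n) { return n->next == n; }

    static void node_add(struct node *head, struct node *n)
    {
        n->next = head->next;
        n->prev = head;
        head->next->prev = n;
        head->next = n;
    }

    static void node_del_init(struct node *n)       /* like list_del_init() */
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
        node_init(n);                       /* entry now tests as unlinked */
    }

    int main(void)
    {
        struct node head, entry;
        node_init(&head);
        node_init(&entry);

        node_add(&head, &entry);
        printf("linked:  unlinked=%d\n", node_unlinked(&entry));   /* 0 */
        node_del_init(&entry);
        printf("removed: unlinked=%d\n", node_unlinked(&entry));   /* 1 */
        return 0;
    }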
275 struct shmid_kernel *shp;
277 shp = shm_lock(sfd->ns, sfd->id);
278 if (IS_ERR(shp)) {
279 return PTR_ERR(shp);
282 if (shp->shm_file != sfd->file) {
284 shm_unlock(shp);
288 shp->shm_atim = ktime_get_real_seconds();
289 ipc_update_pid(&shp->shm_lprid, task_tgid(current));
290 shp->shm_nattch++;
291 shm_unlock(shp);
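Lines 275-291 are the attach bookkeeping: each successful shmat() stamps shm_atim, records the caller's tgid in shm_lprid, and bumps shm_nattch. A small sketch, using only the ordinary SysV API, that makes those updates visible through IPC_STAT:

    /* shmat() bumps shm_nattch and stamps shm_atime/shm_lpid; IPC_STAT shows it. */
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ipc.h>
    #include <sys/shm.h>

    int main(void)
    {
        struct shmid_ds ds;
        int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
        void *p = shmat(id, NULL, 0);

        if (id < 0 || p == (void *)-1) { perror("setup"); return 1; }

        shmctl(id, IPC_STAT, &ds);
        printf("nattch=%lu atime=%ld lpid=%d (my pid %d)\n",
               (unsigned long)ds.shm_nattch, (long)ds.shm_atime,
               (int)ds.shm_lpid, (int)getpid());

        shmdt(p);
        shmctl(id, IPC_RMID, NULL);
        return 0;
    }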
310 * @shp: struct to free
312 * It has to be called with shp and shm_ids.rwsem (writer) locked,
313 * but returns with shp unlocked and freed.
315 static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
319 shm_file = shp->shm_file;
320 shp->shm_file = NULL;
321 ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
322 shm_rmid(shp);
323 shm_unlock(shp);
325 shmem_lock(shm_file, 0, shp->mlock_user);
326 } else if (shp->mlock_user) {
327 user_shm_unlock(i_size_read(file_inode(shm_file)), shp->mlock_user);
330 ipc_update_pid(&shp->shm_cprid, NULL);
331 ipc_update_pid(&shp->shm_lprid, NULL);
332 ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
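Line 321 shows that destruction returns the segment's page count to ns->shm_tot, the same counter newseg() charged. That counter is visible from user space via the Linux-specific SHM_INFO command (needs _GNU_SOURCE), so a sketch can watch it move; concurrent IPC activity on the system may of course perturb the numbers:

    /* Watch SHM_INFO's shm_tot (pages) rise on shmget and fall again
     * after IPC_RMID destroys the segment. */
    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/ipc.h>
    #include <sys/shm.h>

    static unsigned long shm_tot_pages(void)
    {
        struct shm_info info;
        if (shmctl(0, SHM_INFO, (struct shmid_ds *)&info) < 0)
            return 0;
        return info.shm_tot;
    }

    int main(void)
    {
        unsigned long before = shm_tot_pages();
        int id = shmget(IPC_PRIVATE, 8 * 4096, IPC_CREAT | 0600);
        unsigned long during = shm_tot_pages();

        shmctl(id, IPC_RMID, NULL);          /* nattch is 0, so destroy now */
        unsigned long after = shm_tot_pages();

        printf("shm_tot: %lu -> %lu -> %lu pages\n", before, during, after);
        return 0;
    }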
341 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
345 static bool shm_may_destroy(struct shmid_kernel *shp)
347 return (shp->shm_nattch == 0) &&
348 (shp->ns->shm_rmid_forced ||
349 (shp->shm_perm.mode & SHM_DEST));
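Lines 341-349 spell out the destroy condition: no attachments left, and either the namespace runs with shm_rmid_forced or the segment was marked SHM_DEST by IPC_RMID. From user space that means a removed-but-still-attached segment disappears at the last shmdt(); a short check, assuming default sysctls:

    /* A segment marked with IPC_RMID while attached is destroyed by the
     * last shmdt(); afterwards its id is no longer valid. */
    #include <errno.h>
    #include <stdio.h>
    #include <sys/ipc.h>
    #include <sys/shm.h>

    int main(void)
    {
        struct shmid_ds ds;
        int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
        void *p = shmat(id, NULL, 0);

        if (id < 0 || p == (void *)-1) { perror("setup"); return 1; }

        shmctl(id, IPC_RMID, NULL);              /* SHM_DEST set, nattch==1 */
        printf("still stat-able: %d\n", shmctl(id, IPC_STAT, &ds));  /* 0  */

        shmdt(p);                                /* nattch==0 -> shm_destroy */
        printf("after last detach: %d (errno %d)\n",
               shmctl(id, IPC_STAT, &ds), errno);  /* -1, EINVAL or EIDRM */
        return 0;
    }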
362 struct shmid_kernel *shp;
367 shp = shm_lock(ns, sfd->id);
372 if (WARN_ON_ONCE(IS_ERR(shp))) {
376 ipc_update_pid(&shp->shm_lprid, task_tgid(current));
377 shp->shm_dtim = ktime_get_real_seconds();
378 shp->shm_nattch--;
379 if (shm_may_destroy(shp)) {
380 shm_destroy(ns, shp);
382 shm_unlock(shp);
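Lines 362-382 mirror the attach path: shmdt() stamps shm_dtim, updates shm_lprid, and decrements shm_nattch, destroying the segment only when shm_may_destroy() approves. A sketch that reads those fields back after detaching a segment that was never marked for removal:

    /* shmdt() drops shm_nattch and stamps shm_dtime; the segment itself
     * survives because IPC_RMID was never issued. */
    #include <stdio.h>
    #include <sys/ipc.h>
    #include <sys/shm.h>

    int main(void)
    {
        struct shmid_ds ds;
        int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
        void *p = shmat(id, NULL, 0);

        if (id < 0 || p == (void *)-1) { perror("setup"); return 1; }

        shmdt(p);
        shmctl(id, IPC_STAT, &ds);
        printf("nattch=%lu dtime=%ld lpid=%d\n",
               (unsigned long)ds.shm_nattch, (long)ds.shm_dtime,
               (int)ds.shm_lpid);

        shmctl(id, IPC_RMID, NULL);
        return 0;
    }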
393 struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);
399 * As shp->* are changed under rwsem, it's safe to skip shp locking.
401 if (!list_empty(&shp->shm_clist)) {
405 if (shm_may_destroy(shp)) {
406 shm_lock_by_ptr(shp);
407 shm_destroy(ns, shp);
425 struct shmid_kernel *shp;
435 shp = list_first_entry(&task->sysvshm.shm_clist, struct shmid_kernel,
441 * shp lifetime is always shorter than namespace lifetime
442 * in which shp lives.
443 * We have taken task_lock(), which means that shp won't be freed.
445 ns = shp->ns;
463 list_del_init(&shp->shm_clist);
469 * 4) get a reference to shp.
473 WARN_ON(!ipc_rcu_getref(&shp->shm_perm));
482 list_del_init(&shp->shm_clist);
488 * Thus lock & if needed destroy shp.
491 shm_lock_by_ptr(shp);
496 ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
498 if (ipc_valid_object(&shp->shm_perm)) {
499 if (shm_may_destroy(shp))
500 shm_destroy(ns, shp);
502 shm_unlock(shp);
505 * Someone else deleted the shp from namespace
509 shm_unlock(shp);
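Lines 425-509 are the task-exit walk over the creator's shm_clist: with kernel.shm_rmid_forced disabled the entries are merely unlinked (the segments become orphans), with it enabled any segment that shm_may_destroy() approves is torn down. A sketch that observes which behavior the running system uses; whether the segment survives depends on the /proc/sys/kernel/shm_rmid_forced setting:

    /* Create a segment in a child and let the child exit without removing it.
     * With kernel.shm_rmid_forced=0 the segment is merely orphaned; with 1,
     * exit_shm() destroys it (it has no attachments). */
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>
    #include <sys/wait.h>
    #include <sys/ipc.h>
    #include <sys/shm.h>

    int main(void)
    {
        int pipefd[2];
        if (pipe(pipefd) < 0) { perror("pipe"); return 1; }

        pid_t pid = fork();
        if (pid == 0) {                          /* child: the creator task */
            int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
            if (write(pipefd[1], &id, sizeof(id)) != (ssize_t)sizeof(id))
                _exit(1);
            _exit(0);                            /* triggers exit_shm() */
        }

        int id = -1;
        if (read(pipefd[0], &id, sizeof(id)) != (ssize_t)sizeof(id)) {
            perror("read");
            return 1;
        }
        waitpid(pid, NULL, 0);

        struct shmid_ds ds;
        int rc = shmctl(id, IPC_STAT, &ds);
        printf("segment %d after creator exit: %s\n",
               id, rc == 0 ? "still present (orphaned)" : "destroyed");

        if (rc == 0)
            shmctl(id, IPC_RMID, NULL);          /* clean up the orphan */
        return 0;
    }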
697 struct shmid_kernel *shp;
715 shp = kvmalloc(sizeof(*shp), GFP_KERNEL);
716 if (unlikely(!shp)) {
720 shp->shm_perm.key = key;
721 shp->shm_perm.mode = (shmflg & S_IRWXUGO);
722 shp->mlock_user = NULL;
724 shp->shm_perm.security = NULL;
725 error = security_shm_alloc(&shp->shm_perm);
727 kvfree(shp);
747 file = hugetlb_file_setup(name, hugesize, acctflag, &shp->mlock_user, HUGETLB_SHMFS_INODE,
764 shp->shm_cprid = get_pid(task_tgid(current));
765 shp->shm_lprid = NULL;
766 shp->shm_atim = shp->shm_dtim = 0;
767 shp->shm_ctim = ktime_get_real_seconds();
768 shp->shm_segsz = size;
769 shp->shm_nattch = 0;
770 shp->shm_file = file;
771 shp->shm_creator = current;
773 /* ipc_addid() locks shp upon success. */
774 error = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
778 shp->ns = ns;
781 list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
788 file_inode(file)->i_ino = shp->shm_perm.id;
791 error = shp->shm_perm.id;
793 ipc_unlock_object(&shp->shm_perm);
798 ipc_update_pid(&shp->shm_cprid, NULL);
799 ipc_update_pid(&shp->shm_lprid, NULL);
800 if (is_file_hugepages(file) && shp->mlock_user) {
801 user_shm_unlock(size, shp->mlock_user);
804 ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
807 call_rcu(&shp->shm_perm.rcu, shm_rcu_free);
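Lines 697-807 are segment creation: the descriptor is kvmalloc'ed, key/mode/security are set, a shmem or hugetlb file is created, the pids and timestamps are initialized, and ipc_addid() publishes the object locked. From user space that entire path is one shmget() call; a sketch showing the freshly initialized fields:

    /* A new segment starts with nattch==0, atime==dtime==0, ctime set, and
     * shm_cpid equal to the creator's pid (see the field setup in newseg()). */
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ipc.h>
    #include <sys/shm.h>

    int main(void)
    {
        struct shmid_ds ds;
        int id = shmget(IPC_PRIVATE, 12345, IPC_CREAT | 0600);

        if (id < 0 || shmctl(id, IPC_STAT, &ds) < 0) { perror("shm"); return 1; }

        printf("segsz=%zu nattch=%lu cpid=%d (me %d) atime=%ld ctime=%ld\n",
               (size_t)ds.shm_segsz, (unsigned long)ds.shm_nattch,
               (int)ds.shm_cpid, (int)getpid(),
               (long)ds.shm_atime, (long)ds.shm_ctime);

        shmctl(id, IPC_RMID, NULL);
        return 0;
    }

Note that shm_segsz keeps the requested byte count (12345 here); only the shm_tot accounting on line 321/766 is rounded up to whole pages.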
816 struct shmid_kernel *shp;
818 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
819 if (shp->shm_segsz < params->u.size) {
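Lines 816-819 implement the size check applied when shmget() finds an existing key: the request fails if it asks for more bytes than the existing segment holds. A short sketch of that EINVAL, again with an arbitrary demo key:

    /* Re-shmget()ing an existing key with a larger size fails with EINVAL
     * (shm_more_checks rejects params->u.size > shm_segsz). */
    #include <errno.h>
    #include <stdio.h>
    #include <sys/ipc.h>
    #include <sys/shm.h>

    int main(void)
    {
        key_t key = 0x2345;                      /* arbitrary demo key */
        int id = shmget(key, 4096, IPC_CREAT | 0600);
        int again = shmget(key, 8192, 0600);     /* bigger than existing */

        printf("existing id %d, oversized lookup %d (errno %d, EINVAL=%d)\n",
               id, again, errno, EINVAL);

        shmctl(id, IPC_RMID, NULL);
        return 0;
    }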
931 static void shm_add_rss_swap(struct shmid_kernel *shp, unsigned long *rss_add, unsigned long *swp_add)
935 inode = file_inode(shp->shm_file);
937 if (is_file_hugepages(shp->shm_file)) {
939 struct hstate *h = hstate_file(shp->shm_file);
970 struct shmid_kernel *shp;
976 shp = container_of(ipc, struct shmid_kernel, shm_perm);
978 shm_add_rss_swap(shp, rss, swp);
992 struct shmid_kernel *shp;
1004 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
1006 err = security_shm_shmctl(&shp->shm_perm, cmd);
1013 ipc_lock_object(&shp->shm_perm);
1018 ipc_lock_object(&shp->shm_perm);
1023 shp->shm_ctim = ktime_get_real_seconds();
1031 ipc_unlock_object(&shp->shm_perm);
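Lines 992-1031 are the IPC_SET/IPC_RMID control path; note the explicit shm_ctim update on line 1023. A sketch that changes the permission bits and reads back the bumped shm_ctime:

    /* IPC_SET updates shm_perm and stamps shm_ctime (line 1023 above). */
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ipc.h>
    #include <sys/shm.h>

    int main(void)
    {
        struct shmid_ds ds;
        int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);

        if (id < 0 || shmctl(id, IPC_STAT, &ds) < 0) { perror("shm"); return 1; }
        long ctime_before = (long)ds.shm_ctime;

        sleep(1);                                /* make the bump visible */
        ds.shm_perm.mode = 0640;                 /* new permission bits   */
        shmctl(id, IPC_SET, &ds);

        shmctl(id, IPC_STAT, &ds);
        printf("mode now %o, ctime %ld -> %ld\n",
               (unsigned)(ds.shm_perm.mode & 0777),
               ctime_before, (long)ds.shm_ctime);

        shmctl(id, IPC_RMID, NULL);
        return 0;
    }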
1080 struct shmid_kernel *shp;
1087 shp = shm_obtain_object(ns, shmid);
1088 if (IS_ERR(shp)) {
1089 err = PTR_ERR(shp);
1093 shp = shm_obtain_object_check(ns, shmid);
1094 if (IS_ERR(shp)) {
1095 err = PTR_ERR(shp);
1108 audit_ipc_obj(&shp->shm_perm);
1111 if (ipcperms(ns, &shp->shm_perm, S_IRUGO)) {
1116 err = security_shm_shmctl(&shp->shm_perm, cmd);
1121 ipc_lock_object(&shp->shm_perm);
1123 if (!ipc_valid_object(&shp->shm_perm)) {
1124 ipc_unlock_object(&shp->shm_perm);
1129 kernel_to_ipc64_perm(&shp->shm_perm, &tbuf->shm_perm);
1130 tbuf->shm_segsz = shp->shm_segsz;
1131 tbuf->shm_atime = shp->shm_atim;
1132 tbuf->shm_dtime = shp->shm_dtim;
1133 tbuf->shm_ctime = shp->shm_ctim;
1135 tbuf->shm_atime_high = shp->shm_atim >> 32;
1136 tbuf->shm_dtime_high = shp->shm_dtim >> 32;
1137 tbuf->shm_ctime_high = shp->shm_ctim >> 32;
1139 tbuf->shm_cpid = pid_vnr(shp->shm_cprid);
1140 tbuf->shm_lpid = pid_vnr(shp->shm_lprid);
1141 tbuf->shm_nattch = shp->shm_nattch;
1154 err = shp->shm_perm.id;
1157 ipc_unlock_object(&shp->shm_perm);
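Lines 1080-1157 serve both IPC_STAT (lookup by id, permission-checked) and SHM_STAT (lookup by kernel-internal index, with the id handed back as the return value on line 1154). A sketch that enumerates segments the SHM_STAT way; SHM_INFO and SHM_STAT are Linux-specific and need _GNU_SOURCE:

    /* Enumerate segments by index with SHM_STAT; the return value is the
     * shmid (err = shp->shm_perm.id in shmctl_stat). */
    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/ipc.h>
    #include <sys/shm.h>

    int main(void)
    {
        struct shm_info info;
        int maxidx = shmctl(0, SHM_INFO, (struct shmid_ds *)&info);

        if (maxidx < 0) { perror("SHM_INFO"); return 1; }

        for (int i = 0; i <= maxidx; i++) {
            struct shmid_ds ds;
            int id = shmctl(i, SHM_STAT, &ds);
            if (id < 0)
                continue;                 /* empty slot or no permission */
            printf("index %d -> id %d, key 0x%x, size %zu, nattch %lu\n",
                   i, id, (unsigned)ds.shm_perm.__key,
                   (size_t)ds.shm_segsz, (unsigned long)ds.shm_nattch);
        }
        return 0;
    }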
1165 struct shmid_kernel *shp;
1170 shp = shm_obtain_object_check(ns, shmid);
1171 if (IS_ERR(shp)) {
1172 err = PTR_ERR(shp);
1176 audit_ipc_obj(&(shp->shm_perm));
1177 err = security_shm_shmctl(&shp->shm_perm, cmd);
1182 ipc_lock_object(&shp->shm_perm);
1184 /* check if shm_destroy() is tearing down shp */
1185 if (!ipc_valid_object(&shp->shm_perm)) {
1192 if (!uid_eq(euid, shp->shm_perm.uid) && !uid_eq(euid, shp->shm_perm.cuid)) {
1202 shm_file = shp->shm_file;
1211 if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
1212 shp->shm_perm.mode |= SHM_LOCKED;
1213 shp->mlock_user = user;
1219 if (!(shp->shm_perm.mode & SHM_LOCKED)) {
1222 shmem_lock(shm_file, 0, shp->mlock_user);
1223 shp->shm_perm.mode &= ~SHM_LOCKED;
1224 shp->mlock_user = NULL;
1226 ipc_unlock_object(&shp->shm_perm);
1234 ipc_unlock_object(&shp->shm_perm);
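Lines 1165-1234 are SHM_LOCK/SHM_UNLOCK: locking pins the segment's pages via shmem_lock(), records the accounting user in mlock_user, and sets the SHM_LOCKED bit in shm_perm.mode. A sketch, noting SHM_LOCK is limited by RLIMIT_MEMLOCK unless the caller has CAP_IPC_LOCK:

    /* SHM_LOCK pins the segment and sets the SHM_LOCKED mode bit;
     * SHM_UNLOCK clears it again.  May fail with EPERM/ENOMEM for an
     * unprivileged caller over RLIMIT_MEMLOCK. */
    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/ipc.h>
    #include <sys/shm.h>

    int main(void)
    {
        struct shmid_ds ds;
        int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);

        if (id < 0) { perror("shmget"); return 1; }

        if (shmctl(id, SHM_LOCK, NULL) < 0) {
            perror("SHM_LOCK");              /* e.g. over RLIMIT_MEMLOCK */
        } else {
            shmctl(id, IPC_STAT, &ds);
            printf("locked: mode & SHM_LOCKED = %d\n",
                   (ds.shm_perm.mode & SHM_LOCKED) != 0);
            shmctl(id, SHM_UNLOCK, NULL);
        }

        shmctl(id, IPC_RMID, NULL);
        return 0;
    }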
1532 struct shmid_kernel *shp;
1595 shp = shm_obtain_object_check(ns, shmid);
1596 if (IS_ERR(shp)) {
1597 err = PTR_ERR(shp);
1602 if (ipcperms(ns, &shp->shm_perm, acc_mode)) {
1606 err = security_shm_shmat(&shp->shm_perm, shmaddr, shmflg);
1611 ipc_lock_object(&shp->shm_perm);
1613 /* check if shm_destroy() is tearing down shp */
1614 if (!ipc_valid_object(&shp->shm_perm)) {
1615 ipc_unlock_object(&shp->shm_perm);
1629 base = get_file(shp->shm_file);
1630 shp->shm_nattch++;
1632 ipc_unlock_object(&shp->shm_perm);
1650 sfd->id = shp->shm_perm.id;
1694 shp = shm_lock(ns, shmid);
1695 shp->shm_nattch--;
1696 if (shm_may_destroy(shp)) {
1697 shm_destroy(ns, shp);
1699 shm_unlock(shp);
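Lines 1532-1699 are the attach syscall proper: permission and security checks, an nattch++ under the object lock so the segment cannot vanish while the mapping is built, and an nattch-- with a possible shm_destroy() on the error or detach path. The ordinary user-space view of all this is just shmat()/shmdt(); a round-trip sketch:

    /* Attach twice (read/write and read-only), pass data through the
     * mapping, then detach and remove the segment. */
    #include <stdio.h>
    #include <string.h>
    #include <sys/ipc.h>
    #include <sys/shm.h>

    int main(void)
    {
        int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
        char *rw = shmat(id, NULL, 0);                 /* read/write mapping */
        const char *ro = shmat(id, NULL, SHM_RDONLY);  /* second, read-only  */

        if (id < 0 || rw == (void *)-1 || ro == (void *)-1) {
            perror("shm");
            return 1;
        }

        strcpy(rw, "hello through SysV shm");
        printf("read-only view sees: %s\n", ro);

        shmdt(rw);
        shmdt(ro);
        shmctl(id, IPC_RMID, NULL);
        return 0;
    }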
1866 struct shmid_kernel *shp;
1869 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
1870 shm_add_rss_swap(shp, &rss, &swp);
1881 shp->shm_perm.key, shp->shm_perm.id, shp->shm_perm.mode, shp->shm_segsz,
1882 pid_nr_ns(shp->shm_cprid, pid_ns), pid_nr_ns(shp->shm_lprid, pid_ns), shp->shm_nattch,
1883 from_kuid_munged(user_ns, shp->shm_perm.uid), from_kgid_munged(user_ns, shp->shm_perm.gid),
1884 from_kuid_munged(user_ns, shp->shm_perm.cuid), from_kgid_munged(user_ns, shp->shm_perm.cgid),
1885 shp->shm_atim, shp->shm_dtim, shp->shm_ctim, rss * PAGE_SIZE, swp * PAGE_SIZE);
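Lines 1866-1885 format one row of /proc/sysvipc/shm: key, id, mode, size, cpid, lpid, nattch, the four uid/gid values, the three timestamps, and the rss/swap figures computed by shm_add_rss_swap(). A sketch that simply dumps that file, which is the easiest way to see the columns in the order printed above:

    /* Print /proc/sysvipc/shm; its columns match the seq_printf() above. */
    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/proc/sysvipc/shm", "r");
        char line[1024];

        if (!f) { perror("/proc/sysvipc/shm"); return 1; }

        while (fgets(line, sizeof(line), f))
            fputs(line, stdout);

        fclose(f);
        return 0;
    }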