Lines Matching defs:sma
275 * @sma: semaphore array
280 static void unmerge_queues(struct sem_array *sma)
285 if (sma->complex_count) {
293 list_for_each_entry_safe(q, tq, &sma->pending_alter, list)
296 curr = &sma->sems[q->sops[0].sem_num];
300 INIT_LIST_HEAD(&sma->pending_alter);
305 * @sma: semaphore array
312 static void merge_queues(struct sem_array *sma)
315 for (i = 0; i < sma->sem_nsems; i++) {
316 struct sem *sem = &sma->sems[i];
318 list_splice_init(&sem->pending_alter, &sma->pending_alter);
325 struct sem_array *sma = container_of(p, struct sem_array, sem_perm);
327 security_sem_free(&sma->sem_perm);
328 kvfree(sma);
335 static void complexmode_enter(struct sem_array *sma)
340 if (sma->use_global_lock > 0) {
346 sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
349 sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
351 for (i = 0; i < sma->sem_nsems; i++) {
352 sem = &sma->sems[i];
362 static void complexmode_tryleave(struct sem_array *sma)
364 if (sma->complex_count) {
370 if (sma->use_global_lock == 1) {
372 smp_store_release(&sma->use_global_lock, 0);
374 sma->use_global_lock--;
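
The pair complexmode_enter()/complexmode_tryleave() above implements a hysteresis: entering complex mode arms use_global_lock with USE_GLOBAL_LOCK_HYSTERESIS, and each completed simple operation decays it by one while no complex operation sleeps, with the final 1 -> 0 transition published via smp_store_release() (line 372) so the per-semaphore fast path can observe it. A standalone model of that counter, with hypothetical names and the locking itself elided:

#include <stdio.h>

#define MODEL_HYSTERESIS 10	/* stands in for USE_GLOBAL_LOCK_HYSTERESIS */

static int use_global_lock;	/* models sma->use_global_lock */
static int complex_count;	/* models sma->complex_count */

/* models complexmode_enter(): (re)arm the counter */
static void model_complex_op(void)
{
	use_global_lock = MODEL_HYSTERESIS;
}

/* models complexmode_tryleave() at the end of a simple op */
static void model_simple_op_done(void)
{
	if (complex_count)
		return;			/* complex ops sleeping: stay global */
	if (use_global_lock > 0)
		use_global_lock--;	/* final 1->0 is smp_store_release() */
}

int main(void)
{
	model_complex_op();
	for (int i = 1; i <= 12; i++) {
		model_simple_op_done();
		printf("simple op %2d: use_global_lock=%d (%s)\n", i,
		       use_global_lock,
		       use_global_lock ? "global lock" : "fast path");
	}
	return 0;
}
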
386 static inline int sem_lock(struct sem_array *sma, struct sembuf *sops, int nsops)
393 ipc_lock_object(&sma->sem_perm);
396 complexmode_enter(sma);
407 idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
408 sem = &sma->sems[idx];
414 if (!sma->use_global_lock) {
422 if (!smp_load_acquire(&sma->use_global_lock)) {
430 ipc_lock_object(&sma->sem_perm);
432 if (sma->use_global_lock == 0) {
435 * sma->sem_perm.lock. Thus we must switch to locking
438 * sma->use_global_lock after we have acquired sem->lock:
439 * We own sma->sem_perm.lock, thus use_global_lock cannot
444 ipc_unlock_object(&sma->sem_perm);
456 static inline void sem_unlock(struct sem_array *sma, int locknum)
459 unmerge_queues(sma);
460 complexmode_tryleave(sma);
461 ipc_unlock_object(&sma->sem_perm);
463 struct sem *sem = &sma->sems[locknum];
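
sem_lock()/sem_unlock() above select between two levels: complex operations (nsops != 1) and arrays still in hysteresis take the global sma->sem_perm.lock and report -1 as the locknum, while a single-semaphore operation on a quiet array takes only sma->sems[sem_num].lock. A condensed sketch of that choice, omitting the re-check dance against a racing switch into global mode (lines 414-444):

#include <stdio.h>

#define LOCK_GLOBAL (-1)	/* locknum reported for the global lock */

static int sem_lock_choice(int nsops, int use_global_lock, int sem_num)
{
	if (nsops != 1)
		return LOCK_GLOBAL;	/* complex op: sma->sem_perm.lock */
	if (use_global_lock)
		return LOCK_GLOBAL;	/* hysteresis active: stay global */
	return sem_num;			/* fast path: sma->sems[sem_num].lock */
}

int main(void)
{
	printf("%d\n", sem_lock_choice(2, 0, 0));	/* -1: complex op */
	printf("%d\n", sem_lock_choice(1, 3, 5));	/* -1: hysteresis */
	printf("%d\n", sem_lock_choice(1, 0, 5));	/*  5: per-sem lock */
	return 0;
}
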
496 static inline void sem_lock_and_putref(struct sem_array *sma)
498 sem_lock(sma, NULL, -1);
499 ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
509 struct sem_array *sma;
511 if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0])) {
515 sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL);
516 if (unlikely(!sma)) {
520 return sma;
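
sem_alloc() guards the flexible-array allocation against integer overflow before calling kvzalloc(struct_size(...)) (lines 511-515). A userspace illustration of the same guard, with hypothetical types:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

struct elem { int val; };

struct arr {			/* hypothetical stand-in for sem_array */
	size_t nsems;
	struct elem sems[];	/* flexible array member */
};

static struct arr *arr_alloc(size_t nsems)
{
	/* same shape as the check at line 511 above */
	if (nsems > (INT_MAX - sizeof(struct arr)) / sizeof(struct elem))
		return NULL;
	return calloc(1, sizeof(struct arr) + nsems * sizeof(struct elem));
}

int main(void)
{
	struct arr *a = arr_alloc(16);

	printf("alloc(16):   %s\n", a ? "ok" : "failed");
	printf("alloc(huge): %s\n",
	       arr_alloc((size_t)INT_MAX) ? "ok" : "rejected");
	free(a);
	return 0;
}
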
533 struct sem_array *sma;
546 sma = sem_alloc(nsems);
547 if (!sma) {
551 sma->sem_perm.mode = (semflg & S_IRWXUGO);
552 sma->sem_perm.key = key;
554 sma->sem_perm.security = NULL;
555 retval = security_sem_alloc(&sma->sem_perm);
557 kvfree(sma);
562 INIT_LIST_HEAD(&sma->sems[i].pending_alter);
563 INIT_LIST_HEAD(&sma->sems[i].pending_const);
564 spin_lock_init(&sma->sems[i].lock);
567 sma->complex_count = 0;
568 sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
569 INIT_LIST_HEAD(&sma->pending_alter);
570 INIT_LIST_HEAD(&sma->pending_const);
571 INIT_LIST_HEAD(&sma->list_id);
572 sma->sem_nsems = nsems;
573 sma->sem_ctime = ktime_get_real_seconds();
575 /* ipc_addid() locks sma upon success. */
576 retval = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
578 ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
583 sem_unlock(sma, -1);
586 return sma->sem_perm.id;
594 struct sem_array *sma;
596 sma = container_of(ipcp, struct sem_array, sem_perm);
597 if (params->u.nsems > sma->sem_nsems) {
635 * @sma: semaphore array
649 static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
663 int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);
664 curr = &sma->sems[idx];
695 ipc_update_pid(&sma->sems[sop->sem_num].sempid, pid);
718 sma->sems[sop->sem_num].semval -= sem_op;
728 static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
741 return perform_atomic_semop_slow(sma, q);
751 int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);
753 curr = &sma->sems[idx];
781 curr = &sma->sems[sop->sem_num];
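
perform_atomic_semop() is all-or-nothing: it returns 0 when every operation in the queue entry completed and 1 when the set would block, applying nothing in the blocking case. A userspace model of the fast path's two-pass check-then-apply structure, with hypothetical types and the SEMVMX/wait-for-zero checks omitted:

#include <stdio.h>

struct op { int num; int delta; };	/* hypothetical mini-sembuf */

static int apply_atomically(int *vals, const struct op *ops, int nops)
{
	int i;

	/* pass 1: would any op block? then apply nothing */
	for (i = 0; i < nops; i++)
		if (vals[ops[i].num] + ops[i].delta < 0)
			return 1;
	/* pass 2: all fit, apply the whole set */
	for (i = 0; i < nops; i++)
		vals[ops[i].num] += ops[i].delta;
	return 0;
}

int main(void)
{
	int vals[2] = { 1, 0 };
	struct op ops[] = { { 0, -1 }, { 1, -1 } };	/* 2nd op blocks */

	printf("ret=%d vals={%d,%d}\n",
	       apply_atomically(vals, ops, 2), vals[0], vals[1]);
	return 0;
}
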
813 static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
817 sma->complex_count--;
821 /** check_restart(sma, q)
822 * @sma: semaphore array
831 static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
834 if (!list_empty(&sma->pending_alter)) {
859 * @sma: semaphore array.
871 static int wake_const_ops(struct sem_array *sma, int semnum, struct wake_q_head *wake_q)
878 pending_list = &sma->pending_const;
880 pending_list = &sma->sems[semnum].pending_const;
885 int error = perform_atomic_semop(sma, q);
890 unlink_queue(sma, q);
903 * @sma: semaphore array
912 static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops, int nsops, struct wake_q_head *wake_q)
923 if (sma->sems[num].semval == 0) {
925 semop_completed |= wake_const_ops(sma, num, wake_q);
933 for (i = 0; i < sma->sem_nsems; i++) {
934 if (sma->sems[i].semval == 0) {
936 semop_completed |= wake_const_ops(sma, i, wake_q);
945 semop_completed |= wake_const_ops(sma, -1, wake_q);
953 * @sma: semaphore array.
967 static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *wake_q)
974 pending_list = &sma->pending_alter;
976 pending_list = &sma->sems[semnum].pending_alter;
991 if (semnum != -1 && sma->sems[semnum].semval == 0) {
994 error = perform_atomic_semop(sma, q);
1000 unlink_queue(sma, q);
1006 do_smart_wakeup_zero(sma, q->sops, q->nsops, wake_q);
1007 restart = check_restart(sma, q);
1020 * @sma: semaphore array
1026 static void set_semotime(struct sem_array *sma, struct sembuf *sops)
1029 sma->sems[0].sem_otime = ktime_get_real_seconds();
1031 sma->sems[sops[0].sem_num].sem_otime = ktime_get_real_seconds();
1037 * @sma: semaphore array
1049 static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops, int otime, struct wake_q_head *wake_q)
1054 otime |= do_smart_wakeup_zero(sma, sops, nsops, wake_q);
1056 if (!list_empty(&sma->pending_alter)) {
1058 otime |= update_queue(sma, -1, wake_q);
1065 for (i = 0; i < sma->sem_nsems; i++) {
1066 otime |= update_queue(sma, i, wake_q);
1080 otime |= update_queue(sma, sops[i].sem_num, wake_q);
1086 set_semotime(sma, sops);
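
Note the ordering the matched lines encode: do_smart_update() services the wait-for-zero queues first (line 1054), then the alter queues, either the global one (line 1058) or per touched semaphore (lines 1065-1080), and stamps sem_otime via set_semotime() (line 1086) only if some operation actually completed.
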
1093 static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q, bool count_zero)
1129 static int count_semcnt(struct sem_array *sma, ushort semnum, bool count_zero)
1138 l = &sma->sems[semnum].pending_const;
1140 l = &sma->sems[semnum].pending_alter;
1152 list_for_each_entry(q, &sma->pending_alter, list)
1154 semcnt += check_qop(sma, semnum, q, count_zero);
1157 list_for_each_entry(q, &sma->pending_const, list)
1159 semcnt += check_qop(sma, semnum, q, count_zero);
1173 struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
1178 ipc_assert_locked_object(&sma->sem_perm);
1179 list_for_each_entry_safe(un, tu, &sma->list_id, list_id)
1190 list_for_each_entry_safe(q, tq, &sma->pending_const, list)
1192 unlink_queue(sma, q);
1196 list_for_each_entry_safe(q, tq, &sma->pending_alter, list)
1198 unlink_queue(sma, q);
1201 for (i = 0; i < sma->sem_nsems; i++) {
1202 struct sem *sem = &sma->sems[i];
1205 unlink_queue(sma, q);
1210 unlink_queue(sma, q);
1217 sem_rmid(ns, sma);
1218 sem_unlock(sma, -1);
1222 ns->used_sems -= sma->sem_nsems;
1223 ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
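
The teardown order in freeary() matters: with the object locked (line 1178), the undo structures are detached first (line 1179), then every sleeper on the global and per-semaphore const/alter queues is unlinked and woken (lines 1190-1210), the id is removed (line 1217), and the actual free is deferred to RCU via ipc_rcu_putref() (line 1223) so lockless readers cannot see freed memory.
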
1249 static time64_t get_semotime(struct sem_array *sma)
1254 res = sma->sems[0].sem_otime;
1255 for (i = 1; i < sma->sem_nsems; i++) {
1256 time64_t to = sma->sems[i].sem_otime;
1267 struct sem_array *sma;
1275 sma = sem_obtain_object(ns, semid);
1276 if (IS_ERR(sma)) {
1277 err = PTR_ERR(sma);
1281 sma = sem_obtain_object_check(ns, semid);
1282 if (IS_ERR(sma)) {
1283 err = PTR_ERR(sma);
1290 audit_ipc_obj(&sma->sem_perm);
1293 if (ipcperms(ns, &sma->sem_perm, S_IRUGO)) {
1298 err = security_sem_semctl(&sma->sem_perm, cmd);
1303 ipc_lock_object(&sma->sem_perm);
1305 if (!ipc_valid_object(&sma->sem_perm)) {
1306 ipc_unlock_object(&sma->sem_perm);
1311 kernel_to_ipc64_perm(&sma->sem_perm, &semid64->sem_perm);
1312 semotime = get_semotime(sma);
1314 semid64->sem_ctime = sma->sem_ctime;
1317 semid64->sem_ctime_high = sma->sem_ctime >> 32;
1319 semid64->sem_nsems = sma->sem_nsems;
1332 err = sma->sem_perm.id;
1334 ipc_unlock_object(&sma->sem_perm);
1379 struct sem_array *sma;
1389 sma = sem_obtain_object_check(ns, semid);
1390 if (IS_ERR(sma)) {
1392 return PTR_ERR(sma);
1395 if (semnum < 0 || semnum >= sma->sem_nsems) {
1400 if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
1405 err = security_sem_semctl(&sma->sem_perm, SETVAL);
1411 sem_lock(sma, NULL, -1);
1413 if (!ipc_valid_object(&sma->sem_perm)) {
1414 sem_unlock(sma, -1);
1419 semnum = array_index_nospec(semnum, sma->sem_nsems);
1420 curr = &sma->sems[semnum];
1422 ipc_assert_locked_object(&sma->sem_perm);
1423 list_for_each_entry(un, &sma->list_id, list_id)
1427 sma->sem_ctime = ktime_get_real_seconds();
1429 do_smart_update(sma, NULL, 0, 0, &wake_q);
1430 sem_unlock(sma, -1);
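
From userspace, the SETVAL path above is reached through semctl(2); it validates semnum, clears any undo adjustments for that semaphore (line 1423), stamps sem_ctime, and wakes now-runnable waiters via do_smart_update(). A minimal caller (on Linux, defining union semun is the caller's responsibility):

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>

union semun {			/* the caller must define this on Linux */
	int val;
	struct semid_ds *buf;
	unsigned short *array;
};

int main(void)
{
	int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
	union semun arg = { .val = 1 };

	if (semid < 0 || semctl(semid, 0, SETVAL, arg) < 0) {
		perror("semget/semctl");
		return 1;
	}
	printf("sem 0 = %d\n", semctl(semid, 0, GETVAL));
	semctl(semid, 0, IPC_RMID);
	return 0;
}
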
1438 struct sem_array *sma;
1446 sma = sem_obtain_object_check(ns, semid);
1447 if (IS_ERR(sma)) {
1449 return PTR_ERR(sma);
1452 nsems = sma->sem_nsems;
1455 if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO)) {
1459 err = security_sem_semctl(&sma->sem_perm, cmd);
1470 sem_lock(sma, NULL, -1);
1471 if (!ipc_valid_object(&sma->sem_perm)) {
1476 if (!ipc_rcu_getref(&sma->sem_perm)) {
1480 sem_unlock(sma, -1);
1484 ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1489 sem_lock_and_putref(sma);
1490 if (!ipc_valid_object(&sma->sem_perm)) {
1495 for (i = 0; i < sma->sem_nsems; i++) {
1496 sem_io[i] = sma->sems[i].semval;
1498 sem_unlock(sma, -1);
1510 if (!ipc_rcu_getref(&sma->sem_perm)) {
1519 ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1525 ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1532 ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1538 sem_lock_and_putref(sma);
1539 if (!ipc_valid_object(&sma->sem_perm)) {
1545 sma->sems[i].semval = sem_io[i];
1546 ipc_update_pid(&sma->sems[i].sempid, task_tgid(current));
1549 ipc_assert_locked_object(&sma->sem_perm);
1550 list_for_each_entry(un, &sma->list_id, list_id)
1556 sma->sem_ctime = ktime_get_real_seconds();
1558 do_smart_update(sma, NULL, 0, 0, &wake_q);
1569 sem_lock(sma, NULL, -1);
1570 if (!ipc_valid_object(&sma->sem_perm)) {
1576 curr = &sma->sems[semnum];
1586 err = count_semcnt(sma, semnum, 0);
1589 err = count_semcnt(sma, semnum, 1);
1594 sem_unlock(sma, -1);
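
The GETALL/SETALL branches above copy the whole value array in one call, taking ipc_rcu_getref() so the array cannot disappear while the transfer buffer is allocated with the lock dropped. A minimal userspace counterpart:

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>

union semun {
	int val;
	struct semid_ds *buf;
	unsigned short *array;
};

int main(void)
{
	unsigned short vals[3] = { 3, 1, 4 };
	union semun arg = { .array = vals };
	int semid = semget(IPC_PRIVATE, 3, IPC_CREAT | 0600);

	if (semid < 0 || semctl(semid, 0, SETALL, arg) < 0) {
		perror("semget/semctl");
		return 1;
	}
	vals[0] = vals[1] = vals[2] = 0;
	semctl(semid, 0, GETALL, arg);	/* read all three back */
	printf("%hu %hu %hu\n", vals[0], vals[1], vals[2]);
	semctl(semid, 0, IPC_RMID);
	return 0;
}
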
1638 struct sem_array *sma;
1651 sma = container_of(ipcp, struct sem_array, sem_perm);
1653 err = security_sem_semctl(&sma->sem_perm, cmd);
1660 sem_lock(sma, NULL, -1);
1665 sem_lock(sma, NULL, -1);
1670 sma->sem_ctime = ktime_get_real_seconds();
1678 sem_unlock(sma, -1);
1951 struct sem_array *sma;
1971 sma = sem_obtain_object_check(ns, semid);
1972 if (IS_ERR(sma)) {
1974 return ERR_CAST(sma);
1977 nsems = sma->sem_nsems;
1978 if (!ipc_rcu_getref(&sma->sem_perm)) {
1988 ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1994 sem_lock_and_putref(sma);
1995 if (!ipc_valid_object(&sma->sem_perm)) {
1996 sem_unlock(sma, -1);
2018 ipc_assert_locked_object(&sma->sem_perm);
2019 list_add(&new->list_id, &sma->list_id);
2024 sem_unlock(sma, -1);
2032 struct sem_array *sma;
2107 sma = sem_obtain_object_check(ns, semid);
2108 if (IS_ERR(sma)) {
2110 error = PTR_ERR(sma);
2115 if (max >= sma->sem_nsems) {
2121 if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) {
2126 error = security_sem_semop(&sma->sem_perm, sops, nsops, alter);
2133 locknum = sem_lock(sma, sops, nsops);
2142 if (!ipc_valid_object(&sma->sem_perm)) {
2163 error = perform_atomic_semop(sma, &queue);
2172 do_smart_update(sma, sops, nsops, 1, &wake_q);
2174 set_semotime(sma, sops);
2177 sem_unlock(sma, locknum);
2193 int idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
2194 curr = &sma->sems[idx];
2197 if (sma->complex_count) {
2198 list_add_tail(&queue.list, &sma->pending_alter);
2207 if (!sma->complex_count) {
2208 merge_queues(sma);
2212 list_add_tail(&queue.list, &sma->pending_alter);
2214 list_add_tail(&queue.list, &sma->pending_const);
2217 sma->complex_count++;
2227 sem_unlock(sma, locknum);
2255 locknum = sem_lock(sma, sops, nsops);
2257 if (!ipc_valid_object(&sma->sem_perm)) {
2282 unlink_queue(sma, &queue);
2285 sem_unlock(sma, locknum);
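
do_semtimedop() above is the backend of semop(2)/semtimedop(2): a set that succeeds immediately goes through perform_atomic_semop() (line 2163) and do_smart_update(), while one that would block is queued on pending_alter/pending_const (lines 2193-2217) until update_queue() retries it. A caller exercising only the non-blocking path:

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>

int main(void)
{
	int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
	struct sembuf up   = { .sem_num = 0, .sem_op =  1, .sem_flg = 0 };
	struct sembuf down = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };

	if (semid < 0 || semop(semid, &up, 1) < 0 ||
	    semop(semid, &down, 1) < 0) {
		perror("semget/semop");
		return 1;
	}
	puts("up then down completed without blocking");
	semctl(semid, 0, IPC_RMID);
	return 0;
}
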
2389 struct sem_array *sma;
2420 sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
2422 if (IS_ERR(sma)) {
2427 sem_lock(sma, NULL, -1);
2429 if (!ipc_valid_object(&sma->sem_perm)) {
2430 sem_unlock(sma, -1);
2439 sem_unlock(sma, -1);
2445 ipc_assert_locked_object(&sma->sem_perm);
2453 for (i = 0; i < sma->sem_nsems; i++) {
2454 struct sem *semaphore = &sma->sems[i];
2480 do_smart_update(sma, NULL, 0, 1, &wake_q);
2481 sem_unlock(sma, -1);
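
exit_sem() above replays per-process SEM_UNDO adjustments when a task exits, so a holder that dies does not leave values permanently skewed. Demonstrable from userspace:

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
	struct sembuf up = { .sem_num = 0, .sem_op = 1, .sem_flg = SEM_UNDO };

	if (semid < 0)
		return 1;
	if (fork() == 0) {		/* child bumps the value and dies */
		semop(semid, &up, 1);
		_exit(0);		/* exit_sem() reverts the +1 */
	}
	wait(NULL);
	printf("value after child exit: %d\n", semctl(semid, 0, GETVAL));
	semctl(semid, 0, IPC_RMID);
	return 0;
}
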
2495 struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
2504 complexmode_enter(sma);
2506 sem_otime = get_semotime(sma);
2508 seq_printf(s, "%10d %10d %4o %10u %5u %5u %5u %5u %10llu %10llu\n", sma->sem_perm.key, sma->sem_perm.id,
2509 sma->sem_perm.mode, sma->sem_nsems, from_kuid_munged(user_ns, sma->sem_perm.uid),
2510 from_kgid_munged(user_ns, sma->sem_perm.gid), from_kuid_munged(user_ns, sma->sem_perm.cuid),
2511 from_kgid_munged(user_ns, sma->sem_perm.cgid), sem_otime, sma->sem_ctime);
2513 complexmode_tryleave(sma);
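
For reference, the columns emitted by the format string above for each /proc/sysvipc/sem row are: key, semid, perms (octal mode), nsems, uid, gid, cuid, cgid, sem_otime and sem_ctime (seconds since the epoch). Note that the show path briefly enters complex mode (line 2504) so get_semotime() can scan the per-semaphore timestamps consistently.
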