Lines Matching refs: sma
276 * @sma: semaphore array
281 static void unmerge_queues(struct sem_array *sma)
286 if (sma->complex_count)
293 list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
295 curr = &sma->sems[q->sops[0].sem_num];
299 INIT_LIST_HEAD(&sma->pending_alter);
304 * @sma: semaphore array
311 static void merge_queues(struct sem_array *sma)
314 for (i = 0; i < sma->sem_nsems; i++) {
315 struct sem *sem = &sma->sems[i];
317 list_splice_init(&sem->pending_alter, &sma->pending_alter);
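
unmerge_queues() and merge_queues() above move sleeping alter operations between the array-wide pending list and the per-semaphore lists as the array leaves or enters complex mode. A minimal sketch of the splice itself, assuming a cut-down intrusive list in the style of <linux/list.h> (the toy_* names are mine; this variant splices at the tail, like the kernel's list_splice_tail_init(), while merge_queues() uses the head-splicing list_splice_init()):

	/* Sketch only: a cut-down intrusive list, enough to show how
	 * merge_queues() drains every per-semaphore pending list into
	 * the array-wide one with one pointer-stitching splice each. */
	#include <stddef.h>

	struct list_head { struct list_head *next, *prev; };

	static void INIT_LIST_HEAD(struct list_head *h)
	{
		h->next = h->prev = h;
	}

	static int list_empty(const struct list_head *h)
	{
		return h->next == h;
	}

	/* Move all entries of @from onto the tail of @to; @from ends up
	 * empty again. */
	static void list_splice_tail_init(struct list_head *from,
					  struct list_head *to)
	{
		if (list_empty(from))
			return;
		from->next->prev = to->prev;
		from->prev->next = to;
		to->prev->next = from->next;
		to->prev = from->prev;
		INIT_LIST_HEAD(from);
	}

	#define TOY_NSEMS 4

	struct toy_sem   { struct list_head pending_alter; };
	struct toy_array {
		struct list_head pending_alter;	/* global queue */
		struct toy_sem sems[TOY_NSEMS];
	};

	/* merge_queues() analogue: entering complex mode collects every
	 * per-semaphore waiter onto the global queue. */
	static void toy_merge(struct toy_array *a)
	{
		for (int i = 0; i < TOY_NSEMS; i++)
			list_splice_tail_init(&a->sems[i].pending_alter,
					      &a->pending_alter);
	}
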
324 struct sem_array *sma = container_of(p, struct sem_array, sem_perm);
326 security_sem_free(&sma->sem_perm);
327 kvfree(sma);
334 static void complexmode_enter(struct sem_array *sma)
339 if (sma->use_global_lock > 0) {
345 sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
348 sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
350 for (i = 0; i < sma->sem_nsems; i++) {
351 sem = &sma->sems[i];
361 static void complexmode_tryleave(struct sem_array *sma)
363 if (sma->complex_count) {
369 if (sma->use_global_lock == 1) {
372 smp_store_release(&sma->use_global_lock, 0);
374 sma->use_global_lock--;
386 static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
394 ipc_lock_object(&sma->sem_perm);
397 complexmode_enter(sma);
408 idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
409 sem = &sma->sems[idx];
415 if (!sma->use_global_lock) {
423 if (!smp_load_acquire(&sma->use_global_lock)) {
431 ipc_lock_object(&sma->sem_perm);
433 if (sma->use_global_lock == 0) {
436 * sma->sem_perm.lock. Thus we must switch to locking
439 * sma->use_global_lock after we have acquired sem->lock:
440 * We own sma->sem_perm.lock, thus use_global_lock cannot
445 ipc_unlock_object(&sma->sem_perm);
457 static inline void sem_unlock(struct sem_array *sma, int locknum)
460 unmerge_queues(sma);
461 complexmode_tryleave(sma);
462 ipc_unlock_object(&sma->sem_perm);
464 struct sem *sem = &sma->sems[locknum];
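
complexmode_enter(), complexmode_tryleave() and the sem_lock()/sem_unlock() lines above implement the array's two locking modes: while use_global_lock is non-zero every operation takes the array-wide lock, and once USE_GLOBAL_LOCK_HYSTERESIS consecutive simple operations complete without a complex one, the counter is published as zero with the smp_store_release() at line 372 and single-semaphore operations switch to their per-semaphore lock, re-checking the counter with the pairing smp_load_acquire() at line 423. The hysteresis avoids bouncing between modes when complex and simple operations interleave. A loose userspace analogue of that shape, assuming C11 atomics and pthreads (toy_* names are mine; pthread mutexes stand in for the kernel spinlocks and do not reproduce the kernel's exact barrier reasoning):

	/* Sketch, not kernel code: the use_global_lock hysteresis
	 * rebuilt with C11 atomics and pthread mutexes. */
	#include <pthread.h>
	#include <stdatomic.h>

	#define TOY_NSEMS   8
	#define HYSTERESIS 10	/* stand-in for USE_GLOBAL_LOCK_HYSTERESIS */

	struct toy_array {
		pthread_mutex_t global_lock;	/* sem_perm.lock analogue */
		_Atomic int use_global_lock;	/* > 0: all ops go global */
		pthread_mutex_t sem_lock[TOY_NSEMS];
	};

	/* Multi-semaphore operation: force global mode and re-arm the
	 * decay counter, as complexmode_enter() does. */
	static void toy_complex_lock(struct toy_array *a)
	{
		pthread_mutex_lock(&a->global_lock);
		if (atomic_load_explicit(&a->use_global_lock,
					 memory_order_relaxed) > 0) {
			/* Already in global mode: just reset the counter. */
			atomic_store_explicit(&a->use_global_lock, HYSTERESIS,
					      memory_order_relaxed);
			return;
		}
		atomic_store_explicit(&a->use_global_lock, HYSTERESIS,
				      memory_order_seq_cst);
		/* Wait out fast-path holders that slipped in before the
		 * store was visible, as complexmode_enter() does by
		 * briefly taking every per-semaphore lock (lines 350-351). */
		for (int i = 0; i < TOY_NSEMS; i++) {
			pthread_mutex_lock(&a->sem_lock[i]);
			pthread_mutex_unlock(&a->sem_lock[i]);
		}
	}

	/* Single-semaphore lock, mirroring the sem_lock() fast path.
	 * Returns the per-semaphore index locked, or -1 for global. */
	static int toy_simple_lock(struct toy_array *a, int semnum)
	{
		if (atomic_load_explicit(&a->use_global_lock,
					 memory_order_relaxed) == 0) {
			pthread_mutex_lock(&a->sem_lock[semnum]);
			/* Acquire pairs with the release below: still zero
			 * means no complex op switched modes under us. */
			if (atomic_load_explicit(&a->use_global_lock,
						 memory_order_acquire) == 0)
				return semnum;
			pthread_mutex_unlock(&a->sem_lock[semnum]);	/* lost */
		}
		/* Slow path: global mode. (The kernel re-checks after
		 * acquiring the global lock and downgrades to the
		 * per-semaphore lock if the mode flipped meanwhile,
		 * lines 431-445; elided here.) */
		pthread_mutex_lock(&a->global_lock);
		return -1;
	}

	static void toy_unlock(struct toy_array *a, int locknum)
	{
		if (locknum >= 0) {
			pthread_mutex_unlock(&a->sem_lock[locknum]);
			return;
		}
		/* Global unlock decays the counter; the final step
		 * publishes 0 with release semantics, as
		 * complexmode_tryleave() does. (The kernel also stays in
		 * global mode while complex_count > 0.) */
		int g = atomic_load_explicit(&a->use_global_lock,
					     memory_order_relaxed);
		if (g == 1)
			atomic_store_explicit(&a->use_global_lock, 0,
					      memory_order_release);
		else
			atomic_store_explicit(&a->use_global_lock, g - 1,
					      memory_order_relaxed);
		pthread_mutex_unlock(&a->global_lock);
	}
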
496 static inline void sem_lock_and_putref(struct sem_array *sma)
498 sem_lock(sma, NULL, -1);
499 ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
509 struct sem_array *sma;
511 if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0]))
514 sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL_ACCOUNT);
515 if (unlikely(!sma))
518 return sma;
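
sem_alloc() (lines 509-518 above) guards the flexible-array allocation against integer overflow before sizing it; struct_size() then performs the checked sizeof(*sma) + nsems * sizeof(sma->sems[0]) arithmetic, and kvzalloc() falls back to vmalloc for large arrays. A minimal userspace sketch of the same guard, assuming plain libc (toy_* names are mine):

	/* Sketch: the overflow guard in sem_alloc(), redone with libc. */
	#include <limits.h>
	#include <stdlib.h>

	struct toy_sem { int semval; };
	struct toy_array {
		int sem_nsems;
		struct toy_sem sems[];	/* flexible array member */
	};

	static struct toy_array *toy_alloc(size_t nsems)
	{
		/* Same check as line 511: refuse counts whose size
		 * computation would overflow. */
		if (nsems > (INT_MAX - sizeof(struct toy_array)) /
			    sizeof(struct toy_sem))
			return NULL;

		/* calloc zeroes, like kvzalloc(). */
		return calloc(1, sizeof(struct toy_array) +
				 nsems * sizeof(struct toy_sem));
	}
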
531 struct sem_array *sma;
542 sma = sem_alloc(nsems);
543 if (!sma)
546 sma->sem_perm.mode = (semflg & S_IRWXUGO);
547 sma->sem_perm.key = key;
549 sma->sem_perm.security = NULL;
550 retval = security_sem_alloc(&sma->sem_perm);
552 kvfree(sma);
557 INIT_LIST_HEAD(&sma->sems[i].pending_alter);
558 INIT_LIST_HEAD(&sma->sems[i].pending_const);
559 spin_lock_init(&sma->sems[i].lock);
562 sma->complex_count = 0;
563 sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
564 INIT_LIST_HEAD(&sma->pending_alter);
565 INIT_LIST_HEAD(&sma->pending_const);
566 INIT_LIST_HEAD(&sma->list_id);
567 sma->sem_nsems = nsems;
568 sma->sem_ctime = ktime_get_real_seconds();
570 /* ipc_addid() locks sma upon success. */
571 retval = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
573 ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
578 sem_unlock(sma, -1);
581 return sma->sem_perm.id;
590 struct sem_array *sma;
592 sma = container_of(ipcp, struct sem_array, sem_perm);
593 if (params->u.nsems > sma->sem_nsems)
629 * @sma: semaphore array
643 static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
657 int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);
658 curr = &sma->sems[idx];
685 ipc_update_pid(&sma->sems[sop->sem_num].sempid, pid);
707 sma->sems[sop->sem_num].semval -= sem_op;
716 static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
729 return perform_atomic_semop_slow(sma, q);
738 int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);
740 curr = &sma->sems[idx];
764 curr = &sma->sems[sop->sem_num];
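
perform_atomic_semop() realizes the all-or-nothing semop semantics in two passes: first every sop is validated against the current values without touching state, then the whole set is committed. Operation sets that name the same semaphore twice fall back to perform_atomic_semop_slow() (line 729), which applies eagerly and rolls back on failure. A sketch of the two-pass shape, assuming no duplicate semaphore indexes (toy_* names are mine; return conventions follow the kernel's 0 / 1-would-sleep / negative-errno):

	/* Sketch of the check-then-commit shape of perform_atomic_semop():
	 * pass 1 validates every sop without touching state, pass 2
	 * applies them all, so the array never holds a partial update. */
	#include <errno.h>

	#define SEMVMX 32767		/* kernel maximum semaphore value */

	struct toy_op { unsigned short num; short op; };	/* like sembuf */

	static int toy_semop(int *semval, const struct toy_op *sops,
			     int nsops)
	{
		int i;

		/* Pass 1: dry run. */
		for (i = 0; i < nsops; i++) {
			int result = semval[sops[i].num] + sops[i].op;

			if (sops[i].op == 0 && semval[sops[i].num] != 0)
				return 1;	/* wait-for-zero must block */
			if (result < 0)
				return 1;	/* decrement would block */
			if (result > SEMVMX)
				return -ERANGE;
			/* (The kernel also range-checks the SEM_UNDO
			 * adjustment here; elided.) */
		}

		/* Pass 2: commit; cannot fail once pass 1 succeeded. */
		for (i = 0; i < nsops; i++)
			semval[sops[i].num] += sops[i].op;

		return 0;
	}
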
797 static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
801 sma->complex_count--;
804 /** check_restart(sma, q)
805 * @sma: semaphore array
814 static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
817 if (!list_empty(&sma->pending_alter))
840 * @sma: semaphore array.
852 static int wake_const_ops(struct sem_array *sma, int semnum,
860 pending_list = &sma->pending_const;
862 pending_list = &sma->sems[semnum].pending_const;
865 int error = perform_atomic_semop(sma, q);
870 unlink_queue(sma, q);
882 * @sma: semaphore array
891 static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
903 if (sma->sems[num].semval == 0) {
905 semop_completed |= wake_const_ops(sma, num, wake_q);
913 for (i = 0; i < sma->sem_nsems; i++) {
914 if (sma->sems[i].semval == 0) {
916 semop_completed |= wake_const_ops(sma, i, wake_q);
925 semop_completed |= wake_const_ops(sma, -1, wake_q);
933 * @sma: semaphore array.
947 static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *wake_q)
954 pending_list = &sma->pending_alter;
956 pending_list = &sma->sems[semnum].pending_alter;
969 if (semnum != -1 && sma->sems[semnum].semval == 0)
972 error = perform_atomic_semop(sma, q);
978 unlink_queue(sma, q);
984 do_smart_wakeup_zero(sma, q->sops, q->nsops, wake_q);
985 restart = check_restart(sma, q);
997 * @sma: semaphore array
1003 static void set_semotime(struct sem_array *sma, struct sembuf *sops)
1006 sma->sems[0].sem_otime = ktime_get_real_seconds();
1008 sma->sems[sops[0].sem_num].sem_otime =
1015 * @sma: semaphore array
1027 static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
1032 otime |= do_smart_wakeup_zero(sma, sops, nsops, wake_q);
1034 if (!list_empty(&sma->pending_alter)) {
1036 otime |= update_queue(sma, -1, wake_q);
1043 for (i = 0; i < sma->sem_nsems; i++)
1044 otime |= update_queue(sma, i, wake_q);
1057 otime |= update_queue(sma,
1064 set_semotime(sma, sops);
1070 static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
1104 static int count_semcnt(struct sem_array *sma, ushort semnum,
1114 l = &sma->sems[semnum].pending_const;
1116 l = &sma->sems[semnum].pending_alter;
1126 list_for_each_entry(q, &sma->pending_alter, list) {
1127 semcnt += check_qop(sma, semnum, q, count_zero);
1130 list_for_each_entry(q, &sma->pending_const, list) {
1131 semcnt += check_qop(sma, semnum, q, count_zero);
1145 struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
1150 ipc_assert_locked_object(&sma->sem_perm);
1151 list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
1161 list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
1162 unlink_queue(sma, q);
1166 list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
1167 unlink_queue(sma, q);
1170 for (i = 0; i < sma->sem_nsems; i++) {
1171 struct sem *sem = &sma->sems[i];
1173 unlink_queue(sma, q);
1177 unlink_queue(sma, q);
1184 sem_rmid(ns, sma);
1185 sem_unlock(sma, -1);
1189 ns->used_sems -= sma->sem_nsems;
1190 ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1217 static time64_t get_semotime(struct sem_array *sma)
1222 res = sma->sems[0].sem_otime;
1223 for (i = 1; i < sma->sem_nsems; i++) {
1224 time64_t to = sma->sems[i].sem_otime;
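
set_semotime() stores sem_otime into sems[0] for complex operations and into the first-referenced semaphore's slot otherwise, so the hot path never writes a single shared timestamp from every CPU; get_semotime() (lines 1217-1224 above) reconstructs the array-wide value by taking the maximum over the slots. A short sketch of that read side (toy_* names are mine):

	/* Sketch: sem_otime is scattered over per-semaphore slots to
	 * avoid cache-line ping-pong; readers take the maximum. */
	#include <time.h>

	struct toy_sem   { time_t otime; };
	struct toy_array { int nsems; struct toy_sem sems[8]; };

	static time_t toy_get_otime(const struct toy_array *a)
	{
		time_t res = a->sems[0].otime;

		for (int i = 1; i < a->nsems; i++)
			if (a->sems[i].otime > res)
				res = a->sems[i].otime;
		return res;
	}
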
1235 struct sem_array *sma;
1243 sma = sem_obtain_object(ns, semid);
1244 if (IS_ERR(sma)) {
1245 err = PTR_ERR(sma);
1249 sma = sem_obtain_object_check(ns, semid);
1250 if (IS_ERR(sma)) {
1251 err = PTR_ERR(sma);
1258 audit_ipc_obj(&sma->sem_perm);
1261 if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
1265 err = security_sem_semctl(&sma->sem_perm, cmd);
1269 ipc_lock_object(&sma->sem_perm);
1271 if (!ipc_valid_object(&sma->sem_perm)) {
1272 ipc_unlock_object(&sma->sem_perm);
1277 kernel_to_ipc64_perm(&sma->sem_perm, &semid64->sem_perm);
1278 semotime = get_semotime(sma);
1280 semid64->sem_ctime = sma->sem_ctime;
1283 semid64->sem_ctime_high = sma->sem_ctime >> 32;
1285 semid64->sem_nsems = sma->sem_nsems;
1298 err = sma->sem_perm.id;
1300 ipc_unlock_object(&sma->sem_perm);
1345 struct sem_array *sma;
1354 sma = sem_obtain_object_check(ns, semid);
1355 if (IS_ERR(sma)) {
1357 return PTR_ERR(sma);
1360 if (semnum < 0 || semnum >= sma->sem_nsems) {
1366 if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
1371 err = security_sem_semctl(&sma->sem_perm, SETVAL);
1377 sem_lock(sma, NULL, -1);
1379 if (!ipc_valid_object(&sma->sem_perm)) {
1380 sem_unlock(sma, -1);
1385 semnum = array_index_nospec(semnum, sma->sem_nsems);
1386 curr = &sma->sems[semnum];
1388 ipc_assert_locked_object(&sma->sem_perm);
1389 list_for_each_entry(un, &sma->list_id, list_id)
1394 sma->sem_ctime = ktime_get_real_seconds();
1396 do_smart_update(sma, NULL, 0, 0, &wake_q);
1397 sem_unlock(sma, -1);
1406 struct sem_array *sma;
1414 sma = sem_obtain_object_check(ns, semid);
1415 if (IS_ERR(sma)) {
1417 return PTR_ERR(sma);
1420 nsems = sma->sem_nsems;
1423 if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
1426 err = security_sem_semctl(&sma->sem_perm, cmd);
1437 sem_lock(sma, NULL, -1);
1438 if (!ipc_valid_object(&sma->sem_perm)) {
1443 if (!ipc_rcu_getref(&sma->sem_perm)) {
1447 sem_unlock(sma, -1);
1452 ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1457 sem_lock_and_putref(sma);
1458 if (!ipc_valid_object(&sma->sem_perm)) {
1463 for (i = 0; i < sma->sem_nsems; i++)
1464 sem_io[i] = sma->sems[i].semval;
1465 sem_unlock(sma, -1);
1477 if (!ipc_rcu_getref(&sma->sem_perm)) {
1487 ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1493 ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1500 ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1506 sem_lock_and_putref(sma);
1507 if (!ipc_valid_object(&sma->sem_perm)) {
1513 sma->sems[i].semval = sem_io[i];
1514 ipc_update_pid(&sma->sems[i].sempid, task_tgid(current));
1517 ipc_assert_locked_object(&sma->sem_perm);
1518 list_for_each_entry(un, &sma->list_id, list_id) {
1522 sma->sem_ctime = ktime_get_real_seconds();
1524 do_smart_update(sma, NULL, 0, 0, &wake_q);
1534 sem_lock(sma, NULL, -1);
1535 if (!ipc_valid_object(&sma->sem_perm)) {
1541 curr = &sma->sems[semnum];
1551 err = count_semcnt(sma, semnum, 0);
1554 err = count_semcnt(sma, semnum, 1);
1559 sem_unlock(sma, -1);
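
For arrays too large for the on-stack buffer, GETALL and SETALL above cannot call a sleeping allocator while holding the array lock, so they take a reference (ipc_rcu_getref(), line 1443), drop the lock, allocate, then relock via sem_lock_and_putref() and re-check ipc_valid_object() in case the array was removed in the window. A sketch of that dance, assuming pthreads plus a manual refcount (toy_* names are mine; the kernel defers the actual free through RCU, which this analogue approximates by dropping the lock before the final put):

	/* Sketch of the getref / unlock / allocate / relock-and-revalidate
	 * pattern used by GETALL and SETALL for large arrays. */
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdlib.h>

	struct toy_obj {
		pthread_mutex_t lock;
		_Atomic int refcount;
		int valid;	/* cleared when the object is removed */
		int nsems;	/* fixed at creation, like sem_nsems */
		unsigned short *vals;
	};

	static void toy_put(struct toy_obj *o)
	{
		if (atomic_fetch_sub(&o->refcount, 1) == 1)
			free(o);	/* last reference frees the object */
	}

	/* Snapshot all values; returns a malloc'd buffer or NULL. */
	static unsigned short *toy_getall(struct toy_obj *o)
	{
		unsigned short *buf;

		pthread_mutex_lock(&o->lock);
		if (!o->valid) {
			pthread_mutex_unlock(&o->lock);
			return NULL;
		}
		atomic_fetch_add(&o->refcount, 1);  /* ipc_rcu_getref() */
		pthread_mutex_unlock(&o->lock);

		/* Safe to block in the allocator: no lock held, only
		 * a reference keeping the object alive. */
		buf = malloc(o->nsems * sizeof(*buf));

		pthread_mutex_lock(&o->lock);  /* sem_lock_and_putref() */
		if (!o->valid || !buf) {       /* removed (or OOM)? */
			pthread_mutex_unlock(&o->lock);
			toy_put(o);
			free(buf);
			return NULL;
		}
		for (int i = 0; i < o->nsems; i++)  /* lines 1463-1464 */
			buf[i] = o->vals[i];
		pthread_mutex_unlock(&o->lock);
		toy_put(o);
		return buf;
	}
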
1603 struct sem_array *sma;
1617 sma = container_of(ipcp, struct sem_array, sem_perm);
1619 err = security_sem_semctl(&sma->sem_perm, cmd);
1625 sem_lock(sma, NULL, -1);
1630 sem_lock(sma, NULL, -1);
1634 sma->sem_ctime = ktime_get_real_seconds();
1642 sem_unlock(sma, -1);
1907 struct sem_array *sma;
1925 sma = sem_obtain_object_check(ns, semid);
1926 if (IS_ERR(sma)) {
1928 return ERR_CAST(sma);
1931 nsems = sma->sem_nsems;
1932 if (!ipc_rcu_getref(&sma->sem_perm)) {
1942 ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1948 sem_lock_and_putref(sma);
1949 if (!ipc_valid_object(&sma->sem_perm)) {
1950 sem_unlock(sma, -1);
1972 ipc_assert_locked_object(&sma->sem_perm);
1973 list_add(&new->list_id, &sma->list_id);
1978 sem_unlock(sma, -1);
1987 struct sem_array *sma;
2058 sma = sem_obtain_object_check(ns, semid);
2059 if (IS_ERR(sma)) {
2061 error = PTR_ERR(sma);
2066 if (max >= sma->sem_nsems) {
2072 if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) {
2077 error = security_sem_semop(&sma->sem_perm, sops, nsops, alter);
2084 locknum = sem_lock(sma, sops, nsops);
2093 if (!ipc_valid_object(&sma->sem_perm))
2112 error = perform_atomic_semop(sma, &queue);
2121 do_smart_update(sma, sops, nsops, 1, &wake_q);
2123 set_semotime(sma, sops);
2125 sem_unlock(sma, locknum);
2140 int idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
2141 curr = &sma->sems[idx];
2144 if (sma->complex_count) {
2146 &sma->pending_alter);
2156 if (!sma->complex_count)
2157 merge_queues(sma);
2160 list_add_tail(&queue.list, &sma->pending_alter);
2162 list_add_tail(&queue.list, &sma->pending_const);
2164 sma->complex_count++;
2174 sem_unlock(sma, locknum);
2202 locknum = sem_lock(sma, sops, nsops);
2204 if (!ipc_valid_object(&sma->sem_perm))
2226 unlink_queue(sma, &queue);
2229 sem_unlock(sma, locknum);
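
When do_semtimedop() must sleep (lines 2140-2164 above), where the queue entry lands decides later wakeup cost: a single-semaphore alter operation queues on its semaphore's own pending_alter list so update_queue() scans stay local, unless complex operations are in flight, in which case it queues on the array-wide list behind them to keep ordering; a complex sleeper first pulls the per-semaphore queues back onto the global lists (merge_queues()) and bumps complex_count. A sketch of that placement decision (toy_* names are mine; toy_merge() is stubbed here, its full body is the splice sketch near the top of this listing):

	/* Sketch of the enqueue placement in do_semtimedop()'s sleep
	 * path. Minimal intrusive list, same shape as <linux/list.h>. */
	struct list_head { struct list_head *next, *prev; };

	static void list_add_tail(struct list_head *n, struct list_head *h)
	{
		n->prev = h->prev;
		n->next = h;
		h->prev->next = n;
		h->prev = n;
	}

	struct toy_op    { unsigned short num; short op; };
	struct toy_queue {
		struct list_head list;
		struct toy_op *sops;
		int nsops;
		int alter;	/* does any sop modify a value? */
	};
	struct toy_sem   { struct list_head pending_alter, pending_const; };
	struct toy_array {
		struct list_head pending_alter, pending_const;
		int complex_count;
		struct toy_sem sems[8];
	};

	/* merge_queues() analogue: would splice every per-semaphore
	 * pending_alter list onto a->pending_alter; stubbed to keep
	 * this block short (see the earlier splice sketch). */
	static void toy_merge(struct toy_array *a) { (void)a; }

	static void toy_sleep_on(struct toy_array *a, struct toy_queue *q)
	{
		if (q->nsops == 1) {
			struct toy_sem *s = &a->sems[q->sops[0].num];

			if (!q->alter)		/* wait-for-zero */
				list_add_tail(&q->list, &s->pending_const);
			else if (a->complex_count)
				/* Complex ops pending: queue globally
				 * behind them to preserve ordering. */
				list_add_tail(&q->list, &a->pending_alter);
			else
				list_add_tail(&q->list, &s->pending_alter);
		} else {
			/* First complex sleeper re-merges the queues,
			 * as lines 2156-2157 do. */
			if (!a->complex_count)
				toy_merge(a);
			list_add_tail(&q->list, q->alter ?
				      &a->pending_alter : &a->pending_const);
			a->complex_count++;
		}
	}
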
2329 struct sem_array *sma;
2361 sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
2363 if (IS_ERR(sma)) {
2368 sem_lock(sma, NULL, -1);
2370 if (!ipc_valid_object(&sma->sem_perm)) {
2371 sem_unlock(sma, -1);
2380 sem_unlock(sma, -1);
2386 ipc_assert_locked_object(&sma->sem_perm);
2394 for (i = 0; i < sma->sem_nsems; i++) {
2395 struct sem *semaphore = &sma->sems[i];
2419 do_smart_update(sma, NULL, 0, 1, &wake_q);
2420 sem_unlock(sma, -1);
2434 struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
2443 complexmode_enter(sma);
2445 sem_otime = get_semotime(sma);
2449 sma->sem_perm.key,
2450 sma->sem_perm.id,
2451 sma->sem_perm.mode,
2452 sma->sem_nsems,
2453 from_kuid_munged(user_ns, sma->sem_perm.uid),
2454 from_kgid_munged(user_ns, sma->sem_perm.gid),
2455 from_kuid_munged(user_ns, sma->sem_perm.cuid),
2456 from_kgid_munged(user_ns, sma->sem_perm.cgid),
2458 sma->sem_ctime);
2460 complexmode_tryleave(sma);