Lines matching refs: sma
(all references to the sma identifier in the kernel's SysV semaphore implementation, ipc/sem.c; the number at the start of each line is its line number in that file)

278  * @sma: semaphore array
283 static void unmerge_queues(struct sem_array *sma)
288 if (sma->complex_count)
295 list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
297 curr = &sma->sems[q->sops[0].sem_num];
301 INIT_LIST_HEAD(&sma->pending_alter);
306 * @sma: semaphore array
313 static void merge_queues(struct sem_array *sma)
316 for (i = 0; i < sma->sem_nsems; i++) {
317 struct sem *sem = &sma->sems[i];
319 list_splice_init(&sem->pending_alter, &sma->pending_alter);
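
Taken together, the unmerge_queues() fragments above show how complex-mode waiters are redistributed back onto per-semaphore lists once complex_count drops to zero; merge_queues() (lines 313-319) performs the inverse splice. A sketch of how the unmerge fragments fit together (locals and the list_add_tail() call are assumed from the surrounding file; the caller must hold sma->sem_perm.lock):

static void unmerge_queues(struct sem_array *sma)
{
	struct sem_queue *q, *tq;

	/* Complex ops still pending: all waiters must stay global. */
	if (sma->complex_count)
		return;
	/*
	 * Each simple operation queued on the global list is moved to
	 * the list of the (single) semaphore it targets.
	 */
	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		struct sem *curr;

		curr = &sma->sems[q->sops[0].sem_num];
		list_add_tail(&q->list, &curr->pending_alter);
	}
	INIT_LIST_HEAD(&sma->pending_alter);
}
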
326 struct sem_array *sma = container_of(p, struct sem_array, sem_perm);
328 security_sem_free(&sma->sem_perm);
329 kvfree(sma);
336 static void complexmode_enter(struct sem_array *sma)
341 if (sma->use_global_lock > 0) {
347 WRITE_ONCE(sma->use_global_lock, USE_GLOBAL_LOCK_HYSTERESIS);
350 WRITE_ONCE(sma->use_global_lock, USE_GLOBAL_LOCK_HYSTERESIS);
352 for (i = 0; i < sma->sem_nsems; i++) {
353 sem = &sma->sems[i];
363 static void complexmode_tryleave(struct sem_array *sma)
365 if (sma->complex_count) {
371 if (sma->use_global_lock == 1) {
374 smp_store_release(&sma->use_global_lock, 0);
376 WRITE_ONCE(sma->use_global_lock,
377 sma->use_global_lock-1);
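
The two functions above implement the global-lock hysteresis: complexmode_enter() pins the array to the global lock and resets the countdown to USE_GLOBAL_LOCK_HYSTERESIS, while complexmode_tryleave() counts it back down and only releases to per-semaphore locking once the countdown reaches zero. A hedged reconstruction of the leave side (comments mine; the pairing acquire load is in sem_lock() below):

static void complexmode_tryleave(struct sem_array *sma)
{
	if (sma->complex_count) {
		/* Complex ops are still sleeping: stay in global mode. */
		return;
	}
	if (sma->use_global_lock == 1) {
		/*
		 * Final countdown step: the release store pairs with
		 * the smp_load_acquire() in sem_lock()'s fast path.
		 */
		smp_store_release(&sma->use_global_lock, 0);
	} else {
		WRITE_ONCE(sma->use_global_lock,
			   sma->use_global_lock - 1);
	}
}
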
389 static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
397 ipc_lock_object(&sma->sem_perm);
400 complexmode_enter(sma);
411 idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
412 sem = &sma->sems[idx];
418 if (!READ_ONCE(sma->use_global_lock)) {
426 if (!smp_load_acquire(&sma->use_global_lock)) {
434 ipc_lock_object(&sma->sem_perm);
436 if (sma->use_global_lock == 0) {
439 * sma->sem_perm.lock. Thus we must switch to locking
442 * sma->use_global_lock after we have acquired sem->lock:
443 * We own sma->sem_perm.lock, thus use_global_lock cannot
448 ipc_unlock_object(&sma->sem_perm);
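
sem_lock() is the heart of the scalability scheme: a single-sop operation tries the per-semaphore spinlock and only falls back to the global sma->sem_perm.lock while use_global_lock is nonzero. A condensed sketch of the control flow reconstructed from the fragments above (the SEM_GLOBAL_LOCK return value and the exact comments are assumed from the surrounding file):

static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
			   int nsops)
{
	struct sem *sem;
	int idx;

	if (nsops != 1) {
		/* Complex operation: take the global array lock. */
		ipc_lock_object(&sma->sem_perm);
		complexmode_enter(sma);	/* shut down the simple-op fast path */
		return SEM_GLOBAL_LOCK;
	}

	idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
	sem = &sma->sems[idx];

	/* Optimistic, unordered peek at use_global_lock. */
	if (!READ_ONCE(sma->use_global_lock)) {
		spin_lock(&sem->lock);
		/* Recheck with acquire semantics after taking the lock. */
		if (!smp_load_acquire(&sma->use_global_lock))
			return sops->sem_num;	/* fast path: per-sem lock */
		spin_unlock(&sem->lock);
	}

	/* Slow path: take the global lock, then re-evaluate. */
	ipc_lock_object(&sma->sem_perm);
	if (sma->use_global_lock == 0) {
		/*
		 * Global mode ended while we waited for sma->sem_perm.lock:
		 * downgrade to the per-semaphore lock and drop the global one.
		 */
		spin_lock(&sem->lock);
		ipc_unlock_object(&sma->sem_perm);
		return sops->sem_num;
	}
	return SEM_GLOBAL_LOCK;
}
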
460 static inline void sem_unlock(struct sem_array *sma, int locknum)
463 unmerge_queues(sma);
464 complexmode_tryleave(sma);
465 ipc_unlock_object(&sma->sem_perm);
467 struct sem *sem = &sma->sems[locknum];
499 static inline void sem_lock_and_putref(struct sem_array *sma)
501 sem_lock(sma, NULL, -1);
502 ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
512 struct sem_array *sma;
514 if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0]))
517 sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL_ACCOUNT);
518 if (unlikely(!sma))
521 return sma;
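
sem_alloc() shows the standard kernel pattern for sizing a structure with a flexible array member: an explicit overflow guard followed by struct_size(). A sketch assembling the fragments above (comments mine):

static struct sem_array *sem_alloc(size_t nsems)
{
	struct sem_array *sma;

	/* Reject counts whose struct_size() would overflow an int. */
	if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0]))
		return NULL;

	/* Zeroed, memcg-accounted allocation: header plus nsems sems. */
	sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL_ACCOUNT);
	if (unlikely(!sma))
		return NULL;

	return sma;
}
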
534 struct sem_array *sma;
545 sma = sem_alloc(nsems);
546 if (!sma)
549 sma->sem_perm.mode = (semflg & S_IRWXUGO);
550 sma->sem_perm.key = key;
552 sma->sem_perm.security = NULL;
553 retval = security_sem_alloc(&sma->sem_perm);
555 kvfree(sma);
560 INIT_LIST_HEAD(&sma->sems[i].pending_alter);
561 INIT_LIST_HEAD(&sma->sems[i].pending_const);
562 spin_lock_init(&sma->sems[i].lock);
565 sma->complex_count = 0;
566 sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
567 INIT_LIST_HEAD(&sma->pending_alter);
568 INIT_LIST_HEAD(&sma->pending_const);
569 INIT_LIST_HEAD(&sma->list_id);
570 sma->sem_nsems = nsems;
571 sma->sem_ctime = ktime_get_real_seconds();
573 /* ipc_addid() locks sma upon success. */
574 retval = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
576 ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
581 sem_unlock(sma, -1);
584 return sma->sem_perm.id;
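
From userspace, this entire newary() path is driven by semget(). A minimal, runnable illustration (not from the source file) that exercises it:

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>

int main(void)
{
	/* IPC_PRIVATE forces allocation of a fresh array via newary(). */
	int semid = semget(IPC_PRIVATE, 3, IPC_CREAT | 0600);

	if (semid < 0) {
		perror("semget");
		return 1;
	}
	printf("semaphore set id: %d\n", semid);

	/* Clean up so the example leaves no SysV object behind. */
	semctl(semid, 0, IPC_RMID);
	return 0;
}
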
593 struct sem_array *sma;
595 sma = container_of(ipcp, struct sem_array, sem_perm);
596 if (params->u.nsems > sma->sem_nsems)
632 * @sma: semaphore array
646 static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
660 int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);
661 curr = &sma->sems[idx];
688 ipc_update_pid(&sma->sems[sop->sem_num].sempid, pid);
710 sma->sems[sop->sem_num].semval -= sem_op;
719 static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
732 return perform_atomic_semop_slow(sma, q);
741 int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);
743 curr = &sma->sems[idx];
767 curr = &sma->sems[sop->sem_num];
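
perform_atomic_semop() and its _slow variant implement "all or nothing": a first pass proves every sembuf can complete, a second pass commits, so an array is never left half-updated. A simplified sketch of that shape (the name semop_check_then_commit is mine; SEM_UNDO accounting, the SEMVMX range check, and the conditions for falling back to the _slow variant are omitted):

static int semop_check_then_commit(struct sem_array *sma, struct sem_queue *q)
{
	struct sembuf *sop;
	struct sem *curr;
	int idx;

	/* Pass 1: verify every operation, modifying nothing. */
	for (sop = q->sops; sop < q->sops + q->nsops; sop++) {
		idx = array_index_nospec(sop->sem_num, sma->sem_nsems);
		curr = &sma->sems[idx];
		if (sop->sem_op == 0 && curr->semval != 0)
			goto would_block;	/* wait-for-zero unsatisfied */
		if (curr->semval + sop->sem_op < 0)
			goto would_block;	/* would drive semval negative */
	}

	/* Pass 2: commit; nothing can fail any more. */
	for (sop = q->sops; sop < q->sops + q->nsops; sop++) {
		curr = &sma->sems[sop->sem_num];
		curr->semval += sop->sem_op;
		ipc_update_pid(&curr->sempid, q->pid);
	}
	return 0;

would_block:
	/* 1 means "caller must sleep"; IPC_NOWAIT turns that into -EAGAIN. */
	return (sop->sem_flg & IPC_NOWAIT) ? -EAGAIN : 1;
}
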
799 static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
803 sma->complex_count--;
806 /** check_restart(sma, q)
807 * @sma: semaphore array
816 static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
819 if (!list_empty(&sma->pending_alter))
842 * @sma: semaphore array.
854 static int wake_const_ops(struct sem_array *sma, int semnum,
862 pending_list = &sma->pending_const;
864 pending_list = &sma->sems[semnum].pending_const;
867 int error = perform_atomic_semop(sma, q);
872 unlink_queue(sma, q);
884 * @sma: semaphore array
893 static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
905 if (sma->sems[num].semval == 0) {
907 semop_completed |= wake_const_ops(sma, num, wake_q);
915 for (i = 0; i < sma->sem_nsems; i++) {
916 if (sma->sems[i].semval == 0) {
918 semop_completed |= wake_const_ops(sma, i, wake_q);
927 semop_completed |= wake_const_ops(sma, -1, wake_q);
935 * @sma: semaphore array.
949 static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *wake_q)
956 pending_list = &sma->pending_alter;
958 pending_list = &sma->sems[semnum].pending_alter;
971 if (semnum != -1 && sma->sems[semnum].semval == 0)
974 error = perform_atomic_semop(sma, q);
980 unlink_queue(sma, q);
986 do_smart_wakeup_zero(sma, q->sops, q->nsops, wake_q);
987 restart = check_restart(sma, q);
999 * @sma: semaphore array
1005 static void set_semotime(struct sem_array *sma, struct sembuf *sops)
1008 sma->sems[0].sem_otime = ktime_get_real_seconds();
1010 sma->sems[sops[0].sem_num].sem_otime =
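
set_semotime() deliberately stores the update time inside one struct sem (slot 0, or the semaphore actually operated on) rather than in sem_array, so concurrent simple ops on different semaphores do not fight over one hot cacheline. The function is short enough to reconstruct from the fragments:

static void set_semotime(struct sem_array *sma, struct sembuf *sops)
{
	if (sops == NULL) {
		sma->sems[0].sem_otime = ktime_get_real_seconds();
	} else {
		sma->sems[sops[0].sem_num].sem_otime =
			ktime_get_real_seconds();
	}
}
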
1017 * @sma: semaphore array
1029 static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
1034 otime |= do_smart_wakeup_zero(sma, sops, nsops, wake_q);
1036 if (!list_empty(&sma->pending_alter)) {
1038 otime |= update_queue(sma, -1, wake_q);
1045 for (i = 0; i < sma->sem_nsems; i++)
1046 otime |= update_queue(sma, i, wake_q);
1059 otime |= update_queue(sma,
1066 set_semotime(sma, sops);
1072 static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
1106 static int count_semcnt(struct sem_array *sma, ushort semnum,
1116 l = &sma->sems[semnum].pending_const;
1118 l = &sma->sems[semnum].pending_alter;
1128 list_for_each_entry(q, &sma->pending_alter, list) {
1129 semcnt += check_qop(sma, semnum, q, count_zero);
1132 list_for_each_entry(q, &sma->pending_const, list) {
1133 semcnt += check_qop(sma, semnum, q, count_zero);
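
count_semcnt() backs the semctl() GETNCNT/GETZCNT queries: with count_zero set it counts wait-for-zero sleepers, otherwise tasks waiting for the value to increase; the global pending lists are walked via check_qop() because a complex op may reference any semaphore. A runnable userspace probe (illustrative, not from the file):

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>

int main(void)
{
	int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);

	if (semid < 0)
		return 1;
	/* Both counts are 0 here: nobody is sleeping on the semaphore. */
	printf("GETNCNT=%d GETZCNT=%d\n",
	       semctl(semid, 0, GETNCNT),
	       semctl(semid, 0, GETZCNT));
	semctl(semid, 0, IPC_RMID);
	return 0;
}
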
1147 struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
1152 ipc_assert_locked_object(&sma->sem_perm);
1153 list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
1163 list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
1164 unlink_queue(sma, q);
1168 list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
1169 unlink_queue(sma, q);
1172 for (i = 0; i < sma->sem_nsems; i++) {
1173 struct sem *sem = &sma->sems[i];
1175 unlink_queue(sma, q);
1179 unlink_queue(sma, q);
1186 sem_rmid(ns, sma);
1187 sem_unlock(sma, -1);
1191 ns->used_sems -= sma->sem_nsems;
1192 ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1219 static time64_t get_semotime(struct sem_array *sma)
1224 res = sma->sems[0].sem_otime;
1225 for (i = 1; i < sma->sem_nsems; i++) {
1226 time64_t to = sma->sems[i].sem_otime;
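
get_semotime() is the read-side counterpart of set_semotime() above: since the last-op time may live in any slot, the sem_otime reported to userspace is the maximum across all semaphores. Reconstructed from the fragments:

static time64_t get_semotime(struct sem_array *sma)
{
	int i;
	time64_t res;

	res = sma->sems[0].sem_otime;
	for (i = 1; i < sma->sem_nsems; i++) {
		time64_t to = sma->sems[i].sem_otime;

		if (to > res)
			res = to;
	}
	return res;
}
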
1237 struct sem_array *sma;
1245 sma = sem_obtain_object(ns, semid);
1246 if (IS_ERR(sma)) {
1247 err = PTR_ERR(sma);
1251 sma = sem_obtain_object_check(ns, semid);
1252 if (IS_ERR(sma)) {
1253 err = PTR_ERR(sma);
1260 audit_ipc_obj(&sma->sem_perm);
1263 if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
1267 err = security_sem_semctl(&sma->sem_perm, cmd);
1271 ipc_lock_object(&sma->sem_perm);
1273 if (!ipc_valid_object(&sma->sem_perm)) {
1274 ipc_unlock_object(&sma->sem_perm);
1279 kernel_to_ipc64_perm(&sma->sem_perm, &semid64->sem_perm);
1280 semotime = get_semotime(sma);
1282 semid64->sem_ctime = sma->sem_ctime;
1285 semid64->sem_ctime_high = sma->sem_ctime >> 32;
1287 semid64->sem_nsems = sma->sem_nsems;
1300 err = sma->sem_perm.id;
1302 ipc_unlock_object(&sma->sem_perm);
1347 struct sem_array *sma;
1356 sma = sem_obtain_object_check(ns, semid);
1357 if (IS_ERR(sma)) {
1359 return PTR_ERR(sma);
1362 if (semnum < 0 || semnum >= sma->sem_nsems) {
1368 if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
1373 err = security_sem_semctl(&sma->sem_perm, SETVAL);
1379 sem_lock(sma, NULL, -1);
1381 if (!ipc_valid_object(&sma->sem_perm)) {
1382 sem_unlock(sma, -1);
1387 semnum = array_index_nospec(semnum, sma->sem_nsems);
1388 curr = &sma->sems[semnum];
1390 ipc_assert_locked_object(&sma->sem_perm);
1391 list_for_each_entry(un, &sma->list_id, list_id)
1396 sma->sem_ctime = ktime_get_real_seconds();
1398 do_smart_update(sma, NULL, 0, 0, &wake_q);
1399 sem_unlock(sma, -1);
1408 struct sem_array *sma;
1416 sma = sem_obtain_object_check(ns, semid);
1417 if (IS_ERR(sma)) {
1419 return PTR_ERR(sma);
1422 nsems = sma->sem_nsems;
1425 if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
1428 err = security_sem_semctl(&sma->sem_perm, cmd);
1438 sem_lock(sma, NULL, -1);
1439 if (!ipc_valid_object(&sma->sem_perm)) {
1444 if (!ipc_rcu_getref(&sma->sem_perm)) {
1448 sem_unlock(sma, -1);
1453 ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1458 sem_lock_and_putref(sma);
1459 if (!ipc_valid_object(&sma->sem_perm)) {
1464 for (i = 0; i < sma->sem_nsems; i++)
1465 sem_io[i] = sma->sems[i].semval;
1466 sem_unlock(sma, -1);
1478 if (!ipc_rcu_getref(&sma->sem_perm)) {
1488 ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1494 ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1501 ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1507 sem_lock_and_putref(sma);
1508 if (!ipc_valid_object(&sma->sem_perm)) {
1514 sma->sems[i].semval = sem_io[i];
1515 ipc_update_pid(&sma->sems[i].sempid, task_tgid(current));
1518 ipc_assert_locked_object(&sma->sem_perm);
1519 list_for_each_entry(un, &sma->list_id, list_id) {
1523 sma->sem_ctime = ktime_get_real_seconds();
1525 do_smart_update(sma, NULL, 0, 0, &wake_q);
1535 sem_lock(sma, NULL, -1);
1536 if (!ipc_valid_object(&sma->sem_perm)) {
1542 curr = &sma->sems[semnum];
1552 err = count_semcnt(sma, semnum, 0);
1555 err = count_semcnt(sma, semnum, 1);
1560 sem_unlock(sma, -1);
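
The semctl_main() fragments above (GETALL/SETALL) show the pin-drop-allocate-relock dance: ipc_rcu_getref() keeps the array alive while the lock is dropped to allocate the sem_io buffer, sem_lock_and_putref() re-takes the lock and drops the pin, and ipc_valid_object() re-checks for a concurrent IPC_RMID. From userspace the trigger is simply (runnable, illustrative):

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>

/* On Linux the caller must define union semun itself. */
union semun {
	int val;
	struct semid_ds *buf;
	unsigned short *array;
};

int main(void)
{
	unsigned short vals[4];
	union semun arg = { .array = vals };
	int semid = semget(IPC_PRIVATE, 4, IPC_CREAT | 0600);

	if (semid < 0 || semctl(semid, 0, GETALL, arg) < 0) {
		perror("semget/semctl");
		return 1;
	}
	for (int i = 0; i < 4; i++)	/* freshly created sets read back 0 */
		printf("sem %d = %hu\n", i, vals[i]);
	semctl(semid, 0, IPC_RMID);
	return 0;
}
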
1604 struct sem_array *sma;
1618 sma = container_of(ipcp, struct sem_array, sem_perm);
1620 err = security_sem_semctl(&sma->sem_perm, cmd);
1626 sem_lock(sma, NULL, -1);
1631 sem_lock(sma, NULL, -1);
1635 sma->sem_ctime = ktime_get_real_seconds();
1643 sem_unlock(sma, -1);
1908 struct sem_array *sma;
1926 sma = sem_obtain_object_check(ns, semid);
1927 if (IS_ERR(sma)) {
1929 return ERR_CAST(sma);
1932 nsems = sma->sem_nsems;
1933 if (!ipc_rcu_getref(&sma->sem_perm)) {
1943 ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
1949 sem_lock_and_putref(sma);
1950 if (!ipc_valid_object(&sma->sem_perm)) {
1951 sem_unlock(sma, -1);
1973 ipc_assert_locked_object(&sma->sem_perm);
1974 list_add(&new->list_id, &sma->list_id);
1978 sem_unlock(sma, -1);
1988 struct sem_array *sma;
2047 sma = sem_obtain_object_check(ns, semid);
2048 if (IS_ERR(sma)) {
2050 error = PTR_ERR(sma);
2055 if (max >= sma->sem_nsems) {
2061 if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) {
2066 error = security_sem_semop(&sma->sem_perm, sops, nsops, alter);
2073 locknum = sem_lock(sma, sops, nsops);
2082 if (!ipc_valid_object(&sma->sem_perm))
2101 error = perform_atomic_semop(sma, &queue);
2110 do_smart_update(sma, sops, nsops, 1, &wake_q);
2112 set_semotime(sma, sops);
2114 sem_unlock(sma, locknum);
2129 int idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
2130 curr = &sma->sems[idx];
2133 if (sma->complex_count) {
2135 &sma->pending_alter);
2145 if (!sma->complex_count)
2146 merge_queues(sma);
2149 list_add_tail(&queue.list, &sma->pending_alter);
2151 list_add_tail(&queue.list, &sma->pending_const);
2153 sma->complex_count++;
2163 sem_unlock(sma, locknum);
2189 locknum = sem_lock(sma, sops, nsops);
2191 if (!ipc_valid_object(&sma->sem_perm))
2213 unlink_queue(sma, &queue);
2216 sem_unlock(sma, locknum);
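
do_semtimedop() is the syscall backend for semop()/semtimedop(): it attempts perform_atomic_semop() under sem_lock(), and on failure queues itself on pending_alter/pending_const (bumping complex_count for multi-sop requests) and sleeps until a wakeup or timeout. A minimal runnable userspace counterpart (illustrative):

#include <sys/ipc.h>
#include <sys/sem.h>

int main(void)
{
	int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
	struct sembuf up   = { .sem_num = 0, .sem_op = +1, .sem_flg = 0 };
	struct sembuf down = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };

	if (semid < 0)
		return 1;
	semop(semid, &up, 1);	/* succeeds immediately on the fast path */
	semop(semid, &down, 1);	/* 1 -> 0; at 0 this would queue and sleep */
	semctl(semid, 0, IPC_RMID);
	return 0;
}
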
2348 struct sem_array *sma;
2380 sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
2382 if (IS_ERR(sma)) {
2387 sem_lock(sma, NULL, -1);
2389 if (!ipc_valid_object(&sma->sem_perm)) {
2390 sem_unlock(sma, -1);
2399 sem_unlock(sma, -1);
2405 ipc_assert_locked_object(&sma->sem_perm);
2413 for (i = 0; i < sma->sem_nsems; i++) {
2414 struct sem *semaphore = &sma->sems[i];
2438 do_smart_update(sma, NULL, 0, 1, &wake_q);
2439 sem_unlock(sma, -1);
2453 struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
2458 * ipc_lock_object(), i.e. spin_lock(&sma->sem_perm.lock).
2463 complexmode_enter(sma);
2465 sem_otime = get_semotime(sma);
2469 sma->sem_perm.key,
2470 sma->sem_perm.id,
2471 sma->sem_perm.mode,
2472 sma->sem_nsems,
2473 from_kuid_munged(user_ns, sma->sem_perm.uid),
2474 from_kgid_munged(user_ns, sma->sem_perm.gid),
2475 from_kuid_munged(user_ns, sma->sem_perm.cuid),
2476 from_kgid_munged(user_ns, sma->sem_perm.cgid),
2478 sma->sem_ctime);
2480 complexmode_tryleave(sma);
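
The final block is sysvipc_sem_proc_show(), which emits one line of /proc/sysvipc/sem per array; complexmode_enter()/complexmode_tryleave() bracket it so the sem_otime read is stable against concurrent simple ops. The argument list above maps onto the columns key, semid, perms, nsems, uid, gid, cuid, cgid, otime, ctime; a sketch of the call (the format string is assumed from the surrounding file):

seq_printf(s,
	   "%10d %10d  %4o %10u %5u %5u %5u %5u %10llu %10llu\n",
	   sma->sem_perm.key,
	   sma->sem_perm.id,
	   sma->sem_perm.mode,
	   sma->sem_nsems,
	   from_kuid_munged(user_ns, sma->sem_perm.uid),
	   from_kgid_munged(user_ns, sma->sem_perm.gid),
	   from_kuid_munged(user_ns, sma->sem_perm.cuid),
	   from_kgid_munged(user_ns, sma->sem_perm.cgid),
	   sem_otime,
	   sma->sem_ctime);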