Lines matching defs:head in drivers/md/raid5.c
369 struct llist_node *head;
371 head = llist_del_all(&conf->released_stripes);
372 head = llist_reverse_order(head);
373 llist_for_each_entry_safe(sh, t, head, release_list) {
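Lines 371-373 are the classic lock-free drain idiom from <linux/llist.h>: producers push entries with llist_add(), the consumer detaches the whole list atomically with llist_del_all(), then reverses it because entries come back in LIFO order. A minimal sketch of the same pattern, with hypothetical names (my_conf, my_item, process_item):

	#include <linux/llist.h>

	struct my_item {
		struct llist_node node;
	};

	struct my_conf {
		struct llist_head pending;	/* pushed from any context, lock-free */
	};

	static void process_item(struct my_item *item)
	{
		/* hypothetical per-item work */
	}

	static void producer(struct my_conf *conf, struct my_item *item)
	{
		llist_add(&item->node, &conf->pending);	/* atomic push, no lock */
	}

	static void consumer(struct my_conf *conf)
	{
		struct my_item *item, *tmp;
		struct llist_node *head;

		head = llist_del_all(&conf->pending);	/* detach everything at once */
		head = llist_reverse_order(head);	/* LIFO -> submission order */
		llist_for_each_entry_safe(item, tmp, head, node)
			process_item(item);
	}

The _safe variant caches the next pointer before the loop body runs, so each entry may be reused or freed while it is being handled, as raid5's release path does.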
917 struct stripe_head *head;
929 head = last_sh;
930 atomic_inc(&head->count);
934 head = find_get_stripe(conf, head_sector, conf->generation,
937 if (!head)
939 if (!stripe_can_batch(head))
943 lock_two_stripes(head, sh);
945 if (!stripe_can_batch(head) || !stripe_can_batch(sh))
954 if (head->dev[dd_idx].towrite->bi_opf != sh->dev[dd_idx].towrite->bi_opf ||
955 bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite))
958 if (head->batch_head) {
959 spin_lock(&head->batch_head->batch_lock);
961 if (!stripe_can_batch(head)) {
962 spin_unlock(&head->batch_head->batch_lock);
967 * batch_lock, otherwise clear_batch_ready of batch head
972 sh->batch_head = head->batch_head;
975 * at this point, head's BATCH_READY could be cleared, but we
978 list_add(&sh->batch_list, &head->batch_list);
979 spin_unlock(&head->batch_head->batch_lock);
981 head->batch_head = head;
982 sh->batch_head = head->batch_head;
983 spin_lock(&head->batch_lock);
984 list_add_tail(&sh->batch_list, &head->batch_list);
985 spin_unlock(&head->batch_lock);
1004 unlock_two_stripes(head, sh);
1006 raid5_release_stripe(head);
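Lines 917-1006 are stripe_add_to_batch_list(): a stripe either joins the batch its neighbour already belongs to or turns that neighbour into a new batch head. The comments at lines 967 and 975 capture the subtle part: sh->batch_head must be assigned while batch_lock is held, so a concurrent clear_batch_ready() on the batch head can never see a stripe that is linked into the batch list but not yet marked as a member. A condensed sketch of just that join-or-start shape, with a hypothetical struct item (the real code also rechecks stripe_can_batch() under the lock and bails out, omitted here):

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct item {
		struct item *batch_head;	/* NULL until batched */
		struct list_head batch_list;
		spinlock_t batch_lock;
	};

	static void add_to_batch(struct item *head, struct item *it)
	{
		if (head->batch_head) {
			/* head already belongs to a batch: join it */
			spin_lock(&head->batch_head->batch_lock);
			it->batch_head = head->batch_head;	/* assigned inside the lock */
			list_add(&it->batch_list, &head->batch_list);
			spin_unlock(&head->batch_head->batch_lock);
		} else {
			/* head becomes its own batch head */
			head->batch_head = head;
			it->batch_head = head->batch_head;
			spin_lock(&head->batch_lock);
			list_add_tail(&it->batch_list, &head->batch_list);
			spin_unlock(&head->batch_lock);
		}
	}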
1066 /* temporarily move the head */
4891 * a head which can now be handled.
4951 "head stripe state: %lx\n", head_sh->state);
5001 * the head of batch list or lone stripe can continue. Otherwise we
5407 struct list_head head;
5408 list_add(&head, &conf->bitmap_list);
5410 while (!list_empty(&head)) {
5411 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
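Lines 5407-5411 show the on-stack list handoff: a local struct list_head is linked into the shared list (and the shared head then unlinked in the source), detaching every delayed stripe in O(1) so they can be walked afterwards. list_splice_init() is the more common spelling of the same handoff; a minimal sketch with hypothetical names (my_entry, handle_entry):

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct my_entry {
		struct list_head lru;
	};

	static void handle_entry(struct my_entry *e)
	{
		/* hypothetical per-entry work */
	}

	static void drain_shared_list(struct list_head *shared, spinlock_t *lock)
	{
		LIST_HEAD(head);		/* on-stack list head */
		struct my_entry *e;

		spin_lock(lock);
		list_splice_init(shared, &head);	/* detach all entries in O(1) */
		spin_unlock(lock);

		while (!list_empty(&head)) {
			e = list_first_entry(&head, struct my_entry, lru);
			list_del_init(&e->lru);
			handle_entry(e);	/* no lock held across the work */
		}
	}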
5613 * head of the hold_list has changed, i.e. the head was promoted to the
6227 * otherwise the batch_last stripe head could prevent
6634 * So we do one stripe head at a time and record in
7713 * Losing a stripe head costs more than the time to refill it,