Lines Matching defs:pgpath

41 struct pgpath {
53 #define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
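
struct pgpath embeds a struct dm_path member named path, and the path_to_pgpath() macro at line 53 uses container_of() to get from a pointer to that embedded member back to the enclosing pgpath. A minimal, compilable userspace sketch of the same idiom (the simplified struct layouts below are stand-ins, not the dm-mpath definitions):

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of(). */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct dm_path { void *dev; };                      /* embedded member */
struct pgpath  { int is_active; struct dm_path path; };

#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)

int main(void)
{
        struct pgpath pg = { .is_active = 1 };
        struct dm_path *p = &pg.path;   /* callers often only hold this pointer */

        /* Recover the enclosing pgpath from the embedded dm_path. */
        printf("is_active = %d\n", path_to_pgpath(p)->is_active);
        return 0;
}
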
79 struct pgpath *current_pgpath;
109 struct pgpath *pgpath;
114 typedef int (*action_fn) (struct pgpath *pgpath);
118 static void activate_or_offline_path(struct pgpath *pgpath);
156 static struct pgpath *alloc_pgpath(void)
158 struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
160 if (!pgpath)
163 pgpath->is_active = true;
165 return pgpath;
168 static void free_pgpath(struct pgpath *pgpath)
170 kfree(pgpath);
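
Lines 156-170 show almost the entire allocator pair: alloc_pgpath() does a zeroed GFP_KERNEL allocation and marks the new path active, and free_pgpath() is a bare kfree(). Read together, with only the elided early return filled in here as an assumption, they look roughly like:

static struct pgpath *alloc_pgpath(void)
{
        /* Zeroed allocation, so every field starts out 0/false/NULL. */
        struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

        if (!pgpath)
                return NULL;            /* assumed: the listing elides this line */

        pgpath->is_active = true;       /* a new path starts out usable */

        return pgpath;
}

static void free_pgpath(struct pgpath *pgpath)
{
        kfree(pgpath);
}
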
187 struct pgpath *pgpath, *tmp;
189 list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
190 list_del(&pgpath->list);
191 dm_put_device(ti, pgpath->path.dev);
192 free_pgpath(pgpath);
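
The teardown loop at lines 187-192 uses list_for_each_entry_safe() because each entry is unlinked and freed inside the loop body; the plain iterator would dereference freed memory when advancing. With the enclosing function's name and signature filled in as an assumption, the listed lines read roughly as:

static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
        struct pgpath *pgpath, *tmp;

        list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
                list_del(&pgpath->list);
                dm_put_device(ti, pgpath->path.dev);   /* drop the table's device reference */
                free_pgpath(pgpath);                   /* now the node itself can go */
        }
}
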
304 mpio->pgpath = NULL;
318 struct pgpath *pgpath;
336 list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
338 if (!pgpath->is_active)
340 if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
377 static struct pgpath *choose_path_in_pg(struct multipath *m,
383 struct pgpath *pgpath;
389 pgpath = path_to_pgpath(path);
394 m->current_pgpath = pgpath;
399 return pgpath;
402 static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
406 struct pgpath *pgpath;
426 pgpath = choose_path_in_pg(m, pg, nr_bytes);
427 if (!IS_ERR_OR_NULL(pgpath))
428 return pgpath;
435 pgpath = choose_path_in_pg(m, pg, nr_bytes);
436 if (!IS_ERR_OR_NULL(pgpath))
437 return pgpath;
450 pgpath = choose_path_in_pg(m, pg, nr_bytes);
451 if (!IS_ERR_OR_NULL(pgpath)) {
457 return pgpath;
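
The three choose_path_in_pg() calls at lines 426, 435 and 450 show the fallback order inside choose_pgpath(): try the group the target is already using (or was told to switch to), then walk the remaining priority groups until one yields a usable path. A compilable userspace sketch of that tiered selection follows; the flattened array layout and names are illustrative, per-group selection is reduced to "first active path" (dm-mpath delegates that step to the group's path selector), and details such as bypassed-group handling are not reconstructed here:

#include <stddef.h>

struct pgpath { int is_active; };

struct priority_group {
        struct pgpath *paths;
        size_t nr_paths;
};

/* Per-group selection, reduced here to "first active path". */
static struct pgpath *choose_path_in_pg(struct priority_group *pg)
{
        for (size_t i = 0; i < pg->nr_paths; i++)
                if (pg->paths[i].is_active)
                        return &pg->paths[i];
        return NULL;
}

/* Tiered fallback: preferred group first, then the others in priority order. */
static struct pgpath *choose_pgpath(struct priority_group *groups, size_t nr_groups,
                                    size_t preferred)
{
        struct pgpath *pgpath;

        if (preferred < nr_groups) {
                pgpath = choose_path_in_pg(&groups[preferred]);
                if (pgpath)
                        return pgpath;
        }

        for (size_t i = 0; i < nr_groups; i++) {
                if (i == preferred)
                        continue;
                pgpath = choose_path_in_pg(&groups[i]);
                if (pgpath)
                        return pgpath;
        }

        return NULL;    /* no usable path in any group */
}
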
513 struct pgpath *pgpath;
519 /* Do we need to select a new pgpath? */
520 pgpath = READ_ONCE(m->current_pgpath);
521 if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
522 pgpath = choose_pgpath(m, nr_bytes);
524 if (!pgpath) {
535 mpio->pgpath = pgpath;
538 bdev = pgpath->path.dev->bdev;
546 activate_or_offline_path(pgpath);
563 if (pgpath->pg->ps.type->start_io)
564 pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
565 &pgpath->path,
579 struct pgpath *pgpath = mpio->pgpath;
581 if (pgpath && pgpath->pg->ps.type->end_io)
582 pgpath->pg->ps.type->end_io(&pgpath->pg->ps,
583 &pgpath->path,
612 static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
614 struct pgpath *pgpath;
617 /* Do we need to select a new pgpath? */
618 pgpath = READ_ONCE(m->current_pgpath);
619 if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
620 pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
622 if (!pgpath) {
626 pgpath = ERR_PTR(-EAGAIN);
637 return pgpath;
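
Both mapping entry points start the same way: at lines 519-522 and 617-620 (and again at 2036-2038 for ioctl forwarding) the cached m->current_pgpath is read with READ_ONCE() and choose_pgpath() is called unless a path is already cached and MPATHF_QUEUE_IO is set. In other words, the path selector is consulted for every I/O, except while path-group initialisation is still in flight, when the cached path is reused and the I/O is queued. A rough, compilable userspace analogue of that check, where the field names, the stand-in flag and the stub are illustrative rather than the dm-mpath definitions:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct pgpath { int is_active; };

struct multipath {
        _Atomic(struct pgpath *) current_pgpath;  /* last selected path */
        atomic_bool pg_init_in_flight;            /* stand-in for MPATHF_QUEUE_IO */
};

/* Stub for the sketch; the real choose_pgpath() walks the priority groups
 * under the target's lock and updates current_pgpath. */
static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
{
        (void)m;
        (void)nr_bytes;
        return NULL;
}

static struct pgpath *select_for_io(struct multipath *m, size_t nr_bytes)
{
        struct pgpath *pgpath = atomic_load_explicit(&m->current_pgpath,
                                                     memory_order_relaxed);

        /* Re-select for every I/O (this is where load balancing happens),
         * unless a path is cached and path-group initialisation is still in
         * flight, in which case the cached path is kept and the I/O queued. */
        if (!pgpath || !atomic_load(&m->pg_init_in_flight))
                pgpath = choose_pgpath(m, nr_bytes);

        return pgpath;
}
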
643 struct pgpath *pgpath = __map_bio(m, bio);
645 if (IS_ERR(pgpath))
648 if (!pgpath) {
655 mpio->pgpath = pgpath;
657 if (dm_ps_use_hr_timer(pgpath->pg->ps.type))
661 bio_set_dev(bio, pgpath->path.dev->bdev);
664 if (pgpath->pg->ps.type->start_io)
665 pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
666 &pgpath->path,
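
Lines 563-565, 581-583 and 664-666 (and the end_io calls at 1681-1684 and 1725-1728) show the other half of path selection: once a path is chosen, the priority group's path selector is notified through optional start_io/end_io callbacks, which is how selectors such as service-time can track per-path in-flight bytes. A small userspace sketch of that ops-table pattern; the selector type and its fields are illustrative, not the dm path-selector API:

#include <stddef.h>
#include <stdio.h>

struct dm_path { size_t in_flight_bytes; };

/* Optional per-selector hooks, invoked around every mapped I/O. */
struct selector_ops {
        void (*start_io)(struct dm_path *path, size_t nr_bytes);
        void (*end_io)(struct dm_path *path, size_t nr_bytes);
};

static void st_start_io(struct dm_path *path, size_t nr_bytes)
{
        path->in_flight_bytes += nr_bytes;   /* account the outstanding I/O */
}

static void st_end_io(struct dm_path *path, size_t nr_bytes)
{
        path->in_flight_bytes -= nr_bytes;   /* completion: drop the account */
}

static const struct selector_ops service_time_like_ops = {
        .start_io = st_start_io,
        .end_io   = st_end_io,
};

int main(void)
{
        struct dm_path path = { 0 };
        const struct selector_ops *ops = &service_time_like_ops;

        /* The hooks are optional, hence the NULL checks, as in the listed lines. */
        if (ops->start_io)
                ops->start_io(&path, 4096);
        printf("in flight: %zu\n", path.in_flight_bytes);
        if (ops->end_io)
                ops->end_io(&path, 4096);
        printf("in flight: %zu\n", path.in_flight_bytes);
        return 0;
}
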
935 static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
939 struct pgpath *p;
1028 struct pgpath *pgpath;
1040 pgpath = parse_path(&path_args, &pg->ps, ti);
1041 if (IS_ERR(pgpath)) {
1042 r = PTR_ERR(pgpath);
1046 pgpath->pg = pg;
1047 list_add_tail(&pgpath->list, &pg->pgpaths);
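
The parse-time call sites at lines 1040-1042, like the IS_ERR_OR_NULL checks at 427/436/451 and the ERR_PTR(-EAGAIN) return at 626, rely on the kernel's error-pointer convention: a function that returns a pointer can instead return a small negative errno encoded in the pointer value, and the caller decodes it with IS_ERR()/PTR_ERR(). A compilable userspace sketch of that convention, with simplified stand-ins for the kernel helpers and an illustrative parse_path():

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel's error-pointer helpers: negative errno
 * values are smuggled inside an otherwise invalid pointer value. */
#define MAX_ERRNO       4095
static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline bool  IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct pgpath { int is_active; };

/* A parse step that can fail: returns a valid pgpath or an encoded errno. */
static struct pgpath *parse_path(const char *arg)
{
        static struct pgpath p = { .is_active = 1 };

        if (!arg || !*arg)
                return ERR_PTR(-EINVAL);   /* caller decodes this with PTR_ERR() */
        return &p;
}

int main(void)
{
        struct pgpath *pgpath = parse_path("");

        if (IS_ERR(pgpath))
                printf("parse_path failed: %ld\n", PTR_ERR(pgpath));
        return 0;
}
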
1332 static int fail_path(struct pgpath *pgpath)
1335 struct multipath *m = pgpath->pg->m;
1339 if (!pgpath->is_active)
1344 pgpath->path.dev->name);
1346 pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
1347 pgpath->is_active = false;
1348 pgpath->fail_count++;
1352 if (pgpath == m->current_pgpath)
1356 pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));
1371 static int reinstate_path(struct pgpath *pgpath)
1375 struct multipath *m = pgpath->pg->m;
1380 if (pgpath->is_active)
1385 pgpath->path.dev->name);
1387 r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
1391 pgpath->is_active = true;
1397 } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
1398 if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
1403 pgpath->path.dev->name, nr_valid_paths);
1414 if (pgpath->is_active)
1427 struct pgpath *pgpath;
1431 list_for_each_entry(pgpath, &pg->pgpaths, list) {
1432 if (pgpath->path.dev == dev)
1433 r = action(pgpath);
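
fail_path() (lines 1332-1356) and reinstate_path() (lines 1371-1403) share the action_fn signature from line 114, so the loop at lines 1427-1433 can apply either action to whichever path sits on a given device. The real functions also notify the path selector, adjust the valid-path count and, for fail_path(), clear current_pgpath and bump fail_count; the userspace sketch below only mirrors the is_active/fail_count bookkeeping and the function-pointer dispatch, and the flat array, names and -ENODEV default are illustrative:

#include <errno.h>
#include <stddef.h>
#include <string.h>

struct pgpath {
        const char *dev;
        int is_active;
        unsigned int fail_count;
};

typedef int (*action_fn)(struct pgpath *pgpath);

static int fail_path(struct pgpath *pgpath)
{
        if (!pgpath->is_active)
                return 0;               /* already failed: nothing to do */
        pgpath->is_active = 0;
        pgpath->fail_count++;
        return 0;
}

static int reinstate_path(struct pgpath *pgpath)
{
        if (pgpath->is_active)
                return 0;               /* already usable */
        pgpath->is_active = 1;
        return 0;
}

/* Apply one action to the path matching dev, mirroring the loop at 1431-1433. */
static int apply_action(struct pgpath *paths, size_t n, const char *dev,
                        action_fn action)
{
        int r = -ENODEV;

        for (size_t i = 0; i < n; i++)
                if (strcmp(paths[i].dev, dev) == 0)
                        r = action(&paths[i]);

        return r;
}
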
1519 static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
1539 struct pgpath *pgpath = data;
1540 struct priority_group *pg = pgpath->pg;
1559 fail_path(pgpath);
1574 if (pg_init_limit_reached(m, pgpath))
1575 fail_path(pgpath);
1585 fail_path(pgpath);
1590 if (pgpath == m->current_pgpath) {
1624 static void activate_or_offline_path(struct pgpath *pgpath)
1626 struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
1628 if (pgpath->is_active && !blk_queue_dying(q))
1629 scsi_dh_activate(q, pg_init_done, pgpath);
1631 pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
1636 struct pgpath *pgpath =
1637 container_of(work, struct pgpath, activate_path.work);
1639 activate_or_offline_path(pgpath);
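
Lines 1624-1639 show how path-group initialisation is driven: each pgpath embeds a delayed work item (activate_path, queued at lines 340 and 1398), and the worker recovers its owning path with container_of() before either asking the SCSI device handler to activate it or reporting it offline. Filling in the worker's name/signature and the else branch as assumptions, the listed lines read roughly as:

static void activate_or_offline_path(struct pgpath *pgpath)
{
        struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

        /* Only kick the SCSI device handler for paths that are still usable. */
        if (pgpath->is_active && !blk_queue_dying(q))
                scsi_dh_activate(q, pg_init_done, pgpath);
        else    /* assumed: the listing elides the else */
                pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
}

static void activate_path_work(struct work_struct *work)
{
        /* The work item is embedded in struct pgpath, so container_of()
         * recovers the owning path (same idiom as path_to_pgpath()). */
        struct pgpath *pgpath =
                container_of(work, struct pgpath, activate_path.work);

        activate_or_offline_path(pgpath);
}
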
1646 struct pgpath *pgpath = mpio->pgpath;
1668 if (pgpath)
1669 fail_path(pgpath);
1680 if (pgpath) {
1681 struct path_selector *ps = &pgpath->pg->ps;
1684 ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
1696 struct pgpath *pgpath = mpio->pgpath;
1703 if (pgpath)
1704 fail_path(pgpath);
1724 if (pgpath) {
1725 struct path_selector *ps = &pgpath->pg->ps;
1728 ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
1806 struct pgpath *p;
2032 struct pgpath *pgpath;
2036 pgpath = READ_ONCE(m->current_pgpath);
2037 if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
2038 pgpath = choose_pgpath(m, 0);
2040 if (pgpath) {
2042 *bdev = pgpath->path.dev->bdev;
2083 struct pgpath *p;
2098 static int pgpath_busy(struct pgpath *pgpath)
2100 struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
2118 struct pgpath *pgpath;
2158 list_for_each_entry(pgpath, &pg->pgpaths, list) {
2159 if (pgpath->is_active) {
2161 if (!pgpath_busy(pgpath)) {
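
The loop at lines 2158-2161 underlies the target's busy check: the multipath device only reports itself busy if the current group has at least one active path and every active path is busy, so a single idle active path is enough to keep accepting I/O. A small userspace sketch of that rule; pgpath_busy() is reduced here to a stored flag, whereas the real one (lines 2098-2100) inspects the path's request queue:

#include <stdbool.h>
#include <stddef.h>

struct pgpath {
        bool is_active;
        bool queue_busy;    /* stand-in for checking the path's request queue */
};

static bool pgpath_busy(const struct pgpath *pgpath)
{
        return pgpath->queue_busy;
}

/* Busy only if there is at least one active path and all of them are busy. */
static bool pg_busy(const struct pgpath *paths, size_t n)
{
        bool has_active = false;

        for (size_t i = 0; i < n; i++) {
                if (!paths[i].is_active)
                        continue;
                has_active = true;
                if (!pgpath_busy(&paths[i]))
                        return false;    /* an idle active path can take more I/O */
        }

        return has_active;
}
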