Lines Matching defs:pgpath
(Symbol cross-reference: each hit below is prefixed with its line number in the searched source, which from the identifiers appears to be the Linux device-mapper multipath target, drivers/md/dm-mpath.c.)

38 struct pgpath {
50 #define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
76 struct pgpath *current_pgpath;
106 struct pgpath *pgpath;
110 typedef int (*action_fn) (struct pgpath *pgpath);
114 static void activate_or_offline_path(struct pgpath *pgpath);
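The hit at 38 is the definition of the structure itself; 50 is the container_of() helper that recovers a pgpath from its embedded struct dm_path, 76 and 106 are the cached-current-path and per-I/O tracking members, 110 the fail/reinstate action callback type, and 114 a forward declaration of the pg_init helper. For orientation, in recent mainline kernels the structure looks roughly like this (reconstructed from memory, so field order, bitfield widths and comments are approximate):

struct pgpath {
	struct list_head list;			/* entry on priority_group->pgpaths */

	struct priority_group *pg;		/* owning priority group */
	unsigned int fail_count;		/* cumulative failure count */

	struct dm_path path;			/* what path_to_pgpath() converts back from */
	struct delayed_work activate_path;	/* pg_init work, queued on kmpath_handlerd */

	bool is_active;				/* cleared by fail_path(), set by reinstate_path() */
};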
149 static struct pgpath *alloc_pgpath(void)
151 struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
153 if (!pgpath)
156 pgpath->is_active = true;
158 return pgpath;
161 static void free_pgpath(struct pgpath *pgpath)
163 kfree(pgpath);
180 struct pgpath *pgpath, *tmp;
182 list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
183 list_del(&pgpath->list);
184 dm_put_device(ti, pgpath->path.dev);
185 free_pgpath(pgpath);
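The hits at 149-163 spell out the allocator and destructor almost line for line, and 180-185 the loop that releases every path in a priority group. Stitched together they read roughly as follows (the early return after the NULL check is an assumption, as is the enclosing function the teardown loop lives in):

static struct pgpath *alloc_pgpath(void)
{
	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

	if (!pgpath)
		return NULL;			/* assumed; this line is not among the hits */

	pgpath->is_active = true;		/* a fresh path starts out usable */

	return pgpath;
}

static void free_pgpath(struct pgpath *pgpath)
{
	kfree(pgpath);
}

/* teardown loop from hits 180-185, inside the priority-group destructor */
	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
		list_del(&pgpath->list);
		dm_put_device(ti, pgpath->path.dev);	/* drop the target's reference on the device */
		free_pgpath(pgpath);
	}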
297 mpio->pgpath = NULL;
309 struct pgpath *pgpath;
327 list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
329 if (!pgpath->is_active)
331 if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
368 static struct pgpath *choose_path_in_pg(struct multipath *m,
374 struct pgpath *pgpath;
380 pgpath = path_to_pgpath(path);
385 m->current_pgpath = pgpath;
390 return pgpath;
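choose_path_in_pg() (hits 368-390) asks the group's path selector for a struct dm_path, maps it back to the enclosing pgpath with path_to_pgpath(), and caches the result as m->current_pgpath. A condensed sketch assuming the mainline shape of the function; the select_path() call, the ERR_PTR(-ENXIO) return and the locking around the current_pg update are not shown in the hits and are filled in from mainline:

static struct pgpath *choose_path_in_pg(struct multipath *m,
					struct priority_group *pg,
					size_t nr_bytes)
{
	struct dm_path *path;
	struct pgpath *pgpath;

	path = pg->ps.type->select_path(&pg->ps, nr_bytes);
	if (!path)
		return ERR_PTR(-ENXIO);		/* this group has nothing usable */

	pgpath = path_to_pgpath(path);		/* hit 380 */

	/* cache the choice; mainline takes m->lock when the current group changes */
	m->current_pgpath = pgpath;		/* hit 385 */
	m->current_pg = pg;

	return pgpath;				/* hit 390 */
}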
393 static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
397 struct pgpath *pgpath;
417 pgpath = choose_path_in_pg(m, pg, nr_bytes);
418 if (!IS_ERR_OR_NULL(pgpath))
419 return pgpath;
426 pgpath = choose_path_in_pg(m, pg, nr_bytes);
427 if (!IS_ERR_OR_NULL(pgpath))
428 return pgpath;
441 pgpath = choose_path_in_pg(m, pg, nr_bytes);
442 if (!IS_ERR_OR_NULL(pgpath)) {
448 return pgpath;
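The three IS_ERR_OR_NULL checks at 418, 427 and 442 are the three attempts choose_pgpath() makes before giving up: a group explicitly requested via the "switch_group" message, the group currently in use, and finally a scan over every priority group, first skipping bypassed groups and then retrying them. Reduced to that control flow (a paraphrase using mainline field names such as m->next_pg and pg->bypassed, not the verbatim function; locking and the no-valid-paths bookkeeping are omitted):

static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
{
	struct priority_group *pg;
	struct pgpath *pgpath;
	unsigned int bypassed = 1;

	/* 1) a group the administrator asked to switch to */
	pg = READ_ONCE(m->next_pg);
	if (pg) {
		pgpath = choose_path_in_pg(m, pg, nr_bytes);	/* hit 417 */
		if (!IS_ERR_OR_NULL(pgpath))
			return pgpath;
	}

	/* 2) stay on the current group while it still has paths */
	pg = READ_ONCE(m->current_pg);
	if (pg) {
		pgpath = choose_path_in_pg(m, pg, nr_bytes);	/* hit 426 */
		if (!IS_ERR_OR_NULL(pgpath))
			return pgpath;
	}

	/* 3) walk all groups: first pass skips bypassed ones, second pass retries them */
	do {
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed == !!bypassed)
				continue;
			pgpath = choose_path_in_pg(m, pg, nr_bytes);	/* hit 441 */
			if (!IS_ERR_OR_NULL(pgpath))
				return pgpath;
		}
	} while (bypassed--);

	return NULL;	/* simplified: mainline also updates queue-if-no-path state here */
}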
506 struct pgpath *pgpath;
512 /* Do we need to select a new pgpath? */
513 pgpath = READ_ONCE(m->current_pgpath);
514 if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
515 pgpath = choose_pgpath(m, nr_bytes);
517 if (!pgpath) {
528 mpio->pgpath = pgpath;
531 bdev = pgpath->path.dev->bdev;
539 activate_or_offline_path(pgpath);
557 if (pgpath->pg->ps.type->start_io)
558 pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
559 &pgpath->path,
573 struct pgpath *pgpath = mpio->pgpath;
575 if (pgpath && pgpath->pg->ps.type->end_io)
576 pgpath->pg->ps.type->end_io(&pgpath->pg->ps,
577 &pgpath->path,
606 static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
608 struct pgpath *pgpath;
611 /* Do we need to select a new pgpath? */
612 pgpath = READ_ONCE(m->current_pgpath);
613 if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
614 pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
616 if (!pgpath) {
620 pgpath = ERR_PTR(-EAGAIN);
631 return pgpath;
637 struct pgpath *pgpath = __map_bio(m, bio);
639 if (IS_ERR(pgpath))
642 if (!pgpath) {
649 mpio->pgpath = pgpath;
652 bio_set_dev(bio, pgpath->path.dev->bdev);
655 if (pgpath->pg->ps.type->start_io)
656 pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
657 &pgpath->path,
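Both mapping paths follow the same shape: the request-based one (hits 506-559) and the bio-based one (hits 606-657) read the cached current_pgpath without the lock, fall back to choose_pgpath() when there is none or MPATHF_QUEUE_IO forces re-selection, record the choice in the per-I/O mpio, aim the clone or bio at the path's block device, and finally give the path selector its optional start_io() hook. A skeleton of that common shape (not compilable as-is; the no-path branch in particular is simplified, since the real code distinguishes queue_if_no_path requeueing from hard failure):

	/* do we need to select a new pgpath? */
	pgpath = READ_ONCE(m->current_pgpath);
	if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
		pgpath = choose_pgpath(m, nr_bytes);

	if (!pgpath)
		return DM_MAPIO_REQUEUE;	/* simplified; see above */

	mpio->pgpath = pgpath;			/* remembered so end_io can do accounting */

	bio_set_dev(bio, pgpath->path.dev->bdev);	/* bio-based variant; the request-based
							 * variant instead clones onto this
							 * bdev's queue (hit 531) */

	if (pgpath->pg->ps.type->start_io)	/* optional selector hook (hits 557, 655) */
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps, &pgpath->path, nr_bytes);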
926 static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
930 struct pgpath *p;
1019 struct pgpath *pgpath;
1031 pgpath = parse_path(&path_args, &pg->ps, ti);
1032 if (IS_ERR(pgpath)) {
1033 r = PTR_ERR(pgpath);
1037 pgpath->pg = pg;
1038 list_add_tail(&pgpath->list, &pg->pgpaths);
1324 static int fail_path(struct pgpath *pgpath)
1327 struct multipath *m = pgpath->pg->m;
1331 if (!pgpath->is_active)
1336 pgpath->path.dev->name);
1338 pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
1339 pgpath->is_active = false;
1340 pgpath->fail_count++;
1344 if (pgpath == m->current_pgpath)
1348 pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));
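fail_path() (hits 1324-1348) is the one place a path is taken out of service: it tells the group's path selector to stop handing the path out, clears is_active, bumps fail_count, drops the cached current_pgpath if it pointed at this path, and emits a uevent carrying the remaining valid-path count. A condensed sketch along mainline lines (the m->lock locking, the exact DMWARN wording and the trigger_event work are abbreviated or omitted):

static int fail_path(struct pgpath *pgpath)
{
	struct multipath *m = pgpath->pg->m;

	if (!pgpath->is_active)			/* already failed: nothing to do (hit 1331) */
		goto out;

	DMWARN("Failing path %s.", pgpath->path.dev->name);

	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
	pgpath->is_active = false;
	pgpath->fail_count++;

	atomic_dec(&m->nr_valid_paths);

	if (pgpath == m->current_pgpath)	/* invalidate the cached fast-path choice */
		m->current_pgpath = NULL;

	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
		       pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));
out:
	return 0;
}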
1363 static int reinstate_path(struct pgpath *pgpath)
1367 struct multipath *m = pgpath->pg->m;
1372 if (pgpath->is_active)
1377 pgpath->path.dev->name);
1379 r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
1383 pgpath->is_active = true;
1389 } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
1390 if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
1395 pgpath->path.dev->name, nr_valid_paths);
1406 if (pgpath->is_active)
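reinstate_path() (hits 1363-1406) is the mirror image: if the path selector agrees to take the path back, is_active is set again and the valid-path count goes up; when that was the only valid path the cached choice is invalidated so the next I/O re-selects, and when a hardware handler is configured and the path sits in the current group, pg_init is kicked by queueing the activate_path work on kmpath_handlerd (hit 1390). Sketched, again with locking and the queued-I/O restart left out:

static int reinstate_path(struct pgpath *pgpath)
{
	int r = 0;
	struct multipath *m = pgpath->pg->m;
	unsigned int nr_valid_paths;

	if (pgpath->is_active)			/* already usable (hit 1372) */
		goto out;

	DMWARN("Reinstating path %s.", pgpath->path.dev->name);

	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
	if (r)
		goto out;			/* the selector refused the path */

	pgpath->is_active = true;

	nr_valid_paths = atomic_inc_return(&m->nr_valid_paths);
	if (nr_valid_paths == 1) {
		m->current_pgpath = NULL;	/* force re-selection on the next I/O */
	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
			atomic_inc(&m->pg_init_in_progress);
	}

	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
		       pgpath->path.dev->name, nr_valid_paths);
out:
	return r;
}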
1419 struct pgpath *pgpath;
1423 list_for_each_entry(pgpath, &pg->pgpaths, list) {
1424 if (pgpath->path.dev == dev)
1425 r = action(pgpath);
1511 static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
1531 struct pgpath *pgpath = data;
1532 struct priority_group *pg = pgpath->pg;
1551 fail_path(pgpath);
1566 if (pg_init_limit_reached(m, pgpath))
1567 fail_path(pgpath);
1577 fail_path(pgpath);
1582 if (pgpath == m->current_pgpath) {
1616 static void activate_or_offline_path(struct pgpath *pgpath)
1618 struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
1620 if (pgpath->is_active && !blk_queue_dying(q))
1621 scsi_dh_activate(q, pg_init_done, pgpath);
1623 pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
1628 struct pgpath *pgpath =
1629 container_of(work, struct pgpath, activate_path.work);
1631 activate_or_offline_path(pgpath);
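The hits at 1616-1631 cover pg_init dispatch almost completely: activate_or_offline_path() hands a live path's queue to the SCSI device handler with pg_init_done() as the completion, while a dead or dying path is reported straight to pg_init_done() as offlined; the last three hits are the work handler that kmpath_handlerd runs. Only the else keyword and the work handler's name are missing from the hits (in mainline the handler is called activate_path_work); filled in:

static void activate_or_offline_path(struct pgpath *pgpath)
{
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

	if (pgpath->is_active && !blk_queue_dying(q))
		scsi_dh_activate(q, pg_init_done, pgpath);	/* asynchronous; pg_init_done is the completion */
	else
		pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
}

static void activate_path_work(struct work_struct *work)
{
	struct pgpath *pgpath =
		container_of(work, struct pgpath, activate_path.work);

	activate_or_offline_path(pgpath);
}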
1638 struct pgpath *pgpath = mpio->pgpath;
1660 if (pgpath)
1661 fail_path(pgpath);
1672 if (pgpath) {
1673 struct path_selector *ps = &pgpath->pg->ps;
1676 ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
1688 struct pgpath *pgpath = mpio->pgpath;
1695 if (pgpath)
1696 fail_path(pgpath);
1716 if (pgpath) {
1717 struct path_selector *ps = &pgpath->pg->ps;
1720 ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
1797 struct pgpath *p;
1985 struct pgpath *pgpath;
1989 pgpath = READ_ONCE(m->current_pgpath);
1990 if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
1991 pgpath = choose_pgpath(m, 0);
1993 if (pgpath) {
1995 *bdev = pgpath->path.dev->bdev;
2036 struct pgpath *p;
2051 static int pgpath_busy(struct pgpath *pgpath)
2053 struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
2071 struct pgpath *pgpath;
2110 list_for_each_entry(pgpath, &pg->pgpaths, list) {
2111 if (pgpath->is_active) {
2113 if (!pgpath_busy(pgpath)) {
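Finally, pgpath_busy() (hits 2051-2053) just forwards to the low-level driver's busy check for the path's request queue, and the scan at 2110-2113 uses it inside multipath_busy() to decide whether the current group still has an active, idle path worth dispatching to. Roughly (the has_active/busy bookkeeping around the loop is assumed from mainline):

static int pgpath_busy(struct pgpath *pgpath)
{
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

	return blk_lld_busy(q);			/* ask the low-level driver, e.g. SCSI */
}

/* inside multipath_busy(): is there an active path that is not congested? */
	list_for_each_entry(pgpath, &pg->pgpaths, list) {
		if (pgpath->is_active) {
			has_active = true;
			if (!pgpath_busy(pgpath)) {
				busy = false;
				break;
			}
		}
	}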