Lines matching refs: tpg
304 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
306 struct vhost_scsi_tport *tport = tpg->tport;
313 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
315 return tpg->tport_tpgt;
320 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
323 return tpg->tv_fabric_prot_type;
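
The three accessors around lines 304-323 all recover the driver-private struct vhost_scsi_tpg from the embedded se_portal_group with container_of and then return one of its fields (the tport, the tag, the fabric protection flag). A minimal stand-alone sketch of that container_of pattern, using simplified stand-in structs rather than the kernel's types:

/* Stand-alone sketch of the container_of pattern used above.  Struct
 * and function names are simplified stand-ins, not the kernel's. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct se_tpg_like {                    /* plays se_portal_group */
	int proto_id;
};

struct tpg_like {                       /* plays vhost_scsi_tpg */
	unsigned short tport_tpgt;
	struct se_tpg_like se_tpg;      /* embedded, as in the driver */
};

static unsigned short get_tag(struct se_tpg_like *se_tpg)
{
	/* Recover the containing tpg from the embedded member. */
	struct tpg_like *tpg = container_of(se_tpg, struct tpg_like, se_tpg);

	return tpg->tport_tpgt;
}

int main(void)
{
	struct tpg_like tpg = { .tport_tpgt = 5 };

	printf("tpgt=%hu\n", get_tag(&tpg.se_tpg));
	return 0;
}

The same cast-back idiom recurs at lines 2173, 2187, 2200, 2222, 2324, 2346 and 2466, wherever the target core hands the driver only the embedded se_tpg.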
595 vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
608 tv_nexus = tpg->tpg_nexus;
1015 struct vhost_scsi_tpg **vs_tpg, *tpg;
1019 tpg = READ_ONCE(vs_tpg[*vc->target]);
1020 if (unlikely(!tpg)) {
1024 *tpgp = tpg;
1040 struct vhost_scsi_tpg **vs_tpg, *tpg;
1098 ret = vhost_scsi_get_req(vq, &vc, &tpg);
1195 cmd = vhost_scsi_get_cmd(vq, tpg, cdb, tag, lun, task_attr,
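
In the request path (lines 1015-1024 and 1040-1195), vhost_scsi_get_req turns the target index carried in the virtio request into a vhost_scsi_tpg pointer by indexing the per-device vs_tpg table with READ_ONCE, treating a NULL slot as "no such target". A simplified user-space sketch of that lockless lookup shape, with a C11 relaxed atomic load standing in for READ_ONCE and stand-in type names:

/* Sketch of the "lockless table lookup, NULL means no target" shape.
 * Types and names are illustrative, not the driver's. */
#include <stdatomic.h>
#include <stdio.h>

#define MAX_TARGET 256

struct tpg_like { unsigned short tpgt; };

/* Table published by the endpoint-setup path; readers only load slots. */
static _Atomic(struct tpg_like *) vs_tpg_like[MAX_TARGET];

static struct tpg_like *lookup_target(unsigned int target)
{
	if (target >= MAX_TARGET)
		return NULL;

	/* A relaxed atomic load stands in for READ_ONCE(): a tear-free
	 * read of a slot that may legitimately still be NULL. */
	return atomic_load_explicit(&vs_tpg_like[target],
				    memory_order_relaxed);
}

int main(void)
{
	static struct tpg_like t0 = { .tpgt = 1 };

	atomic_store(&vs_tpg_like[0], &t0);
	printf("target 0 %s\n", lookup_target(0) ? "present" : "absent");
	printf("target 7 %s\n", lookup_target(7) ? "present" : "absent");
	return 0;
}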
1301 vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
1314 if (!tpg->tpg_nexus || !tpg->tpg_nexus->tvn_se_sess) {
1331 if (target_submit_tmr(&tmf->se_cmd, tpg->tpg_nexus->tvn_se_sess, NULL,
1371 struct vhost_scsi_tpg *tpg;
1453 ret = vhost_scsi_get_req(vq, &vc, &tpg);
1458 vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc);
1489 struct vhost_scsi_tpg *tpg, struct se_lun *lun,
1498 if (tpg && lun) {
1505 evt->event.lun[1] = tpg->tport_tpgt;
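
Lines 1489-1505 fill in the virtio-scsi event LUN for a hotplug/hotunplug notification; the listing shows only byte 1 being set to tpg->tport_tpgt, and the remaining bytes follow the virtio-scsi single-level LUN convention. A small sketch of that packing; the helper name and the byte layout beyond what the listing shows are my reading of the virtio-scsi format, so treat them as assumptions:

/* Sketch of the virtio-scsi event LUN packing hinted at around line
 * 1505.  pack_virtio_scsi_lun() is a hypothetical helper, not a
 * kernel API. */
#include <stdint.h>
#include <stdio.h>

static void pack_virtio_scsi_lun(uint8_t lun_field[8],
				 uint16_t target, uint32_t unpacked_lun)
{
	lun_field[0] = 1;                          /* virtio-scsi addressing marker */
	lun_field[1] = (uint8_t)target;            /* tpg->tport_tpgt in the driver */
	lun_field[2] = (unpacked_lun >> 8) | 0x40; /* flat-space LUN, high byte (assumed) */
	lun_field[3] = unpacked_lun & 0xFF;        /* flat-space LUN, low byte (assumed) */
	lun_field[4] = lun_field[5] = lun_field[6] = lun_field[7] = 0;
}

int main(void)
{
	uint8_t lun[8];
	int i;

	pack_virtio_scsi_lun(lun, 2, 3);
	for (i = 0; i < 8; i++)
		printf("%02x ", lun[i]);
	printf("\n");
	return 0;
}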
1656 * vs->dev.mutex -> vhost_scsi_mutex -> tpg->tv_tpg_mutex -> vq->mutex
1664 struct vhost_scsi_tpg *tpg;
1691 list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
1692 mutex_lock(&tpg->tv_tpg_mutex);
1693 if (!tpg->tpg_nexus) {
1694 mutex_unlock(&tpg->tv_tpg_mutex);
1697 if (tpg->tv_tpg_vhost_count != 0) {
1698 mutex_unlock(&tpg->tv_tpg_mutex);
1701 tv_tport = tpg->tport;
1704 if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
1705 mutex_unlock(&tpg->tv_tpg_mutex);
1716 se_tpg = &tpg->se_tpg;
1720 mutex_unlock(&tpg->tv_tpg_mutex);
1724 tpg->tv_tpg_vhost_count++;
1725 tpg->vhost_scsi = vs;
1726 vs_tpg[tpg->tport_tpgt] = tpg;
1729 mutex_unlock(&tpg->tv_tpg_mutex);
1775 tpg = vs_tpg[i];
1776 if (tpg) {
1777 mutex_lock(&tpg->tv_tpg_mutex);
1778 tpg->vhost_scsi = NULL;
1779 tpg->tv_tpg_vhost_count--;
1780 mutex_unlock(&tpg->tv_tpg_mutex);
1781 target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
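
The set_endpoint path (roughly lines 1656-1781) documents the lock ordering up front, then for each candidate tpg takes tv_tpg_mutex, checks that a nexus exists and that the tpg is not already claimed, bumps tv_tpg_vhost_count and publishes the tpg into vs_tpg; on a later failure it walks what it published, drops the count and calls target_undepend_item. A simplified user-space sketch of that claim-then-rollback shape, with pthread mutexes standing in for the kernel mutexes and the configfs dependency step left out (all names are illustrative):

/* Simplified sketch of the "claim each tpg, publish, roll back on
 * failure" shape from the set_endpoint path.  Not kernel code. */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

#define MAX_TARGET 8

struct tpg_like {
	pthread_mutex_t lock;        /* plays tv_tpg_mutex */
	int vhost_count;             /* plays tv_tpg_vhost_count */
	unsigned short tpgt;
};

static struct tpg_like *table[MAX_TARGET];   /* plays vs_tpg */

static bool claim(struct tpg_like *tpg)
{
	pthread_mutex_lock(&tpg->lock);
	if (table[tpg->tpgt]) {              /* already bound elsewhere: reject */
		pthread_mutex_unlock(&tpg->lock);
		return false;
	}
	tpg->vhost_count++;                  /* record the new user */
	table[tpg->tpgt] = tpg;              /* publish for the I/O path */
	pthread_mutex_unlock(&tpg->lock);
	return true;
}

static void rollback(void)
{
	int i;

	for (i = 0; i < MAX_TARGET; i++) {
		struct tpg_like *tpg = table[i];

		if (!tpg)
			continue;
		pthread_mutex_lock(&tpg->lock);
		tpg->vhost_count--;          /* undo the claim */
		pthread_mutex_unlock(&tpg->lock);
		table[i] = NULL;             /* and the publication */
	}
}

int main(void)
{
	static struct tpg_like t = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.tpgt = 3,
	};

	if (claim(&t))
		rollback();                  /* pretend a later step failed */
	return 0;
}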
1796 struct vhost_scsi_tpg *tpg;
1818 tpg = vs->vs_tpg[target];
1819 if (!tpg)
1822 tv_tport = tpg->tport;
1829 pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
1831 tv_tport->tport_name, tpg->tport_tpgt,
1857 * We can now release our hold on the tpg and sessions and userspace
1862 tpg = vs->vs_tpg[target];
1863 if (!tpg)
1866 mutex_lock(&tpg->tv_tpg_mutex);
1868 tpg->tv_tpg_vhost_count--;
1869 tpg->vhost_scsi = NULL;
1872 mutex_unlock(&tpg->tv_tpg_mutex);
1874 se_tpg = &tpg->se_tpg;
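
The clear_endpoint path (lines 1796-1874) first verifies that the endpoint named by userspace actually matches the tpg bound at that target: the pr_warn at lines 1829-1831 fires when the tport name or tpgt differs, and the request is then rejected. A small sketch of that identity check, with stand-in types and made-up example WWPNs:

/* Sketch of the endpoint-identity check behind the pr_warn at lines
 * 1829-1831.  Struct names, field sizes and the example WWPNs are
 * all stand-ins. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct tport_like { char tport_name[64]; };

struct tpg_like {
	struct tport_like *tport;
	unsigned short tport_tpgt;
};

static bool endpoint_matches(const struct tpg_like *tpg,
			     const char *wwpn, unsigned short tpgt)
{
	/* Both the tport name and the tpgt have to match the request. */
	return strcmp(tpg->tport->tport_name, wwpn) == 0 &&
	       tpg->tport_tpgt == tpgt;
}

int main(void)
{
	struct tport_like tport = { .tport_name = "naa.500140500000000a" };
	struct tpg_like tpg = { .tport = &tport, .tport_tpgt = 1 };

	if (!endpoint_matches(&tpg, "naa.5001405000000001", 1))
		printf("mismatch: the driver would warn and reject the request\n");
	return 0;
}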
2128 vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
2132 struct vhost_scsi *vs = tpg->vhost_scsi;
2154 vhost_scsi_send_evt(vs, vq, tpg, lun,
2160 static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
2162 vhost_scsi_do_plug(tpg, lun, true);
2165 static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
2167 vhost_scsi_do_plug(tpg, lun, false);
2173 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2176 mutex_lock(&tpg->tv_tpg_mutex);
2177 tpg->tv_tpg_port_count++;
2178 vhost_scsi_hotplug(tpg, lun);
2179 mutex_unlock(&tpg->tv_tpg_mutex);
2187 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2190 mutex_lock(&tpg->tv_tpg_mutex);
2191 tpg->tv_tpg_port_count--;
2192 vhost_scsi_hotunplug(tpg, lun);
2193 mutex_unlock(&tpg->tv_tpg_mutex);
2200 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2213 tpg->tv_fabric_prot_type = val;
2222 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2225 return sysfs_emit(page, "%d\n", tpg->tv_fabric_prot_type);
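
Lines 2200-2225 implement the fabric_prot_type configfs attribute: the store side parses an integer from the page buffer into tpg->tv_fabric_prot_type, and the show side reports it back with sysfs_emit. A user-space sketch of the parse-and-bound-check shape; the exact set of accepted values is not visible in the listing, so the 0/1 bound below is an assumption:

/* Sketch of a configfs/sysfs-style "store" parse for the attribute at
 * lines 2200-2213.  Not the driver's code; the accepted-value check
 * is assumed. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int fabric_prot_type;              /* plays tpg->tv_fabric_prot_type */

static long store_fabric_prot_type(const char *page)
{
	char *end;
	unsigned long val = strtoul(page, &end, 0);

	if (end == page)
		return -EINVAL;           /* no digits parsed */
	if (val > 1)
		return -EINVAL;           /* assumed bound, see lead-in */
	fabric_prot_type = (int)val;
	return 0;
}

int main(void)
{
	long ret = store_fabric_prot_type("1");

	printf("store \"1\" -> %ld, value now %d\n", ret, fabric_prot_type);
	return 0;
}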
2235 static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
2240 mutex_lock(&tpg->tv_tpg_mutex);
2241 if (tpg->tpg_nexus) {
2242 mutex_unlock(&tpg->tv_tpg_mutex);
2243 pr_debug("tpg->tpg_nexus already exists\n");
2249 mutex_unlock(&tpg->tv_tpg_mutex);
2258 tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg, 0, 0,
2262 mutex_unlock(&tpg->tv_tpg_mutex);
2266 tpg->tpg_nexus = tv_nexus;
2268 mutex_unlock(&tpg->tv_tpg_mutex);
2272 static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
2277 mutex_lock(&tpg->tv_tpg_mutex);
2278 tv_nexus = tpg->tpg_nexus;
2280 mutex_unlock(&tpg->tv_tpg_mutex);
2286 mutex_unlock(&tpg->tv_tpg_mutex);
2290 if (tpg->tv_tpg_port_count != 0) {
2291 mutex_unlock(&tpg->tv_tpg_mutex);
2294 tpg->tv_tpg_port_count);
2298 if (tpg->tv_tpg_vhost_count != 0) {
2299 mutex_unlock(&tpg->tv_tpg_mutex);
2302 tpg->tv_tpg_vhost_count);
2307 " %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
2314 tpg->tpg_nexus = NULL;
2315 mutex_unlock(&tpg->tv_tpg_mutex);
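
vhost_scsi_drop_nexus (lines 2272-2315) only releases the nexus when nothing still references the tpg: under tv_tpg_mutex it bails out if there is no nexus, if tv_tpg_port_count is nonzero, or if tv_tpg_vhost_count is nonzero. A simplified sketch of that guarded-teardown shape with stand-in types; the error codes are my assumption:

/* Sketch of the drop_nexus-style guarded teardown: refuse while the
 * tpg is still referenced.  Stand-in types; the error codes are my
 * assumption. */
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

struct nexus_like { int unused; };

struct tpg_like {
	pthread_mutex_t lock;        /* plays tv_tpg_mutex */
	int port_count;              /* plays tv_tpg_port_count */
	int vhost_count;             /* plays tv_tpg_vhost_count */
	struct nexus_like *nexus;    /* plays tpg_nexus */
};

static int drop_nexus_like(struct tpg_like *tpg)
{
	int ret = 0;

	pthread_mutex_lock(&tpg->lock);
	if (!tpg->nexus) {
		ret = -ENODEV;                        /* nothing to drop */
	} else if (tpg->port_count || tpg->vhost_count) {
		ret = -EBUSY;                         /* still referenced */
	} else {
		free(tpg->nexus);                     /* safe to tear down */
		tpg->nexus = NULL;
	}
	pthread_mutex_unlock(&tpg->lock);
	return ret;
}

int main(void)
{
	struct tpg_like tpg = { .lock = PTHREAD_MUTEX_INITIALIZER };

	tpg.nexus = calloc(1, sizeof(*tpg.nexus));
	tpg.port_count = 1;
	drop_nexus_like(&tpg);       /* refused: a port still counts against the tpg */
	tpg.port_count = 0;
	drop_nexus_like(&tpg);       /* now tears the nexus down */
	return 0;
}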
2324 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2329 mutex_lock(&tpg->tv_tpg_mutex);
2330 tv_nexus = tpg->tpg_nexus;
2332 mutex_unlock(&tpg->tv_tpg_mutex);
2337 mutex_unlock(&tpg->tv_tpg_mutex);
2346 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2348 struct vhost_scsi_tport *tport_wwn = tpg->tport;
2355 ret = vhost_scsi_drop_nexus(tpg);
2413 ret = vhost_scsi_make_nexus(tpg, port_ptr);
2433 struct vhost_scsi_tpg *tpg;
2442 tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
2443 if (!tpg) {
2447 mutex_init(&tpg->tv_tpg_mutex);
2448 INIT_LIST_HEAD(&tpg->tv_tpg_list);
2449 tpg->tport = tport;
2450 tpg->tport_tpgt = tpgt;
2452 ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
2454 kfree(tpg);
2458 list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
2461 return &tpg->se_tpg;
2466 struct vhost_scsi_tpg *tpg = container_of(se_tpg,
2470 list_del(&tpg->tv_tpg_list);
2475 vhost_scsi_drop_nexus(tpg);
2480 kfree(tpg);
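
Finally, lines 2433-2480 show the tpg lifecycle itself: make_tpg allocates the structure, initialises tv_tpg_mutex and tv_tpg_list, records the tport and tpgt, registers with core_tpg_register and adds itself to the global vhost_scsi_list; drop_tpg removes it from the list, drops any nexus and frees it. A simplified user-space sketch of the allocate-init-register half, with a plain singly linked list standing in for the kernel list_head and the global mutex inferred from the lock-ordering comment at line 1656 (all names are stand-ins):

/* Simplified sketch of the make_tpg shape at lines 2433-2461:
 * allocate, init, then register on a global list under a global
 * mutex.  Stand-in types, not kernel code. */
#include <pthread.h>
#include <stdlib.h>

struct tpg_like {
	pthread_mutex_t lock;        /* plays tv_tpg_mutex */
	unsigned short tpgt;         /* plays tport_tpgt */
	struct tpg_like *next;       /* plays the tv_tpg_list linkage */
};

static pthread_mutex_t tpg_list_lock = PTHREAD_MUTEX_INITIALIZER; /* plays vhost_scsi_mutex */
static struct tpg_like *tpg_list;                                 /* plays vhost_scsi_list */

static struct tpg_like *make_tpg_like(unsigned short tpgt)
{
	struct tpg_like *tpg = calloc(1, sizeof(*tpg));   /* kzalloc analogue */

	if (!tpg)
		return NULL;
	pthread_mutex_init(&tpg->lock, NULL);
	tpg->tpgt = tpgt;
	/* core_tpg_register() happens here in the driver; on failure it
	 * frees the tpg and bails out. */
	pthread_mutex_lock(&tpg_list_lock);
	tpg->next = tpg_list;        /* list_add_tail analogue (insertion order differs) */
	tpg_list = tpg;
	pthread_mutex_unlock(&tpg_list_lock);
	return tpg;
}

int main(void)
{
	return make_tpg_like(1) ? 0 : 1;
}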