// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SBP2 target driver (SCSI over IEEE1394 in target mode)
 *
 * Copyright (C) 2011  Chris Boot <bootc@bootc.net>
 */

#define KMSG_COMPONENT "sbp_target"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <asm/unaligned.h>

#include "sbp_target.h"

/* FireWire address region for management and command block address handlers */
static const struct fw_address_region sbp_register_region = {
	.start	= CSR_REGISTER_BASE + 0x10000,
	.end	= 0x1000000000000ULL,
};

static const u32 sbp_unit_directory_template[] = {
	0x1200609e, /* unit_specifier_id: NCITS/T10 */
	0x13010483, /* unit_sw_version: 1155D Rev 4 */
	0x3800609e, /* command_set_specifier_id: NCITS/T10 */
	0x390104d8, /* command_set: SPC-2 */
	0x3b000000, /* command_set_revision: 0 */
	0x3c000001, /* firmware_revision: 1 */
};

#define SESSION_MAINTENANCE_INTERVAL HZ

static atomic_t login_id = ATOMIC_INIT(0);

static void session_maintenance_work(struct work_struct *);
static int sbp_run_transaction(struct fw_card *, int, int, int, int,
		unsigned long long, void *, size_t);

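/*
 * Read the peer's EUI-64 (GUID) out of its config ROM: quadlets 3 and 4 of
 * the bus information block hold the high and low halves of the GUID.
 */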
static int read_peer_guid(u64 *guid, const struct sbp_management_request *req)
{
	int ret;
	__be32 high, low;

	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
			req->node_addr, req->generation, req->speed,
			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 3 * 4,
			&high, sizeof(high));
	if (ret != RCODE_COMPLETE)
		return ret;

	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
			req->node_addr, req->generation, req->speed,
			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 4 * 4,
			&low, sizeof(low));
	if (ret != RCODE_COMPLETE)
		return ret;

	*guid = (u64)be32_to_cpu(high) << 32 | be32_to_cpu(low);

	return RCODE_COMPLETE;
}

static struct sbp_session *sbp_session_find_by_guid(
	struct sbp_tpg *tpg, u64 guid)
{
	struct se_session *se_sess;
	struct sbp_session *sess, *found = NULL;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;
		if (sess->guid == guid)
			found = sess;
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return found;
}

static struct sbp_login_descriptor *sbp_login_find_by_lun(
		struct sbp_session *session, u32 unpacked_lun)
{
	struct sbp_login_descriptor *login, *found = NULL;

	spin_lock_bh(&session->lock);
	list_for_each_entry(login, &session->login_list, link) {
		if (login->login_lun == unpacked_lun)
			found = login;
	}
	spin_unlock_bh(&session->lock);

	return found;
}

static int sbp_login_count_all_by_lun(
		struct sbp_tpg *tpg,
		u32 unpacked_lun,
		int exclusive)
{
	struct se_session *se_sess;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login;
	int count = 0;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;

		spin_lock_bh(&sess->lock);
		list_for_each_entry(login, &sess->login_list, link) {
			if (login->login_lun != unpacked_lun)
				continue;

			if (!exclusive || login->exclusive)
				count++;
		}
		spin_unlock_bh(&sess->lock);
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return count;
}

static struct sbp_login_descriptor *sbp_login_find_by_id(
	struct sbp_tpg *tpg, int login_id)
{
	struct se_session *se_sess;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login, *found = NULL;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;

		spin_lock_bh(&sess->lock);
		list_for_each_entry(login, &sess->login_list, link) {
			if (login->login_id == login_id)
				found = login;
		}
		spin_unlock_bh(&sess->lock);
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return found;
}

static u32 sbp_get_lun_from_tpg(struct sbp_tpg *tpg, u32 login_lun, int *err)
{
	struct se_portal_group *se_tpg = &tpg->se_tpg;
	struct se_lun *se_lun;

	rcu_read_lock();
	hlist_for_each_entry_rcu(se_lun, &se_tpg->tpg_lun_hlist, link) {
		if (se_lun->unpacked_lun == login_lun) {
			rcu_read_unlock();
			*err = 0;
			return login_lun;
		}
	}
	rcu_read_unlock();

	*err = -ENODEV;
	return login_lun;
}

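/*
 * Create a new session for an initiator identified by its GUID. The tag
 * pool sized here (128 tags, one struct sbp_target_request each) is what
 * sbp_mgt_get_req() later allocates requests from.
 */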
static struct sbp_session *sbp_session_create(
		struct sbp_tpg *tpg,
		u64 guid)
{
	struct sbp_session *sess;
	int ret;
	char guid_str[17];

	snprintf(guid_str, sizeof(guid_str), "%016llx", guid);

	sess = kmalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&sess->lock);
	INIT_LIST_HEAD(&sess->login_list);
	INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
	sess->guid = guid;

	sess->se_sess = target_setup_session(&tpg->se_tpg, 128,
					     sizeof(struct sbp_target_request),
					     TARGET_PROT_NORMAL, guid_str,
					     sess, NULL);
	if (IS_ERR(sess->se_sess)) {
		pr_err("failed to init se_session\n");
		ret = PTR_ERR(sess->se_sess);
		kfree(sess);
		return ERR_PTR(ret);
	}

	return sess;
}

static void sbp_session_release(struct sbp_session *sess, bool cancel_work)
{
	spin_lock_bh(&sess->lock);
	if (!list_empty(&sess->login_list)) {
		spin_unlock_bh(&sess->lock);
		return;
	}
	spin_unlock_bh(&sess->lock);

	if (cancel_work)
		cancel_delayed_work_sync(&sess->maint_work);

	target_remove_session(sess->se_sess);

	if (sess->card)
		fw_card_put(sess->card);

	kfree(sess);
}

static void sbp_target_agent_unregister(struct sbp_target_agent *);

static void sbp_login_release(struct sbp_login_descriptor *login,
	bool cancel_work)
{
	struct sbp_session *sess = login->sess;

	/* FIXME: abort/wait on tasks */

	sbp_target_agent_unregister(login->tgt_agt);

	if (sess) {
		spin_lock_bh(&sess->lock);
		list_del(&login->link);
		spin_unlock_bh(&sess->lock);

		sbp_session_release(sess, cancel_work);
	}

	kfree(login);
}

static struct sbp_target_agent *sbp_target_agent_register(
	struct sbp_login_descriptor *);

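/*
 * Handle a LOGIN management ORB: validate the requested LUN, apply the
 * exclusive-login and max_logins_per_lun policies, create or reuse a
 * session for the initiator's GUID, register a command block agent, and
 * write the login response block back to the initiator.
 */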
static void sbp_management_request_login(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login;
	struct sbp_login_response_block *response;
	u64 guid;
	u32 unpacked_lun;
	int login_response_len, ret;

	unpacked_lun = sbp_get_lun_from_tpg(tpg,
			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)), &ret);
	if (ret) {
		pr_notice("login to unknown LUN: %d\n",
			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LUN_NOTSUPP));
		return;
	}

	ret = read_peer_guid(&guid, req);
	if (ret != RCODE_COMPLETE) {
		pr_warn("failed to read peer GUID: %d\n", ret);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n",
		unpacked_lun, guid);

	sess = sbp_session_find_by_guid(tpg, guid);
	if (sess) {
		login = sbp_login_find_by_lun(sess, unpacked_lun);
		if (login) {
			pr_notice("initiator already logged-in\n");

			/*
			 * SBP-2 R4 says we should return access denied, but
			 * that can confuse initiators. Instead we need to
			 * treat this like a reconnect, but send the login
			 * response block like a fresh login.
			 *
			 * This is required particularly in the case of Apple
			 * devices booting off the FireWire target, where
			 * the firmware has an active login to the target. When
			 * the OS takes control of the session it issues its own
			 * LOGIN rather than a RECONNECT. To avoid the machine
			 * waiting until the reconnect_hold expires, we can skip
			 * the ACCESS_DENIED errors to speed things up.
			 */

			goto already_logged_in;
		}
	}

	/*
	 * check exclusive bit in login request
	 * reject with access_denied if any logins present
	 */
	if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) &&
			sbp_login_count_all_by_lun(tpg, unpacked_lun, 0)) {
		pr_warn("refusing exclusive login with other active logins\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	/*
	 * check exclusive bit in any existing login descriptor
	 * reject with access_denied if any exclusive logins present
	 */
	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 1)) {
		pr_warn("refusing login while another exclusive login present\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	/*
	 * check we haven't exceeded the number of allowed logins
	 * reject with resources_unavailable if we have
	 */
	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 0) >=
			tport->max_logins_per_lun) {
		pr_warn("max number of logins reached\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	if (!sess) {
		sess = sbp_session_create(tpg, guid);
		if (IS_ERR(sess)) {
			switch (PTR_ERR(sess)) {
			case -EPERM:
				ret = SBP_STATUS_ACCESS_DENIED;
				break;
			default:
				ret = SBP_STATUS_RESOURCES_UNAVAIL;
				break;
			}

			req->status.status = cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_SBP_STATUS(ret));
			return;
		}

		sess->node_id = req->node_addr;
		sess->card = fw_card_get(req->card);
		sess->generation = req->generation;
		sess->speed = req->speed;

		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	}

	/* only take the latest reconnect_hold into account */
	sess->reconnect_hold = min(
		1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)),
		tport->max_reconnect_timeout) - 1;

	login = kmalloc(sizeof(*login), GFP_KERNEL);
	if (!login) {
		pr_err("failed to allocate login descriptor\n");

		sbp_session_release(sess, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	login->sess = sess;
	login->login_lun = unpacked_lun;
	login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
	login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
	login->login_id = atomic_inc_return(&login_id);

	login->tgt_agt = sbp_target_agent_register(login);
	if (IS_ERR(login->tgt_agt)) {
		ret = PTR_ERR(login->tgt_agt);
		pr_err("failed to map command block handler: %d\n", ret);

		sbp_session_release(sess, true);
		kfree(login);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	spin_lock_bh(&sess->lock);
	list_add_tail(&login->link, &sess->login_list);
	spin_unlock_bh(&sess->lock);

already_logged_in:
	response = kzalloc(sizeof(*response), GFP_KERNEL);
	if (!response) {
		pr_err("failed to allocate login response block\n");

		sbp_login_release(login, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	login_response_len = clamp_val(
			LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)),
			12, sizeof(*response));
	response->misc = cpu_to_be32(
		((login_response_len & 0xffff) << 16) |
		(login->login_id & 0xffff));
	response->reconnect_hold = cpu_to_be32(sess->reconnect_hold & 0xffff);
	addr_to_sbp2_pointer(login->tgt_agt->handler.offset,
		&response->command_block_agent);

	ret = sbp_run_transaction(sess->card, TCODE_WRITE_BLOCK_REQUEST,
		sess->node_id, sess->generation, sess->speed,
		sbp2_pointer_to_addr(&req->orb.ptr2), response,
		login_response_len);
	if (ret != RCODE_COMPLETE) {
		pr_debug("failed to write login response block: %x\n", ret);

		kfree(response);
		sbp_login_release(login, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	kfree(response);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}

static void sbp_management_request_query_logins(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	pr_notice("QUERY LOGINS not implemented\n");
	/* FIXME: implement */

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
}

static void sbp_management_request_reconnect(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	int ret;
	u64 guid;
	struct sbp_login_descriptor *login;

	ret = read_peer_guid(&guid, req);
	if (ret != RCODE_COMPLETE) {
		pr_warn("failed to read peer GUID: %d\n", ret);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	pr_notice("mgt_agent RECONNECT from %016llx\n", guid);

	login = sbp_login_find_by_id(tpg,
		RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)));

	if (!login) {
		pr_err("mgt_agent RECONNECT unknown login ID\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	if (login->sess->guid != guid) {
		pr_err("mgt_agent RECONNECT login GUID doesn't match\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	spin_lock_bh(&login->sess->lock);
	if (login->sess->card)
		fw_card_put(login->sess->card);

	/* update the node details */
	login->sess->generation = req->generation;
	login->sess->node_id = req->node_addr;
	login->sess->card = fw_card_get(req->card);
	login->sess->speed = req->speed;
	spin_unlock_bh(&login->sess->lock);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}

static void sbp_management_request_logout(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	int id;
	struct sbp_login_descriptor *login;

	id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));

	login = sbp_login_find_by_id(tpg, id);
	if (!login) {
		pr_warn("cannot find login: %d\n", id);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LOGIN_ID_UNKNOWN));
		return;
	}

	pr_info("mgt_agent LOGOUT from LUN %d session %d\n",
		login->login_lun, login->login_id);

	if (req->node_addr != login->sess->node_id) {
		pr_warn("logout from different node ID\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	sbp_login_release(login, true);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}

static void session_check_for_reset(struct sbp_session *sess)
{
	bool card_valid = false;

	spin_lock_bh(&sess->lock);

	if (sess->card) {
		spin_lock_irq(&sess->card->lock);
		card_valid = (sess->card->local_node != NULL);
		spin_unlock_irq(&sess->card->lock);

		if (!card_valid) {
			fw_card_put(sess->card);
			sess->card = NULL;
		}
	}

	if (!card_valid || (sess->generation != sess->card->generation)) {
		pr_info("Waiting for reconnect from node: %016llx\n",
				sess->guid);

		sess->node_id = -1;
		sess->reconnect_expires = get_jiffies_64() +
			((sess->reconnect_hold + 1) * HZ);
	}

	spin_unlock_bh(&sess->lock);
}

static void session_reconnect_expired(struct sbp_session *sess)
{
	struct sbp_login_descriptor *login, *temp;
	LIST_HEAD(login_list);

	pr_info("Reconnect timer expired for node: %016llx\n", sess->guid);

	spin_lock_bh(&sess->lock);
	list_for_each_entry_safe(login, temp, &sess->login_list, link) {
		login->sess = NULL;
		list_move_tail(&login->link, &login_list);
	}
	spin_unlock_bh(&sess->lock);

	list_for_each_entry_safe(login, temp, &login_list, link) {
		list_del(&login->link);
		sbp_login_release(login, false);
	}

	sbp_session_release(sess, false);
}

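/*
 * Periodic per-session housekeeping: while the initiator is present, watch
 * for bus resets; once the node is gone, wait out reconnect_hold and then
 * tear the session down via session_reconnect_expired().
 */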
static void session_maintenance_work(struct work_struct *work)
{
	struct sbp_session *sess = container_of(work, struct sbp_session,
			maint_work.work);

	/* could be called while tearing down the session */
	spin_lock_bh(&sess->lock);
	if (list_empty(&sess->login_list)) {
		spin_unlock_bh(&sess->lock);
		return;
	}
	spin_unlock_bh(&sess->lock);

	if (sess->node_id != -1) {
		/* check for bus reset and make node_id invalid */
		session_check_for_reset(sess);

		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	} else if (!time_after64(get_jiffies_64(), sess->reconnect_expires)) {
		/* still waiting for reconnect */
		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	} else {
		/* reconnect timeout has expired */
		session_reconnect_expired(sess);
	}
}

static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	int state;

	switch (tcode) {
	case TCODE_READ_QUADLET_REQUEST:
		pr_debug("tgt_agent AGENT_STATE READ\n");

		spin_lock_bh(&agent->lock);
		state = agent->state;
		spin_unlock_bh(&agent->lock);

		*(__be32 *)data = cpu_to_be32(state);

		return RCODE_COMPLETE;

	case TCODE_WRITE_QUADLET_REQUEST:
		/* ignored */
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_agent_reset(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		pr_debug("tgt_agent AGENT_RESET\n");
		spin_lock_bh(&agent->lock);
		agent->state = AGENT_STATE_RESET;
		spin_unlock_bh(&agent->lock);
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	struct sbp2_pointer *ptr = data;

	switch (tcode) {
	case TCODE_WRITE_BLOCK_REQUEST:
		spin_lock_bh(&agent->lock);
		if (agent->state != AGENT_STATE_SUSPENDED &&
				agent->state != AGENT_STATE_RESET) {
			spin_unlock_bh(&agent->lock);
			pr_notice("Ignoring ORB_POINTER write while active.\n");
			return RCODE_CONFLICT_ERROR;
		}
		agent->state = AGENT_STATE_ACTIVE;
		spin_unlock_bh(&agent->lock);

		agent->orb_pointer = sbp2_pointer_to_addr(ptr);
		agent->doorbell = false;

		pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n",
				agent->orb_pointer);

		queue_work(system_unbound_wq, &agent->work);

		return RCODE_COMPLETE;

	case TCODE_READ_BLOCK_REQUEST:
		pr_debug("tgt_agent ORB_POINTER READ\n");
		spin_lock_bh(&agent->lock);
		addr_to_sbp2_pointer(agent->orb_pointer, ptr);
		spin_unlock_bh(&agent->lock);
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		spin_lock_bh(&agent->lock);
		if (agent->state != AGENT_STATE_SUSPENDED) {
			spin_unlock_bh(&agent->lock);
			pr_debug("Ignoring DOORBELL while active.\n");
			return RCODE_CONFLICT_ERROR;
		}
		agent->state = AGENT_STATE_ACTIVE;
		spin_unlock_bh(&agent->lock);

		agent->doorbell = true;

		pr_debug("tgt_agent DOORBELL\n");

		queue_work(system_unbound_wq, &agent->work);

		return RCODE_COMPLETE;

	case TCODE_READ_QUADLET_REQUEST:
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_unsolicited_status_enable(struct fw_card *card,
		int tcode, void *data, struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		pr_debug("tgt_agent UNSOLICITED_STATUS_ENABLE\n");
		/* ignored as we don't send unsolicited status */
		return RCODE_COMPLETE;

	case TCODE_READ_QUADLET_REQUEST:
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

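/*
 * Dispatch accesses to the command block agent's register block. Offsets
 * within the block: 0x00 AGENT_STATE, 0x04 AGENT_RESET, 0x08 ORB_POINTER,
 * 0x10 DOORBELL, 0x14 UNSOLICITED_STATUS_ENABLE. Requests from the wrong
 * node or a stale bus generation are rejected.
 */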
static void tgt_agent_rw(struct fw_card *card, struct fw_request *request,
		int tcode, int destination, int source, int generation,
		unsigned long long offset, void *data, size_t length,
		void *callback_data)
{
	struct sbp_target_agent *agent = callback_data;
	struct sbp_session *sess = agent->login->sess;
	int sess_gen, sess_node, rcode;

	spin_lock_bh(&sess->lock);
	sess_gen = sess->generation;
	sess_node = sess->node_id;
	spin_unlock_bh(&sess->lock);

	if (generation != sess_gen) {
		pr_notice("ignoring request with wrong generation\n");
		rcode = RCODE_TYPE_ERROR;
		goto out;
	}

	if (source != sess_node) {
		pr_notice("ignoring request from foreign node (%x != %x)\n",
				source, sess_node);
		rcode = RCODE_TYPE_ERROR;
		goto out;
	}

	/* turn offset into the offset from the start of the block */
	offset -= agent->handler.offset;

	if (offset == 0x00 && length == 4) {
		/* AGENT_STATE */
		rcode = tgt_agent_rw_agent_state(card, tcode, data, agent);
	} else if (offset == 0x04 && length == 4) {
		/* AGENT_RESET */
		rcode = tgt_agent_rw_agent_reset(card, tcode, data, agent);
	} else if (offset == 0x08 && length == 8) {
		/* ORB_POINTER */
		rcode = tgt_agent_rw_orb_pointer(card, tcode, data, agent);
	} else if (offset == 0x10 && length == 4) {
		/* DOORBELL */
		rcode = tgt_agent_rw_doorbell(card, tcode, data, agent);
	} else if (offset == 0x14 && length == 4) {
		/* UNSOLICITED_STATUS_ENABLE */
		rcode = tgt_agent_rw_unsolicited_status_enable(card, tcode,
				data, agent);
	} else {
		rcode = RCODE_ADDRESS_ERROR;
	}

out:
	fw_send_response(card, request, rcode);
}

static void sbp_handle_command(struct sbp_target_request *);
static int sbp_send_status(struct sbp_target_request *);
static void sbp_free_request(struct sbp_target_request *);

static void tgt_agent_process_work(struct work_struct *work)
{
	struct sbp_target_request *req =
		container_of(work, struct sbp_target_request, work);

	pr_debug("tgt_orb ptr:0x%llx next_ORB:0x%llx data_descriptor:0x%llx misc:0x%x\n",
			req->orb_pointer,
			sbp2_pointer_to_addr(&req->orb.next_orb),
			sbp2_pointer_to_addr(&req->orb.data_descriptor),
			be32_to_cpu(req->orb.misc));

	if (req->orb_pointer >> 32)
		pr_debug("ORB with high bits set\n");

	switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) {
	case 0: /* Format specified by this standard */
		sbp_handle_command(req);
		return;
	case 1: /* Reserved for future standardization */
	case 2: /* Vendor-dependent */
		req->status.status |= cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_DEAD(0) |
				STATUS_BLOCK_LEN(1) |
				STATUS_BLOCK_SBP_STATUS(
					SBP_STATUS_REQ_TYPE_NOTSUPP));
		sbp_send_status(req);
		return;
	case 3: /* Dummy ORB */
		req->status.status |= cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_DEAD(0) |
				STATUS_BLOCK_LEN(1) |
				STATUS_BLOCK_SBP_STATUS(
					SBP_STATUS_DUMMY_ORB_COMPLETE));
		sbp_send_status(req);
		return;
	default:
		BUG();
	}
}

/* used to double-check we haven't been issued an AGENT_RESET */
static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
{
	bool active;

	spin_lock_bh(&agent->lock);
	active = (agent->state == AGENT_STATE_ACTIVE);
	spin_unlock_bh(&agent->lock);

	return active;
}

static struct sbp_target_request *sbp_mgt_get_req(struct sbp_session *sess,
	struct fw_card *card, u64 next_orb)
{
	struct se_session *se_sess = sess->se_sess;
	struct sbp_target_request *req;
	int tag, cpu;

	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
	if (tag < 0)
		return ERR_PTR(-ENOMEM);

	req = &((struct sbp_target_request *)se_sess->sess_cmd_map)[tag];
	memset(req, 0, sizeof(*req));
	req->se_cmd.map_tag = tag;
	req->se_cmd.map_cpu = cpu;
	req->se_cmd.tag = next_orb;

	return req;
}

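/*
 * Walk the initiator's linked list of ORBs: fetch each ORB with a block
 * read, queue it for processing (unless this pass was triggered by a
 * DOORBELL, which only re-checks next_ORB), and follow next_ORB until its
 * high bit marks the end of the list, at which point the agent suspends.
 */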
static void tgt_agent_fetch_work(struct work_struct *work)
{
	struct sbp_target_agent *agent =
		container_of(work, struct sbp_target_agent, work);
	struct sbp_session *sess = agent->login->sess;
	struct sbp_target_request *req;
	int ret;
	bool doorbell = agent->doorbell;
	u64 next_orb = agent->orb_pointer;

	while (next_orb && tgt_agent_check_active(agent)) {
		req = sbp_mgt_get_req(sess, sess->card, next_orb);
		if (IS_ERR(req)) {
			spin_lock_bh(&agent->lock);
			agent->state = AGENT_STATE_DEAD;
			spin_unlock_bh(&agent->lock);
			return;
		}

		req->login = agent->login;
		req->orb_pointer = next_orb;

		req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH(
					req->orb_pointer >> 32));
		req->status.orb_low = cpu_to_be32(
				req->orb_pointer & 0xfffffffc);

		/* read in the ORB */
		ret = sbp_run_transaction(sess->card, TCODE_READ_BLOCK_REQUEST,
				sess->node_id, sess->generation, sess->speed,
				req->orb_pointer, &req->orb, sizeof(req->orb));
		if (ret != RCODE_COMPLETE) {
			pr_debug("tgt_orb fetch failed: %x\n", ret);
			req->status.status |= cpu_to_be32(
					STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_FINISHED) |
					STATUS_BLOCK_RESP(
						STATUS_RESP_TRANSPORT_FAILURE) |
					STATUS_BLOCK_DEAD(1) |
					STATUS_BLOCK_LEN(1) |
					STATUS_BLOCK_SBP_STATUS(
						SBP_STATUS_UNSPECIFIED_ERROR));
			spin_lock_bh(&agent->lock);
			agent->state = AGENT_STATE_DEAD;
			spin_unlock_bh(&agent->lock);

			sbp_send_status(req);
			return;
		}

		/* check the next_ORB field */
		if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) {
			next_orb = 0;
			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_FINISHED));
		} else {
			next_orb = sbp2_pointer_to_addr(&req->orb.next_orb);
			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_CONTINUING));
		}

		if (tgt_agent_check_active(agent) && !doorbell) {
			INIT_WORK(&req->work, tgt_agent_process_work);
			queue_work(system_unbound_wq, &req->work);
		} else {
			/* don't process this request, just check next_ORB */
			sbp_free_request(req);
		}

		spin_lock_bh(&agent->lock);
		doorbell = agent->doorbell = false;

		/* check if we should carry on processing */
		if (next_orb)
			agent->orb_pointer = next_orb;
		else
			agent->state = AGENT_STATE_SUSPENDED;

		spin_unlock_bh(&agent->lock);
	}
}

static struct sbp_target_agent *sbp_target_agent_register(
		struct sbp_login_descriptor *login)
{
	struct sbp_target_agent *agent;
	int ret;

	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
	if (!agent)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&agent->lock);

	agent->handler.length = 0x20;
	agent->handler.address_callback = tgt_agent_rw;
	agent->handler.callback_data = agent;

	agent->login = login;
	agent->state = AGENT_STATE_RESET;
	INIT_WORK(&agent->work, tgt_agent_fetch_work);
	agent->orb_pointer = 0;
	agent->doorbell = false;

	ret = fw_core_add_address_handler(&agent->handler,
			&sbp_register_region);
	if (ret < 0) {
		kfree(agent);
		return ERR_PTR(ret);
	}

	return agent;
}

static void sbp_target_agent_unregister(struct sbp_target_agent *agent)
{
	fw_core_remove_address_handler(&agent->handler);
	cancel_work_sync(&agent->work);
	kfree(agent);
}

/*
 * Simple wrapper around fw_run_transaction that retries the transaction up
 * to five times in case of failure, sleeping 5 * attempt^2 microseconds
 * (up to twice that) between attempts.
 */
static int sbp_run_transaction(struct fw_card *card, int tcode, int destination_id,
		int generation, int speed, unsigned long long offset,
		void *payload, size_t length)
{
	int attempt, ret, delay;

	for (attempt = 1; attempt <= 5; attempt++) {
		ret = fw_run_transaction(card, tcode, destination_id,
				generation, speed, offset, payload, length);

		switch (ret) {
		case RCODE_COMPLETE:
		case RCODE_TYPE_ERROR:
		case RCODE_ADDRESS_ERROR:
		case RCODE_GENERATION:
			return ret;

		default:
			delay = 5 * attempt * attempt;
			usleep_range(delay, delay * 2);
		}
	}

	return ret;
}

/*
 * Wrapper around sbp_run_transaction that gets the card, destination,
 * generation and speed out of the request's session.
 */
static int sbp_run_request_transaction(struct sbp_target_request *req,
		int tcode, unsigned long long offset, void *payload,
		size_t length)
{
	struct sbp_login_descriptor *login = req->login;
	struct sbp_session *sess = login->sess;
	struct fw_card *card;
	int node_id, generation, speed, ret;

	spin_lock_bh(&sess->lock);
	card = fw_card_get(sess->card);
	node_id = sess->node_id;
	generation = sess->generation;
	speed = sess->speed;
	spin_unlock_bh(&sess->lock);

	ret = sbp_run_transaction(card, tcode, node_id, generation, speed,
			offset, payload, length);

	fw_card_put(card);

	return ret;
}

static int sbp_fetch_command(struct sbp_target_request *req)
{
	int ret, cmd_len, copy_len;

	cmd_len = scsi_command_size(req->orb.command_block);

	req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL);
	if (!req->cmd_buf)
		return -ENOMEM;

	memcpy(req->cmd_buf, req->orb.command_block,
		min_t(int, cmd_len, sizeof(req->orb.command_block)));

	if (cmd_len > sizeof(req->orb.command_block)) {
		pr_debug("sbp_fetch_command: filling in long command\n");
		copy_len = cmd_len - sizeof(req->orb.command_block);

		ret = sbp_run_request_transaction(req,
				TCODE_READ_BLOCK_REQUEST,
				req->orb_pointer + sizeof(req->orb),
				req->cmd_buf + sizeof(req->orb.command_block),
				copy_len);
		if (ret != RCODE_COMPLETE)
			return -EIO;
	}

	return 0;
}

static int sbp_fetch_page_table(struct sbp_target_request *req)
{
	int pg_tbl_sz, ret;
	struct sbp_page_table_entry *pg_tbl;

	if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc)))
		return 0;

	pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) *
		sizeof(struct sbp_page_table_entry);

	pg_tbl = kmalloc(pg_tbl_sz, GFP_KERNEL);
	if (!pg_tbl)
		return -ENOMEM;

	ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST,
			sbp2_pointer_to_addr(&req->orb.data_descriptor),
			pg_tbl, pg_tbl_sz);
	if (ret != RCODE_COMPLETE) {
		kfree(pg_tbl);
		return -EIO;
	}

	req->pg_tbl = pg_tbl;
	return 0;
}

static void sbp_calc_data_length_direction(struct sbp_target_request *req,
	u32 *data_len, enum dma_data_direction *data_dir)
{
	int data_size, direction, idx;

	data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
	direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc));

	if (!data_size) {
		*data_len = 0;
		*data_dir = DMA_NONE;
		return;
	}

	*data_dir = direction ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	if (req->pg_tbl) {
		*data_len = 0;
		for (idx = 0; idx < data_size; idx++) {
			*data_len += be16_to_cpu(
					req->pg_tbl[idx].segment_length);
		}
	} else {
		*data_len = data_size;
	}
}

static void sbp_handle_command(struct sbp_target_request *req)
{
	struct sbp_login_descriptor *login = req->login;
	struct sbp_session *sess = login->sess;
	int ret, unpacked_lun;
	u32 data_length;
	enum dma_data_direction data_dir;

	ret = sbp_fetch_command(req);
	if (ret) {
		pr_debug("sbp_handle_command: fetch command failed: %d\n", ret);
		goto err;
	}

	ret = sbp_fetch_page_table(req);
	if (ret) {
		pr_debug("sbp_handle_command: fetch page table failed: %d\n",
			ret);
		goto err;
	}

	unpacked_lun = req->login->login_lun;
	sbp_calc_data_length_direction(req, &data_length, &data_dir);

	pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
			req->orb_pointer, unpacked_lun, data_length, data_dir);

	/* only used for printk until we do TMRs */
	req->se_cmd.tag = req->orb_pointer;
	if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
			      req->sense_buf, unpacked_lun, data_length,
			      TCM_SIMPLE_TAG, data_dir, TARGET_SCF_ACK_KREF))
		goto err;

	return;

err:
	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
		STATUS_BLOCK_DEAD(0) |
		STATUS_BLOCK_LEN(1) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
	sbp_send_status(req);
}

/*
 * DMA_TO_DEVICE = read from initiator (SCSI WRITE)
 * DMA_FROM_DEVICE = write to initiator (SCSI READ)
 */
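/*
 * Data is moved in chunks of at most max_payload bytes, walking the page
 * table entries (when present) and the se_cmd scatter-gather list in
 * lockstep; each chunk becomes one FireWire block read or write.
 */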
static int sbp_rw_data(struct sbp_target_request *req)
{
	struct sbp_session *sess = req->login->sess;
	int tcode, sg_miter_flags, max_payload, pg_size, speed, node_id,
		generation, num_pte, length, tfr_length,
		rcode = RCODE_COMPLETE;
	struct sbp_page_table_entry *pte;
	unsigned long long offset;
	struct fw_card *card;
	struct sg_mapping_iter iter;

	if (req->se_cmd.data_direction == DMA_FROM_DEVICE) {
		tcode = TCODE_WRITE_BLOCK_REQUEST;
		sg_miter_flags = SG_MITER_FROM_SG;
	} else {
		tcode = TCODE_READ_BLOCK_REQUEST;
		sg_miter_flags = SG_MITER_TO_SG;
	}

	max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc));
	speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc));

	pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc));
	if (pg_size) {
		pr_err("sbp_rw_data: page size ignored\n");
		pg_size = 0x100 << pg_size;
	}

	spin_lock_bh(&sess->lock);
	card = fw_card_get(sess->card);
	node_id = sess->node_id;
	generation = sess->generation;
	spin_unlock_bh(&sess->lock);

	if (req->pg_tbl) {
		pte = req->pg_tbl;
		num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));

		offset = 0;
		length = 0;
	} else {
		pte = NULL;
		num_pte = 0;

		offset = sbp2_pointer_to_addr(&req->orb.data_descriptor);
		length = req->se_cmd.data_length;
	}

	sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents,
		sg_miter_flags);

	while (length || num_pte) {
		if (!length) {
			offset = (u64)be16_to_cpu(pte->segment_base_hi) << 32 |
				be32_to_cpu(pte->segment_base_lo);
			length = be16_to_cpu(pte->segment_length);

			pte++;
			num_pte--;
		}

		sg_miter_next(&iter);

		tfr_length = min3(length, max_payload, (int)iter.length);

		/* FIXME: take page_size into account */

		rcode = sbp_run_transaction(card, tcode, node_id,
				generation, speed,
				offset, iter.addr, tfr_length);

		if (rcode != RCODE_COMPLETE)
			break;

		length -= tfr_length;
		offset += tfr_length;
		iter.consumed = tfr_length;
	}

	sg_miter_stop(&iter);
	fw_card_put(card);

	if (rcode == RCODE_COMPLETE) {
		WARN_ON(length != 0);
		return 0;
	} else {
		return -EIO;
	}
}

static int sbp_send_status(struct sbp_target_request *req)
{
	int rc, ret = 0, length;
	struct sbp_login_descriptor *login = req->login;

	length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;

	rc = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
			login->status_fifo_addr, &req->status, length);
	if (rc != RCODE_COMPLETE) {
		pr_debug("sbp_send_status: write failed: 0x%x\n", rc);
		ret = -EIO;
		goto put_ref;
	}

	pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
			req->orb_pointer);
	/*
	 * Drop the extra ACK_KREF reference taken by target_submit_cmd()
	 * ahead of sbp_check_stop_free() -> transport_generic_free_cmd()
	 * final se_cmd->cmd_kref put.
	 */
put_ref:
	target_put_sess_cmd(&req->se_cmd);
	return ret;
}

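/*
 * Repack fixed-format SCSI sense data into the compact sense
 * representation carried in the SBP-2 status block. Descriptor-format
 * sense data is not handled and causes the request to be aborted.
 */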
static void sbp_sense_mangle(struct sbp_target_request *req)
{
	struct se_cmd *se_cmd = &req->se_cmd;
	u8 *sense = req->sense_buf;
	u8 *status = req->status.data;

	WARN_ON(se_cmd->scsi_sense_length < 18);

	switch (sense[0] & 0x7f) {		/* sfmt */
	case 0x70: /* current, fixed */
		status[0] = 0 << 6;
		break;
	case 0x71: /* deferred, fixed */
		status[0] = 1 << 6;
		break;
	case 0x72: /* current, descriptor */
	case 0x73: /* deferred, descriptor */
	default:
		/*
		 * TODO: SBP-3 specifies what we should do with descriptor
		 * format sense data
		 */
		pr_err("sbp_send_sense: unknown sense format: 0x%x\n",
			sense[0]);
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQUEST_ABORTED));
		return;
	}

	status[0] |= se_cmd->scsi_status & 0x3f;	/* status */
	status[1] =
		(sense[0] & 0x80) |		/* valid */
		((sense[2] & 0xe0) >> 1) |	/* mark, eom, ili */
		(sense[2] & 0x0f);		/* sense_key */
	status[2] = se_cmd->scsi_asc;		/* sense_code */
	status[3] = se_cmd->scsi_ascq;		/* sense_qualifier */

	/* information */
	status[4] = sense[3];
	status[5] = sense[4];
	status[6] = sense[5];
	status[7] = sense[6];

	/* CDB-dependent */
	status[8] = sense[8];
	status[9] = sense[9];
	status[10] = sense[10];
	status[11] = sense[11];

	/* fru */
	status[12] = sense[14];

	/* sense_key-dependent */
	status[13] = sense[15];
	status[14] = sense[16];
	status[15] = sense[17];

	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_DEAD(0) |
		STATUS_BLOCK_LEN(5) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}

static int sbp_send_sense(struct sbp_target_request *req)
{
	struct se_cmd *se_cmd = &req->se_cmd;

	if (se_cmd->scsi_sense_length) {
		sbp_sense_mangle(req);
	} else {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
	}

	return sbp_send_status(req);
}

static void sbp_free_request(struct sbp_target_request *req)
{
	struct se_cmd *se_cmd = &req->se_cmd;
	struct se_session *se_sess = se_cmd->se_sess;

	kfree(req->pg_tbl);
	kfree(req->cmd_buf);

	target_free_tag(se_sess, se_cmd);
}

static void sbp_mgt_agent_process(struct work_struct *work)
{
	struct sbp_management_agent *agent =
		container_of(work, struct sbp_management_agent, work);
	struct sbp_management_request *req = agent->request;
	int ret;
	int status_data_len = 0;

	/* fetch the ORB from the initiator */
	ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST,
		req->node_addr, req->generation, req->speed,
		agent->orb_offset, &req->orb, sizeof(req->orb));
	if (ret != RCODE_COMPLETE) {
		pr_debug("mgt_orb fetch failed: %x\n", ret);
		goto out;
	}

	pr_debug("mgt_orb ptr1:0x%llx ptr2:0x%llx misc:0x%x len:0x%x status_fifo:0x%llx\n",
		sbp2_pointer_to_addr(&req->orb.ptr1),
		sbp2_pointer_to_addr(&req->orb.ptr2),
		be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length),
		sbp2_pointer_to_addr(&req->orb.status_fifo));

	if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) ||
		ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) {
		pr_err("mgt_orb bad request\n");
		goto out;
	}

	switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) {
	case MANAGEMENT_ORB_FUNCTION_LOGIN:
		sbp_management_request_login(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS:
		sbp_management_request_query_logins(agent, req,
				&status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_RECONNECT:
		sbp_management_request_reconnect(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_SET_PASSWORD:
		pr_notice("SET PASSWORD not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_LOGOUT:
		sbp_management_request_logout(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK:
		pr_notice("ABORT TASK not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET:
		pr_notice("ABORT TASK SET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET:
		pr_notice("LOGICAL UNIT RESET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_TARGET_RESET:
		pr_notice("TARGET RESET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	default:
		pr_notice("unknown management function 0x%x\n",
			MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc)));

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;
	}

	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_SRC(1) | /* Response to ORB, next_ORB absent */
		STATUS_BLOCK_LEN(DIV_ROUND_UP(status_data_len, 4) + 1) |
		STATUS_BLOCK_ORB_OFFSET_HIGH(agent->orb_offset >> 32));
	req->status.orb_low = cpu_to_be32(agent->orb_offset);

	/* write the status block back to the initiator */
	ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST,
		req->node_addr, req->generation, req->speed,
		sbp2_pointer_to_addr(&req->orb.status_fifo),
		&req->status, 8 + status_data_len);
	if (ret != RCODE_COMPLETE) {
		pr_debug("mgt_orb status write failed: %x\n", ret);
		goto out;
	}

out:
	fw_card_put(req->card);
	kfree(req);

	spin_lock_bh(&agent->lock);
	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
	spin_unlock_bh(&agent->lock);
}

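/*
 * Address handler for the management agent register. A block write of an
 * ORB pointer kicks off sbp_mgt_agent_process() on the unbound workqueue;
 * only one management request is processed at a time.
 */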
static void sbp_mgt_agent_rw(struct fw_card *card,
	struct fw_request *request, int tcode, int destination, int source,
	int generation, unsigned long long offset, void *data, size_t length,
	void *callback_data)
{
	struct sbp_management_agent *agent = callback_data;
	struct sbp2_pointer *ptr = data;
	int rcode = RCODE_ADDRESS_ERROR;

	if (!agent->tport->enable)
		goto out;

	if ((offset != agent->handler.offset) || (length != 8))
		goto out;

	if (tcode == TCODE_WRITE_BLOCK_REQUEST) {
		struct sbp_management_request *req;
		int prev_state;

		spin_lock_bh(&agent->lock);
		prev_state = agent->state;
		agent->state = MANAGEMENT_AGENT_STATE_BUSY;
		spin_unlock_bh(&agent->lock);

		if (prev_state == MANAGEMENT_AGENT_STATE_BUSY) {
			pr_notice("ignoring management request while busy\n");
			rcode = RCODE_CONFLICT_ERROR;
			goto out;
		}
		req = kzalloc(sizeof(*req), GFP_ATOMIC);
		if (!req) {
			rcode = RCODE_CONFLICT_ERROR;
			goto out;
		}

		req->card = fw_card_get(card);
		req->generation = generation;
		req->node_addr = source;
		req->speed = fw_get_request_speed(request);

		agent->orb_offset = sbp2_pointer_to_addr(ptr);
		agent->request = req;

		queue_work(system_unbound_wq, &agent->work);
		rcode = RCODE_COMPLETE;
	} else if (tcode == TCODE_READ_BLOCK_REQUEST) {
		addr_to_sbp2_pointer(agent->orb_offset, ptr);
		rcode = RCODE_COMPLETE;
	} else {
		rcode = RCODE_TYPE_ERROR;
	}

out:
	fw_send_response(card, request, rcode);
}

static struct sbp_management_agent *sbp_management_agent_register(
		struct sbp_tport *tport)
{
	int ret;
	struct sbp_management_agent *agent;

	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
	if (!agent)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&agent->lock);
	agent->tport = tport;
	agent->handler.length = 0x08;
	agent->handler.address_callback = sbp_mgt_agent_rw;
	agent->handler.callback_data = agent;
	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
	INIT_WORK(&agent->work, sbp_mgt_agent_process);
	agent->orb_offset = 0;
	agent->request = NULL;

	ret = fw_core_add_address_handler(&agent->handler,
			&sbp_register_region);
	if (ret < 0) {
		kfree(agent);
		return ERR_PTR(ret);
	}

	return agent;
}

static void sbp_management_agent_unregister(struct sbp_management_agent *agent)
{
	fw_core_remove_address_handler(&agent->handler);
	cancel_work_sync(&agent->work);
	kfree(agent);
}

static int sbp_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int sbp_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 sbp_get_tag(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);

	return tpg->tport_tpgt;
}

static u32 sbp_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static void sbp_release_cmd(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	sbp_free_request(req);
}

static u32 sbp_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static int sbp_write_pending(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);
	int ret;

	ret = sbp_rw_data(req);
	if (ret) {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(
				STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(
				SBP_STATUS_UNSPECIFIED_ERROR));
		sbp_send_status(req);
		return ret;
	}

	target_execute_cmd(se_cmd);
	return 0;
}

static void sbp_set_default_node_attrs(struct se_node_acl *nacl)
{
}

static int sbp_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

static int sbp_queue_data_in(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);
	int ret;

	ret = sbp_rw_data(req);
	if (ret) {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		sbp_send_status(req);
		return ret;
	}

	return sbp_send_sense(req);
}

/*
 * Called after a command without data transfer completes, or after the
 * write (to device) operation has completed.
 */
static int sbp_queue_status(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	return sbp_send_sense(req);
}

static void sbp_queue_tm_rsp(struct se_cmd *se_cmd)
{
}

static void sbp_aborted_task(struct se_cmd *se_cmd)
{
}

static int sbp_check_stop_free(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	return transport_generic_free_cmd(&req->se_cmd, 0);
}

static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
{
	struct se_lun *lun;
	int count = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link)
		count++;
	rcu_read_unlock();

	return count;
}

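/*
 * Rebuild the SBP unit directory published in the local config ROM: the
 * template entries plus management_agent, unit_characteristics,
 * reconnect_timeout, one logical_unit_number entry per LUN, and a unit
 * unique ID leaf carrying the target's GUID.
 */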
static int sbp_update_unit_directory(struct sbp_tport *tport)
{
	struct se_lun *lun;
	int num_luns, num_entries, idx = 0, mgt_agt_addr, ret;
	u32 *data;

	if (tport->unit_directory.data) {
		fw_core_remove_descriptor(&tport->unit_directory);
		kfree(tport->unit_directory.data);
		tport->unit_directory.data = NULL;
	}

	if (!tport->enable || !tport->tpg)
		return 0;

	num_luns = sbp_count_se_tpg_luns(&tport->tpg->se_tpg);

	/*
	 * Number of entries in the final unit directory:
	 *  - all of those in the template
	 *  - management_agent
	 *  - unit_characteristics
	 *  - reconnect_timeout
	 *  - unit unique ID
	 *  - one for each LUN
	 *
	 *  MUST NOT include leaf or sub-directory entries
	 */
	num_entries = ARRAY_SIZE(sbp_unit_directory_template) + 4 + num_luns;

	if (tport->directory_id != -1)
		num_entries++;

	/* allocate num_entries + 4 for the header and unique ID leaf */
	data = kcalloc((num_entries + 4), sizeof(u32), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* directory_length */
	data[idx++] = num_entries << 16;

	/* directory_id */
	if (tport->directory_id != -1)
		data[idx++] = (CSR_DIRECTORY_ID << 24) | tport->directory_id;

	/* unit directory template */
	memcpy(&data[idx], sbp_unit_directory_template,
			sizeof(sbp_unit_directory_template));
	idx += ARRAY_SIZE(sbp_unit_directory_template);

	/* management_agent */
	mgt_agt_addr = (tport->mgt_agt->handler.offset - CSR_REGISTER_BASE) / 4;
	data[idx++] = 0x54000000 | (mgt_agt_addr & 0x00ffffff);

	/* unit_characteristics */
	data[idx++] = 0x3a000000 |
		(((tport->mgt_orb_timeout * 2) << 8) & 0xff00) |
		SBP_ORB_FETCH_SIZE;

	/* reconnect_timeout */
	data[idx++] = 0x3d000000 | (tport->max_reconnect_timeout & 0xffff);

	/* unit unique ID (leaf is just after LUNs) */
	data[idx++] = 0x8d000000 | (num_luns + 1);

	rcu_read_lock();
	hlist_for_each_entry_rcu(lun, &tport->tpg->se_tpg.tpg_lun_hlist, link) {
		struct se_device *dev;
		int type;
		/*
		 * rcu_dereference_raw protected by se_lun->lun_group symlink
		 * reference to se_device->dev_group.
		 */
		dev = rcu_dereference_raw(lun->lun_se_dev);
		type = dev->transport->get_device_type(dev);

		/* logical_unit_number */
		data[idx++] = 0x14000000 |
			((type << 16) & 0x1f0000) |
			(lun->unpacked_lun & 0xffff);
	}
	rcu_read_unlock();

	/* unit unique ID leaf */
	data[idx++] = 2 << 16;
	data[idx++] = tport->guid >> 32;
	data[idx++] = tport->guid;

	tport->unit_directory.length = idx;
	tport->unit_directory.key = (CSR_DIRECTORY | CSR_UNIT) << 24;
	tport->unit_directory.data = data;

	ret = fw_core_add_descriptor(&tport->unit_directory);
	if (ret < 0) {
		kfree(tport->unit_directory.data);
		tport->unit_directory.data = NULL;
	}

	return ret;
}

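/*
 * Parse a WWN of exactly 16 hex digits (an optional trailing newline is
 * accepted) into a u64 GUID. Returns the number of characters consumed,
 * or -1 on a malformed name.
 */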
1912static ssize_t sbp_parse_wwn(const char *name, u64 *wwn)
1913{
1914	const char *cp;
1915	char c, nibble;
1916	int pos = 0, err;
1917
1918	*wwn = 0;
1919	for (cp = name; cp < &name[SBP_NAMELEN - 1]; cp++) {
1920		c = *cp;
1921		if (c == '\n' && cp[1] == '\0')
1922			continue;
1923		if (c == '\0') {
1924			err = 2;
1925			if (pos != 16)
1926				goto fail;
1927			return cp - name;
1928		}
1929		err = 3;
1930		if (isdigit(c))
1931			nibble = c - '0';
1932		else if (isxdigit(c))
1933			nibble = tolower(c) - 'a' + 10;
1934		else
1935			goto fail;
1936		*wwn = (*wwn << 4) | nibble;
1937		pos++;
1938	}
1939	err = 4;
1940fail:
1941	printk(KERN_INFO "err %u len %zu pos %u\n",
1942			err, cp - name, pos);
	return -1;
}

static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn)
{
	return snprintf(buf, len, "%016llx", wwn);
}

static int sbp_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
{
	u64 guid = 0;

	if (sbp_parse_wwn(name, &guid) < 0)
		return -EINVAL;
	return 0;
}

static int sbp_post_link_lun(
		struct se_portal_group *se_tpg,
		struct se_lun *se_lun)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);

	return sbp_update_unit_directory(tpg->tport);
}

static void sbp_pre_unlink_lun(
		struct se_portal_group *se_tpg,
		struct se_lun *se_lun)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	int ret;

	if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0)
		tport->enable = 0;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		pr_err("unlink LUN: failed to update unit directory\n");
}

static struct se_portal_group *sbp_make_tpg(struct se_wwn *wwn,
					    const char *name)
{
	struct sbp_tport *tport =
		container_of(wwn, struct sbp_tport, tport_wwn);

	struct sbp_tpg *tpg;
	unsigned long tpgt;
	int ret;

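	/* the configfs group name must be "tpgt_<n>", e.g. "tpgt_1" */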
	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
		return ERR_PTR(-EINVAL);

	if (tport->tpg) {
		pr_err("Only one TPG per Unit is possible.\n");
		return ERR_PTR(-EBUSY);
	}

	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
	if (!tpg)
		return ERR_PTR(-ENOMEM);

	tpg->tport = tport;
	tpg->tport_tpgt = tpgt;
	tport->tpg = tpg;

	/* default attribute values */
	tport->enable = 0;
	tport->directory_id = -1;
	tport->mgt_orb_timeout = 15;
	tport->max_reconnect_timeout = 5;
	tport->max_logins_per_lun = 1;

	tport->mgt_agt = sbp_management_agent_register(tport);
	if (IS_ERR(tport->mgt_agt)) {
		ret = PTR_ERR(tport->mgt_agt);
		goto out_free_tpg;
	}

	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SBP);
	if (ret < 0)
		goto out_unreg_mgt_agt;

	return &tpg->se_tpg;

out_unreg_mgt_agt:
	sbp_management_agent_unregister(tport->mgt_agt);
out_free_tpg:
	tport->tpg = NULL;
	kfree(tpg);
	return ERR_PTR(ret);
}

static void sbp_drop_tpg(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	core_tpg_deregister(se_tpg);
	sbp_management_agent_unregister(tport->mgt_agt);
	tport->tpg = NULL;
	kfree(tpg);
}

static struct se_wwn *sbp_make_tport(
		struct target_fabric_configfs *tf,
		struct config_group *group,
		const char *name)
{
	struct sbp_tport *tport;
	u64 guid = 0;

	if (sbp_parse_wwn(name, &guid) < 0)
		return ERR_PTR(-EINVAL);

	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
	if (!tport)
		return ERR_PTR(-ENOMEM);

	tport->guid = guid;
	sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid);

	return &tport->tport_wwn;
}

static void sbp_drop_tport(struct se_wwn *wwn)
{
	struct sbp_tport *tport =
		container_of(wwn, struct sbp_tport, tport_wwn);

	kfree(tport);
}

static ssize_t sbp_wwn_version_show(struct config_item *item, char *page)
{
	return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION);
}

CONFIGFS_ATTR_RO(sbp_wwn_, version);

static struct configfs_attribute *sbp_wwn_attrs[] = {
	&sbp_wwn_attr_version,
	NULL,
};

static ssize_t sbp_tpg_directory_id_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	if (tport->directory_id == -1)
		return sprintf(page, "implicit\n");
	else
		return sprintf(page, "%06x\n", tport->directory_id);
}

static ssize_t sbp_tpg_directory_id_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;

	if (tport->enable) {
		pr_err("Cannot change the directory_id on an active target.\n");
		return -EBUSY;
	}

	if (strstr(page, "implicit") == page) {
		tport->directory_id = -1;
	} else {
		if (kstrtoul(page, 16, &val) < 0)
			return -EINVAL;
		if (val > 0xffffff)
			return -EINVAL;

		tport->directory_id = val;
	}

	return count;
}
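
/*
 * Usage sketch (paths illustrative): a fixed 24-bit directory ID is set
 * with, e.g.,
 *   echo c0ffee > /sys/kernel/config/target/sbp/<guid>/tpgt_1/directory_id
 * while writing "implicit" (the default) omits the Directory_ID entry
 * from the unit directory altogether.
 */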

static ssize_t sbp_tpg_enable_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	return sprintf(page, "%d\n", tport->enable);
}

static ssize_t sbp_tpg_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val != 0) && (val != 1))
		return -EINVAL;

	if (tport->enable == val)
		return count;

	if (val) {
		if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) {
			pr_err("Cannot enable a target with no LUNs!\n");
			return -EINVAL;
		}
	} else {
		/* XXX: force-shutdown sessions instead? */
		spin_lock_bh(&se_tpg->session_lock);
		if (!list_empty(&se_tpg->tpg_sess_list)) {
			spin_unlock_bh(&se_tpg->session_lock);
			return -EBUSY;
		}
		spin_unlock_bh(&se_tpg->session_lock);
	}

	tport->enable = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0) {
		pr_err("Could not update Config ROM\n");
		return ret;
	}

	return count;
}
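
/*
 * Usage sketch (paths illustrative): once LUNs exist under the TPG,
 *   echo 1 > /sys/kernel/config/target/sbp/<guid>/tpgt_1/enable
 * publishes the unit directory in the config ROM; disabling is refused
 * while any initiator session remains logged in.
 */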

CONFIGFS_ATTR(sbp_tpg_, directory_id);
CONFIGFS_ATTR(sbp_tpg_, enable);

static struct configfs_attribute *sbp_tpg_base_attrs[] = {
	&sbp_tpg_attr_directory_id,
	&sbp_tpg_attr_enable,
	NULL,
};

static ssize_t sbp_tpg_attrib_mgt_orb_timeout_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	return sprintf(page, "%d\n", tport->mgt_orb_timeout);
}

static ssize_t sbp_tpg_attrib_mgt_orb_timeout_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 127))
		return -EINVAL;

	if (tport->mgt_orb_timeout == val)
		return count;

	tport->mgt_orb_timeout = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t sbp_tpg_attrib_max_reconnect_timeout_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	return sprintf(page, "%d\n", tport->max_reconnect_timeout);
}

static ssize_t sbp_tpg_attrib_max_reconnect_timeout_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 32767))
		return -EINVAL;

	if (tport->max_reconnect_timeout == val)
		return count;

	tport->max_reconnect_timeout = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t sbp_tpg_attrib_max_logins_per_lun_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	return sprintf(page, "%d\n", tport->max_logins_per_lun);
}

static ssize_t sbp_tpg_attrib_max_logins_per_lun_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 127))
		return -EINVAL;

	/* XXX: also check against current count? */

	tport->max_logins_per_lun = val;

	return count;
}
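
/*
 * Note that lowering max_logins_per_lun does not evict existing logins;
 * the new limit is only consulted when a later login request arrives.
 */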

CONFIGFS_ATTR(sbp_tpg_attrib_, mgt_orb_timeout);
CONFIGFS_ATTR(sbp_tpg_attrib_, max_reconnect_timeout);
CONFIGFS_ATTR(sbp_tpg_attrib_, max_logins_per_lun);

static struct configfs_attribute *sbp_tpg_attrib_attrs[] = {
	&sbp_tpg_attrib_attr_mgt_orb_timeout,
	&sbp_tpg_attrib_attr_max_reconnect_timeout,
	&sbp_tpg_attrib_attr_max_logins_per_lun,
	NULL,
};
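
/*
 * Registering this template exposes a configfs tree (layout illustrative)
 * under /sys/kernel/config/target/sbp/:
 *
 *   <guid>/                   target port, named by its 16-digit EUI-64
 *     tpgt_<n>/               a single TPG per port
 *       enable, directory_id  base attributes above
 *       attrib/               mgt_orb_timeout, max_reconnect_timeout,
 *                             max_logins_per_lun
 *       lun/lun_<n>/          LUNs advertised in the unit directory
 */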

static const struct target_core_fabric_ops sbp_ops = {
	.module				= THIS_MODULE,
	.fabric_name			= "sbp",
	.tpg_get_wwn			= sbp_get_fabric_wwn,
	.tpg_get_tag			= sbp_get_tag,
	.tpg_check_demo_mode		= sbp_check_true,
	.tpg_check_demo_mode_cache	= sbp_check_true,
	.tpg_check_demo_mode_write_protect = sbp_check_false,
	.tpg_check_prod_mode_write_protect = sbp_check_false,
	.tpg_get_inst_index		= sbp_tpg_get_inst_index,
	.release_cmd			= sbp_release_cmd,
	.sess_get_index			= sbp_sess_get_index,
	.write_pending			= sbp_write_pending,
	.set_default_node_attributes	= sbp_set_default_node_attrs,
	.get_cmd_state			= sbp_get_cmd_state,
	.queue_data_in			= sbp_queue_data_in,
	.queue_status			= sbp_queue_status,
	.queue_tm_rsp			= sbp_queue_tm_rsp,
	.aborted_task			= sbp_aborted_task,
	.check_stop_free		= sbp_check_stop_free,

	.fabric_make_wwn		= sbp_make_tport,
	.fabric_drop_wwn		= sbp_drop_tport,
	.fabric_make_tpg		= sbp_make_tpg,
	.fabric_drop_tpg		= sbp_drop_tpg,
	.fabric_post_link		= sbp_post_link_lun,
	.fabric_pre_unlink		= sbp_pre_unlink_lun,
	.fabric_make_np			= NULL,
	.fabric_drop_np			= NULL,
	.fabric_init_nodeacl		= sbp_init_nodeacl,

	.tfc_wwn_attrs			= sbp_wwn_attrs,
	.tfc_tpg_base_attrs		= sbp_tpg_base_attrs,
	.tfc_tpg_attrib_attrs		= sbp_tpg_attrib_attrs,
};

static int __init sbp_init(void)
{
	return target_register_template(&sbp_ops);
}

static void __exit sbp_exit(void)
{
	target_unregister_template(&sbp_ops);
}

MODULE_DESCRIPTION("FireWire SBP fabric driver");
MODULE_LICENSE("GPL");
module_init(sbp_init);
module_exit(sbp_exit);
