1/*
2   BlueZ - Bluetooth protocol stack for Linux
3   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4   Copyright 2023 NXP
5
6   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8   This program is free software; you can redistribute it and/or modify
9   it under the terms of the GNU General Public License version 2 as
10   published by the Free Software Foundation;
11
12   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23   SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI connection handling. */
27
28#include <linux/export.h>
29#include <linux/debugfs.h>
30
31#include <net/bluetooth/bluetooth.h>
32#include <net/bluetooth/hci_core.h>
33#include <net/bluetooth/l2cap.h>
34#include <net/bluetooth/iso.h>
35#include <net/bluetooth/mgmt.h>
36
37#include "hci_request.h"
38#include "smp.h"
39#include "eir.h"
40
41struct sco_param {
42	u16 pkt_type;
43	u16 max_latency;
44	u8  retrans_effort;
45};
46
47struct conn_handle_t {
48	struct hci_conn *conn;
49	__u16 handle;
50};
51
52static const struct sco_param esco_param_cvsd[] = {
53	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a,	0x01 }, /* S3 */
54	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007,	0x01 }, /* S2 */
55	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007,	0x01 }, /* S1 */
56	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0x01 }, /* D1 */
57	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0x01 }, /* D0 */
58};
59
60static const struct sco_param sco_param_cvsd[] = {
61	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,	0xff }, /* D1 */
62	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,	0xff }, /* D0 */
63};
64
65static const struct sco_param esco_param_msbc[] = {
66	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d,	0x02 }, /* T2 */
67	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008,	0x02 }, /* T1 */
68};
69
70/* This function requires the caller holds hdev->lock */
71static void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
72{
73	struct hci_conn_params *params;
74	struct hci_dev *hdev = conn->hdev;
75	struct smp_irk *irk;
76	bdaddr_t *bdaddr;
77	u8 bdaddr_type;
78
79	bdaddr = &conn->dst;
80	bdaddr_type = conn->dst_type;
81
82	/* Check if we need to convert to identity address */
83	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
84	if (irk) {
85		bdaddr = &irk->bdaddr;
86		bdaddr_type = irk->addr_type;
87	}
88
89	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
90					   bdaddr_type);
91	if (!params)
92		return;
93
94	if (params->conn) {
95		hci_conn_drop(params->conn);
96		hci_conn_put(params->conn);
97		params->conn = NULL;
98	}
99
100	if (!params->explicit_connect)
101		return;
102
103	/* If the status indicates successful cancellation of
104	 * the attempt (i.e. Unknown Connection Id) there's no point of
105	 * notifying failure since we'll go back to keep trying to
106	 * connect. The only exception is explicit connect requests
107	 * where a timeout + cancel does indicate an actual failure.
108	 */
109	if (status && status != HCI_ERROR_UNKNOWN_CONN_ID)
110		mgmt_connect_failed(hdev, &conn->dst, conn->type,
111				    conn->dst_type, status);
112
113	/* The connection attempt was doing scan for new RPA, and is
114	 * in scan phase. If params are not associated with any other
115	 * autoconnect action, remove them completely. If they are, just unmark
116	 * them as waiting for connection, by clearing explicit_connect field.
117	 */
118	params->explicit_connect = false;
119
120	hci_pend_le_list_del_init(params);
121
122	switch (params->auto_connect) {
123	case HCI_AUTO_CONN_EXPLICIT:
124		hci_conn_params_del(hdev, bdaddr, bdaddr_type);
125		/* return instead of break to avoid duplicate scan update */
126		return;
127	case HCI_AUTO_CONN_DIRECT:
128	case HCI_AUTO_CONN_ALWAYS:
129		hci_pend_le_list_add(params, &hdev->pend_le_conns);
130		break;
131	case HCI_AUTO_CONN_REPORT:
132		hci_pend_le_list_add(params, &hdev->pend_le_reports);
133		break;
134	default:
135		break;
136	}
137
138	hci_update_passive_scan(hdev);
139}
140
141static void hci_conn_cleanup(struct hci_conn *conn)
142{
143	struct hci_dev *hdev = conn->hdev;
144
145	if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
146		hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
147
148	if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
149		hci_remove_link_key(hdev, &conn->dst);
150
151	hci_chan_list_flush(conn);
152
153	hci_conn_hash_del(hdev, conn);
154
155	if (HCI_CONN_HANDLE_UNSET(conn->handle))
156		ida_free(&hdev->unset_handle_ida, conn->handle);
157
158	if (conn->cleanup)
159		conn->cleanup(conn);
160
161	if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
162		switch (conn->setting & SCO_AIRMODE_MASK) {
163		case SCO_AIRMODE_CVSD:
164		case SCO_AIRMODE_TRANSP:
165			if (hdev->notify)
166				hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO);
167			break;
168		}
169	} else {
170		if (hdev->notify)
171			hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
172	}
173
174	debugfs_remove_recursive(conn->debugfs);
175
176	hci_conn_del_sysfs(conn);
177
178	hci_dev_put(hdev);
179}
180
181static void hci_acl_create_connection(struct hci_conn *conn)
182{
183	struct hci_dev *hdev = conn->hdev;
184	struct inquiry_entry *ie;
185	struct hci_cp_create_conn cp;
186
187	BT_DBG("hcon %p", conn);
188
189	/* Many controllers disallow HCI Create Connection while it is doing
190	 * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create
191	 * Connection. This may cause the MGMT discovering state to become false
192	 * without user space's request but it is okay since the MGMT Discovery
193	 * APIs do not promise that discovery should be done forever. Instead,
194	 * the user space monitors the status of MGMT discovering and it may
195	 * request for discovery again when this flag becomes false.
196	 */
197	if (test_bit(HCI_INQUIRY, &hdev->flags)) {
198		/* Put this connection to "pending" state so that it will be
199		 * executed after the inquiry cancel command complete event.
200		 */
201		conn->state = BT_CONNECT2;
202		hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
203		return;
204	}
205
206	conn->state = BT_CONNECT;
207	conn->out = true;
208	conn->role = HCI_ROLE_MASTER;
209
210	conn->attempt++;
211
212	conn->link_policy = hdev->link_policy;
213
214	memset(&cp, 0, sizeof(cp));
215	bacpy(&cp.bdaddr, &conn->dst);
216	cp.pscan_rep_mode = 0x02;
217
218	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
219	if (ie) {
220		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
221			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
222			cp.pscan_mode     = ie->data.pscan_mode;
223			cp.clock_offset   = ie->data.clock_offset |
224					    cpu_to_le16(0x8000);
225		}
226
227		memcpy(conn->dev_class, ie->data.dev_class, 3);
228	}
229
230	cp.pkt_type = cpu_to_le16(conn->pkt_type);
231	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
232		cp.role_switch = 0x01;
233	else
234		cp.role_switch = 0x00;
235
236	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
237}
238
239int hci_disconnect(struct hci_conn *conn, __u8 reason)
240{
241	BT_DBG("hcon %p", conn);
242
243	/* When we are central of an established connection and it enters
244	 * the disconnect timeout, then go ahead and try to read the
245	 * current clock offset.  Processing of the result is done
246	 * within the event handling and hci_clock_offset_evt function.
247	 */
248	if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
249	    (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
250		struct hci_dev *hdev = conn->hdev;
251		struct hci_cp_read_clock_offset clkoff_cp;
252
253		clkoff_cp.handle = cpu_to_le16(conn->handle);
254		hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
255			     &clkoff_cp);
256	}
257
258	return hci_abort_conn(conn, reason);
259}
260
261static void hci_add_sco(struct hci_conn *conn, __u16 handle)
262{
263	struct hci_dev *hdev = conn->hdev;
264	struct hci_cp_add_sco cp;
265
266	BT_DBG("hcon %p", conn);
267
268	conn->state = BT_CONNECT;
269	conn->out = true;
270
271	conn->attempt++;
272
273	cp.handle   = cpu_to_le16(handle);
274	cp.pkt_type = cpu_to_le16(conn->pkt_type);
275
276	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
277}
278
279static bool find_next_esco_param(struct hci_conn *conn,
280				 const struct sco_param *esco_param, int size)
281{
282	if (!conn->parent)
283		return false;
284
285	for (; conn->attempt <= size; conn->attempt++) {
286		if (lmp_esco_2m_capable(conn->parent) ||
287		    (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3))
288			break;
289		BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported",
290		       conn, conn->attempt);
291	}
292
293	return conn->attempt <= size;
294}
295
296static int configure_datapath_sync(struct hci_dev *hdev, struct bt_codec *codec)
297{
298	int err;
299	__u8 vnd_len, *vnd_data = NULL;
300	struct hci_op_configure_data_path *cmd = NULL;
301
302	err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
303					  &vnd_data);
304	if (err < 0)
305		goto error;
306
307	cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
308	if (!cmd) {
309		err = -ENOMEM;
310		goto error;
311	}
312
313	err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
314	if (err < 0)
315		goto error;
316
317	cmd->vnd_len = vnd_len;
318	memcpy(cmd->vnd_data, vnd_data, vnd_len);
319
320	cmd->direction = 0x00;
321	__hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
322			      sizeof(*cmd) + vnd_len, cmd, HCI_CMD_TIMEOUT);
323
324	cmd->direction = 0x01;
325	err = __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
326				    sizeof(*cmd) + vnd_len, cmd,
327				    HCI_CMD_TIMEOUT);
328error:
329
330	kfree(cmd);
331	kfree(vnd_data);
332	return err;
333}
334
335static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data)
336{
337	struct conn_handle_t *conn_handle = data;
338	struct hci_conn *conn = conn_handle->conn;
339	__u16 handle = conn_handle->handle;
340	struct hci_cp_enhanced_setup_sync_conn cp;
341	const struct sco_param *param;
342
343	kfree(conn_handle);
344
345	bt_dev_dbg(hdev, "hcon %p", conn);
346
347	/* for offload use case, codec needs to configured before opening SCO */
348	if (conn->codec.data_path)
349		configure_datapath_sync(hdev, &conn->codec);
350
351	conn->state = BT_CONNECT;
352	conn->out = true;
353
354	conn->attempt++;
355
356	memset(&cp, 0x00, sizeof(cp));
357
358	cp.handle   = cpu_to_le16(handle);
359
360	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
361	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
362
363	switch (conn->codec.id) {
364	case BT_CODEC_MSBC:
365		if (!find_next_esco_param(conn, esco_param_msbc,
366					  ARRAY_SIZE(esco_param_msbc)))
367			return -EINVAL;
368
369		param = &esco_param_msbc[conn->attempt - 1];
370		cp.tx_coding_format.id = 0x05;
371		cp.rx_coding_format.id = 0x05;
372		cp.tx_codec_frame_size = __cpu_to_le16(60);
373		cp.rx_codec_frame_size = __cpu_to_le16(60);
374		cp.in_bandwidth = __cpu_to_le32(32000);
375		cp.out_bandwidth = __cpu_to_le32(32000);
376		cp.in_coding_format.id = 0x04;
377		cp.out_coding_format.id = 0x04;
378		cp.in_coded_data_size = __cpu_to_le16(16);
379		cp.out_coded_data_size = __cpu_to_le16(16);
380		cp.in_pcm_data_format = 2;
381		cp.out_pcm_data_format = 2;
382		cp.in_pcm_sample_payload_msb_pos = 0;
383		cp.out_pcm_sample_payload_msb_pos = 0;
384		cp.in_data_path = conn->codec.data_path;
385		cp.out_data_path = conn->codec.data_path;
386		cp.in_transport_unit_size = 1;
387		cp.out_transport_unit_size = 1;
388		break;
389
390	case BT_CODEC_TRANSPARENT:
391		if (!find_next_esco_param(conn, esco_param_msbc,
392					  ARRAY_SIZE(esco_param_msbc)))
393			return false;
394		param = &esco_param_msbc[conn->attempt - 1];
395		cp.tx_coding_format.id = 0x03;
396		cp.rx_coding_format.id = 0x03;
397		cp.tx_codec_frame_size = __cpu_to_le16(60);
398		cp.rx_codec_frame_size = __cpu_to_le16(60);
399		cp.in_bandwidth = __cpu_to_le32(0x1f40);
400		cp.out_bandwidth = __cpu_to_le32(0x1f40);
401		cp.in_coding_format.id = 0x03;
402		cp.out_coding_format.id = 0x03;
403		cp.in_coded_data_size = __cpu_to_le16(16);
404		cp.out_coded_data_size = __cpu_to_le16(16);
405		cp.in_pcm_data_format = 2;
406		cp.out_pcm_data_format = 2;
407		cp.in_pcm_sample_payload_msb_pos = 0;
408		cp.out_pcm_sample_payload_msb_pos = 0;
409		cp.in_data_path = conn->codec.data_path;
410		cp.out_data_path = conn->codec.data_path;
411		cp.in_transport_unit_size = 1;
412		cp.out_transport_unit_size = 1;
413		break;
414
415	case BT_CODEC_CVSD:
416		if (conn->parent && lmp_esco_capable(conn->parent)) {
417			if (!find_next_esco_param(conn, esco_param_cvsd,
418						  ARRAY_SIZE(esco_param_cvsd)))
419				return -EINVAL;
420			param = &esco_param_cvsd[conn->attempt - 1];
421		} else {
422			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
423				return -EINVAL;
424			param = &sco_param_cvsd[conn->attempt - 1];
425		}
426		cp.tx_coding_format.id = 2;
427		cp.rx_coding_format.id = 2;
428		cp.tx_codec_frame_size = __cpu_to_le16(60);
429		cp.rx_codec_frame_size = __cpu_to_le16(60);
430		cp.in_bandwidth = __cpu_to_le32(16000);
431		cp.out_bandwidth = __cpu_to_le32(16000);
432		cp.in_coding_format.id = 4;
433		cp.out_coding_format.id = 4;
434		cp.in_coded_data_size = __cpu_to_le16(16);
435		cp.out_coded_data_size = __cpu_to_le16(16);
436		cp.in_pcm_data_format = 2;
437		cp.out_pcm_data_format = 2;
438		cp.in_pcm_sample_payload_msb_pos = 0;
439		cp.out_pcm_sample_payload_msb_pos = 0;
440		cp.in_data_path = conn->codec.data_path;
441		cp.out_data_path = conn->codec.data_path;
442		cp.in_transport_unit_size = 16;
443		cp.out_transport_unit_size = 16;
444		break;
445	default:
446		return -EINVAL;
447	}
448
449	cp.retrans_effort = param->retrans_effort;
450	cp.pkt_type = __cpu_to_le16(param->pkt_type);
451	cp.max_latency = __cpu_to_le16(param->max_latency);
452
453	if (hci_send_cmd(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
454		return -EIO;
455
456	return 0;
457}
458
459static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle)
460{
461	struct hci_dev *hdev = conn->hdev;
462	struct hci_cp_setup_sync_conn cp;
463	const struct sco_param *param;
464
465	bt_dev_dbg(hdev, "hcon %p", conn);
466
467	conn->state = BT_CONNECT;
468	conn->out = true;
469
470	conn->attempt++;
471
472	cp.handle   = cpu_to_le16(handle);
473
474	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
475	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
476	cp.voice_setting  = cpu_to_le16(conn->setting);
477
478	switch (conn->setting & SCO_AIRMODE_MASK) {
479	case SCO_AIRMODE_TRANSP:
480		if (!find_next_esco_param(conn, esco_param_msbc,
481					  ARRAY_SIZE(esco_param_msbc)))
482			return false;
483		param = &esco_param_msbc[conn->attempt - 1];
484		break;
485	case SCO_AIRMODE_CVSD:
486		if (conn->parent && lmp_esco_capable(conn->parent)) {
487			if (!find_next_esco_param(conn, esco_param_cvsd,
488						  ARRAY_SIZE(esco_param_cvsd)))
489				return false;
490			param = &esco_param_cvsd[conn->attempt - 1];
491		} else {
492			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
493				return false;
494			param = &sco_param_cvsd[conn->attempt - 1];
495		}
496		break;
497	default:
498		return false;
499	}
500
501	cp.retrans_effort = param->retrans_effort;
502	cp.pkt_type = __cpu_to_le16(param->pkt_type);
503	cp.max_latency = __cpu_to_le16(param->max_latency);
504
505	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
506		return false;
507
508	return true;
509}
510
511bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
512{
513	int result;
514	struct conn_handle_t *conn_handle;
515
516	if (enhanced_sync_conn_capable(conn->hdev)) {
517		conn_handle = kzalloc(sizeof(*conn_handle), GFP_KERNEL);
518
519		if (!conn_handle)
520			return false;
521
522		conn_handle->conn = conn;
523		conn_handle->handle = handle;
524		result = hci_cmd_sync_queue(conn->hdev, hci_enhanced_setup_sync,
525					    conn_handle, NULL);
526		if (result < 0)
527			kfree(conn_handle);
528
529		return result == 0;
530	}
531
532	return hci_setup_sync_conn(conn, handle);
533}
534
535u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
536		      u16 to_multiplier)
537{
538	struct hci_dev *hdev = conn->hdev;
539	struct hci_conn_params *params;
540	struct hci_cp_le_conn_update cp;
541
542	hci_dev_lock(hdev);
543
544	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
545	if (params) {
546		params->conn_min_interval = min;
547		params->conn_max_interval = max;
548		params->conn_latency = latency;
549		params->supervision_timeout = to_multiplier;
550	}
551
552	hci_dev_unlock(hdev);
553
554	memset(&cp, 0, sizeof(cp));
555	cp.handle		= cpu_to_le16(conn->handle);
556	cp.conn_interval_min	= cpu_to_le16(min);
557	cp.conn_interval_max	= cpu_to_le16(max);
558	cp.conn_latency		= cpu_to_le16(latency);
559	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
560	cp.min_ce_len		= cpu_to_le16(0x0000);
561	cp.max_ce_len		= cpu_to_le16(0x0000);
562
563	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
564
565	if (params)
566		return 0x01;
567
568	return 0x00;
569}
570
571void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
572		      __u8 ltk[16], __u8 key_size)
573{
574	struct hci_dev *hdev = conn->hdev;
575	struct hci_cp_le_start_enc cp;
576
577	BT_DBG("hcon %p", conn);
578
579	memset(&cp, 0, sizeof(cp));
580
581	cp.handle = cpu_to_le16(conn->handle);
582	cp.rand = rand;
583	cp.ediv = ediv;
584	memcpy(cp.ltk, ltk, key_size);
585
586	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
587}
588
589/* Device _must_ be locked */
590void hci_sco_setup(struct hci_conn *conn, __u8 status)
591{
592	struct hci_link *link;
593
594	link = list_first_entry_or_null(&conn->link_list, struct hci_link, list);
595	if (!link || !link->conn)
596		return;
597
598	BT_DBG("hcon %p", conn);
599
600	if (!status) {
601		if (lmp_esco_capable(conn->hdev))
602			hci_setup_sync(link->conn, conn->handle);
603		else
604			hci_add_sco(link->conn, conn->handle);
605	} else {
606		hci_connect_cfm(link->conn, status);
607		hci_conn_del(link->conn);
608	}
609}
610
611static void hci_conn_timeout(struct work_struct *work)
612{
613	struct hci_conn *conn = container_of(work, struct hci_conn,
614					     disc_work.work);
615	int refcnt = atomic_read(&conn->refcnt);
616
617	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
618
619	WARN_ON(refcnt < 0);
620
621	/* FIXME: It was observed that in pairing failed scenario, refcnt
622	 * drops below 0. Probably this is because l2cap_conn_del calls
623	 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
624	 * dropped. After that loop hci_chan_del is called which also drops
625	 * conn. For now make sure that ACL is alive if refcnt is higher then 0,
626	 * otherwise drop it.
627	 */
628	if (refcnt > 0)
629		return;
630
631	hci_abort_conn(conn, hci_proto_disconn_ind(conn));
632}
633
634/* Enter sniff mode */
635static void hci_conn_idle(struct work_struct *work)
636{
637	struct hci_conn *conn = container_of(work, struct hci_conn,
638					     idle_work.work);
639	struct hci_dev *hdev = conn->hdev;
640
641	BT_DBG("hcon %p mode %d", conn, conn->mode);
642
643	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
644		return;
645
646	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
647		return;
648
649	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
650		struct hci_cp_sniff_subrate cp;
651		cp.handle             = cpu_to_le16(conn->handle);
652		cp.max_latency        = cpu_to_le16(0);
653		cp.min_remote_timeout = cpu_to_le16(0);
654		cp.min_local_timeout  = cpu_to_le16(0);
655		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
656	}
657
658	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
659		struct hci_cp_sniff_mode cp;
660		cp.handle       = cpu_to_le16(conn->handle);
661		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
662		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
663		cp.attempt      = cpu_to_le16(4);
664		cp.timeout      = cpu_to_le16(1);
665		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
666	}
667}
668
669static void hci_conn_auto_accept(struct work_struct *work)
670{
671	struct hci_conn *conn = container_of(work, struct hci_conn,
672					     auto_accept_work.work);
673
674	hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
675		     &conn->dst);
676}
677
678static void le_disable_advertising(struct hci_dev *hdev)
679{
680	if (ext_adv_capable(hdev)) {
681		struct hci_cp_le_set_ext_adv_enable cp;
682
683		cp.enable = 0x00;
684		cp.num_of_sets = 0x00;
685
686		hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
687			     &cp);
688	} else {
689		u8 enable = 0x00;
690		hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
691			     &enable);
692	}
693}
694
695static void le_conn_timeout(struct work_struct *work)
696{
697	struct hci_conn *conn = container_of(work, struct hci_conn,
698					     le_conn_timeout.work);
699	struct hci_dev *hdev = conn->hdev;
700
701	BT_DBG("");
702
703	/* We could end up here due to having done directed advertising,
704	 * so clean up the state if necessary. This should however only
705	 * happen with broken hardware or if low duty cycle was used
706	 * (which doesn't have a timeout of its own).
707	 */
708	if (conn->role == HCI_ROLE_SLAVE) {
709		/* Disable LE Advertising */
710		le_disable_advertising(hdev);
711		hci_dev_lock(hdev);
712		hci_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
713		hci_dev_unlock(hdev);
714		return;
715	}
716
717	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
718}
719
720struct iso_cig_params {
721	struct hci_cp_le_set_cig_params cp;
722	struct hci_cis_params cis[0x1f];
723};
724
725struct iso_list_data {
726	union {
727		u8  cig;
728		u8  big;
729	};
730	union {
731		u8  cis;
732		u8  bis;
733		u16 sync_handle;
734	};
735	int count;
736	bool big_term;
737	bool pa_sync_term;
738	bool big_sync_term;
739};
740
741static void bis_list(struct hci_conn *conn, void *data)
742{
743	struct iso_list_data *d = data;
744
745	/* Skip if not broadcast/ANY address */
746	if (bacmp(&conn->dst, BDADDR_ANY))
747		return;
748
749	if (d->big != conn->iso_qos.bcast.big || d->bis == BT_ISO_QOS_BIS_UNSET ||
750	    d->bis != conn->iso_qos.bcast.bis)
751		return;
752
753	d->count++;
754}
755
756static int terminate_big_sync(struct hci_dev *hdev, void *data)
757{
758	struct iso_list_data *d = data;
759
760	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", d->big, d->bis);
761
762	hci_remove_ext_adv_instance_sync(hdev, d->bis, NULL);
763
764	/* Only terminate BIG if it has been created */
765	if (!d->big_term)
766		return 0;
767
768	return hci_le_terminate_big_sync(hdev, d->big,
769					 HCI_ERROR_LOCAL_HOST_TERM);
770}
771
772static void terminate_big_destroy(struct hci_dev *hdev, void *data, int err)
773{
774	kfree(data);
775}
776
777static int hci_le_terminate_big(struct hci_dev *hdev, struct hci_conn *conn)
778{
779	struct iso_list_data *d;
780	int ret;
781
782	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", conn->iso_qos.bcast.big,
783		   conn->iso_qos.bcast.bis);
784
785	d = kzalloc(sizeof(*d), GFP_KERNEL);
786	if (!d)
787		return -ENOMEM;
788
789	d->big = conn->iso_qos.bcast.big;
790	d->bis = conn->iso_qos.bcast.bis;
791	d->big_term = test_and_clear_bit(HCI_CONN_BIG_CREATED, &conn->flags);
792
793	ret = hci_cmd_sync_queue(hdev, terminate_big_sync, d,
794				 terminate_big_destroy);
795	if (ret)
796		kfree(d);
797
798	return ret;
799}
800
801static int big_terminate_sync(struct hci_dev *hdev, void *data)
802{
803	struct iso_list_data *d = data;
804
805	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", d->big,
806		   d->sync_handle);
807
808	if (d->big_sync_term)
809		hci_le_big_terminate_sync(hdev, d->big);
810
811	if (d->pa_sync_term)
812		return hci_le_pa_terminate_sync(hdev, d->sync_handle);
813
814	return 0;
815}
816
817static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, struct hci_conn *conn)
818{
819	struct iso_list_data *d;
820	int ret;
821
822	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, conn->sync_handle);
823
824	d = kzalloc(sizeof(*d), GFP_KERNEL);
825	if (!d)
826		return -ENOMEM;
827
828	d->big = big;
829	d->sync_handle = conn->sync_handle;
830	d->pa_sync_term = test_and_clear_bit(HCI_CONN_PA_SYNC, &conn->flags);
831	d->big_sync_term = test_and_clear_bit(HCI_CONN_BIG_SYNC, &conn->flags);
832
833	ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d,
834				 terminate_big_destroy);
835	if (ret)
836		kfree(d);
837
838	return ret;
839}
840
841/* Cleanup BIS connection
842 *
843 * Detects if there any BIS left connected in a BIG
844 * broadcaster: Remove advertising instance and terminate BIG.
845 * broadcaster receiver: Teminate BIG sync and terminate PA sync.
846 */
847static void bis_cleanup(struct hci_conn *conn)
848{
849	struct hci_dev *hdev = conn->hdev;
850	struct hci_conn *bis;
851
852	bt_dev_dbg(hdev, "conn %p", conn);
853
854	if (conn->role == HCI_ROLE_MASTER) {
855		if (!test_and_clear_bit(HCI_CONN_PER_ADV, &conn->flags))
856			return;
857
858		/* Check if ISO connection is a BIS and terminate advertising
859		 * set and BIG if there are no other connections using it.
860		 */
861		bis = hci_conn_hash_lookup_big(hdev, conn->iso_qos.bcast.big);
862		if (bis)
863			return;
864
865		hci_le_terminate_big(hdev, conn);
866	} else {
867		bis = hci_conn_hash_lookup_big_any_dst(hdev,
868						       conn->iso_qos.bcast.big);
869
870		if (bis)
871			return;
872
873		hci_le_big_terminate(hdev, conn->iso_qos.bcast.big,
874				     conn);
875	}
876}
877
878static int remove_cig_sync(struct hci_dev *hdev, void *data)
879{
880	u8 handle = PTR_UINT(data);
881
882	return hci_le_remove_cig_sync(hdev, handle);
883}
884
885static int hci_le_remove_cig(struct hci_dev *hdev, u8 handle)
886{
887	bt_dev_dbg(hdev, "handle 0x%2.2x", handle);
888
889	return hci_cmd_sync_queue(hdev, remove_cig_sync, UINT_PTR(handle),
890				  NULL);
891}
892
893static void find_cis(struct hci_conn *conn, void *data)
894{
895	struct iso_list_data *d = data;
896
897	/* Ignore broadcast or if CIG don't match */
898	if (!bacmp(&conn->dst, BDADDR_ANY) || d->cig != conn->iso_qos.ucast.cig)
899		return;
900
901	d->count++;
902}
903
904/* Cleanup CIS connection:
905 *
906 * Detects if there any CIS left connected in a CIG and remove it.
907 */
908static void cis_cleanup(struct hci_conn *conn)
909{
910	struct hci_dev *hdev = conn->hdev;
911	struct iso_list_data d;
912
913	if (conn->iso_qos.ucast.cig == BT_ISO_QOS_CIG_UNSET)
914		return;
915
916	memset(&d, 0, sizeof(d));
917	d.cig = conn->iso_qos.ucast.cig;
918
919	/* Check if ISO connection is a CIS and remove CIG if there are
920	 * no other connections using it.
921	 */
922	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_BOUND, &d);
923	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECT, &d);
924	hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &d);
925	if (d.count)
926		return;
927
928	hci_le_remove_cig(hdev, conn->iso_qos.ucast.cig);
929}
930
931static int hci_conn_hash_alloc_unset(struct hci_dev *hdev)
932{
933	return ida_alloc_range(&hdev->unset_handle_ida, HCI_CONN_HANDLE_MAX + 1,
934			       U16_MAX, GFP_ATOMIC);
935}
936
937struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
938			      u8 role, u16 handle)
939{
940	struct hci_conn *conn;
941
942	bt_dev_dbg(hdev, "dst %pMR handle 0x%4.4x", dst, handle);
943
944	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
945	if (!conn)
946		return NULL;
947
948	bacpy(&conn->dst, dst);
949	bacpy(&conn->src, &hdev->bdaddr);
950	conn->handle = handle;
951	conn->hdev  = hdev;
952	conn->type  = type;
953	conn->role  = role;
954	conn->mode  = HCI_CM_ACTIVE;
955	conn->state = BT_OPEN;
956	conn->auth_type = HCI_AT_GENERAL_BONDING;
957	conn->io_capability = hdev->io_capability;
958	conn->remote_auth = 0xff;
959	conn->key_type = 0xff;
960	conn->rssi = HCI_RSSI_INVALID;
961	conn->tx_power = HCI_TX_POWER_INVALID;
962	conn->max_tx_power = HCI_TX_POWER_INVALID;
963	conn->sync_handle = HCI_SYNC_HANDLE_INVALID;
964
965	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
966	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
967
968	/* Set Default Authenticated payload timeout to 30s */
969	conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
970
971	if (conn->role == HCI_ROLE_MASTER)
972		conn->out = true;
973
974	switch (type) {
975	case ACL_LINK:
976		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
977		break;
978	case LE_LINK:
979		/* conn->src should reflect the local identity address */
980		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
981		break;
982	case ISO_LINK:
983		/* conn->src should reflect the local identity address */
984		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
985
986		/* set proper cleanup function */
987		if (!bacmp(dst, BDADDR_ANY))
988			conn->cleanup = bis_cleanup;
989		else if (conn->role == HCI_ROLE_MASTER)
990			conn->cleanup = cis_cleanup;
991
992		break;
993	case SCO_LINK:
994		if (lmp_esco_capable(hdev))
995			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
996					(hdev->esco_type & EDR_ESCO_MASK);
997		else
998			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
999		break;
1000	case ESCO_LINK:
1001		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
1002		break;
1003	}
1004
1005	skb_queue_head_init(&conn->data_q);
1006
1007	INIT_LIST_HEAD(&conn->chan_list);
1008	INIT_LIST_HEAD(&conn->link_list);
1009
1010	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
1011	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
1012	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
1013	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
1014
1015	atomic_set(&conn->refcnt, 0);
1016
1017	hci_dev_hold(hdev);
1018
1019	hci_conn_hash_add(hdev, conn);
1020
1021	/* The SCO and eSCO connections will only be notified when their
1022	 * setup has been completed. This is different to ACL links which
1023	 * can be notified right away.
1024	 */
1025	if (conn->type != SCO_LINK && conn->type != ESCO_LINK) {
1026		if (hdev->notify)
1027			hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
1028	}
1029
1030	hci_conn_init_sysfs(conn);
1031
1032	return conn;
1033}
1034
1035struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
1036				    bdaddr_t *dst, u8 role)
1037{
1038	int handle;
1039
1040	bt_dev_dbg(hdev, "dst %pMR", dst);
1041
1042	handle = hci_conn_hash_alloc_unset(hdev);
1043	if (unlikely(handle < 0))
1044		return NULL;
1045
1046	return hci_conn_add(hdev, type, dst, role, handle);
1047}
1048
1049static void hci_conn_cleanup_child(struct hci_conn *conn, u8 reason)
1050{
1051	if (!reason)
1052		reason = HCI_ERROR_REMOTE_USER_TERM;
1053
1054	/* Due to race, SCO/ISO conn might be not established yet at this point,
1055	 * and nothing else will clean it up. In other cases it is done via HCI
1056	 * events.
1057	 */
1058	switch (conn->type) {
1059	case SCO_LINK:
1060	case ESCO_LINK:
1061		if (HCI_CONN_HANDLE_UNSET(conn->handle))
1062			hci_conn_failed(conn, reason);
1063		break;
1064	case ISO_LINK:
1065		if (conn->state != BT_CONNECTED &&
1066		    !test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
1067			hci_conn_failed(conn, reason);
1068		break;
1069	}
1070}
1071
1072static void hci_conn_unlink(struct hci_conn *conn)
1073{
1074	struct hci_dev *hdev = conn->hdev;
1075
1076	bt_dev_dbg(hdev, "hcon %p", conn);
1077
1078	if (!conn->parent) {
1079		struct hci_link *link, *t;
1080
1081		list_for_each_entry_safe(link, t, &conn->link_list, list) {
1082			struct hci_conn *child = link->conn;
1083
1084			hci_conn_unlink(child);
1085
1086			/* If hdev is down it means
1087			 * hci_dev_close_sync/hci_conn_hash_flush is in progress
1088			 * and links don't need to be cleanup as all connections
1089			 * would be cleanup.
1090			 */
1091			if (!test_bit(HCI_UP, &hdev->flags))
1092				continue;
1093
1094			hci_conn_cleanup_child(child, conn->abort_reason);
1095		}
1096
1097		return;
1098	}
1099
1100	if (!conn->link)
1101		return;
1102
1103	list_del_rcu(&conn->link->list);
1104	synchronize_rcu();
1105
1106	hci_conn_drop(conn->parent);
1107	hci_conn_put(conn->parent);
1108	conn->parent = NULL;
1109
1110	kfree(conn->link);
1111	conn->link = NULL;
1112}
1113
1114void hci_conn_del(struct hci_conn *conn)
1115{
1116	struct hci_dev *hdev = conn->hdev;
1117
1118	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
1119
1120	hci_conn_unlink(conn);
1121
1122	cancel_delayed_work_sync(&conn->disc_work);
1123	cancel_delayed_work_sync(&conn->auto_accept_work);
1124	cancel_delayed_work_sync(&conn->idle_work);
1125
1126	if (conn->type == ACL_LINK) {
1127		/* Unacked frames */
1128		hdev->acl_cnt += conn->sent;
1129	} else if (conn->type == LE_LINK) {
1130		cancel_delayed_work(&conn->le_conn_timeout);
1131
1132		if (hdev->le_pkts)
1133			hdev->le_cnt += conn->sent;
1134		else
1135			hdev->acl_cnt += conn->sent;
1136	} else {
1137		/* Unacked ISO frames */
1138		if (conn->type == ISO_LINK) {
1139			if (hdev->iso_pkts)
1140				hdev->iso_cnt += conn->sent;
1141			else if (hdev->le_pkts)
1142				hdev->le_cnt += conn->sent;
1143			else
1144				hdev->acl_cnt += conn->sent;
1145		}
1146	}
1147
1148	skb_queue_purge(&conn->data_q);
1149
1150	/* Remove the connection from the list and cleanup its remaining
1151	 * state. This is a separate function since for some cases like
1152	 * BT_CONNECT_SCAN we *only* want the cleanup part without the
1153	 * rest of hci_conn_del.
1154	 */
1155	hci_conn_cleanup(conn);
1156}
1157
1158struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
1159{
1160	int use_src = bacmp(src, BDADDR_ANY);
1161	struct hci_dev *hdev = NULL, *d;
1162
1163	BT_DBG("%pMR -> %pMR", src, dst);
1164
1165	read_lock(&hci_dev_list_lock);
1166
1167	list_for_each_entry(d, &hci_dev_list, list) {
1168		if (!test_bit(HCI_UP, &d->flags) ||
1169		    hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
1170		    d->dev_type != HCI_PRIMARY)
1171			continue;
1172
1173		/* Simple routing:
1174		 *   No source address - find interface with bdaddr != dst
1175		 *   Source address    - find interface with bdaddr == src
1176		 */
1177
1178		if (use_src) {
1179			bdaddr_t id_addr;
1180			u8 id_addr_type;
1181
1182			if (src_type == BDADDR_BREDR) {
1183				if (!lmp_bredr_capable(d))
1184					continue;
1185				bacpy(&id_addr, &d->bdaddr);
1186				id_addr_type = BDADDR_BREDR;
1187			} else {
1188				if (!lmp_le_capable(d))
1189					continue;
1190
1191				hci_copy_identity_address(d, &id_addr,
1192							  &id_addr_type);
1193
1194				/* Convert from HCI to three-value type */
1195				if (id_addr_type == ADDR_LE_DEV_PUBLIC)
1196					id_addr_type = BDADDR_LE_PUBLIC;
1197				else
1198					id_addr_type = BDADDR_LE_RANDOM;
1199			}
1200
1201			if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
1202				hdev = d; break;
1203			}
1204		} else {
1205			if (bacmp(&d->bdaddr, dst)) {
1206				hdev = d; break;
1207			}
1208		}
1209	}
1210
1211	if (hdev)
1212		hdev = hci_dev_hold(hdev);
1213
1214	read_unlock(&hci_dev_list_lock);
1215	return hdev;
1216}
1217EXPORT_SYMBOL(hci_get_route);
1218
1219/* This function requires the caller holds hdev->lock */
1220static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
1221{
1222	struct hci_dev *hdev = conn->hdev;
1223
1224	hci_connect_le_scan_cleanup(conn, status);
1225
1226	/* Enable advertising in case this was a failed connection
1227	 * attempt as a peripheral.
1228	 */
1229	hci_enable_advertising(hdev);
1230}
1231
1232/* This function requires the caller holds hdev->lock */
1233void hci_conn_failed(struct hci_conn *conn, u8 status)
1234{
1235	struct hci_dev *hdev = conn->hdev;
1236
1237	bt_dev_dbg(hdev, "status 0x%2.2x", status);
1238
1239	switch (conn->type) {
1240	case LE_LINK:
1241		hci_le_conn_failed(conn, status);
1242		break;
1243	case ACL_LINK:
1244		mgmt_connect_failed(hdev, &conn->dst, conn->type,
1245				    conn->dst_type, status);
1246		break;
1247	}
1248
1249	conn->state = BT_CLOSED;
1250	hci_connect_cfm(conn, status);
1251	hci_conn_del(conn);
1252}
1253
1254/* This function requires the caller holds hdev->lock */
1255u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle)
1256{
1257	struct hci_dev *hdev = conn->hdev;
1258
1259	bt_dev_dbg(hdev, "hcon %p handle 0x%4.4x", conn, handle);
1260
1261	if (conn->handle == handle)
1262		return 0;
1263
1264	if (handle > HCI_CONN_HANDLE_MAX) {
1265		bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
1266			   handle, HCI_CONN_HANDLE_MAX);
1267		return HCI_ERROR_INVALID_PARAMETERS;
1268	}
1269
1270	/* If abort_reason has been sent it means the connection is being
1271	 * aborted and the handle shall not be changed.
1272	 */
1273	if (conn->abort_reason)
1274		return conn->abort_reason;
1275
1276	if (HCI_CONN_HANDLE_UNSET(conn->handle))
1277		ida_free(&hdev->unset_handle_ida, conn->handle);
1278
1279	conn->handle = handle;
1280
1281	return 0;
1282}
1283
1284static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
1285{
1286	struct hci_conn *conn;
1287	u16 handle = PTR_UINT(data);
1288
1289	conn = hci_conn_hash_lookup_handle(hdev, handle);
1290	if (!conn)
1291		return;
1292
1293	bt_dev_dbg(hdev, "err %d", err);
1294
1295	hci_dev_lock(hdev);
1296
1297	if (!err) {
1298		hci_connect_le_scan_cleanup(conn, 0x00);
1299		goto done;
1300	}
1301
1302	/* Check if connection is still pending */
1303	if (conn != hci_lookup_le_connect(hdev))
1304		goto done;
1305
1306	/* Flush to make sure we send create conn cancel command if needed */
1307	flush_delayed_work(&conn->le_conn_timeout);
1308	hci_conn_failed(conn, bt_status(err));
1309
1310done:
1311	hci_dev_unlock(hdev);
1312}
1313
1314static int hci_connect_le_sync(struct hci_dev *hdev, void *data)
1315{
1316	struct hci_conn *conn;
1317	u16 handle = PTR_UINT(data);
1318
1319	conn = hci_conn_hash_lookup_handle(hdev, handle);
1320	if (!conn)
1321		return 0;
1322
1323	bt_dev_dbg(hdev, "conn %p", conn);
1324
1325	clear_bit(HCI_CONN_SCANNING, &conn->flags);
1326	conn->state = BT_CONNECT;
1327
1328	return hci_le_create_conn_sync(hdev, conn);
1329}
1330
1331struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
1332				u8 dst_type, bool dst_resolved, u8 sec_level,
1333				u16 conn_timeout, u8 role)
1334{
1335	struct hci_conn *conn;
1336	struct smp_irk *irk;
1337	int err;
1338
1339	/* Let's make sure that le is enabled.*/
1340	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1341		if (lmp_le_capable(hdev))
1342			return ERR_PTR(-ECONNREFUSED);
1343
1344		return ERR_PTR(-EOPNOTSUPP);
1345	}
1346
1347	/* Since the controller supports only one LE connection attempt at a
1348	 * time, we return -EBUSY if there is any connection attempt running.
1349	 */
1350	if (hci_lookup_le_connect(hdev))
1351		return ERR_PTR(-EBUSY);
1352
1353	/* If there's already a connection object but it's not in
1354	 * scanning state it means it must already be established, in
1355	 * which case we can't do anything else except report a failure
1356	 * to connect.
1357	 */
1358	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1359	if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) {
1360		return ERR_PTR(-EBUSY);
1361	}
1362
1363	/* Check if the destination address has been resolved by the controller
1364	 * since if it did then the identity address shall be used.
1365	 */
1366	if (!dst_resolved) {
1367		/* When given an identity address with existing identity
1368		 * resolving key, the connection needs to be established
1369		 * to a resolvable random address.
1370		 *
1371		 * Storing the resolvable random address is required here
1372		 * to handle connection failures. The address will later
1373		 * be resolved back into the original identity address
1374		 * from the connect request.
1375		 */
1376		irk = hci_find_irk_by_addr(hdev, dst, dst_type);
1377		if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
1378			dst = &irk->rpa;
1379			dst_type = ADDR_LE_DEV_RANDOM;
1380		}
1381	}
1382
1383	if (conn) {
1384		bacpy(&conn->dst, dst);
1385	} else {
1386		conn = hci_conn_add_unset(hdev, LE_LINK, dst, role);
1387		if (!conn)
1388			return ERR_PTR(-ENOMEM);
1389		hci_conn_hold(conn);
1390		conn->pending_sec_level = sec_level;
1391	}
1392
1393	conn->dst_type = dst_type;
1394	conn->sec_level = BT_SECURITY_LOW;
1395	conn->conn_timeout = conn_timeout;
1396
1397	err = hci_cmd_sync_queue(hdev, hci_connect_le_sync,
1398				 UINT_PTR(conn->handle),
1399				 create_le_conn_complete);
1400	if (err) {
1401		hci_conn_del(conn);
1402		return ERR_PTR(err);
1403	}
1404
1405	return conn;
1406}
1407
1408static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
1409{
1410	struct hci_conn *conn;
1411
1412	conn = hci_conn_hash_lookup_le(hdev, addr, type);
1413	if (!conn)
1414		return false;
1415
1416	if (conn->state != BT_CONNECTED)
1417		return false;
1418
1419	return true;
1420}
1421
1422/* This function requires the caller holds hdev->lock */
1423static int hci_explicit_conn_params_set(struct hci_dev *hdev,
1424					bdaddr_t *addr, u8 addr_type)
1425{
1426	struct hci_conn_params *params;
1427
1428	if (is_connected(hdev, addr, addr_type))
1429		return -EISCONN;
1430
1431	params = hci_conn_params_lookup(hdev, addr, addr_type);
1432	if (!params) {
1433		params = hci_conn_params_add(hdev, addr, addr_type);
1434		if (!params)
1435			return -ENOMEM;
1436
1437		/* If we created new params, mark them to be deleted in
1438		 * hci_connect_le_scan_cleanup. It's different case than
1439		 * existing disabled params, those will stay after cleanup.
1440		 */
1441		params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
1442	}
1443
1444	/* We're trying to connect, so make sure params are at pend_le_conns */
1445	if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
1446	    params->auto_connect == HCI_AUTO_CONN_REPORT ||
1447	    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
1448		hci_pend_le_list_del_init(params);
1449		hci_pend_le_list_add(params, &hdev->pend_le_conns);
1450	}
1451
1452	params->explicit_connect = true;
1453
1454	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
1455	       params->auto_connect);
1456
1457	return 0;
1458}
1459
1460static int qos_set_big(struct hci_dev *hdev, struct bt_iso_qos *qos)
1461{
1462	struct hci_conn *conn;
1463	u8  big;
1464
1465	/* Allocate a BIG if not set */
1466	if (qos->bcast.big == BT_ISO_QOS_BIG_UNSET) {
1467		for (big = 0x00; big < 0xef; big++) {
1468
1469			conn = hci_conn_hash_lookup_big(hdev, big);
1470			if (!conn)
1471				break;
1472		}
1473
1474		if (big == 0xef)
1475			return -EADDRNOTAVAIL;
1476
1477		/* Update BIG */
1478		qos->bcast.big = big;
1479	}
1480
1481	return 0;
1482}
1483
1484static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos)
1485{
1486	struct hci_conn *conn;
1487	u8  bis;
1488
1489	/* Allocate BIS if not set */
1490	if (qos->bcast.bis == BT_ISO_QOS_BIS_UNSET) {
1491		/* Find an unused adv set to advertise BIS, skip instance 0x00
1492		 * since it is reserved as general purpose set.
1493		 */
1494		for (bis = 0x01; bis < hdev->le_num_of_adv_sets;
1495		     bis++) {
1496
1497			conn = hci_conn_hash_lookup_bis(hdev, BDADDR_ANY, bis);
1498			if (!conn)
1499				break;
1500		}
1501
1502		if (bis == hdev->le_num_of_adv_sets)
1503			return -EADDRNOTAVAIL;
1504
1505		/* Update BIS */
1506		qos->bcast.bis = bis;
1507	}
1508
1509	return 0;
1510}
1511
1512/* This function requires the caller holds hdev->lock */
1513static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
1514				    struct bt_iso_qos *qos, __u8 base_len,
1515				    __u8 *base)
1516{
1517	struct hci_conn *conn;
1518	int err;
1519
1520	/* Let's make sure that le is enabled.*/
1521	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1522		if (lmp_le_capable(hdev))
1523			return ERR_PTR(-ECONNREFUSED);
1524		return ERR_PTR(-EOPNOTSUPP);
1525	}
1526
1527	err = qos_set_big(hdev, qos);
1528	if (err)
1529		return ERR_PTR(err);
1530
1531	err = qos_set_bis(hdev, qos);
1532	if (err)
1533		return ERR_PTR(err);
1534
1535	/* Check if the LE Create BIG command has already been sent */
1536	conn = hci_conn_hash_lookup_per_adv_bis(hdev, dst, qos->bcast.big,
1537						qos->bcast.big);
1538	if (conn)
1539		return ERR_PTR(-EADDRINUSE);
1540
1541	/* Check BIS settings against other bound BISes, since all
1542	 * BISes in a BIG must have the same value for all parameters
1543	 */
1544	conn = hci_conn_hash_lookup_big(hdev, qos->bcast.big);
1545
1546	if (conn && (memcmp(qos, &conn->iso_qos, sizeof(*qos)) ||
1547		     base_len != conn->le_per_adv_data_len ||
1548		     memcmp(conn->le_per_adv_data, base, base_len)))
1549		return ERR_PTR(-EADDRINUSE);
1550
1551	conn = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
1552	if (!conn)
1553		return ERR_PTR(-ENOMEM);
1554
1555	conn->state = BT_CONNECT;
1556
1557	hci_conn_hold(conn);
1558	return conn;
1559}
1560
1561/* This function requires the caller holds hdev->lock */
1562struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
1563				     u8 dst_type, u8 sec_level,
1564				     u16 conn_timeout,
1565				     enum conn_reasons conn_reason)
1566{
1567	struct hci_conn *conn;
1568
1569	/* Let's make sure that le is enabled.*/
1570	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1571		if (lmp_le_capable(hdev))
1572			return ERR_PTR(-ECONNREFUSED);
1573
1574		return ERR_PTR(-EOPNOTSUPP);
1575	}
1576
1577	/* Some devices send ATT messages as soon as the physical link is
1578	 * established. To be able to handle these ATT messages, the user-
1579	 * space first establishes the connection and then starts the pairing
1580	 * process.
1581	 *
1582	 * So if a hci_conn object already exists for the following connection
1583	 * attempt, we simply update pending_sec_level and auth_type fields
1584	 * and return the object found.
1585	 */
1586	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1587	if (conn) {
1588		if (conn->pending_sec_level < sec_level)
1589			conn->pending_sec_level = sec_level;
1590		goto done;
1591	}
1592
1593	BT_DBG("requesting refresh of dst_addr");
1594
1595	conn = hci_conn_add_unset(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
1596	if (!conn)
1597		return ERR_PTR(-ENOMEM);
1598
1599	if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
1600		hci_conn_del(conn);
1601		return ERR_PTR(-EBUSY);
1602	}
1603
1604	conn->state = BT_CONNECT;
1605	set_bit(HCI_CONN_SCANNING, &conn->flags);
1606	conn->dst_type = dst_type;
1607	conn->sec_level = BT_SECURITY_LOW;
1608	conn->pending_sec_level = sec_level;
1609	conn->conn_timeout = conn_timeout;
1610	conn->conn_reason = conn_reason;
1611
1612	hci_update_passive_scan(hdev);
1613
1614done:
1615	hci_conn_hold(conn);
1616	return conn;
1617}
1618
1619struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
1620				 u8 sec_level, u8 auth_type,
1621				 enum conn_reasons conn_reason)
1622{
1623	struct hci_conn *acl;
1624
1625	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1626		if (lmp_bredr_capable(hdev))
1627			return ERR_PTR(-ECONNREFUSED);
1628
1629		return ERR_PTR(-EOPNOTSUPP);
1630	}
1631
1632	/* Reject outgoing connection to device with same BD ADDR against
1633	 * CVE-2020-26555
1634	 */
1635	if (!bacmp(&hdev->bdaddr, dst)) {
1636		bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
1637			   dst);
1638		return ERR_PTR(-ECONNREFUSED);
1639	}
1640
1641	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
1642	if (!acl) {
1643		acl = hci_conn_add_unset(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
1644		if (!acl)
1645			return ERR_PTR(-ENOMEM);
1646	}
1647
1648	hci_conn_hold(acl);
1649
1650	acl->conn_reason = conn_reason;
1651	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
1652		acl->sec_level = BT_SECURITY_LOW;
1653		acl->pending_sec_level = sec_level;
1654		acl->auth_type = auth_type;
1655		hci_acl_create_connection(acl);
1656	}
1657
1658	return acl;
1659}
1660
1661static struct hci_link *hci_conn_link(struct hci_conn *parent,
1662				      struct hci_conn *conn)
1663{
1664	struct hci_dev *hdev = parent->hdev;
1665	struct hci_link *link;
1666
1667	bt_dev_dbg(hdev, "parent %p hcon %p", parent, conn);
1668
1669	if (conn->link)
1670		return conn->link;
1671
1672	if (conn->parent)
1673		return NULL;
1674
1675	link = kzalloc(sizeof(*link), GFP_KERNEL);
1676	if (!link)
1677		return NULL;
1678
1679	link->conn = hci_conn_hold(conn);
1680	conn->link = link;
1681	conn->parent = hci_conn_get(parent);
1682
1683	/* Use list_add_tail_rcu append to the list */
1684	list_add_tail_rcu(&link->list, &parent->link_list);
1685
1686	return link;
1687}
1688
1689struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
1690				 __u16 setting, struct bt_codec *codec)
1691{
1692	struct hci_conn *acl;
1693	struct hci_conn *sco;
1694	struct hci_link *link;
1695
1696	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
1697			      CONN_REASON_SCO_CONNECT);
1698	if (IS_ERR(acl))
1699		return acl;
1700
1701	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
1702	if (!sco) {
1703		sco = hci_conn_add_unset(hdev, type, dst, HCI_ROLE_MASTER);
1704		if (!sco) {
1705			hci_conn_drop(acl);
1706			return ERR_PTR(-ENOMEM);
1707		}
1708	}
1709
1710	link = hci_conn_link(acl, sco);
1711	if (!link) {
1712		hci_conn_drop(acl);
1713		hci_conn_drop(sco);
1714		return ERR_PTR(-ENOLINK);
1715	}
1716
1717	sco->setting = setting;
1718	sco->codec = *codec;
1719
1720	if (acl->state == BT_CONNECTED &&
1721	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
1722		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
1723		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
1724
1725		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
1726			/* defer SCO setup until mode change completed */
1727			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
1728			return sco;
1729		}
1730
1731		hci_sco_setup(acl, 0x00);
1732	}
1733
1734	return sco;
1735}
1736
1737static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos)
1738{
1739	struct hci_dev *hdev = conn->hdev;
1740	struct hci_cp_le_create_big cp;
1741	struct iso_list_data data;
1742
1743	memset(&cp, 0, sizeof(cp));
1744
1745	data.big = qos->bcast.big;
1746	data.bis = qos->bcast.bis;
1747	data.count = 0;
1748
1749	/* Create a BIS for each bound connection */
1750	hci_conn_hash_list_state(hdev, bis_list, ISO_LINK,
1751				 BT_BOUND, &data);
1752
1753	cp.handle = qos->bcast.big;
1754	cp.adv_handle = qos->bcast.bis;
1755	cp.num_bis  = data.count;
1756	hci_cpu_to_le24(qos->bcast.out.interval, cp.bis.sdu_interval);
1757	cp.bis.sdu = cpu_to_le16(qos->bcast.out.sdu);
1758	cp.bis.latency =  cpu_to_le16(qos->bcast.out.latency);
1759	cp.bis.rtn  = qos->bcast.out.rtn;
1760	cp.bis.phy  = qos->bcast.out.phy;
1761	cp.bis.packing = qos->bcast.packing;
1762	cp.bis.framing = qos->bcast.framing;
1763	cp.bis.encryption = qos->bcast.encryption;
1764	memcpy(cp.bis.bcode, qos->bcast.bcode, sizeof(cp.bis.bcode));
1765
1766	return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp);
1767}
1768
1769static int set_cig_params_sync(struct hci_dev *hdev, void *data)
1770{
1771	u8 cig_id = PTR_UINT(data);
1772	struct hci_conn *conn;
1773	struct bt_iso_qos *qos;
1774	struct iso_cig_params pdu;
1775	u8 cis_id;
1776
1777	conn = hci_conn_hash_lookup_cig(hdev, cig_id);
1778	if (!conn)
1779		return 0;
1780
1781	memset(&pdu, 0, sizeof(pdu));
1782
1783	qos = &conn->iso_qos;
1784	pdu.cp.cig_id = cig_id;
1785	hci_cpu_to_le24(qos->ucast.out.interval, pdu.cp.c_interval);
1786	hci_cpu_to_le24(qos->ucast.in.interval, pdu.cp.p_interval);
1787	pdu.cp.sca = qos->ucast.sca;
1788	pdu.cp.packing = qos->ucast.packing;
1789	pdu.cp.framing = qos->ucast.framing;
1790	pdu.cp.c_latency = cpu_to_le16(qos->ucast.out.latency);
1791	pdu.cp.p_latency = cpu_to_le16(qos->ucast.in.latency);
1792
1793	/* Reprogram all CIS(s) with the same CIG, valid range are:
1794	 * num_cis: 0x00 to 0x1F
1795	 * cis_id: 0x00 to 0xEF
1796	 */
1797	for (cis_id = 0x00; cis_id < 0xf0 &&
1798	     pdu.cp.num_cis < ARRAY_SIZE(pdu.cis); cis_id++) {
1799		struct hci_cis_params *cis;
1800
1801		conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, cig_id, cis_id);
1802		if (!conn)
1803			continue;
1804
1805		qos = &conn->iso_qos;
1806
1807		cis = &pdu.cis[pdu.cp.num_cis++];
1808		cis->cis_id = cis_id;
1809		cis->c_sdu  = cpu_to_le16(conn->iso_qos.ucast.out.sdu);
1810		cis->p_sdu  = cpu_to_le16(conn->iso_qos.ucast.in.sdu);
1811		cis->c_phy  = qos->ucast.out.phy ? qos->ucast.out.phy :
1812			      qos->ucast.in.phy;
1813		cis->p_phy  = qos->ucast.in.phy ? qos->ucast.in.phy :
1814			      qos->ucast.out.phy;
1815		cis->c_rtn  = qos->ucast.out.rtn;
1816		cis->p_rtn  = qos->ucast.in.rtn;
1817	}
1818
1819	if (!pdu.cp.num_cis)
1820		return 0;
1821
1822	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_CIG_PARAMS,
1823				     sizeof(pdu.cp) +
1824				     pdu.cp.num_cis * sizeof(pdu.cis[0]), &pdu,
1825				     HCI_CMD_TIMEOUT);
1826}
1827
1828static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
1829{
1830	struct hci_dev *hdev = conn->hdev;
1831	struct iso_list_data data;
1832
1833	memset(&data, 0, sizeof(data));
1834
1835	/* Allocate first still reconfigurable CIG if not set */
1836	if (qos->ucast.cig == BT_ISO_QOS_CIG_UNSET) {
1837		for (data.cig = 0x00; data.cig < 0xf0; data.cig++) {
1838			data.count = 0;
1839
1840			hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
1841						 BT_CONNECT, &data);
1842			if (data.count)
1843				continue;
1844
1845			hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
1846						 BT_CONNECTED, &data);
1847			if (!data.count)
1848				break;
1849		}
1850
1851		if (data.cig == 0xf0)
1852			return false;
1853
1854		/* Update CIG */
1855		qos->ucast.cig = data.cig;
1856	}
1857
1858	if (qos->ucast.cis != BT_ISO_QOS_CIS_UNSET) {
1859		if (hci_conn_hash_lookup_cis(hdev, NULL, 0, qos->ucast.cig,
1860					     qos->ucast.cis))
1861			return false;
1862		goto done;
1863	}
1864
1865	/* Allocate first available CIS if not set */
1866	for (data.cig = qos->ucast.cig, data.cis = 0x00; data.cis < 0xf0;
1867	     data.cis++) {
1868		if (!hci_conn_hash_lookup_cis(hdev, NULL, 0, data.cig,
1869					      data.cis)) {
1870			/* Update CIS */
1871			qos->ucast.cis = data.cis;
1872			break;
1873		}
1874	}
1875
1876	if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET)
1877		return false;
1878
1879done:
1880	if (hci_cmd_sync_queue(hdev, set_cig_params_sync,
1881			       UINT_PTR(qos->ucast.cig), NULL) < 0)
1882		return false;
1883
1884	return true;
1885}
1886
1887struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
1888			      __u8 dst_type, struct bt_iso_qos *qos)
1889{
1890	struct hci_conn *cis;
1891
1892	cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type, qos->ucast.cig,
1893				       qos->ucast.cis);
1894	if (!cis) {
1895		cis = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER);
1896		if (!cis)
1897			return ERR_PTR(-ENOMEM);
1898		cis->cleanup = cis_cleanup;
1899		cis->dst_type = dst_type;
1900		cis->iso_qos.ucast.cig = BT_ISO_QOS_CIG_UNSET;
1901		cis->iso_qos.ucast.cis = BT_ISO_QOS_CIS_UNSET;
1902	}
1903
1904	if (cis->state == BT_CONNECTED)
1905		return cis;
1906
1907	/* Check if CIS has been set and the settings matches */
1908	if (cis->state == BT_BOUND &&
1909	    !memcmp(&cis->iso_qos, qos, sizeof(*qos)))
1910		return cis;
1911
1912	/* Update LINK PHYs according to QoS preference */
1913	cis->le_tx_phy = qos->ucast.out.phy;
1914	cis->le_rx_phy = qos->ucast.in.phy;
1915
1916	/* If output interval is not set use the input interval as it cannot be
1917	 * 0x000000.
1918	 */
1919	if (!qos->ucast.out.interval)
1920		qos->ucast.out.interval = qos->ucast.in.interval;
1921
1922	/* If input interval is not set use the output interval as it cannot be
1923	 * 0x000000.
1924	 */
1925	if (!qos->ucast.in.interval)
1926		qos->ucast.in.interval = qos->ucast.out.interval;
1927
1928	/* If output latency is not set use the input latency as it cannot be
1929	 * 0x0000.
1930	 */
1931	if (!qos->ucast.out.latency)
1932		qos->ucast.out.latency = qos->ucast.in.latency;
1933
1934	/* If input latency is not set use the output latency as it cannot be
1935	 * 0x0000.
1936	 */
1937	if (!qos->ucast.in.latency)
1938		qos->ucast.in.latency = qos->ucast.out.latency;
1939
1940	if (!hci_le_set_cig_params(cis, qos)) {
1941		hci_conn_drop(cis);
1942		return ERR_PTR(-EINVAL);
1943	}
1944
1945	hci_conn_hold(cis);
1946
1947	cis->iso_qos = *qos;
1948	cis->state = BT_BOUND;
1949
1950	return cis;
1951}
1952
1953bool hci_iso_setup_path(struct hci_conn *conn)
1954{
1955	struct hci_dev *hdev = conn->hdev;
1956	struct hci_cp_le_setup_iso_path cmd;
1957
1958	memset(&cmd, 0, sizeof(cmd));
1959
1960	if (conn->iso_qos.ucast.out.sdu) {
1961		cmd.handle = cpu_to_le16(conn->handle);
1962		cmd.direction = 0x00; /* Input (Host to Controller) */
1963		cmd.path = 0x00; /* HCI path if enabled */
1964		cmd.codec = 0x03; /* Transparent Data */
1965
1966		if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
1967				 &cmd) < 0)
1968			return false;
1969	}
1970
1971	if (conn->iso_qos.ucast.in.sdu) {
1972		cmd.handle = cpu_to_le16(conn->handle);
1973		cmd.direction = 0x01; /* Output (Controller to Host) */
1974		cmd.path = 0x00; /* HCI path if enabled */
1975		cmd.codec = 0x03; /* Transparent Data */
1976
1977		if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
1978				 &cmd) < 0)
1979			return false;
1980	}
1981
1982	return true;
1983}
1984
1985int hci_conn_check_create_cis(struct hci_conn *conn)
1986{
1987	if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY))
1988		return -EINVAL;
1989
1990	if (!conn->parent || conn->parent->state != BT_CONNECTED ||
1991	    conn->state != BT_CONNECT || HCI_CONN_HANDLE_UNSET(conn->handle))
1992		return 1;
1993
1994	return 0;
1995}
1996
1997static int hci_create_cis_sync(struct hci_dev *hdev, void *data)
1998{
1999	return hci_le_create_cis_sync(hdev);
2000}
2001
2002int hci_le_create_cis_pending(struct hci_dev *hdev)
2003{
2004	struct hci_conn *conn;
2005	bool pending = false;
2006
2007	rcu_read_lock();
2008
2009	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
2010		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) {
2011			rcu_read_unlock();
2012			return -EBUSY;
2013		}
2014
2015		if (!hci_conn_check_create_cis(conn))
2016			pending = true;
2017	}
2018
2019	rcu_read_unlock();
2020
2021	if (!pending)
2022		return 0;
2023
2024	/* Queue Create CIS */
2025	return hci_cmd_sync_queue(hdev, hci_create_cis_sync, NULL, NULL);
2026}
2027
2028static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
2029			      struct bt_iso_io_qos *qos, __u8 phy)
2030{
2031	/* Only set MTU if PHY is enabled */
2032	if (!qos->sdu && qos->phy) {
2033		if (hdev->iso_mtu > 0)
2034			qos->sdu = hdev->iso_mtu;
2035		else if (hdev->le_mtu > 0)
2036			qos->sdu = hdev->le_mtu;
2037		else
2038			qos->sdu = hdev->acl_mtu;
2039	}
2040
2041	/* Use the same PHY as ACL if set to any */
2042	if (qos->phy == BT_ISO_PHY_ANY)
2043		qos->phy = phy;
2044
2045	/* Use LE ACL connection interval if not set */
2046	if (!qos->interval)
2047		/* ACL interval unit in 1.25 ms to us */
2048		qos->interval = conn->le_conn_interval * 1250;
2049
2050	/* Use LE ACL connection latency if not set */
2051	if (!qos->latency)
2052		qos->latency = conn->le_conn_latency;
2053}
2054
2055static int create_big_sync(struct hci_dev *hdev, void *data)
2056{
2057	struct hci_conn *conn = data;
2058	struct bt_iso_qos *qos = &conn->iso_qos;
2059	u16 interval, sync_interval = 0;
2060	u32 flags = 0;
2061	int err;
2062
2063	if (qos->bcast.out.phy == 0x02)
2064		flags |= MGMT_ADV_FLAG_SEC_2M;
2065
2066	/* Align intervals */
2067	interval = (qos->bcast.out.interval / 1250) * qos->bcast.sync_factor;
2068
2069	if (qos->bcast.bis)
2070		sync_interval = interval * 4;
2071
2072	err = hci_start_per_adv_sync(hdev, qos->bcast.bis, conn->le_per_adv_data_len,
2073				     conn->le_per_adv_data, flags, interval,
2074				     interval, sync_interval);
2075	if (err)
2076		return err;
2077
2078	return hci_le_create_big(conn, &conn->iso_qos);
2079}
2080
2081static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
2082{
2083	struct hci_cp_le_pa_create_sync *cp = data;
2084
2085	bt_dev_dbg(hdev, "");
2086
2087	if (err)
2088		bt_dev_err(hdev, "Unable to create PA: %d", err);
2089
2090	kfree(cp);
2091}
2092
2093static int create_pa_sync(struct hci_dev *hdev, void *data)
2094{
2095	struct hci_cp_le_pa_create_sync *cp = data;
2096	int err;
2097
2098	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC,
2099				    sizeof(*cp), cp, HCI_CMD_TIMEOUT);
2100	if (err) {
2101		hci_dev_clear_flag(hdev, HCI_PA_SYNC);
2102		return err;
2103	}
2104
2105	return hci_update_passive_scan_sync(hdev);
2106}
2107
2108int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type,
2109		       __u8 sid, struct bt_iso_qos *qos)
2110{
2111	struct hci_cp_le_pa_create_sync *cp;
2112
2113	if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC))
2114		return -EBUSY;
2115
2116	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2117	if (!cp) {
2118		hci_dev_clear_flag(hdev, HCI_PA_SYNC);
2119		return -ENOMEM;
2120	}
2121
2122	cp->options = qos->bcast.options;
2123	cp->sid = sid;
2124	cp->addr_type = dst_type;
2125	bacpy(&cp->addr, dst);
2126	cp->skip = cpu_to_le16(qos->bcast.skip);
2127	cp->sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
2128	cp->sync_cte_type = qos->bcast.sync_cte_type;
2129
2130	/* Queue start pa_create_sync and scan */
2131	return hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete);
2132}
2133
2134int hci_le_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
2135			   struct bt_iso_qos *qos,
2136			   __u16 sync_handle, __u8 num_bis, __u8 bis[])
2137{
2138	struct _packed {
2139		struct hci_cp_le_big_create_sync cp;
2140		__u8  bis[0x11];
2141	} pdu;
2142	int err;
2143
2144	if (num_bis > sizeof(pdu.bis))
2145		return -EINVAL;
2146
2147	err = qos_set_big(hdev, qos);
2148	if (err)
2149		return err;
2150
2151	if (hcon)
2152		hcon->iso_qos.bcast.big = qos->bcast.big;
2153
2154	memset(&pdu, 0, sizeof(pdu));
2155	pdu.cp.handle = qos->bcast.big;
2156	pdu.cp.sync_handle = cpu_to_le16(sync_handle);
2157	pdu.cp.encryption = qos->bcast.encryption;
2158	memcpy(pdu.cp.bcode, qos->bcast.bcode, sizeof(pdu.cp.bcode));
2159	pdu.cp.mse = qos->bcast.mse;
2160	pdu.cp.timeout = cpu_to_le16(qos->bcast.timeout);
2161	pdu.cp.num_bis = num_bis;
2162	memcpy(pdu.bis, bis, num_bis);
2163
2164	return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
2165			    sizeof(pdu.cp) + num_bis, &pdu);
2166}
2167
2168static void create_big_complete(struct hci_dev *hdev, void *data, int err)
2169{
2170	struct hci_conn *conn = data;
2171
2172	bt_dev_dbg(hdev, "conn %p", conn);
2173
2174	if (err) {
2175		bt_dev_err(hdev, "Unable to create BIG: %d", err);
2176		hci_connect_cfm(conn, err);
2177		hci_conn_del(conn);
2178	}
2179}
2180
2181struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst,
2182			      struct bt_iso_qos *qos,
2183			      __u8 base_len, __u8 *base)
2184{
2185	struct hci_conn *conn;
2186	__u8 eir[HCI_MAX_PER_AD_LENGTH];
2187
2188	if (base_len && base)
2189		base_len = eir_append_service_data(eir, 0,  0x1851,
2190						   base, base_len);
2191
2192	/* We need hci_conn object using the BDADDR_ANY as dst */
2193	conn = hci_add_bis(hdev, dst, qos, base_len, eir);
2194	if (IS_ERR(conn))
2195		return conn;
2196
2197	/* Update LINK PHYs according to QoS preference */
2198	conn->le_tx_phy = qos->bcast.out.phy;
2199	conn->le_tx_phy = qos->bcast.out.phy;
2200
2201	/* Add Basic Announcement into Peridic Adv Data if BASE is set */
2202	if (base_len && base) {
2203		memcpy(conn->le_per_adv_data,  eir, sizeof(eir));
2204		conn->le_per_adv_data_len = base_len;
2205	}
2206
2207	hci_iso_qos_setup(hdev, conn, &qos->bcast.out,
2208			  conn->le_tx_phy ? conn->le_tx_phy :
2209			  hdev->le_tx_def_phys);
2210
2211	conn->iso_qos = *qos;
2212	conn->state = BT_BOUND;
2213
2214	return conn;
2215}
2216
2217static void bis_mark_per_adv(struct hci_conn *conn, void *data)
2218{
2219	struct iso_list_data *d = data;
2220
2221	/* Skip if not broadcast/ANY address */
2222	if (bacmp(&conn->dst, BDADDR_ANY))
2223		return;
2224
2225	if (d->big != conn->iso_qos.bcast.big ||
2226	    d->bis == BT_ISO_QOS_BIS_UNSET ||
2227	    d->bis != conn->iso_qos.bcast.bis)
2228		return;
2229
2230	set_bit(HCI_CONN_PER_ADV, &conn->flags);
2231}
2232
2233struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
2234				 __u8 dst_type, struct bt_iso_qos *qos,
2235				 __u8 base_len, __u8 *base)
2236{
2237	struct hci_conn *conn;
2238	int err;
2239	struct iso_list_data data;
2240
2241	conn = hci_bind_bis(hdev, dst, qos, base_len, base);
2242	if (IS_ERR(conn))
2243		return conn;
2244
2245	data.big = qos->bcast.big;
2246	data.bis = qos->bcast.bis;
2247
2248	/* Set HCI_CONN_PER_ADV for all bound connections, to mark that
2249	 * the start periodic advertising and create BIG commands have
2250	 * been queued
2251	 */
2252	hci_conn_hash_list_state(hdev, bis_mark_per_adv, ISO_LINK,
2253				 BT_BOUND, &data);
2254
2255	/* Queue start periodic advertising and create BIG */
2256	err = hci_cmd_sync_queue(hdev, create_big_sync, conn,
2257				 create_big_complete);
2258	if (err < 0) {
2259		hci_conn_drop(conn);
2260		return ERR_PTR(err);
2261	}
2262
2263	return conn;
2264}
2265
2266struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
2267				 __u8 dst_type, struct bt_iso_qos *qos)
2268{
2269	struct hci_conn *le;
2270	struct hci_conn *cis;
2271	struct hci_link *link;
2272
2273	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2274		le = hci_connect_le(hdev, dst, dst_type, false,
2275				    BT_SECURITY_LOW,
2276				    HCI_LE_CONN_TIMEOUT,
2277				    HCI_ROLE_SLAVE);
2278	else
2279		le = hci_connect_le_scan(hdev, dst, dst_type,
2280					 BT_SECURITY_LOW,
2281					 HCI_LE_CONN_TIMEOUT,
2282					 CONN_REASON_ISO_CONNECT);
2283	if (IS_ERR(le))
2284		return le;
2285
2286	hci_iso_qos_setup(hdev, le, &qos->ucast.out,
2287			  le->le_tx_phy ? le->le_tx_phy : hdev->le_tx_def_phys);
2288	hci_iso_qos_setup(hdev, le, &qos->ucast.in,
2289			  le->le_rx_phy ? le->le_rx_phy : hdev->le_rx_def_phys);
2290
2291	cis = hci_bind_cis(hdev, dst, dst_type, qos);
2292	if (IS_ERR(cis)) {
2293		hci_conn_drop(le);
2294		return cis;
2295	}
2296
2297	link = hci_conn_link(le, cis);
2298	if (!link) {
2299		hci_conn_drop(le);
2300		hci_conn_drop(cis);
2301		return ERR_PTR(-ENOLINK);
2302	}
2303
2304	/* Link takes the refcount */
2305	hci_conn_drop(cis);
2306
2307	cis->state = BT_CONNECT;
2308
2309	hci_le_create_cis_pending(hdev);
2310
2311	return cis;
2312}
2313
2314/* Check link security requirement */
2315int hci_conn_check_link_mode(struct hci_conn *conn)
2316{
2317	BT_DBG("hcon %p", conn);
2318
2319	/* In Secure Connections Only mode, it is required that Secure
2320	 * Connections is used and the link is encrypted with AES-CCM
2321	 * using a P-256 authenticated combination key.
2322	 */
2323	if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
2324		if (!hci_conn_sc_enabled(conn) ||
2325		    !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2326		    conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
2327			return 0;
2328	}
2329
2330	 /* AES encryption is required for Level 4:
2331	  *
2332	  * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C
2333	  * page 1319:
2334	  *
2335	  * 128-bit equivalent strength for link and encryption keys
2336	  * required using FIPS approved algorithms (E0 not allowed,
2337	  * SAFER+ not allowed, and P-192 not allowed; encryption key
2338	  * not shortened)
2339	  */
2340	if (conn->sec_level == BT_SECURITY_FIPS &&
2341	    !test_bit(HCI_CONN_AES_CCM, &conn->flags)) {
2342		bt_dev_err(conn->hdev,
2343			   "Invalid security: Missing AES-CCM usage");
2344		return 0;
2345	}
2346
2347	if (hci_conn_ssp_enabled(conn) &&
2348	    !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2349		return 0;
2350
2351	return 1;
2352}
2353
2354/* Authenticate remote device */
2355static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
2356{
2357	BT_DBG("hcon %p", conn);
2358
2359	if (conn->pending_sec_level > sec_level)
2360		sec_level = conn->pending_sec_level;
2361
2362	if (sec_level > conn->sec_level)
2363		conn->pending_sec_level = sec_level;
2364	else if (test_bit(HCI_CONN_AUTH, &conn->flags))
2365		return 1;
2366
2367	/* Make sure we preserve an existing MITM requirement*/
2368	auth_type |= (conn->auth_type & 0x01);
2369
2370	conn->auth_type = auth_type;
2371
2372	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2373		struct hci_cp_auth_requested cp;
2374
2375		cp.handle = cpu_to_le16(conn->handle);
2376		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
2377			     sizeof(cp), &cp);
2378
2379		/* Set the ENCRYPT_PEND to trigger encryption after
2380		 * authentication.
2381		 */
2382		if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2383			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2384	}
2385
2386	return 0;
2387}
2388
2389/* Encrypt the link */
2390static void hci_conn_encrypt(struct hci_conn *conn)
2391{
2392	BT_DBG("hcon %p", conn);
2393
2394	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2395		struct hci_cp_set_conn_encrypt cp;
2396		cp.handle  = cpu_to_le16(conn->handle);
2397		cp.encrypt = 0x01;
2398		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2399			     &cp);
2400	}
2401}
2402
2403/* Enable security */
2404int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
2405		      bool initiator)
2406{
2407	BT_DBG("hcon %p", conn);
2408
2409	if (conn->type == LE_LINK)
2410		return smp_conn_security(conn, sec_level);
2411
2412	/* For sdp we don't need the link key. */
2413	if (sec_level == BT_SECURITY_SDP)
2414		return 1;
2415
2416	/* For non 2.1 devices and low security level we don't need the link
2417	   key. */
2418	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
2419		return 1;
2420
2421	/* For other security levels we need the link key. */
2422	if (!test_bit(HCI_CONN_AUTH, &conn->flags))
2423		goto auth;
2424
2425	switch (conn->key_type) {
2426	case HCI_LK_AUTH_COMBINATION_P256:
2427		/* An authenticated FIPS approved combination key has
2428		 * sufficient security for security level 4 or lower.
2429		 */
2430		if (sec_level <= BT_SECURITY_FIPS)
2431			goto encrypt;
2432		break;
2433	case HCI_LK_AUTH_COMBINATION_P192:
2434		/* An authenticated combination key has sufficient security for
2435		 * security level 3 or lower.
2436		 */
2437		if (sec_level <= BT_SECURITY_HIGH)
2438			goto encrypt;
2439		break;
2440	case HCI_LK_UNAUTH_COMBINATION_P192:
2441	case HCI_LK_UNAUTH_COMBINATION_P256:
2442		/* An unauthenticated combination key has sufficient security
2443		 * for security level 2 or lower.
2444		 */
2445		if (sec_level <= BT_SECURITY_MEDIUM)
2446			goto encrypt;
2447		break;
2448	case HCI_LK_COMBINATION:
2449		/* A combination key has always sufficient security for the
2450		 * security levels 2 or lower. High security level requires the
2451		 * combination key is generated using maximum PIN code length
2452		 * (16). For pre 2.1 units.
2453		 */
2454		if (sec_level <= BT_SECURITY_MEDIUM || conn->pin_length == 16)
2455			goto encrypt;
2456		break;
2457	default:
2458		break;
2459	}
2460
2461auth:
2462	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
2463		return 0;
2464
2465	if (initiator)
2466		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2467
2468	if (!hci_conn_auth(conn, sec_level, auth_type))
2469		return 0;
2470
2471encrypt:
2472	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
2473		/* Ensure that the encryption key size has been read,
2474		 * otherwise stall the upper layer responses.
2475		 */
2476		if (!conn->enc_key_size)
2477			return 0;
2478
2479		/* Nothing else needed, all requirements are met */
2480		return 1;
2481	}
2482
2483	hci_conn_encrypt(conn);
2484	return 0;
2485}
2486EXPORT_SYMBOL(hci_conn_security);
2487
2488/* Check secure link requirement */
2489int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
2490{
2491	BT_DBG("hcon %p", conn);
2492
2493	/* Accept if non-secure or higher security level is required */
2494	if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
2495		return 1;
2496
2497	/* Accept if secure or higher security level is already present */
2498	if (conn->sec_level == BT_SECURITY_HIGH ||
2499	    conn->sec_level == BT_SECURITY_FIPS)
2500		return 1;
2501
2502	/* Reject not secure link */
2503	return 0;
2504}
2505EXPORT_SYMBOL(hci_conn_check_secure);
2506
2507/* Switch role */
2508int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
2509{
2510	BT_DBG("hcon %p", conn);
2511
2512	if (role == conn->role)
2513		return 1;
2514
2515	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
2516		struct hci_cp_switch_role cp;
2517		bacpy(&cp.bdaddr, &conn->dst);
2518		cp.role = role;
2519		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
2520	}
2521
2522	return 0;
2523}
2524EXPORT_SYMBOL(hci_conn_switch_role);
2525
2526/* Enter active mode */
2527void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
2528{
2529	struct hci_dev *hdev = conn->hdev;
2530
2531	BT_DBG("hcon %p mode %d", conn, conn->mode);
2532
2533	if (conn->mode != HCI_CM_SNIFF)
2534		goto timer;
2535
2536	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
2537		goto timer;
2538
2539	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2540		struct hci_cp_exit_sniff_mode cp;
2541		cp.handle = cpu_to_le16(conn->handle);
2542		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
2543	}
2544
2545timer:
2546	if (hdev->idle_timeout > 0)
2547		queue_delayed_work(hdev->workqueue, &conn->idle_work,
2548				   msecs_to_jiffies(hdev->idle_timeout));
2549}
2550
2551/* Drop all connection on the device */
2552void hci_conn_hash_flush(struct hci_dev *hdev)
2553{
2554	struct list_head *head = &hdev->conn_hash.list;
2555	struct hci_conn *conn;
2556
2557	BT_DBG("hdev %s", hdev->name);
2558
2559	/* We should not traverse the list here, because hci_conn_del
2560	 * can remove extra links, which may cause the list traversal
2561	 * to hit items that have already been released.
2562	 */
2563	while ((conn = list_first_entry_or_null(head,
2564						struct hci_conn,
2565						list)) != NULL) {
2566		conn->state = BT_CLOSED;
2567		hci_disconn_cfm(conn, HCI_ERROR_LOCAL_HOST_TERM);
2568		hci_conn_del(conn);
2569	}
2570}
2571
2572/* Check pending connect attempts */
2573void hci_conn_check_pending(struct hci_dev *hdev)
2574{
2575	struct hci_conn *conn;
2576
2577	BT_DBG("hdev %s", hdev->name);
2578
2579	hci_dev_lock(hdev);
2580
2581	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
2582	if (conn)
2583		hci_acl_create_connection(conn);
2584
2585	hci_dev_unlock(hdev);
2586}
2587
2588static u32 get_link_mode(struct hci_conn *conn)
2589{
2590	u32 link_mode = 0;
2591
2592	if (conn->role == HCI_ROLE_MASTER)
2593		link_mode |= HCI_LM_MASTER;
2594
2595	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2596		link_mode |= HCI_LM_ENCRYPT;
2597
2598	if (test_bit(HCI_CONN_AUTH, &conn->flags))
2599		link_mode |= HCI_LM_AUTH;
2600
2601	if (test_bit(HCI_CONN_SECURE, &conn->flags))
2602		link_mode |= HCI_LM_SECURE;
2603
2604	if (test_bit(HCI_CONN_FIPS, &conn->flags))
2605		link_mode |= HCI_LM_FIPS;
2606
2607	return link_mode;
2608}
2609
2610int hci_get_conn_list(void __user *arg)
2611{
2612	struct hci_conn *c;
2613	struct hci_conn_list_req req, *cl;
2614	struct hci_conn_info *ci;
2615	struct hci_dev *hdev;
2616	int n = 0, size, err;
2617
2618	if (copy_from_user(&req, arg, sizeof(req)))
2619		return -EFAULT;
2620
2621	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
2622		return -EINVAL;
2623
2624	size = sizeof(req) + req.conn_num * sizeof(*ci);
2625
2626	cl = kmalloc(size, GFP_KERNEL);
2627	if (!cl)
2628		return -ENOMEM;
2629
2630	hdev = hci_dev_get(req.dev_id);
2631	if (!hdev) {
2632		kfree(cl);
2633		return -ENODEV;
2634	}
2635
2636	ci = cl->conn_info;
2637
2638	hci_dev_lock(hdev);
2639	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2640		bacpy(&(ci + n)->bdaddr, &c->dst);
2641		(ci + n)->handle = c->handle;
2642		(ci + n)->type  = c->type;
2643		(ci + n)->out   = c->out;
2644		(ci + n)->state = c->state;
2645		(ci + n)->link_mode = get_link_mode(c);
2646		if (++n >= req.conn_num)
2647			break;
2648	}
2649	hci_dev_unlock(hdev);
2650
2651	cl->dev_id = hdev->id;
2652	cl->conn_num = n;
2653	size = sizeof(req) + n * sizeof(*ci);
2654
2655	hci_dev_put(hdev);
2656
2657	err = copy_to_user(arg, cl, size);
2658	kfree(cl);
2659
2660	return err ? -EFAULT : 0;
2661}
2662
2663int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
2664{
2665	struct hci_conn_info_req req;
2666	struct hci_conn_info ci;
2667	struct hci_conn *conn;
2668	char __user *ptr = arg + sizeof(req);
2669
2670	if (copy_from_user(&req, arg, sizeof(req)))
2671		return -EFAULT;
2672
2673	hci_dev_lock(hdev);
2674	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
2675	if (conn) {
2676		bacpy(&ci.bdaddr, &conn->dst);
2677		ci.handle = conn->handle;
2678		ci.type  = conn->type;
2679		ci.out   = conn->out;
2680		ci.state = conn->state;
2681		ci.link_mode = get_link_mode(conn);
2682	}
2683	hci_dev_unlock(hdev);
2684
2685	if (!conn)
2686		return -ENOENT;
2687
2688	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
2689}
2690
2691int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
2692{
2693	struct hci_auth_info_req req;
2694	struct hci_conn *conn;
2695
2696	if (copy_from_user(&req, arg, sizeof(req)))
2697		return -EFAULT;
2698
2699	hci_dev_lock(hdev);
2700	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
2701	if (conn)
2702		req.type = conn->auth_type;
2703	hci_dev_unlock(hdev);
2704
2705	if (!conn)
2706		return -ENOENT;
2707
2708	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
2709}
2710
2711struct hci_chan *hci_chan_create(struct hci_conn *conn)
2712{
2713	struct hci_dev *hdev = conn->hdev;
2714	struct hci_chan *chan;
2715
2716	BT_DBG("%s hcon %p", hdev->name, conn);
2717
2718	if (test_bit(HCI_CONN_DROP, &conn->flags)) {
2719		BT_DBG("Refusing to create new hci_chan");
2720		return NULL;
2721	}
2722
2723	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
2724	if (!chan)
2725		return NULL;
2726
2727	chan->conn = hci_conn_get(conn);
2728	skb_queue_head_init(&chan->data_q);
2729	chan->state = BT_CONNECTED;
2730
2731	list_add_rcu(&chan->list, &conn->chan_list);
2732
2733	return chan;
2734}
2735
2736void hci_chan_del(struct hci_chan *chan)
2737{
2738	struct hci_conn *conn = chan->conn;
2739	struct hci_dev *hdev = conn->hdev;
2740
2741	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
2742
2743	list_del_rcu(&chan->list);
2744
2745	synchronize_rcu();
2746
2747	/* Prevent new hci_chan's to be created for this hci_conn */
2748	set_bit(HCI_CONN_DROP, &conn->flags);
2749
2750	hci_conn_put(conn);
2751
2752	skb_queue_purge(&chan->data_q);
2753	kfree(chan);
2754}
2755
2756void hci_chan_list_flush(struct hci_conn *conn)
2757{
2758	struct hci_chan *chan, *n;
2759
2760	BT_DBG("hcon %p", conn);
2761
2762	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
2763		hci_chan_del(chan);
2764}
2765
2766static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
2767						 __u16 handle)
2768{
2769	struct hci_chan *hchan;
2770
2771	list_for_each_entry(hchan, &hcon->chan_list, list) {
2772		if (hchan->handle == handle)
2773			return hchan;
2774	}
2775
2776	return NULL;
2777}
2778
2779struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
2780{
2781	struct hci_conn_hash *h = &hdev->conn_hash;
2782	struct hci_conn *hcon;
2783	struct hci_chan *hchan = NULL;
2784
2785	rcu_read_lock();
2786
2787	list_for_each_entry_rcu(hcon, &h->list, list) {
2788		hchan = __hci_chan_lookup_handle(hcon, handle);
2789		if (hchan)
2790			break;
2791	}
2792
2793	rcu_read_unlock();
2794
2795	return hchan;
2796}
2797
2798u32 hci_conn_get_phy(struct hci_conn *conn)
2799{
2800	u32 phys = 0;
2801
2802	/* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
2803	 * Table 6.2: Packets defined for synchronous, asynchronous, and
2804	 * CPB logical transport types.
2805	 */
2806	switch (conn->type) {
2807	case SCO_LINK:
2808		/* SCO logical transport (1 Mb/s):
2809		 * HV1, HV2, HV3 and DV.
2810		 */
2811		phys |= BT_PHY_BR_1M_1SLOT;
2812
2813		break;
2814
2815	case ACL_LINK:
2816		/* ACL logical transport (1 Mb/s) ptt=0:
2817		 * DH1, DM3, DH3, DM5 and DH5.
2818		 */
2819		phys |= BT_PHY_BR_1M_1SLOT;
2820
2821		if (conn->pkt_type & (HCI_DM3 | HCI_DH3))
2822			phys |= BT_PHY_BR_1M_3SLOT;
2823
2824		if (conn->pkt_type & (HCI_DM5 | HCI_DH5))
2825			phys |= BT_PHY_BR_1M_5SLOT;
2826
2827		/* ACL logical transport (2 Mb/s) ptt=1:
2828		 * 2-DH1, 2-DH3 and 2-DH5.
2829		 */
2830		if (!(conn->pkt_type & HCI_2DH1))
2831			phys |= BT_PHY_EDR_2M_1SLOT;
2832
2833		if (!(conn->pkt_type & HCI_2DH3))
2834			phys |= BT_PHY_EDR_2M_3SLOT;
2835
2836		if (!(conn->pkt_type & HCI_2DH5))
2837			phys |= BT_PHY_EDR_2M_5SLOT;
2838
2839		/* ACL logical transport (3 Mb/s) ptt=1:
2840		 * 3-DH1, 3-DH3 and 3-DH5.
2841		 */
2842		if (!(conn->pkt_type & HCI_3DH1))
2843			phys |= BT_PHY_EDR_3M_1SLOT;
2844
2845		if (!(conn->pkt_type & HCI_3DH3))
2846			phys |= BT_PHY_EDR_3M_3SLOT;
2847
2848		if (!(conn->pkt_type & HCI_3DH5))
2849			phys |= BT_PHY_EDR_3M_5SLOT;
2850
2851		break;
2852
2853	case ESCO_LINK:
2854		/* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */
2855		phys |= BT_PHY_BR_1M_1SLOT;
2856
2857		if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5)))
2858			phys |= BT_PHY_BR_1M_3SLOT;
2859
2860		/* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */
2861		if (!(conn->pkt_type & ESCO_2EV3))
2862			phys |= BT_PHY_EDR_2M_1SLOT;
2863
2864		if (!(conn->pkt_type & ESCO_2EV5))
2865			phys |= BT_PHY_EDR_2M_3SLOT;
2866
2867		/* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */
2868		if (!(conn->pkt_type & ESCO_3EV3))
2869			phys |= BT_PHY_EDR_3M_1SLOT;
2870
2871		if (!(conn->pkt_type & ESCO_3EV5))
2872			phys |= BT_PHY_EDR_3M_3SLOT;
2873
2874		break;
2875
2876	case LE_LINK:
2877		if (conn->le_tx_phy & HCI_LE_SET_PHY_1M)
2878			phys |= BT_PHY_LE_1M_TX;
2879
2880		if (conn->le_rx_phy & HCI_LE_SET_PHY_1M)
2881			phys |= BT_PHY_LE_1M_RX;
2882
2883		if (conn->le_tx_phy & HCI_LE_SET_PHY_2M)
2884			phys |= BT_PHY_LE_2M_TX;
2885
2886		if (conn->le_rx_phy & HCI_LE_SET_PHY_2M)
2887			phys |= BT_PHY_LE_2M_RX;
2888
2889		if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED)
2890			phys |= BT_PHY_LE_CODED_TX;
2891
2892		if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED)
2893			phys |= BT_PHY_LE_CODED_RX;
2894
2895		break;
2896	}
2897
2898	return phys;
2899}
2900
2901static int abort_conn_sync(struct hci_dev *hdev, void *data)
2902{
2903	struct hci_conn *conn;
2904	u16 handle = PTR_UINT(data);
2905
2906	conn = hci_conn_hash_lookup_handle(hdev, handle);
2907	if (!conn)
2908		return 0;
2909
2910	return hci_abort_conn_sync(hdev, conn, conn->abort_reason);
2911}
2912
2913int hci_abort_conn(struct hci_conn *conn, u8 reason)
2914{
2915	struct hci_dev *hdev = conn->hdev;
2916
2917	/* If abort_reason has already been set it means the connection is
2918	 * already being aborted so don't attempt to overwrite it.
2919	 */
2920	if (conn->abort_reason)
2921		return 0;
2922
2923	bt_dev_dbg(hdev, "handle 0x%2.2x reason 0x%2.2x", conn->handle, reason);
2924
2925	conn->abort_reason = reason;
2926
2927	/* If the connection is pending check the command opcode since that
2928	 * might be blocking on hci_cmd_sync_work while waiting its respective
2929	 * event so we need to hci_cmd_sync_cancel to cancel it.
2930	 *
2931	 * hci_connect_le serializes the connection attempts so only one
2932	 * connection can be in BT_CONNECT at time.
2933	 */
2934	if (conn->state == BT_CONNECT && hdev->req_status == HCI_REQ_PEND) {
2935		switch (hci_skb_event(hdev->sent_cmd)) {
2936		case HCI_EV_LE_CONN_COMPLETE:
2937		case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
2938		case HCI_EVT_LE_CIS_ESTABLISHED:
2939			hci_cmd_sync_cancel(hdev, ECANCELED);
2940			break;
2941		}
2942	}
2943
2944	return hci_cmd_sync_queue(hdev, abort_conn_sync, UINT_PTR(conn->handle),
2945				  NULL);
2946}
2947