// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2016 Broadcom
 */
#include <linux/netdevice.h>
#include <linux/gcd.h>
#include <net/cfg80211.h>

#include "core.h"
#include "debug.h"
#include "fwil.h"
#include "fwil_types.h"
#include "cfg80211.h"
#include "pno.h"

#define BRCMF_PNO_VERSION		2
#define BRCMF_PNO_REPEAT		4
#define BRCMF_PNO_FREQ_EXPO_MAX		3
#define BRCMF_PNO_IMMEDIATE_SCAN_BIT	3
#define BRCMF_PNO_ENABLE_BD_SCAN_BIT	5
#define BRCMF_PNO_ENABLE_ADAPTSCAN_BIT	6
#define BRCMF_PNO_REPORT_SEPARATELY_BIT	11
#define BRCMF_PNO_SCAN_INCOMPLETE	0
#define BRCMF_PNO_WPA_AUTH_ANY		0xFFFFFFFF
#define BRCMF_PNO_HIDDEN_BIT		2
#define BRCMF_PNO_SCHED_SCAN_PERIOD	30

#define BRCMF_PNO_MAX_BUCKETS		16
#define GSCAN_BATCH_NO_THR_SET			101
#define GSCAN_RETRY_THRESHOLD			3

struct brcmf_pno_info {
	int n_reqs;
	struct cfg80211_sched_scan_request *reqs[BRCMF_PNO_MAX_BUCKETS];
	struct mutex req_lock;
};

#define ifp_to_pno(_ifp)	((_ifp)->drvr->config->pno)

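/**
 * brcmf_pno_store_request() - store a scheduled scan request.
 *
 * @pi: pno instance used.
 * @req: scheduled scan request to keep track of.
 *
 * Return: 0 on success, or -ENOSPC when the request storage is full.
 */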
static int brcmf_pno_store_request(struct brcmf_pno_info *pi,
				   struct cfg80211_sched_scan_request *req)
{
	if (WARN(pi->n_reqs == BRCMF_PNO_MAX_BUCKETS,
		 "pno request storage full\n"))
		return -ENOSPC;

	brcmf_dbg(SCAN, "reqid=%llu\n", req->reqid);
	mutex_lock(&pi->req_lock);
	pi->reqs[pi->n_reqs++] = req;
	mutex_unlock(&pi->req_lock);
	return 0;
}

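/**
 * brcmf_pno_remove_request() - remove a scheduled scan request.
 *
 * @pi: pno instance used.
 * @reqid: identifier of the scheduled scan request to remove.
 *
 * Looks up the request by @reqid and compacts the remaining entries.
 *
 * Return: 0 on success, or -ENOENT when the request is not found.
 */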
static int brcmf_pno_remove_request(struct brcmf_pno_info *pi, u64 reqid)
{
	int i, err = 0;

	mutex_lock(&pi->req_lock);

	/* Nothing to do if we have no requests */
	if (pi->n_reqs == 0)
		goto done;

	/* find request */
	for (i = 0; i < pi->n_reqs; i++) {
		if (pi->reqs[i]->reqid == reqid)
			break;
	}
	/* request not found */
	if (WARN(i == pi->n_reqs, "reqid not found\n")) {
		err = -ENOENT;
		goto done;
	}

	brcmf_dbg(SCAN, "reqid=%llu\n", reqid);
	pi->n_reqs--;
	/* if the removed request was the last entry, we are done */
	if (!pi->n_reqs || i == pi->n_reqs)
		goto done;

	/* fill the gap with remaining requests */
	while (i <= pi->n_reqs - 1) {
		pi->reqs[i] = pi->reqs[i + 1];
		i++;
	}

done:
	mutex_unlock(&pi->req_lock);
	return err;
}

static int brcmf_pno_channel_config(struct brcmf_if *ifp,
				    struct brcmf_pno_config_le *cfg)
{
	cfg->reporttype = 0;
	cfg->flags = 0;

	return brcmf_fil_iovar_data_set(ifp, "pfn_cfg", cfg, sizeof(*cfg));
}

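/**
 * brcmf_pno_config() - set the main firmware pno parameters.
 *
 * @ifp: interface for which pno is configured.
 * @scan_freq: base scan period to program.
 * @mscan: number of scans to batch in firmware, 0 disables batching.
 * @bestn: number of best networks to keep per batched scan.
 *
 * When batching is requested, @mscan is capped at the maximum the
 * firmware reports through the "pfnmem" iovar.
 */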
static int brcmf_pno_config(struct brcmf_if *ifp, u32 scan_freq,
			    u32 mscan, u32 bestn)
{
	struct brcmf_pub *drvr = ifp->drvr;
	struct brcmf_pno_param_le pfn_param;
	u16 flags;
	u32 pfnmem;
	s32 err;

	memset(&pfn_param, 0, sizeof(pfn_param));
	pfn_param.version = cpu_to_le32(BRCMF_PNO_VERSION);

	/* set extra pno params */
	flags = BIT(BRCMF_PNO_IMMEDIATE_SCAN_BIT) |
		BIT(BRCMF_PNO_ENABLE_ADAPTSCAN_BIT);
	pfn_param.repeat = BRCMF_PNO_REPEAT;
	pfn_param.exp = BRCMF_PNO_FREQ_EXPO_MAX;

	/* set up pno scan frequency */
	pfn_param.scan_freq = cpu_to_le32(scan_freq);

	if (mscan) {
		pfnmem = bestn;

		/* set bestn in firmware */
		err = brcmf_fil_iovar_int_set(ifp, "pfnmem", pfnmem);
		if (err < 0) {
			bphy_err(drvr, "failed to set pfnmem\n");
			goto exit;
		}
		/* get max mscan which the firmware supports */
		err = brcmf_fil_iovar_int_get(ifp, "pfnmem", &pfnmem);
		if (err < 0) {
			bphy_err(drvr, "failed to get pfnmem\n");
			goto exit;
		}
		mscan = min_t(u32, mscan, pfnmem);
		pfn_param.mscan = mscan;
		pfn_param.bestn = bestn;
		flags |= BIT(BRCMF_PNO_ENABLE_BD_SCAN_BIT);
		brcmf_dbg(INFO, "mscan=%d, bestn=%d\n", mscan, bestn);
	}

	pfn_param.flags = cpu_to_le16(flags);
	err = brcmf_fil_iovar_data_set(ifp, "pfn_set", &pfn_param,
				       sizeof(pfn_param));
	if (err)
		bphy_err(drvr, "pfn_set failed, err=%d\n", err);

exit:
	return err;
}

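/**
 * brcmf_pno_set_random() - set up a randomized mac address for pno scans.
 *
 * @ifp: interface for which the pno scans are configured.
 * @pi: pno instance holding the stored requests.
 *
 * Uses the address and mask of the first stored request that has
 * NL80211_SCAN_FLAG_RANDOM_ADDR set, randomizes the unmasked bits and
 * forces a locally administered unicast address before programming it
 * through the "pfn_macaddr" iovar. Does nothing if no request asked
 * for a random address.
 */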
static int brcmf_pno_set_random(struct brcmf_if *ifp, struct brcmf_pno_info *pi)
{
	struct brcmf_pub *drvr = ifp->drvr;
	struct brcmf_pno_macaddr_le pfn_mac;
	u8 *mac_addr = NULL;
	u8 *mac_mask = NULL;
	int err, i, ri;

	for (ri = 0; ri < pi->n_reqs; ri++)
		if (pi->reqs[ri]->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
			mac_addr = pi->reqs[ri]->mac_addr;
			mac_mask = pi->reqs[ri]->mac_addr_mask;
			break;
		}

	/* no random mac requested */
	if (!mac_addr)
		return 0;

	pfn_mac.version = BRCMF_PFN_MACADDR_CFG_VER;
	pfn_mac.flags = BRCMF_PFN_MAC_OUI_ONLY | BRCMF_PFN_SET_MAC_UNASSOC;

	memcpy(pfn_mac.mac, mac_addr, ETH_ALEN);
	for (i = 0; i < ETH_ALEN; i++) {
		pfn_mac.mac[i] &= mac_mask[i];
		pfn_mac.mac[i] |= get_random_u8() & ~(mac_mask[i]);
	}
	/* Clear the multicast bit */
	pfn_mac.mac[0] &= 0xFE;
	/* Set locally administered */
	pfn_mac.mac[0] |= 0x02;

	brcmf_dbg(SCAN, "enabling random mac: reqid=%llu mac=%pM\n",
		  pi->reqs[ri]->reqid, pfn_mac.mac);
	err = brcmf_fil_iovar_data_set(ifp, "pfn_macaddr", &pfn_mac,
				       sizeof(pfn_mac));
	if (err)
		bphy_err(drvr, "pfn_macaddr failed, err=%d\n", err);

	return err;
}

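/**
 * brcmf_pno_add_ssid() - add an SSID to the firmware pfn list.
 *
 * @ifp: interface to configure.
 * @ssid: SSID to add through the "pfn_add" iovar.
 * @active: mark the SSID for active scanning, i.e. probe it as a
 *	hidden network.
 */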
static int brcmf_pno_add_ssid(struct brcmf_if *ifp, struct cfg80211_ssid *ssid,
			      bool active)
{
	struct brcmf_pub *drvr = ifp->drvr;
	struct brcmf_pno_net_param_le pfn;
	int err;

	pfn.auth = cpu_to_le32(WLAN_AUTH_OPEN);
	pfn.wpa_auth = cpu_to_le32(BRCMF_PNO_WPA_AUTH_ANY);
	pfn.wsec = cpu_to_le32(0);
	pfn.infra = cpu_to_le32(1);
	pfn.flags = 0;
	if (active)
		pfn.flags = cpu_to_le32(1 << BRCMF_PNO_HIDDEN_BIT);
	pfn.ssid.SSID_len = cpu_to_le32(ssid->ssid_len);
	memcpy(pfn.ssid.SSID, ssid->ssid, ssid->ssid_len);

	brcmf_dbg(SCAN, "adding ssid=%.32s (active=%d)\n", ssid->ssid, active);
	err = brcmf_fil_iovar_data_set(ifp, "pfn_add", &pfn, sizeof(pfn));
	if (err < 0)
		bphy_err(drvr, "adding failed: err=%d\n", err);
	return err;
}

static int brcmf_pno_add_bssid(struct brcmf_if *ifp, const u8 *bssid)
{
	struct brcmf_pub *drvr = ifp->drvr;
	struct brcmf_pno_bssid_le bssid_cfg;
	int err;

	memcpy(bssid_cfg.bssid, bssid, ETH_ALEN);
	bssid_cfg.flags = 0;

	brcmf_dbg(SCAN, "adding bssid=%pM\n", bssid);
	err = brcmf_fil_iovar_data_set(ifp, "pfn_add_bssid", &bssid_cfg,
				       sizeof(bssid_cfg));
	if (err < 0)
		bphy_err(drvr, "adding failed: err=%d\n", err);
	return err;
}

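/* determine whether a match-set ssid also needs active scanning,
 * i.e. it is listed in the request's ssids array
 */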
static bool brcmf_is_ssid_active(struct cfg80211_ssid *ssid,
				 struct cfg80211_sched_scan_request *req)
{
	int i;

	if (!ssid || !req->ssids || !req->n_ssids)
		return false;

	for (i = 0; i < req->n_ssids; i++) {
		if (ssid->ssid_len == req->ssids[i].ssid_len) {
			if (!strncmp(ssid->ssid, req->ssids[i].ssid,
				     ssid->ssid_len))
				return true;
		}
	}
	return false;
}

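/* disable pno in firmware and clear all configured pfn entries */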
static int brcmf_pno_clean(struct brcmf_if *ifp)
{
	struct brcmf_pub *drvr = ifp->drvr;
	int ret;

	/* Disable pfn */
	ret = brcmf_fil_iovar_int_set(ifp, "pfn", 0);
	if (ret == 0) {
		/* clear pfn */
		ret = brcmf_fil_iovar_data_set(ifp, "pfnclear", NULL, 0);
	}
	if (ret < 0)
		bphy_err(drvr, "failed code %d\n", ret);

	return ret;
}

static int brcmf_pno_get_bucket_channels(struct cfg80211_sched_scan_request *r,
					 struct brcmf_pno_config_le *pno_cfg)
{
	u32 n_chan = le32_to_cpu(pno_cfg->channel_num);
	u16 chan;
	int i, err = 0;

	for (i = 0; i < r->n_channels; i++) {
		if (n_chan >= BRCMF_NUMCHANNELS) {
			err = -ENOSPC;
			goto done;
		}
		chan = r->channels[i]->hw_value;
		brcmf_dbg(SCAN, "[%d] Chan : %u\n", n_chan, chan);
		pno_cfg->channel_list[n_chan++] = cpu_to_le16(chan);
	}
	/* return number of channels */
	err = n_chan;
done:
	pno_cfg->channel_num = cpu_to_le32(n_chan);
	return err;
}

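/**
 * brcmf_pno_prep_fwconfig() - prepare the gscan bucket configuration.
 *
 * @pi: pno instance holding the stored requests.
 * @pno_cfg: channel configuration to fill.
 * @buckets: on success, points to an allocated bucket array which the
 *	caller must free.
 * @scan_freq: resulting base scan period, the gcd of all request
 *	intervals clamped to BRCMF_PNO_SCHED_SCAN_MIN_PERIOD.
 *
 * Return: number of buckets on success, or a negative error code.
 */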
static int brcmf_pno_prep_fwconfig(struct brcmf_pno_info *pi,
				   struct brcmf_pno_config_le *pno_cfg,
				   struct brcmf_gscan_bucket_config **buckets,
				   u32 *scan_freq)
{
	struct cfg80211_sched_scan_request *sr;
	struct brcmf_gscan_bucket_config *fw_buckets;
	int i, err, chidx;

	brcmf_dbg(SCAN, "n_reqs=%d\n", pi->n_reqs);
	if (WARN_ON(!pi->n_reqs))
		return -ENODATA;

	/*
	 * The base scan period programmed into firmware is the gcd() of
	 * all scheduled scan plan intervals, e.g. plans of 30 and 45
	 * result in a base period of 15 with bucket multiples 2 and 3.
	 */
	*scan_freq = pi->reqs[0]->scan_plans[0].interval;
	for (i = 1; i < pi->n_reqs; i++) {
		sr = pi->reqs[i];
		*scan_freq = gcd(sr->scan_plans[0].interval, *scan_freq);
	}
	if (*scan_freq < BRCMF_PNO_SCHED_SCAN_MIN_PERIOD) {
		brcmf_dbg(SCAN, "scan period too small, using minimum\n");
		*scan_freq = BRCMF_PNO_SCHED_SCAN_MIN_PERIOD;
	}

	*buckets = NULL;
	fw_buckets = kcalloc(pi->n_reqs, sizeof(*fw_buckets), GFP_KERNEL);
	if (!fw_buckets)
		return -ENOMEM;

	memset(pno_cfg, 0, sizeof(*pno_cfg));
	for (i = 0; i < pi->n_reqs; i++) {
		sr = pi->reqs[i];
		chidx = brcmf_pno_get_bucket_channels(sr, pno_cfg);
		if (chidx < 0) {
			err = chidx;
			goto fail;
		}
		fw_buckets[i].bucket_end_index = chidx - 1;
		fw_buckets[i].bucket_freq_multiple =
			sr->scan_plans[0].interval / *scan_freq;
		/* make sure the scan period multiple is non-zero */
		if (!fw_buckets[i].bucket_freq_multiple)
			fw_buckets[i].bucket_freq_multiple = 1;
		fw_buckets[i].flag = BRCMF_PNO_REPORT_NO_BATCH;
	}

	if (BRCMF_SCAN_ON()) {
		brcmf_err("base period=%u\n", *scan_freq);
		for (i = 0; i < pi->n_reqs; i++) {
			brcmf_err("[%d] period %u max %u repeat %u flag %x idx %u\n",
				  i, fw_buckets[i].bucket_freq_multiple,
				  le16_to_cpu(fw_buckets[i].max_freq_multiple),
				  fw_buckets[i].repeat, fw_buckets[i].flag,
				  fw_buckets[i].bucket_end_index);
		}
	}
	*buckets = fw_buckets;
	return pi->n_reqs;

fail:
	kfree(fw_buckets);
	return err;
}

static int brcmf_pno_config_networks(struct brcmf_if *ifp,
				     struct brcmf_pno_info *pi)
{
	struct cfg80211_sched_scan_request *r;
	struct cfg80211_match_set *ms;
	bool active;
	int i, j, err = 0;

	for (i = 0; i < pi->n_reqs; i++) {
		r = pi->reqs[i];

		for (j = 0; j < r->n_match_sets; j++) {
			ms = &r->match_sets[j];
			if (ms->ssid.ssid_len) {
				active = brcmf_is_ssid_active(&ms->ssid, r);
				err = brcmf_pno_add_ssid(ifp, &ms->ssid,
							 active);
			}
			if (!err && is_valid_ether_addr(ms->bssid))
				err = brcmf_pno_add_bssid(ifp, ms->bssid);

			if (err < 0)
				return err;
		}
	}
	return 0;
}

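/**
 * brcmf_pno_config_sched_scans() - program all stored requests into firmware.
 *
 * @ifp: interface to configure.
 *
 * Cleans existing pno state, then configures the base pno parameters,
 * channels, gscan buckets, random mac and networks before enabling pno
 * through the "pfn" iovar. On failure the firmware state is cleaned up
 * again.
 */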
static int brcmf_pno_config_sched_scans(struct brcmf_if *ifp)
{
	struct brcmf_pub *drvr = ifp->drvr;
	struct brcmf_pno_info *pi;
	struct brcmf_gscan_config *gscan_cfg;
	struct brcmf_gscan_bucket_config *buckets;
	struct brcmf_pno_config_le pno_cfg;
	size_t gsz;
	u32 scan_freq;
	int err, n_buckets;

	pi = ifp_to_pno(ifp);
	n_buckets = brcmf_pno_prep_fwconfig(pi, &pno_cfg, &buckets,
					    &scan_freq);
	if (n_buckets < 0)
		return n_buckets;

	gsz = struct_size(gscan_cfg, bucket, n_buckets);
	gscan_cfg = kzalloc(gsz, GFP_KERNEL);
	if (!gscan_cfg) {
		err = -ENOMEM;
		goto free_buckets;
	}

	/* clean up everything */
	err = brcmf_pno_clean(ifp);
	if (err < 0) {
		bphy_err(drvr, "failed error=%d\n", err);
		goto free_gscan;
	}

	/* configure pno */
	err = brcmf_pno_config(ifp, scan_freq, 0, 0);
	if (err < 0)
		goto free_gscan;

	err = brcmf_pno_channel_config(ifp, &pno_cfg);
	if (err < 0)
		goto clean;

	gscan_cfg->version = cpu_to_le16(BRCMF_GSCAN_CFG_VERSION);
	gscan_cfg->retry_threshold = GSCAN_RETRY_THRESHOLD;
	gscan_cfg->buffer_threshold = GSCAN_BATCH_NO_THR_SET;
	gscan_cfg->flags = BRCMF_GSCAN_CFG_ALL_BUCKETS_IN_1ST_SCAN;

	gscan_cfg->count_of_channel_buckets = n_buckets;
	memcpy(gscan_cfg->bucket, buckets,
	       array_size(n_buckets, sizeof(*buckets)));

	err = brcmf_fil_iovar_data_set(ifp, "pfn_gscan_cfg", gscan_cfg, gsz);

	if (err < 0)
		goto clean;

	/* configure random mac */
	err = brcmf_pno_set_random(ifp, pi);
	if (err < 0)
		goto clean;

	err = brcmf_pno_config_networks(ifp, pi);
	if (err < 0)
		goto clean;

	/* Enable the PNO */
	err = brcmf_fil_iovar_int_set(ifp, "pfn", 1);

clean:
	if (err < 0)
		brcmf_pno_clean(ifp);
free_gscan:
	kfree(gscan_cfg);
free_buckets:
	kfree(buckets);
	return err;
}

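/**
 * brcmf_pno_start_sched_scan() - start a scheduled scan.
 *
 * @ifp: interface on which the scan is started.
 * @req: scheduled scan request from cfg80211.
 *
 * Stores the request and (re)programs the firmware. On failure the
 * request is dropped again and any remaining requests are reprogrammed.
 */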
int brcmf_pno_start_sched_scan(struct brcmf_if *ifp,
			       struct cfg80211_sched_scan_request *req)
{
	struct brcmf_pno_info *pi;
	int ret;

	brcmf_dbg(TRACE, "reqid=%llu\n", req->reqid);

	pi = ifp_to_pno(ifp);
	ret = brcmf_pno_store_request(pi, req);
	if (ret < 0)
		return ret;

	ret = brcmf_pno_config_sched_scans(ifp);
	if (ret < 0) {
		brcmf_pno_remove_request(pi, req->reqid);
		if (pi->n_reqs)
			(void)brcmf_pno_config_sched_scans(ifp);
		return ret;
	}
	return 0;
}

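/**
 * brcmf_pno_stop_sched_scan() - stop a scheduled scan.
 *
 * @ifp: interface on which the scan is stopped.
 * @reqid: identifier of the scheduled scan request to remove.
 *
 * Removes the request, clears the firmware pno state and reprograms
 * any remaining requests.
 */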
int brcmf_pno_stop_sched_scan(struct brcmf_if *ifp, u64 reqid)
{
	struct brcmf_pno_info *pi;
	int err;

	brcmf_dbg(TRACE, "reqid=%llu\n", reqid);

	pi = ifp_to_pno(ifp);

	/* No PNO request */
	if (!pi->n_reqs)
		return 0;

	err = brcmf_pno_remove_request(pi, reqid);
	if (err)
		return err;

	brcmf_pno_clean(ifp);

	if (pi->n_reqs)
		(void)brcmf_pno_config_sched_scans(ifp);

	return 0;
}

int brcmf_pno_attach(struct brcmf_cfg80211_info *cfg)
{
	struct brcmf_pno_info *pi;

	brcmf_dbg(TRACE, "enter\n");
	pi = kzalloc(sizeof(*pi), GFP_KERNEL);
	if (!pi)
		return -ENOMEM;

	cfg->pno = pi;
	mutex_init(&pi->req_lock);
	return 0;
}

void brcmf_pno_detach(struct brcmf_cfg80211_info *cfg)
{
	struct brcmf_pno_info *pi;

	brcmf_dbg(TRACE, "enter\n");
	pi = cfg->pno;
	cfg->pno = NULL;

	WARN_ON(pi->n_reqs);
	mutex_destroy(&pi->req_lock);
	kfree(pi);
}

void brcmf_pno_wiphy_params(struct wiphy *wiphy, bool gscan)
{
	/* scheduled scan settings */
	wiphy->max_sched_scan_reqs = gscan ? BRCMF_PNO_MAX_BUCKETS : 1;
	wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT;
	wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT;
	wiphy->max_sched_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
	wiphy->max_sched_scan_plan_interval = BRCMF_PNO_SCHED_SCAN_MAX_PERIOD;
}

u64 brcmf_pno_find_reqid_by_bucket(struct brcmf_pno_info *pi, u32 bucket)
{
	u64 reqid = 0;

	mutex_lock(&pi->req_lock);

	if (bucket < pi->n_reqs)
		reqid = pi->reqs[bucket]->reqid;

	mutex_unlock(&pi->req_lock);
	return reqid;
}

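/**
 * brcmf_pno_get_bucket_map() - determine in which buckets a network matched.
 *
 * @pi: pno instance holding the stored requests.
 * @ni: network info reported by firmware.
 *
 * Return: bitmap with a bit set for each stored request whose match
 *	sets contain the reported SSID or BSSID.
 */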
u32 brcmf_pno_get_bucket_map(struct brcmf_pno_info *pi,
			     struct brcmf_pno_net_info_le *ni)
{
	struct cfg80211_sched_scan_request *req;
	struct cfg80211_match_set *ms;
	u32 bucket_map = 0;
	int i, j;

	mutex_lock(&pi->req_lock);
	for (i = 0; i < pi->n_reqs; i++) {
		req = pi->reqs[i];

		if (!req->n_match_sets)
			continue;
		for (j = 0; j < req->n_match_sets; j++) {
			ms = &req->match_sets[j];
			if (ms->ssid.ssid_len == ni->SSID_len &&
			    !memcmp(ms->ssid.ssid, ni->SSID, ni->SSID_len)) {
				bucket_map |= BIT(i);
				break;
			}
			if (is_valid_ether_addr(ms->bssid) &&
			    !memcmp(ms->bssid, ni->bssid, ETH_ALEN)) {
				bucket_map |= BIT(i);
				break;
			}
		}
	}
	mutex_unlock(&pi->req_lock);
	return bucket_map;
}