/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/kcov.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
#include "msft.h"
#include "aosp.h"
#include "hci_codec.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
	return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
	return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
	return 0;
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

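/*
 * Illustrative sketch (not part of the original file): hci_dev_get()
 * returns the device with a reference held, so every successful lookup
 * must be balanced with hci_dev_put(), as the ioctl helpers below do:
 *
 *	struct hci_dev *hdev = hci_dev_get(dev_id);
 *
 *	if (!hdev)
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 */
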
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_passive_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		/* If discovery was not started then it was initiated by the
		 * MGMT interface so no MGMT event shall be generated either
		 */
		if (old_state != DISCOVERY_STARTING) {
			hdev->discovery.state = old_state;
			return;
		}
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	bt_dev_dbg(hdev, "state %u -> %u", old_state, state);
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

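/*
 * Worked example (illustrative): with RSSI stored as negative dBm, the
 * resolve list above ends up ordered by increasing abs(rssi), i.e.
 * strongest signal first. Inserting an entry with rssi = -40 into a list
 * holding -30 and -70 yields:
 *
 *	before:	-30, -70
 *	after:	-30, -40, -70
 *
 * NAME_PENDING entries never trigger the break, so an entry whose name
 * resolution is already in progress is not displaced.
 */
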
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

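/*
 * Illustrative sketch (hypothetical caller): the returned bitmask tells
 * the inquiry-result event handler how to report the device over mgmt:
 *
 *	u32 flags = hci_inquiry_cache_update(hdev, &data, name_known);
 *
 * MGMT_DEV_FOUND_CONFIRM_NAME asks userspace to confirm or resolve the
 * remote name; MGMT_DEV_FOUND_LEGACY_PAIRING marks a peer without SSP.
 */
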
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return 0;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* Restrict maximum inquiry length to 60 seconds */
	if (ir.length > 60) {
		err = -EINVAL;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo, NULL);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE)) {
			err = -EINTR;
			goto done;
		}
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

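/*
 * Illustrative sketch (hypothetical userspace caller on a raw HCI
 * socket): the HCIINQUIRY ioctl passes a struct hci_inquiry_req
 * immediately followed by room for the inquiry_info entries that this
 * function copies back:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info    info[255];
 *	} req = {
 *		.ir.dev_id  = 0,
 *		.ir.flags   = IREQ_CACHE_FLUSH,
 *		.ir.lap     = { 0x33, 0x8b, 0x9e },	// GIAC
 *		.ir.length  = 8,			// capped at 60 above
 *		.ir.num_rsp = 255,
 *	};
 *
 *	ioctl(sock, HCIINQUIRY, &req);
 */
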
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	ret = hci_dev_open_sync(hdev);

	hci_req_sync_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_do_close(struct hci_dev *hdev)
{
	int err;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	err = hci_dev_close_sync(hdev);

	hci_req_sync_unlock(hdev);

	return err;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	cancel_work_sync(&hdev->power_on);
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Cancel these to avoid queueing non-chained pending work */
	hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
	/* Wait for
	 *
	 *    if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
	 *        queue_delayed_work(&hdev->{cmd,ncmd}_timer)
	 *
	 * inside RCU section to see the flag or complete scheduling.
	 */
	synchronize_rcu();
	/* Explicitly cancel works in case scheduled after setting the flag. */
	cancel_delayed_work(&hdev->cmd_timer);
	cancel_delayed_work(&hdev->ncmd_timer);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0;
	hdev->sco_cnt = 0;
	hdev->le_cnt = 0;
	hdev->iso_cnt = 0;

	ret = hci_reset_sync(hdev);

	hci_req_sync_unlock(hdev);
	return ret;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_update_adv_data(hdev, hdev->cur_adv_instance);

		mgmt_new_settings(hdev);
	}
}

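/*
 * Worked example (illustrative): the scan byte is the Write Scan Enable
 * value coming from the HCISETSCAN ioctl, so the flag updates above map
 * as follows:
 *
 *	SCAN_DISABLED			-> !CONNECTABLE, !DISCOVERABLE
 *	SCAN_PAGE			->  CONNECTABLE, !DISCOVERABLE
 *	SCAN_PAGE | SCAN_INQUIRY	->  CONNECTABLE,  DISCOVERABLE
 */
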
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_passive_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		if (hdev->pkt_type == (__u16) dr.dev_opt)
			break;

		hdev->pkt_type = (__u16) dr.dev_opt;
		mgmt_phy_configuration_changed(hdev, NULL);
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

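/*
 * Worked example (illustrative, assuming a little-endian host as the
 * pointer arithmetic above does): HCISETACLMTU/HCISETSCOMTU pack the MTU
 * into the upper 16 bits of dev_opt and the packet count into the lower
 * 16 bits:
 *
 *	dr.dev_opt = (1021 << 16) | 8;	// acl_mtu = 1021, acl_pkts = 8
 */
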
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strscpy(di.name, hdev->name, sizeof(di.name));
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_dev_do_poweroff(struct hci_dev *hdev)
{
	int err;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	err = hci_set_powered_sync(hdev, false);

	hci_req_sync_unlock(hdev);

	return err;
}

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;
	int err;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked == hci_dev_test_flag(hdev, HCI_RFKILLED))
		return 0;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);

		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
			err = hci_dev_do_poweroff(hdev);
			if (err) {
				bt_dev_err(hdev, "Error when powering off device on rfkill (%d)",
					   err);

				/* Make sure the device is still closed even if
				 * anything during power off sequence (eg.
				 * disconnecting devices) failed.
				 */
				hci_dev_do_close(hdev);
			}
		}
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		err = hci_powered_update_sync(hdev);
		mgmt_power_on(hdev, err);
		return;
	}

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_PRIMARY &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	hci_dev_hold(hdev);
	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

	if (!hci_dev_do_close(hdev))
		hci_dev_do_open(hdev);

	hci_dev_put(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key, *tmp;

	list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_blocked_keys_clear(struct hci_dev *hdev)
{
	struct blocked_key *b, *tmp;

	list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
		list_del_rcu(&b->list);
		kfree_rcu(b, rcu);
	}
}

bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
{
	bool blocked = false;
	struct blocked_key *b;

	rcu_read_lock();
	list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
		if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
			blocked = true;
			break;
		}
	}

	rcu_read_unlock();
	return blocked;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev,
					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"Link key blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

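/*
 * Worked examples (illustrative) for the decision chain above:
 *
 *	combination key (0x00), any auth	-> true  (legacy, < 0x03)
 *	debug combination key (0x03)		-> false (never persisted)
 *	changed combination (0x06), old 0xff	-> false (no previous key)
 *	unauthenticated (0x04), both sides
 *	no-bonding, no dedicated bonding	-> false (falls through)
 */
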
static u8 ltk_role(u8 type)
{
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"LTK blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			irk_to_return = irk;
			goto done;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}

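/*
 * Illustrative sketch (hypothetical caller, e.g. a Link Key Notification
 * event handler): *persistent tells the caller whether the key should be
 * announced for permanent storage.
 *
 *	bool persistent;
 *	struct link_key *key;
 *
 *	key = hci_add_link_key(hdev, conn, &bdaddr, val, type, pin_len,
 *			       &persistent);
 *	if (key)
 *		mgmt_new_link_key(hdev, key, persistent);
 */
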
struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k, *tmp;
	int removed = 0;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}

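/*
 * Illustrative sketch (hypothetical caller): checking both transports
 * for an existing bond before starting a new pairing attempt.
 *
 *	if (hci_bdaddr_is_paired(hdev, &bdaddr, BDADDR_BREDR) ||
 *	    hci_bdaddr_is_paired(hdev, &bdaddr, BDADDR_LE_PUBLIC))
 *		return -EALREADY;
 */
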
/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->req_skb) {
		u16 opcode = hci_skb_opcode(hdev->req_skb);

		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);

		hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
	} else {
		bt_dev_err(hdev, "command tx timeout");
	}

	if (hdev->cmd_timeout)
		hdev->cmd_timeout(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

/* HCI ncmd timer function */
static void hci_ncmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    ncmd_timer.work);

	bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");

	/* During HCI_INIT phase no events can be injected if the ncmd timer
	 * triggers since the procedure has its own timeout handling.
	 */
	if (test_bit(HCI_INIT, &hdev->flags))
		return;

	/* This is an irrecoverable state, inject hardware error event */
	hci_reset_dev(hdev);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
			continue;
		if (data->bdaddr_type != bdaddr_type)
			continue;
		return data;
	}

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

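/*
 * Worked example (illustrative): the data->present encoding produced
 * above reflects which OOB value pairs were supplied:
 *
 *	hash192/rand192 only		-> present = 0x01
 *	hash256/rand256 only		-> present = 0x02
 *	both P-192 and P-256 pairs	-> present = 0x03
 *	neither				-> present = 0x00
 */
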
/* This function requires the caller holds hdev->lock */
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		if (adv_instance->instance == instance)
			return adv_instance;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *cur_instance;

	cur_instance = hci_find_adv_instance(hdev, instance);
	if (!cur_instance)
		return NULL;

	if (cur_instance == list_last_entry(&hdev->adv_instances,
					    struct adv_info, list))
		return list_first_entry(&hdev->adv_instances,
					struct adv_info, list);
	else
		return list_next_entry(cur_instance, list);
}

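/*
 * Worked example (illustrative): hci_get_next_instance() cycles through
 * the registered instances in list order, wrapping from the last entry
 * back to the first, e.g. with three instances A, B and C on the list:
 *
 *	A -> B -> C -> A -> ...
 *
 * This suits the multi-advertising rotation suggested by the
 * def_multi_adv_rotation_duration handling in hci_add_adv_instance()
 * below.
 */
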
/* This function requires the caller holds hdev->lock */
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	BT_DBG("%s removing %dMR", hdev->name, instance);

	if (hdev->cur_adv_instance == instance) {
		if (hdev->adv_instance_timeout) {
			cancel_delayed_work(&hdev->adv_instance_expire);
			hdev->adv_instance_timeout = 0;
		}
		hdev->cur_adv_instance = 0x00;
	}

	cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);

	list_del(&adv_instance->list);
	kfree(adv_instance);

	hdev->adv_instance_cnt--;

	return 0;
}

void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
{
	struct adv_info *adv_instance, *n;

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
		adv_instance->rpa_expired = rpa_expired;
}

/* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
	struct adv_info *adv_instance, *n;

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
}

static void adv_instance_rpa_expired(struct work_struct *work)
{
	struct adv_info *adv_instance = container_of(work, struct adv_info,
						     rpa_expired_cb.work);

	BT_DBG("");

	adv_instance->rpa_expired = true;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
				      u32 flags, u16 adv_data_len, u8 *adv_data,
				      u16 scan_rsp_len, u8 *scan_rsp_data,
				      u16 timeout, u16 duration, s8 tx_power,
				      u32 min_interval, u32 max_interval,
				      u8 mesh_handle)
{
	struct adv_info *adv;

	adv = hci_find_adv_instance(hdev, instance);
	if (adv) {
		memset(adv->adv_data, 0, sizeof(adv->adv_data));
		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
		memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
	} else {
		if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
		    instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
			return ERR_PTR(-EOVERFLOW);

		adv = kzalloc(sizeof(*adv), GFP_KERNEL);
		if (!adv)
			return ERR_PTR(-ENOMEM);

		adv->pending = true;
		adv->instance = instance;
		list_add(&adv->list, &hdev->adv_instances);
		hdev->adv_instance_cnt++;
	}

	adv->flags = flags;
	adv->min_interval = min_interval;
	adv->max_interval = max_interval;
	adv->tx_power = tx_power;
	/* Defining a mesh_handle changes the timing units to ms,
	 * rather than seconds, and ties the instance to the requested
	 * mesh_tx queue.
	 */
	adv->mesh = mesh_handle;

	hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
				  scan_rsp_len, scan_rsp_data);

	adv->timeout = timeout;
	adv->remaining_time = timeout;

	if (duration == 0)
		adv->duration = hdev->def_multi_adv_rotation_duration;
	else
		adv->duration = duration;

	INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);

	BT_DBG("%s for %dMR", hdev->name, instance);

	return adv;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
				      u32 flags, u8 data_len, u8 *data,
				      u32 min_interval, u32 max_interval)
{
	struct adv_info *adv;

	adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
				   0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
				   min_interval, max_interval, 0);
	if (IS_ERR(adv))
		return adv;

	adv->periodic = true;
	adv->per_adv_data_len = data_len;

	if (data)
		memcpy(adv->per_adv_data, data, data_len);

	return adv;
}

/* This function requires the caller holds hdev->lock */
int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
			      u16 adv_data_len, u8 *adv_data,
			      u16 scan_rsp_len, u8 *scan_rsp_data)
{
	struct adv_info *adv;

	adv = hci_find_adv_instance(hdev, instance);

	/* If advertisement doesn't exist, we can't modify its data */
	if (!adv)
		return -ENOENT;

	if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
		memset(adv->adv_data, 0, sizeof(adv->adv_data));
		memcpy(adv->adv_data, adv_data, adv_data_len);
		adv->adv_data_len = adv_data_len;
		adv->adv_data_changed = true;
	}

	if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
		memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
		adv->scan_rsp_len = scan_rsp_len;
		adv->scan_rsp_changed = true;
	}

	/* Mark as changed if there are flags which would affect it */
	if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
		adv->scan_rsp_changed = true;

	return 0;
}

01ce70b0
LAD
1868/* This function requires the caller holds hdev->lock */
1869u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1870{
1871 u32 flags;
1872 struct adv_info *adv;
1873
1874 if (instance == 0x00) {
1875 /* Instance 0 always manages the "Tx Power" and "Flags"
1876 * fields
1877 */
1878 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1879
1880 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1881 * corresponds to the "connectable" instance flag.
1882 */
1883 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1884 flags |= MGMT_ADV_FLAG_CONNECTABLE;
1885
1886 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1887 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1888 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1889 flags |= MGMT_ADV_FLAG_DISCOV;
1890
1891 return flags;
1892 }
1893
1894 adv = hci_find_adv_instance(hdev, instance);
1895
1896 /* Return 0 when given an invalid instance identifier. */
1897 if (!adv)
1898 return 0;
1899
1900 return adv->flags;
1901}
1902
1903bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1904{
1905 struct adv_info *adv;
1906
1907 /* Instance 0x00 always sets the local name */
1908 if (instance == 0x00)
1909 return true;
1910
1911 adv = hci_find_adv_instance(hdev, instance);
1912 if (!adv)
1913 return false;
1914
1915 if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1916 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1917 return true;
1918
1919 return adv->scan_rsp_len ? true : false;
1920}
1921
e5e1e7fd
MC
1922/* This function requires the caller holds hdev->lock */
1923void hci_adv_monitors_clear(struct hci_dev *hdev)
1924{
b139553d
MC
1925 struct adv_monitor *monitor;
1926 int handle;
1927
1928 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
66bd095a 1929 hci_free_adv_monitor(hdev, monitor);
b139553d 1930
e5e1e7fd
MC
1931 idr_destroy(&hdev->adv_monitors_idr);
1932}
1933
66bd095a
AP
1934 /* Frees the monitor structure and does some bookkeeping.
1935 * This function requires the caller holds hdev->lock.
1936 */
1937void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
b139553d
MC
1938{
1939 struct adv_pattern *pattern;
1940 struct adv_pattern *tmp;
1941
1942 if (!monitor)
1943 return;
1944
66bd095a
AP
1945 list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1946 list_del(&pattern->list);
b139553d 1947 kfree(pattern);
66bd095a
AP
1948 }
1949
1950 if (monitor->handle)
1951 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1952
1953 if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
1954 hdev->adv_monitors_cnt--;
1955 mgmt_adv_monitor_removed(hdev, monitor->handle);
1956 }
b139553d
MC
1957
1958 kfree(monitor);
1959}
1960
a2a4dedf
AP
1961 /* Assigns a handle to a monitor, and if offloading is supported and power is on,
1962 * also attempts to forward the request to the controller.
b747a836 1963 * This function requires the caller holds hci_req_sync_lock.
a2a4dedf 1964 */
b747a836 1965int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
b139553d
MC
1966{
1967 int min, max, handle;
b747a836 1968 int status = 0;
b139553d 1969
b747a836
MM
1970 if (!monitor)
1971 return -EINVAL;
a2a4dedf 1972
b747a836 1973 hci_dev_lock(hdev);
b139553d
MC
1974
1975 min = HCI_MIN_ADV_MONITOR_HANDLE;
1976 max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1977 handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1978 GFP_KERNEL);
b747a836
MM
1979
1980 hci_dev_unlock(hdev);
1981
1982 if (handle < 0)
1983 return handle;
b139553d 1984
b139553d 1985 monitor->handle = handle;
8208f5a9 1986
a2a4dedf 1987 if (!hdev_is_powered(hdev))
b747a836 1988 return status;
8208f5a9 1989
a2a4dedf
AP
1990 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1991 case HCI_ADV_MONITOR_EXT_NONE:
6f55eea1 1992 bt_dev_dbg(hdev, "add monitor %d status %d",
b747a836 1993 monitor->handle, status);
a2a4dedf 1994 /* Message was not forwarded to controller - not an error */
b747a836
MM
1995 break;
1996
a2a4dedf 1997 case HCI_ADV_MONITOR_EXT_MSFT:
b747a836 1998 status = msft_add_monitor_pattern(hdev, monitor);
6f55eea1 1999 bt_dev_dbg(hdev, "add monitor %d msft status %d",
a2bcd2b6 2000 handle, status);
a2a4dedf
AP
2001 break;
2002 }
2003
b747a836 2004 return status;
b139553d
MC
2005}
2006
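As a sketch of the expected calling sequence (the real caller lives in mgmt.c): allocate and populate a monitor, then hand it over under hci_req_sync_lock; pattern setup is omitted for brevity, and the error path takes hdev->lock because hci_free_adv_monitor() requires it:

static int example_add_monitor(struct hci_dev *hdev)
{
	struct adv_monitor *monitor;
	int err;

	monitor = kzalloc(sizeof(*monitor), GFP_KERNEL);
	if (!monitor)
		return -ENOMEM;

	INIT_LIST_HEAD(&monitor->patterns);
	/* ... append struct adv_pattern entries to monitor->patterns ... */

	err = hci_add_adv_monitor(hdev, monitor);	/* assigns the handle */
	if (err) {
		hci_dev_lock(hdev);
		hci_free_adv_monitor(hdev, monitor);
		hci_dev_unlock(hdev);
	}

	return err;
}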
66bd095a
AP
2007 /* Attempts to remove the monitor from the controller and then frees it.
2008 * If the controller has no corresponding handle, remove it anyway.
7cf5c297 2009 * This function requires the caller holds hci_req_sync_lock.
66bd095a 2010 */
7cf5c297
MM
2011static int hci_remove_adv_monitor(struct hci_dev *hdev,
2012 struct adv_monitor *monitor)
bd2fbc6c 2013{
7cf5c297 2014 int status = 0;
de6dfcef 2015 int handle;
bd2fbc6c 2016
66bd095a
AP
2017 switch (hci_get_adv_monitor_offload_ext(hdev)) {
2018 case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
6f55eea1 2019 bt_dev_dbg(hdev, "remove monitor %d status %d",
7cf5c297 2020 monitor->handle, status);
66bd095a 2021 goto free_monitor;
7cf5c297 2022
66bd095a 2023 case HCI_ADV_MONITOR_EXT_MSFT:
de6dfcef 2024 handle = monitor->handle;
7cf5c297 2025 status = msft_remove_monitor(hdev, monitor);
6f55eea1
DA
2026 bt_dev_dbg(hdev, "remove monitor %d msft status %d",
2027 handle, status);
66bd095a
AP
2028 break;
2029 }
bd2fbc6c 2030
66bd095a 2031 /* In case no matching handle is registered, just free the monitor */
7cf5c297 2032 if (status == -ENOENT)
66bd095a
AP
2033 goto free_monitor;
2034
7cf5c297 2035 return status;
66bd095a
AP
2036
2037free_monitor:
7cf5c297 2038 if (status == -ENOENT)
66bd095a
AP
2039 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
2040 monitor->handle);
2041 hci_free_adv_monitor(hdev, monitor);
2042
7cf5c297 2043 return status;
bd2fbc6c
MC
2044}
2045
7cf5c297
MM
2046/* This function requires the caller holds hci_req_sync_lock */
2047int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
66bd095a
AP
2048{
2049 struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
66bd095a 2050
7cf5c297
MM
2051 if (!monitor)
2052 return -EINVAL;
66bd095a 2053
7cf5c297 2054 return hci_remove_adv_monitor(hdev, monitor);
66bd095a
AP
2055}
2056
7cf5c297
MM
2057/* This function requires the caller holds hci_req_sync_lock */
2058int hci_remove_all_adv_monitor(struct hci_dev *hdev)
bd2fbc6c
MC
2059{
2060 struct adv_monitor *monitor;
66bd095a 2061 int idr_next_id = 0;
7cf5c297 2062 int status = 0;
66bd095a 2063
7cf5c297 2064 while (1) {
66bd095a 2065 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
bd2fbc6c 2066 if (!monitor)
66bd095a 2067 break;
bd2fbc6c 2068
7cf5c297
MM
2069 status = hci_remove_adv_monitor(hdev, monitor);
2070 if (status)
2071 return status;
66bd095a 2072
7cf5c297 2073 idr_next_id++;
bd2fbc6c
MC
2074 }
2075
7cf5c297 2076 return status;
bd2fbc6c
MC
2077}
2078
8208f5a9
MC
2079/* This function requires the caller holds hdev->lock */
2080bool hci_is_adv_monitoring(struct hci_dev *hdev)
2081{
2082 return !idr_is_empty(&hdev->adv_monitors_idr);
2083}
2084
a2a4dedf
AP
2085int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2086{
2087 if (msft_monitor_supported(hdev))
2088 return HCI_ADV_MONITOR_EXT_MSFT;
2089
2090 return HCI_ADV_MONITOR_EXT_NONE;
2091}
2092
dcc36c16 2093struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
b9ee0a78 2094 bdaddr_t *bdaddr, u8 type)
b2a66aad 2095{
8035ded4 2096 struct bdaddr_list *b;
b2a66aad 2097
dcc36c16 2098 list_for_each_entry(b, bdaddr_list, list) {
b9ee0a78 2099 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 2100 return b;
b9ee0a78 2101 }
b2a66aad
AJ
2102
2103 return NULL;
2104}
2105
b950aa88
AN
2106struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2107 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2108 u8 type)
2109{
2110 struct bdaddr_list_with_irk *b;
2111
2112 list_for_each_entry(b, bdaddr_list, list) {
2113 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2114 return b;
2115 }
2116
2117 return NULL;
2118}
2119
8baaa403
APS
2120struct bdaddr_list_with_flags *
2121hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2122 bdaddr_t *bdaddr, u8 type)
2123{
2124 struct bdaddr_list_with_flags *b;
2125
2126 list_for_each_entry(b, bdaddr_list, list) {
2127 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2128 return b;
2129 }
2130
2131 return NULL;
2132}
2133
dcc36c16 2134void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
b2a66aad 2135{
7eb7404f 2136 struct bdaddr_list *b, *n;
b2a66aad 2137
7eb7404f
GT
2138 list_for_each_entry_safe(b, n, bdaddr_list, list) {
2139 list_del(&b->list);
b2a66aad
AJ
2140 kfree(b);
2141 }
b2a66aad
AJ
2142}
2143
dcc36c16 2144int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2145{
2146 struct bdaddr_list *entry;
b2a66aad 2147
b9ee0a78 2148 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
2149 return -EBADF;
2150
dcc36c16 2151 if (hci_bdaddr_list_lookup(list, bdaddr, type))
5e762444 2152 return -EEXIST;
b2a66aad 2153
27f70f3e 2154 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
5e762444
AJ
2155 if (!entry)
2156 return -ENOMEM;
b2a66aad
AJ
2157
2158 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 2159 entry->bdaddr_type = type;
b2a66aad 2160
dcc36c16 2161 list_add(&entry->list, list);
b2a66aad 2162
2a8357f2 2163 return 0;
b2a66aad
AJ
2164}
2165
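A short usage sketch of the list helpers above, using the accept list as an example; note that the add helper rejects both BDADDR_ANY (-EBADF) and duplicates (-EEXIST):

static void example_bdaddr_list(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	if (hci_bdaddr_list_add(&hdev->accept_list, bdaddr, BDADDR_BREDR))
		return;	/* -EBADF or -EEXIST */

	if (hci_bdaddr_list_lookup(&hdev->accept_list, bdaddr, BDADDR_BREDR))
		hci_bdaddr_list_del(&hdev->accept_list, bdaddr, BDADDR_BREDR);
}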
b950aa88
AN
2166int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2167 u8 type, u8 *peer_irk, u8 *local_irk)
2168{
2169 struct bdaddr_list_with_irk *entry;
2170
2171 if (!bacmp(bdaddr, BDADDR_ANY))
2172 return -EBADF;
2173
2174 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2175 return -EEXIST;
2176
2177 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2178 if (!entry)
2179 return -ENOMEM;
2180
2181 bacpy(&entry->bdaddr, bdaddr);
2182 entry->bdaddr_type = type;
2183
2184 if (peer_irk)
2185 memcpy(entry->peer_irk, peer_irk, 16);
2186
2187 if (local_irk)
2188 memcpy(entry->local_irk, local_irk, 16);
2189
2190 list_add(&entry->list, list);
2191
2192 return 0;
2193}
2194
8baaa403
APS
2195int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2196 u8 type, u32 flags)
2197{
2198 struct bdaddr_list_with_flags *entry;
2199
2200 if (!bacmp(bdaddr, BDADDR_ANY))
2201 return -EBADF;
2202
2203 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2204 return -EEXIST;
2205
2206 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2207 if (!entry)
2208 return -ENOMEM;
2209
2210 bacpy(&entry->bdaddr, bdaddr);
2211 entry->bdaddr_type = type;
e1cff700 2212 entry->flags = flags;
8baaa403
APS
2213
2214 list_add(&entry->list, list);
2215
2216 return 0;
2217}
2218
dcc36c16 2219int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2220{
2221 struct bdaddr_list *entry;
b2a66aad 2222
35f7498a 2223 if (!bacmp(bdaddr, BDADDR_ANY)) {
dcc36c16 2224 hci_bdaddr_list_clear(list);
35f7498a
JH
2225 return 0;
2226 }
b2a66aad 2227
dcc36c16 2228 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
d2ab0ac1
MH
2229 if (!entry)
2230 return -ENOENT;
2231
2232 list_del(&entry->list);
2233 kfree(entry);
2234
2235 return 0;
2236}
2237
b950aa88
AN
2238int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2239 u8 type)
2240{
2241 struct bdaddr_list_with_irk *entry;
2242
2243 if (!bacmp(bdaddr, BDADDR_ANY)) {
2244 hci_bdaddr_list_clear(list);
2245 return 0;
2246 }
2247
2248 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2249 if (!entry)
2250 return -ENOENT;
2251
2252 list_del(&entry->list);
2253 kfree(entry);
2254
2255 return 0;
2256}
2257
8baaa403
APS
2258int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2259 u8 type)
2260{
2261 struct bdaddr_list_with_flags *entry;
2262
2263 if (!bacmp(bdaddr, BDADDR_ANY)) {
2264 hci_bdaddr_list_clear(list);
2265 return 0;
2266 }
2267
2268 entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2269 if (!entry)
2270 return -ENOENT;
2271
2272 list_del(&entry->list);
2273 kfree(entry);
2274
2275 return 0;
2276}
2277
15819a70
AG
2278/* This function requires the caller holds hdev->lock */
2279struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2280 bdaddr_t *addr, u8 addr_type)
2281{
2282 struct hci_conn_params *params;
2283
2284 list_for_each_entry(params, &hdev->le_conn_params, list) {
2285 if (bacmp(&params->addr, addr) == 0 &&
2286 params->addr_type == addr_type) {
2287 return params;
2288 }
2289 }
2290
2291 return NULL;
2292}
2293
195ef75e 2294/* This function requires the caller holds hdev->lock or rcu_read_lock */
501f8827
JH
2295struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2296 bdaddr_t *addr, u8 addr_type)
a9b0a04c 2297{
912b42ef 2298 struct hci_conn_params *param;
a9b0a04c 2299
195ef75e
PV
2300 rcu_read_lock();
2301
2302 list_for_each_entry_rcu(param, list, action) {
912b42ef 2303 if (bacmp(&param->addr, addr) == 0 &&
195ef75e
PV
2304 param->addr_type == addr_type) {
2305 rcu_read_unlock();
912b42ef 2306 return param;
195ef75e 2307 }
4b10966f
MH
2308 }
2309
195ef75e
PV
2310 rcu_read_unlock();
2311
4b10966f 2312 return NULL;
a9b0a04c
AG
2313}
2314
195ef75e
PV
2315/* This function requires the caller holds hdev->lock */
2316void hci_pend_le_list_del_init(struct hci_conn_params *param)
2317{
2318 if (list_empty(&param->action))
2319 return;
2320
2321 list_del_rcu(&param->action);
2322 synchronize_rcu();
2323 INIT_LIST_HEAD(&param->action);
2324}
2325
2326/* This function requires the caller holds hdev->lock */
2327void hci_pend_le_list_add(struct hci_conn_params *param,
2328 struct list_head *list)
2329{
2330 list_add_rcu(&param->action, list);
2331}
2332
15819a70 2333/* This function requires the caller holds hdev->lock */
51d167c0
MH
2334struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2335 bdaddr_t *addr, u8 addr_type)
15819a70
AG
2336{
2337 struct hci_conn_params *params;
2338
2339 params = hci_conn_params_lookup(hdev, addr, addr_type);
cef952ce 2340 if (params)
51d167c0 2341 return params;
15819a70
AG
2342
2343 params = kzalloc(sizeof(*params), GFP_KERNEL);
2344 if (!params) {
2064ee33 2345 bt_dev_err(hdev, "out of memory");
51d167c0 2346 return NULL;
15819a70
AG
2347 }
2348
2349 bacpy(&params->addr, addr);
2350 params->addr_type = addr_type;
cef952ce
AG
2351
2352 list_add(&params->list, &hdev->le_conn_params);
93450c75 2353 INIT_LIST_HEAD(&params->action);
cef952ce 2354
bf5b3c8b
MH
2355 params->conn_min_interval = hdev->le_conn_min_interval;
2356 params->conn_max_interval = hdev->le_conn_max_interval;
2357 params->conn_latency = hdev->le_conn_latency;
2358 params->supervision_timeout = hdev->le_supv_timeout;
2359 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2360
2361 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2362
51d167c0 2363 return params;
bf5b3c8b
MH
2364}
2365
195ef75e 2366void hci_conn_params_free(struct hci_conn_params *params)
15819a70 2367{
195ef75e
PV
2368 hci_pend_le_list_del_init(params);
2369
f8aaf9b6 2370 if (params->conn) {
f161dd41 2371 hci_conn_drop(params->conn);
f8aaf9b6
JH
2372 hci_conn_put(params->conn);
2373 }
f161dd41 2374
15819a70
AG
2375 list_del(&params->list);
2376 kfree(params);
f6c63249
JH
2377}
2378
2379/* This function requires the caller holds hdev->lock */
2380void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2381{
2382 struct hci_conn_params *params;
2383
2384 params = hci_conn_params_lookup(hdev, addr, addr_type);
2385 if (!params)
2386 return;
2387
2388 hci_conn_params_free(params);
15819a70 2389
5bee2fd6 2390 hci_update_passive_scan(hdev);
95305baa 2391
15819a70
AG
2392 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2393}
2394
2395/* This function requires the caller holds hdev->lock */
55af49a8 2396void hci_conn_params_clear_disabled(struct hci_dev *hdev)
15819a70
AG
2397{
2398 struct hci_conn_params *params, *tmp;
2399
2400 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
55af49a8
JH
2401 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2402 continue;
f75113a2 2403
91641b79 2404 /* If trying to establish a one-time connection to a disabled
f75113a2
JP
2405 * device, leave the params, but mark them as one-time only.
2406 */
2407 if (params->explicit_connect) {
2408 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2409 continue;
2410 }
2411
195ef75e 2412 hci_conn_params_free(params);
15819a70
AG
2413 }
2414
55af49a8 2415 BT_DBG("All LE disabled connection parameters were removed");
77a77a30
AG
2416}
2417
2418/* This function requires the caller holds hdev->lock */
030e7f81 2419static void hci_conn_params_clear_all(struct hci_dev *hdev)
77a77a30 2420{
15819a70 2421 struct hci_conn_params *params, *tmp;
77a77a30 2422
f6c63249
JH
2423 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2424 hci_conn_params_free(params);
77a77a30 2425
15819a70 2426 BT_DBG("All LE connection parameters were removed");
77a77a30
AG
2427}
2428
a1f4c318
JH
2429/* Copy the Identity Address of the controller.
2430 *
2431 * If the controller has a public BD_ADDR, then by default use that one.
2432 * If this is a LE only controller without a public address, default to
2433 * the static random address.
2434 *
2435 * For debugging purposes it is possible to force controllers with a
2436 * public address to use the static random address instead.
50b5b952
MH
2437 *
2438 * In case BR/EDR has been disabled on a dual-mode controller and
2439 * userspace has configured a static address, then that address
2440 * becomes the identity address instead of the public BR/EDR address.
a1f4c318
JH
2441 */
2442void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2443 u8 *bdaddr_type)
2444{
b7cb93e5 2445 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
50b5b952 2446 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
d7a5a11d 2447 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
50b5b952 2448 bacmp(&hdev->static_addr, BDADDR_ANY))) {
a1f4c318
JH
2449 bacpy(bdaddr, &hdev->static_addr);
2450 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2451 } else {
2452 bacpy(bdaddr, &hdev->bdaddr);
2453 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2454 }
2455}
2456
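To make the precedence above concrete, a hedged example: on a dual-mode controller where BR/EDR has been disabled and userspace configured a static address, the lookup resolves to the static random address:

	bdaddr_t addr;
	u8 addr_type;

	hci_copy_identity_address(hdev, &addr, &addr_type);
	/* addr == hdev->static_addr, addr_type == ADDR_LE_DEV_RANDOM */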
2f20216c
APS
2457static void hci_clear_wake_reason(struct hci_dev *hdev)
2458{
2459 hci_dev_lock(hdev);
2460
2461 hdev->wake_reason = 0;
2462 bacpy(&hdev->wake_addr, BDADDR_ANY);
2463 hdev->wake_addr_type = 0;
2464
2465 hci_dev_unlock(hdev);
2466}
2467
9952d90e
APS
2468static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2469 void *data)
2470{
2471 struct hci_dev *hdev =
2472 container_of(nb, struct hci_dev, suspend_notifier);
2473 int ret = 0;
9952d90e 2474
4b8af331
APS
2475 /* Userspace has full control of this device. Do nothing. */
2476 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2477 return NOTIFY_DONE;
2478
573ebae1
YH
2479 /* To avoid a potential race with hci_unregister_dev. */
2480 hci_dev_hold(hdev);
2481
e1b77d68
LAD
2482 if (action == PM_SUSPEND_PREPARE)
2483 ret = hci_suspend_dev(hdev);
2484 else if (action == PM_POST_SUSPEND)
2485 ret = hci_resume_dev(hdev);
2f20216c 2486
a9ec8423
APS
2487 if (ret)
2488 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2489 action, ret);
2490
573ebae1 2491 hci_dev_put(hdev);
24b06572 2492 return NOTIFY_DONE;
9952d90e 2493}
8731840a 2494
9be0dab7 2495/* Alloc HCI device */
6ec56613 2496struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
9be0dab7
DR
2497{
2498 struct hci_dev *hdev;
6ec56613 2499 unsigned int alloc_size;
9be0dab7 2500
6ec56613
THJA
2501 alloc_size = sizeof(*hdev);
2502 if (sizeof_priv) {
2503 /* Fixme: May need ALIGN-ment? */
2504 alloc_size += sizeof_priv;
2505 }
9be0dab7 2506
6ec56613 2507 hdev = kzalloc(alloc_size, GFP_KERNEL);
9be0dab7
DR
2508 if (!hdev)
2509 return NULL;
2510
b1b813d4
DR
2511 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2512 hdev->esco_type = (ESCO_HV1);
2513 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
2514 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2515 hdev->io_capability = 0x03; /* No Input No Output */
96c2103a 2516 hdev->manufacturer = 0xffff; /* Default to internal use */
bbaf444a
JH
2517 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2518 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
d2609b34
FG
2519 hdev->adv_instance_cnt = 0;
2520 hdev->cur_adv_instance = 0x00;
5d900e46 2521 hdev->adv_instance_timeout = 0;
b1b813d4 2522
c4f1f408
HC
2523 hdev->advmon_allowlist_duration = 300;
2524 hdev->advmon_no_filter_duration = 500;
80af16a3 2525 hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */
c4f1f408 2526
b1b813d4
DR
2527 hdev->sniff_max_interval = 800;
2528 hdev->sniff_min_interval = 80;
2529
3f959d46 2530 hdev->le_adv_channel_map = 0x07;
628531c9
GL
2531 hdev->le_adv_min_interval = 0x0800;
2532 hdev->le_adv_max_interval = 0x0800;
7c2cc5b1
LAD
2533 hdev->le_scan_interval = DISCOV_LE_SCAN_INT_FAST;
2534 hdev->le_scan_window = DISCOV_LE_SCAN_WIN_FAST;
2535 hdev->le_scan_int_suspend = DISCOV_LE_SCAN_INT_SLOW1;
2536 hdev->le_scan_window_suspend = DISCOV_LE_SCAN_WIN_SLOW1;
10873f99
AM
2537 hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2538 hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
7c2cc5b1
LAD
2539 hdev->le_scan_int_adv_monitor = DISCOV_LE_SCAN_INT_FAST;
2540 hdev->le_scan_window_adv_monitor = DISCOV_LE_SCAN_WIN_FAST;
2541 hdev->le_scan_int_connect = DISCOV_LE_SCAN_INT_CONN;
2542 hdev->le_scan_window_connect = DISCOV_LE_SCAN_WIN_CONN;
b48c3b59
JH
2543 hdev->le_conn_min_interval = 0x0018;
2544 hdev->le_conn_max_interval = 0x0028;
04fb7d90
MH
2545 hdev->le_conn_latency = 0x0000;
2546 hdev->le_supv_timeout = 0x002a;
a8e1bfaa
MH
2547 hdev->le_def_tx_len = 0x001b;
2548 hdev->le_def_tx_time = 0x0148;
2549 hdev->le_max_tx_len = 0x001b;
2550 hdev->le_max_tx_time = 0x0148;
2551 hdev->le_max_rx_len = 0x001b;
2552 hdev->le_max_rx_time = 0x0148;
30d65e08
MK
2553 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2554 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
6decb5b4
JK
2555 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2556 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
1d0fac2c 2557 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
10873f99 2558 hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
21d74b6b 2559 hdev->def_le_autoconnect_timeout = HCI_LE_CONN_TIMEOUT;
7c395ea5
DW
2560 hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2561 hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
bef64738 2562
d6bfd59c 2563 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
b9a7a61e 2564 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
31ad1691
AK
2565 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2566 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
302975cb 2567 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
58a96fc3 2568 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
d6bfd59c 2569
10873f99
AM
2570 /* default 1.28 sec page scan */
2571 hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2572 hdev->def_page_scan_int = 0x0800;
2573 hdev->def_page_scan_window = 0x0012;
2574
b1b813d4
DR
2575 mutex_init(&hdev->lock);
2576 mutex_init(&hdev->req_lock);
2577
181a42ed
ZX
2578 ida_init(&hdev->unset_handle_ida);
2579
b338d917 2580 INIT_LIST_HEAD(&hdev->mesh_pending);
b1b813d4 2581 INIT_LIST_HEAD(&hdev->mgmt_pending);
3d4f9c00
AP
2582 INIT_LIST_HEAD(&hdev->reject_list);
2583 INIT_LIST_HEAD(&hdev->accept_list);
b1b813d4
DR
2584 INIT_LIST_HEAD(&hdev->uuids);
2585 INIT_LIST_HEAD(&hdev->link_keys);
2586 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 2587 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 2588 INIT_LIST_HEAD(&hdev->remote_oob_data);
3d4f9c00 2589 INIT_LIST_HEAD(&hdev->le_accept_list);
cfdb0c2d 2590 INIT_LIST_HEAD(&hdev->le_resolv_list);
15819a70 2591 INIT_LIST_HEAD(&hdev->le_conn_params);
77a77a30 2592 INIT_LIST_HEAD(&hdev->pend_le_conns);
66f8455a 2593 INIT_LIST_HEAD(&hdev->pend_le_reports);
6b536b5e 2594 INIT_LIST_HEAD(&hdev->conn_hash.list);
d2609b34 2595 INIT_LIST_HEAD(&hdev->adv_instances);
600a8749 2596 INIT_LIST_HEAD(&hdev->blocked_keys);
3368aa35 2597 INIT_LIST_HEAD(&hdev->monitored_devices);
b1b813d4 2598
8961987f 2599 INIT_LIST_HEAD(&hdev->local_codecs);
b1b813d4
DR
2600 INIT_WORK(&hdev->rx_work, hci_rx_work);
2601 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2602 INIT_WORK(&hdev->tx_work, hci_tx_work);
2603 INIT_WORK(&hdev->power_on, hci_power_on);
c7741d16 2604 INIT_WORK(&hdev->error_reset, hci_error_reset);
b1b813d4 2605
6a98e383
MH
2606 hci_cmd_sync_init(hdev);
2607
b1b813d4 2608 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
b1b813d4 2609
b1b813d4
DR
2610 skb_queue_head_init(&hdev->rx_q);
2611 skb_queue_head_init(&hdev->cmd_q);
2612 skb_queue_head_init(&hdev->raw_q);
2613
2614 init_waitqueue_head(&hdev->req_wait_q);
2615
65cc2b49 2616 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
de75cd0d 2617 INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
b1b813d4 2618
9695ef87 2619 hci_devcd_setup(hdev);
5fc16cc4
JH
2620 hci_request_setup(hdev);
2621
b1b813d4
DR
2622 hci_init_sysfs(hdev);
2623 discovery_init(hdev);
9be0dab7
DR
2624
2625 return hdev;
2626}
6ec56613 2627EXPORT_SYMBOL(hci_alloc_dev_priv);
9be0dab7
DR
2628
2629/* Free HCI device */
2630void hci_free_dev(struct hci_dev *hdev)
2631{
9be0dab7
DR
2632 /* will be freed via the device release callback */
2633 put_device(&hdev->dev);
2634}
2635EXPORT_SYMBOL(hci_free_dev);
2636
1da177e4
LT
2637/* Register HCI device */
2638int hci_register_dev(struct hci_dev *hdev)
2639{
b1b813d4 2640 int id, error;
1da177e4 2641
74292d5a 2642 if (!hdev->open || !hdev->close || !hdev->send)
1da177e4
LT
2643 return -EINVAL;
2644
08add513
MM
2645 /* Do not allow HCI_AMP devices to register at index 0,
2646 * so the index can be used as the AMP controller ID.
2647 */
3df92b31 2648 switch (hdev->dev_type) {
ca8bee5d 2649 case HCI_PRIMARY:
9c16d0c8 2650 id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
3df92b31
SL
2651 break;
2652 case HCI_AMP:
9c16d0c8
CJ
2653 id = ida_alloc_range(&hci_index_ida, 1, HCI_MAX_ID - 1,
2654 GFP_KERNEL);
3df92b31
SL
2655 break;
2656 default:
2657 return -EINVAL;
1da177e4 2658 }
8e87d142 2659
3df92b31
SL
2660 if (id < 0)
2661 return id;
2662
dcda1657
LAD
2663 error = dev_set_name(&hdev->dev, "hci%u", id);
2664 if (error)
2665 return error;
2666
2667 hdev->name = dev_name(&hdev->dev);
1da177e4 2668 hdev->id = id;
2d8b3a11
AE
2669
2670 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2671
29e2dd0d 2672 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
33ca954d
DR
2673 if (!hdev->workqueue) {
2674 error = -ENOMEM;
2675 goto err;
2676 }
f48fd9c8 2677
29e2dd0d
TH
2678 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2679 hdev->name);
6ead1bbc
JH
2680 if (!hdev->req_workqueue) {
2681 destroy_workqueue(hdev->workqueue);
2682 error = -ENOMEM;
2683 goto err;
2684 }
2685
0153e2ec
MH
2686 if (!IS_ERR_OR_NULL(bt_debugfs))
2687 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2688
bdc3e0f1 2689 error = device_add(&hdev->dev);
33ca954d 2690 if (error < 0)
54506918 2691 goto err_wqueue;
1da177e4 2692
6d5d2ee6
HK
2693 hci_leds_init(hdev);
2694
611b30f7 2695 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
2696 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2697 hdev);
611b30f7
MH
2698 if (hdev->rfkill) {
2699 if (rfkill_register(hdev->rfkill) < 0) {
2700 rfkill_destroy(hdev->rfkill);
2701 hdev->rfkill = NULL;
2702 }
2703 }
2704
5e130367 2705 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
a1536da2 2706 hci_dev_set_flag(hdev, HCI_RFKILLED);
5e130367 2707
a1536da2
MH
2708 hci_dev_set_flag(hdev, HCI_SETUP);
2709 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
ce2be9ac 2710
ca8bee5d 2711 if (hdev->dev_type == HCI_PRIMARY) {
56f87901
JH
2712 /* Assume BR/EDR support until proven otherwise (such as
2713 * through reading supported features during init).
2714 */
a1536da2 2715 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
56f87901 2716 }
ce2be9ac 2717
fcee3377
GP
2718 write_lock(&hci_dev_list_lock);
2719 list_add(&hdev->list, &hci_dev_list);
2720 write_unlock(&hci_dev_list_lock);
2721
4a964404
MH
2722 /* Devices that are marked for raw-only usage are unconfigured
2723 * and should not be included in normal operation.
fee746b0
MH
2724 */
2725 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
a1536da2 2726 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
fee746b0 2727
fe92ee64
LAD
2728 /* Mark Remote Wakeup connection flag as supported if driver has wakeup
2729 * callback.
2730 */
2731 if (hdev->wakeup)
e1cff700 2732 hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
fe92ee64 2733
05fcd4c4 2734 hci_sock_dev_event(hdev, HCI_DEV_REG);
dc946bd8 2735 hci_dev_hold(hdev);
1da177e4 2736
91117864
DC
2737 error = hci_register_suspend_notifier(hdev);
2738 if (error)
0d75da38 2739 BT_WARN("register suspend notifier failed error:%d\n", error);
9952d90e 2740
19202573 2741 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 2742
e5e1e7fd 2743 idr_init(&hdev->adv_monitors_idr);
5031ffcc 2744 msft_register(hdev);
e5e1e7fd 2745
1da177e4 2746 return id;
f48fd9c8 2747
33ca954d 2748err_wqueue:
5a4bb6a8 2749 debugfs_remove_recursive(hdev->debugfs);
33ca954d 2750 destroy_workqueue(hdev->workqueue);
6ead1bbc 2751 destroy_workqueue(hdev->req_workqueue);
33ca954d 2752err:
9c16d0c8 2753 ida_free(&hci_index_ida, hdev->id);
f48fd9c8 2754
33ca954d 2755 return error;
1da177e4
LT
2756}
2757EXPORT_SYMBOL(hci_register_dev);
2758
2759/* Unregister HCI device */
59735631 2760void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 2761{
c13854ce 2762 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 2763
1857c199 2764 mutex_lock(&hdev->unregister_lock);
a1536da2 2765 hci_dev_set_flag(hdev, HCI_UNREGISTER);
1857c199 2766 mutex_unlock(&hdev->unregister_lock);
94324962 2767
f20d09d5 2768 write_lock(&hci_dev_list_lock);
1da177e4 2769 list_del(&hdev->list);
f20d09d5 2770 write_unlock(&hci_dev_list_lock);
1da177e4 2771
e36bea6e
VV
2772 cancel_work_sync(&hdev->power_on);
2773
6a98e383
MH
2774 hci_cmd_sync_clear(hdev);
2775
359ee4f8 2776 hci_unregister_suspend_notifier(hdev);
4e8c36c3
APS
2777
2778 hci_dev_do_close(hdev);
9952d90e 2779
ab81cbf9 2780 if (!test_bit(HCI_INIT, &hdev->flags) &&
d7a5a11d
MH
2781 !hci_dev_test_flag(hdev, HCI_SETUP) &&
2782 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
09fd0de5 2783 hci_dev_lock(hdev);
744cf19e 2784 mgmt_index_removed(hdev);
09fd0de5 2785 hci_dev_unlock(hdev);
56e5cb86 2786 }
ab81cbf9 2787
2e58ef3e
JH
2788 /* mgmt_index_removed should take care of emptying the
2789 * pending list */
2790 BUG_ON(!list_empty(&hdev->mgmt_pending));
2791
05fcd4c4 2792 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
1da177e4 2793
611b30f7
MH
2794 if (hdev->rfkill) {
2795 rfkill_unregister(hdev->rfkill);
2796 rfkill_destroy(hdev->rfkill);
2797 }
2798
bdc3e0f1 2799 device_del(&hdev->dev);
e61fbee7 2800 /* Actual cleanup is deferred until hci_release_dev(). */
e0448092
TH
2801 hci_dev_put(hdev);
2802}
2803EXPORT_SYMBOL(hci_unregister_dev);
147e2d59 2804
58ce6d5b
TH
2805/* Release HCI device */
2806void hci_release_dev(struct hci_dev *hdev)
e0448092 2807{
0153e2ec 2808 debugfs_remove_recursive(hdev->debugfs);
5177a838
MH
2809 kfree_const(hdev->hw_info);
2810 kfree_const(hdev->fw_info);
0153e2ec 2811
f48fd9c8 2812 destroy_workqueue(hdev->workqueue);
6ead1bbc 2813 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 2814
09fd0de5 2815 hci_dev_lock(hdev);
3d4f9c00
AP
2816 hci_bdaddr_list_clear(&hdev->reject_list);
2817 hci_bdaddr_list_clear(&hdev->accept_list);
2aeb9a1a 2818 hci_uuids_clear(hdev);
55ed8ca1 2819 hci_link_keys_clear(hdev);
b899efaf 2820 hci_smp_ltks_clear(hdev);
970c4e46 2821 hci_smp_irks_clear(hdev);
2763eda6 2822 hci_remote_oob_data_clear(hdev);
d2609b34 2823 hci_adv_instances_clear(hdev);
e5e1e7fd 2824 hci_adv_monitors_clear(hdev);
3d4f9c00 2825 hci_bdaddr_list_clear(&hdev->le_accept_list);
cfdb0c2d 2826 hci_bdaddr_list_clear(&hdev->le_resolv_list);
373110c5 2827 hci_conn_params_clear_all(hdev);
22078800 2828 hci_discovery_filter_clear(hdev);
600a8749 2829 hci_blocked_keys_clear(hdev);
b938790e 2830 hci_codec_list_clear(&hdev->local_codecs);
10f9f426 2831 msft_release(hdev);
09fd0de5 2832 hci_dev_unlock(hdev);
e2e0cacb 2833
181a42ed 2834 ida_destroy(&hdev->unset_handle_ida);
9c16d0c8 2835 ida_free(&hci_index_ida, hdev->id);
dd3b1dc3 2836 kfree_skb(hdev->sent_cmd);
2615fd9a 2837 kfree_skb(hdev->req_skb);
dfe6d5c3 2838 kfree_skb(hdev->recv_event);
58ce6d5b 2839 kfree(hdev);
1da177e4 2840}
58ce6d5b 2841EXPORT_SYMBOL(hci_release_dev);
1da177e4 2842
359ee4f8
APS
2843int hci_register_suspend_notifier(struct hci_dev *hdev)
2844{
2845 int ret = 0;
2846
b5ca3387
LAD
2847 if (!hdev->suspend_notifier.notifier_call &&
2848 !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
359ee4f8
APS
2849 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2850 ret = register_pm_notifier(&hdev->suspend_notifier);
2851 }
2852
2853 return ret;
2854}
2855
2856int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2857{
2858 int ret = 0;
2859
b5ca3387 2860 if (hdev->suspend_notifier.notifier_call) {
359ee4f8 2861 ret = unregister_pm_notifier(&hdev->suspend_notifier);
b5ca3387
LAD
2862 if (!ret)
2863 hdev->suspend_notifier.notifier_call = NULL;
2864 }
359ee4f8
APS
2865
2866 return ret;
2867}
2868
63298d6e
LAD
2869/* Cancel ongoing command synchronously:
2870 *
2871 * - Cancel command timer
2872 * - Reset command counter
2873 * - Cancel command request
2874 */
2875static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
2876{
2877 bt_dev_dbg(hdev, "err 0x%2.2x", err);
2878
2879 cancel_delayed_work_sync(&hdev->cmd_timer);
2880 cancel_delayed_work_sync(&hdev->ncmd_timer);
2881 atomic_set(&hdev->cmd_cnt, 1);
2882
6946b9c9 2883 hci_cmd_sync_cancel_sync(hdev, err);
63298d6e
LAD
2884}
2885
1da177e4
LT
2886/* Suspend HCI device */
2887int hci_suspend_dev(struct hci_dev *hdev)
2888{
e1b77d68 2889 int ret;
e1b77d68
LAD
2890
2891 bt_dev_dbg(hdev, "");
2892
2893 /* Suspend should only act on when powered. */
2894 if (!hdev_is_powered(hdev) ||
2895 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2896 return 0;
2897
182ee45d
LAD
2898 /* If powering down don't attempt to suspend */
2899 if (mgmt_powering_down(hdev))
2900 return 0;
4539ca67 2901
f4198635 2902 /* Cancel potentially blocking sync operation before suspend */
6946b9c9 2903 hci_cancel_cmd_sync(hdev, EHOSTDOWN);
f4198635 2904
182ee45d
LAD
2905 hci_req_sync_lock(hdev);
2906 ret = hci_suspend_sync(hdev);
2907 hci_req_sync_unlock(hdev);
e1b77d68
LAD
2908
2909 hci_clear_wake_reason(hdev);
182ee45d 2910 mgmt_suspending(hdev, hdev->suspend_state);
e1b77d68 2911
05fcd4c4 2912 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
e1b77d68 2913 return ret;
1da177e4
LT
2914}
2915EXPORT_SYMBOL(hci_suspend_dev);
2916
2917/* Resume HCI device */
2918int hci_resume_dev(struct hci_dev *hdev)
2919{
e1b77d68
LAD
2920 int ret;
2921
2922 bt_dev_dbg(hdev, "");
2923
2924 /* Resume should only act on when powered. */
2925 if (!hdev_is_powered(hdev) ||
2926 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2927 return 0;
2928
2929 /* If powering down don't attempt to resume */
2930 if (mgmt_powering_down(hdev))
2931 return 0;
2932
182ee45d
LAD
2933 hci_req_sync_lock(hdev);
2934 ret = hci_resume_sync(hdev);
2935 hci_req_sync_unlock(hdev);
e1b77d68
LAD
2936
2937 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
182ee45d 2938 hdev->wake_addr_type);
e1b77d68 2939
05fcd4c4 2940 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
e1b77d68 2941 return ret;
1da177e4
LT
2942}
2943EXPORT_SYMBOL(hci_resume_dev);
2944
75e0569f
MH
2945/* Reset HCI device */
2946int hci_reset_dev(struct hci_dev *hdev)
2947{
1e4b6e91 2948 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
75e0569f
MH
2949 struct sk_buff *skb;
2950
2951 skb = bt_skb_alloc(3, GFP_ATOMIC);
2952 if (!skb)
2953 return -ENOMEM;
2954
d79f34e3 2955 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
59ae1d12 2956 skb_put_data(skb, hw_err, 3);
75e0569f 2957
de75cd0d
MM
2958 bt_dev_err(hdev, "Injecting HCI hardware error event");
2959
75e0569f
MH
2960 /* Send Hardware Error to upper stack */
2961 return hci_recv_frame(hdev, skb);
2962}
2963EXPORT_SYMBOL(hci_reset_dev);
2964
76bca880 2965/* Receive frame from HCI drivers */
e1a26170 2966int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 2967{
76bca880 2968 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 2969 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
2970 kfree_skb(skb);
2971 return -ENXIO;
2972 }
2973
876e7810
LAD
2974 switch (hci_skb_pkt_type(skb)) {
2975 case HCI_EVENT_PKT:
2976 break;
2977 case HCI_ACLDATA_PKT:
2978 /* Detect if ISO packet has been sent as ACL */
2979 if (hci_conn_num(hdev, ISO_LINK)) {
2980 __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
2981 __u8 type;
2982
2983 type = hci_conn_lookup_type(hdev, hci_handle(handle));
2984 if (type == ISO_LINK)
2985 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
2986 }
2987 break;
2988 case HCI_SCODATA_PKT:
2989 break;
2990 case HCI_ISODATA_PKT:
2991 break;
2992 default:
fe806dce
MH
2993 kfree_skb(skb);
2994 return -EINVAL;
2995 }
2996
d82603c6 2997 /* Incoming skb */
76bca880
MH
2998 bt_cb(skb)->incoming = 1;
2999
3000 /* Time stamp */
3001 __net_timestamp(skb);
3002
76bca880 3003 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 3004 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 3005
76bca880
MH
3006 return 0;
3007}
3008EXPORT_SYMBOL(hci_recv_frame);
3009
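For context, a sketch of the driver side of this interface: a transport driver that has reassembled a complete HCI event could hand it to the core like this (buf and len are hypothetical):

static int example_driver_rx(struct hci_dev *hdev, const void *buf,
			     size_t len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	skb_put_data(skb, buf, len);

	/* hci_recv_frame() consumes the skb, even on error */
	return hci_recv_frame(hdev, skb);
}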
e875ff84
MH
3010/* Receive diagnostic message from HCI drivers */
3011int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3012{
581d6fd6 3013 /* Mark as diagnostic packet */
d79f34e3 3014 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
581d6fd6 3015
e875ff84
MH
3016 /* Time stamp */
3017 __net_timestamp(skb);
3018
581d6fd6
MH
3019 skb_queue_tail(&hdev->rx_q, skb);
3020 queue_work(hdev->workqueue, &hdev->rx_work);
e875ff84 3021
e875ff84
MH
3022 return 0;
3023}
3024EXPORT_SYMBOL(hci_recv_diag);
3025
5177a838
MH
3026void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3027{
3028 va_list vargs;
3029
3030 va_start(vargs, fmt);
3031 kfree_const(hdev->hw_info);
3032 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3033 va_end(vargs);
3034}
3035EXPORT_SYMBOL(hci_set_hw_info);
3036
3037void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3038{
3039 va_list vargs;
3040
3041 va_start(vargs, fmt);
3042 kfree_const(hdev->fw_info);
3043 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3044 va_end(vargs);
3045}
3046EXPORT_SYMBOL(hci_set_fw_info);
3047
1da177e4
LT
3048/* ---- Interface to upper protocols ---- */
3049
1da177e4
LT
3050int hci_register_cb(struct hci_cb *cb)
3051{
3052 BT_DBG("%p name %s", cb, cb->name);
3053
fba7ecf0 3054 mutex_lock(&hci_cb_list_lock);
00629e0f 3055 list_add_tail(&cb->list, &hci_cb_list);
fba7ecf0 3056 mutex_unlock(&hci_cb_list_lock);
1da177e4
LT
3057
3058 return 0;
3059}
3060EXPORT_SYMBOL(hci_register_cb);
3061
3062int hci_unregister_cb(struct hci_cb *cb)
3063{
3064 BT_DBG("%p name %s", cb, cb->name);
3065
fba7ecf0 3066 mutex_lock(&hci_cb_list_lock);
1da177e4 3067 list_del(&cb->list);
fba7ecf0 3068 mutex_unlock(&hci_cb_list_lock);
1da177e4
LT
3069
3070 return 0;
3071}
3072EXPORT_SYMBOL(hci_unregister_cb);
3073
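A minimal sketch of an upper-protocol callback set, mirroring what L2CAP and SCO register; only the hooks a protocol cares about need to be filled in (the field names assume the struct hci_cb definition in hci_core.h):

static void example_connect_cfm(struct hci_conn *conn, __u8 status)
{
	BT_DBG("conn %p status 0x%2.2x", conn, status);
}

static struct hci_cb example_cb = {
	.name		= "example",
	.connect_cfm	= example_connect_cfm,
};

/* hci_register_cb(&example_cb) at module init,
 * hci_unregister_cb(&example_cb) at module exit.
 */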
2250abad 3074static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 3075{
cdc52faa
MH
3076 int err;
3077
d79f34e3
MH
3078 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3079 skb->len);
1da177e4 3080
cd82e61c
MH
3081 /* Time stamp */
3082 __net_timestamp(skb);
1da177e4 3083
cd82e61c
MH
3084 /* Send copy to monitor */
3085 hci_send_to_monitor(hdev, skb);
3086
3087 if (atomic_read(&hdev->promisc)) {
3088 /* Send copy to the sockets */
470fe1b5 3089 hci_send_to_sock(hdev, skb);
1da177e4
LT
3090 }
3091
3092 /* Get rid of skb owner, prior to sending to the driver. */
3093 skb_orphan(skb);
3094
73d0d3c8
MH
3095 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3096 kfree_skb(skb);
2250abad 3097 return -EINVAL;
73d0d3c8
MH
3098 }
3099
cdc52faa
MH
3100 err = hdev->send(hdev, skb);
3101 if (err < 0) {
2064ee33 3102 bt_dev_err(hdev, "sending frame failed (%d)", err);
cdc52faa 3103 kfree_skb(skb);
2250abad 3104 return err;
cdc52faa 3105 }
2250abad
BB
3106
3107 return 0;
1da177e4
LT
3108}
3109
1ca3a9d0 3110/* Send HCI command */
07dc93dd
JH
3111int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3112 const void *param)
1ca3a9d0
JH
3113{
3114 struct sk_buff *skb;
3115
3116 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3117
3118 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3119 if (!skb) {
2064ee33 3120 bt_dev_err(hdev, "no memory for command");
1ca3a9d0
JH
3121 return -ENOMEM;
3122 }
3123
49c922bb 3124 /* Stand-alone HCI commands must be flagged as
11714b3d
JH
3125 * single-command requests.
3126 */
44d27137 3127 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
11714b3d 3128
1da177e4 3129 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 3130 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
3131
3132 return 0;
3133}
1da177e4 3134
d6ee6ad7
LP
3135int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3136 const void *param)
3137{
3138 struct sk_buff *skb;
3139
3140 if (hci_opcode_ogf(opcode) != 0x3f) {
3141 /* A controller receiving a command shall respond with either
3142 * a Command Status Event or a Command Complete Event.
3143 * Therefore, all standard HCI commands must be sent via the
3144 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3145 * Some vendors do not comply with this rule for vendor-specific
3146 * commands and do not return any event. We want to support
3147 * unresponded commands for such cases only.
3148 */
3149 bt_dev_err(hdev, "unresponded command not supported");
3150 return -EINVAL;
3151 }
3152
3153 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3154 if (!skb) {
3155 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3156 opcode);
3157 return -ENOMEM;
3158 }
3159
3160 hci_send_frame(hdev, skb);
3161
3162 return 0;
3163}
3164EXPORT_SYMBOL(__hci_cmd_send);
3165
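To illustrate the vendor-only restriction above, a sketch of a fire-and-forget vendor command; the OCF and payload byte are hypothetical:

static int example_vendor_cmd(struct hci_dev *hdev)
{
	u8 param = 0x01;	/* hypothetical vendor payload */

	/* OGF 0x3f is the vendor range; anything else returns -EINVAL */
	return __hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0001),
			      sizeof(param), &param);
}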
1da177e4 3166/* Get data from the previously sent command */
2615fd9a 3167static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
1da177e4
LT
3168{
3169 struct hci_command_hdr *hdr;
3170
2615fd9a 3171 if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
1da177e4
LT
3172 return NULL;
3173
2615fd9a 3174 hdr = (void *)skb->data;
1da177e4 3175
a9de9248 3176 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
3177 return NULL;
3178
2615fd9a
LAD
3179 return skb->data + HCI_COMMAND_HDR_SIZE;
3180}
1da177e4 3181
2615fd9a
LAD
3182/* Get data from the previously sent command */
3183void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3184{
3185 void *data;
3186
3187 /* Check if opcode matches last sent command */
3188 data = hci_cmd_data(hdev->sent_cmd, opcode);
3189 if (!data)
3190 /* Check if opcode matches last request */
3191 data = hci_cmd_data(hdev->req_skb, opcode);
3192
3193 return data;
1da177e4
LT
3194}
3195
dfe6d5c3
LAD
3196/* Get data from last received event */
3197void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
3198{
3199 struct hci_event_hdr *hdr;
3200 int offset;
3201
3202 if (!hdev->recv_event)
3203 return NULL;
3204
3205 hdr = (void *)hdev->recv_event->data;
3206 offset = sizeof(*hdr);
3207
3208 if (hdr->evt != event) {
3209 /* In case of an LE meta event, check the subevent for a match */
3210 if (hdr->evt == HCI_EV_LE_META) {
3211 struct hci_ev_le_meta *ev;
3212
3213 ev = (void *)hdev->recv_event->data + offset;
3214 offset += sizeof(*ev);
3215 if (ev->subevent == event)
3216 goto found;
3217 }
3218 return NULL;
3219 }
3220
3221found:
3222 bt_dev_dbg(hdev, "event 0x%2.2x", event);
3223
3224 return hdev->recv_event->data + offset;
3225}
3226
1da177e4
LT
3227/* Send ACL data */
3228static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3229{
3230 struct hci_acl_hdr *hdr;
3231 int len = skb->len;
3232
badff6d0
ACM
3233 skb_push(skb, HCI_ACL_HDR_SIZE);
3234 skb_reset_transport_header(skb);
9c70220b 3235 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
3236 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3237 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
3238}
3239
ee22be7e 3240static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 3241 struct sk_buff *skb, __u16 flags)
1da177e4 3242{
ee22be7e 3243 struct hci_conn *conn = chan->conn;
1da177e4
LT
3244 struct hci_dev *hdev = conn->hdev;
3245 struct sk_buff *list;
3246
087bfd99
GP
3247 skb->len = skb_headlen(skb);
3248 skb->data_len = 0;
3249
d79f34e3 3250 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
204a6e54
AE
3251
3252 switch (hdev->dev_type) {
ca8bee5d 3253 case HCI_PRIMARY:
204a6e54
AE
3254 hci_add_acl_hdr(skb, conn->handle, flags);
3255 break;
3256 case HCI_AMP:
3257 hci_add_acl_hdr(skb, chan->handle, flags);
3258 break;
3259 default:
2064ee33 3260 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
204a6e54
AE
3261 return;
3262 }
087bfd99 3263
70f23020
AE
3264 list = skb_shinfo(skb)->frag_list;
3265 if (!list) {
1da177e4
LT
3266 /* Non fragmented */
3267 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3268
73d80deb 3269 skb_queue_tail(queue, skb);
1da177e4
LT
3270 } else {
3271 /* Fragmented */
3272 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3273
3274 skb_shinfo(skb)->frag_list = NULL;
3275
9cfd5a23
JR
3276 /* Queue all fragments atomically. We need to use spin_lock_bh
3277 * here because of 6LoWPAN links, as there this function is
3278 * called from softirq and using normal spin lock could cause
3279 * deadlocks.
3280 */
3281 spin_lock_bh(&queue->lock);
1da177e4 3282
73d80deb 3283 __skb_queue_tail(queue, skb);
e702112f
AE
3284
3285 flags &= ~ACL_START;
3286 flags |= ACL_CONT;
1da177e4
LT
3287 do {
3288 skb = list; list = list->next;
8e87d142 3289
d79f34e3 3290 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
e702112f 3291 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
3292
3293 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3294
73d80deb 3295 __skb_queue_tail(queue, skb);
1da177e4
LT
3296 } while (list);
3297
9cfd5a23 3298 spin_unlock_bh(&queue->lock);
1da177e4 3299 }
73d80deb
LAD
3300}
3301
3302void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3303{
ee22be7e 3304 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 3305
f0e09510 3306 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 3307
ee22be7e 3308 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 3309
3eff45ea 3310 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3311}
1da177e4
LT
3312
3313/* Send SCO data */
0d861d8b 3314void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
3315{
3316 struct hci_dev *hdev = conn->hdev;
3317 struct hci_sco_hdr hdr;
3318
3319 BT_DBG("%s len %d", hdev->name, skb->len);
3320
aca3192c 3321 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
3322 hdr.dlen = skb->len;
3323
badff6d0
ACM
3324 skb_push(skb, HCI_SCO_HDR_SIZE);
3325 skb_reset_transport_header(skb);
9c70220b 3326 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 3327
d79f34e3 3328 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
c78ae283 3329
1da177e4 3330 skb_queue_tail(&conn->data_q, skb);
3eff45ea 3331 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3332}
1da177e4 3333
26afbd82
LAD
3334/* Send ISO data */
3335static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
3336{
3337 struct hci_iso_hdr *hdr;
3338 int len = skb->len;
3339
3340 skb_push(skb, HCI_ISO_HDR_SIZE);
3341 skb_reset_transport_header(skb);
3342 hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
3343 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3344 hdr->dlen = cpu_to_le16(len);
3345}
3346
3347static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
3348 struct sk_buff *skb)
3349{
3350 struct hci_dev *hdev = conn->hdev;
3351 struct sk_buff *list;
3352 __u16 flags;
3353
3354 skb->len = skb_headlen(skb);
3355 skb->data_len = 0;
3356
3357 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3358
3359 list = skb_shinfo(skb)->frag_list;
3360
3361 flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
3362 hci_add_iso_hdr(skb, conn->handle, flags);
3363
3364 if (!list) {
3365 /* Non fragmented */
3366 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3367
3368 skb_queue_tail(queue, skb);
3369 } else {
3370 /* Fragmented */
3371 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3372
3373 skb_shinfo(skb)->frag_list = NULL;
3374
3375 __skb_queue_tail(queue, skb);
3376
3377 do {
3378 skb = list; list = list->next;
3379
3380 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3381 flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
3382 0x00);
3383 hci_add_iso_hdr(skb, conn->handle, flags);
3384
3385 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3386
3387 __skb_queue_tail(queue, skb);
3388 } while (list);
3389 }
3390}
3391
3392void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
3393{
3394 struct hci_dev *hdev = conn->hdev;
3395
3396 BT_DBG("%s len %d", hdev->name, skb->len);
3397
3398 hci_queue_iso(conn, &conn->data_q, skb);
3399
3400 queue_work(hdev->workqueue, &hdev->tx_work);
3401}
3402
1da177e4
LT
3403/* ---- HCI TX task (outgoing data) ---- */
3404
3405/* HCI Connection scheduler */
26afbd82
LAD
3406static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
3407{
3408 struct hci_dev *hdev;
3409 int cnt, q;
3410
3411 if (!conn) {
3412 *quote = 0;
3413 return;
3414 }
3415
3416 hdev = conn->hdev;
3417
3418 switch (conn->type) {
3419 case ACL_LINK:
3420 cnt = hdev->acl_cnt;
3421 break;
3422 case AMP_LINK:
3423 cnt = hdev->block_cnt;
3424 break;
3425 case SCO_LINK:
3426 case ESCO_LINK:
3427 cnt = hdev->sco_cnt;
3428 break;
3429 case LE_LINK:
3430 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3431 break;
3432 case ISO_LINK:
3433 cnt = hdev->iso_mtu ? hdev->iso_cnt :
3434 hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3435 break;
3436 default:
3437 cnt = 0;
3438 bt_dev_err(hdev, "unknown link type %d", conn->type);
3439 }
3440
3441 q = cnt / num;
3442 *quote = q ? q : 1;
3443}
3444
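A worked example of the fair-share arithmetic in hci_quote_sent(): with cnt == 8 free ACL buffers and num == 3 connections waiting to transmit, q = 8 / 3 = 2, so each connection may send up to two packets this round; when cnt / num rounds down to zero, the quote is clamped to 1 so no connection is starved outright.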
6039aa73
GP
3445static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3446 int *quote)
1da177e4
LT
3447{
3448 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 3449 struct hci_conn *conn = NULL, *c;
abc5de8f 3450 unsigned int num = 0, min = ~0;
1da177e4 3451
8e87d142 3452 /* We don't have to lock the device here. Connections are always
1da177e4 3453 * added and removed with the TX task disabled. */
bf4c6325
GP
3454
3455 rcu_read_lock();
3456
3457 list_for_each_entry_rcu(c, &h->list, list) {
769be974 3458 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 3459 continue;
769be974
MH
3460
3461 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3462 continue;
3463
1da177e4
LT
3464 num++;
3465
3466 if (c->sent < min) {
3467 min = c->sent;
3468 conn = c;
3469 }
52087a79
LAD
3470
3471 if (hci_conn_num(hdev, type) == num)
3472 break;
1da177e4
LT
3473 }
3474
bf4c6325
GP
3475 rcu_read_unlock();
3476
26afbd82 3477 hci_quote_sent(conn, num, quote);
1da177e4
LT
3478
3479 BT_DBG("conn %p quote %d", conn, *quote);
3480 return conn;
3481}
3482
6039aa73 3483static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
3484{
3485 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 3486 struct hci_conn *c;
1da177e4 3487
2064ee33 3488 bt_dev_err(hdev, "link tx timeout");
1da177e4 3489
bf4c6325
GP
3490 rcu_read_lock();
3491
1da177e4 3492 /* Kill stalled connections */
bf4c6325 3493 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 3494 if (c->type == type && c->sent) {
2064ee33
MH
3495 bt_dev_err(hdev, "killing stalled connection %pMR",
3496 &c->dst);
c7eaf80b
YH
3497 /* hci_disconnect might sleep, so we have to release
3498 * the RCU read lock before calling it.
3499 */
3500 rcu_read_unlock();
bed71748 3501 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
c7eaf80b 3502 rcu_read_lock();
1da177e4
LT
3503 }
3504 }
bf4c6325
GP
3505
3506 rcu_read_unlock();
1da177e4
LT
3507}
3508
6039aa73
GP
3509static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3510 int *quote)
1da177e4 3511{
73d80deb
LAD
3512 struct hci_conn_hash *h = &hdev->conn_hash;
3513 struct hci_chan *chan = NULL;
abc5de8f 3514 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 3515 struct hci_conn *conn;
26afbd82 3516 int conn_num = 0;
73d80deb
LAD
3517
3518 BT_DBG("%s", hdev->name);
3519
bf4c6325
GP
3520 rcu_read_lock();
3521
3522 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
3523 struct hci_chan *tmp;
3524
3525 if (conn->type != type)
3526 continue;
3527
3528 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3529 continue;
3530
3531 conn_num++;
3532
8192edef 3533 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
3534 struct sk_buff *skb;
3535
3536 if (skb_queue_empty(&tmp->data_q))
3537 continue;
3538
3539 skb = skb_peek(&tmp->data_q);
3540 if (skb->priority < cur_prio)
3541 continue;
3542
3543 if (skb->priority > cur_prio) {
3544 num = 0;
3545 min = ~0;
3546 cur_prio = skb->priority;
3547 }
3548
3549 num++;
3550
3551 if (conn->sent < min) {
3552 min = conn->sent;
3553 chan = tmp;
3554 }
3555 }
3556
3557 if (hci_conn_num(hdev, type) == conn_num)
3558 break;
3559 }
3560
bf4c6325
GP
3561 rcu_read_unlock();
3562
73d80deb
LAD
3563 if (!chan)
3564 return NULL;
3565
26afbd82 3566 hci_quote_sent(chan->conn, num, quote);
73d80deb 3567
73d80deb
LAD
3568 BT_DBG("chan %p quote %d", chan, *quote);
3569 return chan;
3570}
3571
02b20f0b
LAD
3572static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3573{
3574 struct hci_conn_hash *h = &hdev->conn_hash;
3575 struct hci_conn *conn;
3576 int num = 0;
3577
3578 BT_DBG("%s", hdev->name);
3579
bf4c6325
GP
3580 rcu_read_lock();
3581
3582 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
3583 struct hci_chan *chan;
3584
3585 if (conn->type != type)
3586 continue;
3587
3588 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3589 continue;
3590
3591 num++;
3592
8192edef 3593 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
3594 struct sk_buff *skb;
3595
3596 if (chan->sent) {
3597 chan->sent = 0;
3598 continue;
3599 }
3600
3601 if (skb_queue_empty(&chan->data_q))
3602 continue;
3603
3604 skb = skb_peek(&chan->data_q);
3605 if (skb->priority >= HCI_PRIO_MAX - 1)
3606 continue;
3607
3608 skb->priority = HCI_PRIO_MAX - 1;
3609
3610 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 3611 skb->priority);
02b20f0b
LAD
3612 }
3613
3614 if (hci_conn_num(hdev, type) == num)
3615 break;
3616 }
bf4c6325
GP
3617
3618 rcu_read_unlock();
3619
02b20f0b
LAD
3620}
3621
b71d385a
AE
3622static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3623{
3624 /* Calculate count of blocks used by this packet */
3625 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3626}
3627
116523c8 3628static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
73d80deb 3629{
116523c8
LAD
3630 unsigned long last_tx;
3631
3632 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
3633 return;
3634
3635 switch (type) {
3636 case LE_LINK:
3637 last_tx = hdev->le_last_tx;
3638 break;
3639 default:
3640 last_tx = hdev->acl_last_tx;
3641 break;
1da177e4 3642 }
116523c8
LAD
3643
3644 /* tx timeout must be longer than maximum link supervision timeout
3645 * (40.9 seconds)
3646 */
3647 if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
3648 hci_link_tx_to(hdev, type);
63d2bc1b 3649}
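On the 40.9-second figure in the comment above: the largest Link Supervision Timeout the specification allows is 0xFFFF slots * 0.625 ms, roughly 40.96 seconds, so HCI_ACL_TX_TIMEOUT (45 seconds in hci.h) is deliberately longer, and a link is declared stalled only after the controller itself would have detected a dead connection.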
1da177e4 3650
7fedd3bb
APS
3651/* Schedule SCO */
3652static void hci_sched_sco(struct hci_dev *hdev)
3653{
3654 struct hci_conn *conn;
3655 struct sk_buff *skb;
3656 int quote;
3657
3658 BT_DBG("%s", hdev->name);
3659
3660 if (!hci_conn_num(hdev, SCO_LINK))
3661 return;
3662
3663 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3664 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3665 BT_DBG("skb %p len %d", skb, skb->len);
3666 hci_send_frame(hdev, skb);
3667
3668 conn->sent++;
3669 if (conn->sent == ~0)
3670 conn->sent = 0;
3671 }
3672 }
3673}
3674
3675static void hci_sched_esco(struct hci_dev *hdev)
3676{
3677 struct hci_conn *conn;
3678 struct sk_buff *skb;
3679 int quote;
3680
3681 BT_DBG("%s", hdev->name);
3682
3683 if (!hci_conn_num(hdev, ESCO_LINK))
3684 return;
3685
3686 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3687 &quote))) {
3688 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3689 BT_DBG("skb %p len %d", skb, skb->len);
3690 hci_send_frame(hdev, skb);
3691
3692 conn->sent++;
3693 if (conn->sent == ~0)
3694 conn->sent = 0;
3695 }
3696 }
3697}
3698
6039aa73 3699static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
3700{
3701 unsigned int cnt = hdev->acl_cnt;
3702 struct hci_chan *chan;
3703 struct sk_buff *skb;
3704 int quote;
3705
116523c8 3706 __check_timeout(hdev, cnt, ACL_LINK);
04837f64 3707
73d80deb 3708 while (hdev->acl_cnt &&
a8c5fb1a 3709 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
3710 u32 priority = (skb_peek(&chan->data_q))->priority;
3711 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 3712 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3713 skb->len, skb->priority);
73d80deb 3714
ec1cce24
LAD
3715 /* Stop if priority has changed */
3716 if (skb->priority < priority)
3717 break;
3718
3719 skb = skb_dequeue(&chan->data_q);
3720
73d80deb 3721 hci_conn_enter_active_mode(chan->conn,
04124681 3722 bt_cb(skb)->force_active);
04837f64 3723
57d17d70 3724 hci_send_frame(hdev, skb);
1da177e4
LT
3725 hdev->acl_last_tx = jiffies;
3726
3727 hdev->acl_cnt--;
73d80deb
LAD
3728 chan->sent++;
3729 chan->conn->sent++;
7fedd3bb
APS
3730
3731 /* Send pending SCO packets right away */
3732 hci_sched_sco(hdev);
3733 hci_sched_esco(hdev);
1da177e4
LT
3734 }
3735 }
02b20f0b
LAD
3736
3737 if (cnt != hdev->acl_cnt)
3738 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
3739}
3740
6039aa73 3741static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 3742{
63d2bc1b 3743 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
3744 struct hci_chan *chan;
3745 struct sk_buff *skb;
3746 int quote;
bd1eb66b 3747 u8 type;
b71d385a 3748
bd1eb66b
AE
3749 BT_DBG("%s", hdev->name);
3750
3751 if (hdev->dev_type == HCI_AMP)
3752 type = AMP_LINK;
3753 else
3754 type = ACL_LINK;
3755
116523c8
LAD
3756 __check_timeout(hdev, cnt, type);
3757
b71d385a 3758 while (hdev->block_cnt > 0 &&
bd1eb66b 3759 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
3760 u32 priority = (skb_peek(&chan->data_q))->priority;
3761 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3762 int blocks;
3763
3764 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3765 skb->len, skb->priority);
b71d385a
AE
3766
3767 /* Stop if priority has changed */
3768 if (skb->priority < priority)
3769 break;
3770
3771 skb = skb_dequeue(&chan->data_q);
3772
3773 blocks = __get_blocks(hdev, skb);
3774 if (blocks > hdev->block_cnt)
3775 return;
3776
3777 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 3778 bt_cb(skb)->force_active);
b71d385a 3779
57d17d70 3780 hci_send_frame(hdev, skb);
b71d385a
AE
3781 hdev->acl_last_tx = jiffies;
3782
3783 hdev->block_cnt -= blocks;
3784 quote -= blocks;
3785
3786 chan->sent += blocks;
3787 chan->conn->sent += blocks;
3788 }
3789 }
3790
3791 if (cnt != hdev->block_cnt)
bd1eb66b 3792 hci_prio_recalculate(hdev, type);
b71d385a
AE
3793}
3794
6039aa73 3795static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
3796{
3797 BT_DBG("%s", hdev->name);
3798
bd1eb66b 3799 /* No ACL link over BR/EDR controller */
ca8bee5d 3800 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
bd1eb66b
AE
3801 return;
3802
3803 /* No AMP link over AMP controller */
3804 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
3805 return;
3806
3807 switch (hdev->flow_ctl_mode) {
3808 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3809 hci_sched_acl_pkt(hdev);
3810 break;
3811
3812 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3813 hci_sched_acl_blk(hdev);
3814 break;
3815 }
3816}
3817
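/* --- Illustrative sketch, not part of hci_core.c ---
 * A standalone model of the drain loop in hci_sched_acl_pkt(): a channel
 * is drained while controller credits and the scheduler's quote last, and
 * stops early as soon as the head-of-queue priority drops below that of
 * the first packet, deferring lower-priority traffic to the next round.
 * The queue contents and credit counts are invented.
 */
#include <stdio.h>

int main(void)
{
	int prio_q[] = { 5, 5, 3, 3 }; /* priorities of queued skbs, head first */
	int qlen = 4, head = 0;
	int acl_cnt = 8, quote = 3;
	int priority = prio_q[0];

	while (acl_cnt && quote-- && head < qlen) {
		if (prio_q[head] < priority)
			break; /* "Stop if priority has changed" */
		printf("send skb with priority %d\n", prio_q[head]);
		head++;
		acl_cnt--; /* one controller buffer consumed */
	}
	printf("credits left %d, skbs left %d\n", acl_cnt, qlen - head);
	return 0;
}
/* --- end sketch --- */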
6039aa73 3818static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 3819{
73d80deb 3820 struct hci_chan *chan;
6ed58ec5 3821 struct sk_buff *skb;
02b20f0b 3822 int quote, cnt, tmp;
6ed58ec5
VT
3823
3824 BT_DBG("%s", hdev->name);
3825
52087a79
LAD
3826 if (!hci_conn_num(hdev, LE_LINK))
3827 return;
3828
6ed58ec5 3829 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
1b1d29e5 3830
116523c8 3831 __check_timeout(hdev, cnt, LE_LINK);
1b1d29e5 3832
02b20f0b 3833 tmp = cnt;
73d80deb 3834 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
3835 u32 priority = (skb_peek(&chan->data_q))->priority;
3836 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 3837 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3838 skb->len, skb->priority);
6ed58ec5 3839
ec1cce24
LAD
3840 /* Stop if priority has changed */
3841 if (skb->priority < priority)
3842 break;
3843
3844 skb = skb_dequeue(&chan->data_q);
3845
57d17d70 3846 hci_send_frame(hdev, skb);
6ed58ec5
VT
3847 hdev->le_last_tx = jiffies;
3848
3849 cnt--;
73d80deb
LAD
3850 chan->sent++;
3851 chan->conn->sent++;
7fedd3bb
APS
3852
3853 /* Send pending SCO packets right away */
3854 hci_sched_sco(hdev);
3855 hci_sched_esco(hdev);
6ed58ec5
VT
3856 }
3857 }
73d80deb 3858
6ed58ec5
VT
3859 if (hdev->le_pkts)
3860 hdev->le_cnt = cnt;
3861 else
3862 hdev->acl_cnt = cnt;
02b20f0b
LAD
3863
3864 if (cnt != tmp)
3865 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
3866}
3867
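/* --- Illustrative sketch, not part of hci_core.c ---
 * A standalone model of the credit-pool choice above: per LE Read Buffer
 * Size, le_pkts == 0 means the controller has no dedicated LE buffers, so
 * LE traffic must draw on the shared ACL credit pool instead.
 */
#include <stdio.h>

int main(void)
{
	int le_pkts = 0; /* controller reported no dedicated LE buffers */
	int le_cnt = 0, acl_cnt = 10;
	int cnt = le_pkts ? le_cnt : acl_cnt;

	printf("LE scheduler draws from the %s pool (%d credits)\n",
	       le_pkts ? "LE" : "shared ACL", cnt);
	return 0;
}
/* --- end sketch --- */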
26afbd82
LAD
3868/* Schedule CIS */
3869static void hci_sched_iso(struct hci_dev *hdev)
3870{
3871 struct hci_conn *conn;
3872 struct sk_buff *skb;
3873 int quote, *cnt;
3874
3875 BT_DBG("%s", hdev->name);
3876
3877 if (!hci_conn_num(hdev, ISO_LINK))
3878 return;
3879
3880 cnt = hdev->iso_pkts ? &hdev->iso_cnt :
3881 hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3882 while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) {
3883 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3884 BT_DBG("skb %p len %d", skb, skb->len);
3885 hci_send_frame(hdev, skb);
3886
3887 conn->sent++;
3888 if (conn->sent == ~0)
3889 conn->sent = 0;
3890 (*cnt)--;
3891 }
3892 }
3893}
3894
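/* --- Illustrative sketch, not part of hci_core.c ---
 * A standalone model of the pointer cascade above: ISO traffic prefers a
 * dedicated ISO credit pool, falls back to the LE pool, then to the shared
 * ACL pool, and decrements through the pointer so the chosen pool is the
 * one charged. The counts are invented.
 */
#include <stdio.h>

int main(void)
{
	int iso_pkts = 0, le_pkts = 8; /* no ISO pool, LE pool present */
	int iso_cnt = 0, le_cnt = 8, acl_cnt = 10;
	int *cnt = iso_pkts ? &iso_cnt : le_pkts ? &le_cnt : &acl_cnt;

	(*cnt)--; /* one ISO frame sent: charge the selected pool */
	printf("iso %d le %d acl %d\n", iso_cnt, le_cnt, acl_cnt); /* 0 7 10 */
	return 0;
}
/* --- end sketch --- */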
3eff45ea 3895static void hci_tx_work(struct work_struct *work)
1da177e4 3896{
3eff45ea 3897 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
3898 struct sk_buff *skb;
3899
26afbd82
LAD
3900 BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
3901 hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);
1da177e4 3902
d7a5a11d 3903 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
52de599e 3904 /* Schedule queues and send stuff to HCI driver */
52de599e
MH
3905 hci_sched_sco(hdev);
3906 hci_sched_esco(hdev);
26afbd82 3907 hci_sched_iso(hdev);
7fedd3bb 3908 hci_sched_acl(hdev);
52de599e
MH
3909 hci_sched_le(hdev);
3910 }
6ed58ec5 3911
1da177e4
LT
3912 /* Send next queued raw (unknown type) packet */
3913 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 3914 hci_send_frame(hdev, skb);
1da177e4
LT
3915}
3916
25985edc 3917/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
3918
3919/* ACL data packet */
6039aa73 3920static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
3921{
3922 struct hci_acl_hdr *hdr = (void *) skb->data;
3923 struct hci_conn *conn;
3924 __u16 handle, flags;
3925
3926 skb_pull(skb, HCI_ACL_HDR_SIZE);
3927
3928 handle = __le16_to_cpu(hdr->handle);
3929 flags = hci_flags(handle);
3930 handle = hci_handle(handle);
3931
f0e09510 3932 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 3933 handle, flags);
1da177e4
LT
3934
3935 hdev->stat.acl_rx++;
3936
3937 hci_dev_lock(hdev);
3938 conn = hci_conn_hash_lookup_handle(hdev, handle);
3939 hci_dev_unlock(hdev);
8e87d142 3940
1da177e4 3941 if (conn) {
65983fc7 3942 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 3943
1da177e4 3944 /* Send to upper protocol */
686ebf28
UF
3945 l2cap_recv_acldata(conn, skb, flags);
3946 return;
1da177e4 3947 } else {
2064ee33
MH
3948 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3949 handle);
1da177e4
LT
3950 }
3951
3952 kfree_skb(skb);
3953}
3954
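/* --- Illustrative sketch, not part of hci_core.c ---
 * A standalone model of the header parsing above: the 16-bit value in the
 * ACL header packs a 12-bit connection handle with 4 bits of packet
 * boundary/broadcast flags, mirroring the kernel's hci_handle() and
 * hci_flags() macros. The raw value is invented.
 */
#include <stdio.h>
#include <stdint.h>

#define hci_handle(h) ((h) & 0x0fff)
#define hci_flags(h)  ((h) >> 12)

int main(void)
{
	uint16_t raw = 0x2001; /* hypothetical on-the-wire handle field */

	printf("handle 0x%4.4x flags 0x%4.4x\n",
	       hci_handle(raw), hci_flags(raw)); /* handle 0x0001 flags 0x0002 */
	return 0;
}
/* --- end sketch --- */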
3955/* SCO data packet */
6039aa73 3956static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
3957{
3958 struct hci_sco_hdr *hdr = (void *) skb->data;
3959 struct hci_conn *conn;
debdedf2 3960 __u16 handle, flags;
1da177e4
LT
3961
3962 skb_pull(skb, HCI_SCO_HDR_SIZE);
3963
3964 handle = __le16_to_cpu(hdr->handle);
debdedf2
MH
3965 flags = hci_flags(handle);
3966 handle = hci_handle(handle);
1da177e4 3967
debdedf2
MH
3968 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3969 handle, flags);
1da177e4
LT
3970
3971 hdev->stat.sco_rx++;
3972
3973 hci_dev_lock(hdev);
3974 conn = hci_conn_hash_lookup_handle(hdev, handle);
3975 hci_dev_unlock(hdev);
3976
3977 if (conn) {
1da177e4 3978 /* Send to upper protocol */
3f19ffb2 3979 hci_skb_pkt_status(skb) = flags & 0x03;
686ebf28
UF
3980 sco_recv_scodata(conn, skb);
3981 return;
1da177e4 3982 } else {
2d4b37b6
LAD
3983 bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3984 handle);
1da177e4
LT
3985 }
3986
3987 kfree_skb(skb);
3988}
3989
26afbd82
LAD
3990static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3991{
3992 struct hci_iso_hdr *hdr;
3993 struct hci_conn *conn;
3994 __u16 handle, flags;
3995
3996 hdr = skb_pull_data(skb, sizeof(*hdr));
3997 if (!hdr) {
3998 bt_dev_err(hdev, "ISO packet too small");
3999 goto drop;
4000 }
4001
4002 handle = __le16_to_cpu(hdr->handle);
4003 flags = hci_flags(handle);
4004 handle = hci_handle(handle);
4005
4006 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
4007 handle, flags);
4008
4009 hci_dev_lock(hdev);
4010 conn = hci_conn_hash_lookup_handle(hdev, handle);
4011 hci_dev_unlock(hdev);
4012
26afbd82
LAD
4013 if (!conn) {
4014 bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
4015 handle);
ccf74f23 4016 goto drop;
26afbd82
LAD
4017 }
4018
ccf74f23
LAD
4019 /* Send to upper protocol */
4020 iso_recv(conn, skb, flags);
4021 return;
4022
26afbd82
LAD
4023drop:
4024 kfree_skb(skb);
4025}
4026
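/* --- Illustrative sketch, not part of hci_core.c ---
 * A standalone model of the bounds-checked header pull above: rather than
 * casting skb->data blindly, skb_pull_data()-style parsing verifies the
 * buffer actually holds a complete header and returns NULL on a truncated
 * packet, which the caller turns into a clean drop.
 */
#include <stdio.h>
#include <stddef.h>

static void *pull_data(unsigned char **data, size_t *len, size_t n)
{
	void *p;

	if (*len < n)
		return NULL; /* truncated: nothing is consumed */
	p = *data;
	*data += n;
	*len -= n;
	return p;
}

int main(void)
{
	unsigned char pkt[2] = { 0x01, 0x20 }; /* too short for a 4-byte hdr */
	unsigned char *data = pkt;
	size_t len = sizeof(pkt);

	if (!pull_data(&data, &len, 4))
		printf("ISO packet too small -> drop\n");
	return 0;
}
/* --- end sketch --- */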
9238f36a
JH
4027static bool hci_req_is_complete(struct hci_dev *hdev)
4028{
4029 struct sk_buff *skb;
4030
4031 skb = skb_peek(&hdev->cmd_q);
4032 if (!skb)
4033 return true;
4034
44d27137 4035 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
9238f36a
JH
4036}
4037
42c6b129
JH
4038static void hci_resend_last(struct hci_dev *hdev)
4039{
4040 struct hci_command_hdr *sent;
4041 struct sk_buff *skb;
4042 u16 opcode;
4043
4044 if (!hdev->sent_cmd)
4045 return;
4046
4047 sent = (void *) hdev->sent_cmd->data;
4048 opcode = __le16_to_cpu(sent->opcode);
4049 if (opcode == HCI_OP_RESET)
4050 return;
4051
4052 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4053 if (!skb)
4054 return;
4055
4056 skb_queue_head(&hdev->cmd_q, skb);
4057 queue_work(hdev->workqueue, &hdev->cmd_work);
4058}
4059
e6214487
JH
4060void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4061 hci_req_complete_t *req_complete,
4062 hci_req_complete_skb_t *req_complete_skb)
9238f36a 4063{
9238f36a
JH
4064 struct sk_buff *skb;
4065 unsigned long flags;
4066
4067 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4068
42c6b129
JH
4069 /* If the completed command doesn't match the last one that was
4070 * sent, we need to do special handling of it.
9238f36a 4071 */
42c6b129
JH
4072 if (!hci_sent_cmd_data(hdev, opcode)) {
4073 /* Some CSR-based controllers generate a spontaneous
4074 * reset complete event during init, and any pending
4075 * command will then never be completed. In such a case
4076 * we need to resend whatever the last sent command
4077 * was.
4078 */
4079 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4080 hci_resend_last(hdev);
4081
9238f36a 4082 return;
42c6b129 4083 }
9238f36a 4084
f80c5dad
JPRV
4085 /* If we reach this point this event matches the last command sent */
4086 hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
4087
9238f36a
JH
4088 /* If the command succeeded and there's still more commands in
4089 * this request the request is not yet complete.
4090 */
4091 if (!status && !hci_req_is_complete(hdev))
4092 return;
4093
2615fd9a
LAD
4094 skb = hdev->req_skb;
4095
9238f36a 4096 /* If this was the last command in a request, the complete
2615fd9a 4097 * callback would be found in hdev->req_skb instead of the
9238f36a
JH
4098 * command queue (hdev->cmd_q).
4099 */
2615fd9a
LAD
4100 if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
4101 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
e6214487
JH
4102 return;
4103 }
53e21fbc 4104
2615fd9a
LAD
4105 if (skb && bt_cb(skb)->hci.req_complete) {
4106 *req_complete = bt_cb(skb)->hci.req_complete;
e6214487 4107 return;
9238f36a
JH
4108 }
4109
4110 /* Remove all pending commands belonging to this request */
4111 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4112 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
44d27137 4113 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
9238f36a
JH
4114 __skb_queue_head(&hdev->cmd_q, skb);
4115 break;
4116 }
4117
3bd7594e
DA
4118 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4119 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4120 else
4121 *req_complete = bt_cb(skb)->hci.req_complete;
39c1eb6f 4122 dev_kfree_skb_irq(skb);
9238f36a
JH
4123 }
4124 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
9238f36a
JH
4125}
4126
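/* --- Illustrative sketch, not part of hci_core.c ---
 * A standalone model of the cleanup loop above: the commands of one
 * request sit contiguously in cmd_q and only the first command of the
 * *next* request carries HCI_REQ_START, so draining until that marker
 * discards exactly the remainder of the aborted request. Queue contents
 * are invented.
 */
#include <stdio.h>

#define HCI_REQ_START 0x01 /* matches the kernel's flag bit */

int main(void)
{
	/* req_flags of queued commands: tail of current request, then a new one */
	unsigned int q[] = { 0, 0, HCI_REQ_START, 0 };

	for (int i = 0; i < 4; i++) {
		if (q[i] & HCI_REQ_START) {
			printf("stop: next request starts at index %d\n", i);
			break;
		}
		printf("discard pending command %d\n", i);
	}
	return 0;
}
/* --- end sketch --- */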
b78752cc 4127static void hci_rx_work(struct work_struct *work)
1da177e4 4128{
b78752cc 4129 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
4130 struct sk_buff *skb;
4131
4132 BT_DBG("%s", hdev->name);
4133
9f30de9e
TK
4134 /* The kcov_remote functions are used to collect packet-parsing
4135 * coverage from this background thread and to associate it with
4136 * the thread of the syscall that originally injected the packet.
4137 * This helps with fuzzing the kernel.
4138 */
4139 for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
4140 kcov_remote_start_common(skb_get_kcov_handle(skb));
4141
cd82e61c
MH
4142 /* Send copy to monitor */
4143 hci_send_to_monitor(hdev, skb);
4144
1da177e4
LT
4145 if (atomic_read(&hdev->promisc)) {
4146 /* Send copy to the sockets */
470fe1b5 4147 hci_send_to_sock(hdev, skb);
1da177e4
LT
4148 }
4149
eb8c101e
MK
4150 /* If the device has been opened in HCI_USER_CHANNEL,
4151 * userspace has exclusive access to the device.
4152 * While the device is in HCI_INIT, we still need to pass
4153 * the data packets on to the driver so that it
4154 * can complete its setup().
4155 */
4156 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4157 !test_bit(HCI_INIT, &hdev->flags)) {
1da177e4
LT
4158 kfree_skb(skb);
4159 continue;
4160 }
4161
4162 if (test_bit(HCI_INIT, &hdev->flags)) {
4163 /* Don't process data packets in this state. */
d79f34e3 4164 switch (hci_skb_pkt_type(skb)) {
1da177e4
LT
4165 case HCI_ACLDATA_PKT:
4166 case HCI_SCODATA_PKT:
cc974003 4167 case HCI_ISODATA_PKT:
1da177e4
LT
4168 kfree_skb(skb);
4169 continue;
3ff50b79 4170 }
1da177e4
LT
4171 }
4172
4173 /* Process frame */
d79f34e3 4174 switch (hci_skb_pkt_type(skb)) {
1da177e4 4175 case HCI_EVENT_PKT:
b78752cc 4176 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
4177 hci_event_packet(hdev, skb);
4178 break;
4179
4180 case HCI_ACLDATA_PKT:
4181 BT_DBG("%s ACL data packet", hdev->name);
4182 hci_acldata_packet(hdev, skb);
4183 break;
4184
4185 case HCI_SCODATA_PKT:
4186 BT_DBG("%s SCO data packet", hdev->name);
4187 hci_scodata_packet(hdev, skb);
4188 break;
4189
26afbd82
LAD
4190 case HCI_ISODATA_PKT:
4191 BT_DBG("%s ISO data packet", hdev->name);
4192 hci_isodata_packet(hdev, skb);
4193 break;
4194
1da177e4
LT
4195 default:
4196 kfree_skb(skb);
4197 break;
4198 }
4199 }
1da177e4
LT
4200}
4201
63298d6e
LAD
4202static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
4203{
4204 int err;
4205
4206 bt_dev_dbg(hdev, "skb %p", skb);
4207
4208 kfree_skb(hdev->sent_cmd);
4209
4210 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4211 if (!hdev->sent_cmd) {
4212 skb_queue_head(&hdev->cmd_q, skb);
4213 queue_work(hdev->workqueue, &hdev->cmd_work);
4214 return;
4215 }
4216
4217 err = hci_send_frame(hdev, skb);
4218 if (err < 0) {
6946b9c9 4219 hci_cmd_sync_cancel_sync(hdev, -err);
63298d6e
LAD
4220 return;
4221 }
4222
2615fd9a
LAD
4223 if (hci_req_status_pend(hdev) &&
4224 !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
4225 kfree_skb(hdev->req_skb);
947ec0d0 4226 hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
2615fd9a 4227 }
63298d6e
LAD
4228
4229 atomic_dec(&hdev->cmd_cnt);
4230}
4231
c347b765 4232static void hci_cmd_work(struct work_struct *work)
1da177e4 4233{
c347b765 4234 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
4235 struct sk_buff *skb;
4236
2104786b
AE
4237 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4238 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 4239
1da177e4 4240 /* Send queued commands */
5a08ecce
AE
4241 if (atomic_read(&hdev->cmd_cnt)) {
4242 skb = skb_dequeue(&hdev->cmd_q);
4243 if (!skb)
4244 return;
4245
63298d6e 4246 hci_send_cmd_sync(hdev, skb);
2250abad 4247
63298d6e
LAD
4248 rcu_read_lock();
4249 if (test_bit(HCI_RESET, &hdev->flags) ||
4250 hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
4251 cancel_delayed_work(&hdev->cmd_timer);
4252 else
4253 queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
4254 HCI_CMD_TIMEOUT);
4255 rcu_read_unlock();
1da177e4
LT
4256 }
4257}