]> git.ipfire.org Git - thirdparty/kernel/stable.git/blob - net/bluetooth/hci_event.c
Bluetooth: Implement Set ADV set random address
[thirdparty/kernel/stable.git] / net / bluetooth / hci_event.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
35 #include "a2mp.h"
36 #include "amp.h"
37 #include "smp.h"
38
39 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
40 "\x00\x00\x00\x00\x00\x00\x00\x00"
41
42 /* Handle HCI Event packets */
43
/* Command Complete handler for HCI_OP_INQUIRY_CANCEL.
 *
 * On success, clears the HCI_INQUIRY flag, wakes any waiters blocked on
 * that bit, and moves discovery to DISCOVERY_STOPPED unless an LE active
 * scan is still running (in which case discovery is still in progress).
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	/* Set discovery state to stopped if we're not doing LE active
	 * scanning.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
	    hdev->le_scan_type != LE_SCAN_ACTIVE)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
68
69 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
70 {
71 __u8 status = *((__u8 *) skb->data);
72
73 BT_DBG("%s status 0x%2.2x", hdev->name, status);
74
75 if (status)
76 return;
77
78 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
79 }
80
81 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
82 {
83 __u8 status = *((__u8 *) skb->data);
84
85 BT_DBG("%s status 0x%2.2x", hdev->name, status);
86
87 if (status)
88 return;
89
90 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
91
92 hci_conn_check_pending(hdev);
93 }
94
/* Command Complete handler for HCI_OP_REMOTE_NAME_REQ_CANCEL.
 *
 * Nothing to update on the host side; the completion is only traced.
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
100
101 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
102 {
103 struct hci_rp_role_discovery *rp = (void *) skb->data;
104 struct hci_conn *conn;
105
106 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
107
108 if (rp->status)
109 return;
110
111 hci_dev_lock(hdev);
112
113 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
114 if (conn)
115 conn->role = rp->role;
116
117 hci_dev_unlock(hdev);
118 }
119
120 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
121 {
122 struct hci_rp_read_link_policy *rp = (void *) skb->data;
123 struct hci_conn *conn;
124
125 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
126
127 if (rp->status)
128 return;
129
130 hci_dev_lock(hdev);
131
132 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
133 if (conn)
134 conn->link_policy = __le16_to_cpu(rp->policy);
135
136 hci_dev_unlock(hdev);
137 }
138
/* Command Complete handler for HCI_OP_WRITE_LINK_POLICY.
 *
 * The reply only carries the handle, so the policy value is mirrored
 * from the parameters of the command we originally sent.
 */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* sent + 2 skips the 16-bit handle at the start of the
		 * command parameters; the policy field follows it.
		 */
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
162
163 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
164 struct sk_buff *skb)
165 {
166 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
167
168 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
169
170 if (rp->status)
171 return;
172
173 hdev->link_policy = __le16_to_cpu(rp->policy);
174 }
175
176 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
177 struct sk_buff *skb)
178 {
179 __u8 status = *((__u8 *) skb->data);
180 void *sent;
181
182 BT_DBG("%s status 0x%2.2x", hdev->name, status);
183
184 if (status)
185 return;
186
187 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
188 if (!sent)
189 return;
190
191 hdev->link_policy = get_unaligned_le16(sent);
192 }
193
/* Command Complete handler for HCI_OP_RESET.
 *
 * The controller has dropped all of its state, so mirror that on the
 * host side: clear the volatile flags, stop discovery and forget every
 * cached value the reset invalidated (adv/scan response data, TX power,
 * white list and resolving list entries).
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* The reset attempt is over either way; allow new resets. */
	clear_bit(HCI_RESET, &hdev->flags);

	if (status)
		return;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
}
226
227 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
228 struct sk_buff *skb)
229 {
230 struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
231 struct hci_cp_read_stored_link_key *sent;
232
233 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
234
235 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
236 if (!sent)
237 return;
238
239 if (!rp->status && sent->read_all == 0x01) {
240 hdev->stored_max_keys = rp->max_keys;
241 hdev->stored_num_keys = rp->num_keys;
242 }
243 }
244
245 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
246 struct sk_buff *skb)
247 {
248 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
249
250 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
251
252 if (rp->status)
253 return;
254
255 if (rp->num_keys <= hdev->stored_num_keys)
256 hdev->stored_num_keys -= rp->num_keys;
257 else
258 hdev->stored_num_keys = 0;
259 }
260
/* Command Complete handler for HCI_OP_WRITE_LOCAL_NAME.
 *
 * When mgmt is in control it is told about the result (success or
 * failure); otherwise the name we sent is cached on success.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
281
282 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
283 {
284 struct hci_rp_read_local_name *rp = (void *) skb->data;
285
286 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
287
288 if (rp->status)
289 return;
290
291 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
292 hci_dev_test_flag(hdev, HCI_CONFIG))
293 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
294 }
295
/* Command Complete handler for HCI_OP_WRITE_AUTH_ENABLE.
 *
 * Keeps the HCI_AUTH flag in sync with the parameter we sent and, when
 * mgmt is in control, reports the outcome (even on failure) so a
 * pending mgmt command can be completed.
 */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, status);

	hci_dev_unlock(hdev);
}
323
324 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
325 {
326 __u8 status = *((__u8 *) skb->data);
327 __u8 param;
328 void *sent;
329
330 BT_DBG("%s status 0x%2.2x", hdev->name, status);
331
332 if (status)
333 return;
334
335 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
336 if (!sent)
337 return;
338
339 param = *((__u8 *) sent);
340
341 if (param)
342 set_bit(HCI_ENCRYPT, &hdev->flags);
343 else
344 clear_bit(HCI_ENCRYPT, &hdev->flags);
345 }
346
/* Command Complete handler for HCI_OP_WRITE_SCAN_ENABLE.
 *
 * Mirrors the requested inquiry/page scan bits into HCI_ISCAN and
 * HCI_PSCAN. On failure the discoverable timeout is cleared, since the
 * requested discoverable state was not entered.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);
}
381
382 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
383 {
384 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
385
386 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
387
388 if (rp->status)
389 return;
390
391 memcpy(hdev->dev_class, rp->dev_class, 3);
392
393 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
394 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
395 }
396
/* Command Complete handler for HCI_OP_WRITE_CLASS_OF_DEV.
 *
 * Caches the Class of Device we sent on success and always informs
 * mgmt, which may have a pending set_class command to complete.
 */
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}
418
419 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
420 {
421 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
422 __u16 setting;
423
424 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
425
426 if (rp->status)
427 return;
428
429 setting = __le16_to_cpu(rp->voice_setting);
430
431 if (hdev->voice_setting == setting)
432 return;
433
434 hdev->voice_setting = setting;
435
436 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
437
438 if (hdev->notify)
439 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
440 }
441
442 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
443 struct sk_buff *skb)
444 {
445 __u8 status = *((__u8 *) skb->data);
446 __u16 setting;
447 void *sent;
448
449 BT_DBG("%s status 0x%2.2x", hdev->name, status);
450
451 if (status)
452 return;
453
454 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
455 if (!sent)
456 return;
457
458 setting = get_unaligned_le16(sent);
459
460 if (hdev->voice_setting == setting)
461 return;
462
463 hdev->voice_setting = setting;
464
465 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
466
467 if (hdev->notify)
468 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
469 }
470
471 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
472 struct sk_buff *skb)
473 {
474 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
475
476 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
477
478 if (rp->status)
479 return;
480
481 hdev->num_iac = rp->num_iac;
482
483 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
484 }
485
/* Command Complete handler for HCI_OP_WRITE_SSP_MODE.
 *
 * Keeps the host SSP feature bit and the HCI_SSP_ENABLED flag in sync
 * with the mode we requested; mgmt is always told the outcome so it can
 * complete a pending Set SSP command.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	hci_dev_unlock(hdev);
}
517
518 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
519 {
520 u8 status = *((u8 *) skb->data);
521 struct hci_cp_write_sc_support *sent;
522
523 BT_DBG("%s status 0x%2.2x", hdev->name, status);
524
525 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
526 if (!sent)
527 return;
528
529 hci_dev_lock(hdev);
530
531 if (!status) {
532 if (sent->support)
533 hdev->features[1][0] |= LMP_HOST_SC;
534 else
535 hdev->features[1][0] &= ~LMP_HOST_SC;
536 }
537
538 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
539 if (sent->support)
540 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
541 else
542 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
543 }
544
545 hci_dev_unlock(hdev);
546 }
547
548 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
549 {
550 struct hci_rp_read_local_version *rp = (void *) skb->data;
551
552 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
553
554 if (rp->status)
555 return;
556
557 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
558 hci_dev_test_flag(hdev, HCI_CONFIG)) {
559 hdev->hci_ver = rp->hci_ver;
560 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
561 hdev->lmp_ver = rp->lmp_ver;
562 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
563 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
564 }
565 }
566
567 static void hci_cc_read_local_commands(struct hci_dev *hdev,
568 struct sk_buff *skb)
569 {
570 struct hci_rp_read_local_commands *rp = (void *) skb->data;
571
572 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
573
574 if (rp->status)
575 return;
576
577 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
578 hci_dev_test_flag(hdev, HCI_CONFIG))
579 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
580 }
581
/* Command Complete handler for HCI_OP_READ_LOCAL_FEATURES.
 *
 * Caches LMP feature page 0 and derives the usable ACL/SCO/eSCO packet
 * types from the feature bits the controller reports.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* EV3 is implied by basic eSCO capability. */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}
631
632 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
633 struct sk_buff *skb)
634 {
635 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
636
637 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
638
639 if (rp->status)
640 return;
641
642 if (hdev->max_page < rp->max_page)
643 hdev->max_page = rp->max_page;
644
645 if (rp->page < HCI_MAX_PAGES)
646 memcpy(hdev->features[rp->page], rp->features, 8);
647 }
648
649 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
650 struct sk_buff *skb)
651 {
652 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
653
654 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
655
656 if (rp->status)
657 return;
658
659 hdev->flow_ctl_mode = rp->mode;
660 }
661
/* Command Complete handler for HCI_OP_READ_BUFFER_SIZE.
 *
 * Caches the controller's ACL/SCO buffer geometry and initializes the
 * available-packet counters to the full quota.
 */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	/* Some controllers report bogus SCO buffer info; override it. */
	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
687
/* Command Complete handler for HCI_OP_READ_BD_ADDR.
 *
 * The public address is cached during init; while in the setup phase it
 * is additionally saved in setup_addr.
 */
static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		bacpy(&hdev->setup_addr, &rp->bdaddr);
}
703
704 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
705 struct sk_buff *skb)
706 {
707 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
708
709 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
710
711 if (rp->status)
712 return;
713
714 if (test_bit(HCI_INIT, &hdev->flags)) {
715 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
716 hdev->page_scan_window = __le16_to_cpu(rp->window);
717 }
718 }
719
720 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
721 struct sk_buff *skb)
722 {
723 u8 status = *((u8 *) skb->data);
724 struct hci_cp_write_page_scan_activity *sent;
725
726 BT_DBG("%s status 0x%2.2x", hdev->name, status);
727
728 if (status)
729 return;
730
731 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
732 if (!sent)
733 return;
734
735 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
736 hdev->page_scan_window = __le16_to_cpu(sent->window);
737 }
738
739 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
740 struct sk_buff *skb)
741 {
742 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
743
744 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
745
746 if (rp->status)
747 return;
748
749 if (test_bit(HCI_INIT, &hdev->flags))
750 hdev->page_scan_type = rp->type;
751 }
752
753 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
754 struct sk_buff *skb)
755 {
756 u8 status = *((u8 *) skb->data);
757 u8 *type;
758
759 BT_DBG("%s status 0x%2.2x", hdev->name, status);
760
761 if (status)
762 return;
763
764 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
765 if (type)
766 hdev->page_scan_type = *type;
767 }
768
/* Command Complete handler for HCI_OP_READ_DATA_BLOCK_SIZE (block-based
 * flow control). Caches the block MTU/length/count and starts with the
 * full block quota available.
 */
static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}
788
/* Command Complete handler for HCI_OP_READ_CLOCK.
 *
 * Depending on the "which" parameter of the command we sent, the reply
 * carries either the local clock (stored in hdev) or a piconet clock
 * plus accuracy for a specific connection.
 */
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	/* Guard against a truncated event before touching the fields. */
	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		/* 0x00 requested the local Bluetooth clock. */
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}
823
/* Cache the AMP controller capabilities reported by
 * HCI_OP_READ_LOCAL_AMP_INFO (bandwidth, latency, PDU size, timeouts).
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
}
845
846 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
847 struct sk_buff *skb)
848 {
849 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
850
851 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
852
853 if (rp->status)
854 return;
855
856 hdev->inq_tx_power = rp->tx_power;
857 }
858
/* Command Complete handler for HCI_OP_PIN_CODE_REPLY.
 *
 * Always lets mgmt complete a pending PIN reply first. On success the
 * PIN length from the command we sent is recorded on the matching ACL
 * connection.
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
886
/* Forward the PIN code negative reply result to mgmt (when managed). */
static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}
901
/* Command Complete handler for HCI_OP_LE_READ_BUFFER_SIZE.
 *
 * Caches the LE ACL MTU and packet count; the available-packet counter
 * starts out at the full quota.
 */
static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
}
919
920 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
921 struct sk_buff *skb)
922 {
923 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
924
925 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
926
927 if (rp->status)
928 return;
929
930 memcpy(hdev->le_features, rp->features, 8);
931 }
932
933 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
934 struct sk_buff *skb)
935 {
936 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
937
938 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
939
940 if (rp->status)
941 return;
942
943 hdev->adv_tx_power = rp->tx_power;
944 }
945
/* Forward the user confirmation reply result to mgmt (when managed). */
static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);
}
960
/* Forward the user confirmation negative reply result to mgmt. */
static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
976
/* Forward the user passkey reply result to mgmt (when managed). */
static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}
991
/* Forward the user passkey negative reply result to mgmt. */
static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
1007
/* Command Complete handler for HCI_OP_READ_LOCAL_OOB_DATA.
 * No state is cached here; the reply status is only traced.
 */
static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}
1015
/* Command Complete handler for HCI_OP_READ_LOCAL_OOB_EXT_DATA.
 * No state is cached here; the reply status is only traced.
 */
static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}
1023
1024 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1025 {
1026 __u8 status = *((__u8 *) skb->data);
1027 bdaddr_t *sent;
1028
1029 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1030
1031 if (status)
1032 return;
1033
1034 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1035 if (!sent)
1036 return;
1037
1038 hci_dev_lock(hdev);
1039
1040 bacpy(&hdev->random_addr, sent);
1041
1042 hci_dev_unlock(hdev);
1043 }
1044
1045 static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
1046 {
1047 __u8 status = *((__u8 *) skb->data);
1048 struct hci_cp_le_set_default_phy *cp;
1049
1050 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1051
1052 if (status)
1053 return;
1054
1055 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1056 if (!cp)
1057 return;
1058
1059 hci_dev_lock(hdev);
1060
1061 hdev->le_tx_def_phys = cp->tx_phys;
1062 hdev->le_rx_def_phys = cp->rx_phys;
1063
1064 hci_dev_unlock(hdev);
1065 }
1066
1067 static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
1068 struct sk_buff *skb)
1069 {
1070 __u8 status = *((__u8 *) skb->data);
1071 struct hci_cp_le_set_adv_set_rand_addr *cp;
1072 struct adv_info *adv_instance;
1073
1074 if (status)
1075 return;
1076
1077 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1078 if (!cp)
1079 return;
1080
1081 hci_dev_lock(hdev);
1082
1083 if (!hdev->cur_adv_instance) {
1084 /* Store in hdev for instance 0 (Set adv and Directed advs) */
1085 bacpy(&hdev->random_addr, &cp->bdaddr);
1086 } else {
1087 adv_instance = hci_find_adv_instance(hdev,
1088 hdev->cur_adv_instance);
1089 if (adv_instance)
1090 bacpy(&adv_instance->random_addr, &cp->bdaddr);
1091 }
1092
1093 hci_dev_unlock(hdev);
1094 }
1095
/* Command Complete handler for HCI_OP_LE_SET_ADV_ENABLE.
 *
 * Tracks HCI_LE_ADV according to the enable value we sent. When
 * advertising was enabled for an outgoing (peripheral) LE connection
 * attempt, arm the connection timeout so a silent failure is noticed.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		conn = hci_lookup_le_connect(hdev);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}
1130
1131 static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
1132 struct sk_buff *skb)
1133 {
1134 struct hci_cp_le_set_ext_adv_enable *cp;
1135 struct hci_cp_ext_adv_set *adv_set;
1136 __u8 status = *((__u8 *) skb->data);
1137
1138 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1139
1140 if (status)
1141 return;
1142
1143 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
1144 if (!cp)
1145 return;
1146
1147 adv_set = (void *) cp->data;
1148
1149 hci_dev_lock(hdev);
1150
1151 if (cp->enable) {
1152 struct hci_conn *conn;
1153
1154 hci_dev_set_flag(hdev, HCI_LE_ADV);
1155
1156 conn = hci_lookup_le_connect(hdev);
1157 if (conn)
1158 queue_delayed_work(hdev->workqueue,
1159 &conn->le_conn_timeout,
1160 conn->conn_timeout);
1161 } else {
1162 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1163 }
1164
1165 hci_dev_unlock(hdev);
1166 }
1167
1168 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1169 {
1170 struct hci_cp_le_set_scan_param *cp;
1171 __u8 status = *((__u8 *) skb->data);
1172
1173 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1174
1175 if (status)
1176 return;
1177
1178 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1179 if (!cp)
1180 return;
1181
1182 hci_dev_lock(hdev);
1183
1184 hdev->le_scan_type = cp->type;
1185
1186 hci_dev_unlock(hdev);
1187 }
1188
1189 static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
1190 struct sk_buff *skb)
1191 {
1192 struct hci_cp_le_set_ext_scan_params *cp;
1193 __u8 status = *((__u8 *) skb->data);
1194 struct hci_cp_le_scan_phy_params *phy_param;
1195
1196 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1197
1198 if (status)
1199 return;
1200
1201 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1202 if (!cp)
1203 return;
1204
1205 phy_param = (void *)cp->data;
1206
1207 hci_dev_lock(hdev);
1208
1209 hdev->le_scan_type = phy_param->type;
1210
1211 hci_dev_unlock(hdev);
1212 }
1213
1214 static bool has_pending_adv_report(struct hci_dev *hdev)
1215 {
1216 struct discovery_state *d = &hdev->discovery;
1217
1218 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1219 }
1220
1221 static void clear_pending_adv_report(struct hci_dev *hdev)
1222 {
1223 struct discovery_state *d = &hdev->discovery;
1224
1225 bacpy(&d->last_adv_addr, BDADDR_ANY);
1226 d->last_adv_data_len = 0;
1227 }
1228
/* Buffer an advertising report in the discovery state so it can be
 * delivered later.
 * NOTE(review): no bounds check on len here — callers must pass
 * len <= sizeof(d->last_adv_data); confirm at all call sites.
 */
static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 bdaddr_type, s8 rssi, u32 flags,
				     u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;

	bacpy(&d->last_adv_addr, bdaddr);
	d->last_adv_addr_type = bdaddr_type;
	d->last_adv_rssi = rssi;
	d->last_adv_flags = flags;
	memcpy(d->last_adv_data, data, len);
	d->last_adv_data_len = len;
}
1242
/* Common bookkeeping for LE scan enable/disable completion, shared by
 * the legacy and the extended scan enable command-complete handlers.
 */
static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
{
	hci_dev_lock(hdev);

	switch (enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		/* Active scanning starts fresh: drop any report cached
		 * from a previous scan cycle.
		 */
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			hci_req_reenable_advertising(hdev);

		break;

	default:
		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
			   enable);
		break;
	}

	hci_dev_unlock(hdev);
}
1299
1300 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1301 struct sk_buff *skb)
1302 {
1303 struct hci_cp_le_set_scan_enable *cp;
1304 __u8 status = *((__u8 *) skb->data);
1305
1306 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1307
1308 if (status)
1309 return;
1310
1311 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1312 if (!cp)
1313 return;
1314
1315 le_set_scan_enable_complete(hdev, cp->enable);
1316 }
1317
1318 static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
1319 struct sk_buff *skb)
1320 {
1321 struct hci_cp_le_set_ext_scan_enable *cp;
1322 __u8 status = *((__u8 *) skb->data);
1323
1324 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1325
1326 if (status)
1327 return;
1328
1329 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1330 if (!cp)
1331 return;
1332
1333 le_set_scan_enable_complete(hdev, cp->enable);
1334 }
1335
1336 static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
1337 struct sk_buff *skb)
1338 {
1339 struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;
1340
1341 BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
1342 rp->num_of_sets);
1343
1344 if (rp->status)
1345 return;
1346
1347 hdev->le_num_of_adv_sets = rp->num_of_sets;
1348 }
1349
1350 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1351 struct sk_buff *skb)
1352 {
1353 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1354
1355 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1356
1357 if (rp->status)
1358 return;
1359
1360 hdev->le_white_list_size = rp->size;
1361 }
1362
1363 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1364 struct sk_buff *skb)
1365 {
1366 __u8 status = *((__u8 *) skb->data);
1367
1368 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1369
1370 if (status)
1371 return;
1372
1373 hci_bdaddr_list_clear(&hdev->le_white_list);
1374 }
1375
1376 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1377 struct sk_buff *skb)
1378 {
1379 struct hci_cp_le_add_to_white_list *sent;
1380 __u8 status = *((__u8 *) skb->data);
1381
1382 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1383
1384 if (status)
1385 return;
1386
1387 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1388 if (!sent)
1389 return;
1390
1391 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1392 sent->bdaddr_type);
1393 }
1394
1395 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1396 struct sk_buff *skb)
1397 {
1398 struct hci_cp_le_del_from_white_list *sent;
1399 __u8 status = *((__u8 *) skb->data);
1400
1401 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1402
1403 if (status)
1404 return;
1405
1406 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1407 if (!sent)
1408 return;
1409
1410 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1411 sent->bdaddr_type);
1412 }
1413
1414 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1415 struct sk_buff *skb)
1416 {
1417 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1418
1419 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1420
1421 if (rp->status)
1422 return;
1423
1424 memcpy(hdev->le_states, rp->le_states, 8);
1425 }
1426
1427 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1428 struct sk_buff *skb)
1429 {
1430 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1431
1432 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1433
1434 if (rp->status)
1435 return;
1436
1437 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1438 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1439 }
1440
1441 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1442 struct sk_buff *skb)
1443 {
1444 struct hci_cp_le_write_def_data_len *sent;
1445 __u8 status = *((__u8 *) skb->data);
1446
1447 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1448
1449 if (status)
1450 return;
1451
1452 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1453 if (!sent)
1454 return;
1455
1456 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1457 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1458 }
1459
1460 static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
1461 struct sk_buff *skb)
1462 {
1463 __u8 status = *((__u8 *) skb->data);
1464
1465 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1466
1467 if (status)
1468 return;
1469
1470 hci_bdaddr_list_clear(&hdev->le_resolv_list);
1471 }
1472
1473 static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
1474 struct sk_buff *skb)
1475 {
1476 struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;
1477
1478 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1479
1480 if (rp->status)
1481 return;
1482
1483 hdev->le_resolv_list_size = rp->size;
1484 }
1485
1486 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1487 struct sk_buff *skb)
1488 {
1489 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1490
1491 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1492
1493 if (rp->status)
1494 return;
1495
1496 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1497 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1498 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1499 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
1500 }
1501
/* Handle command complete for HCI_OP_WRITE_LE_HOST_SUPPORTED.
 *
 * On success, mirror the values that were written to the controller
 * into the cached host feature bits and the LE-related device flags.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		/* Turning LE off also invalidates the advertising flag. */
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	/* Simultaneous LE and BR/EDR host support bit. */
	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);
}
1535
1536 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1537 {
1538 struct hci_cp_le_set_adv_param *cp;
1539 u8 status = *((u8 *) skb->data);
1540
1541 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1542
1543 if (status)
1544 return;
1545
1546 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1547 if (!cp)
1548 return;
1549
1550 hci_dev_lock(hdev);
1551 hdev->adv_addr_type = cp->own_address_type;
1552 hci_dev_unlock(hdev);
1553 }
1554
/* Handle command complete for HCI_OP_LE_SET_EXT_ADV_PARAMS.
 *
 * Caches the own-address type and the controller-selected TX power
 * (in hdev for instance 0, otherwise in the advertising instance) and
 * refreshes the advertising data now that the TX power is known.
 */
static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
	struct hci_cp_le_set_ext_adv_params *cp;
	struct adv_info *adv_instance;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_addr_type;
	if (!hdev->cur_adv_instance) {
		/* Store in hdev for instance 0 */
		hdev->adv_tx_power = rp->tx_power;
	} else {
		adv_instance = hci_find_adv_instance(hdev,
						     hdev->cur_adv_instance);
		if (adv_instance)
			adv_instance->tx_power = rp->tx_power;
	}
	/* Update adv data as tx power is known now */
	hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
	hci_dev_unlock(hdev);
}
1585
1586 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1587 {
1588 struct hci_rp_read_rssi *rp = (void *) skb->data;
1589 struct hci_conn *conn;
1590
1591 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1592
1593 if (rp->status)
1594 return;
1595
1596 hci_dev_lock(hdev);
1597
1598 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1599 if (conn)
1600 conn->rssi = rp->rssi;
1601
1602 hci_dev_unlock(hdev);
1603 }
1604
1605 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1606 {
1607 struct hci_cp_read_tx_power *sent;
1608 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1609 struct hci_conn *conn;
1610
1611 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1612
1613 if (rp->status)
1614 return;
1615
1616 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1617 if (!sent)
1618 return;
1619
1620 hci_dev_lock(hdev);
1621
1622 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1623 if (!conn)
1624 goto unlock;
1625
1626 switch (sent->type) {
1627 case 0x00:
1628 conn->tx_power = rp->tx_power;
1629 break;
1630 case 0x01:
1631 conn->max_tx_power = rp->tx_power;
1632 break;
1633 }
1634
1635 unlock:
1636 hci_dev_unlock(hdev);
1637 }
1638
1639 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1640 {
1641 u8 status = *((u8 *) skb->data);
1642 u8 *mode;
1643
1644 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1645
1646 if (status)
1647 return;
1648
1649 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1650 if (mode)
1651 hdev->ssp_debug_mode = *mode;
1652 }
1653
1654 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1655 {
1656 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1657
1658 if (status) {
1659 hci_conn_check_pending(hdev);
1660 return;
1661 }
1662
1663 set_bit(HCI_INQUIRY, &hdev->flags);
1664 }
1665
/* Handle command status for HCI_OP_CREATE_CONN.
 *
 * On failure, close down (or schedule a retry of) the connection object
 * created when the command was issued.  On success, make sure a
 * connection object exists so the later Connection Complete event has
 * something to attach to.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* HCI error 0x0c (Command Disallowed) is retried by
			 * moving the connection back to BT_CONNECT2 until
			 * the attempt count runs out; any other error (or
			 * too many attempts) closes the connection down.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				bt_dev_err(hdev, "no memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1703
1704 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1705 {
1706 struct hci_cp_add_sco *cp;
1707 struct hci_conn *acl, *sco;
1708 __u16 handle;
1709
1710 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1711
1712 if (!status)
1713 return;
1714
1715 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1716 if (!cp)
1717 return;
1718
1719 handle = __le16_to_cpu(cp->handle);
1720
1721 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1722
1723 hci_dev_lock(hdev);
1724
1725 acl = hci_conn_hash_lookup_handle(hdev, handle);
1726 if (acl) {
1727 sco = acl->link;
1728 if (sco) {
1729 sco->state = BT_CLOSED;
1730
1731 hci_connect_cfm(sco, status);
1732 hci_conn_del(sco);
1733 }
1734 }
1735
1736 hci_dev_unlock(hdev);
1737 }
1738
1739 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1740 {
1741 struct hci_cp_auth_requested *cp;
1742 struct hci_conn *conn;
1743
1744 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1745
1746 if (!status)
1747 return;
1748
1749 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1750 if (!cp)
1751 return;
1752
1753 hci_dev_lock(hdev);
1754
1755 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1756 if (conn) {
1757 if (conn->state == BT_CONFIG) {
1758 hci_connect_cfm(conn, status);
1759 hci_conn_drop(conn);
1760 }
1761 }
1762
1763 hci_dev_unlock(hdev);
1764 }
1765
1766 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1767 {
1768 struct hci_cp_set_conn_encrypt *cp;
1769 struct hci_conn *conn;
1770
1771 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1772
1773 if (!status)
1774 return;
1775
1776 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1777 if (!cp)
1778 return;
1779
1780 hci_dev_lock(hdev);
1781
1782 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1783 if (conn) {
1784 if (conn->state == BT_CONFIG) {
1785 hci_connect_cfm(conn, status);
1786 hci_conn_drop(conn);
1787 }
1788 }
1789
1790 hci_dev_unlock(hdev);
1791 }
1792
1793 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1794 struct hci_conn *conn)
1795 {
1796 if (conn->state != BT_CONFIG || !conn->out)
1797 return 0;
1798
1799 if (conn->pending_sec_level == BT_SECURITY_SDP)
1800 return 0;
1801
1802 /* Only request authentication for SSP connections or non-SSP
1803 * devices with sec_level MEDIUM or HIGH or if MITM protection
1804 * is requested.
1805 */
1806 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1807 conn->pending_sec_level != BT_SECURITY_FIPS &&
1808 conn->pending_sec_level != BT_SECURITY_HIGH &&
1809 conn->pending_sec_level != BT_SECURITY_MEDIUM)
1810 return 0;
1811
1812 return 1;
1813 }
1814
/* Issue a Remote Name Request for the given inquiry cache entry,
 * reusing the page-scan parameters and clock offset learned during
 * inquiry.
 *
 * Returns the result of hci_send_cmd() (0 on success).
 */
static int hci_resolve_name(struct hci_dev *hdev,
			    struct inquiry_entry *e)
{
	struct hci_cp_remote_name_req cp;

	memset(&cp, 0, sizeof(cp));

	bacpy(&cp.bdaddr, &e->data.bdaddr);
	cp.pscan_rep_mode = e->data.pscan_rep_mode;
	cp.pscan_mode = e->data.pscan_mode;
	cp.clock_offset = e->data.clock_offset;

	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
}
1829
1830 static bool hci_resolve_next_name(struct hci_dev *hdev)
1831 {
1832 struct discovery_state *discov = &hdev->discovery;
1833 struct inquiry_entry *e;
1834
1835 if (list_empty(&discov->resolve))
1836 return false;
1837
1838 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1839 if (!e)
1840 return false;
1841
1842 if (hci_resolve_name(hdev, e) == 0) {
1843 e->name_state = NAME_PENDING;
1844 return true;
1845 }
1846
1847 return false;
1848 }
1849
/* Common handling once a remote name lookup finished (either via a
 * Remote Name Request Complete event or a failed command status):
 * update mgmt connection state, report the resolved name and drive the
 * discovery name-resolving state machine forward.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	/* name == NULL means the remote name could not be obtained. */
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1898
/* Handle command status for HCI_OP_REMOTE_NAME_REQ.
 *
 * Only a failed status needs handling here; besides advancing the
 * discovery state machine it may also be the point where a pending
 * authentication for the connection has to be started.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Start authentication now; HCI_CONN_AUTH_PEND guards against
	 * issuing a second request while one is already in flight.
	 */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1941
1942 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1943 {
1944 struct hci_cp_read_remote_features *cp;
1945 struct hci_conn *conn;
1946
1947 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1948
1949 if (!status)
1950 return;
1951
1952 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1953 if (!cp)
1954 return;
1955
1956 hci_dev_lock(hdev);
1957
1958 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1959 if (conn) {
1960 if (conn->state == BT_CONFIG) {
1961 hci_connect_cfm(conn, status);
1962 hci_conn_drop(conn);
1963 }
1964 }
1965
1966 hci_dev_unlock(hdev);
1967 }
1968
1969 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1970 {
1971 struct hci_cp_read_remote_ext_features *cp;
1972 struct hci_conn *conn;
1973
1974 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1975
1976 if (!status)
1977 return;
1978
1979 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1980 if (!cp)
1981 return;
1982
1983 hci_dev_lock(hdev);
1984
1985 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1986 if (conn) {
1987 if (conn->state == BT_CONFIG) {
1988 hci_connect_cfm(conn, status);
1989 hci_conn_drop(conn);
1990 }
1991 }
1992
1993 hci_dev_unlock(hdev);
1994 }
1995
1996 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1997 {
1998 struct hci_cp_setup_sync_conn *cp;
1999 struct hci_conn *acl, *sco;
2000 __u16 handle;
2001
2002 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2003
2004 if (!status)
2005 return;
2006
2007 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2008 if (!cp)
2009 return;
2010
2011 handle = __le16_to_cpu(cp->handle);
2012
2013 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
2014
2015 hci_dev_lock(hdev);
2016
2017 acl = hci_conn_hash_lookup_handle(hdev, handle);
2018 if (acl) {
2019 sco = acl->link;
2020 if (sco) {
2021 sco->state = BT_CLOSED;
2022
2023 hci_connect_cfm(sco, status);
2024 hci_conn_del(sco);
2025 }
2026 }
2027
2028 hci_dev_unlock(hdev);
2029 }
2030
2031 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2032 {
2033 struct hci_cp_sniff_mode *cp;
2034 struct hci_conn *conn;
2035
2036 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2037
2038 if (!status)
2039 return;
2040
2041 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2042 if (!cp)
2043 return;
2044
2045 hci_dev_lock(hdev);
2046
2047 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2048 if (conn) {
2049 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2050
2051 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2052 hci_sco_setup(conn, status);
2053 }
2054
2055 hci_dev_unlock(hdev);
2056 }
2057
2058 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2059 {
2060 struct hci_cp_exit_sniff_mode *cp;
2061 struct hci_conn *conn;
2062
2063 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2064
2065 if (!status)
2066 return;
2067
2068 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2069 if (!cp)
2070 return;
2071
2072 hci_dev_lock(hdev);
2073
2074 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2075 if (conn) {
2076 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2077
2078 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2079 hci_sco_setup(conn, status);
2080 }
2081
2082 hci_dev_unlock(hdev);
2083 }
2084
2085 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2086 {
2087 struct hci_cp_disconnect *cp;
2088 struct hci_conn *conn;
2089
2090 if (!status)
2091 return;
2092
2093 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2094 if (!cp)
2095 return;
2096
2097 hci_dev_lock(hdev);
2098
2099 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2100 if (conn)
2101 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2102 conn->dst_type, status);
2103
2104 hci_dev_unlock(hdev);
2105 }
2106
/* Common bookkeeping after a successful LE (extended) create connection
 * command: record the SMP-relevant initiator/responder addresses on the
 * pending connection object and arm the connection timeout when not
 * connecting via the white list.
 */
static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
			      u8 peer_addr_type, u8 own_address_type,
			      u8 filter_policy)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_le(hdev, peer_addr,
				       peer_addr_type);
	if (!conn)
		return;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = own_address_type;
	if (own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = peer_addr_type;
	bacpy(&conn->resp_addr, peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);
}
2141
2142 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2143 {
2144 struct hci_cp_le_create_conn *cp;
2145
2146 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2147
2148 /* All connection failure handling is taken care of by the
2149 * hci_le_conn_failed function which is triggered by the HCI
2150 * request completion callbacks used for connecting.
2151 */
2152 if (status)
2153 return;
2154
2155 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2156 if (!cp)
2157 return;
2158
2159 hci_dev_lock(hdev);
2160
2161 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2162 cp->own_address_type, cp->filter_policy);
2163
2164 hci_dev_unlock(hdev);
2165 }
2166
2167 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2168 {
2169 struct hci_cp_le_ext_create_conn *cp;
2170
2171 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2172
2173 /* All connection failure handling is taken care of by the
2174 * hci_le_conn_failed function which is triggered by the HCI
2175 * request completion callbacks used for connecting.
2176 */
2177 if (status)
2178 return;
2179
2180 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2181 if (!cp)
2182 return;
2183
2184 hci_dev_lock(hdev);
2185
2186 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2187 cp->own_addr_type, cp->filter_policy);
2188
2189 hci_dev_unlock(hdev);
2190 }
2191
2192 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2193 {
2194 struct hci_cp_le_read_remote_features *cp;
2195 struct hci_conn *conn;
2196
2197 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2198
2199 if (!status)
2200 return;
2201
2202 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2203 if (!cp)
2204 return;
2205
2206 hci_dev_lock(hdev);
2207
2208 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2209 if (conn) {
2210 if (conn->state == BT_CONFIG) {
2211 hci_connect_cfm(conn, status);
2212 hci_conn_drop(conn);
2213 }
2214 }
2215
2216 hci_dev_unlock(hdev);
2217 }
2218
/* Handle command status for HCI_OP_LE_START_ENC.
 *
 * A failure means encryption could not even be started, so an
 * established connection is disconnected with an authentication
 * failure.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	/* Only react for connections that are still established. */
	if (conn->state != BT_CONNECTED)
		goto unlock;

	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
2248
2249 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2250 {
2251 struct hci_cp_switch_role *cp;
2252 struct hci_conn *conn;
2253
2254 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2255
2256 if (!status)
2257 return;
2258
2259 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2260 if (!cp)
2261 return;
2262
2263 hci_dev_lock(hdev);
2264
2265 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2266 if (conn)
2267 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2268
2269 hci_dev_unlock(hdev);
2270 }
2271
/* Handle the Inquiry Complete event: clear the inquiry flag, wake any
 * waiters and advance the mgmt discovery state machine (possibly into
 * the name-resolving phase).
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Entries still need their names resolved: start with the next
	 * NAME_NEEDED entry, or finish discovery if the request fails.
	 */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
2330
2331 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2332 {
2333 struct inquiry_data data;
2334 struct inquiry_info *info = (void *) (skb->data + 1);
2335 int num_rsp = *((__u8 *) skb->data);
2336
2337 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2338
2339 if (!num_rsp)
2340 return;
2341
2342 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2343 return;
2344
2345 hci_dev_lock(hdev);
2346
2347 for (; num_rsp; num_rsp--, info++) {
2348 u32 flags;
2349
2350 bacpy(&data.bdaddr, &info->bdaddr);
2351 data.pscan_rep_mode = info->pscan_rep_mode;
2352 data.pscan_period_mode = info->pscan_period_mode;
2353 data.pscan_mode = info->pscan_mode;
2354 memcpy(data.dev_class, info->dev_class, 3);
2355 data.clock_offset = info->clock_offset;
2356 data.rssi = HCI_RSSI_INVALID;
2357 data.ssp_mode = 0x00;
2358
2359 flags = hci_inquiry_cache_update(hdev, &data, false);
2360
2361 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2362 info->dev_class, HCI_RSSI_INVALID,
2363 flags, NULL, 0, NULL, 0);
2364 }
2365
2366 hci_dev_unlock(hdev);
2367 }
2368
/* Handle the Connection Complete event: bind the controller handle to
 * the connection object, set up timeouts and follow-up commands on
 * success, or tear the connection down on failure.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* A SCO completion may belong to a connection that was
		 * requested as eSCO; fall back to that lookup.
		 */
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming non-SSP links without a stored link key
			 * are expected to pair, so get the longer timeout.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_req_update_scan(hdev);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	/* On failure, delete the connection; on success, ACL links wait
	 * for the remote feature exchange before being confirmed.
	 */
	if (ev->status) {
		hci_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
2453
2454 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2455 {
2456 struct hci_cp_reject_conn_req cp;
2457
2458 bacpy(&cp.bdaddr, bdaddr);
2459 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2460 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2461 }
2462
/* HCI Connection Request event: a remote device wants to connect.
 * The request is rejected if no protocol listener accepts it, if the
 * peer is blacklisted, or if mgmt policy forbids it; otherwise it is
 * accepted immediately or deferred to userspace (HCI_PROTO_DEFER).
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Ask registered protocols (L2CAP/SCO) whether to accept and
	 * whether the decision should be deferred.
	 */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Require HCI_CONNECTABLE or a whitelist entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
	    !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
				    BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Connection accepted */

	hci_dev_lock(hdev);

	/* Refresh the cached device class reported in the request */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
				       &ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become master */
		else
			cp.role = 0x01; /* Remain slave */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		/* 0x1f40 = 8000 octets/sec bandwidth; latency and
		 * retransmission effort left at "don't care".
		 */
		cp.tx_bandwidth = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth = cpu_to_le32(0x00001f40);
		cp.max_latency = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Deferred: let userspace decide via connect cfm */
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);
	}
}
2557
2558 static u8 hci_to_mgmt_reason(u8 err)
2559 {
2560 switch (err) {
2561 case HCI_ERROR_CONNECTION_TIMEOUT:
2562 return MGMT_DEV_DISCONN_TIMEOUT;
2563 case HCI_ERROR_REMOTE_USER_TERM:
2564 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2565 case HCI_ERROR_REMOTE_POWER_OFF:
2566 return MGMT_DEV_DISCONN_REMOTE;
2567 case HCI_ERROR_LOCAL_HOST_TERM:
2568 return MGMT_DEV_DISCONN_LOCAL_HOST;
2569 default:
2570 return MGMT_DEV_DISCONN_UNKNOWN;
2571 }
2572 }
2573
/* HCI Disconnection Complete event: tear down the matching connection,
 * notify mgmt with a translated reason, requeue LE auto-connect
 * parameters and re-enable advertising if an LE link went away.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	/* A recorded authentication failure overrides the HCI reason */
	if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
		reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
	else
		reason = hci_to_mgmt_reason(ev->reason);

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_req_update_scan(hdev);
	}

	/* Requeue the device for background reconnection if its
	 * auto-connect policy asks for it.
	 */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
			break;

		default:
			break;
		}
	}

	/* conn is freed by hci_conn_del(); remember its type first */
	type = conn->type;

	hci_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		hci_req_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
2657
/* HCI Authentication Complete event: update the connection's security
 * state and, depending on the connection phase, start encryption or
 * confirm authentication to upper layers.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			bt_dev_info(hdev, "re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		/* For SSP links, request encryption before declaring the
		 * connection established.
		 */
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* An encryption request may have been queued up behind the
	 * authentication; start it now or fail it along with auth.
	 */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2727
/* HCI Remote Name Request Complete event: forward the resolved name to
 * mgmt (when managed) and start authentication if this connection is
 * still waiting for it.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	/* conn may be NULL here; hci_check_pending_name() handles that */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2769
/* Completion callback for HCI_OP_READ_ENC_KEY_SIZE: record the
 * negotiated encryption key size (assuming the maximum on failure) and
 * deliver the connect/encrypt confirmation that was deferred in
 * hci_encrypt_change_evt().
 */
static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
				       u16 opcode, struct sk_buff *skb)
{
	const struct hci_rp_read_enc_key_size *rp;
	struct hci_conn *conn;
	u16 handle;

	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (!skb || skb->len < sizeof(*rp)) {
		bt_dev_err(hdev, "invalid read key size response");
		return;
	}

	rp = (void *)skb->data;
	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn)
		goto unlock;

	/* If we fail to read the encryption key size, assume maximum
	 * (which is the same we do also when this HCI command isn't
	 * supported).
	 */
	if (rp->status) {
		bt_dev_err(hdev, "failed to read key size for handle %u",
			   handle);
		conn->enc_key_size = HCI_LINK_KEY_SIZE;
	} else {
		conn->enc_key_size = rp->key_size;
	}

	if (conn->state == BT_CONFIG) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, 0);
		hci_conn_drop(conn);
	} else {
		u8 encrypt;

		/* 0x00 = off, 0x02 = AES-CCM, otherwise 0x01 */
		if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
			encrypt = 0x00;
		else if (test_bit(HCI_CONN_AES_CCM, &conn->flags))
			encrypt = 0x02;
		else
			encrypt = 0x01;

		hci_encrypt_cfm(conn, 0, encrypt);
	}

unlock:
	hci_dev_unlock(hdev);
}
2825
/* HCI Encryption Change event: update encryption-related connection
 * flags, enforce Secure Connections Only policy, and read the
 * encryption key size for newly encrypted ACL links before notifying
 * upper layers.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 on ACL, or any LE encryption,
			 * means AES-CCM is in use.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK) {
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed encryption change on an established link forces a
	 * disconnect rather than a downgrade to unencrypted traffic.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* In Secure Connections Only mode, do not allow any connections
	 * that are not encrypted with AES-CCM using a P-256 authenticated
	 * combination key.
	 */
	if (hci_dev_test_flag(hdev, HCI_SC_ONLY) &&
	    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
	     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
		hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Try reading the encryption key size for encrypted ACL links */
	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
		struct hci_cp_read_enc_key_size cp;
		struct hci_request req;

		/* Only send HCI_Read_Encryption_Key_Size if the
		 * controller really supports it. If it doesn't, assume
		 * the default size (16).
		 */
		if (!(hdev->commands[20] & 0x10)) {
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		hci_req_init(&req, hdev);

		cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);

		if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
			bt_dev_err(hdev, "sending read key size failed");
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		/* Notification is deferred to read_enc_key_size_complete() */
		goto unlock;
	}

notify:
	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}
2931
2932 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2933 struct sk_buff *skb)
2934 {
2935 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2936 struct hci_conn *conn;
2937
2938 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2939
2940 hci_dev_lock(hdev);
2941
2942 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2943 if (conn) {
2944 if (!ev->status)
2945 set_bit(HCI_CONN_SECURE, &conn->flags);
2946
2947 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2948
2949 hci_key_change_cfm(conn, ev->status);
2950 }
2951
2952 hci_dev_unlock(hdev);
2953 }
2954
/* HCI Read Remote Supported Features complete: store the feature page
 * and continue connection setup — fetch extended features, resolve the
 * remote name, or finish the connection.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* If both sides support extended features, fetch page 1 before
	 * continuing with connection setup.
	 */
	if (!ev->status && lmp_ext_feat_capable(hdev) &&
	    lmp_ext_feat_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	/* Resolve the remote name before announcing the device to mgmt */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3003
/* HCI Command Complete event: dispatch the response skb to the
 * per-opcode completion handler, replenish the controller command
 * credit, and flag the owning request as complete.  *opcode and
 * *status are returned to the caller for request bookkeeping.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
				 u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;

	*opcode = __le16_to_cpu(ev->opcode);
	/* First byte of every command-complete payload is the status */
	*status = skb->data[sizeof(*ev)];

	skb_pull(skb, sizeof(*ev));

	switch (*opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_READ_STORED_LINK_KEY:
		hci_cc_read_stored_link_key(hdev, skb);
		break;

	case HCI_OP_DELETE_STORED_LINK_KEY:
		hci_cc_delete_stored_link_key(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_LE_READ_DEF_DATA_LEN:
		hci_cc_le_read_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_WRITE_DEF_DATA_LEN:
		hci_cc_le_write_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_RESOLV_LIST:
		hci_cc_le_clear_resolv_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
		hci_cc_le_read_resolv_list_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_MAX_DATA_LEN:
		hci_cc_le_read_max_data_len(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_DEBUG_MODE:
		hci_cc_write_ssp_debug_mode(hdev, skb);
		break;

	case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
		hci_cc_le_set_ext_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
		hci_cc_le_set_ext_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_DEFAULT_PHY:
		hci_cc_le_set_default_phy(hdev, skb);
		break;

	case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
		hci_cc_le_read_num_adv_sets(hdev, skb);
		break;

	case HCI_OP_LE_SET_EXT_ADV_PARAMS:
		hci_cc_set_ext_adv_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_EXT_ADV_ENABLE:
		hci_cc_le_set_ext_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
		hci_cc_le_set_adv_set_random_addr(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
		break;
	}

	/* A real command completed, so the command timeout no longer
	 * applies.
	 */
	if (*opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Restore the command credit unless a reset is in progress */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
		atomic_set(&hdev->cmd_cnt, 1);

	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
			     req_complete_skb);

	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
3338
/* HCI Command Status event: dispatch the status to the per-opcode
 * handler, replenish the controller command credit and, where
 * appropriate, flag the owning request as complete.  *opcode and
 * *status are returned to the caller for request bookkeeping.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
			       u16 *opcode, u8 *status,
			       hci_req_complete_t *req_complete,
			       hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;

	skb_pull(skb, sizeof(*ev));

	*opcode = __le16_to_cpu(ev->opcode);
	*status = ev->status;

	switch (*opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_SWITCH_ROLE:
		hci_cs_switch_role(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_READ_REMOTE_FEATURES:
		hci_cs_le_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	case HCI_OP_LE_EXT_CREATE_CONN:
		hci_cs_le_ext_create_conn(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
		break;
	}

	/* A real command got its status, so the command timeout no
	 * longer applies.
	 */
	if (*opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Restore the command credit unless a reset is in progress */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
		atomic_set(&hdev->cmd_cnt, 1);

	/* Indicate request completion if the command failed. Also, if
	 * we're not waiting for a special event and we get a success
	 * command status we should try to flag the request as completed
	 * (since for this kind of commands there will not be a command
	 * complete event).
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
				     req_complete_skb);

	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
3445
3446 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3447 {
3448 struct hci_ev_hardware_error *ev = (void *) skb->data;
3449
3450 hdev->hw_error_code = ev->code;
3451
3452 queue_work(hdev->req_workqueue, &hdev->error_reset);
3453 }
3454
3455 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3456 {
3457 struct hci_ev_role_change *ev = (void *) skb->data;
3458 struct hci_conn *conn;
3459
3460 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3461
3462 hci_dev_lock(hdev);
3463
3464 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3465 if (conn) {
3466 if (!ev->status)
3467 conn->role = ev->role;
3468
3469 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3470
3471 hci_role_switch_cfm(conn, ev->status, ev->role);
3472 }
3473
3474 hci_dev_unlock(hdev);
3475 }
3476
/* HCI Number of Completed Packets event: return transmit credits for
 * each reported connection handle and reschedule TX work.  Only valid
 * in packet-based flow control mode.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate that the handle array really fits in the skb */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Return credits to the right pool, capping at the
		 * controller-advertised maximum.  LE links fall back to
		 * the ACL pool when no dedicated LE buffers exist.
		 */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3543
3544 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3545 __u16 handle)
3546 {
3547 struct hci_chan *chan;
3548
3549 switch (hdev->dev_type) {
3550 case HCI_PRIMARY:
3551 return hci_conn_hash_lookup_handle(hdev, handle);
3552 case HCI_AMP:
3553 chan = hci_chan_lookup_handle(hdev, handle);
3554 if (chan)
3555 return chan->conn;
3556 break;
3557 default:
3558 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3559 break;
3560 }
3561
3562 return NULL;
3563 }
3564
/* HCI Number of Completed Data Blocks event: return block credits for
 * each reported handle and reschedule TX work.  Only valid in
 * block-based flow control mode (AMP controllers).
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate that the handle array really fits in the skb */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		/* Return blocks to the shared pool, capping at the
		 * controller-advertised maximum.
		 */
		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3615
/* Mode Change event: the ACL link switched between active and a
 * power-save mode (e.g. sniff).  Track the new mode on the connection
 * and kick off any SCO setup that was waiting for the mode change.
 */
static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_mode_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->mode = ev->mode;

		/* Only mirror the mode into the POWER_SAVE flag when the
		 * change was not one we requested ourselves (in that case
		 * the flag was already managed when the request was sent).
		 */
		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
					&conn->flags)) {
			if (conn->mode == HCI_CM_ACTIVE)
				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
			else
				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
		}

		/* SCO setup was deferred until the mode change completed */
		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}
3643
/* PIN Code Request event: the remote device wants legacy (pre-SSP)
 * PIN pairing.  Reject it when we are neither bondable nor the
 * initiator; otherwise forward the request to userspace via mgmt.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Extend the disconnect timeout while pairing is in progress;
	 * hold/drop is the idiom to refresh the timer without keeping
	 * an extra reference.
	 */
	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		u8 secure;

		/* Ask userspace for a 16-digit PIN when high security
		 * is pending on this connection.
		 */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
3681
/* Record a link key's type and PIN length on the connection and derive
 * the resulting pending security level from the key type.
 *
 * Changed-combination keys carry no type information of their own, so
 * they leave the previously stored key data untouched.  Unit and debug
 * keys update the stored type/length but do not change the security
 * level.
 */
static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
{
	if (key_type == HCI_LK_CHANGED_COMBINATION)
		return;

	conn->pin_length = pin_len;
	conn->key_type = key_type;

	switch (key_type) {
	case HCI_LK_LOCAL_UNIT:
	case HCI_LK_REMOTE_UNIT:
	case HCI_LK_DEBUG_COMBINATION:
		return;
	case HCI_LK_COMBINATION:
		/* A legacy combination key only counts as high security
		 * when it was created from a full 16-digit PIN.
		 */
		if (pin_len == 16)
			conn->pending_sec_level = BT_SECURITY_HIGH;
		else
			conn->pending_sec_level = BT_SECURITY_MEDIUM;
		break;
	case HCI_LK_UNAUTH_COMBINATION_P192:
	case HCI_LK_UNAUTH_COMBINATION_P256:
		conn->pending_sec_level = BT_SECURITY_MEDIUM;
		break;
	case HCI_LK_AUTH_COMBINATION_P192:
		conn->pending_sec_level = BT_SECURITY_HIGH;
		break;
	case HCI_LK_AUTH_COMBINATION_P256:
		conn->pending_sec_level = BT_SECURITY_FIPS;
		break;
	}
}
3713
/* Link Key Request event: the controller asks whether we have a stored
 * link key for the remote device.  Reply with the key if one is stored
 * and its strength satisfies the security requirements of the pending
 * connection; otherwise send a negative reply.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* Without the management interface there is no key store to
	 * consult, so let the controller time the request out.
	 */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		/* An unauthenticated key must not be used when the
		 * connection requires MITM protection (auth_type bit 0),
		 * unless the remote auth requirement is still unknown.
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* A legacy key made from a short PIN is too weak for a
		 * pending high/FIPS security level.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
3773
/* Link Key Notification event: pairing produced a (new) link key.
 * Store it, update the connection's security information and notify
 * userspace; debug keys are dropped from the kernel store unless
 * explicitly allowed.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;	/* the event does not carry a PIN length */

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Hold/drop to re-arm the disconnect timer with the normal
	 * (post-pairing) timeout.
	 */
	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	conn_set_key(conn, ev->key_type, conn->pin_length);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			       ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	/* Non-persistent keys must be flushed from the controller when
	 * the connection goes down.
	 */
	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}
3833
/* Read Clock Offset Complete event: cache the remote clock offset in
 * the inquiry cache so later connection attempts can page faster.
 */
static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_clock_offset *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn && !ev->status) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie) {
			ie->data.clock_offset = ev->clock_offset;
			ie->timestamp = jiffies;
		}
	}

	hci_dev_unlock(hdev);
}
3856
3857 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3858 {
3859 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3860 struct hci_conn *conn;
3861
3862 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3863
3864 hci_dev_lock(hdev);
3865
3866 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3867 if (conn && !ev->status)
3868 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3869
3870 hci_dev_unlock(hdev);
3871 }
3872
3873 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3874 {
3875 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3876 struct inquiry_entry *ie;
3877
3878 BT_DBG("%s", hdev->name);
3879
3880 hci_dev_lock(hdev);
3881
3882 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3883 if (ie) {
3884 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3885 ie->timestamp = jiffies;
3886 }
3887
3888 hci_dev_unlock(hdev);
3889 }
3890
3891 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3892 struct sk_buff *skb)
3893 {
3894 struct inquiry_data data;
3895 int num_rsp = *((__u8 *) skb->data);
3896
3897 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3898
3899 if (!num_rsp)
3900 return;
3901
3902 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3903 return;
3904
3905 hci_dev_lock(hdev);
3906
3907 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3908 struct inquiry_info_with_rssi_and_pscan_mode *info;
3909 info = (void *) (skb->data + 1);
3910
3911 for (; num_rsp; num_rsp--, info++) {
3912 u32 flags;
3913
3914 bacpy(&data.bdaddr, &info->bdaddr);
3915 data.pscan_rep_mode = info->pscan_rep_mode;
3916 data.pscan_period_mode = info->pscan_period_mode;
3917 data.pscan_mode = info->pscan_mode;
3918 memcpy(data.dev_class, info->dev_class, 3);
3919 data.clock_offset = info->clock_offset;
3920 data.rssi = info->rssi;
3921 data.ssp_mode = 0x00;
3922
3923 flags = hci_inquiry_cache_update(hdev, &data, false);
3924
3925 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3926 info->dev_class, info->rssi,
3927 flags, NULL, 0, NULL, 0);
3928 }
3929 } else {
3930 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3931
3932 for (; num_rsp; num_rsp--, info++) {
3933 u32 flags;
3934
3935 bacpy(&data.bdaddr, &info->bdaddr);
3936 data.pscan_rep_mode = info->pscan_rep_mode;
3937 data.pscan_period_mode = info->pscan_period_mode;
3938 data.pscan_mode = 0x00;
3939 memcpy(data.dev_class, info->dev_class, 3);
3940 data.clock_offset = info->clock_offset;
3941 data.rssi = info->rssi;
3942 data.ssp_mode = 0x00;
3943
3944 flags = hci_inquiry_cache_update(hdev, &data, false);
3945
3946 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3947 info->dev_class, info->rssi,
3948 flags, NULL, 0, NULL, 0);
3949 }
3950 }
3951
3952 hci_dev_unlock(hdev);
3953 }
3954
/* Remote Extended Features Complete event: store the requested feature
 * page for the connection.  Page 1 carries the remote host's SSP and
 * Secure Connections support, which gates how pairing proceeds.  While
 * the connection is still in BT_CONFIG this event also drives the next
 * setup step (remote name request or connection confirmation).
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Resolve the remote name before announcing the device to mgmt;
	 * if it is already announced, just mark it connected.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	/* If no outgoing authentication is required the connection
	 * setup is complete at this point.
	 */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
4018
/* Synchronous Connection Complete event: finalize an SCO/eSCO link.
 * On specific negotiation failures an outgoing eSCO attempt is retried
 * with a reduced packet-type set before giving up.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* When the link type in the event indicates SCO connection
		 * and lookup of the connection object fails, then check
		 * if an eSCO connection object exists.
		 *
		 * The core limits the synchronous connections to either
		 * SCO or eSCO. The eSCO connection is preferred and tried
		 * to be setup first and until successfully established,
		 * the link type will be hinted as eSCO.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;
		/* The controller decides SCO vs eSCO; adopt its choice */
		conn->type   = ev->link_type;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x10:	/* Connection Accept Timeout */
	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		if (conn->out) {
			/* Retry with a downgraded packet-type selection;
			 * if the retry was issued, the connection stays
			 * pending and must not be torn down below.
			 */
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
4085
4086 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
4087 {
4088 size_t parsed = 0;
4089
4090 while (parsed < eir_len) {
4091 u8 field_len = eir[0];
4092
4093 if (field_len == 0)
4094 return parsed;
4095
4096 parsed += field_len + 1;
4097 eir += field_len + 1;
4098 }
4099
4100 return eir_len;
4101 }
4102
4103 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
4104 struct sk_buff *skb)
4105 {
4106 struct inquiry_data data;
4107 struct extended_inquiry_info *info = (void *) (skb->data + 1);
4108 int num_rsp = *((__u8 *) skb->data);
4109 size_t eir_len;
4110
4111 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4112
4113 if (!num_rsp)
4114 return;
4115
4116 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4117 return;
4118
4119 hci_dev_lock(hdev);
4120
4121 for (; num_rsp; num_rsp--, info++) {
4122 u32 flags;
4123 bool name_known;
4124
4125 bacpy(&data.bdaddr, &info->bdaddr);
4126 data.pscan_rep_mode = info->pscan_rep_mode;
4127 data.pscan_period_mode = info->pscan_period_mode;
4128 data.pscan_mode = 0x00;
4129 memcpy(data.dev_class, info->dev_class, 3);
4130 data.clock_offset = info->clock_offset;
4131 data.rssi = info->rssi;
4132 data.ssp_mode = 0x01;
4133
4134 if (hci_dev_test_flag(hdev, HCI_MGMT))
4135 name_known = eir_get_data(info->data,
4136 sizeof(info->data),
4137 EIR_NAME_COMPLETE, NULL);
4138 else
4139 name_known = true;
4140
4141 flags = hci_inquiry_cache_update(hdev, &data, name_known);
4142
4143 eir_len = eir_get_length(info->data, sizeof(info->data));
4144
4145 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4146 info->dev_class, info->rssi,
4147 flags, info->data, eir_len, NULL, 0);
4148 }
4149
4150 hci_dev_unlock(hdev);
4151 }
4152
/* Encryption Key Refresh Complete event.  Only LE links are handled
 * here; a failed refresh on a live connection forces a disconnect with
 * an authentication-failure reason.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* In BT_CONFIG this completes connection setup; otherwise it
	 * resolves a security upgrade on an established connection.
	 */
	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Hold/drop refreshes the disconnect timer */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
4202
4203 static u8 hci_get_auth_req(struct hci_conn *conn)
4204 {
4205 /* If remote requests no-bonding follow that lead */
4206 if (conn->remote_auth == HCI_AT_NO_BONDING ||
4207 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4208 return conn->remote_auth | (conn->auth_type & 0x01);
4209
4210 /* If both remote and local have enough IO capabilities, require
4211 * MITM protection
4212 */
4213 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4214 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4215 return conn->remote_auth | 0x01;
4216
4217 /* No MITM protection possible so ignore remote requirement */
4218 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
4219 }
4220
/* Determine the OOB Data Present value for an IO Capability reply:
 * 0x00 = none, 0x01 = P-192 data present, 0x02 = P-256 data present.
 */
static u8 bredr_oob_data_present(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
	if (!data)
		return 0x00;

	if (bredr_sc_enabled(hdev)) {
		/* When Secure Connections is enabled, then just
		 * return the present value stored with the OOB
		 * data. The stored value contains the right present
		 * information. However it can only be trusted when
		 * not in Secure Connection Only mode.
		 */
		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
			return data->present;

		/* When Secure Connections Only mode is enabled, then
		 * the P-256 values are required. If they are not
		 * available, then do not declare that OOB data is
		 * present.
		 */
		if (!memcmp(data->rand256, ZERO_KEY, 16) ||
		    !memcmp(data->hash256, ZERO_KEY, 16))
			return 0x00;

		return 0x02;
	}

	/* When Secure Connections is not enabled or actually
	 * not supported by the hardware, then check that if
	 * P-192 data values are present.
	 */
	if (!memcmp(data->rand192, ZERO_KEY, 16) ||
	    !memcmp(data->hash192, ZERO_KEY, 16))
		return 0x00;

	return 0x01;
}
4262
/* IO Capability Request event: SSP pairing has started and the
 * controller needs our IO capability, authentication requirement and
 * OOB data availability.  Pairing is refused when policy does not
 * allow it.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection alive for the duration of pairing */
	hci_conn_hold(conn);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;
		cp.oob_data = bredr_oob_data_present(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4331
/* IO Capability Response event: record the remote side's IO capability
 * and authentication requirement for use by the later pairing steps
 * (e.g. user confirmation handling).
 */
static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	conn->remote_cap = ev->capability;
	conn->remote_auth = ev->authentication;

unlock:
	hci_dev_unlock(hdev);
}
4351
/* User Confirmation Request event (SSP numeric comparison).  Decide
 * whether to auto-accept, reject, or hand the request to userspace
 * based on both sides' MITM requirements and IO capabilities.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the authentication requirement is the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* An optional delay gives the remote side time to show
		 * its own confirmation dialog before we accept.
		 */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
4426
4427 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4428 struct sk_buff *skb)
4429 {
4430 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4431
4432 BT_DBG("%s", hdev->name);
4433
4434 if (hci_dev_test_flag(hdev, HCI_MGMT))
4435 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
4436 }
4437
/* User Passkey Notification event: the passkey to display locally so
 * the user can type it on the remote device.  Reset the entered-digit
 * counter and forward the passkey to userspace.
 */
static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		return;

	conn->passkey_notify = __le32_to_cpu(ev->passkey);
	conn->passkey_entered = 0;

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
					 conn->dst_type, conn->passkey_notify,
					 conn->passkey_entered);
}
4458
/* Keypress Notification event: track how many passkey digits the
 * remote user has entered so far and relay the progress to userspace.
 */
static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_keypress_notify *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		return;

	switch (ev->type) {
	case HCI_KEYPRESS_STARTED:
		conn->passkey_entered = 0;
		/* Entry started: nothing to report yet */
		return;

	case HCI_KEYPRESS_ENTERED:
		conn->passkey_entered++;
		break;

	case HCI_KEYPRESS_ERASED:
		conn->passkey_entered--;
		break;

	case HCI_KEYPRESS_CLEARED:
		conn->passkey_entered = 0;
		break;

	case HCI_KEYPRESS_COMPLETED:
		/* Completion is signalled elsewhere; no update needed */
		return;
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
					 conn->dst_type, conn->passkey_notify,
					 conn->passkey_entered);
}
4496
/* Simple Pairing Complete event: SSP pairing finished (successfully or
 * not).  Drop the reference taken when pairing started and report a
 * failure to userspace when we were not the initiator.
 */
static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Reset the authentication requirement to unknown */
	conn->remote_auth = 0xff;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(conn, ev->status);

	/* Balances the hci_conn_hold() from the IO capability request */
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
4527
/* Remote Host Supported Features Notification event: cache the remote
 * host features (feature page 1) on the connection, if any, and record
 * the remote's SSP support in the inquiry cache.
 */
static void hci_remote_host_features_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_remote_host_features *ev = (void *) skb->data;
	struct inquiry_entry *ie;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn)
		memcpy(conn->features[1], ev->features, 8);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

	hci_dev_unlock(hdev);
}
4549
/* Remote OOB Data Request event: the controller needs the out-of-band
 * pairing data previously received for this remote device.  Reply with
 * the stored hash/randomizer values (extended form when Secure
 * Connections is enabled), or a negative reply when none are stored.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
	if (!data) {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (bredr_sc_enabled(hdev)) {
		struct hci_cp_remote_oob_ext_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* In SC-only mode the weaker P-192 values must not be
		 * used, so send them zeroed out.
		 */
		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
			memset(cp.hash192, 0, sizeof(cp.hash192));
			memset(cp.rand192, 0, sizeof(cp.rand192));
		} else {
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
		}
		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
		memcpy(cp.rand, data->rand192, sizeof(cp.rand));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4603
4604 #if IS_ENABLED(CONFIG_BT_HS)
/* AMP Channel Selected event: a channel was picked for the physical
 * link, so kick off reading the final local AMP assoc data.
 */
static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_channel_selected *ev = (void *)skb->data;
	struct hci_conn *hcon;

	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);

	skb_pull(skb, sizeof(*ev));

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	amp_read_loc_assoc_final_data(hdev, hcon);
}
4620
/* AMP Physical Link Complete event: the AMP physical link is up (or
 * failed).  On success, mirror the peer address from the controlling
 * BR/EDR connection and confirm the physical link to the AMP manager.
 */
static void hci_phy_link_complete_evt(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon, *bredr_hcon;

	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
	       ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon) {
		hci_dev_unlock(hdev);
		return;
	}

	if (ev->status) {
		hci_conn_del(hcon);
		hci_dev_unlock(hdev);
		return;
	}

	/* The AMP link is paired with the BR/EDR connection it extends */
	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	bacpy(&hcon->dst, &bredr_hcon->dst);

	/* Hold/drop arms the disconnect timer for the new link */
	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(hcon);

	hci_debugfs_create_conn(hcon);
	hci_conn_add_sysfs(hcon);

	amp_physical_cfm(bredr_hcon, hcon);

	hci_dev_unlock(hdev);
}
4660
/* AMP Logical Link Complete event: create the hci_chan for the new
 * logical link and, when an L2CAP channel is waiting on the AMP
 * manager, confirm the logical link to L2CAP.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* AMP data is block based; adjust the L2CAP MTU to the
		 * controller's block MTU before confirming the link.
		 */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
4698
/* AMP Disconnection Logical Link Complete event: tear down the
 * hci_chan that represented the logical link.
 */
static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
	struct hci_chan *hchan;

	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
	       le16_to_cpu(ev->handle), ev->status);

	/* A failed disconnect leaves the link in place */
	if (ev->status)
		return;

	hci_dev_lock(hdev);

	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
	if (!hchan)
		goto unlock;

	amp_destroy_logical_link(hchan, ev->reason);

unlock:
	hci_dev_unlock(hdev);
}
4722
/* AMP Disconnection Physical Link Complete event: remove the hci_conn
 * that represented the physical link once the disconnect succeeded.
 */
static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	/* A failed disconnect leaves the link in place */
	if (ev->status)
		return;

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (hcon) {
		hcon->state = BT_CLOSED;
		hci_conn_del(hcon);
	}

	hci_dev_unlock(hdev);
}
4744 #endif
4745
/* Common handler for the legacy and enhanced LE Connection Complete
 * events. All multi-byte parameters are in host byte order; the
 * callers perform the little-endian conversion.
 *
 * Creates (or completes) the hci_conn for the new link, resolves an
 * RPA back to the identity address where possible, notifies mgmt, and
 * starts the remote feature read where applicable.
 */
static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
	bdaddr_t *bdaddr, u8 bdaddr_type, u8 role, u16 handle,
	u16 interval, u16 latency, u16 supervision_timeout)
{
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	conn = hci_lookup_le_connect(hdev);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			goto unlock;
		}

		conn->dst_type = bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = bdaddr_type;
			bacpy(&conn->resp_addr, bdaddr);
			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* A tracked connection attempt just completed, so its
		 * timeout no longer needs to fire.
		 */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
			bacpy(&conn->resp_addr, &hdev->random_addr);
		else
			bacpy(&conn->resp_addr, &hdev->bdaddr);

		conn->init_addr_type = bdaddr_type;
		bacpy(&conn->init_addr, bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (status) {
		hci_le_conn_failed(conn, status);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = handle;
	conn->state = BT_CONFIG;

	conn->le_conn_interval = interval;
	conn->le_conn_latency = latency;
	conn->le_supv_timeout = supervision_timeout;

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	if (!status) {
		/* The remote features procedure is defined for master
		 * role only. So only in case of an initiated connection
		 * request the remote features.
		 *
		 * If the local controller supports slave-initiated features
		 * exchange, then requesting the remote features in slave
		 * role is possible. Otherwise just transition into the
		 * connected state without requesting the remote features.
		 */
		if (conn->out ||
		    (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
			struct hci_cp_le_read_remote_features cp;

			cp.handle = __cpu_to_le16(conn->handle);

			hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			/* Hold the connection until the feature read
			 * completes; the completion handler drops it.
			 */
			hci_conn_hold(conn);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, status);
		}
	} else {
		hci_connect_cfm(conn, status);
	}

	/* A pending auto/explicit connection to this device is now
	 * satisfied; release the reference the params entry held.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}
4906
4907 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4908 {
4909 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
4910
4911 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4912
4913 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
4914 ev->role, le16_to_cpu(ev->handle),
4915 le16_to_cpu(ev->interval),
4916 le16_to_cpu(ev->latency),
4917 le16_to_cpu(ev->supervision_timeout));
4918 }
4919
4920 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
4921 struct sk_buff *skb)
4922 {
4923 struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
4924
4925 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4926
4927 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
4928 ev->role, le16_to_cpu(ev->handle),
4929 le16_to_cpu(ev->interval),
4930 le16_to_cpu(ev->latency),
4931 le16_to_cpu(ev->supervision_timeout));
4932 }
4933
4934 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4935 struct sk_buff *skb)
4936 {
4937 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4938 struct hci_conn *conn;
4939
4940 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4941
4942 if (ev->status)
4943 return;
4944
4945 hci_dev_lock(hdev);
4946
4947 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4948 if (conn) {
4949 conn->le_conn_interval = le16_to_cpu(ev->interval);
4950 conn->le_conn_latency = le16_to_cpu(ev->latency);
4951 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4952 }
4953
4954 hci_dev_unlock(hdev);
4955 }
4956
/* Initiate a connection in response to a connectable advertising
 * report when the advertiser is on the pend_le_conns list (direct,
 * always-auto-connect or explicit connect). Returns the hci_conn of
 * the connection attempt, or NULL when no attempt was made.
 *
 * This function requires the caller holds hdev->lock
 */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, u8 adv_type,
					      bdaddr_t *direct_rpa)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
		return NULL;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in slave role.
	 */
	if (hdev->conn_hash.le_num_slave > 0)
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
					   addr_type);
	if (!params)
		return NULL;

	if (!params->explicit_connect) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
			/* Only devices advertising with ADV_DIRECT_IND are
			 * triggering a connection attempt. This is allowing
			 * incoming connections from slave devices.
			 */
			if (adv_type != LE_ADV_DIRECT_IND)
				return NULL;
			break;
		case HCI_AUTO_CONN_ALWAYS:
			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
			 * are triggering a connection attempt. This means
			 * that incoming connectioms from slave device are
			 * accepted and also outgoing connections to slave
			 * devices are established when found.
			 */
			break;
		default:
			return NULL;
		}
	}

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER,
			      direct_rpa);
	if (!IS_ERR(conn)) {
		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
		 * by higher layer that tried to connect, if no then
		 * store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */

		if (!params->explicit_connect)
			params->conn = hci_conn_get(conn);

		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
5045
/* Common handling for a single advertising report (legacy, extended
 * mapped to legacy, or directed).
 *
 * Validates the PDU type and AD payload length, resolves RPAs to
 * identity addresses, triggers pending LE connections and forwards
 * device-found events to mgmt, merging ADV_IND/ADV_SCAN_IND reports
 * with their subsequent SCAN_RSP.
 *
 * @direct_addr: initiator address from an LE Direct Advertising
 *		 Report, or NULL for regular reports.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, bdaddr_t *direct_addr,
			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	struct hci_conn *conn;
	bool match;
	u32 flags;
	u8 *ptr, real_len;

	/* Reject anything that is not a known legacy PDU type */
	switch (type) {
	case LE_ADV_IND:
	case LE_ADV_DIRECT_IND:
	case LE_ADV_SCAN_IND:
	case LE_ADV_NONCONN_IND:
	case LE_ADV_SCAN_RSP:
		break;
	default:
		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
				       "type: 0x%02x", type);
		return;
	}

	/* Find the end of the data in case the report contains padded zero
	 * bytes at the end causing an invalid length value.
	 *
	 * When data is NULL, len is 0 so there is no need for extra ptr
	 * check as 'ptr < data + 0' is already false in such case.
	 */
	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
		if (ptr + 1 + *ptr > data + len)
			break;
	}

	real_len = ptr - data;

	/* Adjust for actual length */
	if (len != real_len) {
		bt_dev_err_ratelimited(hdev, "advertising data len corrected");
		len = real_len;
	}

	/* If the direct address is present, then this report is from
	 * a LE Direct Advertising Report event. In that case it is
	 * important to see if the address is matching the local
	 * controller address.
	 */
	if (direct_addr) {
		/* Only resolvable random addresses are valid for these
		 * kind of reports and others can be ignored.
		 */
		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
			return;

		/* If the controller is not using resolvable random
		 * addresses, then this report can be ignored.
		 */
		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
			return;

		/* If the local IRK of the controller does not match
		 * with the resolvable random address provided, then
		 * this report can be ignored.
		 */
		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
			return;
	}

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	/* Check if we have been requested to connect to this device.
	 *
	 * direct_addr is set only for directed advertising reports (it is NULL
	 * for advertising reports) and is already verified to be RPA above.
	 */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
								direct_addr);
	if (conn && type == LE_ADV_IND) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;
	}

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type))
			return;

		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
		else
			flags = 0;
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable and then clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the really unlikely case that a controller get confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
}
5242
5243 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5244 {
5245 u8 num_reports = skb->data[0];
5246 void *ptr = &skb->data[1];
5247
5248 hci_dev_lock(hdev);
5249
5250 while (num_reports--) {
5251 struct hci_ev_le_advertising_info *ev = ptr;
5252 s8 rssi;
5253
5254 if (ev->length <= HCI_MAX_AD_LENGTH) {
5255 rssi = ev->data[ev->length];
5256 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5257 ev->bdaddr_type, NULL, 0, rssi,
5258 ev->data, ev->length);
5259 } else {
5260 bt_dev_err(hdev, "Dropping invalid advertising data");
5261 }
5262
5263 ptr += sizeof(*ev) + ev->length + 1;
5264 }
5265
5266 hci_dev_unlock(hdev);
5267 }
5268
5269 static u8 ext_evt_type_to_legacy(u16 evt_type)
5270 {
5271 if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
5272 switch (evt_type) {
5273 case LE_LEGACY_ADV_IND:
5274 return LE_ADV_IND;
5275 case LE_LEGACY_ADV_DIRECT_IND:
5276 return LE_ADV_DIRECT_IND;
5277 case LE_LEGACY_ADV_SCAN_IND:
5278 return LE_ADV_SCAN_IND;
5279 case LE_LEGACY_NONCONN_IND:
5280 return LE_ADV_NONCONN_IND;
5281 case LE_LEGACY_SCAN_RSP_ADV:
5282 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
5283 return LE_ADV_SCAN_RSP;
5284 }
5285
5286 BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x",
5287 evt_type);
5288
5289 return LE_ADV_INVALID;
5290 }
5291
5292 if (evt_type & LE_EXT_ADV_CONN_IND) {
5293 if (evt_type & LE_EXT_ADV_DIRECT_IND)
5294 return LE_ADV_DIRECT_IND;
5295
5296 return LE_ADV_IND;
5297 }
5298
5299 if (evt_type & LE_EXT_ADV_SCAN_RSP)
5300 return LE_ADV_SCAN_RSP;
5301
5302 if (evt_type & LE_EXT_ADV_SCAN_IND)
5303 return LE_ADV_SCAN_IND;
5304
5305 if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
5306 evt_type & LE_EXT_ADV_DIRECT_IND)
5307 return LE_ADV_NONCONN_IND;
5308
5309 BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x",
5310 evt_type);
5311
5312 return LE_ADV_INVALID;
5313 }
5314
5315 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5316 {
5317 u8 num_reports = skb->data[0];
5318 void *ptr = &skb->data[1];
5319
5320 hci_dev_lock(hdev);
5321
5322 while (num_reports--) {
5323 struct hci_ev_le_ext_adv_report *ev = ptr;
5324 u8 legacy_evt_type;
5325 u16 evt_type;
5326
5327 evt_type = __le16_to_cpu(ev->evt_type);
5328 legacy_evt_type = ext_evt_type_to_legacy(evt_type);
5329 if (legacy_evt_type != LE_ADV_INVALID) {
5330 process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
5331 ev->bdaddr_type, NULL, 0, ev->rssi,
5332 ev->data, ev->length);
5333 }
5334
5335 ptr += sizeof(*ev) + ev->length + 1;
5336 }
5337
5338 hci_dev_unlock(hdev);
5339 }
5340
/* HCI LE Read Remote Features Complete event.
 *
 * Stores the remote LE feature mask on the connection and, when the
 * connection is still in BT_CONFIG, finishes the connection setup
 * that le_conn_complete_evt() deferred until the feature read.
 */
static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			memcpy(conn->features[0], ev->features, 8);

		if (conn->state == BT_CONFIG) {
			__u8 status;

			/* If the local controller supports slave-initiated
			 * features exchange, but the remote controller does
			 * not, then it is possible that the error code 0x1a
			 * for unsupported remote feature gets returned.
			 *
			 * In this specific case, allow the connection to
			 * transition into connected state and mark it as
			 * successful.
			 */
			if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
			    !conn->out && ev->status == 0x1a)
				status = 0x00;
			else
				status = ev->status;

			/* Drop the reference taken when the feature read
			 * was issued in le_conn_complete_evt().
			 */
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
5382
/* HCI LE Long Term Key Request event.
 *
 * The controller asks for the LTK to encrypt the given connection.
 * Look the key up in the SMP store and reply with it, or send a
 * negative reply when no matching key exists (unknown connection,
 * no LTK, or EDiv/Rand mismatch).
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	/* Zero-pad the key beyond the negotiated encryption key size */
	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	/* ev->handle is already little-endian; copy it through as-is */
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
5446
5447 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
5448 u8 reason)
5449 {
5450 struct hci_cp_le_conn_param_req_neg_reply cp;
5451
5452 cp.handle = cpu_to_le16(handle);
5453 cp.reason = reason;
5454
5455 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
5456 &cp);
5457 }
5458
5459 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
5460 struct sk_buff *skb)
5461 {
5462 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
5463 struct hci_cp_le_conn_param_req_reply cp;
5464 struct hci_conn *hcon;
5465 u16 handle, min, max, latency, timeout;
5466
5467 handle = le16_to_cpu(ev->handle);
5468 min = le16_to_cpu(ev->interval_min);
5469 max = le16_to_cpu(ev->interval_max);
5470 latency = le16_to_cpu(ev->latency);
5471 timeout = le16_to_cpu(ev->timeout);
5472
5473 hcon = hci_conn_hash_lookup_handle(hdev, handle);
5474 if (!hcon || hcon->state != BT_CONNECTED)
5475 return send_conn_param_neg_reply(hdev, handle,
5476 HCI_ERROR_UNKNOWN_CONN_ID);
5477
5478 if (hci_check_conn_params(min, max, latency, timeout))
5479 return send_conn_param_neg_reply(hdev, handle,
5480 HCI_ERROR_INVALID_LL_PARAMS);
5481
5482 if (hcon->role == HCI_ROLE_MASTER) {
5483 struct hci_conn_params *params;
5484 u8 store_hint;
5485
5486 hci_dev_lock(hdev);
5487
5488 params = hci_conn_params_lookup(hdev, &hcon->dst,
5489 hcon->dst_type);
5490 if (params) {
5491 params->conn_min_interval = min;
5492 params->conn_max_interval = max;
5493 params->conn_latency = latency;
5494 params->supervision_timeout = timeout;
5495 store_hint = 0x01;
5496 } else{
5497 store_hint = 0x00;
5498 }
5499
5500 hci_dev_unlock(hdev);
5501
5502 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
5503 store_hint, min, max, latency, timeout);
5504 }
5505
5506 cp.handle = ev->handle;
5507 cp.interval_min = ev->interval_min;
5508 cp.interval_max = ev->interval_max;
5509 cp.latency = ev->latency;
5510 cp.timeout = ev->timeout;
5511 cp.min_ce_len = 0;
5512 cp.max_ce_len = 0;
5513
5514 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
5515 }
5516
5517 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
5518 struct sk_buff *skb)
5519 {
5520 u8 num_reports = skb->data[0];
5521 void *ptr = &skb->data[1];
5522
5523 hci_dev_lock(hdev);
5524
5525 while (num_reports--) {
5526 struct hci_ev_le_direct_adv_info *ev = ptr;
5527
5528 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5529 ev->bdaddr_type, &ev->direct_addr,
5530 ev->direct_addr_type, ev->rssi, NULL, 0);
5531
5532 ptr += sizeof(*ev);
5533 }
5534
5535 hci_dev_unlock(hdev);
5536 }
5537
5538 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
5539 {
5540 struct hci_ev_le_meta *le_ev = (void *) skb->data;
5541
5542 skb_pull(skb, sizeof(*le_ev));
5543
5544 switch (le_ev->subevent) {
5545 case HCI_EV_LE_CONN_COMPLETE:
5546 hci_le_conn_complete_evt(hdev, skb);
5547 break;
5548
5549 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
5550 hci_le_conn_update_complete_evt(hdev, skb);
5551 break;
5552
5553 case HCI_EV_LE_ADVERTISING_REPORT:
5554 hci_le_adv_report_evt(hdev, skb);
5555 break;
5556
5557 case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
5558 hci_le_remote_feat_complete_evt(hdev, skb);
5559 break;
5560
5561 case HCI_EV_LE_LTK_REQ:
5562 hci_le_ltk_request_evt(hdev, skb);
5563 break;
5564
5565 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
5566 hci_le_remote_conn_param_req_evt(hdev, skb);
5567 break;
5568
5569 case HCI_EV_LE_DIRECT_ADV_REPORT:
5570 hci_le_direct_adv_report_evt(hdev, skb);
5571 break;
5572
5573 case HCI_EV_LE_EXT_ADV_REPORT:
5574 hci_le_ext_adv_report_evt(hdev, skb);
5575 break;
5576
5577 case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
5578 hci_le_enh_conn_complete_evt(hdev, skb);
5579 break;
5580
5581 default:
5582 break;
5583 }
5584 }
5585
5586 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
5587 u8 event, struct sk_buff *skb)
5588 {
5589 struct hci_ev_cmd_complete *ev;
5590 struct hci_event_hdr *hdr;
5591
5592 if (!skb)
5593 return false;
5594
5595 if (skb->len < sizeof(*hdr)) {
5596 bt_dev_err(hdev, "too short HCI event");
5597 return false;
5598 }
5599
5600 hdr = (void *) skb->data;
5601 skb_pull(skb, HCI_EVENT_HDR_SIZE);
5602
5603 if (event) {
5604 if (hdr->evt != event)
5605 return false;
5606 return true;
5607 }
5608
5609 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
5610 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
5611 hdr->evt);
5612 return false;
5613 }
5614
5615 if (skb->len < sizeof(*ev)) {
5616 bt_dev_err(hdev, "too short cmd_complete event");
5617 return false;
5618 }
5619
5620 ev = (void *) skb->data;
5621 skb_pull(skb, sizeof(*ev));
5622
5623 if (opcode != __le16_to_cpu(ev->opcode)) {
5624 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
5625 __le16_to_cpu(ev->opcode));
5626 return false;
5627 }
5628
5629 return true;
5630 }
5631
/* Entry point for every incoming HCI event packet.
 *
 * Decodes the event header and dispatches to the per-event handler.
 * When the event completes a queued command/request, the matching
 * req_complete(_skb) callback is invoked after the handler has run.
 * Consumes @skb.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event = hdr->evt, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	/* If this event is the one the currently sent command waits
	 * for, resolve its completion callbacks up front.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		opcode = __le16_to_cpu(cmd_hdr->opcode);
		hci_req_cmd_complete(hdev, opcode, status, &req_complete,
				     &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	/* Command complete/status handlers may override opcode/status
	 * and the completion callbacks resolved above.
	 */
	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb, &opcode, &status,
				     &req_complete, &req_complete_skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
				   &req_complete_skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	/* AMP (high speed) events are only compiled in with BT_HS */
#if IS_ENABLED(CONFIG_BT_HS)
	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;
#endif

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		/* req_complete_skb expects a matching cmd_complete skb;
		 * when the stored copy does not match, pass NULL.
		 */
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}