/* Extracted from net/bluetooth/mgmt.c (thirdparty/kernel/stable.git,
 * git.ipfire.org mirror), at commit
 * "Bluetooth: Implement Set ADV set random address".
 */
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39
40 #define MGMT_VERSION 1
41 #define MGMT_REVISION 14
42
/* Management opcodes accepted from trusted (privileged) control-channel
 * sockets; reported to user space by read_commands().
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
};
110
/* Management events that may be delivered to trusted control-channel
 * sockets; reported to user space by read_commands().
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
};
148
/* Subset of opcodes that unprivileged (untrusted) sockets may issue;
 * all of them are read-only queries.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
};
157
/* Subset of events delivered to unprivileged (untrusted) sockets. */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
};
171
/* 2 second cache lifetime, in jiffies. NOTE(review): presumably the
 * delay used for the service_cache delayed work — the arming site is
 * not visible in this chunk; confirm against the rest of the file.
 */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* 16 zero bytes. NOTE(review): looks like a sentinel for all-zero
 * keys; no use is visible in this chunk — verify before relying on it.
 */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
176
177 /* HCI to MGMT error code conversion table */
178 static u8 mgmt_status_table[] = {
179 MGMT_STATUS_SUCCESS,
180 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
181 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
182 MGMT_STATUS_FAILED, /* Hardware Failure */
183 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
184 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
185 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
186 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
187 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
188 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
189 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
190 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
191 MGMT_STATUS_BUSY, /* Command Disallowed */
192 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
193 MGMT_STATUS_REJECTED, /* Rejected Security */
194 MGMT_STATUS_REJECTED, /* Rejected Personal */
195 MGMT_STATUS_TIMEOUT, /* Host Timeout */
196 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
197 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
198 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
199 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
200 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
201 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
202 MGMT_STATUS_BUSY, /* Repeated Attempts */
203 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
204 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
205 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
206 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
207 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
208 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
209 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
210 MGMT_STATUS_FAILED, /* Unspecified Error */
211 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
212 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
213 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
214 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
215 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
216 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
217 MGMT_STATUS_FAILED, /* Unit Link Key Used */
218 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
219 MGMT_STATUS_TIMEOUT, /* Instant Passed */
220 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
221 MGMT_STATUS_FAILED, /* Transaction Collision */
222 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
223 MGMT_STATUS_REJECTED, /* QoS Rejected */
224 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
225 MGMT_STATUS_REJECTED, /* Insufficient Security */
226 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
227 MGMT_STATUS_BUSY, /* Role Switch Pending */
228 MGMT_STATUS_FAILED, /* Slot Violation */
229 MGMT_STATUS_FAILED, /* Role Switch Failed */
230 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
231 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
232 MGMT_STATUS_BUSY, /* Host Busy Pairing */
233 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
234 MGMT_STATUS_BUSY, /* Controller Busy */
235 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
236 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
237 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
238 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
239 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
240 };
241
242 static u8 mgmt_status(u8 hci_status)
243 {
244 if (hci_status < ARRAY_SIZE(mgmt_status_table))
245 return mgmt_status_table[hci_status];
246
247 return MGMT_STATUS_FAILED;
248 }
249
/* Send a controller-index event on the control channel to all sockets
 * that have the given flag set.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
256
/* Send a control-channel event to sockets with the given flag set,
 * optionally skipping one socket (typically the originator).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
263
/* Send a control-channel event to all trusted sockets, optionally
 * skipping one socket (typically the originator).
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
270
271 static u8 le_addr_type(u8 mgmt_addr_type)
272 {
273 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
274 return ADDR_LE_DEV_PUBLIC;
275 else
276 return ADDR_LE_DEV_RANDOM;
277 }
278
/* Fill a mgmt_rp_read_version structure with the compile-time
 * management interface version and revision.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
286
/* Handle MGMT_OP_READ_VERSION: reply with the management interface
 * version/revision. Works without a controller (MGMT_INDEX_NONE).
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	BT_DBG("sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
299
300 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
301 u16 data_len)
302 {
303 struct mgmt_rp_read_commands *rp;
304 u16 num_commands, num_events;
305 size_t rp_size;
306 int i, err;
307
308 BT_DBG("sock %p", sk);
309
310 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
311 num_commands = ARRAY_SIZE(mgmt_commands);
312 num_events = ARRAY_SIZE(mgmt_events);
313 } else {
314 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
315 num_events = ARRAY_SIZE(mgmt_untrusted_events);
316 }
317
318 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
319
320 rp = kmalloc(rp_size, GFP_KERNEL);
321 if (!rp)
322 return -ENOMEM;
323
324 rp->num_commands = cpu_to_le16(num_commands);
325 rp->num_events = cpu_to_le16(num_events);
326
327 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
328 __le16 *opcode = rp->opcodes;
329
330 for (i = 0; i < num_commands; i++, opcode++)
331 put_unaligned_le16(mgmt_commands[i], opcode);
332
333 for (i = 0; i < num_events; i++, opcode++)
334 put_unaligned_le16(mgmt_events[i], opcode);
335 } else {
336 __le16 *opcode = rp->opcodes;
337
338 for (i = 0; i < num_commands; i++, opcode++)
339 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
340
341 for (i = 0; i < num_events; i++, opcode++)
342 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
343 }
344
345 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
346 rp, rp_size);
347 kfree(rp);
348
349 return err;
350 }
351
/* Handle MGMT_OP_READ_INDEX_LIST: return the ids of all configured
 * primary controllers. Controllers in setup/config state, user-channel
 * controllers and raw-only devices are excluded.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the entry count, used only to size
	 * the reply buffer.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocation happens under the read lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes that actually qualify; extra
	 * filters may make this fewer than the first count.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Shrink the reply length to the number actually added */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
411
/* Handle MGMT_OP_READ_UNCONF_INDEX_LIST: like read_index_list() but
 * returns only primary controllers still flagged HCI_UNCONFIGURED.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for sizing the reply buffer */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocation happens under the read lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the qualifying indexes */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Shrink the reply length to the number actually added */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
471
/* Handle MGMT_OP_READ_EXT_INDEX_LIST: return all primary and AMP
 * controllers with a per-entry type (0x00 configured primary, 0x01
 * unconfigured primary, 0x02 AMP) and bus information. Calling this
 * switches the socket over to extended index events.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for sizing the reply buffer */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
	/* GFP_ATOMIC: allocation happens under the read lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the qualifying entries */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		BT_DBG("Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Shrink the reply length to the number actually added */
	rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
547
548 static bool is_configured(struct hci_dev *hdev)
549 {
550 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
551 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
552 return false;
553
554 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
555 !bacmp(&hdev->public_addr, BDADDR_ANY))
556 return false;
557
558 return true;
559 }
560
561 static __le32 get_missing_options(struct hci_dev *hdev)
562 {
563 u32 options = 0;
564
565 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
566 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
567 options |= MGMT_OPTION_EXTERNAL_CONFIG;
568
569 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
570 !bacmp(&hdev->public_addr, BDADDR_ANY))
571 options |= MGMT_OPTION_PUBLIC_ADDRESS;
572
573 return cpu_to_le32(options);
574 }
575
/* Broadcast a New Configuration Options event with the currently
 * missing options, skipping the originating socket.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
583
/* Complete a configuration command by replying with the missing
 * options bitmask for the given opcode.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
591
/* Handle MGMT_OP_READ_CONFIG_INFO: report manufacturer id plus which
 * configuration options the controller supports and which are still
 * missing.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Public address can only be set if the driver provides a
	 * set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
619
/* Build the bitmask of PHYs this controller supports, derived from the
 * BR/EDR LMP feature bits and the LE feature page. The EDR bits are
 * nested because 3-slot/5-slot variants only make sense when the
 * corresponding modulation (2M/3M) is supported at all.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate, 1 slot is mandatory for BR/EDR */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M is mandatory for LE */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
671
/* Build the bitmask of PHYs currently selected. For BR/EDR the EDR
 * packet-type bits in hdev->pkt_type work in reverse: a set HCI_2DHx /
 * HCI_3DHx bit means that packet type is *excluded*, hence the negated
 * tests below. For LE the default TX/RX PHY preferences are used.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate, 1 slot is always selected */
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			/* EDR bits are "do not use" flags, so a clear
			 * bit means the packet type is selected.
			 */
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
734
735 static u32 get_configurable_phys(struct hci_dev *hdev)
736 {
737 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
738 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
739 }
740
/* Build the bitmask of settings this controller can support, derived
 * from its BR/EDR and LE capabilities. Note that SECURE_CONN is set
 * for any LE-capable controller (LE Secure Connections), not only for
 * BR/EDR SC-capable ones.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Always available regardless of transport */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable needs interlaced scan (1.2+) */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_ADVERTISING;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	/* Configuration is possible with external config or a driver
	 * set_bdaddr hook.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
782
/* Build the bitmask of settings currently in effect, mostly a direct
 * translation of the hdev flags.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	return settings;
}
850
/* Look up a pending mgmt command on the control channel by opcode. */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
855
/* Look up a pending mgmt command on the control channel by opcode and
 * its associated user data pointer.
 */
static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
						  struct hci_dev *hdev,
						  const void *data)
{
	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
}
862
863 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
864 {
865 struct mgmt_pending_cmd *cmd;
866
867 /* If there's a pending mgmt command the flags will not yet have
868 * their final values, so check for this first.
869 */
870 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
871 if (cmd) {
872 struct mgmt_mode *cp = cmd->param;
873 if (cp->val == 0x01)
874 return LE_AD_GENERAL;
875 else if (cp->val == 0x02)
876 return LE_AD_LIMITED;
877 } else {
878 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
879 return LE_AD_LIMITED;
880 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
881 return LE_AD_GENERAL;
882 }
883
884 return 0;
885 }
886
887 bool mgmt_get_connectable(struct hci_dev *hdev)
888 {
889 struct mgmt_pending_cmd *cmd;
890
891 /* If there's a pending mgmt command the flag will not yet have
892 * it's final value, so check for this first.
893 */
894 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
895 if (cmd) {
896 struct mgmt_mode *cp = cmd->param;
897
898 return cp->val;
899 }
900
901 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
902 }
903
/* Delayed work handler: once the service cache expires, push the
 * current EIR data and class of device to the controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Only act if the cache flag was still set; clears it atomically */
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_req_init(&req, hdev);

	/* Build the request under the device lock, run it after unlock */
	hci_dev_lock(hdev);

	__hci_req_update_eir(&req);
	__hci_req_update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
924
/* Delayed work handler: mark the resolvable private address expired
 * and, if advertising is on, restart advertising so a fresh RPA gets
 * generated and programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	hci_req_init(&req, hdev);
	if (ext_adv_capable(hdev))
		__hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
	else
		__hci_req_enable_advertising(&req);
	hci_req_run(&req, NULL);
}
949
/* One-time per-controller mgmt initialization; guarded by the HCI_MGMT
 * flag so repeated calls are no-ops.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
965
/* Handle MGMT_OP_READ_INFO: reply with address, version, manufacturer,
 * supported/current settings, class of device and names.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
995
/* Append class of device, appearance and both names as EIR structures
 * to the given buffer and return the total EIR length written.
 * NOTE(review): assumes the caller's buffer is large enough for all
 * appended fields — the callers here use 512-byte buffers; confirm
 * against the eir_append_* helpers' bounds behavior.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	/* Class of device only makes sense with BR/EDR enabled */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	/* Appearance only makes sense with LE enabled */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strlen(hdev->dev_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strlen(hdev->short_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1019
/* Handle MGMT_OP_READ_EXT_INFO: like read_controller_info() but with
 * the name/class/appearance carried as EIR data. Calling this switches
 * the socket to extended-info change events.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	BT_DBG("sock %p %s", sk, hdev->name);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	/* Reply is trimmed to the actual EIR length */
	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1059
/* Broadcast an Extended Info Changed event carrying the current EIR
 * data to sockets that opted into extended info events.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1075
/* Complete a settings-changing command by replying with the current
 * settings bitmask for the given opcode.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1083
1084 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1085 {
1086 BT_DBG("%s status 0x%02x", hdev->name, status);
1087
1088 if (hci_conn_count(hdev) == 0) {
1089 cancel_delayed_work(&hdev->power_off);
1090 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1091 }
1092 }
1093
1094 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1095 {
1096 struct mgmt_ev_advertising_added ev;
1097
1098 ev.instance = instance;
1099
1100 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1101 }
1102
1103 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1104 u8 instance)
1105 {
1106 struct mgmt_ev_advertising_removed ev;
1107
1108 ev.instance = instance;
1109
1110 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1111 }
1112
1113 static void cancel_adv_timeout(struct hci_dev *hdev)
1114 {
1115 if (hdev->adv_instance_timeout) {
1116 hdev->adv_instance_timeout = 0;
1117 cancel_delayed_work(&hdev->adv_instance_expire);
1118 }
1119 }
1120
/* Queue the HCI commands needed to quiesce the controller before
 * powering off: disable page/inquiry scan, remove and disable all
 * advertising, stop discovery and abort every active connection.
 * Returns the hci_req_run() result (-ENODATA when nothing was queued).
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* Instance 0x00 clears all advertising instances */
	hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(&req);

	discov_stopped = hci_req_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		/* 0x15 == Terminated due to Power Off */
		__hci_abort_conn(&req, conn, 0x15);
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1154
/* Handler for MGMT_OP_SET_POWERED: power the controller on or off.
 * Completion is asynchronous; a pending command is kept until the
 * power state transition finishes.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one power state change may be in flight at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just return current settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1209
1210 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1211 {
1212 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1213
1214 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1215 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1216 }
1217
/* Broadcast the current settings to all mgmt sockets (no skip) */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1222
/* Context for pending-command iteration callbacks: collects the first
 * responding socket so it can be excluded from a following broadcast.
 */
struct cmd_lookup {
	struct sock *sk;	/* first socket that was answered (held) */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1228
/* mgmt_pending_foreach() callback: answer @cmd with the current
 * settings and free it.  The first socket seen is stashed (with a
 * held reference) in the cmd_lookup so the caller can skip it when
 * broadcasting the settings change.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1244
/* mgmt_pending_foreach() callback: fail @cmd with the status passed
 * via @data and remove it from the pending list.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1252
1253 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1254 {
1255 if (cmd->cmd_complete) {
1256 u8 *status = data;
1257
1258 cmd->cmd_complete(cmd, *status);
1259 mgmt_pending_remove(cmd);
1260
1261 return;
1262 }
1263
1264 cmd_status_rsp(cmd, data);
1265 }
1266
/* Default cmd_complete handler: echo the original command parameters
 * back in the response.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1272
/* cmd_complete handler for address-based commands: respond with only
 * the mgmt_addr_info that leads the stored command parameters.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1278
1279 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1280 {
1281 if (!lmp_bredr_capable(hdev))
1282 return MGMT_STATUS_NOT_SUPPORTED;
1283 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1284 return MGMT_STATUS_REJECTED;
1285 else
1286 return MGMT_STATUS_SUCCESS;
1287 }
1288
1289 static u8 mgmt_le_support(struct hci_dev *hdev)
1290 {
1291 if (!lmp_le_capable(hdev))
1292 return MGMT_STATUS_NOT_SUPPORTED;
1293 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1294 return MGMT_STATUS_REJECTED;
1295 else
1296 return MGMT_STATUS_SUCCESS;
1297 }
1298
/* Called when the HCI side of a Set Discoverable change finishes.
 * On success this arms the discoverable timeout (if any) and sends
 * the settings response plus a New Settings broadcast; on failure it
 * reports the error and drops the limited-discoverable flag.
 */
void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	/* Arm the discoverable timeout now that the mode is active */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1333
/* Handler for MGMT_OP_SET_DISCOVERABLE: 0x00 = off, 0x01 = general
 * discoverable, 0x02 = limited discoverable.  The timeout is given in
 * seconds.  The actual scan-mode change runs from discoverable_update
 * work and the timeout is armed in mgmt_set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be honored while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable implies connectable */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	queue_work(hdev->req_workqueue, &hdev->discoverable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1457
/* Called when the HCI side of a Set Connectable change finishes:
 * report the result to the pending command's socket and, on success,
 * broadcast the new settings.
 */
void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1485
/* Apply a connectable change without touching the controller (used
 * while powered off): update flags, answer the command and, if the
 * setting actually changed, refresh scan state and broadcast.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Dropping connectable also drops discoverable */
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_req_update_scan(hdev);
		hci_update_background_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1514
/* Handler for MGMT_OP_SET_CONNECTABLE: enable or disable page scan /
 * connectable advertising.  While powered off only the stored flags
 * are updated; otherwise the change is applied asynchronously from
 * connectable_update work.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Turning connectable off also ends discoverable mode */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	queue_work(hdev->req_workqueue, &hdev->connectable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1571
/* Handler for MGMT_OP_SET_BONDABLE: toggle the HCI_BONDABLE flag.
 * This is purely a host-side setting; no HCI traffic is required
 * except a possible advertising refresh in limited privacy mode.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		if (hdev_is_powered(hdev) &&
		    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
		    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
		    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1614
/* Handler for MGMT_OP_SET_LINK_SECURITY: toggle BR/EDR link-level
 * security (authentication).  When powered, this issues
 * HCI_OP_WRITE_AUTH_ENABLE and completes asynchronously.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: just flip the stored flag */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already matches: no HCI command needed */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1683
/* Handler for MGMT_OP_SET_SSP: toggle Secure Simple Pairing.  When
 * powered, this issues HCI_OP_WRITE_SSP_MODE (and may clear debug-key
 * mode) and completes asynchronously.  Disabling SSP also disables
 * High Speed, which depends on it.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only flip stored flags (HS follows SSP down) */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Best effort: disabling SSP also turns SSP debug mode off */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1764
/* Handler for MGMT_OP_SET_HS: toggle High Speed support.  HS is a
 * host-only flag layered on SSP, so no HCI commands are sent; it can
 * only be disabled while powered off.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* HS requires SSP to be enabled first */
	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1821
/* Request completion for set_le(): answer all pending SET_LE commands,
 * broadcast new settings and, when LE was just enabled, refresh the
 * default advertising/scan response data.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;
		hci_req_init(&req, hdev);
		if (ext_adv_capable(hdev)) {
			int err;

			err = __hci_req_setup_ext_adv_instance(&req, 0x00);
			if (!err)
				__hci_req_update_scan_rsp_data(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
		}
		hci_req_run(&req, NULL);
		hci_update_background_scan(hdev);
	}

unlock:
	hci_dev_unlock(hdev);
}
1868
/* Handler for MGMT_OP_SET_LE: enable or disable Low Energy support.
 * When a controller change is needed this issues
 * HCI_OP_WRITE_LE_HOST_SUPPORTED (after stopping LE advertising on
 * disable) and completes asynchronously in le_enable_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Disabling LE removes all advertising instances */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	/* Powered off or controller already matches: flags only */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop any advertising before turning LE support off */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			__hci_req_disable_advertising(&req);

		if (ext_adv_capable(hdev))
			__hci_req_clear_ext_adv_sets(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1975
1976 /* This is a helper function to test for pending mgmt commands that can
1977 * cause CoD or EIR HCI commands. We can only allow one such pending
1978 * mgmt command at a time since otherwise we cannot easily track what
1979 * the current values are, will be, and based on that calculate if a new
1980 * HCI command needs to be sent and if yes with what value.
1981 */
1982 static bool pending_eir_or_class(struct hci_dev *hdev)
1983 {
1984 struct mgmt_pending_cmd *cmd;
1985
1986 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1987 switch (cmd->opcode) {
1988 case MGMT_OP_ADD_UUID:
1989 case MGMT_OP_REMOVE_UUID:
1990 case MGMT_OP_SET_DEV_CLASS:
1991 case MGMT_OP_SET_POWERED:
1992 return true;
1993 }
1994 }
1995
1996 return false;
1997 }
1998
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; used to detect shortenable UUIDs.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2003
2004 static u8 get_uuid_size(const u8 *uuid)
2005 {
2006 u32 val;
2007
2008 if (memcmp(uuid, bluetooth_base_uuid, 12))
2009 return 128;
2010
2011 val = get_unaligned_le32(&uuid[12]);
2012 if (val > 0xffff)
2013 return 32;
2014
2015 return 16;
2016 }
2017
/* Generic completion for class-of-device changing commands: answer
 * the pending @mgmt_op with the (possibly updated) device class.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2036
/* HCI request completion for add_uuid() */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2043
/* Handler for MGMT_OP_ADD_UUID: record a service UUID and refresh the
 * class of device and EIR data.  If no HCI commands end up queued
 * (e.g. powered off), respond immediately with the current class.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		/* -ENODATA: nothing to send, so complete right away */
		if (err != -ENODATA)
			goto failed;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2101
2102 static bool enable_service_cache(struct hci_dev *hdev)
2103 {
2104 if (!hdev_is_powered(hdev))
2105 return false;
2106
2107 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2108 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2109 CACHE_TIMEOUT);
2110 return true;
2111 }
2112
2113 return false;
2114 }
2115
/* HCI request completion for remove_uuid() */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2122
/* Handler for MGMT_OP_REMOVE_UUID: remove one service UUID (or all of
 * them when the all-zero UUID is given) and refresh class of device
 * and EIR data.  Removing all UUIDs may instead arm the service cache.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* All-zero UUID means "remove everything" */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		/* -ENODATA: nothing to send, so complete right away */
		if (err != -ENODATA)
			goto unlock;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2201
/* HCI request completion for set_dev_class() */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2208
/* Handler for MGMT_OP_SET_DEV_CLASS: set the major/minor class of
 * device.  The low two bits of minor and high three bits of major are
 * reserved and must be zero.  When powered, the change (and a flushed
 * service cache, if active) is written to the controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Powered off: store the class and answer immediately */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* Flush a pending service cache so the EIR reflects all UUIDs.
	 * The lock must be dropped around the synchronous cancel since
	 * the work item itself takes hdev->lock.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		__hci_req_update_eir(&req);
	}

	__hci_req_update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		/* -ENODATA: nothing to send, so complete right away */
		if (err != -ENODATA)
			goto unlock;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2279
/* Load Link Keys command handler (BR/EDR only).
 *
 * Replaces the complete set of stored BR/EDR link keys with the list
 * supplied by userspace and updates the HCI_KEEP_DEBUG_KEYS setting.
 * All parameters are validated before any state is modified.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Largest key_count for which expected_len still fits in a u16,
	 * guarding the multiplication below against overflow.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must exactly match the declared key count */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate every entry before touching the key store */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Only BR/EDR addresses and key types 0x00-0x08 are valid */
		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2361
2362 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2363 u8 addr_type, struct sock *skip_sk)
2364 {
2365 struct mgmt_ev_device_unpaired ev;
2366
2367 bacpy(&ev.addr.bdaddr, bdaddr);
2368 ev.addr.type = addr_type;
2369
2370 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2371 skip_sk);
2372 }
2373
/* Unpair Device command handler.
 *
 * Removes the stored keys for the given address (link key for BR/EDR;
 * IRK and LTK for LE), optionally terminating an existing connection
 * when cp->disconnect is set. For LE it also cancels ongoing SMP
 * pairing and disables any auto-connect entry. The reply is sent
 * immediately when no link needs terminating, otherwise it is deferred
 * until the abort completes via the pending command.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	/* disconnect is a boolean flag; anything else is invalid */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		/* A failed removal means there was nothing paired */
		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

	err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* No connection: drop the parameters right away */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}

	/* Abort any ongoing SMP pairing */
	smp_cancel_pairing(conn);

	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Defer the reply until hci_abort_conn() completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2504
/* Disconnect command handler.
 *
 * Looks up the ACL (BR/EDR) or LE connection for the given address and
 * initiates disconnection. The mgmt reply is deferred via a pending
 * command until the disconnect completes; only one Disconnect command
 * may be outstanding at a time.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Refuse a second Disconnect while one is still pending */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED connections have no link to tear down */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2570
2571 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2572 {
2573 switch (link_type) {
2574 case LE_LINK:
2575 switch (addr_type) {
2576 case ADDR_LE_DEV_PUBLIC:
2577 return BDADDR_LE_PUBLIC;
2578
2579 default:
2580 /* Fallback to LE Random address type */
2581 return BDADDR_LE_RANDOM;
2582 }
2583
2584 default:
2585 /* Fallback to BR/EDR type */
2586 return BDADDR_BREDR;
2587 }
2588 }
2589
/* Get Connections command handler.
 *
 * Returns the addresses of all mgmt-visible connections. A first pass
 * counts candidates to size the reply buffer, a second pass fills it
 * while skipping SCO/eSCO links; the final length is recomputed since
 * the fill pass may have admitted fewer entries than were counted.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	size_t rp_len;
	int err;
	u16 i;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count connections to size the reply */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
	rp = kmalloc(rp_len, GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill the reply. SCO/eSCO entries are written but
	 * not counted (i is not advanced), so the next admitted entry
	 * simply overwrites the slot.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				rp_len);

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2647
2648 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2649 struct mgmt_cp_pin_code_neg_reply *cp)
2650 {
2651 struct mgmt_pending_cmd *cmd;
2652 int err;
2653
2654 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2655 sizeof(*cp));
2656 if (!cmd)
2657 return -ENOMEM;
2658
2659 cmd->cmd_complete = addr_cmd_complete;
2660
2661 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2662 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2663 if (err < 0)
2664 mgmt_pending_remove(cmd);
2665
2666 return err;
2667 }
2668
/* PIN Code Reply command handler.
 *
 * Forwards a userspace-supplied PIN to the controller for the ACL
 * connection to the given address. When the pending security level is
 * BT_SECURITY_HIGH a full 16-byte PIN is mandatory; otherwise a
 * negative reply is sent to the controller and the command fails with
 * Invalid Params.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a 16-digit PIN; reject anything shorter
	 * by sending a negative reply to the controller.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2730
2731 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2732 u16 len)
2733 {
2734 struct mgmt_cp_set_io_capability *cp = data;
2735
2736 BT_DBG("");
2737
2738 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2739 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2740 MGMT_STATUS_INVALID_PARAMS);
2741
2742 hci_dev_lock(hdev);
2743
2744 hdev->io_capability = cp->io_capability;
2745
2746 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2747 hdev->io_capability);
2748
2749 hci_dev_unlock(hdev);
2750
2751 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
2752 NULL, 0);
2753 }
2754
2755 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2756 {
2757 struct hci_dev *hdev = conn->hdev;
2758 struct mgmt_pending_cmd *cmd;
2759
2760 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2761 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2762 continue;
2763
2764 if (cmd->user_data != conn)
2765 continue;
2766
2767 return cmd;
2768 }
2769
2770 return NULL;
2771 }
2772
/* Finish a Pair Device command with the given mgmt status.
 *
 * Sends the reply, detaches all pairing callbacks from the connection,
 * drops the connection usage taken for pairing and releases the
 * reference held via cmd->user_data.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Balances the hci_conn_get() taken when the command was queued */
	hci_conn_put(conn);

	return err;
}
2801
2802 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2803 {
2804 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2805 struct mgmt_pending_cmd *cmd;
2806
2807 cmd = find_pairing(conn);
2808 if (cmd) {
2809 cmd->cmd_complete(cmd, status);
2810 mgmt_pending_remove(cmd);
2811 }
2812 }
2813
2814 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2815 {
2816 struct mgmt_pending_cmd *cmd;
2817
2818 BT_DBG("status %u", status);
2819
2820 cmd = find_pairing(conn);
2821 if (!cmd) {
2822 BT_DBG("Unable to find a pending command");
2823 return;
2824 }
2825
2826 cmd->cmd_complete(cmd, mgmt_status(status));
2827 mgmt_pending_remove(cmd);
2828 }
2829
2830 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2831 {
2832 struct mgmt_pending_cmd *cmd;
2833
2834 BT_DBG("status %u", status);
2835
2836 if (!status)
2837 return;
2838
2839 cmd = find_pairing(conn);
2840 if (!cmd) {
2841 BT_DBG("Unable to find a pending command");
2842 return;
2843 }
2844
2845 cmd->cmd_complete(cmd, mgmt_status(status));
2846 mgmt_pending_remove(cmd);
2847 }
2848
/* Pair Device command handler.
 *
 * Initiates an ACL (BR/EDR) or LE connection to the target and hooks
 * pairing callbacks onto it; the mgmt reply is produced from those
 * callbacks (or mgmt_smp_complete() for LE) via the pending command.
 * For LE, connection parameters are registered up front so the device
 * is remembered for future connections.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
					   addr_type, sec_level,
					   HCI_LE_CONN_TIMEOUT);
	}

	if (IS_ERR(conn)) {
		int status;

		/* Map connect errors to mgmt status codes */
		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* An existing connect callback means someone else is already
	 * pairing on this connection.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* Reference released in pairing_complete() */
	cmd->user_data = hci_conn_get(conn);

	/* Already connected and secure enough: complete immediately */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2979
/* Cancel Pair Device command handler.
 *
 * Completes the outstanding Pair Device command with status Cancelled,
 * after verifying the supplied address matches the connection being
 * paired. Fails with Invalid Params when no pairing is pending or the
 * address does not match.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* This replies to the original Pair Device command and tears
	 * down the pairing callbacks via pairing_complete().
	 */
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
3022
/* Common handler for all user pairing responses (PIN neg reply, user
 * confirm/passkey replies and their negative variants).
 *
 * For LE connections the response goes through SMP and the command is
 * answered immediately; for BR/EDR the corresponding HCI command
 * @hci_op is sent and the reply deferred via a pending command.
 * @passkey is only used when @hci_op is User Passkey Reply.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE: route through SMP and answer synchronously */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3093
3094 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3095 void *data, u16 len)
3096 {
3097 struct mgmt_cp_pin_code_neg_reply *cp = data;
3098
3099 BT_DBG("");
3100
3101 return user_pairing_resp(sk, hdev, &cp->addr,
3102 MGMT_OP_PIN_CODE_NEG_REPLY,
3103 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3104 }
3105
3106 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3107 u16 len)
3108 {
3109 struct mgmt_cp_user_confirm_reply *cp = data;
3110
3111 BT_DBG("");
3112
3113 if (len != sizeof(*cp))
3114 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3115 MGMT_STATUS_INVALID_PARAMS);
3116
3117 return user_pairing_resp(sk, hdev, &cp->addr,
3118 MGMT_OP_USER_CONFIRM_REPLY,
3119 HCI_OP_USER_CONFIRM_REPLY, 0);
3120 }
3121
3122 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3123 void *data, u16 len)
3124 {
3125 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3126
3127 BT_DBG("");
3128
3129 return user_pairing_resp(sk, hdev, &cp->addr,
3130 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3131 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3132 }
3133
3134 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3135 u16 len)
3136 {
3137 struct mgmt_cp_user_passkey_reply *cp = data;
3138
3139 BT_DBG("");
3140
3141 return user_pairing_resp(sk, hdev, &cp->addr,
3142 MGMT_OP_USER_PASSKEY_REPLY,
3143 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3144 }
3145
3146 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3147 void *data, u16 len)
3148 {
3149 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3150
3151 BT_DBG("");
3152
3153 return user_pairing_resp(sk, hdev, &cp->addr,
3154 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3155 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3156 }
3157
/* Force a refresh of the current advertising instance when its data
 * depends on any of the given @flags (e.g. local name or appearance
 * changed). Cancels the instance timeout and reschedules the next
 * instance; a no-op if the current instance does not use @flags.
 */
static void adv_expire(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;
	struct hci_request req;
	int err;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return;

	cancel_adv_timeout(hdev);

	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return;

	hci_req_init(&req, hdev);
	err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
					      true);
	if (err)
		return;

	hci_req_run(&req, NULL);
}
3186
/* HCI request completion for Set Local Name.
 *
 * Resolves the pending mgmt command with the translated HCI status.
 * On success, advertising instances that carry the local name are
 * refreshed if LE advertising is active.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3218
/* Set Local Name command handler.
 *
 * Stores the new complete and short names. When powered, queues an HCI
 * request to update name/EIR (BR/EDR) and scan response data (LE); the
 * reply is then deferred to set_name_complete(). When powered off or
 * when nothing changed, the reply is sent immediately.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		/* Notify other mgmt sockets even though no HCI traffic
		 * is generated while powered off.
		 */
		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		__hci_req_update_name(&req);
		__hci_req_update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		__hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3288
3289 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3290 u16 len)
3291 {
3292 struct mgmt_cp_set_appearance *cp = data;
3293 u16 apperance;
3294 int err;
3295
3296 BT_DBG("");
3297
3298 if (!lmp_le_capable(hdev))
3299 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3300 MGMT_STATUS_NOT_SUPPORTED);
3301
3302 apperance = le16_to_cpu(cp->appearance);
3303
3304 hci_dev_lock(hdev);
3305
3306 if (hdev->appearance != apperance) {
3307 hdev->appearance = apperance;
3308
3309 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3310 adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3311
3312 ext_info_changed(hdev, sk);
3313 }
3314
3315 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3316 0);
3317
3318 hci_dev_unlock(hdev);
3319
3320 return err;
3321 }
3322
/* Get PHY Configuration command handler.
 *
 * Reports the supported, selected and configurable PHY bitmasks.
 * (The reply struct name "confguration" is a spelling carried over
 * from the mgmt API header and is kept for ABI compatibility.)
 */
static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_rp_get_phy_confguration rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rp, sizeof(rp));
}
3343
3344 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3345 {
3346 struct mgmt_ev_phy_configuration_changed ev;
3347
3348 memset(&ev, 0, sizeof(ev));
3349
3350 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3351
3352 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3353 sizeof(ev), skip);
3354 }
3355
/* HCI request completion for Set PHY Configuration.
 *
 * Resolves the pending mgmt command with the translated HCI status and,
 * on success, broadcasts the PHY Configuration Changed event to the
 * other mgmt sockets.
 */
static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode, struct sk_buff *skb)
{
	struct mgmt_cp_set_phy_confguration *cp;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION,
				mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		/* Skip the issuing socket: it already got the reply */
		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3389
/* Handler for MGMT_OP_SET_PHY_CONFIGURATION.
 *
 * Validates the requested PHY selection against the controller's
 * supported and configurable PHYs, applies the BR/EDR part by updating
 * the ACL packet type mask synchronously, and programs the LE part via
 * an asynchronous HCI LE Set Default PHY request.
 *
 * Note: mgmt_cp_set_phy_confguration (sic) is the struct name exactly
 * as declared in mgmt.h.
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_confguration *cp = data;
	struct hci_cp_le_set_default_phy cp_phy;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	BT_DBG("sock %p %s", sk, hdev->name);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Reject any PHY bit the controller does not support at all. */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	/* Non-configurable PHYs are always enabled and must therefore
	 * all be present in the selection.
	 */
	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Nothing to do if the selection matches the current state. */
	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Map the BR/EDR PHY selections onto the ACL packet type mask.
	 * Note the inverted polarity of the EDR bits: a set HCI_2DHx /
	 * HCI_3DHx bit in pkt_type *disables* that packet type.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If only the BR/EDR part changed, no HCI command is needed:
	 * emit the changed event (if anything changed) and complete.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&cp_phy, 0, sizeof(cp_phy));

	/* The all_phys bits tell the controller "no preference" for a
	 * direction when no explicit PHY is selected for it.
	 */
	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);

	err = hci_req_run_skb(&req, set_default_phy_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3544
/* Completion handler for the Read Local OOB (Ext) Data HCI request.
 *
 * Translates the HCI response - either the legacy P-192-only form or
 * the extended P-192 + P-256 form, selected by the opcode - into a
 * mgmt_rp_read_local_oob_data reply and finishes the pending mgmt
 * command.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
					 u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		/* Guard against a truncated controller response. */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* The legacy response carries no P-256 values, so shrink
		 * the reply instead of sending zero-filled fields.
		 */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		/* Guard against a truncated controller response. */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}
3603
3604 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3605 void *data, u16 data_len)
3606 {
3607 struct mgmt_pending_cmd *cmd;
3608 struct hci_request req;
3609 int err;
3610
3611 BT_DBG("%s", hdev->name);
3612
3613 hci_dev_lock(hdev);
3614
3615 if (!hdev_is_powered(hdev)) {
3616 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3617 MGMT_STATUS_NOT_POWERED);
3618 goto unlock;
3619 }
3620
3621 if (!lmp_ssp_capable(hdev)) {
3622 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3623 MGMT_STATUS_NOT_SUPPORTED);
3624 goto unlock;
3625 }
3626
3627 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3628 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3629 MGMT_STATUS_BUSY);
3630 goto unlock;
3631 }
3632
3633 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3634 if (!cmd) {
3635 err = -ENOMEM;
3636 goto unlock;
3637 }
3638
3639 hci_req_init(&req, hdev);
3640
3641 if (bredr_sc_enabled(hdev))
3642 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
3643 else
3644 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3645
3646 err = hci_req_run_skb(&req, read_local_oob_data_complete);
3647 if (err < 0)
3648 mgmt_pending_remove(cmd);
3649
3650 unlock:
3651 hci_dev_unlock(hdev);
3652 return err;
3653 }
3654
/* Handler for MGMT_OP_ADD_REMOTE_OOB_DATA.
 *
 * Two command layouts are accepted, distinguished by length: the legacy
 * form carrying only P-192 values (BR/EDR only) and the extended form
 * carrying both P-192 and P-256 values. Zero-valued hash/randomizer
 * pairs disable OOB data for the corresponding curve.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	BT_DBG("%s ", hdev->name);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* The legacy layout is only defined for BR/EDR. */
		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		/* Neither known layout matched the supplied length. */
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3762
3763 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3764 void *data, u16 len)
3765 {
3766 struct mgmt_cp_remove_remote_oob_data *cp = data;
3767 u8 status;
3768 int err;
3769
3770 BT_DBG("%s", hdev->name);
3771
3772 if (cp->addr.type != BDADDR_BREDR)
3773 return mgmt_cmd_complete(sk, hdev->id,
3774 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3775 MGMT_STATUS_INVALID_PARAMS,
3776 &cp->addr, sizeof(cp->addr));
3777
3778 hci_dev_lock(hdev);
3779
3780 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3781 hci_remote_oob_data_clear(hdev);
3782 status = MGMT_STATUS_SUCCESS;
3783 goto done;
3784 }
3785
3786 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3787 if (err < 0)
3788 status = MGMT_STATUS_INVALID_PARAMS;
3789 else
3790 status = MGMT_STATUS_SUCCESS;
3791
3792 done:
3793 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3794 status, &cp->addr, sizeof(cp->addr));
3795
3796 hci_dev_unlock(hdev);
3797 return err;
3798 }
3799
3800 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
3801 {
3802 struct mgmt_pending_cmd *cmd;
3803
3804 BT_DBG("status %d", status);
3805
3806 hci_dev_lock(hdev);
3807
3808 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
3809 if (!cmd)
3810 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
3811
3812 if (!cmd)
3813 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
3814
3815 if (cmd) {
3816 cmd->cmd_complete(cmd, mgmt_status(status));
3817 mgmt_pending_remove(cmd);
3818 }
3819
3820 hci_dev_unlock(hdev);
3821 }
3822
3823 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
3824 uint8_t *mgmt_status)
3825 {
3826 switch (type) {
3827 case DISCOV_TYPE_LE:
3828 *mgmt_status = mgmt_le_support(hdev);
3829 if (*mgmt_status)
3830 return false;
3831 break;
3832 case DISCOV_TYPE_INTERLEAVED:
3833 *mgmt_status = mgmt_le_support(hdev);
3834 if (*mgmt_status)
3835 return false;
3836 /* Intentional fall-through */
3837 case DISCOV_TYPE_BREDR:
3838 *mgmt_status = mgmt_bredr_support(hdev);
3839 if (*mgmt_status)
3840 return false;
3841 break;
3842 default:
3843 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
3844 return false;
3845 }
3846
3847 return true;
3848 }
3849
/* Shared implementation for Start Discovery and Start Limited
 * Discovery. 'op' selects which mgmt opcode responses are tagged with
 * and whether limited-discovery filtering applies.
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery may run at a time, and periodic inquiry
	 * takes precedence over mgmt-initiated discovery.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* The actual HCI work happens asynchronously in discov_update;
	 * the pending command is completed from there.
	 */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
3910
/* Handler for MGMT_OP_START_DISCOVERY (unfiltered discovery). */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
3917
/* Handler for MGMT_OP_START_LIMITED_DISCOVERY (limited-mode devices
 * only).
 */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
3925
/* cmd_complete handler for Start Service Discovery: reply with just the
 * one-byte discovery type stored at the start of the pending command's
 * parameters.
 */
static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
					  u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, 1);
}
3932
/* Handler for MGMT_OP_START_SERVICE_DISCOVERY.
 *
 * Like Start Discovery but with result filtering: an RSSI threshold and
 * a variable-length list of 128-bit service UUIDs appended to the fixed
 * command header.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound that keeps sizeof(*cp) + uuid_count * 16 within
	 * u16, so the expected_len computation below cannot overflow.
	 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The command length must exactly match the declared UUID count. */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		/* Keep a private copy; 'data' belongs to the caller. */
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
4033
4034 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
4035 {
4036 struct mgmt_pending_cmd *cmd;
4037
4038 BT_DBG("status %d", status);
4039
4040 hci_dev_lock(hdev);
4041
4042 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4043 if (cmd) {
4044 cmd->cmd_complete(cmd, mgmt_status(status));
4045 mgmt_pending_remove(cmd);
4046 }
4047
4048 hci_dev_unlock(hdev);
4049 }
4050
4051 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4052 u16 len)
4053 {
4054 struct mgmt_cp_stop_discovery *mgmt_cp = data;
4055 struct mgmt_pending_cmd *cmd;
4056 int err;
4057
4058 BT_DBG("%s", hdev->name);
4059
4060 hci_dev_lock(hdev);
4061
4062 if (!hci_discovery_active(hdev)) {
4063 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4064 MGMT_STATUS_REJECTED, &mgmt_cp->type,
4065 sizeof(mgmt_cp->type));
4066 goto unlock;
4067 }
4068
4069 if (hdev->discovery.type != mgmt_cp->type) {
4070 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4071 MGMT_STATUS_INVALID_PARAMS,
4072 &mgmt_cp->type, sizeof(mgmt_cp->type));
4073 goto unlock;
4074 }
4075
4076 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4077 if (!cmd) {
4078 err = -ENOMEM;
4079 goto unlock;
4080 }
4081
4082 cmd->cmd_complete = generic_cmd_complete;
4083
4084 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4085 queue_work(hdev->req_workqueue, &hdev->discov_update);
4086 err = 0;
4087
4088 unlock:
4089 hci_dev_unlock(hdev);
4090 return err;
4091 }
4092
4093 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4094 u16 len)
4095 {
4096 struct mgmt_cp_confirm_name *cp = data;
4097 struct inquiry_entry *e;
4098 int err;
4099
4100 BT_DBG("%s", hdev->name);
4101
4102 hci_dev_lock(hdev);
4103
4104 if (!hci_discovery_active(hdev)) {
4105 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4106 MGMT_STATUS_FAILED, &cp->addr,
4107 sizeof(cp->addr));
4108 goto failed;
4109 }
4110
4111 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4112 if (!e) {
4113 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4114 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4115 sizeof(cp->addr));
4116 goto failed;
4117 }
4118
4119 if (cp->name_known) {
4120 e->name_state = NAME_KNOWN;
4121 list_del(&e->list);
4122 } else {
4123 e->name_state = NAME_NEEDED;
4124 hci_inquiry_cache_update_resolve(hdev, e);
4125 }
4126
4127 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
4128 &cp->addr, sizeof(cp->addr));
4129
4130 failed:
4131 hci_dev_unlock(hdev);
4132 return err;
4133 }
4134
4135 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4136 u16 len)
4137 {
4138 struct mgmt_cp_block_device *cp = data;
4139 u8 status;
4140 int err;
4141
4142 BT_DBG("%s", hdev->name);
4143
4144 if (!bdaddr_type_is_valid(cp->addr.type))
4145 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4146 MGMT_STATUS_INVALID_PARAMS,
4147 &cp->addr, sizeof(cp->addr));
4148
4149 hci_dev_lock(hdev);
4150
4151 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4152 cp->addr.type);
4153 if (err < 0) {
4154 status = MGMT_STATUS_FAILED;
4155 goto done;
4156 }
4157
4158 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4159 sk);
4160 status = MGMT_STATUS_SUCCESS;
4161
4162 done:
4163 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4164 &cp->addr, sizeof(cp->addr));
4165
4166 hci_dev_unlock(hdev);
4167
4168 return err;
4169 }
4170
4171 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4172 u16 len)
4173 {
4174 struct mgmt_cp_unblock_device *cp = data;
4175 u8 status;
4176 int err;
4177
4178 BT_DBG("%s", hdev->name);
4179
4180 if (!bdaddr_type_is_valid(cp->addr.type))
4181 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4182 MGMT_STATUS_INVALID_PARAMS,
4183 &cp->addr, sizeof(cp->addr));
4184
4185 hci_dev_lock(hdev);
4186
4187 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4188 cp->addr.type);
4189 if (err < 0) {
4190 status = MGMT_STATUS_INVALID_PARAMS;
4191 goto done;
4192 }
4193
4194 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4195 sk);
4196 status = MGMT_STATUS_SUCCESS;
4197
4198 done:
4199 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4200 &cp->addr, sizeof(cp->addr));
4201
4202 hci_dev_unlock(hdev);
4203
4204 return err;
4205 }
4206
4207 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4208 u16 len)
4209 {
4210 struct mgmt_cp_set_device_id *cp = data;
4211 struct hci_request req;
4212 int err;
4213 __u16 source;
4214
4215 BT_DBG("%s", hdev->name);
4216
4217 source = __le16_to_cpu(cp->source);
4218
4219 if (source > 0x0002)
4220 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4221 MGMT_STATUS_INVALID_PARAMS);
4222
4223 hci_dev_lock(hdev);
4224
4225 hdev->devid_source = source;
4226 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4227 hdev->devid_product = __le16_to_cpu(cp->product);
4228 hdev->devid_version = __le16_to_cpu(cp->version);
4229
4230 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
4231 NULL, 0);
4232
4233 hci_req_init(&req, hdev);
4234 __hci_req_update_eir(&req);
4235 hci_req_run(&req, NULL);
4236
4237 hci_dev_unlock(hdev);
4238
4239 return err;
4240 }
4241
/* Completion callback for the advertising re-enable request issued from
 * set_advertising_complete(); only logs the resulting status.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	BT_DBG("status %d", status);
}
4247
/* Request-completion callback for Set Advertising.
 *
 * Syncs the HCI_ADVERTISING flag with the actual controller state,
 * answers all pending Set Advertising commands, and - when the setting
 * was just turned off while advertising instances exist - re-schedules
 * instance-based advertising.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	u8 instance;
	struct adv_info *adv_instance;
	int err;

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending Set Advertising command. */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the controller's actual advertising state. */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		goto unlock;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		/* No current instance: fall back to the first one. */
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			goto unlock;

		instance = adv_instance->instance;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, instance, true);

	if (!err)
		err = hci_req_run(&req, enable_advertising_instance);

	if (err)
		bt_dev_err(hdev, "failed to re-configure advertising");

unlock:
	hci_dev_unlock(hdev);
}
4310
/* Handler for MGMT_OP_SET_ADVERTISING.
 *
 * val semantics: 0x00 = off, 0x01 = advertising on, 0x02 = advertising
 * on and connectable. Where possible the flags are toggled without HCI
 * traffic; otherwise an async request (re)programs advertising for
 * instance 0.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast New Settings on an actual change. */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	} else {
		__hci_req_disable_advertising(&req);
	}

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4418
4419 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4420 void *data, u16 len)
4421 {
4422 struct mgmt_cp_set_static_address *cp = data;
4423 int err;
4424
4425 BT_DBG("%s", hdev->name);
4426
4427 if (!lmp_le_capable(hdev))
4428 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4429 MGMT_STATUS_NOT_SUPPORTED);
4430
4431 if (hdev_is_powered(hdev))
4432 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4433 MGMT_STATUS_REJECTED);
4434
4435 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4436 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4437 return mgmt_cmd_status(sk, hdev->id,
4438 MGMT_OP_SET_STATIC_ADDRESS,
4439 MGMT_STATUS_INVALID_PARAMS);
4440
4441 /* Two most significant bits shall be set */
4442 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4443 return mgmt_cmd_status(sk, hdev->id,
4444 MGMT_OP_SET_STATIC_ADDRESS,
4445 MGMT_STATUS_INVALID_PARAMS);
4446 }
4447
4448 hci_dev_lock(hdev);
4449
4450 bacpy(&hdev->static_addr, &cp->bdaddr);
4451
4452 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
4453 if (err < 0)
4454 goto unlock;
4455
4456 err = new_settings(hdev, sk);
4457
4458 unlock:
4459 hci_dev_unlock(hdev);
4460 return err;
4461 }
4462
4463 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4464 void *data, u16 len)
4465 {
4466 struct mgmt_cp_set_scan_params *cp = data;
4467 __u16 interval, window;
4468 int err;
4469
4470 BT_DBG("%s", hdev->name);
4471
4472 if (!lmp_le_capable(hdev))
4473 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4474 MGMT_STATUS_NOT_SUPPORTED);
4475
4476 interval = __le16_to_cpu(cp->interval);
4477
4478 if (interval < 0x0004 || interval > 0x4000)
4479 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4480 MGMT_STATUS_INVALID_PARAMS);
4481
4482 window = __le16_to_cpu(cp->window);
4483
4484 if (window < 0x0004 || window > 0x4000)
4485 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4486 MGMT_STATUS_INVALID_PARAMS);
4487
4488 if (window > interval)
4489 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4490 MGMT_STATUS_INVALID_PARAMS);
4491
4492 hci_dev_lock(hdev);
4493
4494 hdev->le_scan_interval = interval;
4495 hdev->le_scan_window = window;
4496
4497 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
4498 NULL, 0);
4499
4500 /* If background scan is running, restart it so new parameters are
4501 * loaded.
4502 */
4503 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4504 hdev->discovery.state == DISCOVERY_STOPPED) {
4505 struct hci_request req;
4506
4507 hci_req_init(&req, hdev);
4508
4509 hci_req_add_le_scan_disable(&req);
4510 hci_req_add_le_passive_scan(&req);
4511
4512 hci_req_run(&req, NULL);
4513 }
4514
4515 hci_dev_unlock(hdev);
4516
4517 return err;
4518 }
4519
4520 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
4521 u16 opcode)
4522 {
4523 struct mgmt_pending_cmd *cmd;
4524
4525 BT_DBG("status 0x%02x", status);
4526
4527 hci_dev_lock(hdev);
4528
4529 cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4530 if (!cmd)
4531 goto unlock;
4532
4533 if (status) {
4534 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4535 mgmt_status(status));
4536 } else {
4537 struct mgmt_mode *cp = cmd->param;
4538
4539 if (cp->val)
4540 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
4541 else
4542 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4543
4544 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4545 new_settings(hdev, cmd->sk);
4546 }
4547
4548 mgmt_pending_remove(cmd);
4549
4550 unlock:
4551 hci_dev_unlock(hdev);
4552 }
4553
/* Handler for MGMT_OP_SET_FAST_CONNECTABLE.
 *
 * Toggles fast-connectable page scan parameters. Requires BR/EDR and a
 * controller of at least Bluetooth 1.2. When powered off the flag is
 * toggled directly; when powered the change goes through an async HCI
 * request completed by fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Requested state already active: just confirm the settings. */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	/* Powered off: toggle the flag without any HCI traffic. */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4618
4619 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4620 {
4621 struct mgmt_pending_cmd *cmd;
4622
4623 BT_DBG("status 0x%02x", status);
4624
4625 hci_dev_lock(hdev);
4626
4627 cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
4628 if (!cmd)
4629 goto unlock;
4630
4631 if (status) {
4632 u8 mgmt_err = mgmt_status(status);
4633
4634 /* We need to restore the flag if related HCI commands
4635 * failed.
4636 */
4637 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
4638
4639 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4640 } else {
4641 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4642 new_settings(hdev, cmd->sk);
4643 }
4644
4645 mgmt_pending_remove(cmd);
4646
4647 unlock:
4648 hci_dev_unlock(hdev);
4649 }
4650
/* Set BR/EDR management command handler.
 *
 * Toggles BR/EDR support on a dual-mode (BR/EDR + LE) controller.
 * When the controller is powered off only host flags are updated;
 * when powered on, enabling BR/EDR triggers an HCI request and
 * disabling it is rejected.  Returns 0 or a negative errno; command
 * status/complete is sent to @sk in all handled cases.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Only meaningful on dual-mode controllers. */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Rejected unless LE is currently enabled. */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Already in the requested state: just echo the settings back. */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	/* While powered off, only host flags change; BR/EDR-dependent
	 * settings are cleared when BR/EDR is being turned off.
	 */
	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, false);
	__hci_req_update_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	__hci_req_update_adv_data(&req, hdev->cur_adv_instance);

	/* On failure set_bredr_complete() never runs, so clean up the
	 * pending command here; the flag set above is rolled back by
	 * set_bredr_complete() on HCI-level failure.
	 */
	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4762
4763 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4764 {
4765 struct mgmt_pending_cmd *cmd;
4766 struct mgmt_mode *cp;
4767
4768 BT_DBG("%s status %u", hdev->name, status);
4769
4770 hci_dev_lock(hdev);
4771
4772 cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
4773 if (!cmd)
4774 goto unlock;
4775
4776 if (status) {
4777 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
4778 mgmt_status(status));
4779 goto remove;
4780 }
4781
4782 cp = cmd->param;
4783
4784 switch (cp->val) {
4785 case 0x00:
4786 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
4787 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4788 break;
4789 case 0x01:
4790 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
4791 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4792 break;
4793 case 0x02:
4794 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
4795 hci_dev_set_flag(hdev, HCI_SC_ONLY);
4796 break;
4797 }
4798
4799 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
4800 new_settings(hdev, cmd->sk);
4801
4802 remove:
4803 mgmt_pending_remove(cmd);
4804 unlock:
4805 hci_dev_unlock(hdev);
4806 }
4807
/* Set Secure Connections management command handler.
 *
 * Modes: 0x00 = off, 0x01 = on, 0x02 = SC-only.  When the controller
 * is powered off, lacks BR/EDR SC support, or has BR/EDR disabled,
 * only host flags are updated; otherwise Write Secure Connections
 * Host Support is sent to the controller and sc_enable_complete()
 * finishes the transaction.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Needs either controller SC support or LE enabled (LE SC is a
	 * host-side feature).
	 */
	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* BR/EDR SC cannot be enabled while SSP is disabled. */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Flag-only path: no HCI command needs to be (or can be) sent. */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Already in the requested state (both enabled and SC-only
	 * match): reply without touching the controller.
	 */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
4895
4896 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4897 void *data, u16 len)
4898 {
4899 struct mgmt_mode *cp = data;
4900 bool changed, use_changed;
4901 int err;
4902
4903 BT_DBG("request for %s", hdev->name);
4904
4905 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4906 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4907 MGMT_STATUS_INVALID_PARAMS);
4908
4909 hci_dev_lock(hdev);
4910
4911 if (cp->val)
4912 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
4913 else
4914 changed = hci_dev_test_and_clear_flag(hdev,
4915 HCI_KEEP_DEBUG_KEYS);
4916
4917 if (cp->val == 0x02)
4918 use_changed = !hci_dev_test_and_set_flag(hdev,
4919 HCI_USE_DEBUG_KEYS);
4920 else
4921 use_changed = hci_dev_test_and_clear_flag(hdev,
4922 HCI_USE_DEBUG_KEYS);
4923
4924 if (hdev_is_powered(hdev) && use_changed &&
4925 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
4926 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4927 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4928 sizeof(mode), &mode);
4929 }
4930
4931 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4932 if (err < 0)
4933 goto unlock;
4934
4935 if (changed)
4936 err = new_settings(hdev, sk);
4937
4938 unlock:
4939 hci_dev_unlock(hdev);
4940 return err;
4941 }
4942
4943 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4944 u16 len)
4945 {
4946 struct mgmt_cp_set_privacy *cp = cp_data;
4947 bool changed;
4948 int err;
4949
4950 BT_DBG("request for %s", hdev->name);
4951
4952 if (!lmp_le_capable(hdev))
4953 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4954 MGMT_STATUS_NOT_SUPPORTED);
4955
4956 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
4957 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4958 MGMT_STATUS_INVALID_PARAMS);
4959
4960 if (hdev_is_powered(hdev))
4961 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4962 MGMT_STATUS_REJECTED);
4963
4964 hci_dev_lock(hdev);
4965
4966 /* If user space supports this command it is also expected to
4967 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4968 */
4969 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
4970
4971 if (cp->privacy) {
4972 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
4973 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
4974 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
4975 hci_adv_instances_set_rpa_expired(hdev, true);
4976 if (cp->privacy == 0x02)
4977 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
4978 else
4979 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
4980 } else {
4981 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
4982 memset(hdev->irk, 0, sizeof(hdev->irk));
4983 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
4984 hci_adv_instances_set_rpa_expired(hdev, false);
4985 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
4986 }
4987
4988 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4989 if (err < 0)
4990 goto unlock;
4991
4992 if (changed)
4993 err = new_settings(hdev, sk);
4994
4995 unlock:
4996 hci_dev_unlock(hdev);
4997 return err;
4998 }
4999
5000 static bool irk_is_valid(struct mgmt_irk_info *irk)
5001 {
5002 switch (irk->addr.type) {
5003 case BDADDR_LE_PUBLIC:
5004 return true;
5005
5006 case BDADDR_LE_RANDOM:
5007 /* Two most significant bits shall be set */
5008 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5009 return false;
5010 return true;
5011 }
5012
5013 return false;
5014 }
5015
/* Load IRKs management command handler.
 *
 * Replaces the whole set of stored Identity Resolving Keys with the
 * ones supplied in the command.  The full list is validated before
 * any existing key is discarded, so an invalid entry leaves the
 * stored keys untouched.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Largest count for which the total command still fits in the
	 * u16 length field; also prevents expected_len overflow below.
	 */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length. */
	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	/* Validate every entry before clearing the current key store. */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	/* User space providing IRKs implies it can handle RPAs. */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5078
5079 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5080 {
5081 if (key->master != 0x00 && key->master != 0x01)
5082 return false;
5083
5084 switch (key->addr.type) {
5085 case BDADDR_LE_PUBLIC:
5086 return true;
5087
5088 case BDADDR_LE_RANDOM:
5089 /* Two most significant bits shall be set */
5090 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5091 return false;
5092 return true;
5093 }
5094
5095 return false;
5096 }
5097
/* Load Long Term Keys management command handler.
 *
 * Replaces the entire stored LTK set with the keys supplied in the
 * command.  All entries are validated up front; the type of each key
 * is then mapped to the internal SMP key type before storing.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Largest count for which the total command still fits in the
	 * u16 length field; also prevents expected_len overflow below.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				      MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				      MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length. */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				      MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	/* Validate every entry before clearing the current key store. */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* NOTE(review): the fall-through into "continue"
			 * means P256 debug keys are never stored and the
			 * two assignments above are dead — confirm this
			 * skip is intentional.
			 */
			/* fall through */
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5186
5187 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5188 {
5189 struct hci_conn *conn = cmd->user_data;
5190 struct mgmt_rp_get_conn_info rp;
5191 int err;
5192
5193 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5194
5195 if (status == MGMT_STATUS_SUCCESS) {
5196 rp.rssi = conn->rssi;
5197 rp.tx_power = conn->tx_power;
5198 rp.max_tx_power = conn->max_tx_power;
5199 } else {
5200 rp.rssi = HCI_RSSI_INVALID;
5201 rp.tx_power = HCI_TX_POWER_INVALID;
5202 rp.max_tx_power = HCI_TX_POWER_INVALID;
5203 }
5204
5205 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
5206 status, &rp, sizeof(rp));
5207
5208 hci_conn_drop(conn);
5209 hci_conn_put(conn);
5210
5211 return err;
5212 }
5213
/* Completion callback for the Read RSSI / Read TX Power request built
 * by get_conn_info().  Recovers the connection handle from the last
 * sent command, finds the matching pending mgmt command and completes
 * it with the (possibly adjusted) status.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	BT_DBG("status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		/* Same layout as hci_cp_read_rssi for the handle field. */
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	/* Neither command was the last one sent: nothing to complete. */
	if (!cp) {
		bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
			   handle);
		goto unlock;
	}

	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5267
/* Get Connection Information management command handler.
 *
 * Returns RSSI / TX power for an existing connection.  Values cached
 * on the hci_conn are served directly while fresh enough; otherwise
 * an HCI request refreshes them and the reply is deferred to
 * conn_info_refresh_complete() / conn_info_cmd_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	/* The reply always echoes the requested address. */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one refresh may be in flight per connection. */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		/* Read RSSI must come first; conn_info_refresh_complete()
		 * relies on that ordering for its status handling.
		 */
		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* References released in conn_info_cmd_complete(). */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5388
5389 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5390 {
5391 struct hci_conn *conn = cmd->user_data;
5392 struct mgmt_rp_get_clock_info rp;
5393 struct hci_dev *hdev;
5394 int err;
5395
5396 memset(&rp, 0, sizeof(rp));
5397 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5398
5399 if (status)
5400 goto complete;
5401
5402 hdev = hci_dev_get(cmd->index);
5403 if (hdev) {
5404 rp.local_clock = cpu_to_le32(hdev->clock);
5405 hci_dev_put(hdev);
5406 }
5407
5408 if (conn) {
5409 rp.piconet_clock = cpu_to_le32(conn->clock);
5410 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5411 }
5412
5413 complete:
5414 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
5415 sizeof(rp));
5416
5417 if (conn) {
5418 hci_conn_drop(conn);
5419 hci_conn_put(conn);
5420 }
5421
5422 return err;
5423 }
5424
5425 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5426 {
5427 struct hci_cp_read_clock *hci_cp;
5428 struct mgmt_pending_cmd *cmd;
5429 struct hci_conn *conn;
5430
5431 BT_DBG("%s status %u", hdev->name, status);
5432
5433 hci_dev_lock(hdev);
5434
5435 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
5436 if (!hci_cp)
5437 goto unlock;
5438
5439 if (hci_cp->which) {
5440 u16 handle = __le16_to_cpu(hci_cp->handle);
5441 conn = hci_conn_hash_lookup_handle(hdev, handle);
5442 } else {
5443 conn = NULL;
5444 }
5445
5446 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5447 if (!cmd)
5448 goto unlock;
5449
5450 cmd->cmd_complete(cmd, mgmt_status(status));
5451 mgmt_pending_remove(cmd);
5452
5453 unlock:
5454 hci_dev_unlock(hdev);
5455 }
5456
/* Get Clock Information management command handler.
 *
 * Reads the local clock and, when a non-wildcard BR/EDR address is
 * given and connected, also the piconet clock of that connection.
 * The reply is sent from clock_info_cmd_complete() once the HCI
 * request finishes.
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	/* The reply always echoes the requested address. */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Piconet clocks only exist for BR/EDR links. */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A wildcard address means only the local clock is wanted. */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* which = 0x00: local clock. */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* References released in clock_info_cmd_complete(). */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5532
5533 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5534 {
5535 struct hci_conn *conn;
5536
5537 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5538 if (!conn)
5539 return false;
5540
5541 if (conn->dst_type != type)
5542 return false;
5543
5544 if (conn->state != BT_CONNECTED)
5545 return false;
5546
5547 return true;
5548 }
5549
/* This function requires the caller holds hdev->lock */
/* Set the auto-connect policy of the connection parameters for
 * @addr/@addr_type, creating them if necessary, and move the entry
 * to the matching action list (pend_le_conns / pend_le_reports).
 * Returns 0 on success or -EIO if the parameters could not be
 * allocated.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever action list the entry is on before
	 * re-inserting it according to the new policy.
	 */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* A pending explicit connect takes precedence over
		 * passive reporting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* No need to queue a connect attempt if one is already
		 * established.
		 */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}
5594
5595 static void device_added(struct sock *sk, struct hci_dev *hdev,
5596 bdaddr_t *bdaddr, u8 type, u8 action)
5597 {
5598 struct mgmt_ev_device_added ev;
5599
5600 bacpy(&ev.addr.bdaddr, bdaddr);
5601 ev.addr.type = type;
5602 ev.action = action;
5603
5604 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5605 }
5606
/* Add Device management command handler.
 *
 * BR/EDR addresses are added to the whitelist (only the incoming
 * connections action, 0x01, is supported).  LE addresses get
 * connection parameters whose auto-connect policy is derived from
 * the requested action, and the background scan is updated.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	/* The address must be a valid, non-wildcard BR/EDR or LE one. */
	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err)
			goto unlock;

		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map the action to the internal auto-connect policy:
	 * 0x02 = always connect, 0x01 = direct connect, else report.
	 */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	}

	hci_update_background_scan(hdev);

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5694
5695 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5696 bdaddr_t *bdaddr, u8 type)
5697 {
5698 struct mgmt_ev_device_removed ev;
5699
5700 bacpy(&ev.addr.bdaddr, bdaddr);
5701 ev.addr.type = type;
5702
5703 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5704 }
5705
/* Remove Device management command handler.
 *
 * With a specific address, removes that device from the whitelist
 * (BR/EDR) or deletes its connection parameters (LE).  With a
 * wildcard address (BDADDR_ANY, type 0x00), removes every whitelist
 * entry and every removable LE connection parameter.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				/* Not on the whitelist: nothing to remove. */
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries not created via Add Device (disabled or
		 * explicit-connect only) cannot be removed this way.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* Wildcard removal requires address type 0x00. */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep params with a pending explicit connect,
			 * just demote them to explicit-only.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
5834
/* Load Connection Parameters mgmt command: replaces the stored LE
 * connection parameters with the supplied list.  Individual entries
 * with an invalid address type or out-of-range values are logged and
 * skipped rather than failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound that keeps the expected_len computation below
	 * from overflowing u16.
	 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* Variable-length command: payload size must match the
	 * advertised entry count exactly.
	 */
	expected_len = sizeof(*cp) + param_count *
					sizeof(struct mgmt_conn_param);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	/* Drop previously disabled entries before loading the new set */
	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		/* Only LE address types are meaningful here */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		/* Creates a new entry or returns the existing one for
		 * this address.
		 */
		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
5920
/* Set External Configuration mgmt command: toggles whether controller
 * configuration is handled by an external entity.  Only valid while
 * powered off and on controllers advertising the
 * HCI_QUIRK_EXTERNAL_CONFIG quirk.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	/* Parameter is strictly boolean */
	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	/* Nothing more to announce if the flag did not actually flip */
	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the flag flip moved the controller across the
	 * configured/unconfigured boundary, re-register its index under
	 * the matching identity.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5976
/* Set Public Address mgmt command: programs the public Bluetooth
 * address of a controller.  Only allowed while powered off and when
 * the driver provides a set_bdaddr callback.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* BDADDR_ANY is not a usable public address */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* If the address was the last missing piece of configuration,
	 * switch the controller over to its configured identity and
	 * power it up so the new address gets applied.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6028
/* Completion handler for the HCI OOB-data read triggered by the
 * Read Local OOB Extended Data command.  Packages the controller's
 * hash/randomizer values as EIR fields, answers the pending mgmt
 * command and notifies other sockets listening for OOB data updates.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
					     u16 opcode, struct sk_buff *skb)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;
	u16 eir_len;
	int err;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
	if (!cmd)
		return;

	mgmt_cp = cmd->param;

	if (status) {
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy reply: P-192 values only */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* class-of-dev field (5) + hash and rand (18 each),
			 * sizes include the 2-byte EIR field header.
			 */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended reply: P-256, optionally plus legacy P-192 */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				/* Secure Connections Only: suppress the
				 * legacy P-192 values.
				 */
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* On failure eir_len is 0 and the hash/rand pointers are never
	 * dereferenced.
	 */
	if (status)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	/* Share the refreshed OOB data with every other interested
	 * socket, excluding the requester.
	 */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
6139
6140 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
6141 struct mgmt_cp_read_local_oob_ext_data *cp)
6142 {
6143 struct mgmt_pending_cmd *cmd;
6144 struct hci_request req;
6145 int err;
6146
6147 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
6148 cp, sizeof(*cp));
6149 if (!cmd)
6150 return -ENOMEM;
6151
6152 hci_req_init(&req, hdev);
6153
6154 if (bredr_sc_enabled(hdev))
6155 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
6156 else
6157 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
6158
6159 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
6160 if (err < 0) {
6161 mgmt_pending_remove(cmd);
6162 return err;
6163 }
6164
6165 return 0;
6166 }
6167
/* Read Local OOB Extended Data mgmt command: returns local out-of-band
 * pairing data as EIR-encoded fields for either BR/EDR or LE.  The
 * BR/EDR path with SSP enabled defers to an HCI request and completes
 * asynchronously via read_local_oob_ext_data_complete().
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev)) {
		/* First pass: compute the worst-case EIR length for the
		 * requested transport so the reply can be allocated.
		 */
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				/* bdaddr (9) + role (3) + confirm and
				 * random (18 each) + flags (3)
				 */
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* SSP data must come from the controller; finish
			 * asynchronously unless issuing the request fails.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* addr[6] carries the address type byte: 0x01 when the
		 * static (random) address is used, 0x00 for the public
		 * address.
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		     bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* NOTE(review): role 0x02 when advertising, 0x01
		 * otherwise -- presumably peripheral vs central
		 * preference; confirm against the Core Specification
		 * Supplement.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Broadcast the new OOB data to other interested sockets */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
6323
6324 static u32 get_supported_adv_flags(struct hci_dev *hdev)
6325 {
6326 u32 flags = 0;
6327
6328 flags |= MGMT_ADV_FLAG_CONNECTABLE;
6329 flags |= MGMT_ADV_FLAG_DISCOV;
6330 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
6331 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
6332 flags |= MGMT_ADV_FLAG_APPEARANCE;
6333 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
6334
6335 /* In extended adv TX_POWER returned from Set Adv Param
6336 * will be always valid.
6337 */
6338 if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
6339 ext_adv_capable(hdev))
6340 flags |= MGMT_ADV_FLAG_TX_POWER;
6341
6342 return flags;
6343 }
6344
/* Read Advertising Features mgmt command: reports the supported
 * advertising flags, data size limits and the identifiers of all
 * currently registered advertising instances.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* One trailing byte per registered instance for the id list */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = HCI_MAX_ADV_INSTANCES;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		*instance = adv_instance->instance;
		instance++;
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
6393
/* Number of bytes the local name occupies when appended as an EIR
 * field (the value append_local_name() reports from offset 0).  The
 * scratch buffer covers the worst case of a short name plus field
 * header -- assumes append_local_name() never writes more than
 * HCI_MAX_SHORT_NAME_LENGTH + 3 bytes; TODO confirm.
 */
static u8 calculate_name_len(struct hci_dev *hdev)
{
	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];

	return append_local_name(hdev, buf, 0);
}
6400
6401 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
6402 bool is_adv_data)
6403 {
6404 u8 max_len = HCI_MAX_AD_LENGTH;
6405
6406 if (is_adv_data) {
6407 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
6408 MGMT_ADV_FLAG_LIMITED_DISCOV |
6409 MGMT_ADV_FLAG_MANAGED_FLAGS))
6410 max_len -= 3;
6411
6412 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
6413 max_len -= 3;
6414 } else {
6415 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
6416 max_len -= calculate_name_len(hdev);
6417
6418 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
6419 max_len -= 4;
6420 }
6421
6422 return max_len;
6423 }
6424
6425 static bool flags_managed(u32 adv_flags)
6426 {
6427 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
6428 MGMT_ADV_FLAG_LIMITED_DISCOV |
6429 MGMT_ADV_FLAG_MANAGED_FLAGS);
6430 }
6431
6432 static bool tx_power_managed(u32 adv_flags)
6433 {
6434 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
6435 }
6436
6437 static bool name_managed(u32 adv_flags)
6438 {
6439 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
6440 }
6441
6442 static bool appearance_managed(u32 adv_flags)
6443 {
6444 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
6445 }
6446
6447 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
6448 u8 len, bool is_adv_data)
6449 {
6450 int i, cur_len;
6451 u8 max_len;
6452
6453 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
6454
6455 if (len > max_len)
6456 return false;
6457
6458 /* Make sure that the data is correctly formatted. */
6459 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
6460 cur_len = data[i];
6461
6462 if (data[i + 1] == EIR_FLAGS &&
6463 (!is_adv_data || flags_managed(adv_flags)))
6464 return false;
6465
6466 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
6467 return false;
6468
6469 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
6470 return false;
6471
6472 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
6473 return false;
6474
6475 if (data[i + 1] == EIR_APPEARANCE &&
6476 appearance_managed(adv_flags))
6477 return false;
6478
6479 /* If the current field length would exceed the total data
6480 * length, then it's invalid.
6481 */
6482 if (i + cur_len >= len)
6483 return false;
6484 }
6485
6486 return true;
6487 }
6488
/* Request-completion callback for Add Advertising.  On failure every
 * instance still marked pending is rolled back (removed and announced
 * as removed); on success the pending markers are simply cleared.
 * The originating mgmt command, if still outstanding, is then
 * answered.
 */
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_advertising *cp;
	struct mgmt_rp_add_advertising rp;
	struct adv_info *adv_instance, *n;
	u8 instance;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		if (!adv_instance->pending)
			continue;

		if (!status) {
			adv_instance->pending = false;
			continue;
		}

		instance = adv_instance->instance;

		/* Cancel any pending timeout before removing the
		 * instance it belongs to.
		 */
		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
	}

	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	if (status)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6540
/* Add Advertising mgmt command: registers (or replaces) an advertising
 * instance and, when possible, schedules it to go on air.  Completes
 * synchronously when no HCI traffic is needed, otherwise via
 * add_advertising_complete().
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u32 supported_flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Variable-length payload must match the two embedded lengths */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	/* The current implementation only supports a subset of the specified
	 * flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	if (flags & ~supported_flags)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout only makes sense on a powered controller */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* Refuse while another advertising/LE state change is in flight */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);

	if (!err)
		err = hci_req_run(&req, add_advertising_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6681
6682 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
6683 u16 opcode)
6684 {
6685 struct mgmt_pending_cmd *cmd;
6686 struct mgmt_cp_remove_advertising *cp;
6687 struct mgmt_rp_remove_advertising rp;
6688
6689 BT_DBG("status %d", status);
6690
6691 hci_dev_lock(hdev);
6692
6693 /* A failure status here only means that we failed to disable
6694 * advertising. Otherwise, the advertising instance has been removed,
6695 * so report success.
6696 */
6697 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
6698 if (!cmd)
6699 goto unlock;
6700
6701 cp = cmd->param;
6702 rp.instance = cp->instance;
6703
6704 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
6705 &rp, sizeof(rp));
6706 mgmt_pending_remove(cmd);
6707
6708 unlock:
6709 hci_dev_unlock(hdev);
6710 }
6711
/* Remove Advertising mgmt command: unregisters one advertising
 * instance (instance 0 is accepted without an existence check and is
 * passed straight to hci_req_clear_adv_instance) and disables
 * advertising when no instance remains.  Completes synchronously when
 * no HCI traffic is required, otherwise via
 * remove_advertising_complete().
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_rp_remove_advertising rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* A non-zero instance must actually exist */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Refuse while another advertising/LE state change is in flight */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	/* If no HCI commands have been collected so far or the HCI_ADVERTISING
	 * flag is set or the device isn't powered then we have no HCI
	 * communication to make. Simply return.
	 */
	if (skb_queue_empty(&req.cmd_q) ||
	    !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		hci_req_purge(&req);
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_REMOVE_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_req_run(&req, remove_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6784
6785 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
6786 void *data, u16 data_len)
6787 {
6788 struct mgmt_cp_get_adv_size_info *cp = data;
6789 struct mgmt_rp_get_adv_size_info rp;
6790 u32 flags, supported_flags;
6791 int err;
6792
6793 BT_DBG("%s", hdev->name);
6794
6795 if (!lmp_le_capable(hdev))
6796 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6797 MGMT_STATUS_REJECTED);
6798
6799 if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
6800 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6801 MGMT_STATUS_INVALID_PARAMS);
6802
6803 flags = __le32_to_cpu(cp->flags);
6804
6805 /* The current implementation only supports a subset of the specified
6806 * flags.
6807 */
6808 supported_flags = get_supported_adv_flags(hdev);
6809 if (flags & ~supported_flags)
6810 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6811 MGMT_STATUS_INVALID_PARAMS);
6812
6813 rp.instance = cp->instance;
6814 rp.flags = cp->flags;
6815 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
6816 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
6817
6818 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6819 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6820
6821 return err;
6822 }
6823
/* Dispatch table for mgmt commands, indexed by opcode.  Each entry
 * provides the handler, the expected parameter length (a minimum when
 * HCI_MGMT_VAR_LEN is set), and flags stating whether the command runs
 * without a controller index (HCI_MGMT_NO_HDEV), is allowed on
 * unconfigured controllers (HCI_MGMT_UNCONFIGURED) or is available to
 * untrusted sockets (HCI_MGMT_UNTRUSTED).
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,         MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,      MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,          MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
};
6919
/* Notify mgmt listeners that a new controller index has appeared.
 *
 * Raw (HCI_QUIRK_RAW_DEVICE) controllers are never exposed over mgmt.
 * For primary controllers the legacy INDEX_ADDED or UNCONF_INDEX_ADDED
 * event is sent first, then the extended event (carrying type and bus)
 * is sent to sockets subscribed to HCI_MGMT_EXT_INDEX_EVENTS.
 */
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;	/* unconfigured controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;	/* configured controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;	/* AMP controller */
		break;
	default:
		/* Unknown device types are not announced at all */
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
6951
/* Notify mgmt listeners that a controller index is going away.
 *
 * For primary controllers all still-pending mgmt commands are first
 * completed with MGMT_STATUS_INVALID_INDEX, then the legacy and
 * extended index-removed events are sent.
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		/* Opcode 0 appears to match every pending command here;
		 * all of them are failed with INVALID_INDEX.
		 */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;	/* unconfigured controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;	/* configured controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;	/* AMP controller */
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
6986
6987 /* This function requires the caller holds hdev->lock */
6988 static void restart_le_actions(struct hci_dev *hdev)
6989 {
6990 struct hci_conn_params *p;
6991
6992 list_for_each_entry(p, &hdev->le_conn_params, list) {
6993 /* Needed for AUTO_OFF case where might not "really"
6994 * have been powered off.
6995 */
6996 list_del_init(&p->action);
6997
6998 switch (p->auto_connect) {
6999 case HCI_AUTO_CONN_DIRECT:
7000 case HCI_AUTO_CONN_ALWAYS:
7001 list_add(&p->action, &hdev->pend_le_conns);
7002 break;
7003 case HCI_AUTO_CONN_REPORT:
7004 list_add(&p->action, &hdev->pend_le_reports);
7005 break;
7006 default:
7007 break;
7008 }
7009 }
7010 }
7011
/* Finish a power-on attempt: on success re-arm LE auto-connect actions
 * and background scanning, then answer all pending Set Powered commands
 * and broadcast the new settings. Runs under hdev->lock.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_background_scan(hdev);
	}

	/* Complete every pending Set Powered command; settings_rsp
	 * records the initiating socket in match.sk.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
7034
/* Finish powering off: answer pending commands with an appropriate
 * failure status, announce a cleared class of device if needed and
 * broadcast the new settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Only announce a class-of-device reset if it was non-zero */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
7068
7069 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
7070 {
7071 struct mgmt_pending_cmd *cmd;
7072 u8 status;
7073
7074 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7075 if (!cmd)
7076 return;
7077
7078 if (err == -ERFKILL)
7079 status = MGMT_STATUS_RFKILLED;
7080 else
7081 status = MGMT_STATUS_FAILED;
7082
7083 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
7084
7085 mgmt_pending_remove(cmd);
7086 }
7087
7088 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
7089 bool persistent)
7090 {
7091 struct mgmt_ev_new_link_key ev;
7092
7093 memset(&ev, 0, sizeof(ev));
7094
7095 ev.store_hint = persistent;
7096 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
7097 ev.key.addr.type = BDADDR_BREDR;
7098 ev.key.type = key->type;
7099 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
7100 ev.key.pin_len = key->pin_len;
7101
7102 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
7103 }
7104
7105 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
7106 {
7107 switch (ltk->type) {
7108 case SMP_LTK:
7109 case SMP_LTK_SLAVE:
7110 if (ltk->authenticated)
7111 return MGMT_LTK_AUTHENTICATED;
7112 return MGMT_LTK_UNAUTHENTICATED;
7113 case SMP_LTK_P256:
7114 if (ltk->authenticated)
7115 return MGMT_LTK_P256_AUTH;
7116 return MGMT_LTK_P256_UNAUTH;
7117 case SMP_LTK_P256_DEBUG:
7118 return MGMT_LTK_P256_DEBUG;
7119 }
7120
7121 return MGMT_LTK_UNAUTHENTICATED;
7122 }
7123
/* Emit a New Long Term Key event towards user space, deciding via
 * store_hint whether the key is worth persisting.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.master = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
7166
7167 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
7168 {
7169 struct mgmt_ev_new_irk ev;
7170
7171 memset(&ev, 0, sizeof(ev));
7172
7173 ev.store_hint = persistent;
7174
7175 bacpy(&ev.rpa, &irk->rpa);
7176 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
7177 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
7178 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
7179
7180 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
7181 }
7182
/* Emit a New CSRK event, hinting storage only for identity addresses
 * (same policy as mgmt_new_ltk()).
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
7212
7213 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
7214 u8 bdaddr_type, u8 store_hint, u16 min_interval,
7215 u16 max_interval, u16 latency, u16 timeout)
7216 {
7217 struct mgmt_ev_new_conn_param ev;
7218
7219 if (!hci_is_identity_address(bdaddr, bdaddr_type))
7220 return;
7221
7222 memset(&ev, 0, sizeof(ev));
7223 bacpy(&ev.addr.bdaddr, bdaddr);
7224 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
7225 ev.store_hint = store_hint;
7226 ev.min_interval = cpu_to_le16(min_interval);
7227 ev.max_interval = cpu_to_le16(max_interval);
7228 ev.latency = cpu_to_le16(latency);
7229 ev.timeout = cpu_to_le16(timeout);
7230
7231 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
7232 }
7233
/* Send a Device Connected event including cached LE advertising data
 * or, for BR/EDR, the remote name and class of device as EIR fields.
 *
 * NOTE(review): buf is a fixed 512-byte stack buffer with no explicit
 * bounds check here; this relies on le_adv_data_len and name_len being
 * capped by the callers -- confirm before extending the EIR contents.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only append the class of device if it is non-zero */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
7270
7271 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
7272 {
7273 struct sock **sk = data;
7274
7275 cmd->cmd_complete(cmd, 0);
7276
7277 *sk = cmd->sk;
7278 sock_hold(*sk);
7279
7280 mgmt_pending_remove(cmd);
7281 }
7282
7283 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
7284 {
7285 struct hci_dev *hdev = data;
7286 struct mgmt_cp_unpair_device *cp = cmd->param;
7287
7288 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
7289
7290 cmd->cmd_complete(cmd, 0);
7291 mgmt_pending_remove(cmd);
7292 }
7293
7294 bool mgmt_powering_down(struct hci_dev *hdev)
7295 {
7296 struct mgmt_pending_cmd *cmd;
7297 struct mgmt_mode *cp;
7298
7299 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7300 if (!cmd)
7301 return false;
7302
7303 cp = cmd->param;
7304 if (!cp->val)
7305 return true;
7306
7307 return false;
7308 }
7309
/* Report a device disconnection to user space and complete any pending
 * Disconnect/Unpair commands referencing the connection.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only emit the event if the connection was announced earlier */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp hands back the initiating socket (referenced)
	 * so it can be skipped in the broadcast below.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
7345
/* Fail a pending Disconnect command matching the given address, and
 * flush any pending Unpair Device commands.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Only complete the command if it targeted this exact address */
	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
7371
/* Report a failed outgoing connection attempt to user space. */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
7391
7392 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
7393 {
7394 struct mgmt_ev_pin_code_request ev;
7395
7396 bacpy(&ev.addr.bdaddr, bdaddr);
7397 ev.addr.type = BDADDR_BREDR;
7398 ev.secure = secure;
7399
7400 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
7401 }
7402
7403 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7404 u8 status)
7405 {
7406 struct mgmt_pending_cmd *cmd;
7407
7408 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
7409 if (!cmd)
7410 return;
7411
7412 cmd->cmd_complete(cmd, mgmt_status(status));
7413 mgmt_pending_remove(cmd);
7414 }
7415
7416 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7417 u8 status)
7418 {
7419 struct mgmt_pending_cmd *cmd;
7420
7421 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
7422 if (!cmd)
7423 return;
7424
7425 cmd->cmd_complete(cmd, mgmt_status(status));
7426 mgmt_pending_remove(cmd);
7427 }
7428
7429 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7430 u8 link_type, u8 addr_type, u32 value,
7431 u8 confirm_hint)
7432 {
7433 struct mgmt_ev_user_confirm_request ev;
7434
7435 BT_DBG("%s", hdev->name);
7436
7437 bacpy(&ev.addr.bdaddr, bdaddr);
7438 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7439 ev.confirm_hint = confirm_hint;
7440 ev.value = cpu_to_le32(value);
7441
7442 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
7443 NULL);
7444 }
7445
7446 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7447 u8 link_type, u8 addr_type)
7448 {
7449 struct mgmt_ev_user_passkey_request ev;
7450
7451 BT_DBG("%s", hdev->name);
7452
7453 bacpy(&ev.addr.bdaddr, bdaddr);
7454 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7455
7456 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
7457 NULL);
7458 }
7459
7460 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7461 u8 link_type, u8 addr_type, u8 status,
7462 u8 opcode)
7463 {
7464 struct mgmt_pending_cmd *cmd;
7465
7466 cmd = pending_find(opcode, hdev);
7467 if (!cmd)
7468 return -ENOENT;
7469
7470 cmd->cmd_complete(cmd, mgmt_status(status));
7471 mgmt_pending_remove(cmd);
7472
7473 return 0;
7474 }
7475
/* Complete a pending User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
7482
/* Complete a pending User Confirm Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
7490
/* Complete a pending User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
7497
/* Complete a pending User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
7505
7506 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
7507 u8 link_type, u8 addr_type, u32 passkey,
7508 u8 entered)
7509 {
7510 struct mgmt_ev_passkey_notify ev;
7511
7512 BT_DBG("%s", hdev->name);
7513
7514 bacpy(&ev.addr.bdaddr, bdaddr);
7515 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7516 ev.passkey = __cpu_to_le32(passkey);
7517 ev.entered = entered;
7518
7519 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
7520 }
7521
/* Report an authentication failure for a connection and complete any
 * pending pairing command associated with it.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* cmd->sk is passed as the last argument -- presumably the
	 * socket to skip, so the initiator only sees the command
	 * response below; verify against mgmt_event()'s contract.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
7542
/* Handle completion of an HCI authentication-enable change: mirror the
 * controller state into HCI_LINK_SECURITY and answer pending Set Link
 * Security commands.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Track whether the flag actually flipped so a redundant
	 * New Settings event can be avoided.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
7569
7570 static void clear_eir(struct hci_request *req)
7571 {
7572 struct hci_dev *hdev = req->hdev;
7573 struct hci_cp_write_eir cp;
7574
7575 if (!lmp_ext_inq_capable(hdev))
7576 return;
7577
7578 memset(hdev->eir, 0, sizeof(hdev->eir));
7579
7580 memset(&cp, 0, sizeof(cp));
7581
7582 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
7583 }
7584
/* Handle completion of an SSP mode change: update the SSP_ENABLED and
 * HS_ENABLED flags, answer pending Set SSP commands and refresh or
 * clear the EIR data to match the new state.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* If enabling failed but the flag was already set, roll
		 * it (and dependent HS) back and announce the change.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		/* Disabling SSP also disables High Speed; "changed" is
		 * true if either flag actually flipped.
		 */
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		__hci_req_update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
7637
7638 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
7639 {
7640 struct cmd_lookup *match = data;
7641
7642 if (match->sk == NULL) {
7643 match->sk = cmd->sk;
7644 sock_hold(match->sk);
7645 }
7646 }
7647
/* Handle completion of a class-of-device change triggered by Set Dev
 * Class, Add UUID or Remove UUID; on success the new class is broadcast
 * to interested mgmt sockets.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Pick up a reference to whichever socket initiated the change;
	 * it is only released again below.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
7666
/* Handle completion of a local name change at the HCI level.
 *
 * If no Set Local Name command is pending the change was initiated
 * outside mgmt; the kernel copy is updated and, unless it happened as
 * part of powering on, listeners are still notified.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
7694
7695 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7696 {
7697 int i;
7698
7699 for (i = 0; i < uuid_count; i++) {
7700 if (!memcmp(uuid, uuids[i], 16))
7701 return true;
7702 }
7703
7704 return false;
7705 }
7706
/* Scan an EIR/advertising data blob for any UUID (16, 32 or 128 bit)
 * that appears in the given list of 128-bit UUIDs. Shorter UUIDs are
 * expanded with the Bluetooth base UUID before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero length field terminates significant data */
		if (field_len == 0)
			break;

		/* Stop if the field would overrun the buffer */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* Field data starts at eir[2]; little-endian
			 * values land in bytes 12-13 of the base UUID.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past the length byte plus field contents */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
7761
/* Queue a delayed restart of the ongoing LE scan, used so controllers
 * with strict duplicate filtering report fresh RSSI values.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Skip the restart when the delay would push it past the
	 * planned end of the scan window (scan_start + scan_duration).
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
7776
/* Apply the active service-discovery filter (RSSI threshold plus UUID
 * list) to a found device. Returns true when the result should be
 * forwarded to user space.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped. Both the advertising data
		 * and the scan response are searched.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
7821
/* Report a discovered device to user space after applying discovery
 * state, service-discovery filters and limited-discovery checks.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Append the class of device unless the EIR already carries one */
	if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
				       NULL))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
7903
/* Forward a resolved remote name as a Device Found event carrying an
 * EIR_NAME_COMPLETE field.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct mgmt_ev_device_found *ev;
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
	u16 eir_len;

	ev = (struct mgmt_ev_device_found *) buf;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	/* The +2 in buf covers the type/length header bytes added by
	 * eir_append_data(); assumes name_len <= HCI_MAX_NAME_LENGTH
	 * -- TODO confirm callers cap it.
	 */
	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
}
7926
7927 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
7928 {
7929 struct mgmt_ev_discovering ev;
7930
7931 BT_DBG("%s discovering %u", hdev->name, discovering);
7932
7933 memset(&ev, 0, sizeof(ev));
7934 ev.type = hdev->discovery.type;
7935 ev.discovering = discovering;
7936
7937 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
7938 }
7939
/* mgmt lives on the HCI control channel; commands are dispatched via
 * the mgmt_handlers table and each hdev is set up by mgmt_init_hdev.
 */
static struct hci_mgmt_chan chan = {
	.channel = HCI_CHANNEL_CONTROL,
	.handler_count = ARRAY_SIZE(mgmt_handlers),
	.handlers = mgmt_handlers,
	.hdev_init = mgmt_init_hdev,
};
7946
/* Register the mgmt control channel with the HCI socket layer. */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
7951
/* Unregister the mgmt control channel on module teardown. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}