git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
Bluetooth: hci_sync: annotate data-races around hdev->req_status
author: Cen Zhang <zzzccc427@gmail.com>
Sun, 15 Mar 2026 12:07:26 +0000 (20:07 +0800)
committer: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
Thu, 19 Mar 2026 18:43:20 +0000 (14:43 -0400)
__hci_cmd_sync_sk() sets hdev->req_status under hdev->req_lock:

    hdev->req_status = HCI_REQ_PEND;

However, several other functions read or write hdev->req_status without
holding any lock:

  - hci_send_cmd_sync() reads req_status in hci_cmd_work (workqueue)
  - hci_cmd_sync_complete() reads/writes from HCI event completion
  - hci_cmd_sync_cancel() / hci_cmd_sync_cancel_sync() read/write
  - hci_abort_conn() reads in connection abort path

Since __hci_cmd_sync_sk() runs on hdev->req_workqueue while
hci_send_cmd_sync() runs on hdev->workqueue, these are different
workqueues that can execute concurrently on different CPUs. The plain
C accesses constitute a data race.

Add READ_ONCE()/WRITE_ONCE() annotations on all concurrent accesses
to hdev->req_status to prevent potential compiler optimizations that
could affect correctness (e.g., load fusing in the wait_event
condition or store reordering).

Signed-off-by: Cen Zhang <zzzccc427@gmail.com>
Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
net/bluetooth/hci_conn.c
net/bluetooth/hci_core.c
net/bluetooth/hci_sync.c

index 6eb59e9f2aa834030c885c2d721f70bae9d71b2d..e6393f17576b0bc1c181aedee4537b7102432967 100644 (file)
@@ -3095,7 +3095,7 @@ int hci_abort_conn(struct hci_conn *conn, u8 reason)
         * hci_connect_le serializes the connection attempts so only one
         * connection can be in BT_CONNECT at time.
         */
-       if (conn->state == BT_CONNECT && hdev->req_status == HCI_REQ_PEND) {
+       if (conn->state == BT_CONNECT && READ_ONCE(hdev->req_status) == HCI_REQ_PEND) {
                switch (hci_skb_event(hdev->sent_cmd)) {
                case HCI_EV_CONN_COMPLETE:
                case HCI_EV_LE_CONN_COMPLETE:
index 31308c1de4ec3f0038872a963d6c3d9abf66d93f..01f8ceeb1c0c84391616542af1e4c9194c2606ab 100644 (file)
@@ -4126,7 +4126,7 @@ static int hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
                kfree_skb(skb);
        }
 
-       if (hdev->req_status == HCI_REQ_PEND &&
+       if (READ_ONCE(hdev->req_status) == HCI_REQ_PEND &&
            !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
                kfree_skb(hdev->req_skb);
                hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
index 3166914b0d6ce3e56ced2f9a7914427296d38c48..45d16639874a0b82663fe42d46bf66e013122cc1 100644 (file)
@@ -25,11 +25,11 @@ static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
 {
        bt_dev_dbg(hdev, "result 0x%2.2x", result);
 
-       if (hdev->req_status != HCI_REQ_PEND)
+       if (READ_ONCE(hdev->req_status) != HCI_REQ_PEND)
                return;
 
        hdev->req_result = result;
-       hdev->req_status = HCI_REQ_DONE;
+       WRITE_ONCE(hdev->req_status, HCI_REQ_DONE);
 
        /* Free the request command so it is not used as response */
        kfree_skb(hdev->req_skb);
@@ -167,20 +167,20 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
 
        hci_cmd_sync_add(&req, opcode, plen, param, event, sk);
 
-       hdev->req_status = HCI_REQ_PEND;
+       WRITE_ONCE(hdev->req_status, HCI_REQ_PEND);
 
        err = hci_req_sync_run(&req);
        if (err < 0)
                return ERR_PTR(err);
 
        err = wait_event_interruptible_timeout(hdev->req_wait_q,
-                                              hdev->req_status != HCI_REQ_PEND,
+                                              READ_ONCE(hdev->req_status) != HCI_REQ_PEND,
                                               timeout);
 
        if (err == -ERESTARTSYS)
                return ERR_PTR(-EINTR);
 
-       switch (hdev->req_status) {
+       switch (READ_ONCE(hdev->req_status)) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;
@@ -194,7 +194,7 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
                break;
        }
 
-       hdev->req_status = 0;
+       WRITE_ONCE(hdev->req_status, 0);
        hdev->req_result = 0;
        skb = hdev->req_rsp;
        hdev->req_rsp = NULL;
@@ -665,9 +665,9 @@ void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
 {
        bt_dev_dbg(hdev, "err 0x%2.2x", err);
 
-       if (hdev->req_status == HCI_REQ_PEND) {
+       if (READ_ONCE(hdev->req_status) == HCI_REQ_PEND) {
                hdev->req_result = err;
-               hdev->req_status = HCI_REQ_CANCELED;
+               WRITE_ONCE(hdev->req_status, HCI_REQ_CANCELED);
 
                queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work);
        }
@@ -683,12 +683,12 @@ void hci_cmd_sync_cancel_sync(struct hci_dev *hdev, int err)
 {
        bt_dev_dbg(hdev, "err 0x%2.2x", err);
 
-       if (hdev->req_status == HCI_REQ_PEND) {
+       if (READ_ONCE(hdev->req_status) == HCI_REQ_PEND) {
                /* req_result is __u32 so error must be positive to be properly
                 * propagated.
                 */
                hdev->req_result = err < 0 ? -err : err;
-               hdev->req_status = HCI_REQ_CANCELED;
+               WRITE_ONCE(hdev->req_status, HCI_REQ_CANCELED);
 
                wake_up_interruptible(&hdev->req_wait_q);
        }