mirror of
https://github.com/torvalds/linux.git
synced 2026-05-12 16:18:45 +02:00
Bluetooth: hci_sync: annotate data-races around hdev->req_status
__hci_cmd_sync_sk() sets hdev->req_status under hdev->req_lock:
hdev->req_status = HCI_REQ_PEND;
However, several other functions read or write hdev->req_status without
holding any lock:
- hci_send_cmd_sync() reads req_status in hci_cmd_work (workqueue)
- hci_cmd_sync_complete() reads/writes from HCI event completion
- hci_cmd_sync_cancel() / hci_cmd_sync_cancel_sync() read/write
- hci_abort_conn() reads in connection abort path
Since __hci_cmd_sync_sk() runs on hdev->req_workqueue while
hci_send_cmd_sync() runs on hdev->workqueue, these are different
workqueues that can execute concurrently on different CPUs. The plain
C accesses constitute a data race.
Add READ_ONCE()/WRITE_ONCE() annotations on all concurrent accesses
to hdev->req_status to prevent potential compiler optimizations that
could affect correctness (e.g., load fusing in the wait_event
condition or store reordering).
Signed-off-by: Cen Zhang <zzzccc427@gmail.com>
Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
This commit is contained in:
parent
5f5fa4cd35
commit
b6807cfc19
|
|
@@ -3095,7 +3095,7 @@ int hci_abort_conn(struct hci_conn *conn, u8 reason)
|
|||
* hci_connect_le serializes the connection attempts so only one
|
||||
* connection can be in BT_CONNECT at time.
|
||||
*/
|
||||
if (conn->state == BT_CONNECT && hdev->req_status == HCI_REQ_PEND) {
|
||||
if (conn->state == BT_CONNECT && READ_ONCE(hdev->req_status) == HCI_REQ_PEND) {
|
||||
switch (hci_skb_event(hdev->sent_cmd)) {
|
||||
case HCI_EV_CONN_COMPLETE:
|
||||
case HCI_EV_LE_CONN_COMPLETE:
|
||||
|
|
|
|||
|
|
@@ -4126,7 +4126,7 @@ static int hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
|
|||
kfree_skb(skb);
|
||||
}
|
||||
|
||||
if (hdev->req_status == HCI_REQ_PEND &&
|
||||
if (READ_ONCE(hdev->req_status) == HCI_REQ_PEND &&
|
||||
!hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
|
||||
kfree_skb(hdev->req_skb);
|
||||
hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
|
||||
|
|
|
|||
|
|
@@ -25,11 +25,11 @@ static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
|
|||
{
|
||||
bt_dev_dbg(hdev, "result 0x%2.2x", result);
|
||||
|
||||
if (hdev->req_status != HCI_REQ_PEND)
|
||||
if (READ_ONCE(hdev->req_status) != HCI_REQ_PEND)
|
||||
return;
|
||||
|
||||
hdev->req_result = result;
|
||||
hdev->req_status = HCI_REQ_DONE;
|
||||
WRITE_ONCE(hdev->req_status, HCI_REQ_DONE);
|
||||
|
||||
/* Free the request command so it is not used as response */
|
||||
kfree_skb(hdev->req_skb);
|
||||
|
|
@@ -167,20 +167,20 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
|
|||
|
||||
hci_cmd_sync_add(&req, opcode, plen, param, event, sk);
|
||||
|
||||
hdev->req_status = HCI_REQ_PEND;
|
||||
WRITE_ONCE(hdev->req_status, HCI_REQ_PEND);
|
||||
|
||||
err = hci_req_sync_run(&req);
|
||||
if (err < 0)
|
||||
return ERR_PTR(err);
|
||||
|
||||
err = wait_event_interruptible_timeout(hdev->req_wait_q,
|
||||
hdev->req_status != HCI_REQ_PEND,
|
||||
READ_ONCE(hdev->req_status) != HCI_REQ_PEND,
|
||||
timeout);
|
||||
|
||||
if (err == -ERESTARTSYS)
|
||||
return ERR_PTR(-EINTR);
|
||||
|
||||
switch (hdev->req_status) {
|
||||
switch (READ_ONCE(hdev->req_status)) {
|
||||
case HCI_REQ_DONE:
|
||||
err = -bt_to_errno(hdev->req_result);
|
||||
break;
|
||||
|
|
@@ -194,7 +194,7 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
|
|||
break;
|
||||
}
|
||||
|
||||
hdev->req_status = 0;
|
||||
WRITE_ONCE(hdev->req_status, 0);
|
||||
hdev->req_result = 0;
|
||||
skb = hdev->req_rsp;
|
||||
hdev->req_rsp = NULL;
|
||||
|
|
@@ -665,9 +665,9 @@ void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
|
|||
{
|
||||
bt_dev_dbg(hdev, "err 0x%2.2x", err);
|
||||
|
||||
if (hdev->req_status == HCI_REQ_PEND) {
|
||||
if (READ_ONCE(hdev->req_status) == HCI_REQ_PEND) {
|
||||
hdev->req_result = err;
|
||||
hdev->req_status = HCI_REQ_CANCELED;
|
||||
WRITE_ONCE(hdev->req_status, HCI_REQ_CANCELED);
|
||||
|
||||
queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work);
|
||||
}
|
||||
|
|
@@ -683,12 +683,12 @@ void hci_cmd_sync_cancel_sync(struct hci_dev *hdev, int err)
|
|||
{
|
||||
bt_dev_dbg(hdev, "err 0x%2.2x", err);
|
||||
|
||||
if (hdev->req_status == HCI_REQ_PEND) {
|
||||
if (READ_ONCE(hdev->req_status) == HCI_REQ_PEND) {
|
||||
/* req_result is __u32 so error must be positive to be properly
|
||||
* propagated.
|
||||
*/
|
||||
hdev->req_result = err < 0 ? -err : err;
|
||||
hdev->req_status = HCI_REQ_CANCELED;
|
||||
WRITE_ONCE(hdev->req_status, HCI_REQ_CANCELED);
|
||||
|
||||
wake_up_interruptible(&hdev->req_wait_q);
|
||||
}
|
||||
|
|
|
|||
Loading…
Reference in New Issue
Block a user