author    Johan Hedberg <johan.hedberg@intel.com>    2015-11-16 13:52:21 +0300
committer Marcel Holtmann <marcel@holtmann.org>      2015-12-10 02:51:47 +0300
commit    01b1cb87d37fb19cdaa5e7002416fdde156873d0 (patch)
tree      87fdc0ba8b3872cea7267fb3a3b3b7ed101ffbba /net/bluetooth/hci_request.c
parent    ad2c8c73d29702c3193f739390f6661f9a4ecad9 (diff)
download  linux-01b1cb87d37fb19cdaa5e7002416fdde156873d0.tar.xz
Bluetooth: Run page scan updates through hdev->req_workqueue
Since Add/Remove Device perform the page scan updates independently from the
HCI command completion, we've introduced a potential race when multiple mgmt
commands are queued. Doing the page scan updates through the req_workqueue
ensures that the state changes are performed in a race-free manner.

At the same time, to make the request helper more widely usable, extend it to
also cover Inquiry Scan changes, since those are behind the same HCI command.
This is also reflected in the new name of the API as well as the work struct
name.

Signed-off-by: Johan Hedberg <johan.hedberg@intel.com>
Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
Diffstat (limited to 'net/bluetooth/hci_request.c')
-rw-r--r--	net/bluetooth/hci_request.c	27
1 file changed, 18 insertions, 9 deletions
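The diff below only covers hci_request.c; the new work item is useful because callers can queue it on hdev->req_workqueue, which serializes all scan-enable updates on one ordered context. The helper shown here is a minimal sketch of that caller-side entry point; its name (hci_req_update_scan) and placement in hci_request.h are assumptions, since the companion header and mgmt.c changes are not part of this diff.

/*
 * Sketch only: a plausible queueing helper for callers such as the
 * Add/Remove Device mgmt handlers. Name and location are assumed;
 * the hci_request.h/mgmt.c side is outside this diff.
 */
static inline void hci_req_update_scan(struct hci_dev *hdev)
{
	queue_work(hdev->req_workqueue, &hdev->scan_update);
}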
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index e639671f54bd..78c026b4ffa1 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -637,7 +637,7 @@ static bool disconnected_whitelist_entries(struct hci_dev *hdev)
 	return false;
 }
 
-void __hci_update_page_scan(struct hci_request *req)
+void __hci_req_update_scan(struct hci_request *req)
 {
 	struct hci_dev *hdev = req->hdev;
 	u8 scan;
@@ -657,22 +657,29 @@ void __hci_update_page_scan(struct hci_request *req)
 	else
 		scan = SCAN_DISABLED;
 
-	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
-		return;
-
 	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
 		scan |= SCAN_INQUIRY;
 
+	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
+	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
+		return;
+
 	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 }
 
-void hci_update_page_scan(struct hci_dev *hdev)
+static int update_scan(struct hci_request *req, unsigned long opt)
 {
-	struct hci_request req;
+	hci_dev_lock(req->hdev);
+	__hci_req_update_scan(req);
+	hci_dev_unlock(req->hdev);
+	return 0;
+}
 
-	hci_req_init(&req, hdev);
-	__hci_update_page_scan(&req);
-	hci_req_run(&req, NULL);
+static void scan_update_work(struct work_struct *work)
+{
+	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
+
+	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
 }
 
 /* This function controls the background scanning based on hdev->pend_le_conns
@@ -1270,6 +1277,7 @@ void hci_request_setup(struct hci_dev *hdev)
 {
 	INIT_WORK(&hdev->discov_update, discov_update);
 	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
+	INIT_WORK(&hdev->scan_update, scan_update_work);
 	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
 	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
 }
@@ -1280,6 +1288,7 @@ void hci_request_cancel_all(struct hci_dev *hdev)
 
 	cancel_work_sync(&hdev->discov_update);
 	cancel_work_sync(&hdev->bg_scan_update);
+	cancel_work_sync(&hdev->scan_update);
 	cancel_delayed_work_sync(&hdev->le_scan_disable);
 	cancel_delayed_work_sync(&hdev->le_scan_restart);
 }
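For completeness, a hedged caller-side sketch: the mgmt.c handlers are not shown in this diff, but with the work item above they would presumably queue the scan update (here via the assumed hci_req_update_scan helper sketched earlier) instead of building and running an hci_request inline, so concurrent Add/Remove Device commands cannot race on the scan-enable state.

/* Hypothetical caller pattern (mgmt.c is outside this diff) */
hci_dev_lock(hdev);
/* ... add or remove the entry in hdev->whitelist ... */
hci_req_update_scan(hdev);	/* queues hdev->scan_update on req_workqueue */
hci_dev_unlock(hdev);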