Subject: [PATCH 1/1] Bluetooth: Prioritize SCO traffic on slow interfaces
When scheduling TX packets, send all SCO/eSCO packets first, then send
at most one ACL/LE packet per loop iteration, checking between
iterations that no SCO packets are pending. This ensures we can meet
SCO deadlines on slow interfaces such as UART. If we queued up multiple
ACL packets without checking for pending SCO packets, we could miss the
SCO timing. For example:

The time it takes to send a maximum-size ACL packet (1024 bytes) is:
t = 10/8 * 1024 bytes * 8 bits/byte / baudrate
where the factor 10/8 is the UART overhead from the start/stop bits on
each byte.

Substituting t = 3.75 ms (the SCO deadline) gives a baudrate of 2730666,
which is close to 3000000, a common baudrate for BT. At that baudrate,
sending two 1024-byte ACL packets back to back would overrun the 3.75 ms
window.
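
To make the arithmetic above concrete, here is a small standalone C
sketch of the same calculation. The 1024-byte packet size, the 10/8 UART
factor and the 3.75 ms deadline are the example values from this commit
message, not constants taken from the kernel headers:

#include <stdio.h>

int main(void)
{
	const double acl_bytes = 1024.0;       /* max ACL packet in the example    */
	const double uart_factor = 10.0 / 8.0; /* start/stop bit overhead per byte */
	const double sco_deadline = 0.00375;   /* 3.75 ms SCO deadline, in seconds */

	/* bits clocked over the UART for one ACL packet */
	double wire_bits = uart_factor * acl_bytes * 8.0;

	/* slowest baudrate that still fits one packet inside the deadline */
	double min_baud = wire_bits / sco_deadline;

	printf("wire bits per packet: %.0f\n", wire_bits); /* 10240    */
	printf("minimum baudrate:     %.0f\n", min_baud);  /* ~2730667 */

	/* at 3 Mbaud, only ~1.1 such packets fit per 3.75 ms window */
	printf("packets per window at 3 Mbaud: %.2f\n",
	       3000000.0 * sco_deadline / wire_bits);

	return 0;
}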

Signed-off-by: Abhishek Pandit-Subedi <abhishekpandit@chromium.org>
---

include/net/bluetooth/hci_core.h | 1 +
net/bluetooth/hci_core.c | 91 +++++++++++++++++++++++++-------
2 files changed, 73 insertions(+), 19 deletions(-)

diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index d4e28773d378..f636c89f1fe1 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -315,6 +315,7 @@ struct hci_dev {
__u8 ssp_debug_mode;
__u8 hw_error_code;
__u32 clock;
+ __u8 sched_limit;

__u16 devid_source;
__u16 devid_vendor;
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index dbd2ad3a26ed..00a72265cd96 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -4239,18 +4239,32 @@ static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
}
}

-static void hci_sched_acl_pkt(struct hci_dev *hdev)
+/* Limit packets in flight when SCO/eSCO links are active. */
+static bool hci_sched_limit(struct hci_dev *hdev)
+{
+ return hdev->sched_limit && hci_conn_num(hdev, SCO_LINK);
+}
+
+static bool hci_sched_acl_pkt(struct hci_dev *hdev)
{
unsigned int cnt = hdev->acl_cnt;
struct hci_chan *chan;
struct sk_buff *skb;
int quote;
+ bool sched_limit = hci_sched_limit(hdev);
+ bool resched = false;

__check_timeout(hdev, cnt);

while (hdev->acl_cnt &&
(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
u32 priority = (skb_peek(&chan->data_q))->priority;
+
+ if (sched_limit && quote > 0) {
+ resched = true;
+ quote = 1;
+ }
+
while (quote-- && (skb = skb_peek(&chan->data_q))) {
BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
skb->len, skb->priority);
@@ -4271,19 +4285,26 @@ static void hci_sched_acl_pkt(struct hci_dev *hdev)
chan->sent++;
chan->conn->sent++;
}
+
+ if (resched && cnt != hdev->acl_cnt)
+ break;
}

- if (cnt != hdev->acl_cnt)
+ if (hdev->acl_cnt == 0 && cnt != hdev->acl_cnt)
hci_prio_recalculate(hdev, ACL_LINK);
+
+ return resched;
}

-static void hci_sched_acl_blk(struct hci_dev *hdev)
+static bool hci_sched_acl_blk(struct hci_dev *hdev)
{
unsigned int cnt = hdev->block_cnt;
struct hci_chan *chan;
struct sk_buff *skb;
int quote;
u8 type;
+ bool sched_limit = hci_sched_limit(hdev);
+ bool resched = false;

__check_timeout(hdev, cnt);

@@ -4297,6 +4318,12 @@ static void hci_sched_acl_blk(struct hci_dev *hdev)
while (hdev->block_cnt > 0 &&
(chan = hci_chan_sent(hdev, type, &quote))) {
u32 priority = (skb_peek(&chan->data_q))->priority;
+
+ if (sched_limit && quote > 0) {
+ resched = true;
+ quote = 1;
+ }
+
while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
int blocks;

@@ -4311,7 +4338,7 @@ static void hci_sched_acl_blk(struct hci_dev *hdev)

blocks = __get_blocks(hdev, skb);
if (blocks > hdev->block_cnt)
- return;
+ return false;

hci_conn_enter_active_mode(chan->conn,
bt_cb(skb)->force_active);
@@ -4325,33 +4352,39 @@ static void hci_sched_acl_blk(struct hci_dev *hdev)
chan->sent += blocks;
chan->conn->sent += blocks;
}
+
+ if (resched && cnt != hdev->block_cnt)
+ break;
}

- if (cnt != hdev->block_cnt)
+ if (hdev->block_cnt == 0 && cnt != hdev->block_cnt)
hci_prio_recalculate(hdev, type);
+
+ return resched;
}

-static void hci_sched_acl(struct hci_dev *hdev)
+static bool hci_sched_acl(struct hci_dev *hdev)
{
BT_DBG("%s", hdev->name);

/* No ACL link over BR/EDR controller */
if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
- return;
+ goto done;

/* No AMP link over AMP controller */
if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
- return;
+ goto done;

switch (hdev->flow_ctl_mode) {
case HCI_FLOW_CTL_MODE_PACKET_BASED:
- hci_sched_acl_pkt(hdev);
- break;
+ return hci_sched_acl_pkt(hdev);

case HCI_FLOW_CTL_MODE_BLOCK_BASED:
- hci_sched_acl_blk(hdev);
- break;
+ return hci_sched_acl_blk(hdev);
}
+
+done:
+ return false;
}

/* Schedule SCO */
@@ -4402,16 +4435,18 @@ static void hci_sched_esco(struct hci_dev *hdev)
}
}

-static void hci_sched_le(struct hci_dev *hdev)
+static bool hci_sched_le(struct hci_dev *hdev)
{
struct hci_chan *chan;
struct sk_buff *skb;
int quote, cnt, tmp;
+ bool sched_limit = hci_sched_limit(hdev);
+ bool resched = false;

BT_DBG("%s", hdev->name);

if (!hci_conn_num(hdev, LE_LINK))
- return;
+ return resched;

cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;

@@ -4420,6 +4455,12 @@ static void hci_sched_le(struct hci_dev *hdev)
tmp = cnt;
while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
u32 priority = (skb_peek(&chan->data_q))->priority;
+
+ if (sched_limit && quote > 0) {
+ resched = true;
+ quote = 1;
+ }
+
while (quote-- && (skb = skb_peek(&chan->data_q))) {
BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
skb->len, skb->priority);
@@ -4437,6 +4478,9 @@ static void hci_sched_le(struct hci_dev *hdev)
chan->sent++;
chan->conn->sent++;
}
+
+ if (resched && cnt != tmp)
+ break;
}

if (hdev->le_pkts)
@@ -4444,24 +4488,33 @@ static void hci_sched_le(struct hci_dev *hdev)
else
hdev->acl_cnt = cnt;

- if (cnt != tmp)
+ if (cnt == 0 && cnt != tmp)
hci_prio_recalculate(hdev, LE_LINK);
+
+ return resched;
}

static void hci_tx_work(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
struct sk_buff *skb;
+ bool resched;

BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
hdev->sco_cnt, hdev->le_cnt);

if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
/* Schedule queues and send stuff to HCI driver */
- hci_sched_acl(hdev);
- hci_sched_sco(hdev);
- hci_sched_esco(hdev);
- hci_sched_le(hdev);
+ do {
+ /* SCO and eSCO send all packets until emptied */
+ hci_sched_sco(hdev);
+ hci_sched_esco(hdev);
+
+ /* ACL and LE send based on their quota (ACL is given
+ * priority on each loop iteration)
+ */
+ resched = hci_sched_acl(hdev) || hci_sched_le(hdev);
+ } while (resched);
}

/* Send next queued raw (unknown type) packet */
--
2.25.1.481.gfbce0eb801-goog
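
For readers skimming the diff, the program below is a minimal userspace
sketch of the scheduling shape hci_tx_work() takes with this patch
applied while a SCO link is active: SCO/eSCO are drained completely on
every pass, while ACL and LE send at most one packet per pass and ask to
be rescheduled while they still have work queued. The queue counters and
helper names are invented for illustration and are not the kernel API.

#include <stdbool.h>
#include <stdio.h>

static int sco_q = 2, acl_q = 4, le_q = 3;   /* pretend packet queues */

static void sched_sco(void)
{
	while (sco_q > 0) {          /* drain every queued SCO packet */
		printf("tx SCO\n");
		sco_q--;
	}
}

static bool sched_acl(void)
{
	if (acl_q == 0)
		return false;
	printf("tx ACL\n");          /* at most one packet per pass */
	return --acl_q > 0;          /* true: please schedule another pass */
}

static bool sched_le(void)
{
	if (le_q == 0)
		return false;
	printf("tx LE\n");           /* at most one packet per pass */
	return --le_q > 0;
}

int main(void)
{
	bool resched;

	do {
		sched_sco();         /* SCO/eSCO always go first */
		/* ACL is favoured: LE only sends on passes where ACL
		 * does not request a reschedule
		 */
		resched = sched_acl() || sched_le();
	} while (resched);

	return 0;
}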