Diffstat (limited to 'drivers/net/wireless/rt2x00/rt2x00queue.c')
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00queue.c | 70
1 file changed, 30 insertions(+), 40 deletions(-)
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 451d410ecda..b8de9d2750e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -230,8 +230,15 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
/*
* Determine retry information.
*/
- txdesc->retry_limit = tx_info->control.retry_limit;
- if (tx_info->flags & IEEE80211_TX_CTL_LONG_RETRY_LIMIT)
+ txdesc->retry_limit = tx_info->control.rates[0].count - 1;
+ /*
+ * XXX: If at this point we knew whether the HW is going to use
+ * the RETRY_MODE bit or the retry_limit (currently all
+ * use the RETRY_MODE bit) we could do something like b43
+ * does, set the RETRY_MODE bit when the RC algorithm is
+ * requesting more than the long retry limit.
+ */
+ if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);
/*
@@ -312,8 +319,8 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
/*
* Convert length to microseconds.
*/
- residual = get_duration_res(data_length, hwrate->bitrate);
- duration = get_duration(data_length, hwrate->bitrate);
+ residual = GET_DURATION_RES(data_length, hwrate->bitrate);
+ duration = GET_DURATION(data_length, hwrate->bitrate);
if (residual != 0) {
duration++;
@@ -371,13 +378,15 @@ static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb)
{
+ struct ieee80211_tx_info *tx_info;
struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
struct txentry_desc txdesc;
struct skb_frame_desc *skbdesc;
unsigned int iv_len = 0;
+ u8 rate_idx, rate_flags;
if (unlikely(rt2x00queue_full(queue)))
- return -EINVAL;
+ return -ENOBUFS;
if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) {
ERROR(queue->rt2x00dev,
@@ -399,13 +408,18 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb)
iv_len = IEEE80211_SKB_CB(skb)->control.hw_key->iv_len;
/*
- * All information is retreived from the skb->cb array,
+ * All information is retrieved from the skb->cb array,
* now we should claim ownership of the driver part of that
- * array.
+ * array, preserving the bitrate index and flags.
*/
- skbdesc = get_skb_frame_desc(entry->skb);
+ tx_info = IEEE80211_SKB_CB(skb);
+ rate_idx = tx_info->control.rates[0].idx;
+ rate_flags = tx_info->control.rates[0].flags;
+ skbdesc = get_skb_frame_desc(skb);
memset(skbdesc, 0, sizeof(*skbdesc));
skbdesc->entry = entry;
+ skbdesc->tx_rate_idx = rate_idx;
+ skbdesc->tx_rate_flags = rate_flags;
/*
* When hardware encryption is supported, and this frame
@@ -413,20 +427,18 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb)
 * the frame so we can provide it to the driver separately.
*/
if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
- !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
+ !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags))
rt2x00crypto_tx_remove_iv(skb, iv_len);
- }
/*
* It could be possible that the queue was corrupted and this
- * call failed. Just drop the frame, we cannot rollback and pass
- * the frame to mac80211 because the skb->cb has now been tainted.
+ * call failed. Since we always return NETDEV_TX_OK to mac80211,
+ * this frame will simply be dropped.
*/
if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry))) {
clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
- dev_kfree_skb_any(entry->skb);
entry->skb = NULL;
- return 0;
+ return -EIO;
}
if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
@@ -556,7 +568,7 @@ void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
queue->length++;
} else if (index == Q_INDEX_DONE) {
queue->length--;
- queue->count ++;
+ queue->count++;
}
spin_unlock_irqrestore(&queue->lock, irqflags);
@@ -575,40 +587,18 @@ static void rt2x00queue_reset(struct data_queue *queue)
spin_unlock_irqrestore(&queue->lock, irqflags);
}
-void rt2x00queue_init_rx(struct rt2x00_dev *rt2x00dev)
-{
- struct data_queue *queue = rt2x00dev->rx;
- unsigned int i;
-
- rt2x00queue_reset(queue);
-
- if (!rt2x00dev->ops->lib->init_rxentry)
- return;
-
- for (i = 0; i < queue->limit; i++) {
- queue->entries[i].flags = 0;
-
- rt2x00dev->ops->lib->init_rxentry(rt2x00dev,
- &queue->entries[i]);
- }
-}
-
-void rt2x00queue_init_tx(struct rt2x00_dev *rt2x00dev)
+void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
struct data_queue *queue;
unsigned int i;
- txall_queue_for_each(rt2x00dev, queue) {
+ queue_for_each(rt2x00dev, queue) {
rt2x00queue_reset(queue);
- if (!rt2x00dev->ops->lib->init_txentry)
- continue;
-
for (i = 0; i < queue->limit; i++) {
queue->entries[i].flags = 0;
- rt2x00dev->ops->lib->init_txentry(rt2x00dev,
- &queue->entries[i]);
+ rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
}
}
}
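
The retry hunk above maps the new mac80211 rate-control fields onto the rt2x00 TX descriptor. The sketch below is illustrative only: the structures are simplified stand-ins (not the real ieee80211_tx_info/txentry_desc layouts) used to show the count-to-retry_limit conversion and the RTS/CTS-driven RETRY_MODE choice discussed in the XXX comment.

/*
 * Illustrative sketch, not part of the patch: the struct layouts below are
 * simplified stand-ins for the mac80211 ieee80211_tx_rate and the rt2x00
 * txentry_desc, kept self-contained so the mapping can be compiled alone.
 */
#include <stdbool.h>
#include <stdio.h>

#define TX_RC_USE_RTS_CTS (1 << 0)	/* stand-in for IEEE80211_TX_RC_USE_RTS_CTS */

struct tx_rate {			/* simplified ieee80211_tx_rate */
	int idx;			/* index into the bitrate table */
	int count;			/* total transmission attempts at this rate */
	unsigned int flags;
};

struct tx_desc {			/* simplified txentry_desc */
	int retry_limit;
	bool retry_mode;		/* models the ENTRY_TXD_RETRY_MODE bit */
};

static void fill_retry_info(struct tx_desc *txdesc, const struct tx_rate *rate)
{
	/* mac80211 reports attempts; the descriptor wants retries (attempts - 1) */
	txdesc->retry_limit = rate->count - 1;

	/*
	 * As in the patch: since current hardware only honours the RETRY_MODE
	 * bit (not a per-frame retry_limit), the bit is tied to the RTS/CTS
	 * request coming from rate control.
	 */
	txdesc->retry_mode = !!(rate->flags & TX_RC_USE_RTS_CTS);
}

int main(void)
{
	struct tx_rate rate = { .idx = 2, .count = 4, .flags = TX_RC_USE_RTS_CTS };
	struct tx_desc txdesc;

	fill_retry_info(&txdesc, &rate);
	printf("retry_limit=%d retry_mode=%d\n", txdesc.retry_limit, (int)txdesc.retry_mode);
	return 0;
}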
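
The duration hunk replaces the lowercase get_duration()/get_duration_res() helpers with the GET_DURATION/GET_DURATION_RES macros. The small sketch below, assuming those macros keep the usual rt2x00 definitions (frame bits times 10 divided by the bitrate expressed in 100 kbit/s units), shows why a non-zero residual rounds the airtime up by one microsecond.

/*
 * Illustrative sketch: macro definitions assumed to match the rt2x00
 * helpers (bitrate given in 100 kbit/s units, result in microseconds).
 */
#include <stdio.h>

#define GET_DURATION(__size, __speed)     (((__size) * 8 * 10) / (__speed))
#define GET_DURATION_RES(__size, __speed) (((__size) * 8 * 10) % (__speed))

int main(void)
{
	unsigned int data_length = 24;	/* frame length in bytes */
	unsigned int bitrate = 110;	/* 11 Mbit/s in 100 kbit/s units */

	unsigned int residual = GET_DURATION_RES(data_length, bitrate);
	unsigned int duration = GET_DURATION(data_length, bitrate);

	/* Any leftover bits still occupy a full microsecond of airtime. */
	if (residual != 0)
		duration++;

	printf("duration=%u us (residual=%u)\n", duration, residual);
	return 0;
}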