Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-agn.c')
-rw-r--r--   drivers/net/wireless/iwlwifi/iwl-agn.c   120
1 files changed, 120 insertions, 0 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 7e0baf6deed..4a15e42ad00 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -471,6 +471,126 @@ static int iwl_send_beacon_cmd(struct iwl_priv *priv)
 	return rc;
 }
 
+static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
+{
+	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+	dma_addr_t addr = get_unaligned_le32(&tb->lo);
+	if (sizeof(dma_addr_t) > sizeof(u32))
+		addr |=
+		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
+
+	return addr;
+}
+
+static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
+{
+	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+	return le16_to_cpu(tb->hi_n_len) >> 4;
+}
+
+static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
+				  dma_addr_t addr, u16 len)
+{
+	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+	u16 hi_n_len = len << 4;
+
+	put_unaligned_le32(addr, &tb->lo);
+	if (sizeof(dma_addr_t) > sizeof(u32))
+		hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+
+	tb->hi_n_len = cpu_to_le16(hi_n_len);
+
+	tfd->num_tbs = idx + 1;
+}
+
+static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
+{
+	return tfd->num_tbs & 0x1f;
+}
+
+/**
+ * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
+ * @priv - driver private data
+ * @txq - tx queue
+ *
+ * Does NOT advance any TFD circular buffer read/write indexes
+ * Does NOT free the TFD itself (which is within circular buffer)
+ */
+void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+{
+	struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)&txq->tfds[0];
+	struct iwl_tfd *tfd;
+	struct pci_dev *dev = priv->pci_dev;
+	int index = txq->q.read_ptr;
+	int i;
+	int num_tbs;
+
+	tfd = &tfd_tmp[index];
+
+	/* Sanity check on number of chunks */
+	num_tbs = iwl_tfd_get_num_tbs(tfd);
+
+	if (num_tbs >= IWL_NUM_OF_TBS) {
+		IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
+		/* @todo issue fatal error, it is quite serious situation */
+		return;
+	}
+
+	/* Unmap tx_cmd */
+	if (num_tbs)
+		pci_unmap_single(dev,
+				pci_unmap_addr(&txq->cmd[index]->meta, mapping),
+				pci_unmap_len(&txq->cmd[index]->meta, len),
+				PCI_DMA_TODEVICE);
+
+	/* Unmap chunks, if any. */
+	for (i = 1; i < num_tbs; i++) {
+		pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
+				iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);
+
+		if (txq->txb) {
+			dev_kfree_skb(txq->txb[txq->q.read_ptr].skb[i - 1]);
+			txq->txb[txq->q.read_ptr].skb[i - 1] = NULL;
+		}
+	}
+}
+
+int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
+				 struct iwl_tx_queue *txq,
+				 dma_addr_t addr, u16 len,
+				 u8 reset, u8 pad)
+{
+	struct iwl_queue *q;
+	struct iwl_tfd *tfd;
+	u32 num_tbs;
+
+	q = &txq->q;
+	tfd = &txq->tfds[q->write_ptr];
+
+	if (reset)
+		memset(tfd, 0, sizeof(*tfd));
+
+	num_tbs = iwl_tfd_get_num_tbs(tfd);
+
+	/* Each TFD can point to a maximum 20 Tx buffers */
+	if (num_tbs >= IWL_NUM_OF_TBS) {
+		IWL_ERR(priv, "Error can not send more than %d chunks\n",
+			  IWL_NUM_OF_TBS);
+		return -EINVAL;
+	}
+
+	BUG_ON(addr & ~DMA_BIT_MASK(36));
+	if (unlikely(addr & ~IWL_TX_DMA_MASK))
+		IWL_ERR(priv, "Unaligned address = %llx\n",
+			  (unsigned long long)addr);
+
+	iwl_tfd_set_tb(tfd, num_tbs, addr, len);
+
+	return 0;
+}
+
 /******************************************************************************
  *
  * Misc. internal state and helper functions
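
The core of the added helpers is the bit layout of a transmit-buffer entry: a 36-bit DMA address and a 12-bit length packed into six bytes, with address bits 0..31 in the little-endian 'lo' word and address bits 32..35 in the low nibble of 'hi_n_len', whose upper 12 bits carry the length. Below is a minimal standalone sketch of that packing, outside the kernel; the struct and function names (tb_entry, tb_set, tb_get_addr, tb_get_len) are illustrative only and use plain host-endian stdint types rather than the driver's __le32/__le16 fields.

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

struct tb_entry {
	uint32_t lo;        /* DMA address bits 0..31 */
	uint16_t hi_n_len;  /* bits 0..3: address bits 32..35, bits 4..15: length */
};

/* Pack a 36-bit address and a length of at most 4095 bytes into one entry. */
static void tb_set(struct tb_entry *tb, uint64_t addr, uint16_t len)
{
	tb->lo = (uint32_t)addr;
	tb->hi_n_len = (uint16_t)(((addr >> 32) & 0xF) | ((uint32_t)len << 4));
}

/* Recover the full 36-bit address from the two fields. */
static uint64_t tb_get_addr(const struct tb_entry *tb)
{
	return (uint64_t)tb->lo | ((uint64_t)(tb->hi_n_len & 0xF) << 32);
}

/* The length occupies the upper 12 bits of hi_n_len. */
static uint16_t tb_get_len(const struct tb_entry *tb)
{
	return tb->hi_n_len >> 4;
}

int main(void)
{
	struct tb_entry tb;
	uint64_t addr = 0xF12345670ULL;	/* 36-bit, 4-byte aligned */
	uint16_t len = 0x7FF;		/* fits in 12 bits */

	tb_set(&tb, addr, len);
	assert(tb_get_addr(&tb) == addr);
	assert(tb_get_len(&tb) == len);
	printf("addr=%#llx len=%#x\n",
	       (unsigned long long)tb_get_addr(&tb), (unsigned)tb_get_len(&tb));
	return 0;
}

Note the driver writes the high nibble as ((addr >> 16) >> 16) rather than (addr >> 32), guarded by the sizeof(dma_addr_t) check: when dma_addr_t is only 32 bits wide, a single shift by 32 would be undefined behaviour, while the split shift is simply optimised away.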