iPXE
ath9k_xmit.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2008-2011 Atheros Communications Inc.
3  *
4  * Modified for iPXE by Scott K Logan <logans@cottsay.net> July 2011
5  * Original from Linux kernel 3.0.1
6  *
7  * Permission to use, copy, modify, and/or distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <ipxe/io.h>
21 
22 #include "ath9k.h"
23 #include "ar9003_mac.h"
24 
25 #define BITS_PER_BYTE 8
26 #define OFDM_PLCP_BITS 22
27 #define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
28 #define L_STF 8
29 #define L_LTF 8
30 #define L_SIG 4
31 #define HT_SIG 8
32 #define HT_STF 4
33 #define HT_LTF(_ns) (4 * (_ns))
34 #define SYMBOL_TIME(_ns) ((_ns) << 2) /* ns * 4 us */
35 #define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
36 #define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
37 #define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
38 
39 
40 #define IS_HT_RATE(_rate) ((_rate) & 0x80)
41 
/* Forward declarations for the single-frame transmit and completion path. */
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid,
			       struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);

/* NOTE(review): the enumerator list is elided in this listing (original
 * lines 53-56) — confirm against the original iPXE source. */
enum {
};
58 
59 /*********************/
60 /* Aggregation logic */
61 /*********************/
62 
63 static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
64 {
65  struct ath_atx_ac *ac = tid->ac;
66 
67  if (tid->paused)
68  return;
69 
70  if (tid->sched)
71  return;
72 
73  tid->sched = 1;
74  list_add_tail(&tid->list, &ac->tid_q);
75 
76  if (ac->sched)
77  return;
78 
79  ac->sched = 1;
80  list_add_tail(&ac->list, &txq->axq_acq);
81 }
82 
83 static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
84 {
85  struct ath_buf *bf = NULL;
86 
87  if (list_empty(&sc->tx.txbuf)) {
88  return NULL;
89  }
90 
91  bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
92  list_del(&bf->list);
93 
94  return bf;
95 }
96 
97 static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
98 {
99  list_add_tail(&bf->list, &sc->tx.txbuf);
100 }
101 
102 /********************/
103 /* Queue Management */
104 /********************/
105 
/*
 * Allocate and initialise a hardware transmit queue.
 *
 * Maps @subtype through subtype_txq_to_hwq[], requests a hardware queue
 * of @qtype, validates the returned qnum against sc->tx.txq[], and
 * performs first-use initialisation of the software queue state.
 *
 * Returns the software queue, or NULL if the hardware has no queue to
 * spare or returned an out-of-range qnum.
 *
 * NOTE(review): several lines are elided in this listing (the subtype
 * table entries and parts of the qi initialisation) — confirm against
 * the original iPXE source before relying on details.
 */
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_tx_queue_info qi;
	/* NOTE(review): table entries elided in this listing */
	static const int subtype_txq_to_hwq[] = {
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise waiting for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */

	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if ((unsigned int)axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
		DBG("ath9k: qnum %d out of range, max %zd!\n",
		    axq_qnum, ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, axq_qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		/* First-time initialisation of this software queue */
		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = 0;
		sc->tx.txqsetup |= 1<<axq_qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
	}
	return &sc->tx.txq[axq_qnum];
}
174 
175 /*
176  * Drain a given TX queue (could be Beacon or Data)
177  *
178  * This assumes output has been stopped and
179  * we do not need to block ath_tx_tasklet.
180  */
181 void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, int retry_tx __unused)
182 {
183  struct ath_buf *bf, *lastbf __unused;
184  struct list_head bf_head;
185  struct ath_tx_status ts;
186 
187  memset(&ts, 0, sizeof(ts));
188  INIT_LIST_HEAD(&bf_head);
189 
190  for (;;) {
191  if (list_empty(&txq->axq_q)) {
192  txq->axq_link = NULL;
193  break;
194  }
195  bf = list_first_entry(&txq->axq_q, struct ath_buf,
196  list);
197 
198  if (bf->bf_stale) {
199  list_del(&bf->list);
200 
201  ath_tx_return_buffer(sc, bf);
202  continue;
203  }
204 
205  lastbf = bf->bf_lastbf;
206 
207  list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
208 
209  txq->axq_depth--;
210  ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
211  }
212 
213  txq->axq_tx_inprogress = 0;
214 }
215 
/*
 * Drain every configured TX queue and report whether the hardware
 * stopped cleanly.  Returns non-zero on success (no frames were left
 * pending), zero if some queue still had frames in flight.
 *
 * NOTE(review): one line is elided before the first loop in this
 * listing — presumably the TX DMA abort (ath9k_hw_abort_tx_dma()) —
 * confirm against the original iPXE source.
 */
int ath_drain_all_txq(struct ath_softc *sc, int retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	int i, npend = 0;

	/* Nothing to do once the device has been invalidated */
	if (sc->sc_flags & SC_OP_INVALID)
		return 1;

	/* Check if any queue remains active */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
	}

	if (npend)
		DBG("ath9k: Failed to stop TX DMA!\n");

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		/*
		 * The caller will resume queues with ieee80211_wake_queues.
		 * Mark the queue as not stopped to prevent ath_tx_complete
		 * from waking the queue too early.
		 */
		txq = &sc->tx.txq[i];
		txq->stopped = 0;
		ath_draintxq(sc, txq, retry_tx);
	}

	return !npend;
}
254 
/*
 * Release a TX queue: clears the setup bit so ATH_TXQ_SETUP() reports
 * the queue as free.
 *
 * NOTE(review): one line is elided in this listing — presumably the
 * hardware release (ath9k_hw_releasetxqueue()) — confirm against the
 * original iPXE source.
 */
void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}
260 
/* For each axq_acq entry, for each tid, try to schedule packets
 * for transmit until ampdu_depth has reached min Q depth.
 *
 * NOTE(review): this listing elides the second operand of three
 * conditions below (marked inline); the code as shown is incomplete —
 * confirm against the original iPXE source.
 */
void ath_txq_schedule(struct ath_softc *sc __unused, struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp, *last_ac;
	struct ath_atx_tid *tid, *last_tid;

	/* NOTE(review): second operand of this condition elided */
	if (list_empty(&txq->axq_acq) ||
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
		list_del(&ac->list);
		ac->sched = 0;

		while (!list_empty(&ac->tid_q)) {
			/* NOTE(review): list_first_entry() head elided */
			list);
			list_del(&tid->list);
			tid->sched = 0;

			if (tid->paused)
				continue;

			/*
			 * add tid to round-robin queue if more frames
			 * are pending for the tid
			 */
			if (!list_empty(&tid->buf_q))
				ath_tx_queue_tid(txq, tid);

			/* NOTE(review): second operand elided */
			if (tid == last_tid ||
				break;
		}

		/* Re-queue the access class if it still has pending TIDs */
		if (!list_empty(&ac->tid_q)) {
			if (!ac->sched) {
				ac->sched = 1;
				list_add_tail(&ac->list, &txq->axq_acq);
			}
		}

		/* NOTE(review): second operand elided */
		if (ac == last_ac ||
			return;
	}
}
314 
315 /***********/
316 /* TX, DMA */
317 /***********/
318 
/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 *
 * NOTE(review): several lines are elided in this listing — the
 * ath9k_hw_puttxbuf()/ath9k_hw_txstart() calls and the head of the
 * ath9k_hw_get_desc_link() call whose trailing argument survives below
 * — confirm against the original iPXE source.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	bf = list_first_entry(head, struct ath_buf, list);

	DBGIO("ath9k: "
	      "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	if (txq->axq_link == NULL) {
		/* Queue is idle: program TXDP directly (call elided) */
		DBGIO("ath9k: TXDP[%d] = %llx (%p)\n",
		      txq->axq_qnum, ito64(bf->bf_daddr),
		      bf->bf_desc);
	} else {
		/* Chain onto the previous frame's link pointer */
		*txq->axq_link = bf->bf_daddr;
		DBGIO("ath9k: "
		      "link[%d] (%p)=%llx (%p)\n",
		      txq->axq_qnum, txq->axq_link,
		      ito64(bf->bf_daddr), bf->bf_desc);
	}
	/* NOTE(review): head of ath9k_hw_get_desc_link() call elided */
	&txq->axq_link);

	txq->axq_depth++;
}
362 
363 static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
364  struct ath_atx_tid *tid,
365  struct list_head *bf_head)
366 {
367  struct ath_buf *bf;
368 
369  bf = list_first_entry(bf_head, struct ath_buf, list);
370  bf->bf_state.bf_type &= ~BUF_AMPDU;
371 
372  /* update starting sequence number for subsequent ADDBA request */
373  if (tid)
374  INCR(tid->seq_start, IEEE80211_SEQ_MAX);
375 
376  bf->bf_lastbf = bf;
377  ath_buf_set_rate(sc, bf, iob_len(bf->bf_mpdu) + FCS_LEN);
378  ath_tx_txqaddbuf(sc, txq, bf_head);
379 }
380 
{
	/* Classify an outgoing frame into a hardware packet type.
	 * NOTE(review): the function signature line and the frame-type
	 * conditions selecting ATH9K_PKT_TYPE_BEACON (and probe
	 * responses) are elided in this listing — confirm against the
	 * original iPXE source. */
	struct ieee80211_frame *hdr;
	enum ath9k_pkt_type htype;
	u16 fc;

	hdr = (struct ieee80211_frame *)iob->data;
	fc = hdr->fc;

	htype = ATH9K_PKT_TYPE_BEACON;
	else
	htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}
399 
/*
 * Build the descriptor flags word for an outgoing frame.
 *
 * NOTE(review): one line is elided in this listing — presumably
 * setting ATH9K_TXDESC_INTREQ into flags — only the visible behaviour
 * (returning the accumulated flags) is documented here.
 */
static int setup_tx_flags(struct io_buffer *iob __unused)
{
	int flags = 0;

	return flags;
}
408 
409 u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
410 {
411  struct ath_hw *ah = sc->sc_ah;
412  struct ath9k_channel *curchan = ah->curchan;
413  if ((sc->sc_flags & SC_OP_ENABLE_APM) &&
414  (curchan->channelFlags & CHANNEL_5GHZ) &&
415  (chainmask == 0x7) && (rate < 0x90))
416  return 0x3;
417  else
418  return chainmask;
419 }
420 
/*
 * Build the 4-entry rate series for a frame and program it into the
 * descriptor (ath9k_hw_set11n_ratescenario()).  All four entries use
 * the current fixed rate (sc->hw_rix); per-entry duration is computed
 * with ath9k_hw_computetxtime().
 *
 * NOTE(review): several lines are elided in this listing (the RTS/CTS
 * flag accumulation, the 8K-aggregate clamp body and the head of the
 * ratescenario call) — confirm against the original iPXE source.
 */
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath9k_11n_rate_series series[4];
	const struct ath9k_legacy_rate *rate;
	int i, flags = 0;
	u8 rix = 0, ctsrate = 0;
	int is_pspoll;

	memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);

	is_pspoll = 0;

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	rate = &sc->rates[sc->hw_rix];
	ctsrate = rate->hw_value;
	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
		ctsrate |= rate->hw_value_short;

	for (i = 0; i < 4; i++) {
		int is_40 __unused, is_sgi __unused, is_sp;
		int phy;

		rix = sc->hw_rix;
		series[i].Tries = ATH_TXMAXTRY;

		if (sc->sc_flags & SC_OP_PROTECT_ENABLE) {
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			/* NOTE(review): one line elided here */
		}

		is_sp = !!(rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		/* legacy rates */
		if ((sc->dev->channels + sc->dev->channel)->band == NET80211_BAND_2GHZ)
			phy = CHANNEL_CCK;
		else
			phy = CHANNEL_OFDM;

		series[i].Rate = rate->hw_value;
		if (rate->hw_value_short && (sc->sc_flags & SC_OP_PREAMBLE_SHORT)) {
			/* NOTE(review): one line elided here */
			series[i].Rate |= rate->hw_value_short;
		} else {
			is_sp = 0;
		}

		if (bf->bf_state.bfs_paprd)
			series[i].ChSel = common->tx_chainmask;
		else
			series[i].ChSel = ath_txchainmask_reduction(sc,
					common->tx_chainmask, series[i].Rate);

		series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
				phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
	/* NOTE(review): clamp body elided in this listing */

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	/* NOTE(review): head of ath9k_hw_set11n_ratescenario() call elided */
		bf->bf_lastbf->bf_desc,
		!is_pspoll, ctsrate,
		0, series, 4, flags);

}
497 
/*
 * Claim a free ath_buf for an outgoing frame and fill in its TX
 * descriptor (address, flags, packet type, key type, segment info).
 *
 * Returns the prepared buffer, or NULL when the free pool is empty
 * (caller keeps ownership of @iob on failure).
 *
 * NOTE(review): several lines are elided in this listing — the
 * key-type translation table entries and the heads of the
 * ath9k_hw_set11n_txdesc()/ath9k_hw_filltxdesc() calls whose trailing
 * arguments survive below — confirm against the original iPXE source.
 */
static struct ath_buf *ath_tx_setup_buffer(struct net80211_device *dev,
					   struct ath_txq *txq,
					   struct io_buffer *iob)
{
	struct ath_softc *sc = dev->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf;
	struct ath_desc *ds;
	int frm_type;
	/* NOTE(review): table entries elided in this listing */
	static const enum ath9k_key_type net80211_keytype_to_ath[] = {
	};

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		DBG("ath9k: TX buffers are full\n");
		return NULL;
	}

	ATH_TXBUF_RESET(bf);

	bf->bf_flags = setup_tx_flags(iob);
	bf->bf_mpdu = iob;

	/* DMA address of the frame payload */
	bf->bf_buf_addr = virt_to_bus(iob->data);

	frm_type = get_hw_packet_type(iob);

	ds = bf->bf_desc;

	/* NOTE(review): head of ath9k_hw_set11n_txdesc() call elided */
		ATH9K_TXKEYIX_INVALID, net80211_keytype_to_ath[dev->crypto->algorithm], bf->bf_flags);

	/* NOTE(review): head of ath9k_hw_filltxdesc() call elided */
		iob_len(iob), /* segment length */
		1, /* first segment */
		1, /* last segment */
		ds, /* first descriptor */
		bf->bf_buf_addr,
		txq->axq_qnum);


	return bf;
}
547 
/* FIXME: tx power */
/*
 * Kick off DMA for a prepared buffer: place it on a local list, record
 * PAPRD state, clear the descriptor's clrdmask and send it as a normal
 * (non-aggregated) frame.
 *
 * NOTE(review): one line is elided after the paprd check in this
 * listing — presumably recording bfs_paprd_timestamp — so the
 * dangling `if` as shown is incomplete; confirm against the original
 * iPXE source.
 */
static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_control *txctl)
{
	struct list_head bf_head;
	struct ath_atx_tid *tid = NULL;

	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);

	bf->bf_state.bfs_paprd = txctl->paprd;

	if (txctl->paprd)
	/* NOTE(review): if-body elided in this listing */

	ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, 1);

	ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
}
567 
568 /* Upon failure caller should free iob */
569 int ath_tx_start(struct net80211_device *dev, struct io_buffer *iob,
570  struct ath_tx_control *txctl)
571 {
572  struct ath_softc *sc = dev->priv;
573  struct ath_txq *txq = txctl->txq;
574  struct ath_buf *bf;
575  int q;
576 
577  /*
578  * At this point, the vif, hw_key and sta pointers in the tx control
579  * info are no longer valid (overwritten by the ath_frame_info data.
580  */
581 
582  bf = ath_tx_setup_buffer(dev, txctl->txq, iob);
583  if (!bf)
584  return -ENOMEM;
585 
586  q = 0;
587  if (txq == sc->tx.txq_map[q] &&
588  ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
589  txq->stopped = 1;
590  }
591 
592  ath_tx_start_dma(sc, bf, txctl);
593 
594  return 0;
595 }
596 
597 /*****************/
598 /* TX Completion */
599 /*****************/
600 
601 static void ath_tx_complete(struct ath_softc *sc, struct io_buffer *iob,
602  int tx_flags __unused, struct ath_tx_status *ts, struct ath_txq *txq)
603 {
604  struct net80211_device *dev = sc->dev;
605  int q, padpos __unused, padsize __unused;
606 
607  DBGIO("ath9k: TX complete: iob: %p\n", iob);
608 
609  q = 0;
610  if (txq == sc->tx.txq_map[q]) {
611  if (--txq->pending_frames < 0)
612  txq->pending_frames = 0;
613 
614  if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
615  txq->stopped = 0;
616  }
617  }
618 
619  net80211_tx_complete(dev, iob, ts->ts_longretry,
620  (ts->ts_status & ATH9K_TXERR_MASK) ? EIO : 0);
621 }
622 
623 static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
624  struct ath_txq *txq, struct list_head *bf_q,
625  struct ath_tx_status *ts, int txok, int sendbar)
626 {
627  struct io_buffer *iob = bf->bf_mpdu;
628  int tx_flags = 0;
629 
630  if (sendbar)
631  tx_flags = ATH_TX_BAR;
632 
633  if (!txok) {
634  tx_flags |= ATH_TX_ERROR;
635 
636  if (bf_isxretried(bf))
637  tx_flags |= ATH_TX_XRETRY;
638  }
639 
640  bf->bf_buf_addr = 0;
641 
642  ath_tx_complete(sc, iob, tx_flags,
643  ts, txq);
644 
645  /* At this point, iob (bf->bf_mpdu) is consumed...make sure we don't
646  * accidentally reference it later.
647  */
648  bf->bf_mpdu = NULL;
649 
650  /*
651  * Return the list of ath_buf of this mpdu to free queue
652  */
653  list_splice_tail_init(bf_q, &sc->tx.txbuf);
654 }
655 
/*
 * Reap completed frames from one hardware TX queue.
 *
 * Walks txq->axq_q, asking ath9k_hw_txprocdesc() whether each frame's
 * last descriptor has completed, and stops at the first one still in
 * progress.  Completed frames are detached and handed to
 * ath_tx_complete_buf(); the last DONE descriptor is retained as a
 * "holding" descriptor for the hardware (bf_stale) and recycled on the
 * next pass.
 */
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_tx_status ts;
	int txok;
	int status;

	DBGIO("ath9k: tx queue %d (%x), link %p\n",
	      txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
	      txq->axq_link);

	for (;;) {
		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			if (sc->sc_flags & SC_OP_TXAGGR)
				ath_txq_schedule(sc, txq);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw re-load the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_stale) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q)) {
				/* Only the holding descriptor remains */
				break;
			} else {
				/* Process the frame after the holder */
				bf = list_entry(bf_held->list.next,
						struct ath_buf, list);
			}
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;

		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
		if (status == -EINPROGRESS) {
			/* Hardware has not finished this frame yet */
			break;
		}

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_stale = 1;
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
					  &txq->axq_q, lastbf->list.prev);

		txq->axq_depth--;
		txok = !(ts.ts_status & ATH9K_TXERR_MASK);
		/* Progress seen: clear the watchdog flag */
		txq->axq_tx_inprogress = 0;
		if (bf_held)
			list_del(&bf_held->list);

		if (bf_held)
			ath_tx_return_buffer(sc, bf_held);

		/*
		 * This frame is sent out as a single frame.
		 * Use hardware retry status for this frame.
		 */
		if (ts.ts_status & ATH9K_TXERR_XRETRY)
			bf->bf_state.bf_type |= BUF_XRETRY;

		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);

		if (sc->sc_flags & SC_OP_TXAGGR)
			ath_txq_schedule(sc, txq);
	}
}
740 
/*
 * TX watchdog: if a configured queue still holds descriptors and no
 * completion progress was observed since the previous poll
 * (axq_tx_inprogress already set), assume the hardware hung and reset
 * the chip.
 *
 * NOTE(review): one line is elided at the end of this listing —
 * presumably re-arming tx_complete_work_timer — confirm against the
 * original iPXE source.
 */
static void ath_tx_complete_poll_work(struct ath_softc *sc)
{
	struct ath_txq *txq;
	int i;
	int needreset = 0;

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			if (txq->axq_depth) {
				if (txq->axq_tx_inprogress) {
					/* No progress since last poll */
					needreset = 1;
					break;
				} else {
					/* Arm the flag for the next poll */
					txq->axq_tx_inprogress = 1;
				}
			}
		}

	if (needreset) {
		DBG("ath9k: "
		    "tx hung, resetting the chip\n");
		ath_reset(sc, 1);
	}

}
768 
769 
770 
771 void ath_tx_tasklet(struct ath_softc *sc)
772 {
773  int i;
774  u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
775 
776  ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
777 
778  for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
779  if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
780  ath_tx_processq(sc, &sc->tx.txq[i]);
781  }
782 }
783 
784 /*****************/
785 /* Init, Cleanup */
786 /*****************/
787 
/*
 * Allocate the TX descriptor ring and free-buffer list.
 *
 * Returns 0 on success or the negative error from ath_descdma_setup();
 * on failure any partial allocation is released via ath_tx_cleanup().
 *
 * NOTE(review): one line is elided before the err label in this
 * listing — presumably installing the tx_complete_work callback —
 * confirm against the original iPXE source.
 */
int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	int error = 0;

	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
				  "tx", nbufs, 1, 1);
	if (error != 0) {
		DBG("ath9k: "
		    "Failed to allocate tx descriptors: %d\n", error);
		goto err;
	}

err:
	if (error != 0)
		ath_tx_cleanup(sc);

	return error;
}
808 
809 void ath_tx_cleanup(struct ath_softc *sc)
810 {
811  if (sc->tx.txdma.dd_desc_len != 0)
812  ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
813 }
#define ATH9K_RATESERIES_RTS_CTS
Definition: mac.h:651
u8 ts_longretry
Definition: mac.h:117
uint16_t u16
Definition: stdint.h:21
struct ath_descdma txdma
Definition: ath9k.h:286
void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd, struct list_head *head)
iPXE I/O API
struct io_buffer * bf_mpdu
Definition: ath9k.h:224
Network protected with CCMP (AES-based system)
Definition: net80211.h:174
u16 hw_value_short
Definition: ath9k.h:393
void ath_tx_tasklet(struct ath_softc *sc)
Definition: ath9k_xmit.c:771
Definition: hw.h:656
#define TICKS_PER_SEC
Number of ticks per second.
Definition: timer.h:15
unsigned long tx_complete_work_timer
Definition: ath9k.h:485
static void ath9k_hw_set_clrdmask(struct ath_hw *ah, void *ds, int val)
Definition: hw-ops.h:121
void * bf_desc
Definition: ath9k.h:225
int pending_frames
Definition: ath9k.h:194
u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
Definition: ath9k_xmit.c:409
int ath_drain_all_txq(struct ath_softc *sc, int retry_tx)
Definition: ath9k_xmit.c:216
u8 channel
The channel currently in use, as an index into the channels array.
Definition: net80211.h:812
u8 txq_headidx
Definition: ath9k.h:192
#define ATH9K_TXDESC_CTSENA
Definition: mac.h:251
struct ath_buf_state bf_state
Definition: ath9k.h:230
struct list_head * next
Next list entry.
Definition: list.h:20
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
Definition: ath9k_xmit.c:421
Dummy value used when the cryptosystem can't be detected.
Definition: net80211.h:177
u16 rts_aggr_limit
Definition: hw.h:200
u16 fc
802.11 Frame Control field
Definition: ieee80211.h:14
struct ath_txq * txq
Definition: ath9k.h:198
u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
Definition: ath9k_mac.c:62
u8 ts_status
Definition: mac.h:113
#define ATH_AGGR_MIN_QDEPTH
Definition: ath9k.h:139
int stopped
Definition: ath9k.h:187
struct arbelprm_completion_with_error error
Definition: arbel.h:12
u32 * axq_link
Definition: ath9k.h:183
#define ATH9K_TXERR_MASK
Definition: mac.h:89
int ath_tx_init(struct ath_softc *sc, int nbufs)
Definition: ath9k_xmit.c:788
static struct ath_buf * ath_tx_get_buffer(struct ath_softc *sc)
Definition: ath9k_xmit.c:83
An 802.11 data or management frame without QoS or WDS header fields.
Definition: ieee80211.h:300
int mac80211_qnum
Definition: ath9k.h:181
struct ath_txq * txq
Definition: ath9k.h:266
void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
Definition: ath9k_mac.c:50
#define ATH_TXQ_SETUP(sc, i)
Definition: ath9k.h:62
#define ATH_TXBUF_RESET(_bf)
Definition: ath9k.h:73
struct ath_txq * ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
Definition: ath9k_xmit.c:106
#define INCR(_l, _sz)
Definition: ath9k.h:46
int ath_tx_start(struct net80211_device *dev, struct io_buffer *iob, struct ath_tx_control *txctl)
Definition: ath9k_xmit.c:569
u8 txq_tailidx
Definition: ath9k.h:193
struct ath_txq txq[ATH9K_NUM_TX_QUEUES]
Definition: ath9k.h:285
uint8_t head
Head number.
Definition: int13.h:34
u8 bfs_paprd
Definition: ath9k.h:215
static enum ath9k_pkt_type get_hw_packet_type(struct io_buffer *iob)
Definition: ath9k_xmit.c:381
u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
static void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, void *ds, void *lastds, u32 durUpdateEn, u32 rtsctsRate, u32 rtsctsDuration, struct ath9k_11n_rate_series series[], u32 nseries, u32 flags)
Definition: hw-ops.h:87
Network protected with WEP (awful RC4-based system)
Definition: net80211.h:145
#define ATH_TXMAXTRY
Definition: ath9k.h:126
u32 channelFlags
Definition: hw.h:350
Definition: mac.h:240
#define bf_isxretried(bf)
Definition: ath9k.h:101
struct list_head axq_q
Definition: ath9k.h:184
#define WME_AC_BE
Definition: common.h:34
static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf, struct ath_tx_control *txctl)
Definition: ath9k_xmit.c:549
struct ib_mad_tid tid
Definition: ib_mad.h:17
#define ito64(x)
Definition: ath9k.h:37
#define SC_OP_PREAMBLE_SHORT
Definition: ath9k.h:367
#define ATH9K_TXDESC_INTREQ
Definition: mac.h:265
#define bf_isaggr(bf)
Definition: ath9k.h:100
A doubly-linked list entry (or list head)
Definition: list.h:18
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd, struct list_head *head, const char *name, int nbuf, int ndesc, int is_tx)
Definition: ath9k_init.c:180
void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
Definition: ath9k_mac.c:55
static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
Definition: ath9k_xmit.c:63
#define ATH_TX_XRETRY
Definition: ath9k.h:273
#define list_empty(list)
Test whether a list is empty.
Definition: list.h:136
u32 axq_ampdu_depth
Definition: ath9k.h:186
u32 bf_daddr
Definition: ath9k.h:226
#define CHANNEL_CCK
Definition: ath5k.h:630
#define NET80211_BAND_2GHZ
The 2.4 GHz ISM band, unlicensed in most countries.
Definition: net80211.h:45
uint8_t status
Status.
Definition: ena.h:16
#define list_first_entry(list, type, member)
Get the container of the first entry in a list.
Definition: list.h:333
struct net80211_device * dev
Definition: ath9k.h:445
#define list_del(list)
Delete an entry from a list.
Definition: list.h:119
#define ENOMEM
Not enough space.
Definition: errno.h:534
u16 bf_flags
Definition: ath9k.h:229
enum net80211_crypto_alg algorithm
The cryptographic algorithm implemented.
Definition: net80211.h:692
#define IEEE80211_FC_SUBTYPE
802.11 Frame Control field, Frame Subtype bitmask
Definition: ieee80211.h:110
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, struct ath_txq *txq, struct list_head *bf_q, struct ath_tx_status *ts, int txok, int sendbar)
Definition: ath9k_xmit.c:623
Definition: ath9k.h:180
static void ath_tx_complete(struct ath_softc *sc, struct io_buffer *iob, int tx_flags __unused, struct ath_tx_status *ts, struct ath_txq *txq)
Definition: ath9k_xmit.c:601
static void ath9k_hw_set11n_txdesc(struct ath_hw *ah, void *ds, u32 pktLen, enum ath9k_pkt_type type, u32 txPower, u32 keyIx, enum ath9k_key_type keyType, u32 flags)
Definition: hw-ops.h:77
struct net80211_crypto * crypto
802.11 cryptosystem for our current network
Definition: net80211.h:940
static __always_inline unsigned long virt_to_bus(volatile const void *addr)
Convert virtual address to a bus address.
Definition: io.h:183
void(* tx_complete_work)(struct ath_softc *sc)
Definition: ath9k.h:484
int ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
Definition: ath9k_mac.c:259
u32 txqsetup
Definition: ath9k.h:283
int bf_stale
Definition: ath9k.h:228
#define ATH_TX_COMPLETE_POLL_INT
Definition: ath9k.h:171
ath9k_key_type
Definition: mac.h:664
struct ath_hw * sc_ah
Definition: ath9k.h:454
#define ATH9K_TXQ_USEDEFAULT
Definition: mac.h:597
enum ath9k_tx_queue_flags tqi_qflags
Definition: mac.h:618
unsigned long bfs_paprd_timestamp
Definition: ath9k.h:216
#define list_add_tail(new, head)
Add a new entry to the tail of a list.
Definition: list.h:93
void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
Definition: ath9k_xmit.c:255
void ath9k_hw_abort_tx_dma(struct ath_hw *ah)
Definition: ath9k_mac.c:129
void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
Definition: ath9k_mac.c:156
u32 bf_buf_addr
Definition: ath9k.h:227
static void ath_tx_complete_poll_work(struct ath_softc *sc)
Definition: ath9k_xmit.c:741
#define FCS_LEN
Definition: ath5k_desc.c:36
#define DBGIO(...)
Definition: compiler.h:549
struct list_head list
Definition: ath9k.h:200
#define list_for_each_entry_safe(pos, tmp, head, member)
Iterate over entries in a list, safe against deletion of the current entry.
Definition: list.h:447
void * priv
Driver private data.
Definition: net80211.h:798
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, struct list_head *head)
Definition: ath9k_xmit.c:323
Definition: ath9k.h:219
#define EINPROGRESS
Operation in progress.
Definition: errno.h:418
u8 bf_type
Definition: ath9k.h:214
void ath_tx_cleanup(struct ath_softc *sc)
Definition: ath9k_xmit.c:809
#define ATH_TXFIFO_DEPTH
Definition: ath9k.h:179
#define list_splice_tail_init(list, entry)
Move all entries from one list into another list and reinitialise empty list.
Definition: list.h:299
#define SC_OP_TXAGGR
Definition: ath9k.h:365
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
Definition: ath9k_xmit.c:656
struct list_head txq_fifo[ATH_TXFIFO_DEPTH]
Definition: ath9k.h:190
uint8_t subtype
Slow protocols subtype.
Definition: eth_slow.h:12
#define IEEE80211_FC_TYPE
802.11 Frame Control field, Frame Type bitmask
Definition: ieee80211.h:97
int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type, const struct ath9k_tx_queue_info *qinfo)
Definition: ath9k_mac.c:216
u16 ath9k_hw_computetxtime(struct ath_hw *ah, u8 phy, int kbps, u32 frameLen, u16 rateix, int shortPreamble)
Definition: ath9k_hw.c:137
static void ath9k_hw_filltxdesc(struct ath_hw *ah, void *ds, u32 seglen, int is_firstseg, int is_lastseg, const void *ds0, u32 buf_addr, unsigned int qcu)
Definition: hw-ops.h:62
static size_t iob_len(struct io_buffer *iobuf)
Calculate length of data in an I/O buffer.
Definition: iobuf.h:151
Structure encapsulating the complete state of an 802.11 device.
Definition: net80211.h:786
struct ath_buf * bf_lastbf
Definition: ath9k.h:221
struct list_head list
Definition: ath9k.h:220
u32 axq_qnum
Definition: ath9k.h:182
No security, an "Open" network.
Definition: net80211.h:131
#define ATH9K_TXDESC_RTSENA
Definition: mac.h:250
#define ATH9K_NUM_TX_QUEUES
Definition: mac.h:580
static struct ath_common * ath9k_hw_common(struct ath_hw *ah)
Definition: hw.h:869
#define ARRAY_SIZE(x)
Definition: efx_common.h:43
int hw_rix
Definition: ath9k.h:479
#define CHANNEL_OFDM
Definition: ath5k.h:631
struct ath_tx tx
Definition: ath9k.h:476
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, int retry_tx __unused)
Definition: ath9k_xmit.c:181
#define list_cut_position(new, list, entry)
Cut a list into two.
Definition: list.h:185
void ath_txq_schedule(struct ath_softc *sc __unused, struct ath_txq *txq)
Definition: ath9k_xmit.c:264
void net80211_tx_complete(struct net80211_device *dev, struct io_buffer *iob, int retries, int rc)
Indicate the completed transmission of a packet.
Definition: net80211.c:2808
struct list_head list
Definition: ath9k.h:234
u32 tqi_physCompBuf
Definition: mac.h:629
int ath_reset(struct ath_softc *sc, int retry_tx)
Definition: ath9k_main.c:511
struct ib_cm_common common
Definition: ib_mad.h:11
uint32_t ds
Definition: librm.h:254
int sched
Definition: ath9k.h:199
u32 sc_flags
Definition: ath9k.h:465
#define SC_OP_INVALID
Definition: ath9k.h:362
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, struct ath_atx_tid *tid, struct list_head *bf_head)
Definition: ath9k_xmit.c:363
#define IEEE80211_STYPE_PROBE_RESP
Subtype value for probe-response management frames.
Definition: ieee80211.h:157
uint32_t hdr
Message header.
Definition: intelvf.h:12
static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
Definition: ath9k_xmit.c:97
#define __unused
Declare a variable or data structure as unused.
Definition: compiler.h:573
static int setup_tx_flags(struct io_buffer *iob __unused)
Definition: ath9k_xmit.c:400
struct list_head axq_acq
Definition: ath9k.h:189
#define INIT_LIST_HEAD(list)
Initialise a list head.
Definition: list.h:45
static void ath9k_hw_get_desc_link(struct ath_hw *ah, void *ds, u32 **link)
Definition: hw-ops.h:44
#define list_is_singular(list)
Test whether a list has just one entry.
Definition: list.h:149
uint32_t len
Length.
Definition: ena.h:14
u32 axq_depth
Definition: ath9k.h:185
static void ath9k_hw_set_desc_link(struct ath_hw *ah, void *ds, u32 link)
Definition: hw-ops.h:38
struct list_head * prev
Previous list entry.
Definition: list.h:22
#define ATH_MAX_QDEPTH
Definition: ath9k.h:125
struct list_head txbuf
Definition: ath9k.h:284
struct ath9k_hw_capabilities caps
Definition: hw.h:663
#define ATH_TX_BAR
Definition: ath9k.h:274
#define CHANNEL_5GHZ
Definition: ath5k.h:633
void * data
Start of data.
Definition: iobuf.h:44
#define EIO
Input/output error.
Definition: errno.h:433
struct net80211_channel channels[NET80211_MAX_CHANNELS]
A list of all possible channels we might use.
Definition: net80211.h:806
static int ath9k_hw_txprocdesc(struct ath_hw *ah, void *ds, struct ath_tx_status *ts)
Definition: hw-ops.h:71
uint8_t ah
Definition: registers.h:85
struct list_head txq_fifo_pending
Definition: ath9k.h:191
struct ath9k_legacy_rate rates[NET80211_MAX_RATES]
Definition: ath9k.h:478
#define SC_OP_PROTECT_ENABLE
Definition: ath9k.h:368
Network protected with TKIP (better RC4-based system)
Definition: net80211.h:163
u32 dd_desc_len
Definition: ath9k.h:108
#define IEEE80211_STYPE_BEACON
Subtype value for beacon management frames.
Definition: ieee80211.h:168
unsigned long currticks(void)
Get current system time in ticks.
Definition: timer.c:42
#define DBG(...)
Print a debugging message.
Definition: compiler.h:498
ath9k_pkt_type
Definition: mac.h:604
#define SC_OP_ENABLE_APM
Definition: ath9k.h:376
#define ATH_TX_ERROR
Definition: ath9k.h:272
struct list_head tid_q
Definition: ath9k.h:201
struct ath_txq * txq_map[WME_NUM_AC]
Definition: ath9k.h:287
#define list_entry(list, type, member)
Get the container of a list entry.
Definition: list.h:321
#define ATH9K_TXERR_XRETRY
Definition: mac.h:83
#define list_is_last(list, head)
Test whether an entry is the last entry in list.
Definition: list.h:163
#define NULL
NULL pointer (VOID *)
Definition: Base.h:362
#define ATH9K_TXKEYIX_INVALID
Definition: mac.h:204
uint8_t u8
Definition: stdint.h:19
#define MAX_RATE_POWER
Definition: hw.h:144
static struct ath_buf * ath_tx_setup_buffer(struct net80211_device *dev, struct ath_txq *txq, struct io_buffer *iob)
Definition: ath9k_xmit.c:498
uint32_t u32
Definition: stdint.h:23
int axq_tx_inprogress
Definition: ath9k.h:188
#define IEEE80211_TYPE_MGMT
Type value for management (layer-2) frames.
Definition: ieee80211.h:100
struct ath_atx_ac * ac
Definition: ath9k.h:237
#define IEEE80211_SEQ_MAX
Definition: ath9k.h:144
if(natsemi->flags &NATSEMI_64BIT) return 1
void * memset(void *dest, int character, size_t len) __nonnull
A persistent I/O buffer.
Definition: iobuf.h:32
uint8_t flags
Flags.
Definition: ena.h:18