iPXE
ath9k_xmit.c
1/*
2 * Copyright (c) 2008-2011 Atheros Communications Inc.
3 *
4 * Modified for iPXE by Scott K Logan <logans@cottsay.net> July 2011
5 * Original from Linux kernel 3.0.1
6 *
7 * Permission to use, copy, modify, and/or distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20FILE_SECBOOT ( FORBIDDEN );
21
22#include <ipxe/io.h>
23
24#include "ath9k.h"
25#include "ar9003_mac.h"
26
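/*
 * 802.11 OFDM/HT PLCP constants: the preamble field durations (L_STF,
 * L_LTF, L_SIG, HT_SIG, HT_STF, HT_LTF) are in microseconds, and the
 * SYMBOL_TIME helpers convert between symbols and microseconds
 * (4 us per symbol, 3.6 us with the short guard interval).
 */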
27#define BITS_PER_BYTE 8
28#define OFDM_PLCP_BITS 22
29#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
30#define L_STF 8
31#define L_LTF 8
32#define L_SIG 4
33#define HT_SIG 8
34#define HT_STF 4
35#define HT_LTF(_ns) (4 * (_ns))
36#define SYMBOL_TIME(_ns) ((_ns) << 2) /* ns * 4 us */
37#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
38#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
39#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
40
41
42#define IS_HT_RATE(_rate) ((_rate) & 0x80)
43
44static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
45 struct ath_atx_tid *tid,
46 struct list_head *bf_head);
47static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
48 struct ath_txq *txq, struct list_head *bf_q,
49 struct ath_tx_status *ts, int txok, int sendbar);
50static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
51 struct list_head *head);
52static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
53
54enum {
55	MCS_HT20,
56	MCS_HT20_SGI,
57	MCS_HT40,
58	MCS_HT40_SGI,
59};
60
61/*********************/
62/* Aggregation logic */
63/*********************/
64
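/*
 * Queue a TID for transmission: add it to its access category's TID
 * list and, if necessary, add the access category to the hardware
 * queue's schedule list.  Paused or already-scheduled TIDs are left
 * untouched.
 */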
65static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
66{
67 struct ath_atx_ac *ac = tid->ac;
68
69 if (tid->paused)
70 return;
71
72 if (tid->sched)
73 return;
74
75 tid->sched = 1;
76 list_add_tail(&tid->list, &ac->tid_q);
77
78 if (ac->sched)
79 return;
80
81 ac->sched = 1;
82	list_add_tail(&ac->list, &txq->axq_acq);
83}
84
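/* Take a transmit buffer from the free list, or return NULL if none are available. */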
85static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
86{
87 struct ath_buf *bf = NULL;
88
89 if (list_empty(&sc->tx.txbuf)) {
90 return NULL;
91 }
92
93 bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
94 list_del(&bf->list);
95
96 return bf;
97}
98
99static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
100{
101 list_add_tail(&bf->list, &sc->tx.txbuf);
102}
103
104/********************/
105/* Queue Management */
106/********************/
107
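/*
 * Set up a hardware transmit queue of the given type and subtype,
 * initialising the corresponding software queue state on first use.
 * Returns NULL if the hardware cannot provide a suitable queue.
 */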
108struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
109{
110 struct ath_hw *ah = sc->sc_ah;
111 struct ath9k_tx_queue_info qi;
112 static const int subtype_txq_to_hwq[] = {
113		[WME_AC_BE] = ATH_TXQ_AC_BE,
114	};
115 int axq_qnum, i;
116
117 memset(&qi, 0, sizeof(qi));
118 qi.tqi_subtype = subtype_txq_to_hwq[subtype];
119	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
120	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
121	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
122	qi.tqi_physCompBuf = 0;
123
124 /*
125 * Enable interrupts only for EOL and DESC conditions.
126 * We mark tx descriptors to receive a DESC interrupt
127 * when a tx queue gets deep; otherwise waiting for the
128 * EOL to reap descriptors. Note that this is done to
129 * reduce interrupt load and this only defers reaping
130 * descriptors, never transmitting frames. Aside from
131 * reducing interrupts this also permits more concurrency.
132 * The only potential downside is if the tx queue backs
133 * up in which case the top half of the kernel may backup
134 * due to a lack of tx descriptors.
135 *
136 * The UAPSD queue is an exception, since we take a desc-
137 * based intr on the EOSP frames.
138 */
139	qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
140			TXQ_FLAG_TXDESCINT_ENABLE;
141
142 axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
143 if (axq_qnum == -1) {
144 /*
145 * NB: don't print a message, this happens
146 * normally on parts with too few tx queues
147 */
148 return NULL;
149 }
150 if ((unsigned int)axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
151 DBG("ath9k: qnum %d out of range, max %zd!\n",
152 axq_qnum, ARRAY_SIZE(sc->tx.txq));
153 ath9k_hw_releasetxqueue(ah, axq_qnum);
154 return NULL;
155 }
156 if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
157 struct ath_txq *txq = &sc->tx.txq[axq_qnum];
158
159 txq->axq_qnum = axq_qnum;
160 txq->mac80211_qnum = -1;
161 txq->axq_link = NULL;
162 INIT_LIST_HEAD(&txq->axq_q);
163 INIT_LIST_HEAD(&txq->axq_acq);
164 txq->axq_depth = 0;
165 txq->axq_ampdu_depth = 0;
166 txq->axq_tx_inprogress = 0;
167 sc->tx.txqsetup |= 1<<axq_qnum;
168
169 txq->txq_headidx = txq->txq_tailidx = 0;
170 for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
171 INIT_LIST_HEAD(&txq->txq_fifo[i]);
172		INIT_LIST_HEAD(&txq->txq_fifo_pending);
173	}
174 return &sc->tx.txq[axq_qnum];
175}
176
177/*
178 * Drain a given TX queue (could be Beacon or Data)
179 *
180 * This assumes output has been stopped and
181 * we do not need to block ath_tx_tasklet.
182 */
183void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, int retry_tx __unused)
184{
185 struct ath_buf *bf, *lastbf __unused;
186 struct list_head bf_head;
187 struct ath_tx_status ts;
188
189 memset(&ts, 0, sizeof(ts));
190 INIT_LIST_HEAD(&bf_head);
191
192 for (;;) {
193 if (list_empty(&txq->axq_q)) {
194 txq->axq_link = NULL;
195 break;
196 }
197 bf = list_first_entry(&txq->axq_q, struct ath_buf,
198 list);
199
200 if (bf->bf_stale) {
201 list_del(&bf->list);
202
203 ath_tx_return_buffer(sc, bf);
204 continue;
205 }
206
207 lastbf = bf->bf_lastbf;
208
209 list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
210
211 txq->axq_depth--;
212 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
213 }
214
215 txq->axq_tx_inprogress = 0;
216}
217
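/*
 * Abort TX DMA and drain every configured transmit queue.  Returns
 * zero if any queue still had frames pending in hardware (i.e. the
 * stop failed), non-zero otherwise.
 */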
218int ath_drain_all_txq(struct ath_softc *sc, int retry_tx)
219{
220 struct ath_hw *ah = sc->sc_ah;
221 struct ath_txq *txq;
222 int i, npend = 0;
223
224 if (sc->sc_flags & SC_OP_INVALID)
225 return 1;
226
227	ath9k_hw_abort_tx_dma(ah);
228
229 /* Check if any queue remains active */
230 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
231 if (!ATH_TXQ_SETUP(sc, i))
232 continue;
233
234 npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
235 }
236
237 if (npend)
238 DBG("ath9k: Failed to stop TX DMA!\n");
239
240 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
241 if (!ATH_TXQ_SETUP(sc, i))
242 continue;
243
244 /*
245 * The caller will resume queues with ieee80211_wake_queues.
246 * Mark the queue as not stopped to prevent ath_tx_complete
247 * from waking the queue too early.
248 */
249 txq = &sc->tx.txq[i];
250 txq->stopped = 0;
251 ath_draintxq(sc, txq, retry_tx);
252 }
253
254 return !npend;
255}
256
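/* Release a hardware transmit queue and clear its bit in the setup mask. */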
257void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
258{
259	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
260	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
261}
262
263/* For each axq_acq entry, for each tid, try to schedule packets
264 * for transmit until ampdu_depth has reached min Q depth.
265 */
266void ath_txq_schedule(struct ath_softc *sc __unused, struct ath_txq *txq)
267{
268 struct ath_atx_ac *ac, *ac_tmp, *last_ac;
269 struct ath_atx_tid *tid, *last_tid;
270
271 if (list_empty(&txq->axq_acq) ||
272	    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
273		return;
274
275 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
276 last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
277
278 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
279 last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
280 list_del(&ac->list);
281 ac->sched = 0;
282
283 while (!list_empty(&ac->tid_q)) {
284			tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
285					       list);
286 list_del(&tid->list);
287 tid->sched = 0;
288
289 if (tid->paused)
290 continue;
291
292 /*
293 * add tid to round-robin queue if more frames
294 * are pending for the tid
295 */
296 if (!list_empty(&tid->buf_q))
297 ath_tx_queue_tid(txq, tid);
298
299 if (tid == last_tid ||
300			    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
301				break;
302 }
303
304 if (!list_empty(&ac->tid_q)) {
305 if (!ac->sched) {
306 ac->sched = 1;
307 list_add_tail(&ac->list, &txq->axq_acq);
308 }
309 }
310
311 if (ac == last_ac ||
312		    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
313			return;
314 }
315}
316
317/***********/
318/* TX, DMA */
319/***********/
320
321/*
322 * Insert a chain of ath_buf (descriptors) on a txq and
323 * assume the descriptors are already chained together by caller.
324 */
325static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
326 struct list_head *head)
327{
328 struct ath_hw *ah = sc->sc_ah;
329 struct ath_buf *bf;
330
331 /*
332 * Insert the frame on the outbound list and
333 * pass it on to the hardware.
334 */
335
336 if (list_empty(head))
337 return;
338
339 bf = list_first_entry(head, struct ath_buf, list);
340
341 DBGIO("ath9k: "
342 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
343
344	list_splice_tail_init(head, &txq->axq_q);
345
346 if (txq->axq_link == NULL) {
347		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
348		DBGIO("ath9k: TXDP[%d] = %llx (%p)\n",
349 txq->axq_qnum, ito64(bf->bf_daddr),
350 bf->bf_desc);
351 } else {
352 *txq->axq_link = bf->bf_daddr;
353 DBGIO("ath9k: "
354 "link[%d] (%p)=%llx (%p)\n",
355 txq->axq_qnum, txq->axq_link,
356 ito64(bf->bf_daddr), bf->bf_desc);
357 }
358	ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
359			       &txq->axq_link);
360	ath9k_hw_txstart(ah, txq->axq_qnum);
361
362 txq->axq_depth++;
363}
364
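/*
 * Send a single (non-aggregated) frame: bump the TID's starting
 * sequence number, set the rate series and hand the buffer chain to
 * the hardware queue.
 */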
365static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
366 struct ath_atx_tid *tid,
367 struct list_head *bf_head)
368{
369 struct ath_buf *bf;
370
371 bf = list_first_entry(bf_head, struct ath_buf, list);
372	bf->bf_state.bf_type &= ~BUF_AMPDU; /* regular HT frame */
373
374 /* update starting sequence number for subsequent ADDBA request */
375 if (tid)
376 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
377
378 bf->bf_lastbf = bf;
379 ath_buf_set_rate(sc, bf, iob_len(bf->bf_mpdu) + FCS_LEN);
380 ath_tx_txqaddbuf(sc, txq, bf_head);
381}
382
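/*
 * Map an outgoing frame to a hardware packet type: beacons and probe
 * responses get dedicated types, everything else is sent as a normal
 * frame.
 */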
383static enum ath9k_pkt_type get_hw_packet_type(struct io_buffer *iob)
384{
385 struct ieee80211_frame *hdr;
386 enum ath9k_pkt_type htype;
387 u16 fc;
388
389 hdr = (struct ieee80211_frame *)iob->data;
390 fc = hdr->fc;
391
392	if ((fc & (IEEE80211_FC_TYPE | IEEE80211_FC_SUBTYPE)) == (IEEE80211_TYPE_MGMT | IEEE80211_STYPE_BEACON))
393		htype = ATH9K_PKT_TYPE_BEACON;
394	else if ((fc & (IEEE80211_FC_TYPE | IEEE80211_FC_SUBTYPE)) == (IEEE80211_TYPE_MGMT | IEEE80211_STYPE_PROBE_RESP))
395		htype = ATH9K_PKT_TYPE_PROBE_RESP;
396	else
397 htype = ATH9K_PKT_TYPE_NORMAL;
398
399 return htype;
400}
401
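/* Descriptor flags common to every outgoing frame; at present only a TX completion interrupt is requested. */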
402static int setup_tx_flags(struct io_buffer *iob __unused)
403{
404 int flags = 0;
405
406	flags |= ATH9K_TXDESC_INTREQ;
407
408 return flags;
409}
410
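/*
 * Reduce a three-chain transmit mask (0x7) to two chains (0x3) for
 * legacy rates on 5GHz channels when APM is enabled.
 */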
411u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
412{
413 struct ath_hw *ah = sc->sc_ah;
414 struct ath9k_channel *curchan = ah->curchan;
415 if ((sc->sc_flags & SC_OP_ENABLE_APM) &&
416 (curchan->channelFlags & CHANNEL_5GHZ) &&
417 (chainmask == 0x7) && (rate < 0x90))
418 return 0x3;
419 else
420 return chainmask;
421}
422
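/*
 * Build the four-entry rate series from the current legacy rate and
 * write the rate scenario (preamble, CTS rate, chainmask, duration)
 * into the frame's transmit descriptor.
 */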
423static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
424{
425 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
426 struct ath9k_11n_rate_series series[4];
427 const struct ath9k_legacy_rate *rate;
428 int i, flags = 0;
429 u8 rix = 0, ctsrate = 0;
430 int is_pspoll;
431
432 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
433
434 is_pspoll = 0;
435
436 /*
437 * We check if Short Preamble is needed for the CTS rate by
438 * checking the BSS's global flag.
439 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
440 */
441 rate = &sc->rates[sc->hw_rix];
442 ctsrate = rate->hw_value;
443	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
444		ctsrate |= rate->hw_value_short;
445
446 for (i = 0; i < 4; i++) {
447 int is_40 __unused, is_sgi __unused, is_sp;
448 int phy;
449
450 rix = sc->hw_rix;
451 series[i].Tries = ATH_TXMAXTRY;
452
453 if (sc->sc_flags & SC_OP_PROTECT_ENABLE) {
454			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
455			flags |= ATH9K_TXDESC_CTSENA;
456		}
457
458 is_sp = !!(rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
459
460 /* legacy rates */
461 if ((sc->dev->channels + sc->dev->channel)->band == NET80211_BAND_2GHZ)
462 phy = CHANNEL_CCK;
463 else
464 phy = CHANNEL_OFDM;
465
466 series[i].Rate = rate->hw_value;
467 if (rate->hw_value_short && (sc->sc_flags & SC_OP_PREAMBLE_SHORT)) {
468			if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
469				series[i].Rate |= rate->hw_value_short;
470 } else {
471 is_sp = 0;
472 }
473
474 if (bf->bf_state.bfs_paprd)
475 series[i].ChSel = common->tx_chainmask;
476 else
477 series[i].ChSel = ath_txchainmask_reduction(sc,
478 common->tx_chainmask, series[i].Rate);
479
480		series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
481			phy, rate->bitrate * 100, len, rix, is_sp);
482 }
483
484 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
485 if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
486		flags &= ~(ATH9K_TXDESC_RTSENA);
487
488 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
489	if (flags & ATH9K_TXDESC_RTSENA)
490		flags &= ~ATH9K_TXDESC_CTSENA;
491
492 /* set dur_update_en for l-sig computation except for PS-Poll frames */
493	ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
494				     bf->bf_lastbf->bf_desc,
495 !is_pspoll, ctsrate,
496 0, series, 4, flags);
497
498}
499
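/*
 * Allocate an ath_buf for an outgoing I/O buffer and fill in its
 * transmit descriptor (packet type, key type, buffer address and
 * length).  Returns NULL if no transmit buffers are free.
 */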
500static struct ath_buf *ath_tx_setup_buffer(struct net80211_device *dev,
501 struct ath_txq *txq,
502 struct io_buffer *iob)
503{
504 struct ath_softc *sc = dev->priv;
505 struct ath_hw *ah = sc->sc_ah;
506 struct ath_buf *bf;
507 struct ath_desc *ds;
508 int frm_type;
509 static const enum ath9k_key_type net80211_keytype_to_ath[] = {
510			[NET80211_CRYPT_NONE] = ATH9K_KEY_TYPE_CLEAR,
511			[NET80211_CRYPT_WEP] = ATH9K_KEY_TYPE_WEP,
512			[NET80211_CRYPT_TKIP] = ATH9K_KEY_TYPE_TKIP,
513			[NET80211_CRYPT_CCMP] = ATH9K_KEY_TYPE_AES,
514			[NET80211_CRYPT_UNKNOWN] = ATH9K_KEY_TYPE_CLEAR,
515	};
516
517 bf = ath_tx_get_buffer(sc);
518 if (!bf) {
519 DBG("ath9k: TX buffers are full\n");
520 return NULL;
521 }
522
523 ATH_TXBUF_RESET(bf);
524
525 bf->bf_flags = setup_tx_flags(iob);
526 bf->bf_mpdu = iob;
527
528 bf->bf_buf_addr = virt_to_bus(iob->data);
529
530 frm_type = get_hw_packet_type(iob);
531
532 ds = bf->bf_desc;
533	ath9k_hw_set_desc_link(ah, ds, 0);
534
535	ath9k_hw_set11n_txdesc(ah, ds, iob_len(iob) + FCS_LEN, frm_type, MAX_RATE_POWER,
536			       ATH9K_TXKEYIX_INVALID, net80211_keytype_to_ath[dev->crypto->algorithm], bf->bf_flags);
537
538	ath9k_hw_filltxdesc(ah, ds,
539			    iob_len(iob),	/* segment length */
540 1, /* first segment */
541 1, /* last segment */
542 ds, /* first descriptor */
543 bf->bf_buf_addr,
544 txq->axq_qnum);
545
546
547 return bf;
548}
549
550/* FIXME: tx power */
551static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
552 struct ath_tx_control *txctl)
553{
554 struct list_head bf_head;
555 struct ath_atx_tid *tid = NULL;
556
557 INIT_LIST_HEAD(&bf_head);
558 list_add_tail(&bf->list, &bf_head);
559
560 bf->bf_state.bfs_paprd = txctl->paprd;
561
562 if (txctl->paprd)
563		bf->bf_state.bfs_paprd_timestamp = currticks();
564
565	ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, 1);
566
567 ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
568}
569
570/* Upon failure caller should free iob */
571int ath_tx_start(struct net80211_device *dev, struct io_buffer *iob,
572 struct ath_tx_control *txctl)
573{
574 struct ath_softc *sc = dev->priv;
575 struct ath_txq *txq = txctl->txq;
576 struct ath_buf *bf;
577 int q;
578
579 /*
580 * At this point, the vif, hw_key and sta pointers in the tx control
581	 * info are no longer valid (overwritten by the ath_frame_info data).
582 */
583
584 bf = ath_tx_setup_buffer(dev, txctl->txq, iob);
585 if (!bf)
586 return -ENOMEM;
587
588 q = 0;
589 if (txq == sc->tx.txq_map[q] &&
590 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
591 txq->stopped = 1;
592 }
593
594 ath_tx_start_dma(sc, bf, txctl);
595
596 return 0;
597}
598
599/*****************/
600/* TX Completion */
601/*****************/
602
603static void ath_tx_complete(struct ath_softc *sc, struct io_buffer *iob,
604 int tx_flags __unused, struct ath_tx_status *ts, struct ath_txq *txq)
605{
606 struct net80211_device *dev = sc->dev;
607 int q, padpos __unused, padsize __unused;
608
609 DBGIO("ath9k: TX complete: iob: %p\n", iob);
610
611 q = 0;
612 if (txq == sc->tx.txq_map[q]) {
613 if (--txq->pending_frames < 0)
614 txq->pending_frames = 0;
615
616 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
617 txq->stopped = 0;
618 }
619 }
620
621	net80211_tx_complete(dev, iob, ts->ts_longretry,
622			     (ts->ts_status & ATH9K_TXERR_MASK) ? EIO : 0);
623}
624
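/*
 * Complete one MPDU: report its status to the stack and return its
 * ath_buf list to the free pool.
 */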
625static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
626 struct ath_txq *txq, struct list_head *bf_q,
627 struct ath_tx_status *ts, int txok, int sendbar)
628{
629 struct io_buffer *iob = bf->bf_mpdu;
630 int tx_flags = 0;
631
632 if (sendbar)
633 tx_flags = ATH_TX_BAR;
634
635 if (!txok) {
636 tx_flags |= ATH_TX_ERROR;
637
638 if (bf_isxretried(bf))
639 tx_flags |= ATH_TX_XRETRY;
640 }
641
642 bf->bf_buf_addr = 0;
643
644 ath_tx_complete(sc, iob, tx_flags,
645 ts, txq);
646
647 /* At this point, iob (bf->bf_mpdu) is consumed...make sure we don't
648 * accidentally reference it later.
649 */
650 bf->bf_mpdu = NULL;
651
652 /*
653 * Return the list of ath_buf of this mpdu to free queue
654 */
655 list_splice_tail_init(bf_q, &sc->tx.txbuf);
656}
657
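/*
 * Reap completed descriptors from a transmit queue, taking care to
 * leave the final (stale) descriptor behind as the hardware's holding
 * descriptor.
 */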
658static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
659{
660 struct ath_hw *ah = sc->sc_ah;
661 struct ath_buf *bf, *lastbf, *bf_held = NULL;
662 struct list_head bf_head;
663 struct ath_desc *ds;
664 struct ath_tx_status ts;
665 int txok;
666 int status;
667
668 DBGIO("ath9k: tx queue %d (%x), link %p\n",
669 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
670 txq->axq_link);
671
672 for (;;) {
673 if (list_empty(&txq->axq_q)) {
674 txq->axq_link = NULL;
675 if (sc->sc_flags & SC_OP_TXAGGR)
676 ath_txq_schedule(sc, txq);
677 break;
678 }
679 bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
680
681 /*
682 * There is a race condition that a BH gets scheduled
683 * after sw writes TxE and before hw re-load the last
684 * descriptor to get the newly chained one.
685 * Software must keep the last DONE descriptor as a
686 * holding descriptor - software does so by marking
687 * it with the STALE flag.
688 */
689 bf_held = NULL;
690 if (bf->bf_stale) {
691 bf_held = bf;
692 if (list_is_last(&bf_held->list, &txq->axq_q)) {
693 break;
694 } else {
695 bf = list_entry(bf_held->list.next,
696 struct ath_buf, list);
697 }
698 }
699
700 lastbf = bf->bf_lastbf;
701 ds = lastbf->bf_desc;
702
703 memset(&ts, 0, sizeof(ts));
704		status = ath9k_hw_txprocdesc(ah, ds, &ts);
705		if (status == -EINPROGRESS) {
706 break;
707 }
708
709 /*
710 * Remove ath_buf's of the same transmit unit from txq,
711 * however leave the last descriptor back as the holding
712 * descriptor for hw.
713 */
714 lastbf->bf_stale = 1;
715 INIT_LIST_HEAD(&bf_head);
716 if (!list_is_singular(&lastbf->list))
717 list_cut_position(&bf_head,
718 &txq->axq_q, lastbf->list.prev);
719
720 txq->axq_depth--;
721 txok = !(ts.ts_status & ATH9K_TXERR_MASK);
722 txq->axq_tx_inprogress = 0;
723 if (bf_held)
724 list_del(&bf_held->list);
725
726 if (bf_held)
727 ath_tx_return_buffer(sc, bf_held);
728
729 /*
730 * This frame is sent out as a single frame.
731 * Use hardware retry status for this frame.
732 */
733		if (ts.ts_status & ATH9K_TXERR_XRETRY)
734			bf->bf_state.bf_type |= BUF_XRETRY;
735
736 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
737
738 if (sc->sc_flags & SC_OP_TXAGGR)
739 ath_txq_schedule(sc, txq);
740 }
741}
742
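/*
 * Periodic watchdog: if a queue still shows frames in progress from
 * the previous poll, assume the transmitter has hung and reset the
 * chip.
 */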
743static void ath_tx_complete_poll_work(struct ath_softc *sc)
744{
745 struct ath_txq *txq;
746 int i;
747 int needreset = 0;
748
749 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
750 if (ATH_TXQ_SETUP(sc, i)) {
751 txq = &sc->tx.txq[i];
752 if (txq->axq_depth) {
753 if (txq->axq_tx_inprogress) {
754 needreset = 1;
755 break;
756 } else {
757 txq->axq_tx_inprogress = 1;
758 }
759 }
760 }
761
762 if (needreset) {
763 DBG("ath9k: "
764 "tx hung, resetting the chip\n");
765 ath_reset(sc, 1);
766 }
767
768	sc->tx_complete_work_timer = currticks() + (ATH_TX_COMPLETE_POLL_INT * TICKS_PER_SEC) / 1000;
769}
770
771
772
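/*
 * TX completion entry point: process every queue for which the
 * hardware reports a completed transmit interrupt.
 */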
773void ath_tx_tasklet(struct ath_softc *sc)
774{
775 int i;
776 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
777
778 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
779
780 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
781 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
782 ath_tx_processq(sc, &sc->tx.txq[i]);
783 }
784}
785
786/*****************/
787/* Init, Cleanup */
788/*****************/
789
790int ath_tx_init(struct ath_softc *sc, int nbufs)
791{
792 int error = 0;
793
794 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
795 "tx", nbufs, 1, 1);
796 if (error != 0) {
797 DBG("ath9k: "
798 "Failed to allocate tx descriptors: %d\n", error);
799 goto err;
800 }
801
802	sc->tx_complete_work = ath_tx_complete_poll_work;
803
804err:
805 if (error != 0)
806 ath_tx_cleanup(sc);
807
808 return error;
809}
810
811void ath_tx_cleanup(struct ath_softc *sc)
812{
813 if (sc->tx.txdma.dd_desc_len != 0)
814 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
815}