/* iPXE: ath9k_xmit.c */
/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Modified for iPXE by Scott K Logan <logans@cottsay.net> July 2011
 * Original from Linux kernel 3.0.1
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <ipxe/io.h>

#include "ath9k.h"
#include "ar9003_mac.h"

#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
#define L_STF                   8
#define L_LTF                   8
#define L_SIG                   4
#define HT_SIG                  8
#define HT_STF                  4
#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) ((_usec) >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) ((((_usec) * 5) - 4) / 18)

#define IS_HT_RATE(_rate)     ((_rate) & 0x80)

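/*
 * Worked example (illustrative only, not used by the code): an HT
 * symbol lasts 4 us with the long guard interval and 3.6 us with the
 * short (half) GI, so for a 7-symbol payload:
 *
 *   SYMBOL_TIME(7)        = 7 << 2           = 28 us
 *   SYMBOL_TIME_HALFGI(7) = (7 * 18 + 4) / 5 = 26 us  (7 * 3.6 = 25.2,
 *                                                      rounded up)
 *
 * The "+ 4" makes the integer division round up, so a partial symbol
 * is always billed as a whole one.
 */
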
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
                               struct ath_atx_tid *tid,
                               struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
                                struct ath_txq *txq, struct list_head *bf_q,
                                struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                             struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);

enum {
        MCS_HT20,
        MCS_HT20_SGI,
        MCS_HT40,
        MCS_HT40_SGI,
};

/*********************/
/* Aggregation logic */
/*********************/

static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
        struct ath_atx_ac *ac = tid->ac;

        if (tid->paused)
                return;

        if (tid->sched)
                return;

        tid->sched = 1;
        list_add_tail(&tid->list, &ac->tid_q);

        if (ac->sched)
                return;

        ac->sched = 1;
        list_add_tail(&ac->list, &txq->axq_acq);
}

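/*
 * The scheduler state built by ath_tx_queue_tid() is a two-level
 * round-robin (sketch; names abbreviated):
 *
 *   txq->axq_acq: AC_BE -> AC_VI -> ...                (struct ath_atx_ac)
 *                   |
 *                   +-- ac->tid_q: TID0 -> TID5 -> ... (struct ath_atx_tid)
 *
 * Each TID is linked onto its AC at most once and each AC onto the
 * queue at most once; the sched flags make the call idempotent, and a
 * paused TID is left out entirely.
 */
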
static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
        struct ath_buf *bf = NULL;

        if (list_empty(&sc->tx.txbuf)) {
                return NULL;
        }

        bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
        list_del(&bf->list);

        return bf;
}

static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
        list_add_tail(&bf->list, &sc->tx.txbuf);
}

/********************/
/* Queue Management */
/********************/

struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath9k_tx_queue_info qi;
        static const int subtype_txq_to_hwq[] = {
                [WME_AC_BE] = ATH_TXQ_AC_BE,
        };
        int axq_qnum, i;

        memset(&qi, 0, sizeof(qi));
        qi.tqi_subtype = subtype_txq_to_hwq[subtype];
        qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
        qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
        qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
        qi.tqi_physCompBuf = 0;

        /*
         * Enable interrupts only for EOL and DESC conditions.
         * We mark tx descriptors to receive a DESC interrupt
         * when a tx queue gets deep; otherwise we wait for the
         * EOL to reap descriptors.  Note that this is done to
         * reduce interrupt load and this only defers reaping
         * descriptors, never transmitting frames.  Aside from
         * reducing interrupts this also permits more concurrency.
         * The only potential downside is if the tx queue backs
         * up, in which case the top half of the kernel may back
         * up due to a lack of tx descriptors.
         *
         * The UAPSD queue is an exception, since we take a desc-
         * based intr on the EOSP frames.
         */
        qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
                        TXQ_FLAG_TXDESCINT_ENABLE;

        axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
        if (axq_qnum == -1) {
                /*
                 * NB: don't print a message, this happens
                 * normally on parts with too few tx queues
                 */
                return NULL;
        }
        if ((unsigned int)axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
                DBG("ath9k: qnum %d out of range, max %zu!\n",
                        axq_qnum, ARRAY_SIZE(sc->tx.txq));
                ath9k_hw_releasetxqueue(ah, axq_qnum);
                return NULL;
        }
        if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
                struct ath_txq *txq = &sc->tx.txq[axq_qnum];

                txq->axq_qnum = axq_qnum;
                txq->mac80211_qnum = -1;
                txq->axq_link = NULL;
                INIT_LIST_HEAD(&txq->axq_q);
                INIT_LIST_HEAD(&txq->axq_acq);
                txq->axq_depth = 0;
                txq->axq_ampdu_depth = 0;
                txq->axq_tx_inprogress = 0;
                sc->tx.txqsetup |= 1 << axq_qnum;

                txq->txq_headidx = txq->txq_tailidx = 0;
                for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
                        INIT_LIST_HEAD(&txq->txq_fifo[i]);
                INIT_LIST_HEAD(&txq->txq_fifo_pending);
        }
        return &sc->tx.txq[axq_qnum];
}

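/*
 * Typical call site (a sketch only; the real caller lives in the
 * driver's init path):
 *
 *   struct ath_txq *txq;
 *
 *   txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, WME_AC_BE);
 *   if (!txq)
 *           return -ENOENT;    (hardware has no free TX queue)
 *
 * As the comment above notes, a NULL return is expected on parts with
 * too few tx queues, so the caller decides how fatal it is.
 */
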
/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, int retry_tx __unused)
{
        struct ath_buf *bf, *lastbf __unused;
        struct list_head bf_head;
        struct ath_tx_status ts;

        memset(&ts, 0, sizeof(ts));
        INIT_LIST_HEAD(&bf_head);

        for (;;) {
                if (list_empty(&txq->axq_q)) {
                        txq->axq_link = NULL;
                        break;
                }
                bf = list_first_entry(&txq->axq_q, struct ath_buf,
                                      list);

                if (bf->bf_stale) {
                        list_del(&bf->list);

                        ath_tx_return_buffer(sc, bf);
                        continue;
                }

                lastbf = bf->bf_lastbf;

                list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);

                txq->axq_depth--;
                ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
        }

        txq->axq_tx_inprogress = 0;
}

int ath_drain_all_txq(struct ath_softc *sc, int retry_tx)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_txq *txq;
        int i, npend = 0;

        if (sc->sc_flags & SC_OP_INVALID)
                return 1;

        ath9k_hw_abort_tx_dma(ah);

        /* Check if any queue remains active */
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
                if (!ATH_TXQ_SETUP(sc, i))
                        continue;

                npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
        }

        if (npend)
                DBG("ath9k: Failed to stop TX DMA!\n");

        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
                if (!ATH_TXQ_SETUP(sc, i))
                        continue;

                /*
                 * The caller will resume queues with ieee80211_wake_queues.
                 * Mark the queue as not stopped to prevent ath_tx_complete
                 * from waking the queue too early.
                 */
                txq = &sc->tx.txq[i];
                txq->stopped = 0;
                ath_draintxq(sc, txq, retry_tx);
        }

        return !npend;
}

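/*
 * Sketch of a reset-path caller (hypothetical; the real caller sits in
 * the driver's reset logic):
 *
 *   if (!ath_drain_all_txq(sc, 0))
 *           DBG("ath9k: unable to drain, chip reset required\n");
 *
 * The return value is 1 for a clean drain and 0 if any hardware queue
 * still reported pending frames after the DMA abort.
 */
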
void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
        ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
        sc->tx.txqsetup &= ~(1 << txq->axq_qnum);
}

/* For each axq_acq entry, for each tid, try to schedule packets
 * for transmit until ampdu_depth has reached min Q depth.
 */
void ath_txq_schedule(struct ath_softc *sc __unused, struct ath_txq *txq)
{
        struct ath_atx_ac *ac, *ac_tmp, *last_ac;
        struct ath_atx_tid *tid, *last_tid;

        if (list_empty(&txq->axq_acq) ||
            txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
                return;

        ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
        last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);

        list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
                last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
                list_del(&ac->list);
                ac->sched = 0;

                while (!list_empty(&ac->tid_q)) {
                        tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
                                               list);
                        list_del(&tid->list);
                        tid->sched = 0;

                        if (tid->paused)
                                continue;

                        /*
                         * add tid to round-robin queue if more frames
                         * are pending for the tid
                         */
                        if (!list_empty(&tid->buf_q))
                                ath_tx_queue_tid(txq, tid);

                        if (tid == last_tid ||
                            txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
                                break;
                }

                if (!list_empty(&ac->tid_q)) {
                        if (!ac->sched) {
                                ac->sched = 1;
                                list_add_tail(&ac->list, &txq->axq_acq);
                        }
                }

                if (ac == last_ac ||
                    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
                        return;
        }
}

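/*
 * One pass of ath_txq_schedule(), by example: with axq_acq = [AC0, AC1]
 * and AC0->tid_q = [T0, T1], the pass dequeues AC0, services T0 then T1
 * (re-queueing any TID whose buf_q is still non-empty), re-queues AC0
 * at the tail if it still has TIDs, then moves on to AC1; reaching
 * ATH_AGGR_MIN_QDEPTH frames in flight ends the pass early at either
 * level.
 */
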
/***********/
/* TX, DMA */
/***********/

/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                             struct list_head *head)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_buf *bf;

        /*
         * Insert the frame on the outbound list and
         * pass it on to the hardware.
         */

        if (list_empty(head))
                return;

        bf = list_first_entry(head, struct ath_buf, list);

        DBGIO("ath9k: qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

        list_splice_tail_init(head, &txq->axq_q);

        if (txq->axq_link == NULL) {
                ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
                DBGIO("ath9k: TXDP[%d] = %llx (%p)\n",
                        txq->axq_qnum, ito64(bf->bf_daddr),
                        bf->bf_desc);
        } else {
                *txq->axq_link = bf->bf_daddr;
                DBGIO("ath9k: link[%d] (%p)=%llx (%p)\n",
                        txq->axq_qnum, txq->axq_link,
                        ito64(bf->bf_daddr), bf->bf_desc);
        }
        ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
                               &txq->axq_link);
        ath9k_hw_txstart(ah, txq->axq_qnum);

        txq->axq_depth++;
}

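/*
 * How the DMA chain grows, by example: for an idle queue the first
 * buffer's address goes straight into the hardware TXDP register;
 * afterwards each new chain is spliced in by patching the link field
 * of the previous tail descriptor, whose address is cached in
 * txq->axq_link:
 *
 *   TXDP -> desc A -> desc B              (axq_link = &B.link)
 *   *axq_link = C->daddr;
 *   TXDP -> desc A -> desc B -> desc C    (axq_link = &C.link)
 *
 * ath9k_hw_txstart() then kicks (or re-kicks) DMA on the queue.
 */
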
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
                               struct ath_atx_tid *tid,
                               struct list_head *bf_head)
{
        struct ath_buf *bf;

        bf = list_first_entry(bf_head, struct ath_buf, list);
        bf->bf_state.bf_type &= ~BUF_AMPDU;

        /* update starting sequence number for subsequent ADDBA request */
        if (tid)
                INCR(tid->seq_start, IEEE80211_SEQ_MAX);

        bf->bf_lastbf = bf;
        ath_buf_set_rate(sc, bf, iob_len(bf->bf_mpdu) + FCS_LEN);
        ath_tx_txqaddbuf(sc, txq, bf_head);
}

static enum ath9k_pkt_type get_hw_packet_type(struct io_buffer *iob)
{
        struct ieee80211_frame *hdr;
        enum ath9k_pkt_type htype;
        u16 fc;

        hdr = (struct ieee80211_frame *)iob->data;
        fc = hdr->fc;

        if ((fc & (IEEE80211_FC_TYPE | IEEE80211_FC_SUBTYPE)) ==
            (IEEE80211_TYPE_MGMT | IEEE80211_STYPE_BEACON))
                htype = ATH9K_PKT_TYPE_BEACON;
        else if ((fc & (IEEE80211_FC_TYPE | IEEE80211_FC_SUBTYPE)) ==
                 (IEEE80211_TYPE_MGMT | IEEE80211_STYPE_PROBE_RESP))
                htype = ATH9K_PKT_TYPE_PROBE_RESP;
        else
                htype = ATH9K_PKT_TYPE_NORMAL;

        return htype;
}

static int setup_tx_flags(struct io_buffer *iob __unused)
{
        int flags = 0;

        flags |= ATH9K_TXDESC_INTREQ;

        return flags;
}

u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath9k_channel *curchan = ah->curchan;

        if ((sc->sc_flags & SC_OP_ENABLE_APM) &&
            (curchan->channelFlags & CHANNEL_5GHZ) &&
            (chainmask == 0x7) && (rate < 0x90))
                return 0x3;
        else
                return chainmask;
}

static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath9k_11n_rate_series series[4];
        const struct ath9k_legacy_rate *rate;
        int i, flags = 0;
        u8 rix = 0, ctsrate = 0;
        int is_pspoll;

        memset(series, 0, sizeof(series));

        is_pspoll = 0;

        /*
         * We check if Short Preamble is needed for the CTS rate by
         * checking the BSS's global flag.
         * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
         */
        rate = &sc->rates[sc->hw_rix];
        ctsrate = rate->hw_value;
        if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
                ctsrate |= rate->hw_value_short;

        for (i = 0; i < 4; i++) {
                int is_40 __unused, is_sgi __unused, is_sp;
                int phy;

                rix = sc->hw_rix;
                series[i].Tries = ATH_TXMAXTRY;

                if (sc->sc_flags & SC_OP_PROTECT_ENABLE) {
                        series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
                        flags |= ATH9K_TXDESC_CTSENA;
                }

                is_sp = !!(rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

                /* legacy rates */
                if ((sc->dev->channels + sc->dev->channel)->band ==
                    NET80211_BAND_2GHZ)
                        phy = CHANNEL_CCK;
                else
                        phy = CHANNEL_OFDM;

                series[i].Rate = rate->hw_value;
                if (rate->hw_value_short &&
                    (sc->sc_flags & SC_OP_PREAMBLE_SHORT)) {
                        if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
                                series[i].Rate |= rate->hw_value_short;
                } else {
                        is_sp = 0;
                }

                if (bf->bf_state.bfs_paprd)
                        series[i].ChSel = common->tx_chainmask;
                else
                        series[i].ChSel = ath_txchainmask_reduction(sc,
                                        common->tx_chainmask, series[i].Rate);

                series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
                        phy, rate->bitrate * 100, len, rix, is_sp);
        }

        /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
        if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
                flags &= ~ATH9K_TXDESC_RTSENA;

        /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
        if (flags & ATH9K_TXDESC_RTSENA)
                flags &= ~ATH9K_TXDESC_CTSENA;

        /* set dur_update_en for l-sig computation except for PS-Poll frames */
        ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
                                     bf->bf_lastbf->bf_desc,
                                     !is_pspoll, ctsrate,
                                     0, series, 4, flags);
}

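/*
 * Worked example for the duration computed above (illustrative
 * numbers, assuming rate->bitrate is in units of 100 kb/s as in
 * mac80211): a 24 Mb/s legacy OFDM rate has bitrate == 240, so each
 * series entry gets
 *
 *   ath9k_hw_computetxtime(ah, CHANNEL_OFDM, 240 * 100, len, rix, is_sp);
 *
 * i.e. 24000 kb/s for a 'len'-byte frame.  Note that unlike the Linux
 * driver, this port programs the same rate into all four fallback
 * entries, so hardware retries never step down the rate.
 */
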
static struct ath_buf *ath_tx_setup_buffer(struct net80211_device *dev,
                                           struct ath_txq *txq,
                                           struct io_buffer *iob)
{
        struct ath_softc *sc = dev->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_buf *bf;
        struct ath_desc *ds;
        int frm_type;
        static const enum ath9k_key_type net80211_keytype_to_ath[] = {
                        [NET80211_CRYPT_NONE] = ATH9K_KEY_TYPE_CLEAR,
                        [NET80211_CRYPT_WEP] = ATH9K_KEY_TYPE_WEP,
                        [NET80211_CRYPT_TKIP] = ATH9K_KEY_TYPE_TKIP,
                        [NET80211_CRYPT_CCMP] = ATH9K_KEY_TYPE_AES,
                        [NET80211_CRYPT_UNKNOWN] = ATH9K_KEY_TYPE_CLEAR,
        };

        bf = ath_tx_get_buffer(sc);
        if (!bf) {
                DBG("ath9k: TX buffers are full\n");
                return NULL;
        }

        ATH_TXBUF_RESET(bf);

        bf->bf_flags = setup_tx_flags(iob);
        bf->bf_mpdu = iob;

        bf->bf_buf_addr = virt_to_bus(iob->data);

        frm_type = get_hw_packet_type(iob);

        ds = bf->bf_desc;
        ath9k_hw_set_desc_link(ah, ds, 0);

        ath9k_hw_set11n_txdesc(ah, ds, iob_len(iob) + FCS_LEN, frm_type,
                               MAX_RATE_POWER, ATH9K_TXKEYIX_INVALID,
                               net80211_keytype_to_ath[dev->crypto->algorithm],
                               bf->bf_flags);

        ath9k_hw_filltxdesc(ah, ds,
                            iob_len(iob),       /* segment length */
                            1,                  /* first segment */
                            1,                  /* last segment */
                            ds,                 /* first descriptor */
                            bf->bf_buf_addr,
                            txq->axq_qnum);

        return bf;
}

/* FIXME: tx power */
static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
                             struct ath_tx_control *txctl)
{
        struct list_head bf_head;
        struct ath_atx_tid *tid = NULL;

        INIT_LIST_HEAD(&bf_head);
        list_add_tail(&bf->list, &bf_head);

        bf->bf_state.bfs_paprd = txctl->paprd;

        if (txctl->paprd)
                bf->bf_state.bfs_paprd_timestamp =
                        (currticks() * 1000) / TICKS_PER_SEC;

        ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, 1);

        ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
}

/* Upon failure the caller should free iob */
int ath_tx_start(struct net80211_device *dev, struct io_buffer *iob,
                 struct ath_tx_control *txctl)
{
        struct ath_softc *sc = dev->priv;
        struct ath_txq *txq = txctl->txq;
        struct ath_buf *bf;
        int q;

        /*
         * At this point, the vif, hw_key and sta pointers in the tx control
         * info are no longer valid (overwritten by the ath_frame_info data).
         */

        bf = ath_tx_setup_buffer(dev, txctl->txq, iob);
        if (!bf)
                return -ENOMEM;

        q = 0;
        if (txq == sc->tx.txq_map[q] &&
            ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
                txq->stopped = 1;
        }

        ath_tx_start_dma(sc, bf, txctl);

        return 0;
}

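/*
 * Hypothetical send path (sketch): the net80211 transmit hook fills a
 * tx control block and hands the I/O buffer over, keeping ownership
 * only on failure, per the contract above:
 *
 *   struct ath_tx_control txctl;
 *
 *   memset(&txctl, 0, sizeof(txctl));
 *   txctl.txq = sc->tx.txq_map[0];
 *   if (ath_tx_start(dev, iob, &txctl) != 0)
 *           free_iob(iob);
 *
 * On success the buffer belongs to the driver until TX completion
 * hands it back through net80211_tx_complete().
 */
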
/*****************/
/* TX Completion */
/*****************/

static void ath_tx_complete(struct ath_softc *sc, struct io_buffer *iob,
                            int tx_flags __unused, struct ath_tx_status *ts,
                            struct ath_txq *txq)
{
        struct net80211_device *dev = sc->dev;
        int q, padpos __unused, padsize __unused;

        DBGIO("ath9k: TX complete: iob: %p\n", iob);

        q = 0;
        if (txq == sc->tx.txq_map[q]) {
                if (--txq->pending_frames < 0)
                        txq->pending_frames = 0;

                if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
                        txq->stopped = 0;
                }
        }

        net80211_tx_complete(dev, iob, ts->ts_longretry,
                        (ts->ts_status & ATH9K_TXERR_MASK) ? EIO : 0);
}

static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
                                struct ath_txq *txq, struct list_head *bf_q,
                                struct ath_tx_status *ts, int txok, int sendbar)
{
        struct io_buffer *iob = bf->bf_mpdu;
        int tx_flags = 0;

        if (sendbar)
                tx_flags = ATH_TX_BAR;

        if (!txok) {
                tx_flags |= ATH_TX_ERROR;

                if (bf_isxretried(bf))
                        tx_flags |= ATH_TX_XRETRY;
        }

        bf->bf_buf_addr = 0;

        ath_tx_complete(sc, iob, tx_flags, ts, txq);

        /* At this point, iob (bf->bf_mpdu) is consumed; make sure we
         * don't accidentally reference it later.
         */
        bf->bf_mpdu = NULL;

        /*
         * Return the list of ath_bufs of this mpdu to the free queue
         */
        list_splice_tail_init(bf_q, &sc->tx.txbuf);
}

static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_buf *bf, *lastbf, *bf_held = NULL;
        struct list_head bf_head;
        struct ath_desc *ds;
        struct ath_tx_status ts;
        int txok;
        int status;

        DBGIO("ath9k: tx queue %d (%x), link %p\n",
                txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
                txq->axq_link);

        for (;;) {
                if (list_empty(&txq->axq_q)) {
                        txq->axq_link = NULL;
                        if (sc->sc_flags & SC_OP_TXAGGR)
                                ath_txq_schedule(sc, txq);
                        break;
                }
                bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

                /*
                 * There is a race condition in which a BH gets scheduled
                 * after software writes TxE and before the hardware reloads
                 * the last descriptor to pick up the newly chained one.
                 * Software must keep the last DONE descriptor as a
                 * holding descriptor - software does so by marking
                 * it with the STALE flag.
                 */
                bf_held = NULL;
                if (bf->bf_stale) {
                        bf_held = bf;
                        if (list_is_last(&bf_held->list, &txq->axq_q)) {
                                break;
                        } else {
                                bf = list_entry(bf_held->list.next,
                                                struct ath_buf, list);
                        }
                }

                lastbf = bf->bf_lastbf;
                ds = lastbf->bf_desc;

                memset(&ts, 0, sizeof(ts));
                status = ath9k_hw_txprocdesc(ah, ds, &ts);
                if (status == -EINPROGRESS) {
                        break;
                }

                /*
                 * Remove the ath_bufs of the same transmit unit from txq,
                 * but leave the last descriptor behind as the holding
                 * descriptor for hw.
                 */
                lastbf->bf_stale = 1;
                INIT_LIST_HEAD(&bf_head);
                if (!list_is_singular(&lastbf->list))
                        list_cut_position(&bf_head,
                                &txq->axq_q, lastbf->list.prev);

                txq->axq_depth--;
                txok = !(ts.ts_status & ATH9K_TXERR_MASK);
                txq->axq_tx_inprogress = 0;
                if (bf_held) {
                        list_del(&bf_held->list);
                        ath_tx_return_buffer(sc, bf_held);
                }

                /*
                 * This frame is sent out as a single frame.
                 * Use hardware retry status for this frame.
                 */
                if (ts.ts_status & ATH9K_TXERR_XRETRY)
                        bf->bf_state.bf_type |= BUF_XRETRY;

                ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);

                if (sc->sc_flags & SC_OP_TXAGGR)
                        ath_txq_schedule(sc, txq);
        }
}

static void ath_tx_complete_poll_work(struct ath_softc *sc)
{
        struct ath_txq *txq;
        int i;
        int needreset = 0;

        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
                if (ATH_TXQ_SETUP(sc, i)) {
                        txq = &sc->tx.txq[i];
                        if (txq->axq_depth) {
                                if (txq->axq_tx_inprogress) {
                                        needreset = 1;
                                        break;
                                } else {
                                        txq->axq_tx_inprogress = 1;
                                }
                        }
                }

        if (needreset) {
                DBG("ath9k: tx hung, resetting the chip\n");
                ath_reset(sc, 1);
        }

        sc->tx_complete_work_timer =
                (currticks() * 1000) / TICKS_PER_SEC + ATH_TX_COMPLETE_POLL_INT;
}
00768 
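/*
 * The poll above is a simple watchdog: the first pass over a non-empty
 * queue sets axq_tx_inprogress, and a later pass that finds the flag
 * still set (ath_tx_processq clears it whenever a frame completes)
 * declares the TX path hung and resets the chip.  A caller would re-arm
 * it from its periodic poll, e.g. (sketch):
 *
 *   if ((currticks() * 1000) / TICKS_PER_SEC >= sc->tx_complete_work_timer)
 *           sc->tx_complete_work(sc);
 */
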
void ath_tx_tasklet(struct ath_softc *sc)
{
        int i;
        u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);

        ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);

        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
                if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
                        ath_tx_processq(sc, &sc->tx.txq[i]);
        }
}

/*****************/
/* Init, Cleanup */
/*****************/

int ath_tx_init(struct ath_softc *sc, int nbufs)
{
        int error = 0;

        error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
                                  "tx", nbufs, 1, 1);
        if (error != 0) {
                DBG("ath9k: Failed to allocate tx descriptors: %d\n", error);
                goto err;
        }

        sc->tx_complete_work = ath_tx_complete_poll_work;

err:
        if (error != 0)
                ath_tx_cleanup(sc);

        return error;
}

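/*
 * Attach-path usage (hypothetical caller; ATH_TXBUF is the driver's
 * usual buffer count):
 *
 *   if (ath_tx_init(sc, ATH_TXBUF) != 0)
 *           goto err_free;
 *
 * ath_tx_cleanup() below checks dd_desc_len before freeing, so it is
 * safe to call unconditionally on the teardown path, and ath_tx_init()
 * already invokes it on its own failure.
 */
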
void ath_tx_cleanup(struct ath_softc *sc)
{
        if (sc->tx.txdma.dd_desc_len != 0)
                ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
}