iPXE
ath5k_dma.c File Reference
#include <unistd.h>
#include "ath5k.h"
#include "reg.h"
#include "base.h"

Go to the source code of this file.

Functions

 FILE_LICENCE (MIT)
void ath5k_hw_start_rx_dma (struct ath5k_hw *ah)
 ath5k_hw_start_rx_dma - Start DMA receive
int ath5k_hw_stop_rx_dma (struct ath5k_hw *ah)
 ath5k_hw_stop_rx_dma - Stop DMA receive
u32 ath5k_hw_get_rxdp (struct ath5k_hw *ah)
 ath5k_hw_get_rxdp - Get RX Descriptor's address
void ath5k_hw_set_rxdp (struct ath5k_hw *ah, u32 phys_addr)
 ath5k_hw_set_rxdp - Set RX Descriptor's address
int ath5k_hw_start_tx_dma (struct ath5k_hw *ah, unsigned int queue)
 ath5k_hw_start_tx_dma - Start DMA transmit for a specific queue
int ath5k_hw_stop_tx_dma (struct ath5k_hw *ah, unsigned int queue)
 ath5k_hw_stop_tx_dma - Stop DMA transmit on a specific queue
u32 ath5k_hw_get_txdp (struct ath5k_hw *ah, unsigned int queue)
 ath5k_hw_get_txdp - Get TX Descriptor's address for a specific queue
int ath5k_hw_set_txdp (struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
 ath5k_hw_set_txdp - Set TX Descriptor's address for a specific queue
int ath5k_hw_update_tx_triglevel (struct ath5k_hw *ah, int increase)
 ath5k_hw_update_tx_triglevel - Update tx trigger level
int ath5k_hw_is_intr_pending (struct ath5k_hw *ah)
 ath5k_hw_is_intr_pending - Check if we have pending interrupts
int ath5k_hw_get_isr (struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
enum ath5k_int ath5k_hw_set_imr (struct ath5k_hw *ah, enum ath5k_int new_mask)
 ath5k_hw_set_imr - Set interrupt mask

Function Documentation

FILE_LICENCE ( MIT  )
void ath5k_hw_start_rx_dma ( struct ath5k_hw *ah )

ath5k_hw_start_rx_dma - Start DMA receive

@ah: The &struct ath5k_hw

Definition at line 54 of file ath5k_dma.c.

References AR5K_CR, AR5K_CR_RXE, ath5k_hw_reg_read(), and ath5k_hw_reg_write().

Referenced by ath5k_rx_start().
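
The body of this function is not reproduced in this listing; based on the references above (AR5K_CR, AR5K_CR_RXE, ath5k_hw_reg_write() and ath5k_hw_reg_read()), a minimal sketch of what it does:

{
        /* Sketch: enable the receive DMA unit by setting the RXE bit in
         * the control register, then read the register back to flush
         * the write */
        ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
        ath5k_hw_reg_read(ah, AR5K_CR);
}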

int ath5k_hw_stop_rx_dma ( struct ath5k_hw *ah )

ath5k_hw_stop_rx_dma - Stop DMA receive

@ah: The &struct ath5k_hw

Definition at line 65 of file ath5k_dma.c.

References AR5K_CR, AR5K_CR_RXD, AR5K_CR_RXE, ath5k_hw_reg_read(), ath5k_hw_reg_write(), EBUSY, and udelay().

Referenced by ath5k_rx_stop().

{
        unsigned int i;

        ath5k_hw_reg_write(ah, AR5K_CR_RXD, AR5K_CR);

        /*
         * It may take some time to disable the DMA receive unit
         */
        for (i = 1000; i > 0 &&
                        (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) != 0;
                        i--)
                udelay(10);

        return i ? 0 : -EBUSY;
}
u32 ath5k_hw_get_rxdp ( struct ath5k_hw *ah )

ath5k_hw_get_rxdp - Get RX Descriptor's address

@ah: The &struct ath5k_hw

XXX: Is RXDP read and clear ?

Definition at line 89 of file ath5k_dma.c.

References AR5K_RXDP, and ath5k_hw_reg_read().

{
        return ath5k_hw_reg_read(ah, AR5K_RXDP);
}
void ath5k_hw_set_rxdp ( struct ath5k_hw *ah, u32 phys_addr )

ath5k_hw_set_rxdp - Set RX Descriptor's address

@ah: The &struct ath5k_hw
@phys_addr: RX descriptor address

XXX: Should we check if rx is enabled before setting rxdp ?

Definition at line 102 of file ath5k_dma.c.

References AR5K_RXDP, and ath5k_hw_reg_write().

Referenced by ath5k_rx_start().

{
        ath5k_hw_reg_write(ah, phys_addr, AR5K_RXDP);
}
int ath5k_hw_start_tx_dma ( struct ath5k_hw *ah, unsigned int queue )

ath5k_hw_start_tx_dma - Start DMA transmit for a specific queue

@ah: The &struct ath5k_hw
@queue: The hw queue number

Start DMA transmit for a specific queue and, since 5210 doesn't have QCU/DCU, set up queue parameters for 5210 here based on queue type (one queue for normal data and one queue for beacons). For queue setup on newer chips check out qcu.c. Returns -EIO if the queue is inactive or already disabled.

NOTE: Must be called after setting up tx control descriptor for that queue (see below).
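
A hedged sketch of the expected call order on the caller's side (ath5k_txbuf_setup() is listed as the caller below; desc_phys_addr is an illustrative variable, not from this file):

        /* Illustrative: point the queue at its first tx control descriptor,
         * then start DMA transmit on that queue */
        ath5k_hw_set_txdp(ah, queue, desc_phys_addr);
        ath5k_hw_start_tx_dma(ah, queue);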

Definition at line 127 of file ath5k_dma.c.

References ath5k_hw::ah_txq, ath5k_hw::ah_version, AR5K_AR5210, AR5K_CR, AR5K_CR_TXD0, AR5K_CR_TXE0, AR5K_QCU_TXD, AR5K_QCU_TXE, AR5K_REG_READ_Q, AR5K_REG_WRITE_Q, AR5K_TX_QUEUE_INACTIVE, ath5k_hw_reg_read(), ath5k_hw_reg_write(), EIO, and ath5k_txq_info::tqi_type.

Referenced by ath5k_txbuf_setup().

{
        u32 tx_queue;

        /* Return if queue is declared inactive */
        if (ah->ah_txq.tqi_type == AR5K_TX_QUEUE_INACTIVE)
                return -EIO;

        if (ah->ah_version == AR5K_AR5210) {
                tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);

                /* Assume always a data queue */
                tx_queue |= AR5K_CR_TXE0 & ~AR5K_CR_TXD0;

                /* Start queue */
                ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
                ath5k_hw_reg_read(ah, AR5K_CR);
        } else {
                /* Return if queue is disabled */
                if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue))
                        return -EIO;

                /* Start queue */
                AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXE, queue);
        }

        return 0;
}
int ath5k_hw_stop_tx_dma ( struct ath5k_hw *ah, unsigned int queue )

ath5k_hw_stop_tx_dma - Stop DMA transmit on a specific queue

@ah: The &struct ath5k_hw
@queue: The hw queue number

Stop DMA transmit on a specific hw queue and drain the queue so we don't have any pending frames. Returns -EBUSY if we still have pending frames, or -EIO if the queue is inactive.

Definition at line 167 of file ath5k_dma.c.

References ath5k_hw::ah_mac_version, ath5k_hw::ah_txq, ath5k_hw::ah_version, AR5K_AR5210, AR5K_CR, AR5K_CR_TXD0, AR5K_CR_TXE0, AR5K_DIAG_SW_5211, AR5K_DIAG_SW_CHANEL_IDLE_HIGH, AR5K_QCU_STS_FRMPENDCNT, AR5K_QCU_TXD, AR5K_QUEUE_STATUS, AR5K_QUIET_CTL1, AR5K_QUIET_CTL1_NEXT_QT_TSF, AR5K_QUIET_CTL1_QT_EN, AR5K_QUIET_CTL2, AR5K_QUIET_CTL2_QT_DUR, AR5K_QUIET_CTL2_QT_PER, AR5K_REG_DISABLE_BITS, AR5K_REG_ENABLE_BITS, AR5K_REG_SM, AR5K_REG_WRITE_Q, AR5K_SREV_AR2414, AR5K_TSF_L32_5211, AR5K_TX_QUEUE_INACTIVE, ath5k_hw_reg_read(), ath5k_hw_reg_write(), EBUSY, EIO, pending, ath5k_txq_info::tqi_type, and udelay().

Referenced by ath5k_txq_cleanup().

{
        unsigned int i = 40;
        u32 tx_queue, pending;

        /* Return if queue is declared inactive */
        if (ah->ah_txq.tqi_type == AR5K_TX_QUEUE_INACTIVE)
                return -EIO;

        if (ah->ah_version == AR5K_AR5210) {
                tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);

                /* Assume a data queue */
                tx_queue |= AR5K_CR_TXD0 & ~AR5K_CR_TXE0;

                /* Stop queue */
                ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
                ath5k_hw_reg_read(ah, AR5K_CR);
        } else {
                /*
                 * Schedule TX disable and wait until queue is empty
                 */
                AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXD, queue);

                /*Check for pending frames*/
                do {
                        pending = ath5k_hw_reg_read(ah,
                                AR5K_QUEUE_STATUS(queue)) &
                                AR5K_QCU_STS_FRMPENDCNT;
                        udelay(100);
                } while (--i && pending);

                /* For 2413+ order PCU to drop packets using
                 * QUIET mechanism */
                if (ah->ah_mac_version >= (AR5K_SREV_AR2414 >> 4) && pending) {
                        /* Set periodicity and duration */
                        ath5k_hw_reg_write(ah,
                                AR5K_REG_SM(100, AR5K_QUIET_CTL2_QT_PER)|
                                AR5K_REG_SM(10, AR5K_QUIET_CTL2_QT_DUR),
                                AR5K_QUIET_CTL2);

                        /* Enable quiet period for current TSF */
                        ath5k_hw_reg_write(ah,
                                AR5K_QUIET_CTL1_QT_EN |
                                AR5K_REG_SM(ath5k_hw_reg_read(ah,
                                                AR5K_TSF_L32_5211) >> 10,
                                                AR5K_QUIET_CTL1_NEXT_QT_TSF),
                                AR5K_QUIET_CTL1);

                        /* Force channel idle high */
                        AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW_5211,
                                        AR5K_DIAG_SW_CHANEL_IDLE_HIGH);

                        /* Wait a while and disable mechanism */
                        udelay(200);
                        AR5K_REG_DISABLE_BITS(ah, AR5K_QUIET_CTL1,
                                                AR5K_QUIET_CTL1_QT_EN);

                        /* Re-check for pending frames */
                        i = 40;
                        do {
                                pending = ath5k_hw_reg_read(ah,
                                        AR5K_QUEUE_STATUS(queue)) &
                                        AR5K_QCU_STS_FRMPENDCNT;
                                udelay(100);
                        } while (--i && pending);

                        AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW_5211,
                                        AR5K_DIAG_SW_CHANEL_IDLE_HIGH);
                }

                /* Clear register */
                ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD);
                if (pending)
                        return -EBUSY;
        }

        /* TODO: Check for success on 5210 else return error */
        return 0;
}
u32 ath5k_hw_get_txdp ( struct ath5k_hw *ah, unsigned int queue )

ath5k_hw_get_txdp - Get TX Descriptor's address for a specific queue

@ah: The &struct ath5k_hw
@queue: The hw queue number

Get TX descriptor's address for a specific queue. For 5210 we ignore the queue number and use tx queue type since we only have 2 queues. We use TXDP0 for normal data queue and TXDP1 for beacon queue. For newer chips with QCU/DCU we just read the corresponding TXDP register.

XXX: Is TXDP read and clear ?

Definition at line 261 of file ath5k_dma.c.

References ath5k_hw::ah_version, AR5K_AR5210, AR5K_NOQCU_TXDP0, AR5K_QUEUE_TXDP, and ath5k_hw_reg_read().

Referenced by ath5k_txq_cleanup().

{
        u16 tx_reg;

        /*
         * Get the transmit queue descriptor pointer from the selected queue
         */
        /*5210 doesn't have QCU*/
        if (ah->ah_version == AR5K_AR5210) {
                /* Assume a data queue */
                tx_reg = AR5K_NOQCU_TXDP0;
        } else {
                tx_reg = AR5K_QUEUE_TXDP(queue);
        }

        return ath5k_hw_reg_read(ah, tx_reg);
}
int ath5k_hw_set_txdp ( struct ath5k_hw *ah, unsigned int queue, u32 phys_addr )

ath5k_hw_set_txdp - Set TX Descriptor's address for a specific queue

@ah: The &struct ath5k_hw
@queue: The hw queue number

Set TX descriptor's address for a specific queue. For 5210 we ignore the queue number and use the tx queue type since we only have 2 queues; as above, we use TXDP0 for the normal data queue and TXDP1 for the beacon queue. For newer chips with QCU/DCU we just set the corresponding TXDP register. Returns -EINVAL if the queue type is invalid for 5210, and -EIO if the queue is still active.

Definition at line 292 of file ath5k_dma.c.

References ath5k_hw::ah_version, AR5K_AR5210, AR5K_NOQCU_TXDP0, AR5K_QCU_TXE, AR5K_QUEUE_TXDP, AR5K_REG_READ_Q, ath5k_hw_reg_write(), and EIO.

Referenced by ath5k_txbuf_setup().

{
        u16 tx_reg;

        /*
         * Set the transmit queue descriptor pointer register by type
         * on 5210
         */
        if (ah->ah_version == AR5K_AR5210) {
                /* Assume a data queue */
                tx_reg = AR5K_NOQCU_TXDP0;
        } else {
                /*
                 * Set the transmit queue descriptor pointer for
                 * the selected queue on QCU for 5211+
                 * (this won't work if the queue is still active)
                 */
                if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
                        return -EIO;

                tx_reg = AR5K_QUEUE_TXDP(queue);
        }

        /* Set descriptor pointer */
        ath5k_hw_reg_write(ah, phys_addr, tx_reg);

        return 0;
}
int ath5k_hw_update_tx_triglevel ( struct ath5k_hw *ah, int increase )

ath5k_hw_update_tx_triglevel - Update tx trigger level

@ah: The &struct ath5k_hw
@increase: Flag to force increase of trigger level

This function increases/decreases the tx trigger level for the tx FIFO buffer (aka FIFO threshold) that is used to indicate when the PCU flushes the buffer and transmits its data. Lowering it results in sending small frames more quickly but can lead to tx underruns; raising it a lot can cause other problems (bmiss is probably related). Right now we start with the lowest possible value (64 bytes) and, if we get a tx underrun, we increase it using the increase flag. Returns -EIO if we have reached the maximum/minimum.
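
For example (with illustrative numbers, since the tuning constants are defined elsewhere), if the current level is 64 and AR5K_TUNE_MAX_TX_FIFO_THRES is 512, an increase step sets the level to 64 + (512 - 64) / 2 = 288; each increase halves the remaining distance to the maximum, while a decrease subtracts one and bails out with -EIO once the level would drop below AR5K_TUNE_MIN_TX_FIFO_THRES.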

XXX: Link this with tx DMA size? XXX: Use it to save interrupts? TODO: Needs testing; I think it's related to bmiss...

Definition at line 339 of file ath5k_dma.c.

References ath5k_hw::ah_imr, ath5k_hw::ah_version, AR5K_AR5210, AR5K_INT_GLOBAL, AR5K_REG_MS, AR5K_REG_WRITE_BITS, AR5K_TRIG_LVL, AR5K_TUNE_MAX_TX_FIFO_THRES, AR5K_TUNE_MIN_TX_FIFO_THRES, AR5K_TXCFG, AR5K_TXCFG_TXFULL, ath5k_hw_reg_read(), ath5k_hw_reg_write(), ath5k_hw_set_imr(), done, EIO, imr, and ret.

Referenced by ath5k_poll().

{
        u32 trigger_level, imr;
        int ret = -EIO;

        /*
         * Disable interrupts by setting the mask
         */
        imr = ath5k_hw_set_imr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL);

        trigger_level = AR5K_REG_MS(ath5k_hw_reg_read(ah, AR5K_TXCFG),
                        AR5K_TXCFG_TXFULL);

        if (!increase) {
                if (--trigger_level < AR5K_TUNE_MIN_TX_FIFO_THRES)
                        goto done;
        } else
                trigger_level +=
                        ((AR5K_TUNE_MAX_TX_FIFO_THRES - trigger_level) / 2);

        /*
         * Update trigger level on success
         */
        if (ah->ah_version == AR5K_AR5210)
                ath5k_hw_reg_write(ah, trigger_level, AR5K_TRIG_LVL);
        else
                AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
                                AR5K_TXCFG_TXFULL, trigger_level);

        ret = 0;

done:
        /*
         * Restore interrupt mask
         */
        ath5k_hw_set_imr(ah, imr);

        return ret;
}
int ath5k_hw_is_intr_pending ( struct ath5k_hw *ah )

ath5k_hw_is_intr_pending - Check if we have pending interrupts

@ah: The &struct ath5k_hw

Check if we have pending interrupts to process. Returns 1 if we have pending interrupts and 0 if we don't.
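
Both this helper and ath5k_hw_get_isr() below are listed as referenced by ath5k_poll(); a hedged sketch of how a caller might combine them:

        /* Illustrative polling pattern */
        enum ath5k_int mask;

        if (ath5k_hw_is_intr_pending(ah))
                ath5k_hw_get_isr(ah, &mask);    /* read and clear interrupt status */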

Definition at line 391 of file ath5k_dma.c.

References AR5K_INTPEND, and ath5k_hw_reg_read().

Referenced by ath5k_poll().

{
        return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0;
}
int ath5k_hw_get_isr ( struct ath5k_hw *ah, enum ath5k_int *interrupt_mask )

Definition at line 412 of file ath5k_dma.c.

References ath5k_hw::ah_imr, ath5k_hw::ah_txq_isr, ath5k_hw::ah_version, AR5K_AR5210, AR5K_INT_BCN_TIMEOUT, AR5K_INT_BNR, AR5K_INT_CAB_TIMEOUT, AR5K_INT_COMMON, AR5K_INT_DTIM, AR5K_INT_DTIM_SYNC, AR5K_INT_FATAL, AR5K_INT_NOCARD, AR5K_INT_QCBRORN, AR5K_INT_QCBRURN, AR5K_INT_QTRIG, AR5K_INT_RX_DOPPLER, AR5K_INT_TIM, AR5K_ISR, AR5K_ISR_BCNMISC, AR5K_ISR_BNR, AR5K_ISR_DPERR, AR5K_ISR_HIUERR, AR5K_ISR_MCABT, AR5K_ISR_QCBRORN, AR5K_ISR_QCBRURN, AR5K_ISR_QTRIG, AR5K_ISR_RXDOPPLER, AR5K_ISR_SSERR, AR5K_ISR_TIM, AR5K_ISR_TXDESC, AR5K_ISR_TXEOL, AR5K_ISR_TXERR, AR5K_ISR_TXOK, AR5K_ISR_TXURN, AR5K_RAC_PISR, AR5K_RAC_SISR0, AR5K_RAC_SISR1, AR5K_RAC_SISR2, AR5K_RAC_SISR3, AR5K_RAC_SISR4, AR5K_REG_MS, AR5K_SISR0_QCU_TXDESC, AR5K_SISR0_QCU_TXOK, AR5K_SISR1_QCU_TXEOL, AR5K_SISR1_QCU_TXERR, AR5K_SISR2_BCN_TIMEOUT, AR5K_SISR2_CAB_TIMEOUT, AR5K_SISR2_DPERR, AR5K_SISR2_DTIM, AR5K_SISR2_DTIM_SYNC, AR5K_SISR2_MCABT, AR5K_SISR2_QCU_TXURN, AR5K_SISR2_SSERR, AR5K_SISR2_TIM, AR5K_SISR3_QCBRORN, AR5K_SISR3_QCBRURN, AR5K_SISR4_QTRIG, ath5k_hw_reg_read(), data, and ENODEV.

Referenced by ath5k_poll().

{
        u32 data;

        /*
         * Read interrupt status from the Interrupt Status register
         * on 5210
         */
        if (ah->ah_version == AR5K_AR5210) {
                data = ath5k_hw_reg_read(ah, AR5K_ISR);
                if (data == AR5K_INT_NOCARD) {
                        *interrupt_mask = data;
                        return -ENODEV;
                }
        } else {
                /*
                 * Read interrupt status from Interrupt
                 * Status Register shadow copy (Read And Clear)
                 *
                 * Note: PISR/SISR Not available on 5210
                 */
                data = ath5k_hw_reg_read(ah, AR5K_RAC_PISR);
                if (data == AR5K_INT_NOCARD) {
                        *interrupt_mask = data;
                        return -ENODEV;
                }
        }

        /*
         * Get abstract interrupt mask (driver-compatible)
         */
        *interrupt_mask = (data & AR5K_INT_COMMON) & ah->ah_imr;

        if (ah->ah_version != AR5K_AR5210) {
                u32 sisr2 = ath5k_hw_reg_read(ah, AR5K_RAC_SISR2);

                /*HIU = Host Interface Unit (PCI etc)*/
                if (data & (AR5K_ISR_HIUERR))
                        *interrupt_mask |= AR5K_INT_FATAL;

                /*Beacon Not Ready*/
                if (data & (AR5K_ISR_BNR))
                        *interrupt_mask |= AR5K_INT_BNR;

                if (sisr2 & (AR5K_SISR2_SSERR | AR5K_SISR2_DPERR |
                             AR5K_SISR2_MCABT))
                        *interrupt_mask |= AR5K_INT_FATAL;

                if (data & AR5K_ISR_TIM)
                        *interrupt_mask |= AR5K_INT_TIM;

                if (data & AR5K_ISR_BCNMISC) {
                        if (sisr2 & AR5K_SISR2_TIM)
                                *interrupt_mask |= AR5K_INT_TIM;
                        if (sisr2 & AR5K_SISR2_DTIM)
                                *interrupt_mask |= AR5K_INT_DTIM;
                        if (sisr2 & AR5K_SISR2_DTIM_SYNC)
                                *interrupt_mask |= AR5K_INT_DTIM_SYNC;
                        if (sisr2 & AR5K_SISR2_BCN_TIMEOUT)
                                *interrupt_mask |= AR5K_INT_BCN_TIMEOUT;
                        if (sisr2 & AR5K_SISR2_CAB_TIMEOUT)
                                *interrupt_mask |= AR5K_INT_CAB_TIMEOUT;
                }

                if (data & AR5K_ISR_RXDOPPLER)
                        *interrupt_mask |= AR5K_INT_RX_DOPPLER;
                if (data & AR5K_ISR_QCBRORN) {
                        *interrupt_mask |= AR5K_INT_QCBRORN;
                        ah->ah_txq_isr |= AR5K_REG_MS(
                                        ath5k_hw_reg_read(ah, AR5K_RAC_SISR3),
                                        AR5K_SISR3_QCBRORN);
                }
                if (data & AR5K_ISR_QCBRURN) {
                        *interrupt_mask |= AR5K_INT_QCBRURN;
                        ah->ah_txq_isr |= AR5K_REG_MS(
                                        ath5k_hw_reg_read(ah, AR5K_RAC_SISR3),
                                        AR5K_SISR3_QCBRURN);
                }
                if (data & AR5K_ISR_QTRIG) {
                        *interrupt_mask |= AR5K_INT_QTRIG;
                        ah->ah_txq_isr |= AR5K_REG_MS(
                                        ath5k_hw_reg_read(ah, AR5K_RAC_SISR4),
                                        AR5K_SISR4_QTRIG);
                }

                if (data & AR5K_ISR_TXOK)
                        ah->ah_txq_isr |= AR5K_REG_MS(
                                        ath5k_hw_reg_read(ah, AR5K_RAC_SISR0),
                                        AR5K_SISR0_QCU_TXOK);

                if (data & AR5K_ISR_TXDESC)
                        ah->ah_txq_isr |= AR5K_REG_MS(
                                        ath5k_hw_reg_read(ah, AR5K_RAC_SISR0),
                                        AR5K_SISR0_QCU_TXDESC);

                if (data & AR5K_ISR_TXERR)
                        ah->ah_txq_isr |= AR5K_REG_MS(
                                        ath5k_hw_reg_read(ah, AR5K_RAC_SISR1),
                                        AR5K_SISR1_QCU_TXERR);

                if (data & AR5K_ISR_TXEOL)
                        ah->ah_txq_isr |= AR5K_REG_MS(
                                        ath5k_hw_reg_read(ah, AR5K_RAC_SISR1),
                                        AR5K_SISR1_QCU_TXEOL);

                if (data & AR5K_ISR_TXURN)
                        ah->ah_txq_isr |= AR5K_REG_MS(
                                        ath5k_hw_reg_read(ah, AR5K_RAC_SISR2),
                                        AR5K_SISR2_QCU_TXURN);
        } else {
                if (data & (AR5K_ISR_SSERR | AR5K_ISR_MCABT |
                            AR5K_ISR_HIUERR | AR5K_ISR_DPERR))
                        *interrupt_mask |= AR5K_INT_FATAL;

                /*
                 * XXX: BMISS interrupts may occur after association.
                 * I found this on 5210 code but it needs testing. If this is
                 * true we should disable them before assoc and re-enable them
                 * after a successful assoc + some jiffies.
                        interrupt_mask &= ~AR5K_INT_BMISS;
                 */
        }

        return 0;
}
enum ath5k_int ath5k_hw_set_imr ( struct ath5k_hw *ah, enum ath5k_int new_mask )

ath5k_hw_set_imr - Set interrupt mask

@ah: The &struct ath5k_hw
@new_mask: The new interrupt mask to be set

Set the interrupt mask in hardware to save interrupts. We do that by mapping ath5k_int bits to hw-specific bits to remove abstraction, and then writing the Interrupt Mask Register.
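
A usage pattern visible earlier in this file (see ath5k_hw_update_tx_triglevel()) is to save the returned old mask, clear AR5K_INT_GLOBAL to quiesce interrupts around a critical section, and then restore the old mask:

        enum ath5k_int old_mask;

        /* Mask interrupts, do the work, then restore the previous mask */
        old_mask = ath5k_hw_set_imr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL);
        /* ... critical section ... */
        ath5k_hw_set_imr(ah, old_mask);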

Definition at line 548 of file ath5k_dma.c.

References ath5k_hw::ah_ier, ath5k_hw::ah_imr, ath5k_hw::ah_version, AR5K_AR5210, AR5K_IER, AR5K_IER_DISABLE, AR5K_IMR, AR5K_IMR_DPERR, AR5K_IMR_HIUERR, AR5K_IMR_MCABT, AR5K_IMR_RXDOPPLER, AR5K_IMR_SSERR, AR5K_IMR_TIM, AR5K_INT_BCN_TIMEOUT, AR5K_INT_BNR, AR5K_INT_CAB_TIMEOUT, AR5K_INT_COMMON, AR5K_INT_DTIM, AR5K_INT_DTIM_SYNC, AR5K_INT_FATAL, AR5K_INT_GLOBAL, AR5K_INT_RX_DOPPLER, AR5K_INT_RXNOFRM, AR5K_INT_TIM, AR5K_PIMR, AR5K_RXNOFRM, AR5K_SIMR2, AR5K_SIMR2_DPERR, AR5K_SIMR2_MCABT, AR5K_SIMR2_QCU_TXURN, AR5K_SIMR2_SSERR, AR5K_SISR2_BCN_TIMEOUT, AR5K_SISR2_CAB_TIMEOUT, AR5K_SISR2_DTIM, AR5K_SISR2_DTIM_SYNC, AR5K_SISR2_TIM, ath5k_hw_reg_read(), and ath5k_hw_reg_write().

Referenced by ath5k_hw_reset(), ath5k_hw_update_tx_triglevel(), ath5k_irq(), ath5k_reset(), and ath5k_stop_hw().

{
        enum ath5k_int old_mask, int_mask;

        old_mask = ah->ah_imr;

        /*
         * Disable card interrupts to prevent any race conditions
         * (they will be re-enabled afterwards if AR5K_INT GLOBAL
         * is set again on the new mask).
         */
        if (old_mask & AR5K_INT_GLOBAL) {
                ath5k_hw_reg_write(ah, AR5K_IER_DISABLE, AR5K_IER);
                ath5k_hw_reg_read(ah, AR5K_IER);
        }

        /*
         * Add additional, chipset-dependent interrupt mask flags
         * and write them to the IMR (interrupt mask register).
         */
        int_mask = new_mask & AR5K_INT_COMMON;

        if (ah->ah_version != AR5K_AR5210) {
                /* Preserve per queue TXURN interrupt mask */
                u32 simr2 = ath5k_hw_reg_read(ah, AR5K_SIMR2)
                                & AR5K_SIMR2_QCU_TXURN;

                if (new_mask & AR5K_INT_FATAL) {
                        int_mask |= AR5K_IMR_HIUERR;
                        simr2 |= (AR5K_SIMR2_MCABT | AR5K_SIMR2_SSERR
                                | AR5K_SIMR2_DPERR);
                }

                /*Beacon Not Ready*/
                if (new_mask & AR5K_INT_BNR)
                        int_mask |= AR5K_INT_BNR;

                if (new_mask & AR5K_INT_TIM)
                        int_mask |= AR5K_IMR_TIM;

                if (new_mask & AR5K_INT_TIM)
                        simr2 |= AR5K_SISR2_TIM;
                if (new_mask & AR5K_INT_DTIM)
                        simr2 |= AR5K_SISR2_DTIM;
                if (new_mask & AR5K_INT_DTIM_SYNC)
                        simr2 |= AR5K_SISR2_DTIM_SYNC;
                if (new_mask & AR5K_INT_BCN_TIMEOUT)
                        simr2 |= AR5K_SISR2_BCN_TIMEOUT;
                if (new_mask & AR5K_INT_CAB_TIMEOUT)
                        simr2 |= AR5K_SISR2_CAB_TIMEOUT;

                if (new_mask & AR5K_INT_RX_DOPPLER)
                        int_mask |= AR5K_IMR_RXDOPPLER;

                /* Note: Per queue interrupt masks
                 * are set via reset_tx_queue (qcu.c) */
                ath5k_hw_reg_write(ah, int_mask, AR5K_PIMR);
                ath5k_hw_reg_write(ah, simr2, AR5K_SIMR2);

        } else {
                if (new_mask & AR5K_INT_FATAL)
                        int_mask |= (AR5K_IMR_SSERR | AR5K_IMR_MCABT
                                | AR5K_IMR_HIUERR | AR5K_IMR_DPERR);

                ath5k_hw_reg_write(ah, int_mask, AR5K_IMR);
        }

        /* If RXNOFRM interrupt is masked disable it
         * by setting AR5K_RXNOFRM to zero */
        if (!(new_mask & AR5K_INT_RXNOFRM))
                ath5k_hw_reg_write(ah, 0, AR5K_RXNOFRM);

        /* Store new interrupt mask */
        ah->ah_imr = new_mask;

        /* ..re-enable interrupts if AR5K_INT_GLOBAL is set */
        if (new_mask & AR5K_INT_GLOBAL) {
                ath5k_hw_reg_write(ah, ah->ah_ier, AR5K_IER);
                ath5k_hw_reg_read(ah, AR5K_IER);
        }

        return old_mask;
}