iPXE — efx_hunt.c (Solarflare EF10 "Huntington" datapath driver)
00001 /**************************************************************************
00002  *
00003  * Driver datapath for Solarflare network cards
00004  *
00005  * Written by Shradha Shah <sshah@solarflare.com>
00006  *
00007  * Copyright 2012-2017 Solarflare Communications Inc.
00008  *
00009  * This program is free software; you can redistribute it and/or
00010  * modify it under the terms of the GNU General Public License as
00011  * published by the Free Software Foundation; either version 2 of the
00012  * License, or any later version.
00013  *
00014  * You can also choose to distribute this program under the terms of
00015  * the Unmodified Binary Distribution Licence (as given in the file
00016  * COPYING.UBDL), provided that you have satisfied its requirements.
00017  *
00018  ***************************************************************************/
00019 
00020 #include <stdint.h>
00021 #include <stdlib.h>
00022 #include <stdio.h>
00023 #include <unistd.h>
00024 #include <errno.h>
00025 #include <assert.h>
00026 #include <byteswap.h>
00027 #include <ipxe/io.h>
00028 #include <ipxe/pci.h>
00029 #include <ipxe/malloc.h>
00030 #include <ipxe/iobuf.h>
00031 #include <ipxe/netdevice.h>
00032 #include "efx_hunt.h"
00033 #include "efx_bitfield.h"
00034 #include "ef10_regs.h"
00035 
00036 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
00037 
/**
 * Free a queue buffer allocated by efx_hunt_alloc_special_buffer()
 *
 * @v buf               Buffer virtual address
 * @v bytes             Length originally passed to the allocator
 */
void efx_hunt_free_special_buffer(void *buf, int bytes)
{
        free_dma(buf, bytes);
}
00042 
00043 static void *efx_hunt_alloc_special_buffer(int bytes,
00044                                            struct efx_special_buffer *entry)
00045 {
00046         void *buffer;
00047         dma_addr_t dma_addr;
00048 
00049         /* Allocate the buffer, aligned on a buffer address boundary.  This
00050          * buffer will be passed into an MC_CMD_INIT_*Q command to setup the
00051          * appropriate type of queue via MCDI.
00052          */
00053         buffer = malloc_dma(bytes, EFX_BUF_ALIGN);
00054         if (!buffer)
00055                 return NULL;
00056 
00057         entry->dma_addr = dma_addr = virt_to_bus(buffer);
00058         assert((dma_addr & (EFX_BUF_ALIGN - 1)) == 0);
00059 
00060         /* Buffer table entries aren't allocated, so set id to zero */
00061         entry->id = 0;
00062         DBGP("Allocated 0x%x bytes at %p\n", bytes, buffer);
00063 
00064         return buffer;
00065 }
00066 
00067 /*******************************************************************************
00068  *
00069  *
00070  * TX
00071  *
00072  *
00073  ******************************************************************************/
00074 static void
00075 efx_hunt_build_tx_desc(efx_tx_desc_t *txd, struct io_buffer *iob)
00076 {
00077         dma_addr_t dma_addr;
00078 
00079         dma_addr = virt_to_bus(iob->data);
00080 
00081         EFX_POPULATE_QWORD_4(*txd,
00082                              ESF_DZ_TX_KER_TYPE, 0,
00083                              ESF_DZ_TX_KER_CONT, 0,
00084                              ESF_DZ_TX_KER_BYTE_CNT, iob_len(iob),
00085                              ESF_DZ_TX_KER_BUF_ADDR, dma_addr);
00086 }
00087 
00088 static void
00089 efx_hunt_notify_tx_desc(struct efx_nic *efx)
00090 {
00091         struct efx_tx_queue *txq = &efx->txq;
00092         int ptr = txq->write_ptr & EFX_TXD_MASK;
00093         efx_dword_t reg;
00094 
00095         EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, ptr);
00096         efx_writel_page(efx, &reg, 0, ER_DZ_TX_DESC_UPD_DWORD);
00097 }
00098 
00099 int
00100 efx_hunt_transmit(struct net_device *netdev, struct io_buffer *iob)
00101 {
00102         struct efx_nic *efx = netdev_priv(netdev);
00103         struct efx_tx_queue *txq = &efx->txq;
00104         int fill_level, space;
00105         efx_tx_desc_t *txd;
00106         int buf_id;
00107 
00108         fill_level = txq->write_ptr - txq->read_ptr;
00109         space = EFX_TXD_SIZE - fill_level - 1;
00110         if (space < 1)
00111                 return -ENOBUFS;
00112 
00113         /* Save the iobuffer for later completion */
00114         buf_id = txq->write_ptr & EFX_TXD_MASK;
00115         assert(txq->buf[buf_id] == NULL);
00116         txq->buf[buf_id] = iob;
00117 
00118         DBGCIO(efx, "tx_buf[%d] for iob %p data %p len %zd\n",
00119                buf_id, iob, iob->data, iob_len(iob));
00120 
00121         /* Form the descriptor, and push it to hardware */
00122         txd = txq->ring + buf_id;
00123         efx_hunt_build_tx_desc(txd, iob);
00124         ++txq->write_ptr;
00125         efx_hunt_notify_tx_desc(efx);
00126 
00127         return 0;
00128 }
00129 
00130 static void
00131 efx_hunt_transmit_done(struct efx_nic *efx, int id)
00132 {
00133         struct efx_tx_queue *txq = &efx->txq;
00134         unsigned int read_ptr, stop;
00135 
00136         /* Complete all buffers from read_ptr up to and including id */
00137         read_ptr = txq->read_ptr & EFX_TXD_MASK;
00138         stop = (id + 1) & EFX_TXD_MASK;
00139 
00140         while (read_ptr != stop) {
00141                 struct io_buffer *iob = txq->buf[read_ptr];
00142 
00143                 assert(iob);
00144                 /* Complete the tx buffer */
00145                 if (iob)
00146                         netdev_tx_complete(efx->netdev, iob);
00147                 DBGCIO(efx, "tx_buf[%d] for iob %p done\n", read_ptr, iob);
00148                 txq->buf[read_ptr] = NULL;
00149 
00150                 ++txq->read_ptr;
00151                 read_ptr = txq->read_ptr & EFX_TXD_MASK;
00152         }
00153 }
00154 
00155 int efx_hunt_tx_init(struct net_device *netdev, dma_addr_t *dma_addr)
00156 {
00157         struct efx_nic *efx = netdev_priv(netdev);
00158         struct efx_tx_queue *txq = &efx->txq;
00159         size_t bytes;
00160 
00161         /* Allocate hardware transmit queue */
00162         bytes = sizeof(efx_tx_desc_t) * EFX_TXD_SIZE;
00163         txq->ring = efx_hunt_alloc_special_buffer(bytes, &txq->entry);
00164         if (!txq->ring)
00165                 return -ENOMEM;
00166 
00167         txq->read_ptr = txq->write_ptr = 0;
00168         *dma_addr = txq->entry.dma_addr;
00169         return 0;
00170 }
00171 
00172 /*******************************************************************************
00173  *
00174  *
00175  * RX
00176  *
00177  *
00178  ******************************************************************************/
00179 static void
00180 efx_hunt_build_rx_desc(efx_rx_desc_t *rxd, struct io_buffer *iob)
00181 {
00182         dma_addr_t dma_addr = virt_to_bus(iob->data);
00183 
00184         EFX_POPULATE_QWORD_2(*rxd,
00185                              ESF_DZ_RX_KER_BYTE_CNT, EFX_RX_BUF_SIZE,
00186                              ESF_DZ_RX_KER_BUF_ADDR, dma_addr);
00187 }
00188 
00189 static void
00190 efx_hunt_notify_rx_desc(struct efx_nic *efx)
00191 {
00192         struct efx_rx_queue *rxq = &efx->rxq;
00193         int ptr = rxq->write_ptr & EFX_RXD_MASK;
00194         efx_dword_t reg;
00195 
00196         EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR, ptr);
00197         efx_writel_page(efx, &reg, 0, ER_DZ_RX_DESC_UPD);
00198 }
00199 
00200 static void
00201 efx_hunt_rxq_fill(struct efx_nic *efx)
00202 {
00203         struct efx_rx_queue *rxq = &efx->rxq;
00204         int fill_level = rxq->write_ptr - rxq->read_ptr;
00205         int space = EFX_NUM_RX_DESC - fill_level - 1;
00206         int pushed = 0;
00207 
00208         while (space) {
00209                 int buf_id = rxq->write_ptr & (EFX_NUM_RX_DESC - 1);
00210                 int desc_id = rxq->write_ptr & EFX_RXD_MASK;
00211                 struct io_buffer *iob;
00212                 efx_rx_desc_t *rxd;
00213 
00214                 assert(rxq->buf[buf_id] == NULL);
00215                 iob = alloc_iob(EFX_RX_BUF_SIZE);
00216                 if (!iob)
00217                         break;
00218 
00219                 DBGCP(efx, "pushing rx_buf[%d] iob %p data %p\n",
00220                       buf_id, iob, iob->data);
00221 
00222                 rxq->buf[buf_id] = iob;
00223                 rxd = rxq->ring + desc_id;
00224                 efx_hunt_build_rx_desc(rxd, iob);
00225                 ++rxq->write_ptr;
00226                 ++pushed;
00227                 --space;
00228         }
00229 
00230         /* Push the ptr to hardware */
00231         if (pushed > 0) {
00232                 efx_hunt_notify_rx_desc(efx);
00233 
00234                 DBGCP(efx, "pushed %d rx buffers to fill level %d\n",
00235                       pushed, rxq->write_ptr - rxq->read_ptr);
00236         }
00237 }
00238 
00239 static void
00240 efx_hunt_receive(struct efx_nic *efx, unsigned int id, int len, int drop)
00241 {
00242         struct efx_rx_queue *rxq = &efx->rxq;
00243         unsigned int read_ptr = rxq->read_ptr & EFX_RXD_MASK;
00244         unsigned int buf_ptr = rxq->read_ptr & EFX_NUM_RX_DESC_MASK;
00245         struct io_buffer *iob;
00246 
00247         /* id is the lower 4 bits of the desc index + 1 in huntington*/
00248         /* hence anding with 15 */
00249         assert((id & 15) == ((read_ptr + (len != 0)) & 15));
00250 
00251         /* Pop this rx buffer out of the software ring */
00252         iob = rxq->buf[buf_ptr];
00253         rxq->buf[buf_ptr] = NULL;
00254 
00255         DBGCIO(efx, "popping rx_buf[%d] iob %p data %p with %d bytes %s %x\n",
00256                read_ptr, iob, iob->data, len, drop ? "bad" : "ok", drop);
00257 
00258         /* Pass the packet up if required */
00259         if (drop)
00260                 netdev_rx_err(efx->netdev, iob, EBADMSG);
00261         else {
00262                 iob_put(iob, len);
00263                 iob_pull(iob, efx->rx_prefix_size);
00264                 netdev_rx(efx->netdev, iob);
00265         }
00266 
00267         ++rxq->read_ptr;
00268 }
00269 
00270 int efx_hunt_rx_init(struct net_device *netdev, dma_addr_t *dma_addr)
00271 {
00272         struct efx_nic *efx = netdev_priv(netdev);
00273         struct efx_rx_queue *rxq = &efx->rxq;
00274         size_t bytes;
00275 
00276         /* Allocate hardware receive queue */
00277         bytes = sizeof(efx_rx_desc_t) * EFX_RXD_SIZE;
00278         rxq->ring = efx_hunt_alloc_special_buffer(bytes, &rxq->entry);
00279         if (rxq->ring == NULL)
00280                 return -ENOMEM;
00281 
00282         rxq->read_ptr = rxq->write_ptr = 0;
00283         *dma_addr = rxq->entry.dma_addr;
00284         return 0;
00285 }
00286 
00287 /*******************************************************************************
00288  *
00289  *
00290  * Event queues and interrupts
00291  *
00292  *
00293  ******************************************************************************/
00294 int efx_hunt_ev_init(struct net_device *netdev, dma_addr_t *dma_addr)
00295 {
00296         struct efx_nic *efx = netdev_priv(netdev);
00297         struct efx_ev_queue *evq = &efx->evq;
00298         size_t bytes;
00299 
00300         /* Allocate the hardware event queue */
00301         bytes = sizeof(efx_event_t) * EFX_EVQ_SIZE;
00302         evq->ring = efx_hunt_alloc_special_buffer(bytes, &evq->entry);
00303         if (evq->ring == NULL)
00304                 return -ENOMEM;
00305 
00306         memset(evq->ring, 0xff, bytes);
00307         evq->read_ptr = 0;
00308         *dma_addr = evq->entry.dma_addr;
00309         return 0;
00310 }
00311 
/**
 * Clear any pending interrupt
 *
 * @v efx               NIC state
 *
 * Reading the ISR register acknowledges the interrupt; the value read
 * is deliberately discarded.
 */
static void
efx_hunt_clear_interrupts(struct efx_nic *efx)
{
        efx_dword_t reg;
        /* read the ISR */
        efx_readl(efx, &reg, ER_DZ_BIU_INT_ISR);
}
00319 
00320 /**
00321  * See if an event is present
00322  *
00323  * @v event            EFX event structure
00324  * @ret True           An event is pending
00325  * @ret False          No event is pending
00326  *
00327  * We check both the high and low dword of the event for all ones.  We
00328  * wrote all ones when we cleared the event, and no valid event can
00329  * have all ones in either its high or low dwords.  This approach is
00330  * robust against reordering.
00331  *
00332  * Note that using a single 64-bit comparison is incorrect; even
00333  * though the CPU read will be atomic, the DMA write may not be.
00334  */
00335 static inline int
00336 efx_hunt_event_present(efx_event_t *event)
00337 {
00338         return (!(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
00339                   EFX_DWORD_IS_ALL_ONES(event->dword[1])));
00340 }
00341 
/**
 * Acknowledge processed events by writing the read pointer to hardware
 *
 * @v efx               NIC state
 *
 * With workaround 35388 active, the read pointer must be written via
 * the indirect ER_DD_EVQ_INDIRECT register in two pieces (high bits
 * first, then low bits); otherwise a single write to the EVQ_RPTR
 * table entry suffices.
 */
static void
efx_hunt_evq_read_ack(struct efx_nic *efx)
{
        struct efx_ev_queue *evq = &efx->evq;
        efx_dword_t reg;

        if (efx->workaround_35388) {
                /* Upper bits of the read pointer */
                EFX_POPULATE_DWORD_2(reg, ERF_DD_EVQ_IND_RPTR_FLAGS,
                                     EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
                                     ERF_DD_EVQ_IND_RPTR,
                                    evq->read_ptr >> ERF_DD_EVQ_IND_RPTR_WIDTH);
                efx_writel_page(efx, &reg, 0, ER_DD_EVQ_INDIRECT);
                /* Lower ERF_DD_EVQ_IND_RPTR_WIDTH bits */
                EFX_POPULATE_DWORD_2(reg, ERF_DD_EVQ_IND_RPTR_FLAGS,
                                     EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
                                     ERF_DD_EVQ_IND_RPTR, evq->read_ptr &
                                     ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
                efx_writel_page(efx, &reg, 0, ER_DD_EVQ_INDIRECT);
        } else {
                EFX_POPULATE_DWORD_1(reg, ERF_DZ_EVQ_RPTR, evq->read_ptr);
                efx_writel_table(efx, &reg, 0, ER_DZ_EVQ_RPTR);
        }
}
00364 
/**
 * Decode and handle a single event queue entry
 *
 * @v efx               NIC state
 * @v evt               Event to handle
 * @ret consumed        1 if an RX descriptor was consumed (counted
 *                      against the poll budget), otherwise 0
 */
static unsigned int
efx_hunt_handle_event(struct efx_nic *efx, efx_event_t *evt)
{
        struct efx_rx_queue *rxq = &efx->rxq;
        int ev_code, desc_ptr, len;
        int next_ptr_lbits, packet_drop;
        int rx_cont;

        /* Decode event */
        ev_code = EFX_QWORD_FIELD(*evt, ESF_DZ_EV_CODE);

        switch (ev_code) {
        case ESE_DZ_EV_CODE_TX_EV:
                /* Completion for one or more transmit descriptors */
                desc_ptr = EFX_QWORD_FIELD(*evt, ESF_DZ_TX_DESCR_INDX);
                efx_hunt_transmit_done(efx, desc_ptr);
                break;

        case ESE_DZ_EV_CODE_RX_EV:
                len = EFX_QWORD_FIELD(*evt, ESF_DZ_RX_BYTES);
                next_ptr_lbits = EFX_QWORD_FIELD(*evt, ESF_DZ_RX_DSC_PTR_LBITS);
                rx_cont = EFX_QWORD_FIELD(*evt, ESF_DZ_RX_CONT);

                /* We don't expect to receive scattered packets, so drop the
                 * packet if RX_CONT is set on the current or previous event, or
                 * if len is zero.
                 */
                /* Each drop reason occupies its own bit so the debug
                 * output can show which condition fired.
                 */
                packet_drop = (len == 0) | (rx_cont << 1) |
                              (rxq->rx_cont_prev << 2);
                efx_hunt_receive(efx, next_ptr_lbits, len, packet_drop);
                rxq->rx_cont_prev = rx_cont;
                return 1;

        default:
                DBGCP(efx, "Unknown event type %d\n", ev_code);
                break;
        }
        return 0;
}
00403 
00404 void efx_hunt_poll(struct net_device *netdev)
00405 {
00406         struct efx_nic *efx = netdev_priv(netdev);
00407         struct efx_ev_queue *evq = &efx->evq;
00408         efx_event_t *evt;
00409         int budget = 10;
00410 
00411         /* Read the event queue by directly looking for events
00412          * (we don't even bother to read the eventq write ptr)
00413          */
00414         evt = evq->ring + evq->read_ptr;
00415         while (efx_hunt_event_present(evt) && (budget > 0)) {
00416                 DBGCP(efx, "Event at index 0x%x address %p is "
00417                       EFX_QWORD_FMT "\n", evq->read_ptr,
00418                       evt, EFX_QWORD_VAL(*evt));
00419 
00420                 budget -= efx_hunt_handle_event(efx, evt);
00421 
00422                 /* Clear the event */
00423                 EFX_SET_QWORD(*evt);
00424 
00425                 /* Move to the next event. We don't ack the event
00426                  * queue until the end
00427                  */
00428                 evq->read_ptr = ((evq->read_ptr + 1) & EFX_EVQ_MASK);
00429                 evt = evq->ring + evq->read_ptr;
00430         }
00431 
00432         /* Push more rx buffers if needed */
00433         efx_hunt_rxq_fill(efx);
00434 
00435         /* Clear any pending interrupts */
00436         efx_hunt_clear_interrupts(efx);
00437 
00438         /* Ack the event queue if interrupts are enabled */
00439         if (efx->int_en)
00440                 efx_hunt_evq_read_ack(efx);
00441 }
00442 
00443 void efx_hunt_irq(struct net_device *netdev, int enable)
00444 {
00445         struct efx_nic *efx = netdev_priv(netdev);
00446 
00447         efx->int_en = enable;
00448 
00449         /* If interrupts are enabled, prime the event queue.  Otherwise ack any
00450          * pending interrupts
00451          */
00452         if (enable)
00453                 efx_hunt_evq_read_ack(efx);
00454         else if (efx->netdev->state & NETDEV_OPEN)
00455                 efx_hunt_clear_interrupts(efx);
00456 }
00457 
00458 /*******************************************************************************
00459  *
00460  *
00461  * Initialization and Close
00462  *
00463  *
00464  ******************************************************************************/
00465 int efx_hunt_open(struct net_device *netdev)
00466 {
00467         struct efx_nic *efx = netdev_priv(netdev);
00468         efx_dword_t cmd;
00469 
00470         /* Set interrupt moderation to 0*/
00471         EFX_POPULATE_DWORD_2(cmd,
00472                              ERF_DZ_TC_TIMER_MODE, 0,
00473                              ERF_DZ_TC_TIMER_VAL, 0);
00474         efx_writel_page(efx, &cmd, 0, ER_DZ_EVQ_TMR);
00475 
00476         /* Ack the eventq */
00477         if (efx->int_en)
00478                 efx_hunt_evq_read_ack(efx);
00479 
00480         /* Push receive buffers */
00481         efx_hunt_rxq_fill(efx);
00482 
00483         return 0;
00484 }
00485 
00486 void efx_hunt_close(struct net_device *netdev)
00487 {
00488         struct efx_nic *efx = netdev_priv(netdev);
00489         struct efx_rx_queue *rxq = &efx->rxq;
00490         struct efx_tx_queue *txq = &efx->txq;
00491         int i;
00492 
00493         /* Complete outstanding descriptors */
00494         for (i = 0; i < EFX_NUM_RX_DESC; i++) {
00495                 if (rxq->buf[i]) {
00496                         free_iob(rxq->buf[i]);
00497                         rxq->buf[i] = NULL;
00498                 }
00499         }
00500 
00501         for (i = 0; i < EFX_TXD_SIZE; i++) {
00502                 if (txq->buf[i]) {
00503                         netdev_tx_complete(efx->netdev, txq->buf[i]);
00504                         txq->buf[i] = NULL;
00505                 }
00506         }
00507 
00508         /* Clear interrupts */
00509         efx_hunt_clear_interrupts(efx);
00510 }