iPXE
golan.c
00001 /*
00002  * Copyright (C) 2013-2015 Mellanox Technologies Ltd.
00003  *
00004  * This program is free software; you can redistribute it and/or
00005  * modify it under the terms of the GNU General Public License as
00006  * published by the Free Software Foundation; either version 2 of the
00007  * License, or any later version.
00008  *
00009  * This program is distributed in the hope that it will be useful, but
00010  * WITHOUT ANY WARRANTY; without even the implied warranty of
00011  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00012  * General Public License for more details.
00013  *
00014  * You should have received a copy of the GNU General Public License
00015  * along with this program; if not, write to the Free Software
00016  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
00017  * 02110-1301, USA.
00018  */
00019 
00020 FILE_LICENCE ( GPL2_OR_LATER );
00021 
00022 #include <errno.h>
00023 #include <strings.h>
00024 #include <ipxe/malloc.h>
00025 #include <ipxe/umalloc.h>
00026 #include <ipxe/infiniband.h>
00027 #include <ipxe/ib_smc.h>
00028 #include <ipxe/iobuf.h>
00029 #include <ipxe/netdevice.h>
00030 #include "flexboot_nodnic.h"
00031 #include <ipxe/ethernet.h>
00032 #include <ipxe/if_ether.h>
00033 #include <usr/ifmgmt.h>
00034 #include <ipxe/in.h>
00035 #include <byteswap.h>
00036 #include "mlx_utils/include/public/mlx_pci_gw.h"
00037 #include <config/general.h>
00038 #include <ipxe/ipoib.h>
00039 #include "mlx_nodnic/include/mlx_port.h"
00040 #include "nodnic_shomron_prm.h"
00041 #include "golan.h"
00042 #include "mlx_utils/include/public/mlx_bail.h"
00043 #include "mlx_utils/mlx_lib/mlx_link_speed/mlx_link_speed.h"
00044 
00045 
00046 #define DEVICE_IS_CIB( device ) ( device == 0x1011 )
00047 
00048 /******************************************************************************/
00049 /************* Very simple memory management for umalloced pages **************/
00050 /******* Temporary solution until full memory management is implemented *******/
00051 /******************************************************************************/
00052 
00053 struct golan_page {
00054         struct list_head list;
00055         userptr_t addr;
00056 };
00057 
00058 static void golan_free_fw_areas ( struct golan *golan ) {
00059         int i;
00060 
00061         for (i = 0; i < GOLAN_FW_AREAS_NUM; i++) {
00062                 if ( golan->fw_areas[i].area ) {
00063                         ufree ( golan->fw_areas[i].area );
00064                         golan->fw_areas[i].area = UNULL;
00065                 }
00066         }
00067 }
00068 
00069 static int golan_init_fw_areas ( struct golan *golan ) {
00070         int rc = 0, i =  0;
00071 
00072         if ( ! golan ) {
00073                 rc = -EINVAL;
00074                 goto err_golan_init_fw_areas_bad_param;
00075         }
00076 
00077         for (i = 0; i < GOLAN_FW_AREAS_NUM; i++)
00078                 golan->fw_areas[i].area = UNULL;
00079 
00080         return rc;
00081 
00082         err_golan_init_fw_areas_bad_param:
00083         return rc;
00084 }
00085 
00086 /******************************************************************************/
00087 
00088 const char *golan_qp_state_as_string[] = {
00089         "RESET",
00090         "INIT",
00091         "RTR",
00092         "RTS",
00093         "SQD",
00094         "SQE",
00095         "ERR"
00096 };
00097 
00098 static inline int golan_check_rc_and_cmd_status ( struct golan_cmd_layout *cmd, int rc ) {
00099         struct golan_outbox_hdr *out_hdr = ( struct golan_outbox_hdr * ) ( cmd->out );
00100         if ( rc == -EBUSY ) {
00101                 DBG ( "HCA is busy (rc = -EBUSY)\n" );
00102                 return rc;
00103         } else if ( out_hdr->status ) {
00104                 DBG("%s status = 0x%x - syndrome = 0x%x\n", __FUNCTION__,
00105                                 out_hdr->status, be32_to_cpu(out_hdr->syndrome));
00106                 return out_hdr->status;
00107         }
00108         return 0;
00109 }
00110 
00111 #define GOLAN_CHECK_RC_AND_CMD_STATUS(_label)                                                  \
00112                 do {                                                                                                                    \
00113                         if ( ( rc = golan_check_rc_and_cmd_status ( cmd, rc ) ) )       \
00114                                 goto _label;                                                                                    \
00115                 } while (0)
00116 
00117 #define GOLAN_PRINT_RC_AND_CMD_STATUS   golan_check_rc_and_cmd_status ( cmd, rc )
00118 
00119 
00120 struct mbox {
00121         union {
00122                 struct golan_cmd_prot_block     mblock;
00123                 u8      data[MAILBOX_STRIDE];
00124                 __be64  qdata[MAILBOX_STRIDE >> 3];
00125         };
00126 };
00127 
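/**
 * Integer log2: returns floor(log2(mem)) via find-last-set.  The queue
 * sizes passed to it in this driver are expected to be powers of two, so
 * this yields the exact log value programmed into the hardware contexts.
 */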
00128 static inline uint32_t  ilog2(uint32_t mem)
00129 {
00130         return ( fls ( mem ) - 1 );
00131 }
00132 
00133 #define CTRL_SIG_SZ     (sizeof(mailbox->mblock) - sizeof(mailbox->mblock.bdata) - 2)
00134 
00135 static inline u8 xor8_buf(void *buf, int len)
00136 {
00137         u8 sum = 0;
00138         int i;
00139         u8 *ptr = buf;
00140 
00141         for (i = 0; i < len; ++i)
00142                 sum ^= ptr[i];
00143 
00144         return sum;
00145 }
00146 
00147 static inline const char *cmd_status_str(u8 status)
00148 {
00149         switch (status) {
00150                 case 0x0:       return "OK";
00151                 case 0x1:       return "internal error";
00152                 case 0x2:       return "bad operation";
00153                 case 0x3:       return "bad parameter";
00154                 case 0x4:       return "bad system state";
00155                 case 0x5:       return "bad resource";
00156                 case 0x6:       return "resource busy";
00157                 case 0x8:       return "limits exceeded";
00158                 case 0x9:       return "bad resource state";
00159                 case 0xa:       return "bad index";
00160                 case 0xf:       return "no resources";
00161                 case 0x50:      return "bad input length";
00162                 case 0x51:      return "bad output length";
00163                 case 0x10:      return "bad QP state";
00164                 case 0x30:      return "bad packet (discarded)";
00165                 case 0x40:      return "bad size too many outstanding CQEs";
00166                 case 0xff:      return "Command Timed Out";
00167                 default:        return "unknown status";
00168         }
00169 }
00170 
00171 static inline uint16_t fw_rev_maj(struct golan *golan)
00172 {
00173         return be32_to_cpu(readl(&golan->iseg->fw_rev)) & 0xffff;
00174 }
00175 
00176 static inline u16 fw_rev_min(struct golan *golan)
00177 {
00178         return be32_to_cpu(readl(&golan->iseg->fw_rev)) >> 16;
00179 }
00180 
00181 static inline u16 fw_rev_sub(struct golan *golan)
00182 {
00183         return be32_to_cpu(readl(&golan->iseg->cmdif_rev_fw_sub)) & 0xffff;
00184 }
00185 
00186 static inline u16 cmdif_rev(struct golan *golan)
00187 {
00188         return be32_to_cpu(readl(&golan->iseg->cmdif_rev_fw_sub)) >> 16;
00189 }
00190 
00191 
00192 static inline struct golan_cmd_layout *get_cmd( struct golan *golan, int idx )
00193 {
00194         return golan->cmd.addr + (idx << golan->cmd.log_stride);
00195 }
00196 
00197 static inline void golan_calc_sig(struct golan *golan, uint32_t cmd_idx,
00198                                 uint32_t inbox_idx, uint32_t outbox_idx)
00199 {
00200         struct golan_cmd_layout *cmd    = get_cmd(golan, cmd_idx);
00201         struct mbox *mailbox = NULL;
00202 
00203         if (inbox_idx != NO_MBOX) {
00204                 mailbox                         = GET_INBOX(golan, inbox_idx);
00205                 mailbox->mblock.token           = cmd->token;
00206                 mailbox->mblock.ctrl_sig        = ~xor8_buf(mailbox->mblock.rsvd0,
00207                                                                 CTRL_SIG_SZ);
00208         }
00209         if (outbox_idx != NO_MBOX) {
00210                 mailbox                         = GET_OUTBOX(golan, outbox_idx);
00211                 mailbox->mblock.token           = cmd->token;
00212                 mailbox->mblock.ctrl_sig        = ~xor8_buf(mailbox->mblock.rsvd0,
00213                                                                 CTRL_SIG_SZ);
00214         }
00215         cmd->sig = ~xor8_buf(cmd, sizeof(*cmd));
00216 }
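/*
 * Illustrative sketch, not part of the original driver: golan_calc_sig()
 * writes each signature as the bitwise NOT of an XOR-8 checksum.  For the
 * command layout the checksum is computed while cmd->sig is still zero, so
 * XOR-ing the complete structure immediately afterwards yields 0xff.  A
 * hypothetical verifier based on that property could look like this:
 */
#if 0
static inline int golan_verify_cmd_sig ( struct golan_cmd_layout *cmd )
{
        /* A correctly signed command layout XORs to 0xff (sig byte included) */
        return ( xor8_buf ( cmd, sizeof ( *cmd ) ) == 0xff ) ? 0 : -EINVAL;
}
#endif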
00217 
00218 static inline void show_out_status(uint32_t *out)
00219 {
00220         DBG("%x\n", be32_to_cpu(out[0]));
00221         DBG("%x\n", be32_to_cpu(out[1]));
00222         DBG("%x\n", be32_to_cpu(out[2]));
00223         DBG("%x\n", be32_to_cpu(out[3]));
00224 }
00225 /**
00226   * Check if CMD has finished.
00227   */
00228 static inline uint32_t is_command_finished( struct golan *golan, int idx)
00229 {
00230         wmb();
00231         return !(get_cmd( golan , idx )->status_own & CMD_OWNER_HW);
00232 }
00233 
00234 /**
00235  * Wait for Golan command completion
00236  *
00237  * @v golan             Golan device
00238  * @ret rc              Return status code
00239  */
00240 static inline int golan_cmd_wait(struct golan *golan, int idx, const char *command)
00241 {
00242         unsigned int wait;
00243         int     rc = -EBUSY;
00244 
00245         for ( wait = GOLAN_HCR_MAX_WAIT_MS ; wait ; --wait ) {
00246                 if (is_command_finished(golan, idx)) {
00247                         rc = CMD_STATUS(golan, idx);
00248                         rmb();
00249                         break;
00250                 } else {
00251                         mdelay ( 1 );
00252                 }
00253         }
00254         if (rc) {
00255                 DBGC (golan ,"[%s]RC is %s[%x]\n", command, cmd_status_str(rc), rc);
00256         }
00257 
00258         golan->cmd_bm &= ~(1 << idx);
00259         return rc;
00260 }
00261 
00262 /**
00263   * Notify the HW that commands are ready
00264   */
00265 static inline void send_command(struct golan *golan)
00266 {
00267         wmb(); //Make sure the command is visible in "memory".
00268         writel(cpu_to_be32(golan->cmd_bm) , &golan->iseg->cmd_dbell);
00269 }
00270 
00271 static inline int send_command_and_wait(struct golan *golan, uint32_t cmd_idx,
00272                                         uint32_t inbox_idx, uint32_t outbox_idx, const char *command)
00273 {
00274         golan_calc_sig(golan, cmd_idx, inbox_idx, outbox_idx);
00275         send_command(golan);
00276         return golan_cmd_wait(golan, cmd_idx, command);
00277 }
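/*
 * Command flow used throughout this file: write_cmd() fills a command entry
 * (and clears any input/output mailbox), send_command_and_wait() signs the
 * entry and mailboxes, rings the command doorbell and polls until firmware
 * releases ownership, and the caller then checks the return code and command
 * status (see golan_core_enable_hca() below for the typical pattern).
 */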
00278 
00279 /**
00280   * Prepare a FW command.
00281   * In: command idx (must be valid).
00282   * Writes the command parameters into the command entry.
00283   */
00284 static inline struct golan_cmd_layout *write_cmd(struct golan *golan, int idx,
00285                                                         uint16_t opcode, uint16_t opmod,
00286                                                         uint16_t inbox_idx,
00287                                                         uint16_t outbox_idx, uint16_t inlen,
00288                                                         uint16_t outlen)
00289 {
00290         struct golan_cmd_layout *cmd    = get_cmd(golan , idx);
00291         struct golan_inbox_hdr *hdr     = (struct golan_inbox_hdr *)cmd->in;
00292         static uint8_t token;
00293 
00294         memset(cmd, 0, sizeof(*cmd));
00295 
00296         cmd->type               = GOLAN_PCI_CMD_XPORT;
00297         cmd->status_own         = CMD_OWNER_HW;
00298         cmd->outlen             = cpu_to_be32(outlen);
00299         cmd->inlen              = cpu_to_be32(inlen);
00300         hdr->opcode             = cpu_to_be16(opcode);
00301         hdr->opmod              = cpu_to_be16(opmod);
00302 
00303         if (inbox_idx != NO_MBOX) {
00304                 memset(GET_INBOX(golan, inbox_idx), 0, MAILBOX_SIZE);
00305                 cmd->in_ptr     = VIRT_2_BE64_BUS(GET_INBOX(golan, inbox_idx));
00306                 cmd->token      = ++token;
00307         }
00308         if (outbox_idx != NO_MBOX) {
00309                 memset(GET_OUTBOX(golan, outbox_idx), 0, MAILBOX_SIZE);
00310                 cmd->out_ptr = VIRT_2_BE64_BUS(GET_OUTBOX(golan, outbox_idx));
00311         }
00312 
00313         golan->cmd_bm |= 1 << idx;
00314 
00315         assert ( cmd != NULL );
00316         return cmd;
00317 }
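/*
 * Note on tokens: write_cmd() above assigns a rolling 8-bit token to every
 * command that uses an input mailbox, and golan_calc_sig() copies the same
 * token into each mailbox header so the mailbox can be matched to its
 * command.
 */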
00318 
00319 static inline int golan_core_enable_hca(struct golan *golan)
00320 {
00321         struct golan_cmd_layout *cmd;
00322         int rc = 0;
00323 
00324         DBGC(golan, "%s\n", __FUNCTION__);
00325 
00326         cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_ENABLE_HCA, 0x0,
00327                         NO_MBOX, NO_MBOX,
00328                         sizeof(struct golan_enable_hca_mbox_in),
00329                         sizeof(struct golan_enable_hca_mbox_out));
00330 
00331         rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
00332         GOLAN_PRINT_RC_AND_CMD_STATUS;
00333         return rc;
00334 }
00335 
00336 static inline void golan_disable_hca(struct golan *golan)
00337 {
00338         struct golan_cmd_layout *cmd;
00339         int rc;
00340 
00341         cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DISABLE_HCA, 0x0,
00342                                         NO_MBOX, NO_MBOX,
00343                                     sizeof(struct golan_disable_hca_mbox_in),
00344                                     sizeof(struct golan_disable_hca_mbox_out));
00345         rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
00346         GOLAN_PRINT_RC_AND_CMD_STATUS;
00347 }
00348 
00349 static inline int golan_set_hca_cap(struct golan *golan)
00350 {
00351         struct golan_cmd_layout *cmd;
00352         int rc;
00353 
00354         DBGC(golan, "%s\n", __FUNCTION__);
00355 
00356         cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_SET_HCA_CAP, 0x0,
00357                         GEN_MBOX, NO_MBOX,
00358                         sizeof(struct golan_cmd_set_hca_cap_mbox_in),
00359                         sizeof(struct golan_cmd_set_hca_cap_mbox_out));
00360 
00361         golan->caps.flags &= ~GOLAN_DEV_CAP_FLAG_CMDIF_CSUM;
00362         DBGC( golan , "%s caps.uar_sz = %d\n", __FUNCTION__, golan->caps.uar_sz);
00363         DBGC( golan , "%s caps.log_pg_sz = %d\n", __FUNCTION__, golan->caps.log_pg_sz);
00364         DBGC( golan , "%s caps.uar_page_sz = %d\n", __FUNCTION__, be32_to_cpu(golan->caps.uar_page_sz));
00365         golan->caps.uar_page_sz = 0;
00366         golan->caps.log_max_qp = GOLAN_LOG_MAX_QP;
00367 
00368         memcpy(((struct golan_hca_cap *)GET_INBOX(golan, GEN_MBOX)),
00369                    &(golan->caps),
00370                    sizeof(struct golan_hca_cap));
00371 
00372         // If the command fails, the capabilities cached in golan->caps should be reset
00373         rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__);
00374         GOLAN_PRINT_RC_AND_CMD_STATUS;
00375         return rc;
00376 }
00377 
00378 static inline int golan_qry_hca_cap(struct golan *golan)
00379 {
00380         struct golan_cmd_layout *cmd;
00381         int rc = 0;
00382 
00383         cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_QUERY_HCA_CAP, 0x1,
00384                                         NO_MBOX, GEN_MBOX,
00385                                         sizeof(struct golan_cmd_query_hca_cap_mbox_in),
00386                                         sizeof(struct golan_cmd_query_hca_cap_mbox_out));
00387 
00388         rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, GEN_MBOX, __FUNCTION__);
00389         GOLAN_CHECK_RC_AND_CMD_STATUS( err_query_hca_cap );
00390 
00391         memcpy(&(golan->caps),
00392                    ((struct golan_hca_cap *)GET_OUTBOX(golan, GEN_MBOX)),
00393                    sizeof(struct golan_hca_cap));
00394 err_query_hca_cap:
00395         return rc;
00396 }
00397 
00398 static inline int golan_take_pages ( struct golan *golan, uint32_t pages, __be16 func_id ) {
00399         uint32_t out_num_entries = 0;
00400         int size_ibox = 0;
00401         int size_obox = 0;
00402         int rc = 0;
00403 
00404         DBGC(golan, "%s\n", __FUNCTION__);
00405 
00406         while ( pages > 0 ) {
00407                 uint32_t pas_num = min(pages, MAX_PASE_MBOX);
00408                 struct golan_cmd_layout *cmd;
00409                 struct golan_manage_pages_inbox *in;
00410 
00411                 size_ibox = sizeof(struct golan_manage_pages_inbox) + (pas_num * GOLAN_PAS_SIZE);
00412                 size_obox = sizeof(struct golan_manage_pages_outbox) + (pas_num * GOLAN_PAS_SIZE);
00413 
00414                 cmd = write_cmd(golan, MEM_CMD_IDX, GOLAN_CMD_OP_MANAGE_PAGES, GOLAN_PAGES_TAKE,
00415                                 MEM_MBOX, MEM_MBOX,
00416                                 size_ibox,
00417                                 size_obox);
00418 
00419                 in = (struct golan_manage_pages_inbox *)cmd->in; /* Warning: the last two fields cannot be used */
00420 
00421                 in->func_id     = func_id; /* Already BE */
00422                 in->num_entries = cpu_to_be32(pas_num);
00423 
00424                 if ( ( rc = send_command_and_wait(golan, MEM_CMD_IDX, MEM_MBOX, MEM_MBOX, __FUNCTION__) ) == 0 ) {
00425                         out_num_entries = be32_to_cpu(((struct golan_manage_pages_outbox *)(cmd->out))->num_entries);
00426                 } else {
00427                         if ( rc == -EBUSY ) {
00428                                 DBGC (golan ,"HCA is busy (rc = -EBUSY)\n" );
00429                         } else {
00430                                 DBGC (golan ,"%s: rc =0x%x[%s]<%x> syn 0x%x[0x%x] for %d pages\n",
00431                                                 __FUNCTION__, rc, cmd_status_str(rc),
00432                                                 CMD_SYND(golan, MEM_CMD_IDX),
00433                                                 get_cmd( golan , MEM_CMD_IDX )->status_own,
00434                                                 be32_to_cpu(CMD_SYND(golan, MEM_CMD_IDX)), pas_num);
00435                         }
00436                         return rc;
00437                 }
00438 
00439                 pages -= out_num_entries;
00440         }
00441         DBGC( golan , "%s Pages handled\n", __FUNCTION__);
00442         return rc;
00443 }
00444 
00445 static inline int golan_provide_pages ( struct golan *golan , uint32_t pages
00446                 , __be16 func_id,struct golan_firmware_area *fw_area) {
00447         struct mbox *mailbox;
00448         int size_ibox = 0;
00449         int size_obox = 0;
00450         int rc = 0;
00451         userptr_t next_page_addr = UNULL;
00452 
00453         DBGC(golan, "%s\n", __FUNCTION__);
00454         if ( ! fw_area->area ) {
00455                 fw_area->area = umalloc ( GOLAN_PAGE_SIZE * pages );
00456                 if ( fw_area->area == UNULL ) {
00457                         rc = -ENOMEM;
00458                         DBGC (golan ,"Failed to allocate %d pages\n",pages);
00459                         goto err_golan_alloc_fw_area;
00460                 }
00461                 fw_area->npages = pages;
00462         }
00463         assert ( fw_area->npages == pages );
00464         next_page_addr = fw_area->area;
00465         while ( pages > 0 ) {
00466                 uint32_t pas_num = min(pages, MAX_PASE_MBOX);
00467                 unsigned i, j;
00468                 struct golan_cmd_layout *cmd;
00469                 struct golan_manage_pages_inbox *in;
00470                 userptr_t addr = 0;
00471 
00472                 mailbox = GET_INBOX(golan, MEM_MBOX);
00473                 size_ibox = sizeof(struct golan_manage_pages_inbox) + (pas_num * GOLAN_PAS_SIZE);
00474                 size_obox = sizeof(struct golan_manage_pages_outbox) + (pas_num * GOLAN_PAS_SIZE);
00475 
00476                 cmd = write_cmd(golan, MEM_CMD_IDX, GOLAN_CMD_OP_MANAGE_PAGES, GOLAN_PAGES_GIVE,
00477                                 MEM_MBOX, MEM_MBOX,
00478                                 size_ibox,
00479                                 size_obox);
00480 
00481                 in = (struct golan_manage_pages_inbox *)cmd->in; /* Warning: the last two fields cannot be used */
00482 
00483                 in->func_id     = func_id; /* Already BE */
00484                 in->num_entries = cpu_to_be32(pas_num);
00485 
00486                 for ( i = 0 , j = MANAGE_PAGES_PSA_OFFSET; i < pas_num; ++i ,++j,
00487                                 next_page_addr += GOLAN_PAGE_SIZE ) {
00488                         addr = next_page_addr;
00489                         if (GOLAN_PAGE_MASK & user_to_phys(addr, 0)) {
00490                                 DBGC (golan ,"Addr not page aligned [%lx %lx]\n", user_to_phys(addr, 0), addr);
00491                         }
00492                         mailbox->mblock.data[j] = USR_2_BE64_BUS(addr);
00493                 }
00494 
00495                 if ( ( rc = send_command_and_wait(golan, MEM_CMD_IDX, MEM_MBOX, MEM_MBOX, __FUNCTION__) ) == 0 ) {
00496                         pages -= pas_num;
00497                         golan->total_dma_pages += pas_num;
00498                 } else {
00499                         if ( rc == -EBUSY ) {
00500                                 DBGC (golan ,"HCA is busy (rc = -EBUSY)\n" );
00501                         } else {
00502                                 DBGC (golan ,"%s: rc =0x%x[%s]<%x> syn 0x%x[0x%x] for %d pages\n",
00503                                                 __FUNCTION__, rc, cmd_status_str(rc),
00504                                                 CMD_SYND(golan, MEM_CMD_IDX),
00505                                                 get_cmd( golan , MEM_CMD_IDX )->status_own,
00506                                                 be32_to_cpu(CMD_SYND(golan, MEM_CMD_IDX)), pas_num);
00507                         }
00508                         goto err_send_command;
00509                 }
00510         }
00511         DBGC( golan , "%s Pages handled\n", __FUNCTION__);
00512         return 0;
00513 
00514 err_send_command:
00515 err_golan_alloc_fw_area:
00516         /* Go over In box and free pages */
00517         /* Send Error to FW */
00518         /* What is next - Disable HCA? */
00519         DBGC (golan ,"%s Failed (rc = 0x%x)\n", __FUNCTION__, rc);
00520         return rc;
00521 }
00522 
00523 static inline int golan_handle_pages(struct golan *golan,
00524                                         enum golan_qry_pages_mode qry,
00525                                         enum golan_manage_pages_mode mode)
00526 {
00527         struct golan_cmd_layout *cmd;
00528 
00529         int rc = 0;
00530         int32_t pages;
00531         uint16_t total_pages;
00532         __be16  func_id;
00533 
00534         DBGC(golan, "%s\n", __FUNCTION__);
00535 
00536         cmd = write_cmd(golan, MEM_CMD_IDX, GOLAN_CMD_OP_QUERY_PAGES, qry,
00537                         NO_MBOX, NO_MBOX,
00538                         sizeof(struct golan_query_pages_inbox),
00539                         sizeof(struct golan_query_pages_outbox));
00540 
00541         rc = send_command_and_wait(golan, MEM_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
00542         GOLAN_CHECK_RC_AND_CMD_STATUS( err_handle_pages_query );
00543 
00544         pages = be32_to_cpu(QRY_PAGES_OUT(golan, MEM_CMD_IDX)->num_pages);
00545 
00546         DBGC( golan , "%s pages needed: %d\n", __FUNCTION__, pages);
00547 
00548         func_id = QRY_PAGES_OUT(golan, MEM_CMD_IDX)->func_id;
00549 
00550         total_pages = (( pages >= 0 ) ? pages : ( pages * ( -1 ) ));
00551 
00552         if ( mode == GOLAN_PAGES_GIVE ) {
00553                 rc = golan_provide_pages(golan, total_pages, func_id, & ( golan->fw_areas[qry-1] ));
00554         } else {
00555                 rc = golan_take_pages(golan, golan->total_dma_pages, func_id);
00556                 golan->total_dma_pages = 0;
00557         }
00558 
00559         if ( rc ) {
00560                 DBGC (golan , "Failed to %s pages (rc = %d) - DMA pages allocated = %d\n",
00561                         ( ( mode == GOLAN_PAGES_GIVE ) ? "give" : "take" ), rc , golan->total_dma_pages );
00562                 return rc;
00563         }
00564 
00565         return 0;
00566 
00567 err_handle_pages_query:
00568         DBGC (golan ,"%s Query pages failed (rc = 0x%x)\n", __FUNCTION__, rc);
00569         return rc;
00570 }
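/*
 * Page-handling protocol, as implemented above: QUERY_PAGES reports a page
 * count (its absolute value is used) and the function id; MANAGE_PAGES with
 * GOLAN_PAGES_GIVE then posts the physical addresses of freshly allocated
 * pages through the inbox mailbox, while GOLAN_PAGES_TAKE reclaims the pages
 * previously donated to firmware.
 */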
00571 
00572 static inline int golan_set_access_reg ( struct golan *golan __attribute__ (( unused )), uint32_t reg __attribute__ (( unused )))
00573 {
00574 #if 0
00575         write_cmd(golan, _CMD_IDX, GOLAN_CMD_OP_QUERY_PAGES, 0x0,
00576                         NO_MBOX, NO_MBOX,
00577                         sizeof(struct golan_reg_host_endianess),
00578                         sizeof(struct golan_reg_host_endianess));
00579         in->arg = cpu_to_be32(arg);
00580         in->register_id = cpu_to_be16(reg_num);
00581 #endif
00582         DBGC (golan ," %s Not implemented yet\n", __FUNCTION__);
00583         return 0;
00584 }
00585 
00586 static inline void golan_cmd_uninit ( struct golan *golan )
00587 {
00588         free_dma(golan->mboxes.outbox, GOLAN_PAGE_SIZE);
00589         free_dma(golan->mboxes.inbox, GOLAN_PAGE_SIZE);
00590         free_dma(golan->cmd.addr, GOLAN_PAGE_SIZE);
00591 }
00592 
00593 /**
00594  * Initialise Golan Command Q parameters
00595  *      -- Allocate a 4KB page for the Command Q
00596  *      -- Read the stride and log num commands available
00597  *      -- Write the address to cmdq_phy_addr in iseg
00598  * @v golan             Golan device
00599  */
00600 static inline int golan_cmd_init ( struct golan *golan )
00601 {
00602         int rc = 0;
00603         uint32_t addr_l_sz;
00604 
00605         if (!(golan->cmd.addr = malloc_dma(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) {
00606                 rc = -ENOMEM;
00607                 goto malloc_dma_failed;
00608         }
00609         if (!(golan->mboxes.inbox = malloc_dma(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) {
00610                 rc = -ENOMEM;
00611                 goto malloc_dma_inbox_failed;
00612         }
00613         if (!(golan->mboxes.outbox = malloc_dma(GOLAN_PAGE_SIZE , GOLAN_PAGE_SIZE))) {
00614                 rc = -ENOMEM;
00615                 goto malloc_dma_outbox_failed;
00616         }
00617         addr_l_sz       = be32_to_cpu(readl(&golan->iseg->cmdq_addr_l_sz));
00618 
00619         golan->cmd.log_stride   = addr_l_sz & 0xf;
00620         golan->cmd.size         = 1 << (( addr_l_sz >> 4 ) & 0xf);
00621 
00622         addr_l_sz = virt_to_bus(golan->cmd.addr);
00623         writel(0 /* cpu_to_be32(golan->cmd.addr) >> 32 */, &golan->iseg->cmdq_addr_h);
00624         writel(cpu_to_be32(addr_l_sz), &golan->iseg->cmdq_addr_l_sz);
00625         wmb(); //Make sure the addr is visible in "memory".
00626 
00627         addr_l_sz = be32_to_cpu(readl(&golan->iseg->cmdq_addr_l_sz));
00628 
00629         DBGC( golan , "%s Command interface was initialized\n", __FUNCTION__);
00630         return 0;
00631 
00632 malloc_dma_outbox_failed:
00633         free_dma(golan->mboxes.inbox, GOLAN_PAGE_SIZE);
00634 malloc_dma_inbox_failed:
00635         free_dma(golan->cmd.addr, GOLAN_PAGE_SIZE);
00636 malloc_dma_failed:
00637         DBGC (golan ,"%s Failed to initialize command interface (rc = 0x%x)\n",
00638                    __FUNCTION__, rc);
00639         return rc;
00640 }
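/*
 * As decoded above, the low byte of cmdq_addr_l_sz carries the command entry
 * stride (log2, bits 0-3) and the number of command entries (log2, bits 4-7);
 * the upper bits hold the low part of the command queue address, which is
 * written back once the queue page has been allocated.
 */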
00641 
00642 static inline int golan_hca_init(struct golan *golan)
00643 {
00644         struct golan_cmd_layout *cmd;
00645         int rc = 0;
00646 
00647         DBGC(golan, "%s\n", __FUNCTION__);
00648 
00649         cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_INIT_HCA, 0x0,
00650                         NO_MBOX, NO_MBOX,
00651                         sizeof(struct golan_cmd_init_hca_mbox_in),
00652                         sizeof(struct golan_cmd_init_hca_mbox_out));
00653 
00654         rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
00655         GOLAN_PRINT_RC_AND_CMD_STATUS;
00656         return rc;
00657 }
00658 
00659 static inline void golan_teardown_hca(struct golan *golan, enum golan_teardown op_mod)
00660 {
00661         struct golan_cmd_layout *cmd;
00662         int rc;
00663 
00664         DBGC (golan, "%s in\n", __FUNCTION__);
00665 
00666         cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_TEARDOWN_HCA, op_mod,
00667                         NO_MBOX, NO_MBOX,
00668                         sizeof(struct golan_cmd_teardown_hca_mbox_in),
00669                         sizeof(struct golan_cmd_teardown_hca_mbox_out));
00670 
00671         rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
00672         GOLAN_PRINT_RC_AND_CMD_STATUS;
00673 
00674         DBGC (golan, "%s HCA teardown completed\n", __FUNCTION__);
00675 }
00676 
00677 static inline int golan_alloc_uar(struct golan *golan)
00678 {
00679         struct golan_uar *uar = &golan->uar;
00680         struct golan_cmd_layout *cmd;
00681         struct golan_alloc_uar_mbox_out *out;
00682         int rc;
00683 
00684         cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_ALLOC_UAR, 0x0,
00685                         NO_MBOX, NO_MBOX,
00686                         sizeof(struct golan_alloc_uar_mbox_in),
00687                         sizeof(struct golan_alloc_uar_mbox_out));
00688 
00689         rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
00690         GOLAN_CHECK_RC_AND_CMD_STATUS( err_alloc_uar_cmd );
00691         out = (struct golan_alloc_uar_mbox_out *) ( cmd->out );
00692 
00693         uar->index      = be32_to_cpu(out->uarn) & 0xffffff;
00694 
00695         uar->phys = (pci_bar_start(golan->pci, GOLAN_HCA_BAR) + (uar->index << GOLAN_PAGE_SHIFT));
00696         uar->virt = (void *)(ioremap(uar->phys, GOLAN_PAGE_SIZE));
00697 
00698         DBGC( golan , "%s: UAR allocated with index 0x%x\n", __FUNCTION__, uar->index);
00699         return 0;
00700 
00701 err_alloc_uar_cmd:
00702         DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
00703         return rc;
00704 }
00705 
00706 static void golan_dealloc_uar(struct golan *golan)
00707 {
00708         struct golan_cmd_layout *cmd;
00709         uint32_t uar_index = golan->uar.index;
00710         int rc;
00711 
00712         DBGC (golan, "%s in\n", __FUNCTION__);
00713 
00714         cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DEALLOC_UAR, 0x0,
00715                                         NO_MBOX, NO_MBOX,
00716                                         sizeof(struct golan_free_uar_mbox_in),
00717                                         sizeof(struct golan_free_uar_mbox_out));
00718 
00719         ((struct golan_free_uar_mbox_in *)(cmd->in))->uarn = cpu_to_be32(uar_index);
00720         rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
00721         GOLAN_PRINT_RC_AND_CMD_STATUS;
00722         golan->uar.index = 0;
00723 
00724         DBGC (golan, "%s UAR (0x%x) was destroyed\n", __FUNCTION__, uar_index);
00725 }
00726 
00727 static void golan_eq_update_ci(struct golan_event_queue *eq, int arm)
00728 {
00729         __be32 *addr = eq->doorbell + (arm ? 0 : 2);
00730         u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
00731         writel(cpu_to_be32(val) , addr);
00732         /* We still want ordering, just not swabbing, so add a barrier */
00733         wmb();
00734 }
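/*
 * The EQ doorbell value above packs the 24-bit consumer index in the low
 * bits and the EQ number in the top byte; the arm flag selects which of the
 * two doorbell dwords is written, so the same value either re-arms the EQ or
 * merely updates the consumer index.
 */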
00735 
00736 static int golan_create_eq(struct golan *golan)
00737 {
00738         struct golan_event_queue *eq = &golan->eq;
00739         struct golan_create_eq_mbox_in_data *in;
00740         struct golan_cmd_layout *cmd;
00741         struct golan_create_eq_mbox_out *out;
00742         int rc, i;
00743 
00744         eq->cons_index  = 0;
00745         eq->size        = GOLAN_NUM_EQES * sizeof(eq->eqes[0]);
00746         eq->eqes        = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
00747         if (!eq->eqes) {
00748                 rc = -ENOMEM;
00749                 goto err_create_eq_eqe_alloc;
00750         }
00751 
00752         /* Set EQEs ownership bit to HW ownership */
00753         for (i = 0; i < GOLAN_NUM_EQES; ++i) {
00754                 eq->eqes[i].owner = GOLAN_EQE_HW_OWNERSHIP;
00755         }
00756 
00757         cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_CREATE_EQ, 0x0,
00758                         GEN_MBOX, NO_MBOX,
00759                         sizeof(struct golan_create_eq_mbox_in) + GOLAN_PAS_SIZE,
00760                         sizeof(struct golan_create_eq_mbox_out));
00761 
00762         in = (struct golan_create_eq_mbox_in_data *)GET_INBOX(golan, GEN_MBOX);
00763 
00764         /* Fill the physical address of the page */
00765         in->pas[0]              = VIRT_2_BE64_BUS( eq->eqes );
00766         in->ctx.log_sz_usr_page = cpu_to_be32((ilog2(GOLAN_NUM_EQES)) << 24 | golan->uar.index);
00767         DBGC( golan , "UAR idx %x (BE %x)\n", golan->uar.index, in->ctx.log_sz_usr_page);
00768         in->events_mask         = cpu_to_be64(1 << GOLAN_EVENT_TYPE_PORT_CHANGE);
00769 
00770         rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__);
00771         GOLAN_CHECK_RC_AND_CMD_STATUS( err_create_eq_cmd );
00772         out = (struct golan_create_eq_mbox_out *)cmd->out;
00773 
00774         eq->eqn         = out->eq_number;
00775         eq->doorbell    = ((void *)golan->uar.virt) + GOLAN_EQ_DOORBELL_OFFSET;
00776 
00777         /* EQs are created in ARMED state */
00778         golan_eq_update_ci(eq, GOLAN_EQ_UNARMED);
00779 
00780         DBGC( golan , "%s: Event queue created (EQN = 0x%x)\n", __FUNCTION__, eq->eqn);
00781         return 0;
00782 
00783 err_create_eq_cmd:
00784         free_dma ( eq->eqes , GOLAN_PAGE_SIZE );
00785 err_create_eq_eqe_alloc:
00786         DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
00787         return rc;
00788 }
00789 
00790 static void golan_destory_eq(struct golan *golan)
00791 {
00792         struct golan_cmd_layout *cmd;
00793         struct golan_destroy_eq_mbox_in *in;
00794         uint8_t eqn = golan->eq.eqn;
00795         int rc;
00796 
00797         DBGC (golan, "%s in\n", __FUNCTION__);
00798 
00799         cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DESTROY_EQ, 0x0,
00800                                         NO_MBOX, NO_MBOX,
00801                                         sizeof(struct golan_destroy_eq_mbox_in),
00802                                         sizeof(struct golan_destroy_eq_mbox_out));
00803 
00804         in = GOLAN_MBOX_IN ( cmd, in );
00805         in->eqn = eqn;
00806         rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
00807         GOLAN_PRINT_RC_AND_CMD_STATUS;
00808 
00809         free_dma ( golan->eq.eqes , GOLAN_PAGE_SIZE );
00810         golan->eq.eqn = 0;
00811 
00812         DBGC( golan, "%s Event queue (0x%x) was destroyed\n", __FUNCTION__, eqn);
00813 }
00814 
00815 static int golan_alloc_pd(struct golan *golan)
00816 {
00817         struct golan_cmd_layout *cmd;
00818         struct golan_alloc_pd_mbox_out *out;
00819         int rc;
00820 
00821         cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_ALLOC_PD, 0x0,
00822                         NO_MBOX, NO_MBOX,
00823                         sizeof(struct golan_alloc_pd_mbox_in),
00824                         sizeof(struct golan_alloc_pd_mbox_out));
00825 
00826         rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
00827         GOLAN_CHECK_RC_AND_CMD_STATUS( err_alloc_pd_cmd );
00828         out = (struct golan_alloc_pd_mbox_out *) ( cmd->out );
00829 
00830         golan->pdn = (be32_to_cpu(out->pdn) & 0xffffff);
00831         DBGC( golan , "%s: Protection domain created (PDN = 0x%x)\n", __FUNCTION__,
00832                 golan->pdn);
00833         return 0;
00834 
00835 err_alloc_pd_cmd:
00836         DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
00837         return rc;
00838 }
00839 
00840 static void golan_dealloc_pd(struct golan *golan)
00841 {
00842         struct golan_cmd_layout *cmd;
00843         uint32_t pdn = golan->pdn;
00844         int rc;
00845 
00846         DBGC (golan,"%s in\n", __FUNCTION__);
00847 
00848         cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DEALLOC_PD, 0x0,
00849                                         NO_MBOX, NO_MBOX,
00850                                         sizeof(struct golan_alloc_pd_mbox_in),
00851                                         sizeof(struct golan_alloc_pd_mbox_out));
00852 
00853         ((struct golan_dealloc_pd_mbox_in *)(cmd->in))->pdn = cpu_to_be32(pdn);
00854         rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
00855         GOLAN_PRINT_RC_AND_CMD_STATUS;
00856         golan->pdn = 0;
00857 
00858         DBGC (golan ,"%s Protection domain (0x%x) was destroyed\n", __FUNCTION__, pdn);
00859 }
00860 
00861 static int golan_create_mkey(struct golan *golan)
00862 {
00863         struct golan_create_mkey_mbox_in_data *in;
00864         struct golan_cmd_layout *cmd;
00865         struct golan_create_mkey_mbox_out *out;
00866         int rc;
00867 
00868         cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_CREATE_MKEY, 0x0,
00869                                         GEN_MBOX, NO_MBOX,
00870                                         sizeof(struct golan_create_mkey_mbox_in),
00871                                         sizeof(struct golan_create_mkey_mbox_out));
00872 
00873         in = (struct golan_create_mkey_mbox_in_data *)GET_INBOX(golan, GEN_MBOX);
00874 
00875         in->seg.flags                   = GOLAN_IB_ACCESS_LOCAL_WRITE | GOLAN_IB_ACCESS_LOCAL_READ;
00876         in->seg.flags_pd                = cpu_to_be32(golan->pdn | GOLAN_MKEY_LEN64);
00877         in->seg.qpn_mkey7_0             = cpu_to_be32(0xffffff << GOLAN_CREATE_MKEY_SEG_QPN_BIT);
00878 
00879         rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__);
00880         GOLAN_CHECK_RC_AND_CMD_STATUS( err_create_mkey_cmd );
00881         out = (struct golan_create_mkey_mbox_out *) ( cmd->out );
00882 
00883         golan->mkey = ((be32_to_cpu(out->mkey) & 0xffffff) << 8);
00884         DBGC( golan , "%s: Got DMA Key for local access read/write (MKEY = 0x%x)\n",
00885                    __FUNCTION__, golan->mkey);
00886         return 0;
00887 err_create_mkey_cmd:
00888         DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
00889         return rc;
00890 }
00891 
00892 static void golan_destroy_mkey(struct golan *golan)
00893 {
00894         struct golan_cmd_layout *cmd;
00895         u32 mkey = golan->mkey;
00896         int rc;
00897 
00898         cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DESTROY_MKEY, 0x0,
00899                                         NO_MBOX, NO_MBOX,
00900                                         sizeof(struct golan_destroy_mkey_mbox_in),
00901                                         sizeof(struct golan_destroy_mkey_mbox_out));
00902         ((struct golan_destroy_mkey_mbox_in *)(cmd->in))->mkey = cpu_to_be32(mkey >> 8);
00903         rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
00904         GOLAN_PRINT_RC_AND_CMD_STATUS;
00905         golan->mkey = 0;
00906 
00907         DBGC( golan , "%s DMA Key (0x%x) for local access write was destroyed\n"
00908                    , __FUNCTION__, mkey);
00909 }
00910 
00911 
00912 /**
00913  * Initialise Golan PCI parameters
00914  *
00915  * @v golan             Golan device
00916  */
00917 static inline void golan_pci_init(struct golan *golan)
00918 {
00919         struct pci_device *pci = golan->pci;
00920 
00921         /* Fix up PCI device */
00922         adjust_pci_device ( pci );
00923 
00924         /* Get HCA BAR */
00925         golan->iseg     = ioremap ( pci_bar_start ( pci, GOLAN_HCA_BAR),
00926                                         GOLAN_PCI_CONFIG_BAR_SIZE );
00927 }
00928 
00929 static inline struct golan *golan_alloc()
00930 {
00931         void *golan = zalloc(sizeof(struct golan));
00932         if ( !golan )
00933                 goto err_zalloc;
00934 
00935         return golan;
00936 
00937 err_zalloc:
00938         return NULL;
00939 }
00940 
00941 /**
00942  * Create completion queue
00943  *
00944  * @v ibdev             Infiniband device
00945  * @v cq                Completion queue
00946  * @ret rc              Return status code
00947  */
00948 static int golan_create_cq(struct ib_device *ibdev,
00949                                 struct ib_completion_queue *cq)
00950 {
00951         struct golan *golan = ib_get_drvdata(ibdev);
00952         struct golan_completion_queue *golan_cq;
00953         struct golan_cmd_layout *cmd;
00954         struct golan_create_cq_mbox_in_data *in;
00955         struct golan_create_cq_mbox_out *out;
00956         int     rc;
00957         unsigned int i;
00958 
00959         golan_cq = zalloc(sizeof(*golan_cq));
00960         if (!golan_cq) {
00961                 rc = -ENOMEM;
00962                 goto err_create_cq;
00963         }
00964         golan_cq->size                  = sizeof(golan_cq->cqes[0]) * cq->num_cqes;
00965         golan_cq->doorbell_record       = malloc_dma(GOLAN_CQ_DB_RECORD_SIZE,
00966                                                         GOLAN_CQ_DB_RECORD_SIZE);
00967         if (!golan_cq->doorbell_record) {
00968                 rc = -ENOMEM;
00969                 goto err_create_cq_db_alloc;
00970         }
00971 
00972         golan_cq->cqes = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
00973         if (!golan_cq->cqes) {
00974                 rc = -ENOMEM;
00975                 goto err_create_cq_cqe_alloc;
00976         }
00977 
00978         /* Set CQEs ownership bit to HW ownership */
00979         for (i = 0; i < cq->num_cqes; ++i) {
00980                 golan_cq->cqes[i].op_own = ((GOLAN_CQE_OPCODE_NOT_VALID <<
00981                                                                     GOLAN_CQE_OPCODE_BIT) |
00982                                                                     GOLAN_CQE_HW_OWNERSHIP);
00983         }
00984 
00985         cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_CREATE_CQ, 0x0,
00986                                         GEN_MBOX, NO_MBOX,
00987                                     sizeof(struct golan_create_cq_mbox_in) + GOLAN_PAS_SIZE,
00988                                     sizeof(struct golan_create_cq_mbox_out));
00989 
00990         in = (struct golan_create_cq_mbox_in_data *)GET_INBOX(golan, GEN_MBOX);
00991 
00992         /* Fill the physical address of the page */
00993         in->pas[0]              = VIRT_2_BE64_BUS( golan_cq->cqes );
00994         in->ctx.cqe_sz_flags    = GOLAN_CQE_SIZE_64 << 5;
00995         in->ctx.log_sz_usr_page = cpu_to_be32(((ilog2(cq->num_cqes)) << 24) | golan->uar.index);
00996         in->ctx.c_eqn           = cpu_to_be16(golan->eq.eqn);
00997         in->ctx.db_record_addr  = VIRT_2_BE64_BUS(golan_cq->doorbell_record);
00998 
00999         rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__);
01000         GOLAN_CHECK_RC_AND_CMD_STATUS( err_create_cq_cmd );
01001         out = (struct golan_create_cq_mbox_out *) ( cmd->out );
01002 
01003         cq->cqn = (be32_to_cpu(out->cqn) & 0xffffff);
01004 
01005         ib_cq_set_drvdata(cq, golan_cq);
01006 
01007         DBGC( golan , "%s CQ created successfully (CQN = 0x%lx)\n", __FUNCTION__, cq->cqn);
01008         return 0;
01009 
01010 err_create_cq_cmd:
01011         free_dma( golan_cq->cqes , GOLAN_PAGE_SIZE );
01012 err_create_cq_cqe_alloc:
01013         free_dma(golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE);
01014 err_create_cq_db_alloc:
01015         free ( golan_cq );
01016 err_create_cq:
01017         DBGC (golan ,"%s out rc = 0x%x\n", __FUNCTION__, rc);
01018         return rc;
01019 }
01020 
01021 /**
01022  * Destroy completion queue
01023  *
01024  * @v ibdev             Infiniband device
01025  * @v cq                Completion queue
01026  */
01027 static void golan_destroy_cq(struct ib_device *ibdev,
01028                                 struct ib_completion_queue *cq)
01029 {
01030         struct golan                    *golan          = ib_get_drvdata(ibdev);
01031         struct golan_completion_queue   *golan_cq       = ib_cq_get_drvdata(cq);
01032         struct golan_cmd_layout         *cmd;
01033         uint32_t cqn = cq->cqn;
01034         int rc;
01035 
01036         DBGC (golan, "%s in\n", __FUNCTION__);
01037 
01038         cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DESTROY_CQ, 0x0,
01039                                         NO_MBOX, NO_MBOX,
01040                                     sizeof(struct golan_destroy_cq_mbox_in),
01041                                     sizeof(struct golan_destroy_cq_mbox_out));
01042         ((struct golan_destroy_cq_mbox_in *)(cmd->in))->cqn = cpu_to_be32(cqn);
01043         rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
01044         GOLAN_PRINT_RC_AND_CMD_STATUS;
01045         cq->cqn = 0;
01046 
01047         ib_cq_set_drvdata(cq, NULL);
01048         free_dma ( golan_cq->cqes , GOLAN_PAGE_SIZE );
01049         free_dma(golan_cq->doorbell_record, GOLAN_CQ_DB_RECORD_SIZE);
01050         free(golan_cq);
01051 
01052         DBGC (golan, "%s CQ number 0x%x was destroyed\n", __FUNCTION__, cqn);
01053 }
01054 
01055 static void golan_cq_clean(struct ib_completion_queue *cq)
01056 {
01057         ib_poll_cq(cq->ibdev, cq);
01058 }
01059 
01060 static int golan_qp_type_to_st(enum ib_queue_pair_type type)
01061 {
01062         int qpt = type;
01063 
01064         switch (qpt) {
01065         case IB_QPT_RC:
01066                 return GOLAN_QP_ST_RC;
01067         case IB_QPT_UD:
01068                 return GOLAN_QP_ST_UD;
01069         case IB_QPT_SMI:
01070                 return GOLAN_QP_ST_QP0;
01071         case IB_QPT_GSI:
01072                 return GOLAN_QP_ST_QP1;
01073         case IB_QPT_ETH:
01074         default:
01075                 return -EINVAL;
01076         }
01077 }
01078 #if 0
01079 static int golan_is_special_qp(enum ib_queue_pair_type type)
01080 {
01081         return (type == IB_QPT_GSI || type == IB_QPT_SMI);
01082 }
01083 #endif
01084 static int golan_create_qp_aux(struct ib_device *ibdev,
01085                                 struct ib_queue_pair *qp,
01086                                 int *qpn)
01087 {
01088         struct golan *golan = ib_get_drvdata(ibdev);
01089         struct golan_queue_pair *golan_qp;
01090         struct golan_create_qp_mbox_in_data *in;
01091         struct golan_cmd_layout *cmd;
01092         struct golan_wqe_data_seg *data;
01093         struct golan_create_qp_mbox_out *out;
01094         uint32_t wqe_size_in_bytes;
01095         uint32_t max_qp_size_in_wqes;
01096         unsigned int i;
01097         int rc;
01098 
01099         golan_qp = zalloc(sizeof(*golan_qp));
01100         if (!golan_qp) {
01101                 rc = -ENOMEM;
01102                 goto err_create_qp;
01103         }
01104 
01105         if ( ( qp->type == IB_QPT_SMI ) || ( qp->type == IB_QPT_GSI ) ||
01106                  ( qp->type == IB_QPT_UD ) ) {
01107                 golan_qp->rq.grh_size = ( qp->recv.num_wqes *
01108                                         sizeof ( golan_qp->rq.grh[0] ));
01109         }
01110 
01111         /* Calculate receive queue size */
01112         golan_qp->rq.size = qp->recv.num_wqes * GOLAN_RECV_WQE_SIZE;
01113         if (GOLAN_RECV_WQE_SIZE > be16_to_cpu(golan->caps.max_wqe_sz_rq)) {
01114                 DBGC (golan ,"%s receive wqe size [%zd] > max wqe size [%d]\n", __FUNCTION__,
01115                                 GOLAN_RECV_WQE_SIZE, be16_to_cpu(golan->caps.max_wqe_sz_rq));
01116                 rc = -EINVAL;
01117                 goto err_create_qp_rq_size;
01118         }
01119 
01120         wqe_size_in_bytes = sizeof(golan_qp->sq.wqes[0]);
01121         /* Calculate send queue size */
01122         if (wqe_size_in_bytes > be16_to_cpu(golan->caps.max_wqe_sz_sq)) {
01123                 DBGC (golan ,"%s send WQE size [%d] > max WQE size [%d]\n", __FUNCTION__,
01124                                 wqe_size_in_bytes,
01125                                 be16_to_cpu(golan->caps.max_wqe_sz_sq));
01126                 rc = -EINVAL;
01127                 goto err_create_qp_sq_wqe_size;
01128         }
01129         golan_qp->sq.size = (qp->send.num_wqes * wqe_size_in_bytes);
01130         max_qp_size_in_wqes = (1 << ((uint32_t)(golan->caps.log_max_qp_sz)));
01131         if (qp->send.num_wqes > max_qp_size_in_wqes) {
01132                 DBGC (golan ,"%s send wq size [%d] > max wq size [%d]\n", __FUNCTION__,
01133                                 golan_qp->sq.size, max_qp_size_in_wqes);
01134                 rc = -EINVAL;
01135                 goto err_create_qp_sq_size;
01136         }
01137 
01138         golan_qp->size = golan_qp->sq.size + golan_qp->rq.size;
01139 
01140         /* Allocate DMA memory for WQEs (one page is enough for now; this should be revisited) */
01141         golan_qp->wqes = malloc_dma ( GOLAN_PAGE_SIZE, GOLAN_PAGE_SIZE );
01142         if (!golan_qp->wqes) {
01143                 rc = -ENOMEM;
01144                 goto err_create_qp_wqe_alloc;
01145         }
01146         golan_qp->rq.wqes       = golan_qp->wqes;
01147         golan_qp->sq.wqes       = golan_qp->wqes + golan_qp->rq.size;//(union golan_send_wqe *)&
01148                         //(((struct golan_recv_wqe_ud *)(golan_qp->wqes))[qp->recv.num_wqes]);
01149 
01150         if ( golan_qp->rq.grh_size ) {
01151                 golan_qp->rq.grh = ( golan_qp->wqes +
01152                                 golan_qp->sq.size +
01153                                 golan_qp->rq.size );
01154         }
01155 
01156         /* Invalidate all WQEs */
01157         data = &golan_qp->rq.wqes[0].data[0];
01158         for ( i = 0 ; i < ( golan_qp->rq.size / sizeof ( *data ) ); i++ ){
01159                 data->lkey = cpu_to_be32 ( GOLAN_INVALID_LKEY );
01160                 data++;
01161         }
01162 
01163         golan_qp->doorbell_record = malloc_dma(sizeof(struct golan_qp_db),
01164                                                 sizeof(struct golan_qp_db));
01165         if (!golan_qp->doorbell_record) {
01166                 rc = -ENOMEM;
01167                 goto err_create_qp_db_alloc;
01168         }
01169         memset(golan_qp->doorbell_record, 0, sizeof(struct golan_qp_db));
01170 
01171         cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_CREATE_QP, 0x0,
01172                         GEN_MBOX, NO_MBOX,
01173                         sizeof(struct golan_create_qp_mbox_in) + GOLAN_PAS_SIZE,
01174                         sizeof(struct golan_create_qp_mbox_out));
01175 
01176         in = (struct golan_create_qp_mbox_in_data *)GET_INBOX(golan, GEN_MBOX);
01177 
01178         /* Fill the physical address of the page */
01179         in->pas[0]                      = VIRT_2_BE64_BUS(golan_qp->wqes);
01180         in->ctx.qp_counter_set_usr_page = cpu_to_be32(golan->uar.index);
01181 
01182         in->ctx.flags_pd        = cpu_to_be32(golan->pdn);
01183         in->ctx.flags           = cpu_to_be32((golan_qp_type_to_st(qp->type)
01184                                                 << GOLAN_QP_CTX_ST_BIT) |
01185                                                 (GOLAN_QP_PM_MIGRATED <<
01186                                                 GOLAN_QP_CTX_PM_STATE_BIT));
01187 //      cgs     set to 0, initially.
01188 //      atomic mode
01189         in->ctx.rq_size_stride  = ((ilog2(qp->recv.num_wqes) <<
01190                                                                 GOLAN_QP_CTX_RQ_SIZE_BIT) |
01191                                                                 (sizeof(golan_qp->rq.wqes[0]) / GOLAN_RECV_WQE_SIZE));
01192         in->ctx.sq_crq_size             = cpu_to_be16(ilog2(golan_qp->sq.size / GOLAN_SEND_WQE_BB_SIZE)
01193                                                                                   << GOLAN_QP_CTX_SQ_SIZE_BIT);
01194         in->ctx.cqn_send                = cpu_to_be32(qp->send.cq->cqn);
01195         in->ctx.cqn_recv                = cpu_to_be32(qp->recv.cq->cqn);
01196         in->ctx.db_rec_addr     = VIRT_2_BE64_BUS(golan_qp->doorbell_record);
01197 
01198         rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__);
01199         GOLAN_CHECK_RC_AND_CMD_STATUS( err_create_qp_cmd );
01200         out = (struct golan_create_qp_mbox_out *)cmd->out;
01201 
01202         *qpn = (be32_to_cpu(out->qpn) & 0xffffff);
01203         /*
01204         * Hardware wants QPN written in big-endian order (after
01205         * shifting) for send doorbell.  Precompute this value to save
01206         * a little bit when posting sends.
01207         */
01208         golan_qp->doorbell_qpn  = cpu_to_be32(*qpn << 8);
01209         golan_qp->state                 = GOLAN_IB_QPS_RESET;
01210 
01211         ib_qp_set_drvdata(qp, golan_qp);
01212 
01213         return 0;
01214 
01215 err_create_qp_cmd:
01216         free_dma(golan_qp->doorbell_record, sizeof(struct golan_qp_db));
01217 err_create_qp_db_alloc:
01218         free_dma ( golan_qp->wqes, GOLAN_PAGE_SIZE );
01219 err_create_qp_wqe_alloc:
01220 err_create_qp_sq_size:
01221 err_create_qp_sq_wqe_size:
01222 err_create_qp_rq_size:
01223         free ( golan_qp );
01224 err_create_qp:
01225         return rc;
01226 }
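/*
 * WQE memory layout produced by golan_create_qp_aux(): a single DMA page
 * holds the receive queue WQEs first, the send queue WQEs immediately after
 * them and, for UD/SMI/GSI QPs, the GRH scratch buffers at the end; the
 * doorbell record lives in its own small DMA allocation.
 */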
01227 
01228 /**
01229  * Create queue pair
01230  *
01231  * @v ibdev             Infiniband device
01232  * @v qp                Queue pair
01233  * @ret rc              Return status code
01234  */
01235 static int golan_create_qp(struct ib_device *ibdev,
01236                                 struct ib_queue_pair *qp)
01237 {
01238         int rc, qpn = -1;
01239 
01240         switch (qp->type) {
01241         case IB_QPT_UD:
01242         case IB_QPT_SMI:
01243         case IB_QPT_GSI:
01244                 rc = golan_create_qp_aux(ibdev, qp, &qpn);
01245                 if (rc) {
01246                         DBG ( "%s Failed to create QP (rc = 0x%x)\n", __FUNCTION__, rc);
01247                         return rc;
01248                 }
01249                 qp->qpn = qpn;
01250 
01251                 break;
01252         case IB_QPT_ETH:
01253         case IB_QPT_RC:
01254         default:
01255                 DBG ( "%s unsupported QP type (0x%x)\n", __FUNCTION__, qp->type);
01256                 return -EINVAL;
01257         }
01258 
01259         return 0;
01260 }
01261 
01262 static int golan_modify_qp_rst_to_init(struct ib_device *ibdev,
01263                                         struct ib_queue_pair *qp __unused,
01264                                         struct golan_modify_qp_mbox_in_data *in)
01265 {
01266         int rc = 0;
01267 
01268         in->ctx.qkey                    = cpu_to_be32((uint32_t)(qp->qkey));
01269 
01270         in->ctx.pri_path.port           = ibdev->port;
01271         in->ctx.flags                   |= cpu_to_be32(GOLAN_QP_PM_MIGRATED << GOLAN_QP_CTX_PM_STATE_BIT);
01272         in->ctx.pri_path.pkey_index     = 0;
01273         /* QK is 0 */
01274         /* QP cntr set 0 */
01275         return rc;
01276 }
01277 
01278 static int golan_modify_qp_init_to_rtr(struct ib_device *ibdev __unused,
01279                                         struct ib_queue_pair *qp __unused,
01280                                         struct golan_modify_qp_mbox_in_data *in)
01281 {
01282         int rc = 0;
01283 
01284         in->optparam = 0;
01285         return rc;
01286 }
01287 
01288 static int golan_modify_qp_rtr_to_rts(struct ib_device *ibdev __unused,
01289                                         struct ib_queue_pair *qp __unused,
01290                                         struct golan_modify_qp_mbox_in_data *in __unused)
01291 {
01292         int rc = 0;
01293 
01294         in->optparam = 0;
01295         /* In the good flow, the PSN is 0 */
01296         return rc;
01297 }
01298 
01299 static int golan_modify_qp_to_rst(struct ib_device *ibdev,
01300                                         struct ib_queue_pair *qp)
01301 {
01302         struct golan *golan = ib_get_drvdata(ibdev);
01303         struct golan_queue_pair *golan_qp = ib_qp_get_drvdata(qp);
01304         struct golan_cmd_layout *cmd;
01305         int rc;
01306 
01307         cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_2RST_QP, 0x0,
01308                                         NO_MBOX, NO_MBOX,
01309                                         sizeof(struct golan_modify_qp_mbox_in),
01310                                         sizeof(struct golan_modify_qp_mbox_out));
01311         ((struct golan_modify_qp_mbox_in *)(cmd->in))->qpn = cpu_to_be32(qp->qpn);
01312         rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
01313         GOLAN_CHECK_RC_AND_CMD_STATUS( err_modify_qp_2rst_cmd );
01314 
01315         golan_qp->state = GOLAN_IB_QPS_RESET;
01316         DBGC( golan , "%s QP number 0x%lx was modified to RESET\n",
01317                 __FUNCTION__, qp->qpn);
01318 
01319         return 0;
01320 
01321 err_modify_qp_2rst_cmd:
01322         DBGC (golan ,"%s Failed to modify QP number 0x%lx (rc = 0x%x)\n",
01323                 __FUNCTION__, qp->qpn, rc);
01324         return rc;
01325 }
01326 
01327 static int (*golan_modify_qp_methods[])(struct ib_device *ibdev,
01328                                         struct ib_queue_pair *qp,
01329                                         struct golan_modify_qp_mbox_in_data *in) = {
01330 
01331         [GOLAN_IB_QPS_RESET]    = golan_modify_qp_rst_to_init,
01332         [GOLAN_IB_QPS_INIT]     = golan_modify_qp_init_to_rtr,
01333         [GOLAN_IB_QPS_RTR]      = golan_modify_qp_rtr_to_rts
01334 };
01335 
01336 static int golan_modify_qp(struct ib_device *ibdev,
01337                                 struct ib_queue_pair *qp)
01338 {
01339         struct golan *golan = ib_get_drvdata(ibdev);
01340         struct golan_queue_pair *golan_qp = ib_qp_get_drvdata(qp);
01341         struct golan_modify_qp_mbox_in_data *in;
01342         struct golan_cmd_layout *cmd;
01343         enum golan_ib_qp_state prev_state;
01344         int rc;
01345         int modify_cmd[] = {GOLAN_CMD_OP_RST2INIT_QP,
01346                                 GOLAN_CMD_OP_INIT2RTR_QP,
01347                                 GOLAN_CMD_OP_RTR2RTS_QP};
01348 
01349         while (golan_qp->state < GOLAN_IB_QPS_RTS) {
01350                 prev_state = golan_qp->state;
01351                 cmd = write_cmd(golan, DEF_CMD_IDX, modify_cmd[golan_qp->state], 0x0,
01352                                                 GEN_MBOX, NO_MBOX,
01353                                                 sizeof(struct golan_modify_qp_mbox_in),
01354                                                 sizeof(struct golan_modify_qp_mbox_out));
01355 
01356                 in = (struct golan_modify_qp_mbox_in_data *)GET_INBOX(golan, GEN_MBOX);
01357                 ((struct golan_modify_qp_mbox_in *)(cmd->in))->qpn = cpu_to_be32(qp->qpn);
01358                 rc = golan_modify_qp_methods[golan_qp->state](ibdev, qp, in);
01359                 if (rc) {
01360                         goto err_modify_qp_fill_inbox;
01361                 }
01362 //              in->ctx.qp_counter_set_usr_page = cpu_to_be32(golan->uar.index);
01363                 rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__);
01364                 GOLAN_CHECK_RC_AND_CMD_STATUS( err_modify_qp_cmd );
01365 
01366                 ++(golan_qp->state);
01367 
01368                 DBGC( golan , "%s QP number 0x%lx was modified from %s to %s\n",
01369                         __FUNCTION__, qp->qpn, golan_qp_state_as_string[prev_state],
01370                         golan_qp_state_as_string[golan_qp->state]);
01371         }
01372 
01373         DBGC( golan , "%s QP number 0x%lx is ready to receive/send packets.\n",
01374                 __FUNCTION__, qp->qpn);
01375         return 0;
01376 
01377 err_modify_qp_cmd:
01378 err_modify_qp_fill_inbox:
01379         DBGC (golan ,"%s Failed to modify QP number 0x%lx (rc = 0x%x)\n",
01380                    __FUNCTION__, qp->qpn, rc);
01381         return rc;
01382 }
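
/*
 * Illustrative sketch, not part of the driver: golan_modify_qp() above
 * drives the QP from RESET to RTS by indexing golan_modify_qp_methods[]
 * with the current state and advancing one state per successful command.
 * A minimal, hardware-free model of that table-driven state machine
 * (all "ex_*" names are hypothetical) could look like this:
 */
enum ex_qp_state { EX_RESET, EX_INIT, EX_RTR, EX_RTS };

static int ex_rst_to_init ( void *qp __unused ) { return 0; }
static int ex_init_to_rtr ( void *qp __unused ) { return 0; }
static int ex_rtr_to_rts ( void *qp __unused ) { return 0; }

static int ( * ex_transition[] ) ( void *qp ) = {
        [EX_RESET]      = ex_rst_to_init,
        [EX_INIT]       = ex_init_to_rtr,
        [EX_RTR]        = ex_rtr_to_rts,
};

static int ex_modify_to_rts ( void *qp, enum ex_qp_state *state ) {
        int rc;

        while ( *state < EX_RTS ) {
                /* Stop (and report) at the first failing transition */
                if ( ( rc = ex_transition[*state] ( qp ) ) != 0 )
                        return rc;
                ++( *state );
        }
        return 0;
}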
01383 
01384 /**
01385  * Destroy queue pair
01386  *
01387  * @v ibdev             Infiniband device
01388  * @v qp                Queue pair
01389  */
01390 static void golan_destroy_qp(struct ib_device *ibdev,
01391                                 struct ib_queue_pair *qp)
01392 {
01393         struct golan            *golan          = ib_get_drvdata(ibdev);
01394         struct golan_queue_pair *golan_qp       = ib_qp_get_drvdata(qp);
01395         struct golan_cmd_layout                 *cmd;
01396         unsigned long            qpn = qp->qpn;
01397         int rc;
01398 
01399         DBGC (golan, "%s in\n", __FUNCTION__);
01400 
01401         if (golan_qp->state != GOLAN_IB_QPS_RESET) {
01402                 if (golan_modify_qp_to_rst(ibdev, qp)) {
01403                         DBGC (golan ,"%s Failed to modify QP 0x%lx to RESET\n", __FUNCTION__,
01404                                    qp->qpn);
01405                 }
01406         }
01407 
01408         if (qp->recv.cq) {
01409                 golan_cq_clean(qp->recv.cq);
01410         }
01411         if (qp->send.cq && (qp->send.cq != qp->recv.cq)) {
01412                 golan_cq_clean(qp->send.cq);
01413         }
01414 
01415         cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DESTROY_QP, 0x0,
01416                                         NO_MBOX, NO_MBOX,
01417                                     sizeof(struct golan_destroy_qp_mbox_in),
01418                                     sizeof(struct golan_destroy_qp_mbox_out));
01419         ((struct golan_destroy_qp_mbox_in *)(cmd->in))->qpn = cpu_to_be32(qpn);
01420         rc = send_command_and_wait(golan, DEF_CMD_IDX, NO_MBOX, NO_MBOX, __FUNCTION__);
01421         GOLAN_PRINT_RC_AND_CMD_STATUS;
01422         qp->qpn = 0;
01423 
01424         ib_qp_set_drvdata(qp, NULL);
01425         free_dma(golan_qp->doorbell_record, sizeof(struct golan_qp_db));
01426         free_dma ( golan_qp->wqes, GOLAN_PAGE_SIZE );
01427         free(golan_qp);
01428 
01429         DBGC( golan ,"%s QP 0x%lx was destroyed\n", __FUNCTION__, qpn);
01430 }
01431 
01432 /**
01433  * Calculate transmission rate
01434  *
01435  * @v rate              Transmission rate
01436  * @ret golan_rate      Golan rate
01437  */
01438 static unsigned int golan_rate(enum ib_rate rate) {
01439         return (((rate >= IB_RATE_2_5) && (rate <= IB_RATE_120)) ? (rate + 5) : 0);
01440 }
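
/*
 * Illustrative note, not part of the driver: assuming iPXE's enum ib_rate
 * follows the IBA rate encodings, the helper above simply offsets an
 * in-range encoding by 5 (presumably matching the device's static-rate
 * field) and falls back to 0 for anything out of range.  A hypothetical
 * spot check of that mapping:
 */
static void example_check_rate_mapping ( void ) {
        assert ( golan_rate ( IB_RATE_2_5 ) == ( IB_RATE_2_5 + 5 ) );
        assert ( golan_rate ( IB_RATE_120 ) == ( IB_RATE_120 + 5 ) );
        assert ( golan_rate ( IB_RATE_120 + 1 ) == 0 ); /* out of range */
}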
01441 
01442 /**
01443  * Post send work queue entry
01444  *
01445  * @v ibdev             Infiniband device
01446  * @v qp                Queue pair
01447  * @v av                Address vector
01448  * @v iobuf             I/O buffer
01449  * @ret rc              Return status code
01450  */
01451 static int golan_post_send(struct ib_device *ibdev,
01452                                 struct ib_queue_pair *qp,
01453                                 struct ib_address_vector *av,
01454                                 struct io_buffer *iobuf)
01455 {
01456         struct golan                    *golan          = ib_get_drvdata(ibdev);
01457         struct golan_queue_pair         *golan_qp       = ib_qp_get_drvdata(qp);
01458         struct golan_send_wqe_ud *wqe           = NULL;
01459         struct golan_av                 *datagram       = NULL;
01460         unsigned long                   wqe_idx_mask;
01461         unsigned long                   wqe_idx;
01462         struct golan_wqe_data_seg       *data           = NULL;
01463         struct golan_wqe_ctrl_seg       *ctrl           = NULL;
01464 
01465 
01466         wqe_idx_mask = (qp->send.num_wqes - 1);
01467         wqe_idx = (qp->send.next_idx & wqe_idx_mask);
01468         if (qp->send.iobufs[wqe_idx]) {
01469                 DBGC (golan ,"%s Send queue of QPN 0x%lx is full\n", __FUNCTION__, qp->qpn);
01470                 return -ENOMEM;
01471         }
01472 
01473         qp->send.iobufs[wqe_idx] = iobuf;
01474 
01475         /* TODO: derive the WQE size in octawords from the WQEBB size, e.g. */
01476         /* wqe_size_in_octa_words = golan_qp->sq.wqe_size_in_wqebb >> 4; */
01477 
01478         wqe                     = &golan_qp->sq.wqes[wqe_idx].ud;
01479 
01480         /* TODO: should the hardware ownership bit be checked before reusing this WQE? */
01481 
01482         memset(wqe, 0, sizeof(*wqe));
01483 
01484         ctrl                    = &wqe->ctrl;
01485         ctrl->opmod_idx_opcode  = cpu_to_be32(GOLAN_SEND_OPCODE |
01486                                                   ((u32)(golan_qp->sq.next_idx) <<
01487                                                   GOLAN_WQE_CTRL_WQE_IDX_BIT));
01488         ctrl->qpn_ds            = cpu_to_be32(GOLAN_SEND_UD_WQE_SIZE >> 4) |
01489                                                           golan_qp->doorbell_qpn;
01490         ctrl->fm_ce_se          = 0x8; /* CE = 2 (always generate a CQE), no fence, no solicited event */
01491         data                    = &wqe->data;
01492         data->byte_count        = cpu_to_be32(iob_len(iobuf));
01493         data->lkey              = cpu_to_be32(golan->mkey);
01494         data->addr              = VIRT_2_BE64_BUS(iobuf->data);
01495 
01496         datagram                = &wqe->datagram;
01497         datagram->key.qkey.qkey = cpu_to_be32(av->qkey);
01498         datagram->dqp_dct       = cpu_to_be32((1 << 31) | av->qpn);
01499         datagram->stat_rate_sl  = ((golan_rate(av->rate) << 4) | av->sl);
01500         datagram->fl_mlid       = (ibdev->lid & 0x007f); /* take only the 7 low bits of the LID */
01501         datagram->rlid          = cpu_to_be16(av->lid);
01502         datagram->grh_gid_fl    = cpu_to_be32(av->gid_present << 30);
01503         memcpy(datagram->rgid, av->gid.bytes, 16 /* sizeof(datagram->rgid) */);
01504 
01505         /*
01506         * Make sure that descriptors are written before
01507         * updating doorbell record and ringing the doorbell
01508         */
01509         ++(qp->send.next_idx);
01510         golan_qp->sq.next_idx = (golan_qp->sq.next_idx + GOLAN_WQEBBS_PER_SEND_UD_WQE);
01511         golan_qp->doorbell_record->send_db = cpu_to_be16(golan_qp->sq.next_idx);
01512         wmb();
01513         writeq(*((__be64 *)ctrl), golan->uar.virt
01514                         + ( ( golan_qp->sq.next_idx & 0x1 ) ? DB_BUFFER0_EVEN_OFFSET
01515                                         : DB_BUFFER0_ODD_OFFSET ) );
01516         return 0;
01517 }
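
/*
 * Illustrative sketch, not part of the driver: the send queue above is a
 * power-of-two ring.  The producer index runs freely and is reduced to a
 * slot with "index & (num_wqes - 1)"; a slot whose I/O buffer pointer is
 * still set has not completed yet, so the ring is treated as full.  A
 * minimal, hardware-free model of that bookkeeping (hypothetical names):
 */
struct example_send_ring {
        void *slots[8];                 /* ring size must be a power of two */
        unsigned long next_idx;         /* free-running producer index */
};

static int example_ring_post ( struct example_send_ring *ring, void *entry ) {
        unsigned long mask = ( ( sizeof ( ring->slots ) /
                                 sizeof ( ring->slots[0] ) ) - 1 );
        unsigned long idx = ( ring->next_idx & mask );

        if ( ring->slots[idx] )
                return -ENOMEM;         /* oldest entry not yet completed */

        ring->slots[idx] = entry;
        ring->next_idx++;
        return 0;
}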
01518 
01519 /**
01520  * Post receive work queue entry
01521  *
01522  * @v ibdev             Infiniband device
01523  * @v qp                Queue pair
01524  * @v iobuf             I/O buffer
01525  * @ret rc              Return status code
01526  */
01527 static int golan_post_recv(struct ib_device *ibdev,
01528                                 struct ib_queue_pair *qp,
01529                                 struct io_buffer *iobuf)
01530 {
01531         struct golan            *golan          = ib_get_drvdata(ibdev);
01532         struct golan_queue_pair *golan_qp       = ib_qp_get_drvdata(qp);
01533         struct ib_work_queue            *wq     = &qp->recv;
01534         struct golan_recv_wqe_ud        *wqe;
01535         struct ib_global_route_header *grh;
01536         struct golan_wqe_data_seg *data;
01537         unsigned int wqe_idx_mask;
01538 
01539         /* Allocate work queue entry */
01540         wqe_idx_mask = (wq->num_wqes - 1);
01541         if (wq->iobufs[wq->next_idx & wqe_idx_mask]) {
01542                 DBGC (golan ,"%s Receive queue of QPN 0x%lx is full\n", __FUNCTION__, qp->qpn);
01543                 return -ENOMEM;
01544         }
01545 
01546         wq->iobufs[wq->next_idx & wqe_idx_mask] = iobuf;
01547         wqe = & golan_qp->rq.wqes[wq->next_idx & wqe_idx_mask];
01548 
01549         memset(wqe, 0, sizeof(*wqe));
01550         data = &wqe->data[0];
01551         if ( golan_qp->rq.grh ) {
01552                 grh = &golan_qp->rq.grh[wq->next_idx & wqe_idx_mask];
01553                 data->byte_count = cpu_to_be32 ( sizeof ( *grh ) );
01554                 data->lkey = cpu_to_be32 ( golan->mkey );
01555                 data->addr = VIRT_2_BE64_BUS ( grh );
01556                 data++;
01557         }
01558 
01559         data->byte_count = cpu_to_be32(iob_tailroom(iobuf));
01560         data->lkey = cpu_to_be32(golan->mkey);
01561         data->addr = VIRT_2_BE64_BUS(iobuf->data);
01562 
01563         ++wq->next_idx;
01564 
01565         /*
01566         * Make sure that descriptors are written before
01567         * updating doorbell record and ringing the doorbell
01568         */
01569         wmb();
01570         golan_qp->doorbell_record->recv_db = cpu_to_be16(qp->recv.next_idx & 0xffff);
01571 
01572         return 0;
01573 }
01574 
01575 static int golan_query_vport_context ( struct ib_device *ibdev ) {
01576         struct golan *golan = ib_get_drvdata ( ibdev );
01577         struct golan_cmd_layout *cmd;
01578         struct golan_query_hca_vport_context_inbox *in;
01579         struct golan_query_hca_vport_context_data *context_data;
01580         int rc;
01581 
01582         cmd = write_cmd ( golan, DEF_CMD_IDX, GOLAN_CMD_OP_QUERY_HCA_VPORT_CONTEXT,
01583                         0x0, GEN_MBOX, GEN_MBOX,
01584                         sizeof(struct golan_query_hca_vport_context_inbox),
01585                         sizeof(struct golan_query_hca_vport_context_outbox) );
01586 
01587         in = GOLAN_MBOX_IN ( cmd, in );
01588         in->port_num = (u8)ibdev->port;
01589 
01590         rc = send_command_and_wait ( golan, DEF_CMD_IDX, GEN_MBOX, GEN_MBOX, __FUNCTION__ );
01591         GOLAN_CHECK_RC_AND_CMD_STATUS( err_query_vport_context_cmd );
01592 
01593         context_data = (struct golan_query_hca_vport_context_data *)( GET_OUTBOX ( golan, GEN_MBOX ) );
01594 
01595         ibdev->node_guid.dwords[0]      = context_data->node_guid[0];
01596         ibdev->node_guid.dwords[1]      = context_data->node_guid[1];
01597         ibdev->lid                                      = be16_to_cpu( context_data->lid );
01598         ibdev->sm_lid                           = be16_to_cpu( context_data->sm_lid );
01599         ibdev->sm_sl                            = context_data->sm_sl;
01600         ibdev->port_state                       = context_data->port_state;
01601 
01602         return 0;
01603 err_query_vport_context_cmd:
01604         DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
01605         return rc;
01606 }
01607 
01608 
01609 static int golan_query_vport_gid ( struct ib_device *ibdev ) {
01610         struct golan *golan = ib_get_drvdata( ibdev );
01611         struct golan_cmd_layout *cmd;
01612         struct golan_query_hca_vport_gid_inbox *in;
01613         union ib_gid *ib_gid;
01614         int rc;
01615 
01616         cmd = write_cmd( golan, DEF_CMD_IDX, GOLAN_CMD_OP_QUERY_HCA_VPORT_GID,
01617                         0x0, GEN_MBOX, GEN_MBOX,
01618                         sizeof(struct golan_query_hca_vport_gid_inbox),
01619                         sizeof(struct golan_query_hca_vport_gid_outbox) );
01620 
01621         in = GOLAN_MBOX_IN ( cmd, in );
01622         in->port_num = (u8)ibdev->port;
01623         in->gid_index = 0;
01624         rc = send_command_and_wait ( golan, DEF_CMD_IDX, GEN_MBOX, GEN_MBOX, __FUNCTION__ );
01625         GOLAN_CHECK_RC_AND_CMD_STATUS( err_query_vport_gid_cmd );
01626 
01627         ib_gid = (union ib_gid *)( GET_OUTBOX ( golan, GEN_MBOX ) );
01628 
01629         memcpy ( &ibdev->gid, ib_gid, sizeof(ibdev->gid) );
01630 
01631         return 0;
01632 err_query_vport_gid_cmd:
01633         DBGC ( golan, "%s [%d] out\n", __FUNCTION__, rc);
01634         return rc;
01635 }
01636 
01637 static int golan_query_vport_pkey ( struct ib_device *ibdev ) {
01638         struct golan *golan = ib_get_drvdata ( ibdev );
01639         struct golan_cmd_layout *cmd;
01640         struct golan_query_hca_vport_pkey_inbox *in;
01641         int pkey_table_size_in_entries = (1 << (7 + golan->caps.pkey_table_size));
01642         int rc;
01643 
01644         cmd = write_cmd ( golan, DEF_CMD_IDX, GOLAN_CMD_OP_QUERY_HCA_VPORT_PKEY,
01645                         0x0, GEN_MBOX, GEN_MBOX,
01646                         sizeof(struct golan_query_hca_vport_pkey_inbox),
01647                         sizeof(struct golan_outbox_hdr) + 8 +
01648                         sizeof(struct golan_query_hca_vport_pkey_data) * pkey_table_size_in_entries );
01649 
01650         in = GOLAN_MBOX_IN ( cmd, in );
01651         in->port_num = (u8)ibdev->port;
01652         in->pkey_index = 0xffff;
01653         rc = send_command_and_wait ( golan, DEF_CMD_IDX, GEN_MBOX, GEN_MBOX, __FUNCTION__ );
01654         GOLAN_CHECK_RC_AND_CMD_STATUS( err_query_vport_pkey_cmd );
01655 
01656         return 0;
01657 err_query_vport_pkey_cmd:
01658         DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
01659         return rc;
01660 }
01661 
01662 static int golan_get_ib_info ( struct ib_device *ibdev ) {
01663         int rc;
01664 
01665         rc = golan_query_vport_context ( ibdev );
01666         if ( rc != 0 ) {
01667                 DBG ( "golan_get_ib_info: golan_query_vport_context Failed (rc = %d)\n",rc );
01668                 goto err_query_vport_context;
01669         }
01670 
01671         rc = golan_query_vport_gid ( ibdev );
01672         if ( rc != 0 ) {
01673                 DBG ( "golan_get_ib_info: golan_query_vport_gid Failed (rc = %d)\n",rc );
01674                 goto err_query_vport_gid;
01675         }
01676 
01677         rc = golan_query_vport_pkey ( ibdev );
01678         if ( rc != 0 ) {
01679                 DBG ( "golan_get_ib_info: golan_query_vport_pkey Failed (rc = %d)\n",rc );
01680                 goto err_query_vport_pkey;
01681         }
01682         return rc;
01683 err_query_vport_pkey:
01684 err_query_vport_gid:
01685 err_query_vport_context:
01686         DBG ( "%s [%d] out\n", __FUNCTION__, rc);
01687         return rc;
01688 }
01689 
01690 static int golan_complete(struct ib_device *ibdev,
01691                                 struct ib_completion_queue *cq,
01692                                 struct golan_cqe64 *cqe64)
01693 {
01694         struct golan *golan     = ib_get_drvdata(ibdev);
01695         struct ib_work_queue *wq;
01696         struct golan_queue_pair *golan_qp;
01697         struct ib_queue_pair *qp;
01698         struct io_buffer *iobuf = NULL;
01699         struct ib_address_vector recv_dest;
01700         struct ib_address_vector recv_source;
01701         struct ib_global_route_header *grh;
01702         struct golan_err_cqe *err_cqe64;
01703         int gid_present, idx;
01704         u16 wqe_ctr;
01705         uint8_t opcode;
01706         static int error_state;
01707         uint32_t qpn = be32_to_cpu(cqe64->sop_drop_qpn) & 0xffffff;
01708         int is_send = 0;
01709         size_t len;
01710 
01711         opcode = cqe64->op_own >> GOLAN_CQE_OPCODE_BIT;
01712         DBGC2( golan , "%s completion with opcode 0x%x\n", __FUNCTION__, opcode);
01713 
01714         if (opcode == GOLAN_CQE_REQ || opcode == GOLAN_CQE_REQ_ERR) {
01715                 is_send = 1;
01716         } else {
01717                 is_send = 0;
01718         }
01719         if (opcode == GOLAN_CQE_REQ_ERR || opcode == GOLAN_CQE_RESP_ERR) {
01720                 err_cqe64 = (struct golan_err_cqe *)cqe64;
01721                 int i = 0;
01722                 if (!error_state++) {
01723                         DBGC (golan ,"\n");
01724                         for ( i = 0 ; i < 16 ; i += 2 ) {
01725                                 DBGC (golan ,"%x       %x\n",
01726                                                 be32_to_cpu(((uint32_t *)(err_cqe64))[i]),
01727                                                 be32_to_cpu(((uint32_t *)(err_cqe64))[i + 1]));
01728                         }
01729                         DBGC (golan ,"CQE with error: Syndrome(0x%x), VendorSynd(0x%x), HW_SYN(0x%x)\n",
01730                                         err_cqe64->syndrome, err_cqe64->vendor_err_synd,
01731                                         err_cqe64->hw_syndrom);
01732                 }
01733         }
01734         /* Identify work queue */
01735         wq = ib_find_wq(cq, qpn, is_send);
01736         if (!wq) {
01737                 DBGC (golan ,"%s unknown %s QPN 0x%x in CQN 0x%lx\n",
01738                        __FUNCTION__, (is_send ? "send" : "recv"), qpn, cq->cqn);
01739                 return -EINVAL;
01740         }
01741 
01742         qp = wq->qp;
01743         golan_qp = ib_qp_get_drvdata ( qp );
01744 
01745         wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
01746         if (is_send) {
01747                 wqe_ctr &= ((GOLAN_WQEBBS_PER_SEND_UD_WQE * wq->num_wqes) - 1);
01748                 idx = wqe_ctr / GOLAN_WQEBBS_PER_SEND_UD_WQE;
01749         } else {
01750                 idx = wqe_ctr & (wq->num_wqes - 1);
01751         }
01752 
01753         iobuf = wq->iobufs[idx];
01754         if (!iobuf) {
01755                 DBGC (golan ,"%s IO Buffer 0x%x not found in QPN 0x%x\n",
01756                            __FUNCTION__, idx, qpn);
01757                 return -EINVAL;
01758         }
01759         wq->iobufs[idx] = NULL;
01760 
01761         if (is_send) {
01762                 ib_complete_send(ibdev, qp, iobuf, (opcode == GOLAN_CQE_REQ_ERR));
01763         } else {
01764                 len = be32_to_cpu(cqe64->byte_cnt);
01765                 memset(&recv_dest, 0, sizeof(recv_dest));
01766                 recv_dest.qpn = qpn;
01767                 /* Construct address vector */
01768                 memset(&recv_source, 0, sizeof(recv_source));
01769                 switch (qp->type) {
01770                 case IB_QPT_SMI:
01771                 case IB_QPT_GSI:
01772                 case IB_QPT_UD:
01773                         /* Locate corresponding GRH */
01774                         assert ( golan_qp->rq.grh != NULL );
01775                         grh = &golan_qp->rq.grh[ idx ];
01776 
01777                         recv_source.qpn = be32_to_cpu(cqe64->flags_rqpn) & 0xffffff;
01778                         recv_source.lid = be16_to_cpu(cqe64->slid);
01779                         recv_source.sl  = (be32_to_cpu(cqe64->flags_rqpn) >> 24) & 0xf;
01780                         gid_present = (be32_to_cpu(cqe64->flags_rqpn) >> 28) & 3;
01781                         if (!gid_present) {
01782                                 recv_dest.gid_present = recv_source.gid_present = 0;
01783                         } else {
01784                                 recv_dest.gid_present = recv_source.gid_present = 1;
01785                                 //if (recv_source.gid_present == 0x1) {
01786                                 memcpy(&recv_source.gid, &grh->sgid, sizeof(recv_source.gid));
01787                                 memcpy(&recv_dest.gid, &grh->dgid, sizeof(recv_dest.gid));
01788                                 //} else { // recv_source.gid_present = 0x3
01789                                         /* GRH is located in the upper 64 byte of the CQE128
01790                                          * currently not supported */
01791                                         //;
01792                                 //}
01793                         }
01794                         len -= sizeof ( *grh );
01795                         break;
01796                 case IB_QPT_RC:
01797                 case IB_QPT_ETH:
01798                 default:
01799                         DBGC (golan ,"%s Unsupported QP type (0x%x)\n", __FUNCTION__, qp->type);
01800                         return -EINVAL;
01801                 }
01802                 assert(len <= iob_tailroom(iobuf));
01803                 iob_put(iobuf, len);
01804                 ib_complete_recv(ibdev, qp, &recv_dest, &recv_source, iobuf, (opcode == GOLAN_CQE_RESP_ERR));
01805         }
01806         return 0;
01807 }
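
/*
 * Illustrative sketch, not part of the driver: in golan_complete() above a
 * send completion reports the WQE counter in units of WQE basic blocks
 * (WQEBBs), while a receive completion reports the receive WQE index
 * directly.  With a fixed number of WQEBBs per UD send WQE, the iobuf slot
 * is recovered by masking the counter to the ring and dividing by that
 * factor.  A hardware-free model of that calculation, assuming the product
 * of the two ring arguments below is a power of two:
 */
static unsigned int example_send_cqe_to_slot ( unsigned int wqe_counter,
                                               unsigned int wqebbs_per_wqe,
                                               unsigned int num_wqes ) {
        wqe_counter &= ( ( wqebbs_per_wqe * num_wqes ) - 1 );
        return ( wqe_counter / wqebbs_per_wqe );
}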
01808 
01809 static int golan_is_hw_ownership(struct ib_completion_queue *cq,
01810                                                                  struct golan_cqe64 *cqe64)
01811 {
01812         return ((cqe64->op_own & GOLAN_CQE_OWNER_MASK) !=
01813                         ((cq->next_idx >> ilog2(cq->num_cqes)) & 1));
01814 }
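
/*
 * Illustrative sketch, not part of the driver: with a power-of-two CQ of
 * num_cqes entries, bit ilog2(num_cqes) of the free-running consumer index
 * toggles once per complete pass over the ring.  golan_is_hw_ownership()
 * above compares the CQE's owner bit against that "phase" bit; a mismatch
 * means the entry still belongs to hardware.  A hardware-free model of the
 * same test (hypothetical name, software-ownership polarity):
 */
static int example_sw_owns_cqe ( unsigned int owner_bit,
                                 unsigned long consumer_idx,
                                 unsigned int num_cqes ) {
        /* num_cqes is assumed to be a power of two */
        unsigned int phase = ( ( consumer_idx / num_cqes ) & 1 );

        return ( owner_bit == phase );
}
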
01815 static void golan_poll_cq(struct ib_device *ibdev,
01816                                 struct ib_completion_queue *cq)
01817 {
01818         unsigned int            i;
01819         int                     rc = 0;
01820         unsigned int            cqe_idx_mask;
01821         struct golan_cqe64      *cqe64;
01822         struct golan_completion_queue *golan_cq = ib_cq_get_drvdata(cq);
01823         struct golan            *golan  = ib_get_drvdata(ibdev);
01824 
01825         for (i = 0; i < cq->num_cqes; ++i) {
01826                 /* Look for completion entry */
01827                 cqe_idx_mask = (cq->num_cqes - 1);
01828                 cqe64 = &golan_cq->cqes[cq->next_idx & cqe_idx_mask];
01829                 /* temporarily valid: assumes 64-byte CQEs only */
01830                 if (golan_is_hw_ownership(cq, cqe64) ||
01831                         ((cqe64->op_own >> GOLAN_CQE_OPCODE_BIT) ==
01832                         GOLAN_CQE_OPCODE_NOT_VALID)) {
01833                         break;  /* HW ownership */
01834                 }
01835 
01836                 DBGC2( golan , "%s CQN 0x%lx [%ld] \n", __FUNCTION__, cq->cqn, cq->next_idx);
01837                 /*
01838                  * Make sure we read CQ entry contents after we've checked the
01839                  * ownership bit. (PRM - 6.5.3.2)
01840                  */
01841                 rmb();
01842                 rc = golan_complete(ibdev, cq, cqe64);
01843                 if (rc != 0) {
01844                         DBGC (golan ,"%s CQN 0x%lx failed to complete\n", __FUNCTION__, cq->cqn);
01845                 }
01846 
01847                 /* Update completion queue's index */
01848                 cq->next_idx++;
01849 
01850                 /* Update doorbell record */
01851                 *(golan_cq->doorbell_record) = cpu_to_be32(cq->next_idx & 0xffffff);
01852         }
01853 }
01854 
01855 static const char *golan_eqe_type_str(u8 type)
01856 {
01857         switch (type) {
01858         case GOLAN_EVENT_TYPE_COMP:
01859                 return "GOLAN_EVENT_TYPE_COMP";
01860         case GOLAN_EVENT_TYPE_PATH_MIG:
01861                 return "GOLAN_EVENT_TYPE_PATH_MIG";
01862         case GOLAN_EVENT_TYPE_COMM_EST:
01863                 return "GOLAN_EVENT_TYPE_COMM_EST";
01864         case GOLAN_EVENT_TYPE_SQ_DRAINED:
01865                 return "GOLAN_EVENT_TYPE_SQ_DRAINED";
01866         case GOLAN_EVENT_TYPE_SRQ_LAST_WQE:
01867                 return "GOLAN_EVENT_TYPE_SRQ_LAST_WQE";
01868         case GOLAN_EVENT_TYPE_SRQ_RQ_LIMIT:
01869                 return "GOLAN_EVENT_TYPE_SRQ_RQ_LIMIT";
01870         case GOLAN_EVENT_TYPE_CQ_ERROR:
01871                 return "GOLAN_EVENT_TYPE_CQ_ERROR";
01872         case GOLAN_EVENT_TYPE_WQ_CATAS_ERROR:
01873                 return "GOLAN_EVENT_TYPE_WQ_CATAS_ERROR";
01874         case GOLAN_EVENT_TYPE_PATH_MIG_FAILED:
01875                 return "GOLAN_EVENT_TYPE_PATH_MIG_FAILED";
01876         case GOLAN_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
01877                 return "GOLAN_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
01878         case GOLAN_EVENT_TYPE_WQ_ACCESS_ERROR:
01879                 return "GOLAN_EVENT_TYPE_WQ_ACCESS_ERROR";
01880         case GOLAN_EVENT_TYPE_SRQ_CATAS_ERROR:
01881                 return "GOLAN_EVENT_TYPE_SRQ_CATAS_ERROR";
01882         case GOLAN_EVENT_TYPE_INTERNAL_ERROR:
01883                 return "GOLAN_EVENT_TYPE_INTERNAL_ERROR";
01884         case GOLAN_EVENT_TYPE_PORT_CHANGE:
01885                 return "GOLAN_EVENT_TYPE_PORT_CHANGE";
01886         case GOLAN_EVENT_TYPE_GPIO_EVENT:
01887                 return "GOLAN_EVENT_TYPE_GPIO_EVENT";
01888         case GOLAN_EVENT_TYPE_REMOTE_CONFIG:
01889                 return "GOLAN_EVENT_TYPE_REMOTE_CONFIG";
01890         case GOLAN_EVENT_TYPE_DB_BF_CONGESTION:
01891                 return "GOLAN_EVENT_TYPE_DB_BF_CONGESTION";
01892         case GOLAN_EVENT_TYPE_STALL_EVENT:
01893                 return "GOLAN_EVENT_TYPE_STALL_EVENT";
01894         case GOLAN_EVENT_TYPE_CMD:
01895                 return "GOLAN_EVENT_TYPE_CMD";
01896         case GOLAN_EVENT_TYPE_PAGE_REQUEST:
01897                 return "GOLAN_EVENT_TYPE_PAGE_REQUEST";
01898         default:
01899                 return "Unrecognized event";
01900         }
01901 }
01902 
01903 static const char *golan_eqe_port_subtype_str(u8 subtype)
01904 {
01905         switch (subtype) {
01906         case GOLAN_PORT_CHANGE_SUBTYPE_DOWN:
01907                 return "GOLAN_PORT_CHANGE_SUBTYPE_DOWN";
01908         case GOLAN_PORT_CHANGE_SUBTYPE_ACTIVE:
01909                 return "GOLAN_PORT_CHANGE_SUBTYPE_ACTIVE";
01910         case GOLAN_PORT_CHANGE_SUBTYPE_INITIALIZED:
01911                 return "GOLAN_PORT_CHANGE_SUBTYPE_INITIALIZED";
01912         case GOLAN_PORT_CHANGE_SUBTYPE_LID:
01913                 return "GOLAN_PORT_CHANGE_SUBTYPE_LID";
01914         case GOLAN_PORT_CHANGE_SUBTYPE_PKEY:
01915                 return "GOLAN_PORT_CHANGE_SUBTYPE_PKEY";
01916         case GOLAN_PORT_CHANGE_SUBTYPE_GUID:
01917                 return "GOLAN_PORT_CHANGE_SUBTYPE_GUID";
01918         case GOLAN_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
01919                 return "GOLAN_PORT_CHANGE_SUBTYPE_CLIENT_REREG";
01920         default:
01921                 return "Unrecognized port subtype";
01922         }
01923 }
01924 
01925 /**
01926  * Update Infiniband parameters using Commands
01927  *
01928  * @v ibdev             Infiniband device
01929  * @ret rc              Return status code
01930  */
01931 static int golan_ib_update ( struct ib_device *ibdev ) {
01932         int rc;
01933 
01934         /* Get IB parameters */
01935         if ( ( rc = golan_get_ib_info ( ibdev ) ) != 0 )
01936                 return rc;
01937 
01938         /* Notify Infiniband core of potential link state change */
01939         ib_link_state_changed ( ibdev );
01940 
01941         return 0;
01942 }
01943 
01944 static inline void golan_handle_port_event(struct golan *golan, struct golan_eqe *eqe)
01945 {
01946         struct ib_device *ibdev;
01947         u8 port;
01948 
01949         port = (eqe->data.port.port >> 4) & 0xf;
01950         ibdev = golan->ports[port - 1].ibdev;
01951 
01952         if ( ! ib_is_open ( ibdev ) )
01953                 return;
01954 
01955         switch (eqe->sub_type) {
01956         case GOLAN_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
01957         case GOLAN_PORT_CHANGE_SUBTYPE_ACTIVE:
01958                 golan_ib_update ( ibdev );
01959                 /* Fall through */
01960         case GOLAN_PORT_CHANGE_SUBTYPE_DOWN:
01961         case GOLAN_PORT_CHANGE_SUBTYPE_LID:
01962         case GOLAN_PORT_CHANGE_SUBTYPE_PKEY:
01963         case GOLAN_PORT_CHANGE_SUBTYPE_GUID:
01964         case GOLAN_PORT_CHANGE_SUBTYPE_INITIALIZED:
01965                 DBGC( golan , "%s event %s(%d) (sub event %s(%d)) arrived on port %d\n",
01966                            __FUNCTION__, golan_eqe_type_str(eqe->type), eqe->type,
01967                            golan_eqe_port_subtype_str(eqe->sub_type),
01968                            eqe->sub_type, port);
01969                 break;
01970         default:
01971                 DBGC (golan ,"%s Port event with unrecognized subtype: port %d, sub_type %d\n",
01972                            __FUNCTION__, port, eqe->sub_type);
01973         }
01974 }
01975 
01976 static struct golan_eqe *golan_next_eqe_sw(struct golan_event_queue *eq)
01977 {
01978         uint32_t entry = (eq->cons_index & (GOLAN_NUM_EQES - 1));
01979         struct golan_eqe *eqe = &(eq->eqes[entry]);
01980         return ((eqe->owner != ((eq->cons_index >> ilog2(GOLAN_NUM_EQES)) & 1)) ? NULL : eqe);
01981 }
01982 
01983 
01984 /**
01985  * Poll event queue
01986  *
01987  * @v ibdev             Infiniband device
01988  */
01989 static void golan_poll_eq(struct ib_device *ibdev)
01990 {
01991         struct golan            *golan  = ib_get_drvdata(ibdev);
01992         struct golan_event_queue *eq    = &(golan->eq);
01993         struct golan_eqe        *eqe;
01994         u32 cqn;
01995         int counter = 0;
01996 
01997         while ((eqe = golan_next_eqe_sw(eq)) && (counter < GOLAN_NUM_EQES)) {
01998                 /*
01999                  * Make sure we read EQ entry contents after we've
02000                  * checked the ownership bit.
02001                  */
02002                 rmb();
02003 
02004                 DBGC( golan , "%s eqn %d, eqe type %s\n", __FUNCTION__, eq->eqn,
02005                            golan_eqe_type_str(eqe->type));
02006                 switch (eqe->type) {
02007                 case GOLAN_EVENT_TYPE_COMP:
02008                         /* We don't need to handle completion events, since we
02009                          * poll all the CQs after polling the EQ */
02010                         break;
02011                 case GOLAN_EVENT_TYPE_PATH_MIG:
02012                 case GOLAN_EVENT_TYPE_COMM_EST:
02013                 case GOLAN_EVENT_TYPE_SQ_DRAINED:
02014                 case GOLAN_EVENT_TYPE_SRQ_LAST_WQE:
02015                 case GOLAN_EVENT_TYPE_WQ_CATAS_ERROR:
02016                 case GOLAN_EVENT_TYPE_PATH_MIG_FAILED:
02017                 case GOLAN_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
02018                 case GOLAN_EVENT_TYPE_WQ_ACCESS_ERROR:
02019                 case GOLAN_EVENT_TYPE_SRQ_RQ_LIMIT:
02020                 case GOLAN_EVENT_TYPE_SRQ_CATAS_ERROR:
02021                         DBGC( golan , "%s event %s(%d) arrived\n", __FUNCTION__,
02022                                    golan_eqe_type_str(eqe->type), eqe->type);
02023                         break;
02024                 case GOLAN_EVENT_TYPE_CMD:
02025 //                      golan_cmd_comp_handler(be32_to_cpu(eqe->data.cmd.vector));
02026                         break;
02027                 case GOLAN_EVENT_TYPE_PORT_CHANGE:
02028                         golan_handle_port_event(golan, eqe);
02029                         break;
02030                 case GOLAN_EVENT_TYPE_CQ_ERROR:
02031                         cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
02032                         DBGC (golan ,"CQ error on CQN 0x%x, syndrome 0x%x\n",
02033                                    cqn, eqe->data.cq_err.syndrome);
02034 //                      mlx5_cq_event(dev, cqn, eqe->type);
02035                         break;
02036                 /*
02037                  * The driver does not currently support dynamic memory requests
02038                  * while the firmware is running.  A follow-up change will allocate the
02039                  * FW pages once and keep them until driver shutdown; that change will
02040                  * still not service this request, since it is never issued anyway.
02041                 case GOLAN_EVENT_TYPE_PAGE_REQUEST:
02042                         {
02043                                 // we should check if we get this event while we
02044                                 // waiting for a command
02045                                 u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
02046                                 s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages);
02047 
02048                                 DBGC (golan ,"%s page request for func 0x%x, npages %d\n",
02049                                            __FUNCTION__, func_id, npages);
02050                                 golan_provide_pages(golan, npages, func_id);
02051                         }
02052                         break;
02053                 */
02054                 default:
02055                         DBGC (golan ,"%s Unhandled event 0x%x on EQ 0x%x\n", __FUNCTION__,
02056                                    eqe->type, eq->eqn);
02057                         break;
02058                 }
02059 
02060                 ++eq->cons_index;
02061                 golan_eq_update_ci(eq, GOLAN_EQ_UNARMED);
02062                 ++counter;
02063         }
02064 }
02065 
02066 /**
02067  * Attach to multicast group
02068  *
02069  * @v ibdev             Infiniband device
02070  * @v qp                Queue pair
02071  * @v gid               Multicast GID
02072  * @ret rc              Return status code
02073  */
02074 static int golan_mcast_attach(struct ib_device *ibdev,
02075                                 struct ib_queue_pair *qp,
02076                                 union ib_gid *gid)
02077 {
02078         struct golan *golan = ib_get_drvdata(ibdev);
02079         struct golan_cmd_layout *cmd;
02080         int rc;
02081 
02082         if ( qp == NULL ) {
02083                 DBGC( golan, "%s: Invalid pointer, could not attach QPN to MCG\n",
02084                         __FUNCTION__ );
02085                 return -EFAULT;
02086         }
02087 
02088         cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_ATTACH_TO_MCG, 0x0,
02089                                         GEN_MBOX, NO_MBOX,
02090                                         sizeof(struct golan_attach_mcg_mbox_in),
02091                                         sizeof(struct golan_attach_mcg_mbox_out));
02092         ((struct golan_attach_mcg_mbox_in *)(cmd->in))->qpn = cpu_to_be32(qp->qpn);
02093 
02094         memcpy(GET_INBOX(golan, GEN_MBOX), gid, sizeof(*gid));
02095 
02096         rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__);
02097         GOLAN_CHECK_RC_AND_CMD_STATUS( err_attach_to_mcg_cmd );
02098 
02099         DBGC( golan , "%s: QPN 0x%lx was attached to MCG\n", __FUNCTION__, qp->qpn);
02100         return 0;
02101 err_attach_to_mcg_cmd:
02102         DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
02103         return rc;
02104 }
02105 
02106 /**
02107  * Detach from multicast group
02108  *
02109  * @v ibdev             Infiniband device
02110  * @v qp                Queue pair
02111  * @v gid               Multicast GID
02113  */
02114 static void golan_mcast_detach(struct ib_device *ibdev,
02115                                 struct ib_queue_pair *qp,
02116                                 union ib_gid *gid)
02117 {
02118         struct golan *golan = ib_get_drvdata(ibdev);
02119         struct golan_cmd_layout *cmd;
02120         int rc;
02121 
02122         cmd = write_cmd(golan, DEF_CMD_IDX, GOLAN_CMD_OP_DETACH_FROM_MCG, 0x0,
02123                                         GEN_MBOX, NO_MBOX,
02124                                     sizeof(struct golan_detach_mcg_mbox_in),
02125                                     sizeof(struct golan_detach_mcg_mbox_out));
02126         ((struct golan_detach_mcg_mbox_in *)(cmd->in))->qpn = cpu_to_be32(qp->qpn);
02127 
02128         memcpy(GET_INBOX(golan, GEN_MBOX), gid, sizeof(*gid));
02129 
02130         rc = send_command_and_wait(golan, DEF_CMD_IDX, GEN_MBOX, NO_MBOX, __FUNCTION__);
02131         GOLAN_PRINT_RC_AND_CMD_STATUS;
02132 
02133         DBGC( golan , "%s: QPN 0x%lx was detached from MCG\n", __FUNCTION__, qp->qpn);
02134 }
02135 
02136 /**
02137  * Inform embedded subnet management agent of a received MAD
02138  *
02139  * @v ibdev             Infiniband device
02140  * @v mad               MAD
02141  * @ret rc              Return status code
02142  */
02143 static int golan_inform_sma(struct ib_device *ibdev,
02144                                 union ib_mad *mad)
02145 {
02146         if (!ibdev || !mad) {
02147                 return 1;
02148         }
02149 
02150         return 0;
02151 }
02152 
02153 static int golan_register_ibdev(struct golan_port *port)
02154 {
02155         struct ib_device *ibdev = port->ibdev;
02156         int rc;
02157 
02158         golan_get_ib_info ( ibdev );
02159         /* Register Infiniband device */
02160         if ((rc = register_ibdev(ibdev)) != 0) {
02161                 DBG ( "%s port %d could not register IB device: (rc = %d)\n",
02162                         __FUNCTION__, ibdev->port, rc);
02163                 return rc;
02164         }
02165 
02166         port->netdev = ipoib_netdev( ibdev );
02167 
02168         return 0;
02169 }
02170 
02171 static inline void golan_bring_down(struct golan *golan)
02172 {
02173         DBGC(golan, "%s: start\n", __FUNCTION__);
02174 
02175         if (~golan->flags & GOLAN_OPEN) {
02176                 DBGC(golan, "%s: end (already closed)\n", __FUNCTION__);
02177                 return;
02178         }
02179 
02180         golan_destroy_mkey(golan);
02181         golan_dealloc_pd(golan);
02182         golan_destory_eq(golan);
02183         golan_dealloc_uar(golan);
02184         golan_teardown_hca(golan, GOLAN_TEARDOWN_GRACEFUL);
02185         golan_handle_pages(golan, GOLAN_REG_PAGES , GOLAN_PAGES_TAKE);
02186         golan_disable_hca(golan);
02187         golan_cmd_uninit(golan);
02188         golan->flags &= ~GOLAN_OPEN;
02189         DBGC(golan, "%s: end\n", __FUNCTION__);
02190 }
02191 
02192 static int golan_set_link_speed ( struct golan *golan ){
02193         mlx_status status;
02194         int i = 0;
02195         int utils_inited = 0;
02196 
02197         if ( ! golan->utils ) {
02198                 utils_inited = 1;
02199                 status = init_mlx_utils ( & golan->utils, golan->pci );
02200                 MLX_CHECK_STATUS ( golan->pci, status, utils_init_err, "mlx_utils_init failed" );
02201         }
02202 
02203         for ( i = 0; i < golan->caps.num_ports; ++i ) {
02204                 status = mlx_set_link_speed ( golan->utils, i + 1, LINK_SPEED_IB, LINK_SPEED_SDR );
02205                 MLX_CHECK_STATUS ( golan->pci, status, set_link_speed_err, "mlx_set_link_speed failed" );
02206         }
02207 
02208 set_link_speed_err:
02209         if ( utils_inited )
02210                 free_mlx_utils ( & golan->utils );
02211 utils_init_err:
02212         return status;
02213 }
02214 
02215 static inline int golan_bring_up(struct golan *golan)
02216 {
02217         int rc = 0;
02218         DBGC(golan, "%s\n", __FUNCTION__);
02219 
02220         if (golan->flags & GOLAN_OPEN)
02221                 return 0;
02222 
02223         if (( rc = golan_cmd_init(golan) ))
02224                 goto out;
02225 
02226         if (( rc = golan_core_enable_hca(golan) ))
02227                 goto cmd_uninit;
02228 
02229         /* Query for need for boot pages */
02230         if (( rc = golan_handle_pages(golan, GOLAN_BOOT_PAGES, GOLAN_PAGES_GIVE) ))
02231                 goto disable;
02232 
02233         if (( rc = golan_qry_hca_cap(golan) ))
02234                 goto pages;
02235 
02236         if (( rc = golan_set_hca_cap(golan) ))
02237                 goto pages;
02238 
02239         if (( rc = golan_handle_pages(golan, GOLAN_INIT_PAGES, GOLAN_PAGES_GIVE) ))
02240                 goto pages;
02241 
02242         if (( rc = golan_set_link_speed ( golan ) ))
02243                 goto pages_teardown;
02244 
02245         //Reg Init?
02246         if (( rc = golan_hca_init(golan) ))
02247                 goto pages_2;
02248 
02249         if (( rc = golan_alloc_uar(golan) ))
02250                 goto teardown;
02251 
02252         if (( rc = golan_create_eq(golan) ))
02253                 goto de_uar;
02254 
02255         if (( rc = golan_alloc_pd(golan) ))
02256                 goto de_eq;
02257 
02258         if (( rc = golan_create_mkey(golan) ))
02259                 goto de_pd;
02260 
02261         golan->flags |= GOLAN_OPEN;
02262         return 0;
02263 
02264         golan_destroy_mkey(golan);
02265 de_pd:
02266         golan_dealloc_pd(golan);
02267 de_eq:
02268         golan_destory_eq(golan);
02269 de_uar:
02270         golan_dealloc_uar(golan);
02271 teardown:
02272         golan_teardown_hca(golan, GOLAN_TEARDOWN_GRACEFUL);
02273 pages_2:
02274 pages_teardown:
02275         golan_handle_pages(golan, GOLAN_INIT_PAGES, GOLAN_PAGES_TAKE);
02276 pages:
02277         golan_handle_pages(golan, GOLAN_BOOT_PAGES, GOLAN_PAGES_TAKE);
02278 disable:
02279         golan_disable_hca(golan);
02280 cmd_uninit:
02281         golan_cmd_uninit(golan);
02282 out:
02283         return rc;
02284 }
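
/*
 * Illustrative sketch, not part of the driver: golan_bring_up() above uses
 * the usual staged-initialisation pattern, where a failure at step N jumps
 * to a label that unwinds steps N-1..1 in reverse order of acquisition.  A
 * minimal, generic model of the pattern (all "example_*" helpers are
 * hypothetical stubs):
 */
static int example_step_a ( void ) { return 0; }
static void example_undo_a ( void ) { }
static int example_step_b ( void ) { return 0; }

static int example_staged_bring_up ( void ) {
        int rc;

        if ( ( rc = example_step_a() ) != 0 )
                goto err_step_a;
        if ( ( rc = example_step_b() ) != 0 )
                goto err_step_b;
        return 0;

        /* Unwind in reverse order of acquisition */
err_step_b:
        example_undo_a();
err_step_a:
        return rc;
}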
02285 
02286 /**
02287  * Close Infiniband link
02288  *
02289  * @v ibdev             Infiniband device
02290  */
02291 static void golan_ib_close ( struct ib_device *ibdev ) {
02292         struct golan *golan = NULL;
02293 
02294         DBG ( "%s start\n", __FUNCTION__ );
02295         if ( ! ibdev )
02296                 return;
02297         golan = ib_get_drvdata ( ibdev );
02298         golan_bring_down ( golan );
02299         DBG ( "%s end\n", __FUNCTION__ );
02300 }
02301 
02302 /**
02303  * Initialise Infiniband link
02304  *
02305  * @v ibdev             Infiniband device
02306  * @ret rc              Return status code
02307  */
02308 static int golan_ib_open ( struct ib_device *ibdev ) {
02309         struct golan *golan = NULL;
02310         DBG ( "%s start\n", __FUNCTION__ );
02311 
02312         if ( ! ibdev )
02313                 return -EINVAL;
02314         golan = ib_get_drvdata ( ibdev );
02315         golan_bring_up ( golan );
02316         golan_ib_update ( ibdev );
02317 
02318         DBG ( "%s end\n", __FUNCTION__ );
02319         return 0;
02320 }
02321 
02322 /** Golan Infiniband operations */
02323 static struct ib_device_operations golan_ib_operations = {
02324         .create_cq      = golan_create_cq,
02325         .destroy_cq     = golan_destroy_cq,
02326         .create_qp      = golan_create_qp,
02327         .modify_qp      = golan_modify_qp,
02328         .destroy_qp     = golan_destroy_qp,
02329         .post_send      = golan_post_send,
02330         .post_recv      = golan_post_recv,
02331         .poll_cq        = golan_poll_cq,
02332         .poll_eq        = golan_poll_eq,
02333         .open           = golan_ib_open,
02334         .close          = golan_ib_close,
02335         .mcast_attach   = golan_mcast_attach,
02336         .mcast_detach   = golan_mcast_detach,
02337         .set_port_info  = golan_inform_sma,
02338         .set_pkey_table = golan_inform_sma,
02339 };
02340 
02341 static int golan_probe_normal ( struct pci_device *pci ) {
02342         struct golan *golan;
02343         struct ib_device *ibdev;
02344         struct golan_port *port;
02345         int i;
02346         int rc = 0;
02347 
02348         golan = golan_alloc();
02349         if ( !golan ) {
02350                 rc = -ENOMEM;
02351                 goto err_golan_alloc;
02352         }
02353 
02354         /* At the POST stage, some BIOSes have only limited dynamic memory available */
02355         if ( golan_init_fw_areas ( golan ) ) {
02356                 rc = -ENOMEM;
02357                 goto err_golan_golan_init_pages;
02358         }
02359 
02360         /* Setup PCI bus and HCA BAR */
02361         pci_set_drvdata( pci, golan );
02362         golan->pci = pci;
02363         golan_pci_init( golan );
02364         /* config command queues */
02365         if ( golan_bring_up( golan ) ) {
02366                 DBGC (golan ,"golan bringup failed\n");
02367                 rc = -1;
02368                 goto err_golan_bringup;
02369         }
02370 
02371         if ( ! DEVICE_IS_CIB ( pci->device ) ) {
02372                 if ( init_mlx_utils ( & golan->utils, pci ) ) {
02373                         rc = -1;
02374                         goto err_utils_init;
02375                 }
02376         }
02377         /* Allocate Infiniband devices */
02378         for (i = 0; i < golan->caps.num_ports; ++i) {
02379                 ibdev = alloc_ibdev( 0 );
02380                 if ( !ibdev ) {
02381                         rc = -ENOMEM;
02382                         goto err_golan_probe_alloc_ibdev;
02383                 }
02384                 golan->ports[i].ibdev = ibdev;
02385                 golan->ports[i].vep_number = 0;
02386                 ibdev->op = &golan_ib_operations;
02387                 ibdev->dev = &pci->dev;
02388                 ibdev->port = (GOLAN_PORT_BASE + i);
02389                 ib_set_drvdata( ibdev, golan );
02390         }
02391 
02392         /* Register devices */
02393         for ( i = 0; i < golan->caps.num_ports; ++i ) {
02394                 port = &golan->ports[i];
02395                 if ((rc = golan_register_ibdev ( port ) ) != 0 ) {
02396                         goto err_golan_probe_register_ibdev;
02397                 }
02398         }
02399 
02400         golan_bring_down ( golan );
02401 
02402         return 0;
02403 
02404         i = golan->caps.num_ports;
02405 err_golan_probe_register_ibdev:
02406         for ( i-- ; ( signed int ) i >= 0 ; i-- )
02407                 unregister_ibdev ( golan->ports[i].ibdev );
02408 
02409         i = golan->caps.num_ports;
02410 err_golan_probe_alloc_ibdev:
02411         for ( i-- ; ( signed int ) i >= 0 ; i-- )
02412                 ibdev_put ( golan->ports[i].ibdev );
02413         if ( ! DEVICE_IS_CIB ( pci->device ) ) {
02414                 free_mlx_utils ( & golan->utils );
02415         }
02416 err_utils_init:
02417         golan_bring_down ( golan );
02418 err_golan_bringup:
02419         iounmap( golan->iseg );
02420         golan_free_fw_areas ( golan );
02421 err_golan_golan_init_pages:
02422         free ( golan );
02423 err_golan_alloc:
02424         DBGC (golan ,"%s rc = %d\n", __FUNCTION__, rc);
02425         return rc;
02426 }
02427 
02428 static void golan_remove_normal ( struct pci_device *pci ) {
02429         struct golan    *golan = pci_get_drvdata(pci);
02430         struct golan_port *port;
02431         int i;
02432 
02433         DBGC(golan, "%s\n", __FUNCTION__);
02434 
02435         for ( i = ( golan->caps.num_ports - 1 ) ; i >= 0 ; i-- ) {
02436                 port = &golan->ports[i];
02437                 unregister_ibdev ( port->ibdev );
02438         }
02439         for ( i = ( golan->caps.num_ports - 1 ) ; i >= 0 ; i-- ) {
02440                 netdev_nullify ( golan->ports[i].netdev );
02441         }
02442         for ( i = ( golan->caps.num_ports - 1 ) ; i >= 0 ; i-- ) {
02443                 ibdev_put ( golan->ports[i].ibdev );
02444         }
02445         if ( ! DEVICE_IS_CIB ( pci->device ) ) {
02446                 free_mlx_utils ( & golan->utils );
02447         }
02448         iounmap( golan->iseg );
02449         golan_free_fw_areas ( golan );
02450         free(golan);
02451 }
02452 
02453 /***************************************************************************
02454  * NODNIC operations
02455  **************************************************************************/
02456 static mlx_status shomron_tx_uar_send_db ( struct ib_device *ibdev,
02457                 struct nodnic_send_wqbb *wqbb ) {
02458         mlx_status status = MLX_SUCCESS;
02459         struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
02460         struct shomron_nodnic_eth_send_wqe *eth_wqe =
02461                         ( struct shomron_nodnic_eth_send_wqe * )wqbb;
02462         struct shomronprm_wqe_segment_ctrl_send *ctrl;
02463 
02464         if ( ! eth_wqe || ! flexboot_nodnic->device_priv.uar.virt ) {
02465                 DBG("%s: Invalid parameters\n",__FUNCTION__);
02466                 status = MLX_FAILED;
02467                 goto err;
02468         }
02469         wmb();
02470         ctrl = & eth_wqe->ctrl;
02471         writeq(*((__be64 *)ctrl), flexboot_nodnic->device_priv.uar.virt +
02472                         ( ( MLX_GET ( ctrl, wqe_index ) & 0x1 ) ? DB_BUFFER0_ODD_OFFSET
02473                         : DB_BUFFER0_EVEN_OFFSET ) );
02474 err:
02475         return status;
02476 }
02477 
02478 static mlx_status shomron_fill_eth_send_wqe ( struct ib_device *ibdev,
02479                            struct ib_queue_pair *qp, struct ib_address_vector *av __unused,
02480                            struct io_buffer *iobuf, struct nodnic_send_wqbb *wqbb,
02481                            unsigned long wqe_index ) {
02482         mlx_status status = MLX_SUCCESS;
02483         struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
02484         struct shomron_nodnic_eth_send_wqe *eth_wqe =  NULL;
02485         struct flexboot_nodnic_port *port = &flexboot_nodnic->port[ibdev->port - 1];
02486         struct flexboot_nodnic_queue_pair *flexboot_nodnic_qp =
02487                         ib_qp_get_drvdata ( qp );
02488         nodnic_qp *nodnic_qp = flexboot_nodnic_qp->nodnic_queue_pair;
02489         struct nodnic_send_ring *send_ring = &nodnic_qp->send;
02490         mlx_uint32 qpn = 0;
02491 
02492         eth_wqe = (struct shomron_nodnic_eth_send_wqe *)wqbb;
02493         memset ( ( ( ( void * ) eth_wqe ) ), 0,
02494                            ( sizeof ( *eth_wqe ) ) );
02495 
02496         status = nodnic_port_get_qpn(&port->port_priv, &send_ring->nodnic_ring,
02497                         &qpn);
02498         if ( status != MLX_SUCCESS ) {
02499                 DBG("nodnic_port_get_qpn failed\n");
02500                 goto err;
02501         }
02502 
02503 #define SHOMRON_GENERATE_CQE 0x3
02504 #define SHOMRON_INLINE_HEADERS_SIZE 18
02505 #define SHOMRON_INLINE_HEADERS_OFFSET 32
02506         MLX_FILL_2 ( &eth_wqe->ctrl, 0, opcode, FLEXBOOT_NODNIC_OPCODE_SEND,
02507                         wqe_index, wqe_index & 0xFFFF);
02508         MLX_FILL_2 ( &eth_wqe->ctrl, 1, ds, 0x4 , qpn, qpn );
02509         MLX_FILL_1 ( &eth_wqe->ctrl, 2,
02510                      ce, SHOMRON_GENERATE_CQE /* generate completion */
02511                          );
02512         MLX_FILL_2 ( &eth_wqe->ctrl, 7,
02513                         inline_headers1,
02514                         cpu_to_be16(*(mlx_uint16 *)iobuf->data),
02515                         inline_headers_size, SHOMRON_INLINE_HEADERS_SIZE
02516                          );
02517         memcpy((void *)&eth_wqe->ctrl + SHOMRON_INLINE_HEADERS_OFFSET,
02518                         iobuf->data + 2, SHOMRON_INLINE_HEADERS_SIZE - 2);
02519         iob_pull(iobuf, SHOMRON_INLINE_HEADERS_SIZE);
02520         MLX_FILL_1 ( &eth_wqe->data[0], 0,
02521                      byte_count, iob_len ( iobuf ) );
02522         MLX_FILL_1 ( &eth_wqe->data[0], 1, l_key,
02523                         flexboot_nodnic->device_priv.lkey );
02524         MLX_FILL_H ( &eth_wqe->data[0], 2,
02525                      local_address_h, virt_to_bus ( iobuf->data ) );
02526         MLX_FILL_1 ( &eth_wqe->data[0], 3,
02527                      local_address_l, virt_to_bus ( iobuf->data ) );
02528 err:
02529         return status;
02530 }
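
/*
 * Illustrative sketch, not part of the driver: the WQE built above carries
 * the first SHOMRON_INLINE_HEADERS_SIZE (18) bytes of the frame inline -
 * two bytes in the control segment's inline_headers1 field and the
 * remaining sixteen at byte offset SHOMRON_INLINE_HEADERS_OFFSET - and
 * then points a single scatter entry at whatever is left of the packet.
 * A hardware-free model of that split, treating the WQE as a plain byte
 * buffer (hypothetical helper):
 */
static size_t example_split_inline_headers ( uint8_t *wqe,
                                             size_t inline_offset,
                                             const uint8_t *frame,
                                             size_t frame_len,
                                             size_t inline_len ) {
        /* The first two inline bytes live in the control segment ... */
        memcpy ( wqe, frame, 2 );
        /* ... the rest of the inline region starts at inline_offset ... */
        memcpy ( wqe + inline_offset, frame + 2, ( inline_len - 2 ) );
        /* ... and the caller scatters the remaining payload separately */
        return ( frame_len - inline_len );
}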
02531 
02532 static mlx_status shomron_fill_completion( void *cqe, struct cqe_data *cqe_data ) {
02533         union shomronprm_completion_entry *cq_entry;
02534         uint32_t opcode;
02535 
02536         cq_entry = (union shomronprm_completion_entry *)cqe;
02537         cqe_data->owner = MLX_GET ( &cq_entry->normal, owner );
02538         opcode = MLX_GET ( &cq_entry->normal, opcode );
02539 #define FLEXBOOT_NODNIC_OPCODE_CQ_SEND 0
02540 #define FLEXBOOT_NODNIC_OPCODE_CQ_RECV 2
02541 #define FLEXBOOT_NODNIC_OPCODE_CQ_SEND_ERR 13
02542 #define FLEXBOOT_NODNIC_OPCODE_CQ_RECV_ERR 14
02543         cqe_data->is_error =
02544                         ( opcode >= FLEXBOOT_NODNIC_OPCODE_CQ_SEND_ERR ); /* covers both error opcodes */
02545         if ( cqe_data->is_error ) {
02546                 cqe_data->syndrome = MLX_GET ( &cq_entry->error, syndrome );
02547                 cqe_data->vendor_err_syndrome =
02548                                 MLX_GET ( &cq_entry->error, vendor_error_syndrome );
02549                 cqe_data->is_send =
02550                                         (opcode == FLEXBOOT_NODNIC_OPCODE_CQ_SEND_ERR);
02551         } else {
02552                 cqe_data->is_send =
02553                         (opcode == FLEXBOOT_NODNIC_OPCODE_CQ_SEND);
02554                 cqe_data->wqe_counter = MLX_GET ( &cq_entry->normal, wqe_counter );
02555                 cqe_data->byte_cnt = MLX_GET ( &cq_entry->normal, byte_cnt );
02556 
02557         }
02558         if ( cqe_data->is_send == TRUE )
02559                 cqe_data->qpn = MLX_GET ( &cq_entry->normal, qpn );
02560         else
02561                 cqe_data->qpn = MLX_GET ( &cq_entry->normal, srqn );
02562 
02563         return 0;
02564 }
02565 
02566 static mlx_status shomron_cqe_set_owner ( void *cq, unsigned int num_cqes ) {
02567         unsigned int i = 0;
02568         union shomronprm_completion_entry *cq_list;
02569 
02570         cq_list = (union shomronprm_completion_entry *)cq;
02571         for ( ; i < num_cqes ; i++ )
02572                 MLX_FILL_1 ( &cq_list[i].normal, 15, owner, 1 );
02573         return 0;
02574 }
02575 
02576 static mlx_size shomron_get_cqe_size ( void ) {
02577         return sizeof ( union shomronprm_completion_entry );
02578 }
02579 
02580 struct flexboot_nodnic_callbacks shomron_nodnic_callbacks = {
02581         .get_cqe_size = shomron_get_cqe_size,
02582         .fill_send_wqe[IB_QPT_ETH] = shomron_fill_eth_send_wqe,
02583         .fill_completion = shomron_fill_completion,
02584         .cqe_set_owner = shomron_cqe_set_owner,
02585         .irq = flexboot_nodnic_eth_irq,
02586         .tx_uar_send_doorbell_fn = shomron_tx_uar_send_db,
02587 };
02588 
02589 static int shomron_nodnic_is_supported ( struct pci_device *pci ) {
02590         if ( DEVICE_IS_CIB ( pci->device ) )
02591                 return 0;
02592 
02593         return flexboot_nodnic_is_supported ( pci );
02594 }
02595 /**************************************************************************/
02596 
02597 static int golan_probe ( struct pci_device *pci ) {
02598         int rc = -ENOTSUP;
02599 
02600         DBG ( "%s: start\n", __FUNCTION__ );
02601 
02602         if ( ! pci ) {
02603                 DBG ( "%s: PCI is NULL\n", __FUNCTION__ );
02604                 rc = -EINVAL;
02605                 goto probe_done;
02606         }
02607 
02608         if ( shomron_nodnic_is_supported ( pci ) ) {
02609                 DBG ( "%s: Using NODNIC driver\n", __FUNCTION__ );
02610                 rc = flexboot_nodnic_probe ( pci, &shomron_nodnic_callbacks, NULL );
02611         } else {
02612                 DBG ( "%s: Using normal driver\n", __FUNCTION__ );
02613                 rc = golan_probe_normal ( pci );
02614         }
02615 
02616 probe_done:
02617         DBG ( "%s: rc = %d\n", __FUNCTION__, rc );
02618         return rc;
02619 }
02620 
02621 static void golan_remove ( struct pci_device *pci ) {
02622         DBG ( "%s: start\n", __FUNCTION__ );
02623 
02624         if ( ! shomron_nodnic_is_supported ( pci ) ) {
02625                 DBG ( "%s: Using normal driver remove\n", __FUNCTION__ );
02626                 golan_remove_normal ( pci );
02627                 return;
02628         }
02629 
02630         DBG ( "%s: Using NODNIC driver remove\n", __FUNCTION__ );
02631 
02632         flexboot_nodnic_remove ( pci );
02633 
02634         DBG ( "%s: end\n", __FUNCTION__ );
02635 }
02636 
02637 static struct pci_device_id golan_nics[] = {
02638         PCI_ROM ( 0x15b3, 0x1011, "ConnectIB", "ConnectIB HCA driver, DevID 4113", 0 ),
02639         PCI_ROM ( 0x15b3, 0x1013, "ConnectX-4", "ConnectX-4 HCA driver, DevID 4115", 0 ),
02640         PCI_ROM ( 0x15b3, 0x1015, "ConnectX-4Lx", "ConnectX-4Lx HCA driver, DevID 4117", 0 ),
02641         PCI_ROM ( 0x15b3, 0x1017, "ConnectX-5", "ConnectX-5 HCA driver, DevID 4119", 0 ),
02642         PCI_ROM ( 0x15b3, 0x1019, "ConnectX-5EX", "ConnectX-5EX HCA driver, DevID 4121", 0 ),
02643 };
02644 
02645 struct pci_driver golan_driver __pci_driver = {
02646         .ids            = golan_nics,
02647         .id_count       = (sizeof(golan_nics) / sizeof(golan_nics[0])),
02648         .probe          = golan_probe,
02649         .remove         = golan_remove,
02650 };