iPXE
golan.c
Go to the documentation of this file.
1 /*
2  * Copyright (C) 2013-2015 Mellanox Technologies Ltd.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License as
6  * published by the Free Software Foundation; either version 2 of the
7  * License, or any later version.
8  *
9  * This program is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17  * 02110-1301, USA.
18  */
19 
20 FILE_LICENCE ( GPL2_OR_LATER );
21 
22 #include <errno.h>
23 #include <strings.h>
24 #include <ipxe/malloc.h>
25 #include <ipxe/umalloc.h>
26 #include <ipxe/infiniband.h>
27 #include <ipxe/ib_smc.h>
28 #include <ipxe/iobuf.h>
29 #include <ipxe/netdevice.h>
30 #include "flexboot_nodnic.h"
31 #include <ipxe/ethernet.h>
32 #include <ipxe/if_ether.h>
33 #include <usr/ifmgmt.h>
34 #include <ipxe/in.h>
35 #include <byteswap.h>
37 #include <config/general.h>
38 #include <ipxe/ipoib.h>
40 #include "nodnic_shomron_prm.h"
41 #include "golan.h"
44 
45 
/* True if the PCI device ID identifies a Connect-IB device.
 * Argument parenthesized so expressions like (base + off) compare correctly. */
#define DEVICE_IS_CIB( device ) ( (device) == 0x1011 )
47 
48 /******************************************************************************/
49 /************* Very simple memory management for umalloced pages **************/
50 /******* Temporary solution until full memory management is implemented *******/
51 /******************************************************************************/
52 
/* Node in a simple list of umalloc()ed pages.
 * NOTE(review): the scraped copy is missing a member line here (the page
 * address field) — confirm against upstream iPXE golan.c. */
struct golan_page {
	struct list_head list;
};
57 
58 static void golan_free_fw_areas ( struct golan *golan ) {
59  int i;
60 
61  for (i = 0; i < GOLAN_FW_AREAS_NUM; i++) {
62  if ( golan->fw_areas[i].area ) {
63  ufree ( golan->fw_areas[i].area );
64  golan->fw_areas[i].area = UNULL;
65  }
66  }
67 }
68 
69 static int golan_init_fw_areas ( struct golan *golan ) {
70  int rc = 0, i = 0;
71 
72  if ( ! golan ) {
73  rc = -EINVAL;
74  goto err_golan_init_fw_areas_bad_param;
75  }
76 
77  for (i = 0; i < GOLAN_FW_AREAS_NUM; i++)
78  golan->fw_areas[i].area = UNULL;
79 
80  return rc;
81 
82  err_golan_init_fw_areas_bad_param:
83  return rc;
84 }
85 
86 /******************************************************************************/
87 
/* Human-readable names for the QP states, indexed by state value
 * (RESET..ERR); used in debug output. */
const char *golan_qp_state_as_string[] = {
	"RESET",
	"INIT",
	"RTR",
	"RTS",
	"SQD",
	"SQE",
	"ERR"
};
97 
98 static inline int golan_check_rc_and_cmd_status ( struct golan_cmd_layout *cmd, int rc ) {
99  struct golan_outbox_hdr *out_hdr = ( struct golan_outbox_hdr * ) ( cmd->out );
100  if ( rc == -EBUSY ) {
101  DBG ( "HCA is busy (rc = -EBUSY)\n" );
102  return rc;
103  } else if ( out_hdr->status ) {
104  DBG("%s status = 0x%x - syndrom = 0x%x\n", __FUNCTION__,
105  out_hdr->status, be32_to_cpu(out_hdr->syndrome));
106  return out_hdr->status;
107  }
108  return 0;
109 }
110 
/* Update rc from the command status and jump to _label on any failure.
 * (Parameter name typo fixed: "_lable" -> "_label"; the name is purely
 * macro-internal, so call sites are unaffected.) */
#define GOLAN_CHECK_RC_AND_CMD_STATUS(_label) \
		do { \
			if ( ( rc = golan_check_rc_and_cmd_status ( cmd, rc ) ) ) \
				goto _label; \
		} while (0)

/* Evaluate (and debug-print) the command status without branching */
#define GOLAN_PRINT_RC_AND_CMD_STATUS golan_check_rc_and_cmd_status ( cmd, rc )
118 
119 
/* Command mailbox wrapper.
 * NOTE(review): the union members are missing from this scraped copy
 * (inner lines 122-124); other code accesses mailbox->mblock — restore
 * from upstream iPXE golan.c. */
struct mbox {
	union {
	};
};
127 
/* Floor of log2(mem) via find-last-set.
 * NOTE(review): mem must be non-zero — fls(0) would yield (uint32_t)-1. */
static inline uint32_t ilog2(uint32_t mem)
{
	return ( fls ( mem ) - 1 );
}
132 
/* Bytes of the mailbox block covered by the control signature: everything
 * except the data payload and the trailing two bytes. Expects a local
 * variable named "mailbox" in scope at the point of use. */
#define CTRL_SIG_SZ (sizeof(mailbox->mblock) - sizeof(mailbox->mblock.bdata) - 2)
134 
135 static inline u8 xor8_buf(void *buf, int len)
136 {
137  u8 sum = 0;
138  int i;
139  u8 *ptr = buf;
140 
141  for (i = 0; i < len; ++i)
142  sum ^= ptr[i];
143 
144  return sum;
145 }
146 
/**
 * Translate a firmware command status byte into a descriptive string.
 *
 * @v status	Firmware status byte (0xff used locally for timeout)
 * @ret str	Static description string
 */
static inline const char *cmd_status_str(u8 status)
{
	switch (status) {
	case 0x0: return "OK";
	case 0x1: return "internal error";
	case 0x2: return "bad operation";
	case 0x3: return "bad parameter";
	case 0x4: return "bad system state";
	case 0x5: return "bad resource";
	case 0x6: return "resource busy";
	case 0x8: return "limits exceeded";
	case 0x9: return "bad resource state";
	case 0xa: return "bad index";
	case 0xf: return "no resources";
	case 0x50: return "bad input length";
	case 0x51: return "bad output length";
	case 0x10: return "bad QP state";
	case 0x30: return "bad packet (discarded)";
	case 0x40: return "bad size too many outstanding CQEs";
	case 0xff: return "Command Timed Out";
	default: return "unknown status";
	}
}
170 
171 static inline uint16_t fw_rev_maj(struct golan *golan)
172 {
173  return be32_to_cpu(readl(&golan->iseg->fw_rev)) & 0xffff;
174 }
175 
/* Firmware minor revision (high 16 bits of iseg->fw_rev). */
static inline u16 fw_rev_min(struct golan *golan)
{
	return be32_to_cpu(readl(&golan->iseg->fw_rev)) >> 16;
}
180 
/* Firmware sub-minor revision (low 16 bits of iseg->cmdif_rev_fw_sub). */
static inline u16 fw_rev_sub(struct golan *golan)
{
	return be32_to_cpu(readl(&golan->iseg->cmdif_rev_fw_sub)) & 0xffff;
}
185 
/* Command interface revision (high 16 bits of iseg->cmdif_rev_fw_sub). */
static inline u16 cmdif_rev(struct golan *golan)
{
	return be32_to_cpu(readl(&golan->iseg->cmdif_rev_fw_sub)) >> 16;
}
190 
191 
/* Return a pointer to command slot idx in the command queue.
 * Note: pointer arithmetic on the void* cmd.addr relies on the GCC
 * extension treating sizeof(void) as 1; stride is 2^log_stride bytes. */
static inline struct golan_cmd_layout *get_cmd( struct golan *golan, int idx )
{
	return golan->cmd.addr + (idx << golan->cmd.log_stride);
}
196 
/**
 * Compute and store the XOR-8 signatures for a command and its mailboxes.
 *
 * @v golan	Golan device
 * @v cmd_idx	Command queue slot index
 * @v inbox_idx	Input mailbox index, or NO_MBOX
 * @v outbox_idx	Output mailbox index, or NO_MBOX
 *
 * Each used mailbox gets the command token and a control signature over
 * CTRL_SIG_SZ bytes starting at rsvd0; the command itself is signed last,
 * since cmd->sig must cover the final token/pointer values.
 */
static inline void golan_calc_sig(struct golan *golan, uint32_t cmd_idx,
				  uint32_t inbox_idx, uint32_t outbox_idx)
{
	struct golan_cmd_layout *cmd = get_cmd(golan, cmd_idx);
	struct mbox *mailbox = NULL;

	if (inbox_idx != NO_MBOX) {
		mailbox = GET_INBOX(golan, inbox_idx);
		mailbox->mblock.token = cmd->token;
		mailbox->mblock.ctrl_sig = ~xor8_buf(mailbox->mblock.rsvd0,
						     CTRL_SIG_SZ);
	}
	if (outbox_idx != NO_MBOX) {
		mailbox = GET_OUTBOX(golan, outbox_idx);
		mailbox->mblock.token = cmd->token;
		mailbox->mblock.ctrl_sig = ~xor8_buf(mailbox->mblock.rsvd0,
						     CTRL_SIG_SZ);
	}
	cmd->sig = ~xor8_buf(cmd, sizeof(*cmd));
}
217 
218 static inline void show_out_status(uint32_t *out)
219 {
220  DBG("%x\n", be32_to_cpu(out[0]));
221  DBG("%x\n", be32_to_cpu(out[1]));
222  DBG("%x\n", be32_to_cpu(out[2]));
223  DBG("%x\n", be32_to_cpu(out[3]));
224 }
225 /**
226  * Check if CMD has finished.
227  */
/* Non-zero once ownership of the command slot has returned from HW.
 * wmb() before the read keeps the check ordered after prior doorbell
 * writes. */
static inline uint32_t is_command_finished( struct golan *golan, int idx)
{
	wmb();
	return !(get_cmd( golan , idx )->status_own & CMD_OWNER_HW);
}
233 
234 /**
235  * Wait for Golan command completion
236  *
237  * @v golan Golan device
238  * @ret rc Return status code
239  */
static inline int golan_cmd_wait(struct golan *golan, int idx, const char *command)
{
	unsigned int wait;
	int rc = -EBUSY;	/* reported if the command never completes */

	/* Poll for completion, up to GOLAN_HCR_MAX_WAIT_MS milliseconds */
	for ( wait = GOLAN_HCR_MAX_WAIT_MS ; wait ; --wait ) {
		if (is_command_finished(golan, idx)) {
			rc = CMD_STATUS(golan, idx);
			/* Read barrier: status read before any outbox reads */
			rmb();
			break;
		} else {
			mdelay ( 1 );
		}
	}
	if (rc) {
		DBGC (golan ,"[%s]RC is %s[%x]\n", command, cmd_status_str(rc), rc);
	}

	/* Mark the command slot free again */
	golan->cmd_bm &= ~(1 << idx);
	return rc;
}
261 
262 /**
263  * Notify the HW that commands are ready
264  */
static inline void send_command(struct golan *golan)
{
	wmb(); //Make sure the command is visible in "memory".
	/* NOTE(review): the doorbell write that actually rings the HW is
	 * missing from this scraped copy (inner line 268) — restore from
	 * upstream iPXE golan.c. */
}
270 
/* Sign the command and mailboxes, then wait for its completion.
 * NOTE(review): the call that submits the command (inner line 275,
 * presumably send_command(golan)) is missing from this scraped copy. */
static inline int send_command_and_wait(struct golan *golan, uint32_t cmd_idx,
					uint32_t inbox_idx, uint32_t outbox_idx, const char *command)
{
	golan_calc_sig(golan, cmd_idx, inbox_idx, outbox_idx);
	return golan_cmd_wait(golan, cmd_idx, command);
}
278 
279 /**
280  * Prepare a FW command,
281  * In - comamnd idx (Must be valid)
282  * writes the command parameters.
283  */
/* Prepare command slot idx: zero it, fill lengths/opcode/opmod, attach
 * (and zero) the requested mailboxes, and mark the slot busy in cmd_bm.
 * NOTE(review): two signature lines are missing from this scraped copy
 * (inner 285 and 288 — the opcode/opmod and outlen parameters). */
static inline struct golan_cmd_layout *write_cmd(struct golan *golan, int idx,
		uint16_t inbox_idx,
		uint16_t outbox_idx, uint16_t inlen,
{
	struct golan_cmd_layout *cmd = get_cmd(golan , idx);
	struct golan_inbox_hdr *hdr = (struct golan_inbox_hdr *)cmd->in;
	/* Monotonic token shared across commands; copied to mailboxes */
	static uint8_t token;

	memset(cmd, 0, sizeof(*cmd));

	cmd->type = GOLAN_PCI_CMD_XPORT;
	cmd->status_own = CMD_OWNER_HW;
	cmd->outlen = cpu_to_be32(outlen);
	cmd->inlen = cpu_to_be32(inlen);
	hdr->opcode = cpu_to_be16(opcode);
	hdr->opmod = cpu_to_be16(opmod);

	if (inbox_idx != NO_MBOX) {
		memset(GET_INBOX(golan, inbox_idx), 0, MAILBOX_SIZE);
		cmd->in_ptr = VIRT_2_BE64_BUS(GET_INBOX(golan, inbox_idx));
		cmd->token = ++token;
	}
	if (outbox_idx != NO_MBOX) {
		memset(GET_OUTBOX(golan, outbox_idx), 0, MAILBOX_SIZE);
		cmd->out_ptr = VIRT_2_BE64_BUS(GET_OUTBOX(golan, outbox_idx));
	}

	golan->cmd_bm |= 1 << idx;

	assert ( cmd != NULL );
	return cmd;
}
318 
/* Issue the ENABLE_HCA command.
 * NOTE(review): the write_cmd() call head and the send/status-check lines
 * are missing from this scraped copy (inner 326, 330-332). */
static inline int golan_core_enable_hca(struct golan *golan)
{
	struct golan_cmd_layout *cmd;
	int rc = 0;

	DBGC(golan, "%s\n", __FUNCTION__);

			NO_MBOX, NO_MBOX,
			sizeof(struct golan_enable_hca_mbox_in),
			sizeof(struct golan_enable_hca_mbox_out));

	return rc;
}
335 
/* Issue the DISABLE_HCA command.
 * NOTE(review): the write_cmd() call head and send lines are missing from
 * this scraped copy (inner 341, 345-346). */
static inline void golan_disable_hca(struct golan *golan)
{
	struct golan_cmd_layout *cmd;
	int rc;

			NO_MBOX, NO_MBOX,
			sizeof(struct golan_disable_hca_mbox_in),
			sizeof(struct golan_disable_hca_mbox_out));
}
348 
/* Write back (possibly adjusted) HCA capabilities via SET_HCA_CAP.
 * NOTE(review): the write_cmd() head, the memcpy-into-inbox head and the
 * send/status lines are missing from this scraped copy (inner 356, 361,
 * 366, 368, 373-374). */
static inline int golan_set_hca_cap(struct golan *golan)
{
	struct golan_cmd_layout *cmd;
	int rc;

	DBGC(golan, "%s\n", __FUNCTION__);

			GEN_MBOX, NO_MBOX,
			sizeof(struct golan_cmd_set_hca_cap_mbox_in),
			sizeof(struct golan_cmd_set_hca_cap_mbox_out));

	DBGC( golan , "%s caps.uar_sz = %d\n", __FUNCTION__, golan->caps.uar_sz);
	DBGC( golan , "%s caps.log_pg_sz = %d\n", __FUNCTION__, golan->caps.log_pg_sz);
	DBGC( golan , "%s caps.log_uar_sz = %d\n", __FUNCTION__, be32_to_cpu(golan->caps.uar_page_sz));
	/* Force 4KB UAR page size before handing the caps back to FW */
	golan->caps.uar_page_sz = 0;

			&(golan->caps),
			sizeof(struct golan_hca_cap));

	//if command failed we should reset the caps in golan->caps
	return rc;
}
377 
/* Query HCA capabilities into golan->caps via QUERY_HCA_CAP.
 * NOTE(review): the write_cmd() head and the send line are missing from
 * this scraped copy (inner 383, 388). */
static inline int golan_qry_hca_cap(struct golan *golan)
{
	struct golan_cmd_layout *cmd;
	int rc = 0;

			NO_MBOX, GEN_MBOX,
			sizeof(struct golan_cmd_query_hca_cap_mbox_in),
			sizeof(struct golan_cmd_query_hca_cap_mbox_out));

	GOLAN_CHECK_RC_AND_CMD_STATUS( err_query_hca_cap );

	/* Copy the capability structure out of the general output mailbox */
	memcpy(&(golan->caps),
	       ((struct golan_hca_cap *)GET_OUTBOX(golan, GEN_MBOX)),
	       sizeof(struct golan_hca_cap));
err_query_hca_cap:
	return rc;
}
397 
/* Reclaim pages from firmware in MAX_PASE_MBOX-sized batches until the
 * requested count has been handled.
 * NOTE(review): the declaration of "in", the write_cmd() head and two
 * DBGC argument lines are missing from this scraped copy (inner 409,
 * 414-415, 432-433). */
static inline int golan_take_pages ( struct golan *golan, uint32_t pages, __be16 func_id ) {
	uint32_t out_num_entries = 0;
	int size_ibox = 0;
	int size_obox = 0;
	int rc = 0;

	DBGC(golan, "%s\n", __FUNCTION__);

	while ( pages > 0 ) {
		uint32_t pas_num = min(pages, MAX_PASE_MBOX);
		struct golan_cmd_layout *cmd;

		size_ibox = sizeof(struct golan_manage_pages_inbox) + (pas_num * GOLAN_PAS_SIZE);
		size_obox = sizeof(struct golan_manage_pages_outbox) + (pas_num * GOLAN_PAS_SIZE);

				size_ibox,
				size_obox);

		in = (struct golan_manage_pages_inbox *)cmd->in; /* Warning (WE CANT USE THE LAST 2 FIELDS) */

		in->func_id = func_id; /* Already BE */
		in->num_entries = cpu_to_be32(pas_num);

		if ( ( rc = send_command_and_wait(golan, MEM_CMD_IDX, MEM_MBOX, MEM_MBOX, __FUNCTION__) ) == 0 ) {
			/* FW reports how many pages it actually returned */
			out_num_entries = be32_to_cpu(((struct golan_manage_pages_outbox *)(cmd->out))->num_entries);
		} else {
			if ( rc == -EBUSY ) {
				DBGC (golan ,"HCA is busy (rc = -EBUSY)\n" );
			} else {
				DBGC (golan ,"%s: rc =0x%x[%s]<%x> syn 0x%x[0x%x] for %d pages\n",
				      __FUNCTION__, rc, cmd_status_str(rc),
				      be32_to_cpu(CMD_SYND(golan, MEM_CMD_IDX)), pas_num);
			}
			return rc;
		}

		pages -= out_num_entries;
	}
	DBGC( golan , "%s Pages handled\n", __FUNCTION__);
	return rc;
}
444 
/* Donate DMA pages to firmware: lazily umalloc() the backing area, then
 * hand page addresses to FW in MAX_PASE_MBOX-sized batches via the input
 * mailbox. On success golan->total_dma_pages tracks the donated count.
 * NOTE(review): the declaration of "in", the write_cmd() head and two
 * DBGC argument lines are missing from this scraped copy (inner 469,
 * 476-477, 504-505). */
static inline int golan_provide_pages ( struct golan *golan , uint32_t pages
		, __be16 func_id,struct golan_firmware_area *fw_area) {
	struct mbox *mailbox;
	int size_ibox = 0;
	int size_obox = 0;
	int rc = 0;
	userptr_t next_page_addr = UNULL;

	DBGC(golan, "%s\n", __FUNCTION__);
	/* Allocate the firmware area once; reused on later calls */
	if ( ! fw_area->area ) {
		fw_area->area = umalloc ( GOLAN_PAGE_SIZE * pages );
		if ( fw_area->area == UNULL ) {
			rc = -ENOMEM;
			DBGC (golan ,"Failed to allocated %d pages \n",pages);
			goto err_golan_alloc_fw_area;
		}
		fw_area->npages = pages;
	}
	assert ( fw_area->npages == pages );
	next_page_addr = fw_area->area;
	while ( pages > 0 ) {
		uint32_t pas_num = min(pages, MAX_PASE_MBOX);
		unsigned i, j;
		struct golan_cmd_layout *cmd;
		userptr_t addr = 0;

		mailbox = GET_INBOX(golan, MEM_MBOX);
		size_ibox = sizeof(struct golan_manage_pages_inbox) + (pas_num * GOLAN_PAS_SIZE);
		size_obox = sizeof(struct golan_manage_pages_outbox) + (pas_num * GOLAN_PAS_SIZE);

				size_ibox,
				size_obox);

		in = (struct golan_manage_pages_inbox *)cmd->in; /* Warning (WE CANT USE THE LAST 2 FIELDS) */

		in->func_id = func_id; /* Already BE */
		in->num_entries = cpu_to_be32(pas_num);

		/* Fill the PAS array with bus addresses of successive pages */
		for ( i = 0 , j = MANAGE_PAGES_PSA_OFFSET; i < pas_num; ++i ,++j,
		      next_page_addr += GOLAN_PAGE_SIZE ) {
			addr = next_page_addr;
			if (GOLAN_PAGE_MASK & user_to_phys(addr, 0)) {
				DBGC (golan ,"Addr not Page alligned [%lx %lx]\n", user_to_phys(addr, 0), addr);
			}
			mailbox->mblock.data[j] = USR_2_BE64_BUS(addr);
		}

		if ( ( rc = send_command_and_wait(golan, MEM_CMD_IDX, MEM_MBOX, MEM_MBOX, __FUNCTION__) ) == 0 ) {
			pages -= pas_num;
			golan->total_dma_pages += pas_num;
		} else {
			if ( rc == -EBUSY ) {
				DBGC (golan ,"HCA is busy (rc = -EBUSY)\n" );
			} else {
				DBGC (golan ,"%s: rc =0x%x[%s]<%x> syn 0x%x[0x%x] for %d pages\n",
				      __FUNCTION__, rc, cmd_status_str(rc),
				      be32_to_cpu(CMD_SYND(golan, MEM_CMD_IDX)), pas_num);
			}
			goto err_send_command;
		}
	}
	DBGC( golan , "%s Pages handled\n", __FUNCTION__);
	return 0;

err_send_command:
err_golan_alloc_fw_area:
	/* Go over In box and free pages */
	/* Send Error to FW */
	/* What is next - Disable HCA? */
	DBGC (golan ,"%s Failed (rc = 0x%x)\n", __FUNCTION__, rc);
	return rc;
}
522 
/* Query how many pages firmware wants (QUERY_PAGES), then either give
 * pages (golan_provide_pages) or take them back, per mode.
 * NOTE(review): the write_cmd() head, the send line, the line assigning
 * "pages" from the query output, and the take-pages branch body are
 * missing from this scraped copy (inner 536, 541, 544, 555). */
static inline int golan_handle_pages(struct golan *golan,
				     enum golan_qry_pages_mode qry,
				     enum golan_manage_pages_mode mode)
{
	struct golan_cmd_layout *cmd;

	int rc = 0;
	int32_t pages;
	uint16_t total_pages;
	__be16 func_id;

	DBGC(golan, "%s\n", __FUNCTION__);

			NO_MBOX, NO_MBOX,
			sizeof(struct golan_query_pages_inbox),
			sizeof(struct golan_query_pages_outbox));

	GOLAN_CHECK_RC_AND_CMD_STATUS( err_handle_pages_query );

	DBGC( golan , "%s pages needed: %d\n", __FUNCTION__, pages);

	func_id = QRY_PAGES_OUT(golan, MEM_CMD_IDX)->func_id;

	/* Firmware reports a signed count; take the absolute value */
	total_pages = (( pages >= 0 ) ? pages : ( pages * ( -1 ) ));

	if ( mode == GOLAN_PAGES_GIVE ) {
		rc = golan_provide_pages(golan, total_pages, func_id, & ( golan->fw_areas[qry-1] ));
	} else {
		golan->total_dma_pages = 0;
	}

	if ( rc ) {
		DBGC (golan , "Failed to %s pages (rc = %d) - DMA pages allocated = %d\n",
		      ( ( mode == GOLAN_PAGES_GIVE ) ? "give" : "take" ), rc , golan->total_dma_pages );
		return rc;
	}

	return 0;

err_handle_pages_query:
	DBGC (golan ,"%s Qyery pages failed (rc = 0x%x)\n", __FUNCTION__, rc);
	return rc;
}
571 
/* NOTE(review): the function signature is missing from this scraped copy
 * (inner line 572) — this appears to be an access-register stub whose body
 * is disabled; it only logs "Not implemented yet" and returns 0. Confirm
 * the real signature against upstream iPXE golan.c. */
{
#if 0
	write_cmd(golan, _CMD_IDX, GOLAN_CMD_OP_QUERY_PAGES, 0x0,
		  NO_MBOX, NO_MBOX,
		  sizeof(struct golan_reg_host_endianess),
		  sizeof(struct golan_reg_host_endianess));
	in->arg = cpu_to_be32(arg);
	in->register_id = cpu_to_be16(reg_num);
#endif
	DBGC (golan ," %s Not implemented yet\n", __FUNCTION__);
	return 0;
}
585 
/* Tear down the command interface.
 * NOTE(review): the body (the free_dma() calls for the command queue and
 * mailboxes, inner 588-590) is missing from this scraped copy. */
static inline void golan_cmd_uninit ( struct golan *golan )
{
}
592 
593 /**
594  * Initialise Golan Command Q parameters
595  * -- Alocate a 4kb page for the Command Q
596  * -- Read the stride and log num commands available
597  * -- Write the address to cmdq_phy_addr in iseg
598  * @v golan Golan device
599  */
static inline int golan_cmd_init ( struct golan *golan )
{
	int rc = 0;
	uint32_t addr_l_sz;

	/* NOTE(review): the three malloc_dma() if-heads (command queue,
	 * inbox, outbox allocations — inner 605, 609, 613) and the error-path
	 * free_dma() calls (inner 633, 635) are missing from this scraped
	 * copy; the stray braces below belong to them. */
		rc = -ENOMEM;
		goto malloc_dma_failed;
	}
		rc = -ENOMEM;
		goto malloc_dma_inbox_failed;
	}
		rc = -ENOMEM;
		goto malloc_dma_outbox_failed;
	}
	/* Read stride (low nibble) and log2 queue size (next nibble) */
	addr_l_sz = be32_to_cpu(readl(&golan->iseg->cmdq_addr_l_sz));

	golan->cmd.log_stride = addr_l_sz & 0xf;
	golan->cmd.size = 1 << (( addr_l_sz >> 4 ) & 0xf);

	/* Publish the command queue bus address to the init segment */
	addr_l_sz = virt_to_bus(golan->cmd.addr);
	writel(0 /* cpu_to_be32(golan->cmd.addr) >> 32 */, &golan->iseg->cmdq_addr_h);
	writel(cpu_to_be32(addr_l_sz), &golan->iseg->cmdq_addr_l_sz);
	wmb(); //Make sure the addr is visible in "memory".

	addr_l_sz = be32_to_cpu(readl(&golan->iseg->cmdq_addr_l_sz));

	DBGC( golan , "%s Command interface was initialized\n", __FUNCTION__);
	return 0;

malloc_dma_outbox_failed:
malloc_dma_inbox_failed:
malloc_dma_failed:
	DBGC (golan ,"%s Failed to initialize command interface (rc = 0x%x)\n",
	      __FUNCTION__, rc);
	return rc;
}
641 
/* Issue the INIT_HCA command.
 * NOTE(review): the write_cmd() head and the send/status lines are missing
 * from this scraped copy (inner 649, 654-655). */
static inline int golan_hca_init(struct golan *golan)
{
	struct golan_cmd_layout *cmd;
	int rc = 0;

	DBGC(golan, "%s\n", __FUNCTION__);

			NO_MBOX, NO_MBOX,
			sizeof(struct golan_cmd_init_hca_mbox_in),
			sizeof(struct golan_cmd_init_hca_mbox_out));

	return rc;
}
658 
/* Issue the TEARDOWN_HCA command with the given op_mod.
 * NOTE(review): the write_cmd() head (which consumes op_mod) and the
 * send/status lines are missing from this scraped copy (inner 666,
 * 671-672). The "compleated" spelling in the log message is preserved
 * verbatim. */
static inline void golan_teardown_hca(struct golan *golan, enum golan_teardown op_mod)
{
	struct golan_cmd_layout *cmd;
	int rc;

	DBGC (golan, "%s in\n", __FUNCTION__);

			NO_MBOX, NO_MBOX,
			sizeof(struct golan_cmd_teardown_hca_mbox_in),
			sizeof(struct golan_cmd_teardown_hca_mbox_out));

	DBGC (golan, "%s HCA teardown compleated\n", __FUNCTION__);
}
676 
/* Allocate a User Access Region: issue ALLOC_UAR, parse the returned UAR
 * index and ioremap() the corresponding page.
 * NOTE(review): the declaration of "out", the write_cmd() head, the send
 * line and the line computing uar->phys are missing from this scraped
 * copy (inner 681, 684, 689, 695). */
static inline int golan_alloc_uar(struct golan *golan)
{
	struct golan_uar *uar = &golan->uar;
	struct golan_cmd_layout *cmd;
	int rc;

			NO_MBOX, NO_MBOX,
			sizeof(struct golan_alloc_uar_mbox_in),
			sizeof(struct golan_alloc_uar_mbox_out));

	GOLAN_CHECK_RC_AND_CMD_STATUS( err_alloc_uar_cmd );
	out = (struct golan_alloc_uar_mbox_out *) ( cmd->out );

	/* UAR number lives in the low 24 bits of the output */
	uar->index = be32_to_cpu(out->uarn) & 0xffffff;

	uar->virt = (void *)(ioremap(uar->phys, GOLAN_PAGE_SIZE));

	DBGC( golan , "%s: UAR allocated with index 0x%x\n", __FUNCTION__, uar->index);
	return 0;

err_alloc_uar_cmd:
	DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
	return rc;
}
705 
/* Release the UAR via DEALLOC_UAR and clear golan->uar.index.
 * NOTE(review): the write_cmd() head and the send/unmap lines are missing
 * from this scraped copy (inner 714, 720-721). */
static void golan_dealloc_uar(struct golan *golan)
{
	struct golan_cmd_layout *cmd;
	uint32_t uar_index = golan->uar.index;
	int rc;

	DBGC (golan, "%s in\n", __FUNCTION__);

			NO_MBOX, NO_MBOX,
			sizeof(struct golan_free_uar_mbox_in),
			sizeof(struct golan_free_uar_mbox_out));

	((struct golan_free_uar_mbox_in *)(cmd->in))->uarn = cpu_to_be32(uar_index);
	golan->uar.index = 0;

	DBGC (golan, "%s UAR (0x%x) was destroyed\n", __FUNCTION__, uar_index);
}
726 
/* Ring the EQ doorbell with the new consumer index (arm selects the arm
 * vs. non-arm doorbell slot).
 * NOTE(review): the raw register write that stores "val" to "addr" is
 * missing from this scraped copy (inner 731), which is why both locals
 * look unused here. */
static void golan_eq_update_ci(struct golan_event_queue *eq, int arm)
{
	__be32 *addr = eq->doorbell + (arm ? 0 : 2);
	u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
	/* We still want ordering, just not swabbing, so add a barrier */
	wmb();
}
735 
/* Create the (single) event queue: allocate EQEs, hand ownership to HW,
 * issue CREATE_EQ with the EQE page address, and record eqn/doorbell.
 * NOTE(review): several lines are missing from this scraped copy — the
 * "in"/"out" declarations, the EQE allocation, the per-EQE owner
 * assignment, the write_cmd() head, the send line and the initial arm
 * call (inner 739, 741, 746, 754, 757, 762, 770, 778). */
static int golan_create_eq(struct golan *golan)
{
	struct golan_event_queue *eq = &golan->eq;
	struct golan_cmd_layout *cmd;
	int rc, i;

	eq->cons_index = 0;
	eq->size = GOLAN_NUM_EQES * sizeof(eq->eqes[0]);
	if (!eq->eqes) {
		rc = -ENOMEM;
		goto err_create_eq_eqe_alloc;
	}

	/* Set EQEs ownership bit to HW ownership */
	for (i = 0; i < GOLAN_NUM_EQES; ++i) {
	}

			GEN_MBOX, NO_MBOX,
			sizeof(struct golan_create_eq_mbox_in) + GOLAN_PAS_SIZE,
			sizeof(struct golan_create_eq_mbox_out));

	/* Fill the physical address of the page */
	in->pas[0] = VIRT_2_BE64_BUS( eq->eqes );
	/* log2(queue size) in the top byte, UAR index in the low 24 bits */
	in->ctx.log_sz_usr_page = cpu_to_be32((ilog2(GOLAN_NUM_EQES)) << 24 | golan->uar.index);
	DBGC( golan , "UAR idx %x (BE %x)\n", golan->uar.index, in->ctx.log_sz_usr_page);
	in->events_mask = cpu_to_be64(1 << GOLAN_EVENT_TYPE_PORT_CHANGE);

	GOLAN_CHECK_RC_AND_CMD_STATUS( err_create_eq_cmd );
	out = (struct golan_create_eq_mbox_out *)cmd->out;

	eq->eqn = out->eq_number;
	eq->doorbell = ((void *)golan->uar.virt) + GOLAN_EQ_DOORBELL_OFFSET;

	/* EQs are created in ARMED state */

	DBGC( golan , "%s: Event queue created (EQN = 0x%x)\n", __FUNCTION__, eq->eqn);
	return 0;

err_create_eq_cmd:
	free_dma ( eq->eqes , GOLAN_PAGE_SIZE );
err_create_eq_eqe_alloc:
	DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
	return rc;
}
789 
/* Destroy the event queue via DESTROY_EQ and free its EQE page.
 * Function name keeps the historical "destory" spelling — renaming would
 * break callers outside this view.
 * NOTE(review): the "in" declaration, write_cmd() head, send line and the
 * free_dma() of the EQEs are missing from this scraped copy (inner 793,
 * 799, 806-807, 809). */
static void golan_destory_eq(struct golan *golan)
{
	struct golan_cmd_layout *cmd;
	uint8_t eqn = golan->eq.eqn;
	int rc;

	DBGC (golan, "%s in\n", __FUNCTION__);

			NO_MBOX, NO_MBOX,
			sizeof(struct golan_destroy_eq_mbox_in),
			sizeof(struct golan_destroy_eq_mbox_out));

	in = GOLAN_MBOX_IN ( cmd, in );
	in->eqn = eqn;

	golan->eq.eqn = 0;

	DBGC( golan, "%s Event queue (0x%x) was destroyed\n", __FUNCTION__, eqn);
}
814 
/* Allocate a protection domain via ALLOC_PD; PDN stored in golan->pdn.
 * NOTE(review): the "out" declaration, write_cmd() head and send line are
 * missing from this scraped copy (inner 818, 821, 826). */
static int golan_alloc_pd(struct golan *golan)
{
	struct golan_cmd_layout *cmd;
	int rc;

			NO_MBOX, NO_MBOX,
			sizeof(struct golan_alloc_pd_mbox_in),
			sizeof(struct golan_alloc_pd_mbox_out));

	GOLAN_CHECK_RC_AND_CMD_STATUS( err_alloc_pd_cmd );
	out = (struct golan_alloc_pd_mbox_out *) ( cmd->out );

	/* PD number lives in the low 24 bits of the output */
	golan->pdn = (be32_to_cpu(out->pdn) & 0xffffff);
	DBGC( golan , "%s: Protection domain created (PDN = 0x%x)\n", __FUNCTION__,
	      golan->pdn);
	return 0;

err_alloc_pd_cmd:
	DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
	return rc;
}
839 
/* Release the protection domain and clear golan->pdn.
 * NOTE(review): the write_cmd() head and send line are missing from this
 * scraped copy (inner 848, 854-855). The visible sizes reuse the alloc_pd
 * mailbox structs — presumably identical layouts; confirm upstream. */
static void golan_dealloc_pd(struct golan *golan)
{
	struct golan_cmd_layout *cmd;
	uint32_t pdn = golan->pdn;
	int rc;

	DBGC (golan,"%s in\n", __FUNCTION__);

			NO_MBOX, NO_MBOX,
			sizeof(struct golan_alloc_pd_mbox_in),
			sizeof(struct golan_alloc_pd_mbox_out));

	((struct golan_dealloc_pd_mbox_in *)(cmd->in))->pdn = cpu_to_be32(pdn);
	golan->pdn = 0;

	DBGC (golan ,"%s Protection domain (0x%x) was destroyed\n", __FUNCTION__, pdn);
}
860 
/* Create a memory key for local read/write access over the whole address
 * space (GOLAN_MKEY_LEN64), bound to golan->pdn.
 * NOTE(review): the "in"/"out" declarations, write_cmd() head, the line
 * assigning "in", and the send line are missing from this scraped copy
 * (inner 863, 865, 868, 873, 875, 879). */
static int golan_create_mkey(struct golan *golan)
{
	struct golan_cmd_layout *cmd;
	int rc;

			GEN_MBOX, NO_MBOX,
			sizeof(struct golan_create_mkey_mbox_in),
			sizeof(struct golan_create_mkey_mbox_out));

	in->seg.flags_pd = cpu_to_be32(golan->pdn | GOLAN_MKEY_LEN64);
	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << GOLAN_CREATE_MKEY_SEG_QPN_BIT);

	GOLAN_CHECK_RC_AND_CMD_STATUS( err_create_mkey_cmd );
	out = (struct golan_create_mkey_mbox_out *) ( cmd->out );

	/* Low 24 bits are the key index; shifted left to leave the
	 * 8-bit variant part at zero */
	golan->mkey = ((be32_to_cpu(out->mkey) & 0xffffff) << 8);
	DBGC( golan , "%s: Got DMA Key for local access read/write (MKEY = 0x%x)\n",
	      __FUNCTION__, golan->mkey);
	return 0;
err_create_mkey_cmd:
	DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
	return rc;
}
891 
/* Destroy the memory key (index = mkey >> 8) and clear golan->mkey.
 * NOTE(review): the write_cmd() head and send line are missing from this
 * scraped copy (inner 898, 903-904). */
static void golan_destroy_mkey(struct golan *golan)
{
	struct golan_cmd_layout *cmd;
	u32 mkey = golan->mkey;
	int rc;

			NO_MBOX, NO_MBOX,
			sizeof(struct golan_destroy_mkey_mbox_in),
			sizeof(struct golan_destroy_mkey_mbox_out));
	((struct golan_destroy_mkey_mbox_in *)(cmd->in))->mkey = cpu_to_be32(mkey >> 8);
	golan->mkey = 0;

	DBGC( golan , "%s DMA Key (0x%x) for local access write was destroyed\n"
	      , __FUNCTION__, mkey);
}
910 
911 
912 /**
913  * Initialise Golan PCI parameters
914  *
915  * @v golan Golan device
916  */
static inline void golan_pci_init(struct golan *golan)
{
	struct pci_device *pci = golan->pci;

	/* Fix up PCI device */
	adjust_pci_device ( pci );

	/* Get HCA BAR */
	/* NOTE(review): the BAR-mapping lines are missing from this scraped
	 * copy (inner 925-926) — restore from upstream iPXE golan.c. */
}
928 
929 static inline struct golan *golan_alloc()
930 {
931  void *golan = zalloc(sizeof(struct golan));
932  if ( !golan )
933  goto err_zalloc;
934 
935  return golan;
936 
937 err_zalloc:
938  return NULL;
939 }
940 
941 /**
942  * Create completion queue
943  *
944  * @v ibdev Infiniband device
945  * @v cq Completion queue
946  * @ret rc Return status code
947  */
/**
 * Create completion queue: allocate CQEs and doorbell record, hand CQE
 * ownership to HW, issue CREATE_CQ bound to the device EQ and UAR.
 *
 * NOTE(review): several lines are missing from this scraped copy — the
 * "in"/"out" declarations, the doorbell_record and CQE allocations, the
 * op_own continuation, the write_cmd() head, the send line and the
 * error-path free of the doorbell record (inner 954-955, 965-966, 972,
 * 981-982, 985, 990, 999, 1013). Restore from upstream iPXE golan.c.
 */
static int golan_create_cq(struct ib_device *ibdev,
			   struct ib_completion_queue *cq)
{
	struct golan *golan = ib_get_drvdata(ibdev);
	struct golan_completion_queue *golan_cq;
	struct golan_cmd_layout *cmd;
	int rc;
	unsigned int i;

	golan_cq = zalloc(sizeof(*golan_cq));
	if (!golan_cq) {
		rc = -ENOMEM;
		goto err_create_cq;
	}
	golan_cq->size = sizeof(golan_cq->cqes[0]) * cq->num_cqes;
	if (!golan_cq->doorbell_record) {
		rc = -ENOMEM;
		goto err_create_cq_db_alloc;
	}

	if (!golan_cq->cqes) {
		rc = -ENOMEM;
		goto err_create_cq_cqe_alloc;
	}

	/* Set CQEs ownership bit to HW ownership */
	for (i = 0; i < cq->num_cqes; ++i) {
		golan_cq->cqes[i].op_own = ((GOLAN_CQE_OPCODE_NOT_VALID <<
	}

			GEN_MBOX, NO_MBOX,
			sizeof(struct golan_create_cq_mbox_in) + GOLAN_PAS_SIZE,
			sizeof(struct golan_create_cq_mbox_out));

	/* Fill the physical address of the page */
	in->pas[0] = VIRT_2_BE64_BUS( golan_cq->cqes );
	in->ctx.cqe_sz_flags = GOLAN_CQE_SIZE_64 << 5;
	in->ctx.log_sz_usr_page = cpu_to_be32(((ilog2(cq->num_cqes)) << 24) | golan->uar.index);
	in->ctx.c_eqn = cpu_to_be16(golan->eq.eqn);
	in->ctx.db_record_addr = VIRT_2_BE64_BUS(golan_cq->doorbell_record);

	GOLAN_CHECK_RC_AND_CMD_STATUS( err_create_cq_cmd );
	out = (struct golan_create_cq_mbox_out *) ( cmd->out );

	/* CQ number lives in the low 24 bits of the output */
	cq->cqn = (be32_to_cpu(out->cqn) & 0xffffff);

	ib_cq_set_drvdata(cq, golan_cq);

	DBGC( golan , "%s CQ created successfully (CQN = 0x%lx)\n", __FUNCTION__, cq->cqn);
	return 0;

err_create_cq_cmd:
	free_dma( golan_cq->cqes , GOLAN_PAGE_SIZE );
err_create_cq_cqe_alloc:
err_create_cq_db_alloc:
	free ( golan_cq );
err_create_cq:
	DBGC (golan ,"%s out rc = 0x%x\n", __FUNCTION__, rc);
	return rc;
}
1020 
1021 /**
1022  * Destroy completion queue
1023  *
1024  * @v ibdev Infiniband device
1025  * @v cq Completion queue
1026  */
/* Destroy the CQ via DESTROY_CQ and free its CQE page and driver state.
 * NOTE(review): the write_cmd() head, send line and the free of the
 * doorbell record are missing from this scraped copy (inner 1038,
 * 1043-1044, 1049). */
static void golan_destroy_cq(struct ib_device *ibdev,
			     struct ib_completion_queue *cq)
{
	struct golan *golan = ib_get_drvdata(ibdev);
	struct golan_completion_queue *golan_cq = ib_cq_get_drvdata(cq);
	struct golan_cmd_layout *cmd;
	uint32_t cqn = cq->cqn;
	int rc;

	DBGC (golan, "%s in\n", __FUNCTION__);

			NO_MBOX, NO_MBOX,
			sizeof(struct golan_destroy_cq_mbox_in),
			sizeof(struct golan_destroy_cq_mbox_out));
	((struct golan_destroy_cq_mbox_in *)(cmd->in))->cqn = cpu_to_be32(cqn);
	cq->cqn = 0;

	ib_cq_set_drvdata(cq, NULL);
	free_dma ( golan_cq->cqes , GOLAN_PAGE_SIZE );
	free(golan_cq);

	DBGC (golan, "%s CQ number 0x%x was destroyed\n", __FUNCTION__, cqn);
}
1054 
/* Poll the CQ once so outstanding completions are processed. */
static void golan_cq_clean(struct ib_completion_queue *cq)
{
	ib_poll_cq(cq->ibdev, cq);
}
1059 
/* Map an Infiniband QP type to the Golan service type, or -EINVAL for
 * unsupported types (e.g. IB_QPT_ETH).
 * NOTE(review): the function signature is missing from this scraped copy
 * (inner line 1060); the call site in golan_create_qp_aux names it
 * golan_qp_type_to_st — confirm against upstream iPXE golan.c. */
{
	int qpt = type;

	switch (qpt) {
	case IB_QPT_RC:
		return GOLAN_QP_ST_RC;
	case IB_QPT_UD:
		return GOLAN_QP_ST_UD;
	case IB_QPT_SMI:
		return GOLAN_QP_ST_QP0;
	case IB_QPT_GSI:
		return GOLAN_QP_ST_QP1;
	case IB_QPT_ETH:
	default:
		return -EINVAL;
	}
}
#if 0
/* Disabled helper: true for QP types that map to special QPs (QP0/QP1). */
static int golan_is_special_qp(enum ib_queue_pair_type type)
{
	return (type == IB_QPT_GSI || type == IB_QPT_SMI);
}
#endif
/**
 * Create a queue pair: size and allocate the receive/send work queues and
 * (for UD/SMI/GSI) the GRH scratch area, invalidate all receive WQEs,
 * allocate the doorbell record, then issue CREATE_QP.
 *
 * @v ibdev	Infiniband device
 * @v qp	Queue pair
 * @v qpn	Filled with the hardware-assigned QP number on success
 * @ret rc	Return status code
 *
 * NOTE(review): this scraped copy is missing several lines — the "in"
 * declaration, the RQ-size bounds-check head and its DBGC arguments, the
 * max_wqe_sz argument line, the WQE malloc_dma(), the write_cmd() head,
 * the "in = ..." assignment, continuation lines of the flags/rq/sq
 * context fields, and the send_command_and_wait line (inner 1090, 1113,
 * 1115, 1125, 1141, 1171, 1176, 1185-1186, 1190, 1193, 1198). Restore
 * from upstream iPXE golan.c.
 */
static int golan_create_qp_aux(struct ib_device *ibdev,
			       struct ib_queue_pair *qp,
			       int *qpn)
{
	struct golan *golan = ib_get_drvdata(ibdev);
	struct golan_queue_pair *golan_qp;
	struct golan_cmd_layout *cmd;
	struct golan_wqe_data_seg *data;
	struct golan_create_qp_mbox_out *out;
	uint32_t wqe_size_in_bytes;
	uint32_t max_qp_size_in_wqes;
	unsigned int i;
	int rc;

	golan_qp = zalloc(sizeof(*golan_qp));
	if (!golan_qp) {
		rc = -ENOMEM;
		goto err_create_qp;
	}

	/* Only UD-like QP types carry a GRH per receive WQE */
	if ( ( qp->type == IB_QPT_SMI ) || ( qp->type == IB_QPT_GSI ) ||
	     ( qp->type == IB_QPT_UD ) ) {
		golan_qp->rq.grh_size = ( qp->recv.num_wqes *
					  sizeof ( golan_qp->rq.grh[0] ));
	}

	/* Calculate receive queue size */
	golan_qp->rq.size = qp->recv.num_wqes * GOLAN_RECV_WQE_SIZE;
	DBGC (golan ,"%s receive wqe size [%zd] > max wqe size [%d]\n", __FUNCTION__,
	rc = -EINVAL;
	goto err_create_qp_rq_size;
	}

	wqe_size_in_bytes = sizeof(golan_qp->sq.wqes[0]);
	/* Calculate send queue size */
	if (wqe_size_in_bytes > be16_to_cpu(golan->caps.max_wqe_sz_sq)) {
		DBGC (golan ,"%s send WQE size [%d] > max WQE size [%d]\n", __FUNCTION__,
		      wqe_size_in_bytes,
		rc = -EINVAL;
		goto err_create_qp_sq_wqe_size;
	}
	golan_qp->sq.size = (qp->send.num_wqes * wqe_size_in_bytes);
	max_qp_size_in_wqes = (1 << ((uint32_t)(golan->caps.log_max_qp_sz)));
	if (qp->send.num_wqes > max_qp_size_in_wqes) {
		DBGC (golan ,"%s send wq size [%d] > max wq size [%d]\n", __FUNCTION__,
		      golan_qp->sq.size, max_qp_size_in_wqes);
		rc = -EINVAL;
		goto err_create_qp_sq_size;
	}

	golan_qp->size = golan_qp->sq.size + golan_qp->rq.size;

	/* allocate dma memory for WQEs (1 page is enough) - should change it */
	if (!golan_qp->wqes) {
		rc = -ENOMEM;
		goto err_create_qp_wqe_alloc;
	}
	/* RQ occupies the start of the allocation; SQ follows it */
	golan_qp->rq.wqes = golan_qp->wqes;
	golan_qp->sq.wqes = golan_qp->wqes + golan_qp->rq.size;//(union golan_send_wqe *)&
	//(((struct golan_recv_wqe_ud *)(golan_qp->wqes))[qp->recv.num_wqes]);

	if ( golan_qp->rq.grh_size ) {
		golan_qp->rq.grh = ( golan_qp->wqes +
				     golan_qp->sq.size +
				     golan_qp->rq.size );
	}

	/* Invalidate all WQEs */
	data = &golan_qp->rq.wqes[0].data[0];
	for ( i = 0 ; i < ( golan_qp->rq.size / sizeof ( *data ) ); i++ ){
		data->lkey = cpu_to_be32 ( GOLAN_INVALID_LKEY );
		data++;
	}

	golan_qp->doorbell_record = malloc_dma(sizeof(struct golan_qp_db),
					      sizeof(struct golan_qp_db));
	if (!golan_qp->doorbell_record) {
		rc = -ENOMEM;
		goto err_create_qp_db_alloc;
	}
	memset(golan_qp->doorbell_record, 0, sizeof(struct golan_qp_db));

			GEN_MBOX, NO_MBOX,
			sizeof(struct golan_create_qp_mbox_in) + GOLAN_PAS_SIZE,
			sizeof(struct golan_create_qp_mbox_out));

	/* Fill the physical address of the page */
	in->pas[0] = VIRT_2_BE64_BUS(golan_qp->wqes);
	in->ctx.qp_counter_set_usr_page = cpu_to_be32(golan->uar.index);

	in->ctx.flags_pd = cpu_to_be32(golan->pdn);
	in->ctx.flags = cpu_to_be32((golan_qp_type_to_st(qp->type)
				     << GOLAN_QP_CTX_ST_BIT) |
// cgs set to 0, initialy.
// atomic mode
	in->ctx.rq_size_stride = ((ilog2(qp->recv.num_wqes) <<
				  (sizeof(golan_qp->rq.wqes[0]) / GOLAN_RECV_WQE_SIZE));
	in->ctx.sq_crq_size = cpu_to_be16(ilog2(golan_qp->sq.size / GOLAN_SEND_WQE_BB_SIZE)
	in->ctx.cqn_send = cpu_to_be32(qp->send.cq->cqn);
	in->ctx.cqn_recv = cpu_to_be32(qp->recv.cq->cqn);
	in->ctx.db_rec_addr = VIRT_2_BE64_BUS(golan_qp->doorbell_record);

	GOLAN_CHECK_RC_AND_CMD_STATUS( err_create_qp_cmd );
	out = (struct golan_create_qp_mbox_out *)cmd->out;

	*qpn = (be32_to_cpu(out->qpn) & 0xffffff);
	/*
	 * Hardware wants QPN written in big-endian order (after
	 * shifting) for send doorbell. Precompute this value to save
	 * a little bit when posting sends.
	 */
	golan_qp->doorbell_qpn = cpu_to_be32(*qpn << 8);
	golan_qp->state = GOLAN_IB_QPS_RESET;

	ib_qp_set_drvdata(qp, golan_qp);

	return 0;

err_create_qp_cmd:
	free_dma(golan_qp->doorbell_record, sizeof(struct golan_qp_db));
err_create_qp_db_alloc:
	free_dma ( golan_qp->wqes, GOLAN_PAGE_SIZE );
err_create_qp_wqe_alloc:
err_create_qp_sq_size:
err_create_qp_sq_wqe_size:
err_create_qp_rq_size:
	free ( golan_qp );
err_create_qp:
	return rc;
}
1227 
1228 /**
1229  * Create queue pair
1230  *
1231  * @v ibdev Infiniband device
1232  * @v qp Queue pair
1233  * @ret rc Return status code
1234  */
1235 static int golan_create_qp(struct ib_device *ibdev,
1236  struct ib_queue_pair *qp)
1237 {
1238  int rc, qpn = -1;
1239 
1240  switch (qp->type) {
1241  case IB_QPT_UD:
1242  case IB_QPT_SMI:
1243  case IB_QPT_GSI:
1244  rc = golan_create_qp_aux(ibdev, qp, &qpn);
1245  if (rc) {
1246  DBG ( "%s Failed to create QP (rc = 0x%x)\n", __FUNCTION__, rc);
1247  return rc;
1248  }
1249  qp->qpn = qpn;
1250 
1251  break;
1252  case IB_QPT_ETH:
1253  case IB_QPT_RC:
1254  default:
1255  DBG ( "%s unsupported QP type (0x%x)\n", __FUNCTION__, qp->type);
1256  return -EINVAL;
1257  }
1258 
1259  return 0;
1260 }
1261 
/**
 * Fill the RESET->INIT transition fields of a modify-QP mailbox.
 *
 * @v ibdev	Infiniband device (supplies the physical port number)
 * @v qp	Queue pair — read for its qkey, so the __unused marker looks stale
 * @ret rc	Always 0
 */
static int golan_modify_qp_rst_to_init(struct ib_device *ibdev,
	struct ib_queue_pair *qp __unused,
	/* NOTE(review): the third parameter line (the mailbox-input pointer
	 * `struct golan_modify_qp_mbox_in_data *in`) was dropped from this
	 * extract — confirm against upstream golan.c */
{
	int rc = 0;

	/* Program the QKey taken from the iPXE queue pair */
	in->ctx.qkey = cpu_to_be32((uint32_t)(qp->qkey));

	/* Primary path uses this device's port, pkey index 0 */
	in->ctx.pri_path.port = ibdev->port;
	/* NOTE(review): one source line is missing here (dropped in extraction) */
	in->ctx.pri_path.pkey_index = 0;
	/* QK is 0 */
	/* QP cntr set 0 */
	return rc;
}
1277 
/* NOTE(review): the opening signature line of this helper was dropped in
 * extraction; by its position in golan_modify_qp_methods[] it is the
 * INIT->RTR transition filler — verify the name against upstream. */
	struct ib_queue_pair *qp __unused,
	/* NOTE(review): the mailbox-input parameter line is also missing here */
{
	int rc = 0;

	/* No optional parameters are modified for this transition */
	in->optparam = 0;
	return rc;
}
1287 
/* NOTE(review): the opening signature line of this helper was dropped in
 * extraction; by its position in golan_modify_qp_methods[] it is the
 * RTR->RTS transition filler — verify the name against upstream. */
	struct ib_queue_pair *qp __unused,
	/* NOTE(review): the mailbox-input parameter line is also missing here */
{
	int rc = 0;

	/* No optional parameters are modified for this transition */
	in->optparam = 0;
	/* In good flow psn in 0 */
	return rc;
}
1298 
/**
 * Force a queue pair back to the RESET state via a 2RST_QP command.
 *
 * @v ibdev	Infiniband device
 * @v qp	Queue pair to reset
 * @ret rc	Return status code (0 on success)
 */
static int golan_modify_qp_to_rst(struct ib_device *ibdev,
				  struct ib_queue_pair *qp)
{
	struct golan *golan = ib_get_drvdata(ibdev);
	struct golan_queue_pair *golan_qp = ib_qp_get_drvdata(qp);
	struct golan_cmd_layout *cmd;
	int rc;

	/* NOTE(review): the write_cmd(... GOLAN_CMD_OP_2RST_QP ...) call line
	 * was dropped in extraction; the following lines are its arguments */
		NO_MBOX, NO_MBOX,
		sizeof(struct golan_modify_qp_mbox_in),
		sizeof(struct golan_modify_qp_mbox_out));
	((struct golan_modify_qp_mbox_in *)(cmd->in))->qpn = cpu_to_be32(qp->qpn);
	/* NOTE(review): the send_command_and_wait() line (which sets rc) was
	 * dropped in extraction */
	GOLAN_CHECK_RC_AND_CMD_STATUS( err_modify_qp_2rst_cmd );

	/* Mirror the hardware state in the driver's shadow state */
	golan_qp->state = GOLAN_IB_QPS_RESET;
	DBGC( golan , "%s QP number 0x%lx was modified to RESET\n",
	      __FUNCTION__, qp->qpn);

	return 0;

err_modify_qp_2rst_cmd:
	DBGC (golan ,"%s Failed to modify QP number 0x%lx (rc = 0x%x)\n",
	      __FUNCTION__, qp->qpn, rc);
	return rc;
}
1326 
/* Per-state mailbox fillers, indexed by the QP's current shadow state and
 * walked by golan_modify_qp() to advance RESET -> INIT -> RTR -> RTS.
 * NOTE(review): the initializer entries (the three transition helpers, in
 * order) were dropped in extraction — restore from upstream golan.c. */
static int (*golan_modify_qp_methods[])(struct ib_device *ibdev,
					struct ib_queue_pair *qp,
					struct golan_modify_qp_mbox_in_data *in) = {

};
1335 
/**
 * Walk a queue pair through the state machine up to RTS, issuing one
 * modify-QP firmware command per transition.
 *
 * @v ibdev	Infiniband device
 * @v qp	Queue pair
 * @ret rc	Return status code (0 when the QP reaches RTS)
 */
static int golan_modify_qp(struct ib_device *ibdev,
			   struct ib_queue_pair *qp)
{
	struct golan *golan = ib_get_drvdata(ibdev);
	struct golan_queue_pair *golan_qp = ib_qp_get_drvdata(qp);
	/* NOTE(review): the declaration of the mailbox-input pointer `in`
	 * (used below) was dropped in extraction */
	struct golan_cmd_layout *cmd;
	enum golan_ib_qp_state prev_state;
	int rc;
	int modify_cmd[] = {GOLAN_CMD_OP_RST2INIT_QP,
	/* NOTE(review): the remaining opcode entries and the closing brace of
	 * modify_cmd[] were dropped in extraction (INIT2RTR/RTR2RTS opcodes) */

	while (golan_qp->state < GOLAN_IB_QPS_RTS) {
		prev_state = golan_qp->state;
		/* One command per transition, opcode chosen by current state */
		cmd = write_cmd(golan, DEF_CMD_IDX, modify_cmd[golan_qp->state], 0x0,
				GEN_MBOX, NO_MBOX,
				sizeof(struct golan_modify_qp_mbox_in),
				sizeof(struct golan_modify_qp_mbox_out));

		/* NOTE(review): the line assigning `in` from the command mailbox
		 * was dropped in extraction */
		((struct golan_modify_qp_mbox_in *)(cmd->in))->qpn = cpu_to_be32(qp->qpn);
		rc = golan_modify_qp_methods[golan_qp->state](ibdev, qp, in);
		if (rc) {
			goto err_modify_qp_fill_inbox;
		}
//		in->ctx.qp_counter_set_usr_page = cpu_to_be32(golan->uar.index);
		/* NOTE(review): the send_command_and_wait() line (which sets rc)
		 * was dropped in extraction */
		GOLAN_CHECK_RC_AND_CMD_STATUS( err_modify_qp_cmd );

		/* Advance the shadow state to match hardware */
		++(golan_qp->state);

		DBGC( golan , "%s QP number 0x%lx was modified from %s to %s\n",
		      __FUNCTION__, qp->qpn, golan_qp_state_as_string[prev_state],
		      golan_qp_state_as_string[golan_qp->state]);
	}

	DBGC( golan , "%s QP number 0x%lx is ready to receive/send packets.\n",
	      __FUNCTION__, qp->qpn);
	return 0;

err_modify_qp_cmd:
err_modify_qp_fill_inbox:
	DBGC (golan ,"%s Failed to modify QP number 0x%lx (rc = 0x%x)\n",
	      __FUNCTION__, qp->qpn, rc);
	return rc;
}
1383 
1384 /**
1385  * Destroy queue pair
1386  *
1387  * @v ibdev Infiniband device
1388  * @v qp Queue pair
1389  */
static void golan_destroy_qp(struct ib_device *ibdev,
			     struct ib_queue_pair *qp)
{
	struct golan *golan = ib_get_drvdata(ibdev);
	struct golan_queue_pair *golan_qp = ib_qp_get_drvdata(qp);
	struct golan_cmd_layout *cmd;
	unsigned long qpn = qp->qpn;
	int rc;

	DBGC (golan, "%s in\n", __FUNCTION__);

	/* Hardware requires the QP to be in RESET before destruction;
	 * a failure here is logged but destruction proceeds anyway */
	if (golan_qp->state != GOLAN_IB_QPS_RESET) {
		if (golan_modify_qp_to_rst(ibdev, qp)) {
			DBGC (golan ,"%s Failed to modify QP 0x%lx to RESET\n", __FUNCTION__,
			      qp->qpn);
		}
	}

	/* Flush any completions still referencing this QP */
	if (qp->recv.cq) {
		golan_cq_clean(qp->recv.cq);
	}
	if (qp->send.cq && (qp->send.cq != qp->recv.cq)) {
		golan_cq_clean(qp->send.cq);
	}

	/* NOTE(review): the write_cmd(... DESTROY_QP ...) call line was dropped
	 * in extraction; the following lines are its arguments */
		NO_MBOX, NO_MBOX,
		sizeof(struct golan_destroy_qp_mbox_in),
		sizeof(struct golan_destroy_qp_mbox_out));
	((struct golan_destroy_qp_mbox_in *)(cmd->in))->qpn = cpu_to_be32(qpn);
	/* NOTE(review): the send_command_and_wait()/status-print lines (which
	 * use rc) were dropped in extraction */
	qp->qpn = 0;

	/* NOTE(review): one more source line is missing here */
	free_dma(golan_qp->doorbell_record, sizeof(struct golan_qp_db));
	free_dma ( golan_qp->wqes, GOLAN_PAGE_SIZE );
	free(golan_qp);

	DBGC( golan ,"%s QP 0x%lx was destroyed\n", __FUNCTION__, qpn);
}
1431 
1432 /**
1433  * Calculate transmission rate
1434  *
1435  * @v av Address vector
1436  * @ret golan_rate Golan rate
1437  */
1438 static unsigned int golan_rate(enum ib_rate rate) {
1439  return (((rate >= IB_RATE_2_5) && (rate <= IB_RATE_120)) ? (rate + 5) : 0);
1440 }
1441 
1442 /**
1443  * Post send work queue entry
1444  *
1445  * @v ibdev Infiniband device
1446  * @v qp Queue pair
1447  * @v av Address vector
1448  * @v iobuf I/O buffer
1449  * @ret rc Return status code
1450  */
static int golan_post_send(struct ib_device *ibdev,
			   struct ib_queue_pair *qp,
			   struct ib_address_vector *av,
			   struct io_buffer *iobuf)
{
	struct golan *golan = ib_get_drvdata(ibdev);
	struct golan_queue_pair *golan_qp = ib_qp_get_drvdata(qp);
	struct golan_send_wqe_ud *wqe = NULL;
	struct golan_av *datagram = NULL;
	unsigned long wqe_idx_mask;
	unsigned long wqe_idx;
	struct golan_wqe_data_seg *data = NULL;
	struct golan_wqe_ctrl_seg *ctrl = NULL;


	/* Ring slot still holding a buffer means the queue is full */
	wqe_idx_mask = (qp->send.num_wqes - 1);
	wqe_idx = (qp->send.next_idx & wqe_idx_mask);
	if (qp->send.iobufs[wqe_idx]) {
		DBGC (golan ,"%s Send queue of QPN 0x%lx is full\n", __FUNCTION__, qp->qpn);
		return -ENOMEM;
	}

	qp->send.iobufs[wqe_idx] = iobuf;

	// change to this
	//wqe_size_in_octa_words = golan_qp->sq.wqe_size_in_wqebb >> 4;

	wqe = &golan_qp->sq.wqes[wqe_idx].ud;

	//CHECK HW OWNERSHIP BIT ???

	memset(wqe, 0, sizeof(*wqe));

	/* Control segment: opcode + WQE index (doorbell_qpn is pre-byteswapped) */
	ctrl = &wqe->ctrl;
	ctrl->opmod_idx_opcode = cpu_to_be32(GOLAN_SEND_OPCODE |
					     ((u32)(golan_qp->sq.next_idx) <<
	/* NOTE(review): the shift-amount line (WQE index bit position constant)
	 * was dropped in extraction */
	ctrl->qpn_ds = cpu_to_be32(GOLAN_SEND_UD_WQE_SIZE >> 4) |
		       golan_qp->doorbell_qpn;
	ctrl->fm_ce_se = 0x8;//10 - 0 - 0

	/* Single gather entry pointing at the whole I/O buffer */
	data = &wqe->data;
	data->byte_count = cpu_to_be32(iob_len(iobuf));
	data->lkey = cpu_to_be32(golan->mkey);
	data->addr = VIRT_2_BE64_BUS(iobuf->data);

	/* Datagram (UD address vector) segment */
	datagram = &wqe->datagram;
	datagram->key.qkey.qkey = cpu_to_be32(av->qkey);
	datagram->dqp_dct = cpu_to_be32((1 << 31) | av->qpn);
	datagram->stat_rate_sl = ((golan_rate(av->rate) << 4) | av->sl);
	datagram->fl_mlid = (ibdev->lid & 0x007f); /* take only the 7 low bits of the LID */
	datagram->rlid = cpu_to_be16(av->lid);
	datagram->grh_gid_fl = cpu_to_be32(av->gid_present << 30);
	memcpy(datagram->rgid, av->gid.bytes, 16 /* sizeof(datagram->rgid) */);

	/*
	 * Make sure that descriptors are written before
	 * updating doorbell record and ringing the doorbell
	 */
	++(qp->send.next_idx);
	golan_qp->sq.next_idx = (golan_qp->sq.next_idx + GOLAN_WQEBBS_PER_SEND_UD_WQE);
	golan_qp->doorbell_record->send_db = cpu_to_be16(golan_qp->sq.next_idx);
	wmb();
	/* Ring the doorbell: write the first 8 bytes of the control segment,
	 * alternating between the two UAR doorbell buffers */
	writeq(*((__be64 *)ctrl), golan->uar.virt
	       + ( ( golan_qp->sq.next_idx & 0x1 ) ? DB_BUFFER0_EVEN_OFFSET
	       : DB_BUFFER0_ODD_OFFSET ) );
	return 0;
}
1518 
1519 /**
1520  * Post receive work queue entry
1521  *
1522  * @v ibdev Infiniband device
1523  * @v qp Queue pair
1524  * @v iobuf I/O buffer
1525  * @ret rc Return status code
1526  */
1527 static int golan_post_recv(struct ib_device *ibdev,
1528  struct ib_queue_pair *qp,
1529  struct io_buffer *iobuf)
1530 {
1531  struct golan *golan = ib_get_drvdata(ibdev);
1532  struct golan_queue_pair *golan_qp = ib_qp_get_drvdata(qp);
1533  struct ib_work_queue *wq = &qp->recv;
1534  struct golan_recv_wqe_ud *wqe;
1535  struct ib_global_route_header *grh;
1536  struct golan_wqe_data_seg *data;
1537  unsigned int wqe_idx_mask;
1538 
1539  /* Allocate work queue entry */
1540  wqe_idx_mask = (wq->num_wqes - 1);
1541  if (wq->iobufs[wq->next_idx & wqe_idx_mask]) {
1542  DBGC (golan ,"%s Receive queue of QPN 0x%lx is full\n", __FUNCTION__, qp->qpn);
1543  return -ENOMEM;
1544  }
1545 
1546  wq->iobufs[wq->next_idx & wqe_idx_mask] = iobuf;
1547  wqe = & golan_qp->rq.wqes[wq->next_idx & wqe_idx_mask];
1548 
1549  memset(wqe, 0, sizeof(*wqe));
1550  data = &wqe->data[0];
1551  if ( golan_qp->rq.grh ) {
1552  grh = &golan_qp->rq.grh[wq->next_idx & wqe_idx_mask];
1553  data->byte_count = cpu_to_be32 ( sizeof ( *grh ) );
1554  data->lkey = cpu_to_be32 ( golan->mkey );
1555  data->addr = VIRT_2_BE64_BUS ( grh );
1556  data++;
1557  }
1558 
1559  data->byte_count = cpu_to_be32(iob_tailroom(iobuf));
1560  data->lkey = cpu_to_be32(golan->mkey);
1561  data->addr = VIRT_2_BE64_BUS(iobuf->data);
1562 
1563  ++wq->next_idx;
1564 
1565  /*
1566  * Make sure that descriptors are written before
1567  * updating doorbell record and ringing the doorbell
1568  */
1569  wmb();
1570  golan_qp->doorbell_record->recv_db = cpu_to_be16(qp->recv.next_idx & 0xffff);
1571 
1572  return 0;
1573 }
1574 
/**
 * Query the HCA vport context and copy node GUID / LID / SM info
 * into the iPXE Infiniband device.
 *
 * @v ibdev	Infiniband device
 * @ret rc	Return status code (0 on success)
 */
static int golan_query_vport_context ( struct ib_device *ibdev ) {
	struct golan *golan = ib_get_drvdata ( ibdev );
	struct golan_cmd_layout *cmd;
	/* NOTE(review): two declaration lines were dropped in extraction — the
	 * inbox pointer `in` and the outbox `context_data` pointer used below */
	int rc;

	/* NOTE(review): the write_cmd(... QUERY_HCA_VPORT_CONTEXT ...) call
	 * line was dropped; the following lines are its arguments */
		0x0, GEN_MBOX, GEN_MBOX,
		sizeof(struct golan_query_hca_vport_context_inbox),
		sizeof(struct golan_query_hca_vport_context_outbox) );

	in = GOLAN_MBOX_IN ( cmd, in );
	in->port_num = (u8)ibdev->port;

	rc = send_command_and_wait ( golan, DEF_CMD_IDX, GEN_MBOX, GEN_MBOX, __FUNCTION__ );
	GOLAN_CHECK_RC_AND_CMD_STATUS( err_query_vport_context_cmd );

	/* NOTE(review): the line assigning context_data from the outbox was
	 * dropped in extraction */

	/* Publish the queried identity into the ibdev */
	ibdev->node_guid.dwords[0] = context_data->node_guid[0];
	ibdev->node_guid.dwords[1] = context_data->node_guid[1];
	ibdev->lid = be16_to_cpu( context_data->lid );
	ibdev->sm_lid = be16_to_cpu( context_data->sm_lid );
	ibdev->sm_sl = context_data->sm_sl;
	/* NOTE(review): one more assignment line is missing here */

	return 0;
err_query_vport_context_cmd:
	DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
	return rc;
}
1607 
1608 
/**
 * Query GID index 0 of the HCA vport and copy it into ibdev->gid.
 *
 * @v ibdev	Infiniband device
 * @ret rc	Return status code (0 on success)
 */
static int golan_query_vport_gid ( struct ib_device *ibdev ) {
	struct golan *golan = ib_get_drvdata( ibdev );
	struct golan_cmd_layout *cmd;
	/* NOTE(review): the inbox-pointer declaration line (`in`) was dropped
	 * in extraction */
	union ib_gid *ib_gid;
	int rc;

	/* NOTE(review): the write_cmd(... QUERY_HCA_VPORT_GID ...) call line
	 * was dropped; the following lines are its arguments */
		0x0, GEN_MBOX, GEN_MBOX,
		sizeof(struct golan_query_hca_vport_gid_inbox),
		sizeof(struct golan_query_hca_vport_gid_outbox) );

	in = GOLAN_MBOX_IN ( cmd, in );
	in->port_num = (u8)ibdev->port;
	in->gid_index = 0;
	rc = send_command_and_wait ( golan, DEF_CMD_IDX, GEN_MBOX, GEN_MBOX, __FUNCTION__ );
	GOLAN_CHECK_RC_AND_CMD_STATUS( err_query_vport_gid_cmd );

	/* The GID is returned at the start of the general outbox */
	ib_gid = (union ib_gid *)( GET_OUTBOX ( golan, GEN_MBOX ) );

	memcpy ( &ibdev->gid, ib_gid, sizeof(ibdev->gid) );

	return 0;
err_query_vport_gid_cmd:
	DBGC ( golan, "%s [%d] out\n", __FUNCTION__, rc);
	return rc;
}
1636 
/**
 * Query the vport pkey table (all entries, pkey_index 0xffff = whole table).
 * The result is not consumed here; the call mainly validates the command path.
 *
 * @v ibdev	Infiniband device
 * @ret rc	Return status code (0 on success)
 */
static int golan_query_vport_pkey ( struct ib_device *ibdev ) {
	struct golan *golan = ib_get_drvdata ( ibdev );
	struct golan_cmd_layout *cmd;
	/* NOTE(review): the inbox-pointer declaration line (`in`) was dropped
	 * in extraction */
	/* Table size is 2^(7 + reported capability) entries */
	int pkey_table_size_in_entries = (1 << (7 + golan->caps.pkey_table_size));
	int rc;

	/* NOTE(review): the write_cmd(... QUERY_HCA_VPORT_PKEY ...) call line
	 * was dropped; the following lines are its arguments */
		0x0, GEN_MBOX, GEN_MBOX,
		sizeof(struct golan_query_hca_vport_pkey_inbox),
		sizeof(struct golan_outbox_hdr) + 8 +
		sizeof(struct golan_query_hca_vport_pkey_data) * pkey_table_size_in_entries );

	in = GOLAN_MBOX_IN ( cmd, in );
	in->port_num = (u8)ibdev->port;
	in->pkey_index = 0xffff;
	rc = send_command_and_wait ( golan, DEF_CMD_IDX, GEN_MBOX, GEN_MBOX, __FUNCTION__ );
	GOLAN_CHECK_RC_AND_CMD_STATUS( err_query_vport_pkey_cmd );

	return 0;
err_query_vport_pkey_cmd:
	DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
	return rc;
}
1661 
/**
 * Refresh all Infiniband parameters (vport context, GID, pkey table).
 *
 * @v ibdev	Infiniband device
 * @ret rc	Return status code (0 when all three queries succeed)
 */
static int golan_get_ib_info ( struct ib_device *ibdev ) {
	int rc;

	if ( ( rc = golan_query_vport_context ( ibdev ) ) != 0 ) {
		DBG ( "golan_get_ib_info: golan_query_vport_context Failed (rc = %d)\n",rc );
		goto err_out;
	}

	if ( ( rc = golan_query_vport_gid ( ibdev ) ) != 0 ) {
		DBG ( "golan_get_ib_info: golan_query_vport_gid Failed (rc = %d)\n",rc );
		goto err_out;
	}

	if ( ( rc = golan_query_vport_pkey ( ibdev ) ) != 0 ) {
		DBG ( "golan_get_ib_info: golan_query_vport_pkey Failed (rc = %d)\n",rc );
		goto err_out;
	}

	return rc;

err_out:
	DBG ( "%s [%d] out\n", __FUNCTION__, rc);
	return rc;
}
1689 
/**
 * Process a single 64-byte completion queue entry: locate the work queue
 * and I/O buffer it refers to and hand completion back to the IB core.
 *
 * @v ibdev	Infiniband device
 * @v cq	Completion queue being polled
 * @v cqe64	Completion queue entry (big-endian hardware layout)
 * @ret rc	Return status code (0 on success, -EINVAL on lookup failure)
 */
static int golan_complete(struct ib_device *ibdev,
			  struct ib_completion_queue *cq,
			  struct golan_cqe64 *cqe64)
{
	struct golan *golan = ib_get_drvdata(ibdev);
	struct ib_work_queue *wq;
	struct golan_queue_pair *golan_qp;
	struct ib_queue_pair *qp;
	struct io_buffer *iobuf = NULL;
	struct ib_address_vector recv_dest;
	struct ib_address_vector recv_source;
	struct ib_global_route_header *grh;
	struct golan_err_cqe *err_cqe64;
	int gid_present, idx;
	u16 wqe_ctr;
	uint8_t opcode;
	/* Rate-limits the verbose error-CQE hexdump to the first occurrence */
	static int error_state;
	uint32_t qpn = be32_to_cpu(cqe64->sop_drop_qpn) & 0xffffff;
	int is_send = 0;
	size_t len;

	opcode = cqe64->op_own >> GOLAN_CQE_OPCODE_BIT;
	DBGC2( golan , "%s completion with opcode 0x%x\n", __FUNCTION__, opcode);

	/* NOTE(review): the `if (...)` condition line classifying send-side
	 * opcodes was dropped in extraction */
		is_send = 1;
	} else {
		is_send = 0;
	}
	/* NOTE(review): the `if (...)` condition line testing for error
	 * opcodes was dropped in extraction */
		err_cqe64 = (struct golan_err_cqe *)cqe64;
		int i = 0;
		if (!error_state++) {
			/* Dump the raw 64-byte CQE for diagnosis */
			DBGC (golan ,"\n");
			for ( i = 0 ; i < 16 ; i += 2 ) {
				DBGC (golan ,"%x %x\n",
				      be32_to_cpu(((uint32_t *)(err_cqe64))[i]),
				      be32_to_cpu(((uint32_t *)(err_cqe64))[i + 1]));
			}
			DBGC (golan ,"CQE with error: Syndrome(0x%x), VendorSynd(0x%x), HW_SYN(0x%x)\n",
			      err_cqe64->syndrome, err_cqe64->vendor_err_synd,
			      err_cqe64->hw_syndrom);
		}
	}
	/* Identify work queue */
	wq = ib_find_wq(cq, qpn, is_send);
	if (!wq) {
		DBGC (golan ,"%s unknown %s QPN 0x%x in CQN 0x%lx\n",
		      __FUNCTION__, (is_send ? "send" : "recv"), qpn, cq->cqn);
		return -EINVAL;
	}

	qp = wq->qp;
	golan_qp = ib_qp_get_drvdata ( qp );

	/* Translate the hardware WQE counter into a ring index; send WQEs
	 * span multiple basic blocks, receive WQEs are one slot each */
	wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
	if (is_send) {
		wqe_ctr &= ((GOLAN_WQEBBS_PER_SEND_UD_WQE * wq->num_wqes) - 1);
		idx = wqe_ctr / GOLAN_WQEBBS_PER_SEND_UD_WQE;
	} else {
		idx = wqe_ctr & (wq->num_wqes - 1);
	}

	iobuf = wq->iobufs[idx];
	if (!iobuf) {
		DBGC (golan ,"%s IO Buffer 0x%x not found in QPN 0x%x\n",
		      __FUNCTION__, idx, qpn);
		return -EINVAL;
	}
	/* Release the ring slot before completing upwards */
	wq->iobufs[idx] = NULL;

	if (is_send) {
		ib_complete_send(ibdev, qp, iobuf, (opcode == GOLAN_CQE_REQ_ERR));
	} else {
		len = be32_to_cpu(cqe64->byte_cnt);
		memset(&recv_dest, 0, sizeof(recv_dest));
		recv_dest.qpn = qpn;
		/* Construct address vector */
		memset(&recv_source, 0, sizeof(recv_source));
		switch (qp->type) {
		case IB_QPT_SMI:
		case IB_QPT_GSI:
		case IB_QPT_UD:
			/* Locate corresponding GRH */
			assert ( golan_qp->rq.grh != NULL );
			grh = &golan_qp->rq.grh[ idx ];

			recv_source.qpn = be32_to_cpu(cqe64->flags_rqpn) & 0xffffff;
			recv_source.lid = be16_to_cpu(cqe64->slid);
			recv_source.sl = (be32_to_cpu(cqe64->flags_rqpn) >> 24) & 0xf;
			gid_present = (be32_to_cpu(cqe64->flags_rqpn) >> 28) & 3;
			if (!gid_present) {
				recv_dest.gid_present = recv_source.gid_present = 0;
			} else {
				recv_dest.gid_present = recv_source.gid_present = 1;
				//if (recv_source.gid_present == 0x1) {
				memcpy(&recv_source.gid, &grh->sgid, sizeof(recv_source.gid));
				memcpy(&recv_dest.gid, &grh->dgid, sizeof(recv_dest.gid));
				//} else { // recv_source.gid_present = 0x3
				/* GRH is located in the upper 64 byte of the CQE128
				 * currently not supported */
				//;
				//}
			}
			/* Strip the GRH scatter entry from the reported length */
			len -= sizeof ( *grh );
			break;
		case IB_QPT_RC:
		case IB_QPT_ETH:
		default:
			DBGC (golan ,"%s Unsupported QP type (0x%x)\n", __FUNCTION__, qp->type);
			return -EINVAL;
		}
		assert(len <= iob_tailroom(iobuf));
		iob_put(iobuf, len);
		ib_complete_recv(ibdev, qp, &recv_dest, &recv_source, iobuf, (opcode == GOLAN_CQE_RESP_ERR));
	}
	return 0;
}
1808 
/* NOTE(review): the opening signature line was dropped in extraction; this
 * predicate reports whether the CQE is still owned by hardware. The first
 * parameter is the completion queue (`cq`). */
	struct golan_cqe64 *cqe64)
{
	/* The expected owner bit alternates on every wrap of the CQ ring */
	return ((cqe64->op_own & GOLAN_CQE_OWNER_MASK) !=
		((cq->next_idx >> ilog2(cq->num_cqes)) & 1));
}
/**
 * Poll a completion queue: consume software-owned CQEs, complete them,
 * and update the CQ doorbell record.
 *
 * @v ibdev	Infiniband device
 * @v cq	Completion queue
 */
static void golan_poll_cq(struct ib_device *ibdev,
			  struct ib_completion_queue *cq)
{
	unsigned int i;
	int rc = 0;
	unsigned int cqe_idx_mask;
	struct golan_cqe64 *cqe64;
	struct golan_completion_queue *golan_cq = ib_cq_get_drvdata(cq);
	struct golan *golan = ib_get_drvdata(ibdev);

	/* Bounded by ring size so one poll cannot spin forever */
	for (i = 0; i < cq->num_cqes; ++i) {
		/* Look for completion entry */
		cqe_idx_mask = (cq->num_cqes - 1);
		cqe64 = &golan_cq->cqes[cq->next_idx & cqe_idx_mask];
		/* temporary valid only for 64 byte CQE */
		if (golan_is_hw_ownership(cq, cqe64) ||
		    ((cqe64->op_own >> GOLAN_CQE_OPCODE_BIT) ==
		/* NOTE(review): the right-hand operand line of this comparison
		 * (the "opcode not valid" constant) was dropped in extraction */
			break;	/* HW ownership */
		}

		DBGC2( golan , "%s CQN 0x%lx [%ld] \n", __FUNCTION__, cq->cqn, cq->next_idx);
		/*
		 * Make sure we read CQ entry contents after we've checked the
		 * ownership bit. (PRM - 6.5.3.2)
		 */
		rmb();
		rc = golan_complete(ibdev, cq, cqe64);
		if (rc != 0) {
			DBGC (golan ,"%s CQN 0x%lx failed to complete\n", __FUNCTION__, cq->cqn);
		}

		/* Update completion queue's index */
		cq->next_idx++;

		/* Update doorbell record */
		*(golan_cq->doorbell_record) = cpu_to_be32(cq->next_idx & 0xffffff);
	}
}
1854 
/**
 * Translate an event queue entry type code into a printable name (debug aid).
 * NOTE(review): most `case GOLAN_EVENT_TYPE_*:` label lines were dropped in
 * extraction, leaving bare return statements — restore the labels from
 * upstream golan.c before building.
 */
static const char *golan_eqe_type_str(u8 type)
{
	switch (type) {
	case GOLAN_EVENT_TYPE_COMP:
		return "GOLAN_EVENT_TYPE_COMP";
		return "GOLAN_EVENT_TYPE_PATH_MIG";
		return "GOLAN_EVENT_TYPE_COMM_EST";
		return "GOLAN_EVENT_TYPE_SQ_DRAINED";
		return "GOLAN_EVENT_TYPE_SRQ_LAST_WQE";
		return "GOLAN_EVENT_TYPE_SRQ_RQ_LIMIT";
		return "GOLAN_EVENT_TYPE_CQ_ERROR";
		return "GOLAN_EVENT_TYPE_WQ_CATAS_ERROR";
		return "GOLAN_EVENT_TYPE_PATH_MIG_FAILED";
		return "GOLAN_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
		return "GOLAN_EVENT_TYPE_WQ_ACCESS_ERROR";
		return "GOLAN_EVENT_TYPE_SRQ_CATAS_ERROR";
		return "GOLAN_EVENT_TYPE_INTERNAL_ERROR";
		return "GOLAN_EVENT_TYPE_PORT_CHANGE";
		return "GOLAN_EVENT_TYPE_GPIO_EVENT";
		return "GOLAN_EVENT_TYPE_REMOTE_CONFIG";
		return "GOLAN_EVENT_TYPE_DB_BF_CONGESTION";
		return "GOLAN_EVENT_TYPE_STALL_EVENT";
	case GOLAN_EVENT_TYPE_CMD:
		return "GOLAN_EVENT_TYPE_CMD";
		return "GOLAN_EVENT_TYPE_PAGE_REQUEST";
	default:
		return "Unrecognized event";
	}
}
1902 
/* NOTE(review): the function signature line was dropped in extraction — this
 * is the port-change sub-type pretty-printer (takes the `subtype` byte).
 * All `case GOLAN_PORT_CHANGE_SUBTYPE_*:` label lines were also dropped,
 * leaving bare return statements — restore from upstream golan.c. */
{
	switch (subtype) {
		return "GOLAN_PORT_CHANGE_SUBTYPE_DOWN";
		return "GOLAN_PORT_CHANGE_SUBTYPE_ACTIVE";
		return "GOLAN_PORT_CHANGE_SUBTYPE_INITIALIZED";
		return "GOLAN_PORT_CHANGE_SUBTYPE_LID";
		return "GOLAN_PORT_CHANGE_SUBTYPE_PKEY";
		return "GOLAN_PORT_CHANGE_SUBTYPE_GUID";
		return "GOLAN_PORT_CHANGE_SUBTYPE_CLIENT_REREG";
	default:
		return "Unrecognized event";
	}
}
1924 
1925 /**
1926  * Update Infiniband parameters using Commands
1927  *
1928  * @v ibdev Infiniband device
1929  * @ret rc Return status code
1930  */
static int golan_ib_update ( struct ib_device *ibdev ) {
	int rc;

	/* Get IB parameters */
	rc = golan_get_ib_info ( ibdev );
	if ( rc != 0 )
		return rc;

	/* Notify Infiniband core of potential link state change */
	ib_link_state_changed ( ibdev );

	return 0;
}
1943 
/**
 * Dispatch a port-change event queue entry to the relevant ibdev.
 *
 * @v golan	Golan device
 * @v eqe	Port-change event queue entry
 */
static inline void golan_handle_port_event(struct golan *golan, struct golan_eqe *eqe)
{
	struct ib_device *ibdev;
	u8 port;

	/* Port number is carried in the high nibble, 1-based */
	port = (eqe->data.port.port >> 4) & 0xf;
	ibdev = golan->ports[port - 1].ibdev;

	/* Ignore events for ports that are not open */
	if ( ! ib_is_open ( ibdev ) )
		return;

	switch (eqe->sub_type) {
	/* NOTE(review): the `case GOLAN_PORT_CHANGE_SUBTYPE_*:` label lines
	 * (DOWN/ACTIVE, dropped in extraction) belong here */
		golan_ib_update ( ibdev );
		/* Fall through */
	/* NOTE(review): further sub-type case labels dropped here */
		DBGC( golan , "%s event %s(%d) (sub event %s(%d))arrived on port %d\n",
		      __FUNCTION__, golan_eqe_type_str(eqe->type), eqe->type,
		/* NOTE(review): the argument line calling the sub-type
		 * pretty-printer was dropped in extraction */
		      eqe->sub_type, port);
		break;
	default:
		DBGC (golan ,"%s Port event with unrecognized subtype: port %d, sub_type %d\n",
		      __FUNCTION__, port, eqe->sub_type);
	}
}
1975 
/* NOTE(review): the function signature line was dropped in extraction — this
 * returns the next software-owned EQE of the event queue `eq`, or NULL while
 * hardware still owns the entry at the consumer index. */
{
	uint32_t entry = (eq->cons_index & (GOLAN_NUM_EQES - 1));
	struct golan_eqe *eqe = &(eq->eqes[entry]);
	/* Expected owner bit flips on every wrap of the EQ ring */
	return ((eqe->owner != ((eq->cons_index >> ilog2(GOLAN_NUM_EQES)) & 1)) ? NULL : eqe);
}
1982 
1983 
1984 /**
1985  * Poll event queue
1986  *
1987  * @v ibdev Infiniband device
1988  */
static void golan_poll_eq(struct ib_device *ibdev)
{
	struct golan *golan = ib_get_drvdata(ibdev);
	struct golan_event_queue *eq = &(golan->eq);
	struct golan_eqe *eqe;
	u32 cqn;
	/* Bound the number of entries drained per poll */
	int counter = 0;

	while ((eqe = golan_next_eqe_sw(eq)) && (counter < GOLAN_NUM_EQES)) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		rmb();

		DBGC( golan , "%s eqn %d, eqe type %s\n", __FUNCTION__, eq->eqn,
		      golan_eqe_type_str(eqe->type));
		switch (eqe->type) {
		case GOLAN_EVENT_TYPE_COMP:
			/* We dont need to handle completion events since we
			 * poll all the CQs after polling the EQ */
			break;
		/* NOTE(review): a run of `case GOLAN_EVENT_TYPE_*:` label lines
		 * (including the PORT_CHANGE case that calls
		 * golan_handle_port_event, source lines 2011-2020) was dropped
		 * in extraction */
			DBGC( golan , "%s event %s(%d) arrived\n", __FUNCTION__,
			      golan_eqe_type_str(eqe->type), eqe->type);
			break;
		case GOLAN_EVENT_TYPE_CMD:
//			golan_cmd_comp_handler(be32_to_cpu(eqe->data.cmd.vector));
			break;
		/* NOTE(review): case label lines dropped here */
			break;
		/* NOTE(review): `case GOLAN_EVENT_TYPE_CQ_ERROR:` label line
		 * dropped here */
			cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
			DBGC (golan ,"CQ error on CQN 0x%x, syndrom 0x%x\n",
			      cqn, eqe->data.cq_err.syndrome);
//			mlx5_cq_event(dev, cqn, eqe->type);
			break;
		/*
		 * currently the driver do not support dynamic memory request
		 * during FW run, a follow up change will allocate FW pages once and
		 * never release them till driver shutdown, this change will not support
		 * this request as currently this request is not issued anyway.
		case GOLAN_EVENT_TYPE_PAGE_REQUEST:
			{
				// we should check if we get this event while we
				// waiting for a command
				u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
				s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages);

				DBGC (golan ,"%s page request for func 0x%x, napges %d\n",
				      __FUNCTION__, func_id, npages);
				golan_provide_pages(golan, npages, func_id);
			}
			break;
		*/
		default:
			DBGC (golan ,"%s Unhandled event 0x%x on EQ 0x%x\n", __FUNCTION__,
			      eqe->type, eq->eqn);
			break;
		}

		++eq->cons_index;
		/* NOTE(review): the EQ consumer-index doorbell update line
		 * (source line 2061) was dropped in extraction */
		++counter;
	}
}
2065 
2066 /**
2067  * Attach to multicast group
2068  *
2069  * @v ibdev Infiniband device
2070  * @v qp Queue pair
2071  * @v gid Multicast GID
2072  * @ret rc Return status code
2073  */
static int golan_mcast_attach(struct ib_device *ibdev,
			      struct ib_queue_pair *qp,
			      union ib_gid *gid)
{
	struct golan *golan = ib_get_drvdata(ibdev);
	struct golan_cmd_layout *cmd;
	int rc;

	/* Guard against a caller passing no queue pair */
	if ( qp == NULL ) {
		DBGC( golan, "%s: Invalid pointer, could not attach QPN to MCG\n",
		      __FUNCTION__ );
		return -EFAULT;
	}

	/* NOTE(review): the write_cmd(... ATTACH_TO_MCG ...) call line was
	 * dropped in extraction; the following lines are its arguments */
		GEN_MBOX, NO_MBOX,
		sizeof(struct golan_attach_mcg_mbox_in),
		sizeof(struct golan_attach_mcg_mbox_out));
	((struct golan_attach_mcg_mbox_in *)(cmd->in))->qpn = cpu_to_be32(qp->qpn);

	/* Multicast GID is carried in the general inbox */
	memcpy(GET_INBOX(golan, GEN_MBOX), gid, sizeof(*gid));

	/* NOTE(review): the send_command_and_wait() line (which sets rc) was
	 * dropped in extraction */
	GOLAN_CHECK_RC_AND_CMD_STATUS( err_attach_to_mcg_cmd );

	DBGC( golan , "%s: QPN 0x%lx was attached to MCG\n", __FUNCTION__, qp->qpn);
	return 0;
err_attach_to_mcg_cmd:
	DBGC (golan ,"%s [%d] out\n", __FUNCTION__, rc);
	return rc;
}
2105 
2106 /**
2107  * Detach from multicast group
2108  *
2109  * @v ibdev Infiniband device
2110  * @v qp Queue pair
2111  * @v gid Multicast GID
2112  * @ret rc Return status code
2113  */
static void golan_mcast_detach(struct ib_device *ibdev,
			       struct ib_queue_pair *qp,
			       union ib_gid *gid)
{
	struct golan *golan = ib_get_drvdata(ibdev);
	struct golan_cmd_layout *cmd;
	/* rc is consumed by the dropped status-check line below */
	int rc;

	/* NOTE(review): the write_cmd(... DETACH_FROM_MCG ...) call line was
	 * dropped in extraction; the following lines are its arguments */
		GEN_MBOX, NO_MBOX,
		sizeof(struct golan_detach_mcg_mbox_in),
		sizeof(struct golan_detach_mcg_mbox_out));
	((struct golan_detach_mcg_mbox_in *)(cmd->in))->qpn = cpu_to_be32(qp->qpn);

	/* Multicast GID is carried in the general inbox */
	memcpy(GET_INBOX(golan, GEN_MBOX), gid, sizeof(*gid));

	/* NOTE(review): the send_command_and_wait()/status-print lines were
	 * dropped in extraction */

	DBGC( golan , "%s: QPN 0x%lx was detached from MCG\n", __FUNCTION__, qp->qpn);
}
2135 
2136 /**
2137  * Inform embedded subnet management agent of a received MAD
2138  *
2139  * @v ibdev Infiniband device
2140  * @v mad MAD
2141  * @ret rc Return status code
2142  */
2143 static int golan_inform_sma(struct ib_device *ibdev,
2144  union ib_mad *mad)
2145 {
2146  if (!ibdev || !mad) {
2147  return 1;
2148  }
2149 
2150  return 0;
2151 }
2152 
2154 {
2155  struct ib_device *ibdev = port->ibdev;
2156  int rc;
2157 
2158  golan_get_ib_info ( ibdev );
2159  /* Register Infiniband device */
2160  if ((rc = register_ibdev(ibdev)) != 0) {
2161  DBG ( "%s port %d could not register IB device: (rc = %d)\n",
2162  __FUNCTION__, ibdev->port, rc);
2163  return rc;
2164  }
2165 
2166  port->netdev = ipoib_netdev( ibdev );
2167 
2168  return 0;
2169 }
2170 
/**
 * Tear down the HCA. Idempotent: returns immediately when the
 * device is not currently marked open.
 *
 * @v golan	Golan device
 */
static inline void golan_bring_down(struct golan *golan)
{
	DBGC(golan, "%s: start\n", __FUNCTION__);

	if (~golan->flags & GOLAN_OPEN) {
		DBGC(golan, "%s: end (already closed)\n", __FUNCTION__);
		return;
	}

	/* NOTE(review): the teardown call sequence (destroy mkey/PD/EQ/UAR,
	 * HCA teardown, page release, command uninit — source lines 2180-2187)
	 * was dropped in extraction; restore from upstream golan.c */
	golan->flags &= ~GOLAN_OPEN;
	DBGC(golan, "%s: end\n", __FUNCTION__);
}
2191 
/**
 * Program the link speed on every port via the mlx_utils helper layer.
 * Initialises mlx_utils on demand and frees it again if it was
 * initialised here.
 *
 * @v golan	Golan device
 * @ret status	mlx_status result of the last operation
 */
static int golan_set_link_speed ( struct golan *golan ){
	/* NOTE(review): the declaration of `status` (mlx_status) was dropped
	 * in extraction */
	int i = 0;
	int utils_inited = 0;

	if ( ! golan->utils ) {
		utils_inited = 1;
		/* NOTE(review): the init_mlx_utils() call line was dropped in
		 * extraction */
		MLX_CHECK_STATUS ( golan->pci, status, utils_init_err, "mlx_utils_init failed" );
	}

	for ( i = 0; i < golan->caps.num_ports; ++i ) {
		/* NOTE(review): the mlx_set_link_speed() call line was dropped
		 * in extraction */
		MLX_CHECK_STATUS ( golan->pci, status, set_link_speed_err, "mlx_set_link_speed failed" );
	}

set_link_speed_err:
	/* Only release utils if this function created them */
	if ( utils_inited )
		free_mlx_utils ( & golan->utils );
utils_init_err:
	return status;
}
2214 
/**
 * Bring the HCA up: command interface, HCA enable, capability query/set,
 * firmware pages, UAR, EQ, PD and MKey. Idempotent when already open.
 *
 * @v golan	Golan device
 * @ret rc	Return status code (0 on success)
 */
static inline int golan_bring_up(struct golan *golan)
{
	int rc = 0;
	DBGC(golan, "%s\n", __FUNCTION__);

	if (golan->flags & GOLAN_OPEN)
		return 0;

	if (( rc = golan_cmd_init(golan) ))
		goto out;

	if (( rc = golan_core_enable_hca(golan) ))
		goto cmd_uninit;

	/* Query for need for boot pages */
	/* NOTE(review): the boot-pages call line (`if (( rc = ... ))`) was
	 * dropped in extraction */
		goto disable;

	if (( rc = golan_qry_hca_cap(golan) ))
		goto pages;

	if (( rc = golan_set_hca_cap(golan) ))
		goto pages;

	/* NOTE(review): the init-pages call line was dropped in extraction */
		goto pages;

	if (( rc = golan_set_link_speed ( golan ) ))
		goto pages_teardown;

	//Reg Init?
	if (( rc = golan_hca_init(golan) ))
		goto pages_2;

	if (( rc = golan_alloc_uar(golan) ))
		goto teardown;

	if (( rc = golan_create_eq(golan) ))
		goto de_uar;

	if (( rc = golan_alloc_pd(golan) ))
		goto de_eq;

	if (( rc = golan_create_mkey(golan) ))
		goto de_pd;

	golan->flags |= GOLAN_OPEN;
	return 0;

	/* NOTE(review): the unwind call lines between the labels below
	 * (destroy mkey/PD/EQ/UAR, HCA teardown, page release, HCA disable,
	 * command uninit — source lines 2264-2282) were dropped in extraction */
de_pd:
de_eq:
de_uar:
teardown:
pages_2:
pages_teardown:
pages:
disable:
cmd_uninit:
out:
	return rc;
}
2285 
2286 /**
2287  * Close Infiniband link
2288  *
2289  * @v ibdev Infiniband device
2290  */
2291 static void golan_ib_close ( struct ib_device *ibdev ) {
2292  struct golan *golan = NULL;
2293 
2294  DBG ( "%s start\n", __FUNCTION__ );
2295  if ( ! ibdev )
2296  return;
2297  golan = ib_get_drvdata ( ibdev );
2298  golan_bring_down ( golan );
2299  DBG ( "%s end\n", __FUNCTION__ );
2300 }
2301 
2302 /**
2303  * Initialise Infiniband link
2304  *
2305  * @v ibdev Infiniband device
2306  * @ret rc Return status code
2307  */
2308 static int golan_ib_open ( struct ib_device *ibdev ) {
2309  struct golan *golan = NULL;
2310  DBG ( "%s start\n", __FUNCTION__ );
2311 
2312  if ( ! ibdev )
2313  return -EINVAL;
2314  golan = ib_get_drvdata ( ibdev );
2315  golan_bring_up ( golan );
2316  golan_ib_update ( ibdev );
2317 
2318  DBG ( "%s end\n", __FUNCTION__ );
2319  return 0;
2320 }
2321 
/** Golan Infiniband operations */
/* NOTE(review): the initializer's opening line
 * (`static struct ib_device_operations golan_ib_operations = {`) and the
 * `.create_cq` entry were dropped in extraction — restore from upstream. */
	.destroy_cq = golan_destroy_cq,
	.create_qp = golan_create_qp,
	.modify_qp = golan_modify_qp,
	.destroy_qp = golan_destroy_qp,
	.post_send = golan_post_send,
	.post_recv = golan_post_recv,
	.poll_cq = golan_poll_cq,
	.poll_eq = golan_poll_eq,
	.open = golan_ib_open,
	.close = golan_ib_close,
	.mcast_attach = golan_mcast_attach,
	.mcast_detach = golan_mcast_detach,
	/* The SMA hooks are no-op sanity checks on this hardware */
	.set_port_info = golan_inform_sma,
	.set_pkey_table = golan_inform_sma,
};
2340 
2341 static int golan_probe_normal ( struct pci_device *pci ) {
2342  struct golan *golan;
2343  struct ib_device *ibdev;
2344  struct golan_port *port;
2345  int i;
2346  int rc = 0;
2347 
2348  golan = golan_alloc();
2349  if ( !golan ) {
2350  rc = -ENOMEM;
2351  goto err_golan_alloc;
2352  }
2353 
2354  /* at POST stage some BIOSes have limited available dynamic memory */
2355  if ( golan_init_fw_areas ( golan ) ) {
2356  rc = -ENOMEM;
2357  goto err_golan_golan_init_pages;
2358  }
2359 
2360  /* Setup PCI bus and HCA BAR */
2361  pci_set_drvdata( pci, golan );
2362  golan->pci = pci;
2363  golan_pci_init( golan );
2364  /* config command queues */
2365  if ( golan_bring_up( golan ) ) {
2366  DBGC (golan ,"golan bringup failed\n");
2367  rc = -1;
2368  goto err_golan_bringup;
2369  }
2370 
2371  if ( ! DEVICE_IS_CIB ( pci->device ) ) {
2372  if ( init_mlx_utils ( & golan->utils, pci ) ) {
2373  rc = -1;
2374  goto err_utils_init;
2375  }
2376  }
2377  /* Allocate Infiniband devices */
2378  for (i = 0; i < golan->caps.num_ports; ++i) {
2379  ibdev = alloc_ibdev( 0 );
2380  if ( !ibdev ) {
2381  rc = -ENOMEM;
2382  goto err_golan_probe_alloc_ibdev;
2383  }
2384  golan->ports[i].ibdev = ibdev;
2385  golan->ports[i].vep_number = 0;
2387  ibdev->dev = &pci->dev;
2388  ibdev->port = (GOLAN_PORT_BASE + i);
2390  }
2391 
2392  /* Register devices */
2393  for ( i = 0; i < golan->caps.num_ports; ++i ) {
2394  port = &golan->ports[i];
2395  if ((rc = golan_register_ibdev ( port ) ) != 0 ) {
2396  goto err_golan_probe_register_ibdev;
2397  }
2398  }
2399 
2400  golan_bring_down ( golan );
2401 
2402  return 0;
2403 
2404  i = golan->caps.num_ports;
2405 err_golan_probe_register_ibdev:
2406  for ( i-- ; ( signed int ) i >= 0 ; i-- )
2408 
2409  i = golan->caps.num_ports;
2410 err_golan_probe_alloc_ibdev:
2411  for ( i-- ; ( signed int ) i >= 0 ; i-- )
2412  ibdev_put ( golan->ports[i].ibdev );
2413  if ( ! DEVICE_IS_CIB ( pci->device ) ) {
2414  free_mlx_utils ( & golan->utils );
2415  }
2416 err_utils_init:
2417  golan_bring_down ( golan );
2418 err_golan_bringup:
2419  iounmap( golan->iseg );
2421 err_golan_golan_init_pages:
2422  free ( golan );
2423 err_golan_alloc:
2424  DBGC (golan ,"%s rc = %d\n", __FUNCTION__, rc);
2425  return rc;
2426 }
2427 
2428 static void golan_remove_normal ( struct pci_device *pci ) {
2429  struct golan *golan = pci_get_drvdata(pci);
2430  struct golan_port *port;
2431  int i;
2432 
2433  DBGC(golan, "%s\n", __FUNCTION__);
2434 
2435  for ( i = ( golan->caps.num_ports - 1 ) ; i >= 0 ; i-- ) {
2436  port = &golan->ports[i];
2437  unregister_ibdev ( port->ibdev );
2438  }
2439  for ( i = ( golan->caps.num_ports - 1 ) ; i >= 0 ; i-- ) {
2440  netdev_nullify ( golan->ports[i].netdev );
2441  }
2442  for ( i = ( golan->caps.num_ports - 1 ) ; i >= 0 ; i-- ) {
2443  ibdev_put ( golan->ports[i].ibdev );
2444  }
2445  if ( ! DEVICE_IS_CIB ( pci->device ) ) {
2446  free_mlx_utils ( & golan->utils );
2447  }
2448  iounmap( golan->iseg );
2450  free(golan);
2451 }
2452 
2453 /***************************************************************************
2454  * NODNIC operations
2455  **************************************************************************/
/*
 * Ring the TX UAR send doorbell for a posted send WQE.
 *
 * NOTE(review): this block was damaged by documentation extraction —
 * the function's opening declaration line (golan.c:2456,
 * "static mlx_status shomron_tx_uar_send_db ( struct ib_device *ibdev,"
 * per the symbol index), the initialisation of `status` (line 2458),
 * and the doorbell write call itself (line 2471, presumably a writel()
 * to the UAR doorbell buffer) are missing.  Restore from the original
 * source before building.
 */
2457 		struct nodnic_send_wqbb *wqbb ) {
2459 	struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
2460 	struct shomron_nodnic_eth_send_wqe *eth_wqe =
2461 			( struct shomron_nodnic_eth_send_wqe * )wqbb;
2462 	struct shomronprm_wqe_segment_ctrl_send *ctrl;
2463 
	/* Refuse to ring the doorbell without a WQE or a mapped UAR */
2464 	if ( ! eth_wqe || ! flexboot_nodnic->device_priv.uar.virt ) {
2465 		DBG("%s: Invalid parameters\n",__FUNCTION__);
2466 		status = MLX_FAILED;
2467 		goto err;
2468 	}
	/* Ensure the WQE is globally visible before ringing the doorbell */
2469 	wmb();
2470 	ctrl = & eth_wqe->ctrl;
	/* Alternate between the odd/even doorbell buffers based on the
	 * low bit of the WQE index (doorbell write line lost — see note) */
2472 			( ( MLX_GET ( ctrl, wqe_index ) & 0x1 ) ? DB_BUFFER0_ODD_OFFSET
2473 			: DB_BUFFER0_EVEN_OFFSET ) );
2474 err:
2475 	return status;
2476 }
2477 
/*
 * Build an Ethernet send WQE (Shomron/ConnectX-4 format) for a NODNIC
 * queue pair: zero the WQE, look up the QP number, fill the control
 * segment (opcode, WQE index, data-segment count, CQE generation,
 * inline headers) and a single data segment pointing at the I/O buffer.
 *
 * NOTE(review): damaged by documentation extraction — missing lines:
 * 2478 (declaration line "static mlx_status shomron_fill_eth_send_wqe
 * ( struct ib_device *ibdev," per the symbol index), 2482 (likely the
 * `status` initialisation), 2485 (likely the flexboot_nodnic_port
 * lookup that `port` below refers to), 2519 (likely an iob_pull() of
 * the inline headers — the symbol index references iob_pull), and
 * 2523 (the l_key value expression).  Restore from the original source.
 */
2479 	 struct ib_queue_pair *qp, struct ib_address_vector *av __unused,
2480 	 struct io_buffer *iobuf, struct nodnic_send_wqbb *wqbb,
2481 	 unsigned long wqe_index ) {
2483 	struct flexboot_nodnic *flexboot_nodnic = ib_get_drvdata ( ibdev );
2484 	struct shomron_nodnic_eth_send_wqe *eth_wqe = NULL;
2486 	struct flexboot_nodnic_queue_pair *flexboot_nodnic_qp =
2487 			ib_qp_get_drvdata ( qp );
2488 	nodnic_qp *nodnic_qp = flexboot_nodnic_qp->nodnic_queue_pair;
2489 	struct nodnic_send_ring *send_ring = &nodnic_qp->send;
2490 	mlx_uint32 qpn = 0;
2491 
2492 	eth_wqe = (struct shomron_nodnic_eth_send_wqe *)wqbb;
2493 	memset ( ( ( ( void * ) eth_wqe ) ), 0,
2494 		   ( sizeof ( *eth_wqe ) ) );
2495 
	/* Resolve the hardware QP number for this send ring */
2496 	status = nodnic_port_get_qpn(&port->port_priv, &send_ring->nodnic_ring,
2497 			&qpn);
2498 	if ( status != MLX_SUCCESS ) {
2499 		DBG("nodnic_port_get_qpn failed\n");
2500 		goto err;
2501 	}
2502 
2503 #define SHOMRON_GENERATE_CQE 0x3
2504 #define SHOMRON_INLINE_HEADERS_SIZE 18
2505 #define SHOMRON_INLINE_HEADERS_OFFSET 32
	/* Control segment: opcode + low 16 bits of the WQE index */
2506 	MLX_FILL_2 ( &eth_wqe->ctrl, 0, opcode, FLEXBOOT_NODNIC_OPCODE_SEND,
2507 			wqe_index, wqe_index & 0xFFFF);
2508 	MLX_FILL_2 ( &eth_wqe->ctrl, 1, ds, 0x4 , qpn, qpn );
2509 	MLX_FILL_1 ( &eth_wqe->ctrl, 2,
2510 			ce, SHOMRON_GENERATE_CQE /* generate completion */
2511 			);
	/* First 2 bytes of the frame go in inline_headers1; the remaining
	 * 16 inline header bytes are copied at the fixed offset below */
2512 	MLX_FILL_2 ( &eth_wqe->ctrl, 7,
2513 			inline_headers1,
2514 			cpu_to_be16(*(mlx_uint16 *)iobuf->data),
2515 			inline_headers_size, SHOMRON_INLINE_HEADERS_SIZE
2516 			);
2517 	memcpy((void *)&eth_wqe->ctrl + SHOMRON_INLINE_HEADERS_OFFSET,
2518 			iobuf->data + 2, SHOMRON_INLINE_HEADERS_SIZE - 2);
	/* Data segment: remaining payload by bus address (l_key value line
	 * lost in extraction — see note above) */
2520 	MLX_FILL_1 ( &eth_wqe->data[0], 0,
2521 			byte_count, iob_len ( iobuf ) );
2522 	MLX_FILL_1 ( &eth_wqe->data[0], 1, l_key,
2524 	MLX_FILL_H ( &eth_wqe->data[0], 2,
2525 			local_address_h, virt_to_bus ( iobuf->data ) );
2526 	MLX_FILL_1 ( &eth_wqe->data[0], 3,
2527 			local_address_l, virt_to_bus ( iobuf->data ) );
2528 err:
2529 	return status;
2530 }
2531 
/*
 * Decode a raw Shomron CQE into the generic cqe_data structure used by
 * the flexboot_nodnic layer: owner bit, error/success classification,
 * send-vs-receive direction, and the QPN (or SRQ number for receives).
 *
 * NOTE(review): damaged by documentation extraction — the right-hand
 * sides of four assignments are missing (lines 2544, 2550, 2553:
 * presumably opcode comparisons against the FLEXBOOT_NODNIC_OPCODE_CQ_*
 * constants defined below; line 2547: presumably the
 * `cqe_data->vendor_err_syndrome =` left-hand side matching the MLX_GET
 * on the next line).  Restore from the original source before building.
 */
2532 static mlx_status shomron_fill_completion( void *cqe, struct cqe_data *cqe_data ) {
2533 	union shomronprm_completion_entry *cq_entry;
2534 	uint32_t opcode;
2535 
2536 	cq_entry = (union shomronprm_completion_entry *)cqe;
2537 	cqe_data->owner = MLX_GET ( &cq_entry->normal, owner );
2538 	opcode = MLX_GET ( &cq_entry->normal, opcode );
2539 #define FLEXBOOT_NODNIC_OPCODE_CQ_SEND 0
2540 #define FLEXBOOT_NODNIC_OPCODE_CQ_RECV 2
2541 #define FLEXBOOT_NODNIC_OPCODE_CQ_SEND_ERR 13
2542 #define FLEXBOOT_NODNIC_OPCODE_CQ_RECV_ERR 14
	/* Error classification expression lost in extraction — see note */
2543 	cqe_data->is_error =
2545 	if ( cqe_data->is_error ) {
		/* Error CQE: report syndromes from the error layout */
2546 		cqe_data->syndrome = MLX_GET ( &cq_entry->error, syndrome );
2548 				MLX_GET ( &cq_entry->error, vendor_error_syndrome );
2549 		cqe_data->is_send =
2551 	} else {
		/* Successful CQE: report WQE counter and byte count */
2552 		cqe_data->is_send =
2554 		cqe_data->wqe_counter = MLX_GET ( &cq_entry->normal, wqe_counter );
2555 		cqe_data->byte_cnt = MLX_GET ( &cq_entry->normal, byte_cnt );
2556 
2557 	}
	/* Sends carry the QPN; receives carry the SRQ number field */
2558 	if ( cqe_data->is_send == TRUE )
2559 		cqe_data->qpn = MLX_GET ( &cq_entry->normal, qpn );
2560 	else
2561 		cqe_data->qpn = MLX_GET ( &cq_entry->normal, srqn );
2562 
2563 	return 0;
2564 }
2565 
2566 static mlx_status shomron_cqe_set_owner ( void *cq, unsigned int num_cqes ) {
2567  unsigned int i = 0;
2568  union shomronprm_completion_entry *cq_list;
2569 
2570  cq_list = (union shomronprm_completion_entry *)cq;
2571  for ( ; i < num_cqes ; i++ )
2572  MLX_FILL_1 ( &cq_list[i].normal, 15, owner, 1 );
2573  return 0;
2574 }
2575 
2577  return sizeof ( union shomronprm_completion_entry );
2578 }
2579 
2582  .fill_send_wqe[IB_QPT_ETH] = shomron_fill_eth_send_wqe,
2583  .fill_completion = shomron_fill_completion,
2584  .cqe_set_owner = shomron_cqe_set_owner,
2585  .irq = flexboot_nodnic_eth_irq,
2586  .tx_uar_send_doorbell_fn = shomron_tx_uar_send_db,
2587 };
2588 
2589 static int shomron_nodnic_is_supported ( struct pci_device *pci ) {
2590  if ( DEVICE_IS_CIB ( pci->device ) )
2591  return 0;
2592 
2593  return flexboot_nodnic_is_supported ( pci );
2594 }
2595 /**************************************************************************/
2596 
2597 static int golan_probe ( struct pci_device *pci ) {
2598  int rc = -ENOTSUP;
2599 
2600  DBG ( "%s: start\n", __FUNCTION__ );
2601 
2602  if ( ! pci ) {
2603  DBG ( "%s: PCI is NULL\n", __FUNCTION__ );
2604  rc = -EINVAL;
2605  goto probe_done;
2606  }
2607 
2608  if ( shomron_nodnic_is_supported ( pci ) ) {
2609  DBG ( "%s: Using NODNIC driver\n", __FUNCTION__ );
2611  } else {
2612  DBG ( "%s: Using normal driver\n", __FUNCTION__ );
2613  rc = golan_probe_normal ( pci );
2614  }
2615 
2616 probe_done:
2617  DBG ( "%s: rc = %d\n", __FUNCTION__, rc );
2618  return rc;
2619 }
2620 
/**
 * Remove a Golan PCI device
 *
 * Dispatches to whichever driver path (NODNIC or normal) was used at
 * probe time, as re-detected via shomron_nodnic_is_supported().
 *
 * @v pci		PCI device
 */
static void golan_remove ( struct pci_device *pci ) {
	DBG ( "%s: start\n", __FUNCTION__ );

	if ( shomron_nodnic_is_supported ( pci ) ) {
		DBG ( "%s: Using NODNIC driver remove\n", __FUNCTION__ );
		flexboot_nodnic_remove ( pci );
		DBG ( "%s: end\n", __FUNCTION__ );
	} else {
		DBG ( "%s: Using normal driver remove\n", __FUNCTION__ );
		golan_remove_normal ( pci );
	}
}
2636 
/* PCI device IDs claimed by this driver (all Mellanox, vendor 0x15b3);
 * matched against hardware by golan_driver below. */
2637 static struct pci_device_id golan_nics[] = {
2638 	PCI_ROM ( 0x15b3, 0x1011, "ConnectIB", "ConnectIB HCA driver: DevID 4113", 0 ),
2639 	PCI_ROM ( 0x15b3, 0x1013, "ConnectX-4", "ConnectX-4 HCA driver, DevID 4115", 0 ),
2640 	PCI_ROM ( 0x15b3, 0x1015, "ConnectX-4Lx", "ConnectX-4Lx HCA driver, DevID 4117", 0 ),
2641 	PCI_ROM ( 0x15b3, 0x1017, "ConnectX-5", "ConnectX-5 HCA driver, DevID 4119", 0 ),
2642 	PCI_ROM ( 0x15b3, 0x1019, "ConnectX-5EX", "ConnectX-5EX HCA driver, DevID 4121", 0 ),
2643 	PCI_ROM ( 0x15b3, 0x101b, "ConnectX-6", "ConnectX-6 HCA driver, DevID 4123", 0 ),
2644 	PCI_ROM ( 0x15b3, 0x101d, "ConnectX-6DX", "ConnectX-6DX HCA driver, DevID 4125", 0 ),
2645 	PCI_ROM ( 0x15b3, 0xa2d2, "BlueField", "BlueField integrated ConnectX-5 network controller HCA driver, DevID 41682", 0 ),
2646 };
2647 
/* PCI driver registration: binds the golan_nics ID table to the
 * probe/remove entry points via iPXE's __pci_driver linker table. */
2648 struct pci_driver golan_driver __pci_driver = {
2649 	.ids = golan_nics,
2650 	.id_count = (sizeof(golan_nics) / sizeof(golan_nics[0])),
2651 	.probe = golan_probe,
2652 	.remove = golan_remove,
2653 };
void unregister_ibdev(struct ib_device *ibdev)
Unregister Infiniband device.
Definition: infiniband.c:1005
struct nodnic_ring nodnic_ring
#define cpu_to_be16(value)
Definition: byteswap.h:109
uint16_t u16
Definition: stdint.h:21
#define iob_pull(iobuf, len)
Definition: iobuf.h:98
static struct pci_device_id golan_nics[]
Definition: golan.c:2637
#define EINVAL
Invalid argument.
Definition: errno.h:428
static __always_inline void ib_set_drvdata(struct ib_device *ibdev, void *priv)
Set Infiniband device driver-private data.
Definition: infiniband.h:696
#define GET_OUTBOX(golan, idx)
Definition: golan.h:76
u32 doorbell_qpn
Definition: golan.h:185
#define GOLAN_NUM_EQES
Definition: golan.h:217
struct golan_cmd_prot_block mblock
Definition: golan.c:122
#define CTRL_SIG_SZ
Definition: golan.c:133
struct arbelprm_rc_send_wqe rc
Definition: arbel.h:14
#define MEM_CMD_IDX
Definition: golan.h:57
Infiniband protocol.
unsigned short uint16_t
Definition: stdint.h:11
#define SHOMRON_INLINE_HEADERS_OFFSET
wmb()
#define MLX_FILL_2(_ptr, _index,...)
Definition: mlx_bitops.h:171
#define FLEXBOOT_NODNIC_OPCODE_CQ_RECV_ERR
#define iob_put(iobuf, len)
Definition: iobuf.h:116
u16 next_idx
Definition: golan.h:176
struct golan_wqe_data_seg data[2]
Definition: golan.h:156
struct nodnic_send_ring send
__be16 max_wqe_sz_rq
Definition: CIB_PRM.h:276
__be32 cmdq_addr_h
Definition: CIB_PRM.h:57
__be16 opmod
Definition: CIB_PRM.h:30
A PCI driver.
Definition: pci.h:224
#define EBUSY
Device or resource busy.
Definition: errno.h:338
struct arbelprm_completion_queue_entry normal
Definition: arbel.h:11
union ib_gid gid
Port GID (comprising GID prefix and port GUID)
Definition: infiniband.h:439
static int ib_is_open(struct ib_device *ibdev)
Check whether or not Infiniband device is open.
Definition: infiniband.h:575
struct golan_firmware_area fw_areas[GOLAN_FW_AREAS_NUM]
Definition: golan.h:341
static int golan_cmd_wait(struct golan *golan, int idx, const char *command)
Wait for Golan command completion.
Definition: golan.c:240
static mlx_status shomron_tx_uar_send_db(struct ib_device *ibdev, struct nodnic_send_wqbb *wqbb)
Definition: golan.c:2456
Infiniband device operations.
Definition: infiniband.h:254
u8 owner
Definition: CIB_PRM.h:700
#define GOLAN_OPEN
Definition: golan.h:321
static int golan_probe_normal(struct pci_device *pci)
Definition: golan.c:2341
__be16 func_id
Definition: CIB_PRM.h:30
__be32 in[4]
Definition: CIB_PRM.h:35
struct shomronprm_completion_queue_entry normal
static int golan_create_qp_aux(struct ib_device *ibdev, struct ib_queue_pair *qp, int *qpn)
Definition: golan.c:1084
int flexboot_nodnic_is_supported(struct pci_device *pci)
struct ib_device * ibdev
Infiniband device.
uint8_t opcode
Opcode.
Definition: ena.h:16
#define DB_BUFFER0_EVEN_OFFSET
Definition: golan.h:219
uint32_t pdn
Definition: golan.h:334
static void golan_ib_close(struct ib_device *ibdev)
Close Infiniband link.
Definition: golan.c:2291
#define GOLAN_INVALID_LKEY
Definition: golan.h:39
Error codes.
static int(* golan_modify_qp_methods[])(struct ib_device *ibdev, struct ib_queue_pair *qp, struct golan_modify_qp_mbox_in_data *in)
Definition: golan.c:1327
mlx_uint32 is_error
static int golan_hca_init(struct golan *golan)
Definition: golan.c:642
__be32 srqn
Definition: CIB_PRM.h:29
u8 owner
Definition: CIB_PRM.h:36
__be16 send_db
Definition: CIB_PRM.h:978
static int golan_take_pages(struct golan *golan, uint32_t pages, __be16 func_id)
Definition: golan.c:398
A command-line command.
Definition: command.h:9
struct net_device * ipoib_netdev(struct ib_device *ibdev)
Find IPoIB network device.
Definition: ipoib.c:1036
I/O buffers.
uint16_t lid
Port LID.
Definition: infiniband.h:441
static int golan_is_hw_ownership(struct ib_completion_queue *cq, struct golan_cqe64 *cqe64)
Definition: golan.c:1809
struct pci_device_id * ids
PCI ID table.
Definition: pci.h:226
#define GOLAN_CQ_DB_RECORD_SIZE
Definition: golan.h:192
uint8_t type
Type.
Definition: ena.h:16
static int golan_probe(struct pci_device *pci)
Definition: golan.c:2597
struct golan_wqe_ctrl_seg ctrl
Definition: golan.h:145
__be16 slid
Definition: CIB_PRM.h:877
static void show_out_status(uint32_t *out)
Definition: golan.c:218
struct shomronprm_completion_with_error error
static void golan_cq_clean(struct ib_completion_queue *cq)
Definition: golan.c:1055
nodnic_device_priv device_priv
nodnic device
uint32_t readl(volatile uint32_t *io_addr)
Read 32-bit dword from memory-mapped device.
unsigned long user_to_phys(userptr_t userptr, off_t offset)
Convert user pointer to physical address.
#define DBGC(...)
Definition: compiler.h:505
#define min(x, y)
Definition: ath.h:34
__be32 byte_count
Definition: CIB_PRM.h:28
#define GEN_MBOX
Definition: golan.h:60
mlx_uint32 byte_cnt
#define FLEXBOOT_NODNIC_OPCODE_CQ_SEND
static mlx_status shomron_cqe_set_owner(void *cq, unsigned int num_cqes)
Definition: golan.c:2566
#define GOLAN_CREATE_MKEY_SEG_QPN_BIT
Definition: CIB_PRM.h:730
#define GOLAN_HCA_BAR
Definition: golan.h:45
struct device * dev
Underlying device.
Definition: infiniband.h:410
golan_qry_pages_mode
Definition: CIB_PRM.h:76
static int golan_qry_hca_cap(struct golan *golan)
Definition: golan.c:378
__be16 max_wqe_sz_sq
Definition: CIB_PRM.h:274
static void golan_destory_eq(struct golan *golan)
Definition: golan.c:790
static __always_inline void * ib_qp_get_drvdata(struct ib_queue_pair *qp)
Get Infiniband queue pair driver-private data.
Definition: infiniband.h:641
struct pci_device * pci
Definition: golan.h:324
static uint32_t ilog2(uint32_t mem)
Definition: golan.c:128
#define GOLAN_QP_CTX_PM_STATE_BIT
Definition: CIB_PRM.h:893
static int golan_get_ib_info(struct ib_device *ibdev)
Definition: golan.c:1662
#define GOLAN_WQEBBS_PER_SEND_UD_WQE
Definition: golan.h:130
int init_mlx_utils(mlx_utils **utils, struct pci_device *pci)
__be32 outlen
Definition: CIB_PRM.h:41
static void golan_handle_port_event(struct golan *golan, struct golan_eqe *eqe)
Definition: golan.c:1944
struct ib_global_route_header grh
Definition: ib_packet.h:16
uint32_t dwords[2]
Definition: ib_packet.h:21
static void golan_mcast_detach(struct ib_device *ibdev, struct ib_queue_pair *qp, union ib_gid *gid)
Detach from multicast group.
Definition: golan.c:2114
u32 mkey
Definition: golan.h:335
void ib_link_state_changed(struct ib_device *ibdev)
Notify of Infiniband link state change.
Definition: infiniband.c:637
static int golan_inform_sma(struct ib_device *ibdev, union ib_mad *mad)
Inform embedded subnet management agent of a received MAD.
Definition: golan.c:2143
u8 data[MAILBOX_STRIDE]
Definition: golan.c:123
static void golan_pci_init(struct golan *golan)
Initialise Golan PCI parameters.
Definition: golan.c:917
static mlx_size shomron_get_cqe_size()
Definition: golan.c:2576
unsigned int gid_present
GID is present.
Definition: infiniband.h:90
#define MEM_MBOX
Definition: golan.h:59
static int golan_modify_qp_to_rst(struct ib_device *ibdev, struct ib_queue_pair *qp)
Definition: golan.c:1299
userptr_t addr
Definition: golan.c:55
static void golan_destroy_cq(struct ib_device *ibdev, struct ib_completion_queue *cq)
Destroy completion queue.
Definition: golan.c:1027
#define GOLAN_QP_CTX_RQ_SIZE_BIT
Definition: CIB_PRM.h:896
struct golan_hca_cap caps
Definition: golan.h:327
#define GOLAN_PAS_SIZE
Definition: golan.h:37
static struct golan * golan_alloc()
Definition: golan.c:929
volatile u8 status_own
Definition: CIB_PRM.h:45
__be16 wqe_counter
Definition: CIB_PRM.h:886
#define GOLAN_PRINT_RC_AND_CMD_STATUS
Definition: golan.c:117
void adjust_pci_device(struct pci_device *pci)
Enable PCI device.
Definition: pci.c:149
const char * golan_qp_state_as_string[]
Definition: golan.c:88
An Infiniband Global Identifier.
Definition: ib_packet.h:33
__be32 qpn
Definition: CIB_PRM.h:29
uint32_t cmd_bm
Definition: golan.h:330
struct device dev
Generic device.
Definition: pci.h:189
static void golan_teardown_hca(struct golan *golan, enum golan_teardown op_mod)
Definition: golan.c:659
unsigned long long __be64
Definition: CIB_PRM.h:25
union ib_gid dgid
Destiniation GID.
Definition: ib_packet.h:106
#define ENOTSUP
Operation not supported.
Definition: errno.h:589
A doubly-linked list entry (or list head)
Definition: list.h:18
static int golan_create_qp(struct ib_device *ibdev, struct ib_queue_pair *qp)
Create queue pair.
Definition: golan.c:1235
enum ib_rate rate
Rate.
Definition: infiniband.h:86
Dynamic memory allocation.
#define CMD_OWNER_HW
Definition: CIB_PRM.h:35
#define GOLAN_QP_CTX_ST_BIT
Definition: CIB_PRM.h:892
static void golan_disable_hca(struct golan *golan)
Definition: golan.c:336
An Infiniband device.
Definition: infiniband.h:398
uint8_t status
Status.
Definition: ena.h:16
uint32_t mlx_uint32
#define NO_MBOX
Definition: golan.h:58
u8 fl_mlid
Definition: CIB_PRM.h:1005
static int golan_set_hca_cap(struct golan *golan)
Definition: golan.c:349
#define GOLAN_MKEY_LEN64
Definition: CIB_PRM.h:729
#define DEVICE_IS_CIB(device)
Definition: golan.c:46
static void pci_set_drvdata(struct pci_device *pci, void *priv)
Set PCI driver-private data.
Definition: pci.h:338
#define rmb()
Definition: io.h:484
#define ENOMEM
Not enough space.
Definition: errno.h:534
#define QRY_PAGES_OUT(golan, idx)
Definition: golan.h:68
#define GOLAN_IB_ACCESS_LOCAL_READ
Definition: CIB_PRM.h:727
__be32 flags_rqpn
Definition: CIB_PRM.h:878
u8 vep_number
VEP number.
Definition: golan.h:313
uint8_t bytes[16]
Definition: ib_packet.h:34
void * memcpy(void *dest, const void *src, size_t len) __nonnull
static int golan_query_vport_context(struct ib_device *ibdev)
Definition: golan.c:1575
#define GOLAN_FW_AREAS_NUM
Definition: golan.h:340
__be16 pkey_table_size
Definition: CIB_PRM.h:258
static struct ib_device_operations golan_ib_operations
Golan Infiniband operations.
Definition: golan.c:2323
__be32 syndrome
Definition: CIB_PRM.h:205
u8 port
Port number.
Definition: CIB_PRM.h:31
static __always_inline void * ib_get_drvdata(struct ib_device *ibdev)
Get Infiniband device driver-private data.
Definition: infiniband.h:707
u8 vendor_err_synd
Definition: CIB_PRM.h:865
uint8_t eqn
Definition: golan.h:299
mlx_utils * utils
Definition: golan.h:337
unsigned long qkey
Queue key.
Definition: infiniband.h:79
static __always_inline unsigned long virt_to_bus(volatile const void *addr)
Convert virtual address to a bus address.
Definition: io.h:183
IP over Infiniband.
struct golan_query_hca_vport_context_data context_data
Definition: CIB_PRM.h:30
uint16_t device
Device ID.
Definition: pci.h:204
Definition: golan.h:323
static int golan_ib_open(struct ib_device *ibdev)
Initialise Infiniband link.
Definition: golan.c:2308
__be64 qdata[MAILBOX_STRIDE >> 3]
Definition: golan.c:124
#define be32_to_cpu(value)
Definition: byteswap.h:116
union golan_send_wqe * wqes
Definition: golan.h:172
assert((readw(&hdr->flags) &(GTF_reading|GTF_writing))==0)
struct golan_av datagram
Definition: golan.h:146
Ethernet protocol.
static unsigned int golan_rate(enum ib_rate rate)
Calculate transmission rate.
Definition: golan.c:1438
struct ib_device_operations * op
Infiniband operations.
Definition: infiniband.h:416
void flexboot_nodnic_eth_irq(struct net_device *netdev, int enable)
static int golan_complete(struct ib_device *ibdev, struct ib_completion_queue *cq, struct golan_cqe64 *cqe64)
Definition: golan.c:1690
__be32 qkey
Definition: CIB_PRM.h:998
An Infiniband Work Queue.
Definition: infiniband.h:100
#define GOLAN_IB_ACCESS_LOCAL_WRITE
Definition: CIB_PRM.h:728
__be32 sop_drop_qpn
Definition: CIB_PRM.h:885
static int send_command_and_wait(struct golan *golan, uint32_t cmd_idx, uint32_t inbox_idx, uint32_t outbox_idx, const char *command)
Definition: golan.c:271
static void golan_poll_cq(struct ib_device *ibdev, struct ib_completion_queue *cq)
Definition: golan.c:1815
struct golan_port ports[GOLAN_MAX_PORTS]
Definition: golan.h:339
void ib_complete_send(struct ib_device *ibdev, struct ib_queue_pair *qp, struct io_buffer *iobuf, int rc)
Complete send work queue entry.
Definition: infiniband.c:515
void writel(uint32_t data, volatile uint32_t *io_addr)
Write 32-bit dword to memory-mapped device.
struct golan_hca_init_seg * iseg
Definition: golan.h:325
__be16 wqe_counter
Definition: CIB_PRM.h:36
#define GOLAN_WQE_CTRL_WQE_IDX_BIT
Definition: golan.h:132
uint32_t cons_index
Definition: golan.h:304
struct pci_driver golan_driver __pci_driver
Definition: golan.c:2648
A 16-bit general register.
Definition: registers.h:24
#define GOLAN_SEND_UD_WQE_SIZE
Definition: golan.h:128
uint32_t index
Definition: golan.h:109
golan_teardown
Definition: CIB_PRM.h:342
static uint32_t is_command_finished(struct golan *golan, int idx)
Check if CMD has finished.
Definition: golan.c:228
__be64 data[GOLAN_CMD_PAS_CNT]
Definition: CIB_PRM.h:410
__be32 num_pages
Definition: CIB_PRM.h:31
#define be16_to_cpu(value)
Definition: byteswap.h:115
uint16_t sm_lid
Subnet manager LID.
Definition: infiniband.h:443
static int golan_alloc_pd(struct golan *golan)
Definition: golan.c:815
__be32 out[4]
Definition: CIB_PRM.h:36
#define u8
Definition: igbvf_osdep.h:38
void * wqes
Definition: golan.h:180
unsigned long pci_bar_start(struct pci_device *pci, unsigned int reg)
Find the start of a PCI BAR.
Definition: pci.c:96
static u16 fw_rev_sub(struct golan *golan)
Definition: golan.c:181
#define GOLAN_LOG_MAX_QP
Definition: CIB_PRM.h:36
Definition: golan.c:120
__be32 cmd_dbell
Definition: CIB_PRM.h:59
void * outbox
Definition: golan.h:318
static struct golan_eqe * golan_next_eqe_sw(struct golan_event_queue *eq)
Definition: golan.c:1976
#define MLX_SUCCESS
An Infiniband Global Route Header.
Definition: ib_packet.h:89
__be32 mkey
Definition: CIB_PRM.h:29
struct golan_eqe_cq_err cq_err
Definition: CIB_PRM.h:682
#define CMD_STATUS(golan, idx)
Definition: golan.h:66
__be16 recv_db
Definition: CIB_PRM.h:976
#define EFAULT
Bad address.
Definition: errno.h:393
struct ib_work_queue * ib_find_wq(struct ib_completion_queue *cq, unsigned long qpn, int is_send)
Find work queue belonging to completion queue.
Definition: infiniband.c:396
u8 sub_type
Definition: CIB_PRM.h:695
u32 flags
Definition: golan.h:336
struct golan_wqe_data_seg data
Definition: golan.h:147
static int golan_qp_type_to_st(enum ib_queue_pair_type type)
Definition: golan.c:1060
unsigned int num_wqes
Number of work queue entries.
Definition: infiniband.h:112
static void golan_poll_eq(struct ib_device *ibdev)
Poll event queue.
Definition: golan.c:1989
#define GOLAN_PAGE_SHIFT
Definition: golan.h:51
static struct golan_cmd_layout * get_cmd(struct golan *golan, int idx)
Definition: golan.c:192
uint8_t sm_sl
Subnet manager SL.
Definition: infiniband.h:445
static mlx_status shomron_fill_completion(void *cqe, struct cqe_data *cqe_data)
Definition: golan.c:2532
u8 type
Definition: CIB_PRM.h:693
static void golan_free_fw_areas(struct golan *golan)
Definition: golan.c:58
struct golan_mboxes mboxes
Definition: golan.h:328
static const char * golan_eqe_type_str(u8 type)
Definition: golan.c:1855
#define GOLAN_CQE_OPCODE_NOT_VALID
Definition: golan.h:190
A flexboot_nodnic device.
#define GOLAN_PAGE_MASK
Definition: golan.h:53
static u16 fw_rev_min(struct golan *golan)
Definition: golan.c:176
static u8 xor8_buf(void *buf, int len)
Definition: golan.c:135
unsigned int port
Port number.
Definition: infiniband.h:418
static __always_inline void ibdev_put(struct ib_device *ibdev)
Drop reference to Infiniband device.
Definition: infiniband.h:597
static __always_inline void ib_cq_set_drvdata(struct ib_completion_queue *cq, void *priv)
Set Infiniband completion queue driver-private data.
Definition: infiniband.h:674
union ib_gid sgid
Source GID.
Definition: ib_packet.h:104
static void(* free)(struct refcnt *refcnt))
Definition: refcnt.h:54
struct net_device * netdev
Network device.
Definition: golan.h:311
ib_queue_pair_type
An Infiniband queue pair type.
Definition: infiniband.h:138
#define MANAGE_PAGES_PSA_OFFSET
Definition: golan.h:195
union aes_table_entry entry[256]
Table entries, indexed by S(N)
Definition: aes.c:26
__be32 syndrome
Definition: CIB_PRM.h:30
#define GOLAN_HCR_MAX_WAIT_MS
Definition: golan.h:47
void * zalloc(size_t size)
Allocate cleared memory.
Definition: malloc.c:624
#define GOLAN_CQE_OPCODE_BIT
Definition: golan.h:191
uint8_t subtype
Slow protocols subtype.
Definition: eth_slow.h:12
static void golan_cmd_uninit(struct golan *golan)
Definition: golan.c:586
static unsigned int unsigned int reg
Definition: intel.h:245
size_t grh_size
Size of GRH buffers.
Definition: golan.h:168
A PCI device.
Definition: pci.h:187
static int golan_check_rc_and_cmd_status(struct golan_cmd_layout *cmd, int rc)
Definition: golan.c:98
static size_t iob_len(struct io_buffer *iobuf)
Calculate length of data in an I/O buffer.
Definition: iobuf.h:151
static int golan_modify_qp_rtr_to_rts(struct ib_device *ibdev __attribute__((unused)), struct ib_queue_pair *qp __attribute__((unused)), struct golan_modify_qp_mbox_in_data *in __attribute__((unused)))
Definition: golan.c:1288
struct ib_device * ibdev
Infiniband device.
Definition: golan.h:309
struct ib_device * alloc_ibdev(size_t priv_size)
Allocate Infiniband device.
Definition: infiniband.c:937
#define SHOMRON_GENERATE_CQE
User memory allocation.
#define MLX_GET(_ptr, _field)
Definition: mlx_bitops.h:222
void * virt
Definition: golan.h:110
union ev_data data
Definition: CIB_PRM.h:697
struct golan_uar uar
Definition: golan.h:332
static size_t iob_tailroom(struct io_buffer *iobuf)
Calculate available space at end of an I/O buffer.
Definition: iobuf.h:171
union ib_guid node_guid
Node GUID.
Definition: infiniband.h:437
#define GOLAN_RECV_WQE_SIZE
Definition: golan.h:129
static void golan_remove_normal(struct pci_device *pci)
Definition: golan.c:2428
An Infiniband Completion Queue.
Definition: infiniband.h:224
static void netdev_nullify(struct net_device *netdev)
Stop using a network device.
Definition: netdevice.h:511
static int shomron_nodnic_is_supported(struct pci_device *pci)
Definition: golan.c:2589
#define MLX_FILL_1(_ptr, _index,...)
Definition: mlx_bitops.h:167
u32 addr
Definition: sky2.h:8
size_t mlx_size
#define MLX_FILL_H(_structure_st, _index, _field, _address)
Definition: mlx_bitops.h:240
static void golan_destroy_mkey(struct golan *golan)
Definition: golan.c:892
unsigned char uint8_t
Definition: stdint.h:10
static struct golan_cmd_layout * write_cmd(struct golan *golan, int idx, uint16_t opcode, uint16_t opmod, uint16_t inbox_idx, uint16_t outbox_idx, uint16_t inlen, uint16_t outlen)
Prepare a FW command, In - comamnd idx (Must be valid) writes the command parameters.
Definition: golan.c:284
static int golan_bring_up(struct golan *golan)
Definition: golan.c:2215
static mlx_status shomron_fill_eth_send_wqe(struct ib_device *ibdev, struct ib_queue_pair *qp, struct ib_address_vector *av __attribute__((unused)), struct io_buffer *iobuf, struct nodnic_send_wqbb *wqbb, unsigned long wqe_index)
Definition: golan.c:2478
static const char * golan_eqe_port_subtype_str(u8 subtype)
Definition: golan.c:1903
mlx_uint32 vendor_err_syndrome
static u16 cmdif_rev(struct golan *golan)
Definition: golan.c:186
#define FLEXBOOT_NODNIC_OPCODE_CQ_SEND_ERR
static int golan_query_vport_gid(struct ib_device *ibdev)
Definition: golan.c:1609
static void golan_dealloc_pd(struct golan *golan)
Definition: golan.c:840
#define GOLAN_CHECK_RC_AND_CMD_STATUS(_lable)
Definition: golan.c:111
unsigned long qpn
Queue Pair Number.
Definition: infiniband.h:74
#define SHOMRON_INLINE_HEADERS_SIZE
int register_ibdev(struct ib_device *ibdev)
Register Infiniband device.
Definition: infiniband.c:964
__be32 uarn
Definition: CIB_PRM.h:29
void flexboot_nodnic_remove(struct pci_device *pci)
A PCI device ID list entry.
Definition: pci.h:151
static int golan_init_fw_areas(struct golan *golan)
Definition: golan.c:69
struct ib_queue_pair * qp
Containing queue pair.
Definition: infiniband.h:102
#define VIRT_2_BE64_BUS(addr)
Definition: golan.h:70
static int golan_set_link_speed(struct golan *golan)
Definition: golan.c:2192
unsigned int uint32_t
Definition: stdint.h:12
__be32 grh_gid_fl
Definition: CIB_PRM.h:1010
static int golan_post_send(struct ib_device *ibdev, struct ib_queue_pair *qp, struct ib_address_vector *av, struct io_buffer *iobuf)
Post send work queue entry.
Definition: golan.c:1451
struct golan_event_queue eq
Definition: golan.h:333
unsigned long next_idx
Next work queue entry index.
Definition: infiniband.h:122
#define MLX_FAILED
uint32_t ds
Definition: librm.h:254
struct golan_eqe_port_state port
Definition: CIB_PRM.h:684
uint32_t total_dma_pages
Definition: golan.h:331
#define TRUE
Definition: tlan.h:46
static void send_command(struct golan *golan)
Notify the HW that commands are ready.
Definition: golan.c:265
uint16_t mlx_uint16
u8 token
Definition: CIB_PRM.h:42
static struct xen_remove_from_physmap * remove
Definition: xenmem.h:39
unsigned long next_idx
Next completion queue entry index.
Definition: infiniband.h:240
void __asmcall int val
Definition: setjmp.h:28
static int golan_modify_qp(struct ib_device *ibdev, struct ib_queue_pair *qp)
Definition: golan.c:1336
struct ib_global_route_header * grh
GRH buffers (if applicable)
Definition: golan.h:166
__be32 cmdq_addr_l_sz
Definition: CIB_PRM.h:58
An Infiniband Queue Pair.
Definition: infiniband.h:157
#define MAX_PASE_MBOX
Definition: golan.h:64
uint32_t hdr
Message header.
Definition: intelvf.h:12
ib_rate
Infiniband transmission rates.
Definition: infiniband.h:59
static void golan_calc_sig(struct golan *golan, uint32_t cmd_idx, uint32_t inbox_idx, uint32_t outbox_idx)
Definition: golan.c:197
unsigned int sl
Service level.
Definition: infiniband.h:88
u8 rgid[16]
Definition: CIB_PRM.h:1011
FILE_LICENCE(GPL2_OR_LATER)
Network device management.
static int golan_alloc_uar(struct golan *golan)
Definition: golan.c:677
#define __unused
Declare a variable or data structure as unused.
Definition: compiler.h:573
static void * pci_get_drvdata(struct pci_device *pci)
Get PCI driver-private data.
Definition: pci.h:348
uint32_t npages
Definition: golan.h:117
struct arbelprm_qp_db_record qp
Definition: arbel.h:13
void mdelay(unsigned long msecs)
Delay for a fixed number of milliseconds.
Definition: timer.c:78
static int golan_provide_pages(struct golan *golan, uint32_t pages, __be16 func_id, struct golan_firmware_area *fw_area)
Definition: golan.c:445
#define cpu_to_be32(value)
Definition: byteswap.h:110
struct flexboot_nodnic_callbacks shomron_nodnic_callbacks
Definition: golan.c:2580
__be16 log_max_qp
Definition: CIB_PRM.h:232
__be64 * doorbell_record
Definition: golan.h:211
struct golan_send_wq sq
Definition: golan.h:183
static int golan_query_vport_pkey(struct ib_device *ibdev)
Definition: golan.c:1637
__be32 dqp_dct
Definition: CIB_PRM.h:1003
__be64 flags
Definition: CIB_PRM.h:267
#define GOLAN_EQ_DOORBELL_OFFSET
Definition: golan.h:218
u16 log_stride
Definition: golan.h:104
uint32_t __be32
Definition: CIB_PRM.h:26
#define MAILBOX_SIZE
Definition: CIB_PRM.h:1165
signed int int32_t
Definition: stdint.h:17
static int golan_create_mkey(struct golan *golan)
Definition: golan.c:861
mlx_uint32 syndrome
#define DEF_CMD_IDX
Definition: golan.h:56
struct golan_recv_wqe_ud * wqes
Definition: golan.h:160
#define UNULL
Equivalent of NULL for user pointers.
Definition: uaccess.h:36
General configuration.
__be32 cmdif_rev_fw_sub
Definition: CIB_PRM.h:55
static __always_inline void ufree(userptr_t userptr)
Free external memory.
Definition: umalloc.h:65
uint32_t len
Length.
Definition: ena.h:14
uint8_t unused[32]
Unused.
Definition: eltorito.h:15
static __always_inline userptr_t umalloc(size_t size)
Allocate external memory.
Definition: umalloc.h:54
static unsigned int ctrl
Definition: intel.h:270
struct flexboot_nodnic_port port[FLEXBOOT_NODNIC_MAX_PORTS]
flexboot_nodnic ports
#define DBGC2(...)
Definition: compiler.h:522
#define GOLAN_SEND_OPCODE
Definition: golan.h:131
int(* probe)(struct pci_device *pci)
Probe device.
Definition: pci.h:237
static int golan_set_access_reg(struct golan *golan, uint32_t reg)
Definition: golan.c:572
u8 log_max_qp_sz
Definition: CIB_PRM.h:231
int size
Definition: golan.h:162
void * data
Start of data.
Definition: iobuf.h:44
static void golan_dealloc_uar(struct golan *golan)
Definition: golan.c:706
__be32 pdn
Definition: CIB_PRM.h:29
static void golan_bring_down(struct golan *golan)
Definition: golan.c:2171
int size
Definition: golan.h:174
struct golan_cqe64 * cqes
Definition: golan.h:209
union ib_gid gid
GID, if present.
Definition: infiniband.h:92
void ib_poll_cq(struct ib_device *ibdev, struct ib_completion_queue *cq)
Poll completion queue.
Definition: infiniband.c:161
#define USR_2_BE64_BUS(addr)
Definition: golan.h:72
static void *__malloc malloc_dma(size_t size, size_t phys_align)
Allocate memory for DMA.
Definition: malloc.h:66
#define CMD_SYND(golan, idx)
Definition: golan.h:67
uint8_t port_state
Port state.
Definition: infiniband.h:423
u8 eqn
Definition: CIB_PRM.h:30
static int golan_create_eq(struct golan *golan)
Definition: golan.c:736
unsigned long cqn
Completion queue number.
Definition: infiniband.h:230
struct list_head list
Definition: golan.c:54
void iounmap(volatile const void *io_addr)
Unmap I/O address.
#define cpu_to_be64(value)
Definition: byteswap.h:111
unsigned int num_cqes
Number of completion queue entries.
Definition: infiniband.h:232
static int golan_core_enable_hca(struct golan *golan)
Definition: golan.c:319
golan_manage_pages_mode
Definition: CIB_PRM.h:70
void * inbox
Definition: golan.h:317
A management datagram.
Definition: ib_mad.h:610
__be16 rlid
Definition: CIB_PRM.h:1006
A flexboot nodnic port.
static int golan_modify_qp_init_to_rtr(struct ib_device *ibdev __attribute__((unused)), struct ib_queue_pair *qp __attribute__((unused)), struct golan_modify_qp_mbox_in_data *in)
Definition: golan.c:1278
static void golan_eq_update_ci(struct golan_event_queue *eq, int arm)
Definition: golan.c:727
struct shomronprm_wqe_segment_ctrl_send ctrl
void ib_complete_recv(struct ib_device *ibdev, struct ib_queue_pair *qp, struct ib_address_vector *dest, struct ib_address_vector *source, struct io_buffer *iobuf, int rc)
Complete receive work queue entry.
Definition: infiniband.c:536
static const char * cmd_status_str(u8 status)
Definition: golan.c:147
void free_mlx_utils(mlx_utils **utils)
static int golan_cmd_init(struct golan *golan)
Initialise Golan Command Q parameters – Alocate a 4kb page for the Command Q – Read the stride and lo...
Definition: golan.c:600
struct golan_recv_wq rq
Definition: golan.h:182
static void golan_destroy_qp(struct ib_device *ibdev, struct ib_queue_pair *qp)
Destroy queue pair.
Definition: golan.c:1390
An Infiniband Address Vector.
Definition: infiniband.h:72
mlx_boolean owner
int mlx_status
struct golan_send_wqe_ud ud
Definition: golan.h:151
static int golan_modify_qp_rst_to_init(struct ib_device *ibdev, struct ib_queue_pair *qp __attribute__((unused)), struct golan_modify_qp_mbox_in_data *in)
Definition: golan.c:1262
Network interface management.
__be32 uar_page_sz
Definition: CIB_PRM.h:288
#define MAILBOX_STRIDE
Definition: CIB_PRM.h:31
int flexboot_nodnic_probe(struct pci_device *pci, struct flexboot_nodnic_callbacks *callbacks, void *drv_priv __attribute__((unused)))
mlx_status nodnic_port_get_qpn(IN nodnic_port_priv *port_priv, IN struct nodnic_ring *ring, OUT mlx_uint32 *qpn)
Definition: mlx_port.c:796
#define GOLAN_QP_CTX_SQ_SIZE_BIT
Definition: CIB_PRM.h:897
unsigned long phys
Definition: golan.h:111
static int golan_register_ibdev(struct golan_port *port)
Definition: golan.c:2153
Infiniband Subnet Management Client.
union golan_av::@373 key
u8 stat_rate_sl
Definition: CIB_PRM.h:1004
struct golan_qp_db * doorbell_record
Definition: golan.h:184
__be32 inlen
Definition: CIB_PRM.h:30
unsigned int lid
Local ID.
Definition: infiniband.h:81
mlx_uint32 qpn
struct golan_cmdq_md cmd
Definition: golan.h:326
struct arbelprm_port_state_change_st data
Message.
Definition: arbel.h:12
#define GOLAN_MBOX_IN(cmd_ptr, in_ptr)
Definition: golan.h:78
mlx_uint32 wqe_counter
#define GOLAN_SEND_WQE_BB_SIZE
Definition: golan.h:127
#define DBG(...)
Print a debugging message.
Definition: compiler.h:498
void * ioremap(unsigned long bus_addr, size_t len)
Map bus address as an I/O address.
__be32 byte_cnt
Definition: CIB_PRM.h:883
#define GOLAN_PORT_BASE
Definition: golan.h:42
__be32 cqn
Definition: CIB_PRM.h:29
enum golan_ib_qp_state state
Definition: golan.h:186
#define MLX_CHECK_STATUS(id, status, label, message)
Definition: mlx_bail.h:37
#define fls(x)
Find last (i.e.
Definition: strings.h:166
static int golan_post_recv(struct ib_device *ibdev, struct ib_queue_pair *qp, struct io_buffer *iobuf)
Post receive work queue entry.
Definition: golan.c:1527
u8 gid[16]
Definition: CIB_PRM.h:31
static void free_dma(void *ptr, size_t size)
Free memory allocated with malloc_dma()
Definition: malloc.h:81
#define FLEXBOOT_NODNIC_OPCODE_SEND
#define GOLAN_CQE_OWNER_MASK
Definition: golan.h:193
#define GOLAN_PCI_CONFIG_BAR_SIZE
Definition: golan.h:35
static __always_inline void * ib_cq_get_drvdata(struct ib_completion_queue *cq)
Get Infiniband completion queue driver-private data.
Definition: infiniband.h:685
struct golan_eqe * eqes
Definition: golan.h:301
struct ib_device * ibdev
Containing Infiniband device.
Definition: infiniband.h:226
#define NULL
NULL pointer (VOID *)
Definition: Base.h:362
struct golan_eqe_cmd cmd
Definition: CIB_PRM.h:29
static void golan_remove(struct pci_device *pci)
Definition: golan.c:2621
#define PCI_ROM(_vendor, _device, _name, _description, _data)
Definition: pci.h:283
u16 size
Definition: golan.h:105
void * addr
Definition: golan.h:103
__be32 byte_cnt
Definition: CIB_PRM.h:37
static uint16_t fw_rev_maj(struct golan *golan)
Definition: golan.c:171
static int golan_create_cq(struct ib_device *ibdev, struct ib_completion_queue *cq)
Create completion queue.
Definition: golan.c:948
static int golan_ib_update(struct ib_device *ibdev)
Update Infiniband parameters using Commands.
Definition: golan.c:1931
void writeq(uint64_t data, volatile uint64_t *io_addr)
Write 64-bit qword to memory-mapped device.
static int golan_mcast_attach(struct ib_device *ibdev, struct ib_queue_pair *qp, union ib_gid *gid)
Attach to multicast group.
Definition: golan.c:2074
userptr_t area
Firmware area in external memory.
Definition: golan.h:124
uint8_t u8
Definition: stdint.h:19
uint32_t u32
Definition: stdint.h:23
union ib_mad mad
Definition: arbel.h:12
static __always_inline void ib_qp_set_drvdata(struct ib_queue_pair *qp, void *priv)
Set Infiniband queue pair driver-private data.
Definition: infiniband.h:630
int(* create_cq)(struct ib_device *ibdev, struct ib_completion_queue *cq)
Create completion queue.
Definition: infiniband.h:261
#define DB_BUFFER0_ODD_OFFSET
Definition: golan.h:220
#define GOLAN_PAGE_SIZE
Definition: golan.h:52
unsigned long userptr_t
A pointer to a user buffer.
Definition: uaccess.h:33
__be32 * doorbell
Definition: golan.h:303
mlx_uint32 is_send
String functions.
#define GET_INBOX(golan, idx)
Definition: golan.h:75
uint16_t __be16
Definition: CIB_PRM.h:27
void * memset(void *dest, int character, size_t len) __nonnull
#define GOLAN_PCI_CMD_XPORT
Definition: CIB_PRM.h:34
A flexboot nodnic queue pair.
static unsigned int unsigned int mem
Definition: intel.h:271
A persistent I/O buffer.
Definition: iobuf.h:32
static int golan_handle_pages(struct golan *golan, enum golan_qry_pages_mode qry, enum golan_manage_pages_mode mode)
Definition: golan.c:523
golan_ib_qp_state
Definition: golan.h:134
volatile u8 status_own
Definition: CIB_PRM.h:199
struct io_buffer ** iobufs
I/O buffers assigned to work queue.
Definition: infiniband.h:124