iPXE
hermon.c
Go to the documentation of this file.
1 /*
2  * Copyright (C) 2008 Michael Brown <mbrown@fensystems.co.uk>.
3  * Copyright (C) 2008 Mellanox Technologies Ltd.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation; either version 2 of the
8  * License, or any later version.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13  * General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write to the Free Software
17  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18  * 02110-1301, USA.
19  */
20 
21 FILE_LICENCE ( GPL2_OR_LATER );
22 
23 #include <stdint.h>
24 #include <stdlib.h>
25 #include <stdio.h>
26 #include <string.h>
27 #include <strings.h>
28 #include <unistd.h>
29 #include <errno.h>
30 #include <byteswap.h>
31 #include <ipxe/io.h>
32 #include <ipxe/pci.h>
33 #include <ipxe/pcibackup.h>
34 #include <ipxe/malloc.h>
35 #include <ipxe/umalloc.h>
36 #include <ipxe/iobuf.h>
37 #include <ipxe/netdevice.h>
38 #include <ipxe/infiniband.h>
39 #include <ipxe/ib_smc.h>
40 #include <ipxe/if_ether.h>
41 #include <ipxe/ethernet.h>
42 #include <ipxe/fcoe.h>
43 #include <ipxe/vlan.h>
44 #include <ipxe/bofm.h>
45 #include <ipxe/nvsvpd.h>
46 #include <ipxe/nvo.h>
47 #include "hermon.h"
48 
49 /**
50  * @file
51  *
52  * Mellanox Hermon Infiniband HCA
53  *
54  */
55 
56 /***************************************************************************
57  *
58  * Queue number allocation
59  *
60  ***************************************************************************
61  */
62 
63 /**
64  * Allocate offsets within usage bitmask
65  *
66  * @v bits Usage bitmask
67  * @v bits_len Length of usage bitmask
68  * @v num_bits Number of contiguous bits to allocate within bitmask
69  * @ret bit First free bit within bitmask, or negative error
70  */
72  unsigned int bits_len,
73  unsigned int num_bits ) {
74  unsigned int bit = 0;
75  hermon_bitmask_t mask = 1;
76  unsigned int found = 0;
77 
78  /* Search bits for num_bits contiguous free bits */
79  while ( bit < bits_len ) {
80  if ( ( mask & *bits ) == 0 ) {
81  if ( ++found == num_bits )
82  goto found;
83  } else {
84  found = 0;
85  }
86  bit++;
87  mask = ( mask << 1 ) | ( mask >> ( 8 * sizeof ( mask ) - 1 ) );
88  if ( mask == 1 )
89  bits++;
90  }
91  return -ENFILE;
92 
93  found:
94  /* Mark bits as in-use */
95  do {
96  *bits |= mask;
97  if ( mask == 1 )
98  bits--;
99  mask = ( mask >> 1 ) | ( mask << ( 8 * sizeof ( mask ) - 1 ) );
100  } while ( --found );
101 
102  return ( bit - num_bits + 1 );
103 }
104 
105 /**
106  * Free offsets within usage bitmask
107  *
108  * @v bits Usage bitmask
109  * @v bit Starting bit within bitmask
110  * @v num_bits Number of contiguous bits to free within bitmask
111  */
113  int bit, unsigned int num_bits ) {
114  hermon_bitmask_t mask;
115 
116  for ( ; num_bits ; bit++, num_bits-- ) {
117  mask = ( 1 << ( bit % ( 8 * sizeof ( mask ) ) ) );
118  bits[ ( bit / ( 8 * sizeof ( mask ) ) ) ] &= ~mask;
119  }
120 }
121 
122 /***************************************************************************
123  *
124  * HCA commands
125  *
126  ***************************************************************************
127  */
128 
129 /**
130  * Wait for Hermon command completion
131  *
132  * @v hermon Hermon device
133  * @v hcr HCA command registers
134  * @ret rc Return status code
135  */
136 static int hermon_cmd_wait ( struct hermon *hermon,
137  struct hermonprm_hca_command_register *hcr ) {
138  unsigned int wait;
139 
140  for ( wait = ( 100 * HERMON_HCR_MAX_WAIT_MS ) ; wait ; wait-- ) {
141  hcr->u.dwords[6] =
142  readl ( hermon->config + HERMON_HCR_REG ( 6 ) );
143  if ( ( MLX_GET ( hcr, go ) == 0 ) &&
144  ( MLX_GET ( hcr, t ) == hermon->toggle ) )
145  return 0;
146  udelay ( 10 );
147  }
148  return -EBUSY;
149 }
150 
151 /**
152  * Issue HCA command
153  *
154  * @v hermon Hermon device
155  * @v command Command opcode, flags and input/output lengths
156  * @v op_mod Opcode modifier (0 if no modifier applicable)
157  * @v in Input parameters
158  * @v in_mod Input modifier (0 if no modifier applicable)
159  * @v out Output parameters
160  * @ret rc Return status code
161  */
162 static int hermon_cmd ( struct hermon *hermon, unsigned long command,
163  unsigned int op_mod, const void *in,
164  unsigned int in_mod, void *out ) {
165  struct hermonprm_hca_command_register hcr;
166  unsigned int opcode = HERMON_HCR_OPCODE ( command );
167  size_t in_len = HERMON_HCR_IN_LEN ( command );
168  size_t out_len = HERMON_HCR_OUT_LEN ( command );
169  void *in_buffer;
170  void *out_buffer;
171  unsigned int status;
172  unsigned int i;
173  int rc;
174 
175  assert ( in_len <= HERMON_MBOX_SIZE );
176  assert ( out_len <= HERMON_MBOX_SIZE );
177 
178  DBGC2 ( hermon, "Hermon %p command %04x in %zx%s out %zx%s\n",
179  hermon, opcode, in_len,
180  ( ( command & HERMON_HCR_IN_MBOX ) ? "(mbox)" : "" ), out_len,
181  ( ( command & HERMON_HCR_OUT_MBOX ) ? "(mbox)" : "" ) );
182 
183  /* Check that HCR is free */
184  if ( ( rc = hermon_cmd_wait ( hermon, &hcr ) ) != 0 ) {
185  DBGC ( hermon, "Hermon %p command interface locked\n",
186  hermon );
187  return rc;
188  }
189 
190  /* Flip HCR toggle */
191  hermon->toggle = ( 1 - hermon->toggle );
192 
193  /* Prepare HCR */
194  memset ( &hcr, 0, sizeof ( hcr ) );
195  in_buffer = &hcr.u.dwords[0];
196  if ( in_len && ( command & HERMON_HCR_IN_MBOX ) ) {
198  in_buffer = hermon->mailbox_in;
199  MLX_FILL_H ( &hcr, 0, in_param_h, virt_to_bus ( in_buffer ) );
200  MLX_FILL_1 ( &hcr, 1, in_param_l, virt_to_bus ( in_buffer ) );
201  }
202  memcpy ( in_buffer, in, in_len );
203  MLX_FILL_1 ( &hcr, 2, input_modifier, in_mod );
204  out_buffer = &hcr.u.dwords[3];
205  if ( out_len && ( command & HERMON_HCR_OUT_MBOX ) ) {
206  out_buffer = hermon->mailbox_out;
207  MLX_FILL_H ( &hcr, 3, out_param_h,
208  virt_to_bus ( out_buffer ) );
209  MLX_FILL_1 ( &hcr, 4, out_param_l,
210  virt_to_bus ( out_buffer ) );
211  }
212  MLX_FILL_4 ( &hcr, 6,
213  opcode, opcode,
214  opcode_modifier, op_mod,
215  go, 1,
216  t, hermon->toggle );
218  &hcr, sizeof ( hcr ) );
219  if ( in_len && ( command & HERMON_HCR_IN_MBOX ) ) {
220  DBGC2 ( hermon, "Input mailbox:\n" );
221  DBGC2_HDA ( hermon, virt_to_phys ( in_buffer ), in_buffer,
222  ( ( in_len < 512 ) ? in_len : 512 ) );
223  }
224 
225  /* Issue command */
226  for ( i = 0 ; i < ( sizeof ( hcr ) / sizeof ( hcr.u.dwords[0] ) ) ;
227  i++ ) {
228  writel ( hcr.u.dwords[i],
229  hermon->config + HERMON_HCR_REG ( i ) );
230  barrier();
231  }
232 
233  /* Wait for command completion */
234  if ( ( rc = hermon_cmd_wait ( hermon, &hcr ) ) != 0 ) {
235  DBGC ( hermon, "Hermon %p timed out waiting for command "
236  "%04x:\n", hermon, opcode );
237  DBGC_HDA ( hermon,
239  &hcr, sizeof ( hcr ) );
240  return rc;
241  }
242 
243  /* Check command status */
244  status = MLX_GET ( &hcr, status );
245  if ( status != 0 ) {
246  DBGC ( hermon, "Hermon %p command %04x failed with status "
247  "%02x:\n", hermon, opcode, status );
248  DBGC_HDA ( hermon,
250  &hcr, sizeof ( hcr ) );
251  return -EIO;
252  }
253 
254  /* Read output parameters, if any */
255  hcr.u.dwords[3] = readl ( hermon->config + HERMON_HCR_REG ( 3 ) );
256  hcr.u.dwords[4] = readl ( hermon->config + HERMON_HCR_REG ( 4 ) );
257  memcpy ( out, out_buffer, out_len );
258  if ( out_len ) {
259  DBGC2 ( hermon, "Output%s:\n",
260  ( command & HERMON_HCR_OUT_MBOX ) ? " mailbox" : "" );
261  DBGC2_HDA ( hermon, virt_to_phys ( out_buffer ), out_buffer,
262  ( ( out_len < 512 ) ? out_len : 512 ) );
263  }
264 
265  return 0;
266 }
267 
268 static inline int
270  struct hermonprm_query_dev_cap *dev_cap ) {
271  return hermon_cmd ( hermon,
273  1, sizeof ( *dev_cap ) ),
274  0, NULL, 0, dev_cap );
275 }
276 
277 static inline int
278 hermon_cmd_query_fw ( struct hermon *hermon, struct hermonprm_query_fw *fw ) {
279  return hermon_cmd ( hermon,
281  1, sizeof ( *fw ) ),
282  0, NULL, 0, fw );
283 }
284 
285 static inline int
287  const struct hermonprm_init_hca *init_hca ) {
288  return hermon_cmd ( hermon,
290  1, sizeof ( *init_hca ) ),
291  0, init_hca, 0, NULL );
292 }
293 
294 static inline int
296  return hermon_cmd ( hermon,
298  0, NULL, 0, NULL );
299 }
300 
301 static inline int
302 hermon_cmd_init_port ( struct hermon *hermon, unsigned int port ) {
303  return hermon_cmd ( hermon,
305  0, NULL, port, NULL );
306 }
307 
308 static inline int
309 hermon_cmd_close_port ( struct hermon *hermon, unsigned int port ) {
310  return hermon_cmd ( hermon,
312  0, NULL, port, NULL );
313 }
314 
315 static inline int
316 hermon_cmd_set_port ( struct hermon *hermon, int is_ethernet,
317  unsigned int port_selector,
318  const union hermonprm_set_port *set_port ) {
319  return hermon_cmd ( hermon,
321  1, sizeof ( *set_port ) ),
322  is_ethernet, set_port, port_selector, NULL );
323 }
324 
325 static inline int
326 hermon_cmd_sw2hw_mpt ( struct hermon *hermon, unsigned int index,
327  const struct hermonprm_mpt *mpt ) {
328  return hermon_cmd ( hermon,
330  1, sizeof ( *mpt ) ),
331  0, mpt, index, NULL );
332 }
333 
334 static inline int
335 hermon_cmd_hw2sw_mpt ( struct hermon *hermon, unsigned int index ) {
336  return hermon_cmd ( hermon,
338  0, NULL, index, NULL );
339 }
340 
341 static inline int
343  const struct hermonprm_write_mtt *write_mtt ) {
344  return hermon_cmd ( hermon,
346  1, sizeof ( *write_mtt ) ),
347  0, write_mtt, 1, NULL );
348 }
349 
350 static inline int
351 hermon_cmd_map_eq ( struct hermon *hermon, unsigned long index_map,
352  const struct hermonprm_event_mask *mask ) {
353  return hermon_cmd ( hermon,
355  0, sizeof ( *mask ) ),
356  0, mask, index_map, NULL );
357 }
358 
359 static inline int
360 hermon_cmd_sw2hw_eq ( struct hermon *hermon, unsigned int index,
361  const struct hermonprm_eqc *eqctx ) {
362  return hermon_cmd ( hermon,
364  1, sizeof ( *eqctx ) ),
365  0, eqctx, index, NULL );
366 }
367 
368 static inline int
369 hermon_cmd_hw2sw_eq ( struct hermon *hermon, unsigned int index,
370  struct hermonprm_eqc *eqctx ) {
371  return hermon_cmd ( hermon,
373  1, sizeof ( *eqctx ) ),
374  1, NULL, index, eqctx );
375 }
376 
377 static inline int
378 hermon_cmd_query_eq ( struct hermon *hermon, unsigned int index,
379  struct hermonprm_eqc *eqctx ) {
380  return hermon_cmd ( hermon,
382  1, sizeof ( *eqctx ) ),
383  0, NULL, index, eqctx );
384 }
385 
386 static inline int
387 hermon_cmd_sw2hw_cq ( struct hermon *hermon, unsigned long cqn,
388  const struct hermonprm_completion_queue_context *cqctx ){
389  return hermon_cmd ( hermon,
391  1, sizeof ( *cqctx ) ),
392  0, cqctx, cqn, NULL );
393 }
394 
395 static inline int
396 hermon_cmd_hw2sw_cq ( struct hermon *hermon, unsigned long cqn,
397  struct hermonprm_completion_queue_context *cqctx ) {
398  return hermon_cmd ( hermon,
400  1, sizeof ( *cqctx ) ),
401  0, NULL, cqn, cqctx );
402 }
403 
404 static inline int
405 hermon_cmd_query_cq ( struct hermon *hermon, unsigned long cqn,
406  struct hermonprm_completion_queue_context *cqctx ) {
407  return hermon_cmd ( hermon,
409  1, sizeof ( *cqctx ) ),
410  0, NULL, cqn, cqctx );
411 }
412 
413 static inline int
414 hermon_cmd_rst2init_qp ( struct hermon *hermon, unsigned long qpn,
415  const struct hermonprm_qp_ee_state_transitions *ctx ){
416  return hermon_cmd ( hermon,
418  1, sizeof ( *ctx ) ),
419  0, ctx, qpn, NULL );
420 }
421 
422 static inline int
423 hermon_cmd_init2rtr_qp ( struct hermon *hermon, unsigned long qpn,
424  const struct hermonprm_qp_ee_state_transitions *ctx ){
425  return hermon_cmd ( hermon,
427  1, sizeof ( *ctx ) ),
428  0, ctx, qpn, NULL );
429 }
430 
431 static inline int
432 hermon_cmd_rtr2rts_qp ( struct hermon *hermon, unsigned long qpn,
433  const struct hermonprm_qp_ee_state_transitions *ctx ) {
434  return hermon_cmd ( hermon,
436  1, sizeof ( *ctx ) ),
437  0, ctx, qpn, NULL );
438 }
439 
440 static inline int
441 hermon_cmd_rts2rts_qp ( struct hermon *hermon, unsigned long qpn,
442  const struct hermonprm_qp_ee_state_transitions *ctx ) {
443  return hermon_cmd ( hermon,
445  1, sizeof ( *ctx ) ),
446  0, ctx, qpn, NULL );
447 }
448 
449 static inline int
450 hermon_cmd_2rst_qp ( struct hermon *hermon, unsigned long qpn ) {
451  return hermon_cmd ( hermon,
453  0x03, NULL, qpn, NULL );
454 }
455 
456 static inline int
457 hermon_cmd_query_qp ( struct hermon *hermon, unsigned long qpn,
458  struct hermonprm_qp_ee_state_transitions *ctx ) {
459  return hermon_cmd ( hermon,
461  1, sizeof ( *ctx ) ),
462  0, NULL, qpn, ctx );
463 }
464 
465 static inline int
466 hermon_cmd_conf_special_qp ( struct hermon *hermon, unsigned int internal_qps,
467  unsigned long base_qpn ) {
468  return hermon_cmd ( hermon,
470  internal_qps, NULL, base_qpn, NULL );
471 }
472 
473 static inline int
474 hermon_cmd_mad_ifc ( struct hermon *hermon, unsigned int port,
475  union hermonprm_mad *mad ) {
476  return hermon_cmd ( hermon,
478  1, sizeof ( *mad ),
479  1, sizeof ( *mad ) ),
480  0x03, mad, port, mad );
481 }
482 
483 static inline int
484 hermon_cmd_read_mcg ( struct hermon *hermon, unsigned int index,
485  struct hermonprm_mcg_entry *mcg ) {
486  return hermon_cmd ( hermon,
488  1, sizeof ( *mcg ) ),
489  0, NULL, index, mcg );
490 }
491 
492 static inline int
493 hermon_cmd_write_mcg ( struct hermon *hermon, unsigned int index,
494  const struct hermonprm_mcg_entry *mcg ) {
495  return hermon_cmd ( hermon,
497  1, sizeof ( *mcg ) ),
498  0, mcg, index, NULL );
499 }
500 
501 static inline int
502 hermon_cmd_mgid_hash ( struct hermon *hermon, const union ib_gid *gid,
503  struct hermonprm_mgm_hash *hash ) {
504  return hermon_cmd ( hermon,
506  1, sizeof ( *gid ),
507  0, sizeof ( *hash ) ),
508  0, gid, 0, hash );
509 }
510 
511 static inline int
512 hermon_cmd_mod_stat_cfg ( struct hermon *hermon, unsigned int mode,
513  unsigned int input_mod,
514  struct hermonprm_scalar_parameter *portion ) {
515  return hermon_cmd ( hermon,
517  0, sizeof ( *portion ),
518  0, sizeof ( *portion ) ),
519  mode, portion, input_mod, portion );
520 }
521 
522 static inline int
523 hermon_cmd_query_port ( struct hermon *hermon, unsigned int port,
524  struct hermonprm_query_port_cap *query_port ) {
525  return hermon_cmd ( hermon,
527  1, sizeof ( *query_port ) ),
528  0, NULL, port, query_port );
529 }
530 
531 static inline int
532 hermon_cmd_sense_port ( struct hermon *hermon, unsigned int port,
533  struct hermonprm_sense_port *port_type ) {
534  return hermon_cmd ( hermon,
536  0, sizeof ( *port_type ) ),
537  0, NULL, port, port_type );
538 }
539 
540 static inline int
542  return hermon_cmd ( hermon,
544  0, NULL, 0, NULL );
545 }
546 
547 static inline int
548 hermon_cmd_unmap_icm ( struct hermon *hermon, unsigned int page_count,
549  const struct hermonprm_scalar_parameter *offset ) {
550  return hermon_cmd ( hermon,
552  0, sizeof ( *offset ) ),
553  0, offset, page_count, NULL );
554 }
555 
556 static inline int
558  const struct hermonprm_virtual_physical_mapping *map ) {
559  return hermon_cmd ( hermon,
561  1, sizeof ( *map ) ),
562  0, map, 1, NULL );
563 }
564 
565 static inline int
567  return hermon_cmd ( hermon,
569  0, NULL, 0, NULL );
570 }
571 
572 static inline int
574  const struct hermonprm_virtual_physical_mapping *map ) {
575  return hermon_cmd ( hermon,
577  1, sizeof ( *map ) ),
578  0, map, 1, NULL );
579 }
580 
581 static inline int
583  const struct hermonprm_scalar_parameter *icm_size,
584  struct hermonprm_scalar_parameter *icm_aux_size ) {
585  return hermon_cmd ( hermon,
587  0, sizeof ( *icm_size ),
588  0, sizeof (*icm_aux_size) ),
589  0, icm_size, 0, icm_aux_size );
590 }
591 
592 static inline int
594  return hermon_cmd ( hermon,
596  0, NULL, 0, NULL );
597 }
598 
599 static inline int
601  const struct hermonprm_virtual_physical_mapping *map ) {
602  return hermon_cmd ( hermon,
604  1, sizeof ( *map ) ),
605  0, map, 1, NULL );
606 }
607 
608 /***************************************************************************
609  *
610  * Memory translation table operations
611  *
612  ***************************************************************************
613  */
614 
615 /**
616  * Allocate MTT entries
617  *
618  * @v hermon Hermon device
619  * @v memory Memory to map into MTT
620  * @v len Length of memory to map
621  * @v mtt MTT descriptor to fill in
622  * @ret rc Return status code
623  */
624 static int hermon_alloc_mtt ( struct hermon *hermon,
625  const void *memory, size_t len,
626  struct hermon_mtt *mtt ) {
627  struct hermonprm_write_mtt write_mtt;
630  unsigned int page_offset;
631  unsigned int num_pages;
632  int mtt_offset;
633  unsigned int mtt_base_addr;
634  unsigned int i;
635  int rc;
636 
637  /* Find available MTT entries */
638  start = virt_to_phys ( memory );
639  page_offset = ( start & ( HERMON_PAGE_SIZE - 1 ) );
640  start -= page_offset;
641  len += page_offset;
644  num_pages );
645  if ( mtt_offset < 0 ) {
646  rc = mtt_offset;
647  DBGC ( hermon, "Hermon %p could not allocate %d MTT entries: "
648  "%s\n", hermon, num_pages, strerror ( rc ) );
649  goto err_mtt_offset;
650  }
651  mtt_base_addr = ( ( hermon->cap.reserved_mtts + mtt_offset ) *
653  addr = start;
654 
655  /* Fill in MTT structure */
656  mtt->mtt_offset = mtt_offset;
657  mtt->num_pages = num_pages;
658  mtt->mtt_base_addr = mtt_base_addr;
659  mtt->page_offset = page_offset;
660 
661  /* Construct and issue WRITE_MTT commands */
662  for ( i = 0 ; i < num_pages ; i++ ) {
663  memset ( &write_mtt, 0, sizeof ( write_mtt ) );
664  MLX_FILL_1 ( &write_mtt.mtt_base_addr, 1,
665  value, mtt_base_addr );
666  MLX_FILL_H ( &write_mtt.mtt, 0, ptag_h, addr );
667  MLX_FILL_2 ( &write_mtt.mtt, 1,
668  p, 1,
669  ptag_l, ( addr >> 3 ) );
670  if ( ( rc = hermon_cmd_write_mtt ( hermon,
671  &write_mtt ) ) != 0 ) {
672  DBGC ( hermon, "Hermon %p could not write MTT at %x: "
673  "%s\n", hermon, mtt_base_addr,
674  strerror ( rc ) );
675  goto err_write_mtt;
676  }
679  }
680 
681  DBGC ( hermon, "Hermon %p MTT entries [%#x,%#x] for "
682  "[%08lx,%08lx,%08lx,%08lx)\n", hermon, mtt->mtt_offset,
683  ( mtt->mtt_offset + mtt->num_pages - 1 ), start,
684  ( start + page_offset ), ( start + len ), addr );
685 
686  return 0;
687 
688  err_write_mtt:
689  hermon_bitmask_free ( hermon->mtt_inuse, mtt_offset, num_pages );
690  err_mtt_offset:
691  return rc;
692 }
693 
694 /**
695  * Free MTT entries
696  *
697  * @v hermon Hermon device
698  * @v mtt MTT descriptor
699  */
700 static void hermon_free_mtt ( struct hermon *hermon,
701  struct hermon_mtt *mtt ) {
702 
703  DBGC ( hermon, "Hermon %p MTT entries [%#x,%#x] freed\n",
704  hermon, mtt->mtt_offset,
705  ( mtt->mtt_offset + mtt->num_pages - 1 ) );
706  hermon_bitmask_free ( hermon->mtt_inuse, mtt->mtt_offset,
707  mtt->num_pages );
708 }
709 
710 /***************************************************************************
711  *
712  * Static configuration operations
713  *
714  ***************************************************************************
715  */
716 
717 /**
718  * Calculate offset within static configuration
719  *
720  * @v field Field
721  * @ret offset Offset
722  */
#define HERMON_MOD_STAT_CFG_OFFSET( field )				     \
	( ( MLX_BIT_OFFSET ( struct hermonprm_mod_stat_cfg_st, field ) / 8 ) \
	  & ~( sizeof ( struct hermonprm_scalar_parameter ) - 1 ) )
726 
727 /**
728  * Query or modify static configuration
729  *
730  * @v hermon Hermon device
731  * @v port Port
732  * @v mode Command mode
733  * @v offset Offset within static configuration
734  * @v stat_cfg Static configuration
735  * @ret rc Return status code
736  */
737 static int hermon_mod_stat_cfg ( struct hermon *hermon, unsigned int port,
738  unsigned int mode, unsigned int offset,
739  struct hermonprm_mod_stat_cfg *stat_cfg ) {
740  struct hermonprm_scalar_parameter *portion =
741  ( ( void * ) &stat_cfg->u.bytes[offset] );
742  struct hermonprm_mod_stat_cfg_input_mod mod;
743  int rc;
744 
745  /* Sanity check */
746  assert ( ( offset % sizeof ( *portion ) ) == 0 );
747 
748  /* Construct input modifier */
749  memset ( &mod, 0, sizeof ( mod ) );
750  MLX_FILL_2 ( &mod, 0,
751  portnum, port,
752  offset, offset );
753 
754  /* Issue command */
755  if ( ( rc = hermon_cmd_mod_stat_cfg ( hermon, mode,
756  be32_to_cpu ( mod.u.dwords[0] ),
757  portion ) ) != 0 )
758  return rc;
759 
760  return 0;
761 }
762 
763 /***************************************************************************
764  *
765  * MAD operations
766  *
767  ***************************************************************************
768  */
769 
770 /**
771  * Issue management datagram
772  *
773  * @v ibdev Infiniband device
774  * @v mad Management datagram
775  * @ret rc Return status code
776  */
777 static int hermon_mad ( struct ib_device *ibdev, union ib_mad *mad ) {
778  struct hermon *hermon = ib_get_drvdata ( ibdev );
779  union hermonprm_mad mad_ifc;
780  int rc;
781 
782  linker_assert ( sizeof ( *mad ) == sizeof ( mad_ifc.mad ),
783  mad_size_mismatch );
784 
785  /* Copy in request packet */
786  memcpy ( &mad_ifc.mad, mad, sizeof ( mad_ifc.mad ) );
787 
788  /* Issue MAD */
789  if ( ( rc = hermon_cmd_mad_ifc ( hermon, ibdev->port,
790  &mad_ifc ) ) != 0 ) {
791  DBGC ( hermon, "Hermon %p port %d could not issue MAD IFC: "
792  "%s\n", hermon, ibdev->port, strerror ( rc ) );
793  return rc;
794  }
795 
796  /* Copy out reply packet */
797  memcpy ( mad, &mad_ifc.mad, sizeof ( *mad ) );
798 
799  if ( mad->hdr.status != 0 ) {
800  DBGC ( hermon, "Hermon %p port %d MAD IFC status %04x\n",
801  hermon, ibdev->port, ntohs ( mad->hdr.status ) );
802  return -EIO;
803  }
804  return 0;
805 }
806 
807 /***************************************************************************
808  *
809  * Completion queue operations
810  *
811  ***************************************************************************
812  */
813 
814 /**
815  * Dump completion queue context (for debugging only)
816  *
817  * @v hermon Hermon device
818  * @v cq Completion queue
819  * @ret rc Return status code
820  */
821 static __attribute__ (( unused )) int
823  struct hermonprm_completion_queue_context cqctx;
824  int rc;
825 
826  /* Do nothing unless debugging is enabled */
827  if ( ! DBG_LOG )
828  return 0;
829 
830  /* Dump completion queue context */
831  memset ( &cqctx, 0, sizeof ( cqctx ) );
832  if ( ( rc = hermon_cmd_query_cq ( hermon, cq->cqn, &cqctx ) ) != 0 ) {
833  DBGC ( hermon, "Hermon %p CQN %#lx QUERY_CQ failed: %s\n",
834  hermon, cq->cqn, strerror ( rc ) );
835  return rc;
836  }
837  DBGC ( hermon, "Hermon %p CQN %#lx context:\n", hermon, cq->cqn );
838  DBGC_HDA ( hermon, 0, &cqctx, sizeof ( cqctx ) );
839 
840  return 0;
841 }
842 
843 /**
844  * Create completion queue
845  *
846  * @v ibdev Infiniband device
847  * @v cq Completion queue
848  * @ret rc Return status code
849  */
850 static int hermon_create_cq ( struct ib_device *ibdev,
851  struct ib_completion_queue *cq ) {
852  struct hermon *hermon = ib_get_drvdata ( ibdev );
853  struct hermon_completion_queue *hermon_cq;
854  struct hermonprm_completion_queue_context cqctx;
855  int cqn_offset;
856  unsigned int i;
857  int rc;
858 
859  /* Find a free completion queue number */
860  cqn_offset = hermon_bitmask_alloc ( hermon->cq_inuse,
861  HERMON_MAX_CQS, 1 );
862  if ( cqn_offset < 0 ) {
863  DBGC ( hermon, "Hermon %p out of completion queues\n",
864  hermon );
865  rc = cqn_offset;
866  goto err_cqn_offset;
867  }
868  cq->cqn = ( hermon->cap.reserved_cqs + cqn_offset );
869 
870  /* Allocate control structures */
871  hermon_cq = zalloc ( sizeof ( *hermon_cq ) );
872  if ( ! hermon_cq ) {
873  DBGC ( hermon, "Hermon %p CQN %#lx could not allocate CQ\n",
874  hermon, cq->cqn );
875  rc = -ENOMEM;
876  goto err_hermon_cq;
877  }
878 
879  /* Allocate doorbell */
880  hermon_cq->doorbell = malloc_phys ( sizeof ( hermon_cq->doorbell[0] ),
881  sizeof ( hermon_cq->doorbell[0] ) );
882  if ( ! hermon_cq->doorbell ) {
883  DBGC ( hermon, "Hermon %p CQN %#lx could not allocate "
884  "doorbell\n", hermon, cq->cqn );
885  rc = -ENOMEM;
886  goto err_doorbell;
887  }
888  memset ( hermon_cq->doorbell, 0, sizeof ( hermon_cq->doorbell[0] ) );
889 
890  /* Allocate completion queue itself */
891  hermon_cq->cqe_size = ( cq->num_cqes * sizeof ( hermon_cq->cqe[0] ) );
892  hermon_cq->cqe = malloc_phys ( hermon_cq->cqe_size,
893  sizeof ( hermon_cq->cqe[0] ) );
894  if ( ! hermon_cq->cqe ) {
895  DBGC ( hermon, "Hermon %p CQN %#lx could not allocate CQEs\n",
896  hermon, cq->cqn );
897  rc = -ENOMEM;
898  goto err_cqe;
899  }
900  memset ( hermon_cq->cqe, 0, hermon_cq->cqe_size );
901  for ( i = 0 ; i < cq->num_cqes ; i++ ) {
902  MLX_FILL_1 ( &hermon_cq->cqe[i].normal, 7, owner, 1 );
903  }
904  barrier();
905 
906  /* Allocate MTT entries */
907  if ( ( rc = hermon_alloc_mtt ( hermon, hermon_cq->cqe,
908  hermon_cq->cqe_size,
909  &hermon_cq->mtt ) ) != 0 ) {
910  DBGC ( hermon, "Hermon %p CQN %#lx could not allocate MTTs: "
911  "%s\n", hermon, cq->cqn, strerror ( rc ) );
912  goto err_alloc_mtt;
913  }
914 
915  /* Hand queue over to hardware */
916  memset ( &cqctx, 0, sizeof ( cqctx ) );
917  MLX_FILL_1 ( &cqctx, 0, st, 0xa /* "Event fired" */ );
918  MLX_FILL_1 ( &cqctx, 2,
919  page_offset, ( hermon_cq->mtt.page_offset >> 5 ) );
920  MLX_FILL_2 ( &cqctx, 3,
921  usr_page, HERMON_UAR_NON_EQ_PAGE,
922  log_cq_size, fls ( cq->num_cqes - 1 ) );
923  MLX_FILL_1 ( &cqctx, 5, c_eqn, hermon->eq.eqn );
924  MLX_FILL_H ( &cqctx, 6, mtt_base_addr_h,
925  hermon_cq->mtt.mtt_base_addr );
926  MLX_FILL_1 ( &cqctx, 7, mtt_base_addr_l,
927  ( hermon_cq->mtt.mtt_base_addr >> 3 ) );
928  MLX_FILL_H ( &cqctx, 14, db_record_addr_h,
929  virt_to_phys ( hermon_cq->doorbell ) );
930  MLX_FILL_1 ( &cqctx, 15, db_record_addr_l,
931  ( virt_to_phys ( hermon_cq->doorbell ) >> 3 ) );
932  if ( ( rc = hermon_cmd_sw2hw_cq ( hermon, cq->cqn, &cqctx ) ) != 0 ) {
933  DBGC ( hermon, "Hermon %p CQN %#lx SW2HW_CQ failed: %s\n",
934  hermon, cq->cqn, strerror ( rc ) );
935  goto err_sw2hw_cq;
936  }
937 
938  DBGC ( hermon, "Hermon %p CQN %#lx ring [%08lx,%08lx), doorbell "
939  "%08lx\n", hermon, cq->cqn, virt_to_phys ( hermon_cq->cqe ),
940  ( virt_to_phys ( hermon_cq->cqe ) + hermon_cq->cqe_size ),
941  virt_to_phys ( hermon_cq->doorbell ) );
942  ib_cq_set_drvdata ( cq, hermon_cq );
943  return 0;
944 
945  err_sw2hw_cq:
946  hermon_free_mtt ( hermon, &hermon_cq->mtt );
947  err_alloc_mtt:
948  free_phys ( hermon_cq->cqe, hermon_cq->cqe_size );
949  err_cqe:
950  free_phys ( hermon_cq->doorbell, sizeof ( hermon_cq->doorbell[0] ) );
951  err_doorbell:
952  free ( hermon_cq );
953  err_hermon_cq:
954  hermon_bitmask_free ( hermon->cq_inuse, cqn_offset, 1 );
955  err_cqn_offset:
956  return rc;
957 }
958 
959 /**
960  * Destroy completion queue
961  *
962  * @v ibdev Infiniband device
963  * @v cq Completion queue
964  */
965 static void hermon_destroy_cq ( struct ib_device *ibdev,
966  struct ib_completion_queue *cq ) {
967  struct hermon *hermon = ib_get_drvdata ( ibdev );
968  struct hermon_completion_queue *hermon_cq = ib_cq_get_drvdata ( cq );
969  struct hermonprm_completion_queue_context cqctx;
970  int cqn_offset;
971  int rc;
972 
973  /* Take ownership back from hardware */
974  if ( ( rc = hermon_cmd_hw2sw_cq ( hermon, cq->cqn, &cqctx ) ) != 0 ) {
975  DBGC ( hermon, "Hermon %p CQN %#lx FATAL HW2SW_CQ failed: "
976  "%s\n", hermon, cq->cqn, strerror ( rc ) );
977  /* Leak memory and return; at least we avoid corruption */
978  return;
979  }
980 
981  /* Free MTT entries */
982  hermon_free_mtt ( hermon, &hermon_cq->mtt );
983 
984  /* Free memory */
985  free_phys ( hermon_cq->cqe, hermon_cq->cqe_size );
986  free_phys ( hermon_cq->doorbell, sizeof ( hermon_cq->doorbell[0] ) );
987  free ( hermon_cq );
988 
989  /* Mark queue number as free */
990  cqn_offset = ( cq->cqn - hermon->cap.reserved_cqs );
991  hermon_bitmask_free ( hermon->cq_inuse, cqn_offset, 1 );
992 
993  ib_cq_set_drvdata ( cq, NULL );
994 }
995 
996 /***************************************************************************
997  *
998  * Queue pair operations
999  *
1000  ***************************************************************************
1001  */
1002 
1003 /**
1004  * Assign queue pair number
1005  *
1006  * @v ibdev Infiniband device
1007  * @v qp Queue pair
1008  * @ret rc Return status code
1009  */
1010 static int hermon_alloc_qpn ( struct ib_device *ibdev,
1011  struct ib_queue_pair *qp ) {
1012  struct hermon *hermon = ib_get_drvdata ( ibdev );
1013  unsigned int port_offset;
1014  int qpn_offset;
1015 
1016  /* Calculate queue pair number */
1017  port_offset = ( ibdev->port - HERMON_PORT_BASE );
1018 
1019  switch ( qp->type ) {
1020  case IB_QPT_SMI:
1021  qp->qpn = ( hermon->special_qpn_base + port_offset );
1022  return 0;
1023  case IB_QPT_GSI:
1024  qp->qpn = ( hermon->special_qpn_base + 2 + port_offset );
1025  return 0;
1026  case IB_QPT_UD:
1027  case IB_QPT_RC:
1028  case IB_QPT_ETH:
1029  /* Find a free queue pair number */
1030  qpn_offset = hermon_bitmask_alloc ( hermon->qp_inuse,
1031  HERMON_MAX_QPS, 1 );
1032  if ( qpn_offset < 0 ) {
1033  DBGC ( hermon, "Hermon %p out of queue pairs\n",
1034  hermon );
1035  return qpn_offset;
1036  }
1037  qp->qpn = ( ( random() & HERMON_QPN_RANDOM_MASK ) |
1038  ( hermon->qpn_base + qpn_offset ) );
1039  return 0;
1040  default:
1041  DBGC ( hermon, "Hermon %p unsupported QP type %d\n",
1042  hermon, qp->type );
1043  return -ENOTSUP;
1044  }
1045 }
1046 
1047 /**
1048  * Free queue pair number
1049  *
1050  * @v ibdev Infiniband device
1051  * @v qp Queue pair
1052  */
1053 static void hermon_free_qpn ( struct ib_device *ibdev,
1054  struct ib_queue_pair *qp ) {
1055  struct hermon *hermon = ib_get_drvdata ( ibdev );
1056  int qpn_offset;
1057 
1058  qpn_offset = ( ( qp->qpn & ~HERMON_QPN_RANDOM_MASK )
1059  - hermon->qpn_base );
1060  if ( qpn_offset >= 0 )
1061  hermon_bitmask_free ( hermon->qp_inuse, qpn_offset, 1 );
1062 }
1063 
1064 /**
1065  * Calculate transmission rate
1066  *
1067  * @v av Address vector
1068  * @ret hermon_rate Hermon rate
1069  */
1070 static unsigned int hermon_rate ( struct ib_address_vector *av ) {
1071  return ( ( ( av->rate >= IB_RATE_2_5 ) && ( av->rate <= IB_RATE_120 ) )
1072  ? ( av->rate + 5 ) : 0 );
1073 }
1074 
1075 /**
1076  * Calculate schedule queue
1077  *
1078  * @v ibdev Infiniband device
1079  * @v qp Queue pair
1080  * @ret sched_queue Schedule queue
1081  */
1082 static unsigned int hermon_sched_queue ( struct ib_device *ibdev,
1083  struct ib_queue_pair *qp ) {
1084  return ( ( ( qp->type == IB_QPT_SMI ) ?
1086  ( ( ibdev->port - 1 ) << 6 ) );
1087 }
1088 
1089 /** Queue pair transport service type map */
1090 static uint8_t hermon_qp_st[] = {
1093  [IB_QPT_UD] = HERMON_ST_UD,
1094  [IB_QPT_RC] = HERMON_ST_RC,
1096 };
1097 
1098 /**
1099  * Dump queue pair context (for debugging only)
1100  *
1101  * @v hermon Hermon device
1102  * @v qp Queue pair
1103  * @ret rc Return status code
1104  */
1105 static __attribute__ (( unused )) int
1107  struct hermon_queue_pair *hermon_qp = ib_qp_get_drvdata ( qp );
1108  struct hermonprm_qp_ee_state_transitions qpctx;
1109  unsigned int state;
1110  int rc;
1111 
1112  /* Do nothing unless debugging is enabled */
1113  if ( ! DBG_LOG )
1114  return 0;
1115 
1116  /* Dump queue pair context */
1117  memset ( &qpctx, 0, sizeof ( qpctx ) );
1118  if ( ( rc = hermon_cmd_query_qp ( hermon, qp->qpn, &qpctx ) ) != 0 ) {
1119  DBGC ( hermon, "Hermon %p QPN %#lx QUERY_QP failed: %s\n",
1120  hermon, qp->qpn, strerror ( rc ) );
1121  return rc;
1122  }
1123  state = MLX_GET ( &qpctx, qpc_eec_data.state );
1124  if ( state != hermon_qp->state ) {
1125  DBGC ( hermon, "Hermon %p QPN %#lx state %d unexpected "
1126  "(should be %d)\n",
1127  hermon, qp->qpn, state, hermon_qp->state );
1128  }
1129  DBGC ( hermon, "Hermon %p QPN %#lx state %d context:\n",
1130  hermon, qp->qpn, state );
1131  DBGC_HDA ( hermon, 0, &qpctx.u.dwords[2], ( sizeof ( qpctx ) - 8 ) );
1132 
1133  return 0;
1134 }
1135 
1136 /**
1137  * Create queue pair
1138  *
1139  * @v ibdev Infiniband device
1140  * @v qp Queue pair
1141  * @ret rc Return status code
1142  */
1143 static int hermon_create_qp ( struct ib_device *ibdev,
1144  struct ib_queue_pair *qp ) {
1145  struct hermon *hermon = ib_get_drvdata ( ibdev );
1146  struct hermon_queue_pair *hermon_qp;
1147  struct hermonprm_qp_ee_state_transitions qpctx;
1148  struct hermonprm_wqe_segment_data_ptr *data;
1149  unsigned int i;
1150  int rc;
1151 
1152  /* Calculate queue pair number */
1153  if ( ( rc = hermon_alloc_qpn ( ibdev, qp ) ) != 0 )
1154  goto err_alloc_qpn;
1155 
1156  /* Allocate control structures */
1157  hermon_qp = zalloc ( sizeof ( *hermon_qp ) );
1158  if ( ! hermon_qp ) {
1159  DBGC ( hermon, "Hermon %p QPN %#lx could not allocate QP\n",
1160  hermon, qp->qpn );
1161  rc = -ENOMEM;
1162  goto err_hermon_qp;
1163  }
1164 
1165  /* Allocate doorbells */
1166  hermon_qp->recv.doorbell =
1167  malloc_phys ( sizeof ( hermon_qp->recv.doorbell[0] ),
1168  sizeof ( hermon_qp->recv.doorbell[0] ) );
1169  if ( ! hermon_qp->recv.doorbell ) {
1170  DBGC ( hermon, "Hermon %p QPN %#lx could not allocate "
1171  "doorbell\n", hermon, qp->qpn );
1172  rc = -ENOMEM;
1173  goto err_recv_doorbell;
1174  }
1175  memset ( hermon_qp->recv.doorbell, 0,
1176  sizeof ( hermon_qp->recv.doorbell[0] ) );
1177  hermon_qp->send.doorbell =
1180 
1181  /* Allocate work queue buffer */
1182  hermon_qp->send.num_wqes = ( qp->send.num_wqes /* headroom */ + 1 +
1183  ( 2048 / sizeof ( hermon_qp->send.wqe[0] ) ) );
1184  hermon_qp->send.num_wqes =
1185  ( 1 << fls ( hermon_qp->send.num_wqes - 1 ) ); /* round up */
1186  hermon_qp->send.wqe_size = ( hermon_qp->send.num_wqes *
1187  sizeof ( hermon_qp->send.wqe[0] ) );
1188  hermon_qp->recv.wqe_size = ( qp->recv.num_wqes *
1189  sizeof ( hermon_qp->recv.wqe[0] ) );
1190  if ( ( qp->type == IB_QPT_SMI ) || ( qp->type == IB_QPT_GSI ) ||
1191  ( qp->type == IB_QPT_UD ) ) {
1192  hermon_qp->recv.grh_size = ( qp->recv.num_wqes *
1193  sizeof ( hermon_qp->recv.grh[0] ));
1194  }
1195  hermon_qp->wqe_size = ( hermon_qp->send.wqe_size +
1196  hermon_qp->recv.wqe_size +
1197  hermon_qp->recv.grh_size );
1198  hermon_qp->wqe = malloc_phys ( hermon_qp->wqe_size,
1199  sizeof ( hermon_qp->send.wqe[0] ) );
1200  if ( ! hermon_qp->wqe ) {
1201  DBGC ( hermon, "Hermon %p QPN %#lx could not allocate WQEs\n",
1202  hermon, qp->qpn );
1203  rc = -ENOMEM;
1204  goto err_alloc_wqe;
1205  }
1206  hermon_qp->send.wqe = hermon_qp->wqe;
1207  hermon_qp->recv.wqe = ( hermon_qp->wqe + hermon_qp->send.wqe_size );
1208  if ( hermon_qp->recv.grh_size ) {
1209  hermon_qp->recv.grh = ( hermon_qp->wqe +
1210  hermon_qp->send.wqe_size +
1211  hermon_qp->recv.wqe_size );
1212  }
1213 
1214  /* Initialise work queue entries */
1215  memset ( hermon_qp->send.wqe, 0xff, hermon_qp->send.wqe_size );
1216  memset ( hermon_qp->recv.wqe, 0, hermon_qp->recv.wqe_size );
1217  data = &hermon_qp->recv.wqe[0].recv.data[0];
1218  for ( i = 0 ; i < ( hermon_qp->recv.wqe_size / sizeof ( *data ) ); i++){
1219  MLX_FILL_1 ( data, 1, l_key, HERMON_INVALID_LKEY );
1220  data++;
1221  }
1222 
1223  /* Allocate MTT entries */
1224  if ( ( rc = hermon_alloc_mtt ( hermon, hermon_qp->wqe,
1225  hermon_qp->wqe_size,
1226  &hermon_qp->mtt ) ) != 0 ) {
1227  DBGC ( hermon, "Hermon %p QPN %#lx could not allocate MTTs: "
1228  "%s\n", hermon, qp->qpn, strerror ( rc ) );
1229  goto err_alloc_mtt;
1230  }
1231 
1232  /* Transition queue to INIT state */
1233  memset ( &qpctx, 0, sizeof ( qpctx ) );
1234  MLX_FILL_2 ( &qpctx, 2,
1235  qpc_eec_data.pm_state, HERMON_PM_STATE_MIGRATED,
1236  qpc_eec_data.st, hermon_qp_st[qp->type] );
1237  MLX_FILL_1 ( &qpctx, 3, qpc_eec_data.pd, HERMON_GLOBAL_PD );
1238  MLX_FILL_4 ( &qpctx, 4,
1239  qpc_eec_data.log_rq_size, fls ( qp->recv.num_wqes - 1 ),
1240  qpc_eec_data.log_rq_stride,
1241  ( fls ( sizeof ( hermon_qp->recv.wqe[0] ) - 1 ) - 4 ),
1242  qpc_eec_data.log_sq_size,
1243  fls ( hermon_qp->send.num_wqes - 1 ),
1244  qpc_eec_data.log_sq_stride,
1245  ( fls ( sizeof ( hermon_qp->send.wqe[0] ) - 1 ) - 4 ) );
1246  MLX_FILL_1 ( &qpctx, 5,
1247  qpc_eec_data.usr_page, HERMON_UAR_NON_EQ_PAGE );
1248  MLX_FILL_1 ( &qpctx, 33, qpc_eec_data.cqn_snd, qp->send.cq->cqn );
1249  MLX_FILL_4 ( &qpctx, 38,
1250  qpc_eec_data.rre, 1,
1251  qpc_eec_data.rwe, 1,
1252  qpc_eec_data.rae, 1,
1253  qpc_eec_data.page_offset,
1254  ( hermon_qp->mtt.page_offset >> 6 ) );
1255  MLX_FILL_1 ( &qpctx, 41, qpc_eec_data.cqn_rcv, qp->recv.cq->cqn );
1256  MLX_FILL_H ( &qpctx, 42, qpc_eec_data.db_record_addr_h,
1257  virt_to_phys ( hermon_qp->recv.doorbell ) );
1258  MLX_FILL_1 ( &qpctx, 43, qpc_eec_data.db_record_addr_l,
1259  ( virt_to_phys ( hermon_qp->recv.doorbell ) >> 2 ) );
1260  MLX_FILL_H ( &qpctx, 52, qpc_eec_data.mtt_base_addr_h,
1261  hermon_qp->mtt.mtt_base_addr );
1262  MLX_FILL_1 ( &qpctx, 53, qpc_eec_data.mtt_base_addr_l,
1263  ( hermon_qp->mtt.mtt_base_addr >> 3 ) );
1264  if ( ( rc = hermon_cmd_rst2init_qp ( hermon, qp->qpn,
1265  &qpctx ) ) != 0 ) {
1266  DBGC ( hermon, "Hermon %p QPN %#lx RST2INIT_QP failed: %s\n",
1267  hermon, qp->qpn, strerror ( rc ) );
1268  goto err_rst2init_qp;
1269  }
1270  hermon_qp->state = HERMON_QP_ST_INIT;
1271 
1272  DBGC ( hermon, "Hermon %p QPN %#lx send ring [%08lx,%08lx), doorbell "
1273  "%08lx\n", hermon, qp->qpn,
1274  virt_to_phys ( hermon_qp->send.wqe ),
1275  ( virt_to_phys ( hermon_qp->send.wqe ) +
1276  hermon_qp->send.wqe_size ),
1277  virt_to_phys ( hermon_qp->send.doorbell ) );
1278  DBGC ( hermon, "Hermon %p QPN %#lx receive ring [%08lx,%08lx), "
1279  "doorbell %08lx\n", hermon, qp->qpn,
1280  virt_to_phys ( hermon_qp->recv.wqe ),
1281  ( virt_to_phys ( hermon_qp->recv.wqe ) +
1282  hermon_qp->recv.wqe_size ),
1283  virt_to_phys ( hermon_qp->recv.doorbell ) );
1284  DBGC ( hermon, "Hermon %p QPN %#lx send CQN %#lx receive CQN %#lx\n",
1285  hermon, qp->qpn, qp->send.cq->cqn, qp->recv.cq->cqn );
1286  ib_qp_set_drvdata ( qp, hermon_qp );
1287  return 0;
1288 
1289  hermon_cmd_2rst_qp ( hermon, qp->qpn );
1290  err_rst2init_qp:
1291  hermon_free_mtt ( hermon, &hermon_qp->mtt );
1292  err_alloc_mtt:
1293  free_phys ( hermon_qp->wqe, hermon_qp->wqe_size );
1294  err_alloc_wqe:
1295  free_phys ( hermon_qp->recv.doorbell,
1296  sizeof ( hermon_qp->recv.doorbell[0] ) );
1297  err_recv_doorbell:
1298  free ( hermon_qp );
1299  err_hermon_qp:
1300  hermon_free_qpn ( ibdev, qp );
1301  err_alloc_qpn:
1302  return rc;
1303 }
1304 
1305 /**
1306  * Modify queue pair
1307  *
1308  * @v ibdev Infiniband device
1309  * @v qp Queue pair
1310  * @ret rc Return status code
1311  */
1312 static int hermon_modify_qp ( struct ib_device *ibdev,
1313  struct ib_queue_pair *qp ) {
1314  struct hermon *hermon = ib_get_drvdata ( ibdev );
1315  struct hermon_queue_pair *hermon_qp = ib_qp_get_drvdata ( qp );
1316  struct hermonprm_qp_ee_state_transitions qpctx;
1317  int rc;
1318 
1319  /* Transition queue to RTR state, if applicable */
1320  if ( hermon_qp->state < HERMON_QP_ST_RTR ) {
1321  memset ( &qpctx, 0, sizeof ( qpctx ) );
1322  MLX_FILL_2 ( &qpctx, 4,
1323  qpc_eec_data.mtu,
1324  ( ( qp->type == IB_QPT_ETH ) ?
1326  qpc_eec_data.msg_max, 31 );
1327  MLX_FILL_1 ( &qpctx, 7,
1328  qpc_eec_data.remote_qpn_een, qp->av.qpn );
1329  MLX_FILL_1 ( &qpctx, 9,
1330  qpc_eec_data.primary_address_path.rlid,
1331  qp->av.lid );
1332  MLX_FILL_1 ( &qpctx, 10,
1333  qpc_eec_data.primary_address_path.max_stat_rate,
1334  hermon_rate ( &qp->av ) );
1335  memcpy ( &qpctx.u.dwords[12], &qp->av.gid,
1336  sizeof ( qp->av.gid ) );
1337  MLX_FILL_1 ( &qpctx, 16,
1338  qpc_eec_data.primary_address_path.sched_queue,
1339  hermon_sched_queue ( ibdev, qp ) );
1340  MLX_FILL_1 ( &qpctx, 39,
1341  qpc_eec_data.next_rcv_psn, qp->recv.psn );
1342  if ( ( rc = hermon_cmd_init2rtr_qp ( hermon, qp->qpn,
1343  &qpctx ) ) != 0 ) {
1344  DBGC ( hermon, "Hermon %p QPN %#lx INIT2RTR_QP failed:"
1345  " %s\n", hermon, qp->qpn, strerror ( rc ) );
1346  return rc;
1347  }
1348  hermon_qp->state = HERMON_QP_ST_RTR;
1349  }
1350 
1351  /* Transition queue to RTS state */
1352  if ( hermon_qp->state < HERMON_QP_ST_RTS ) {
1353  memset ( &qpctx, 0, sizeof ( qpctx ) );
1354  MLX_FILL_1 ( &qpctx, 10,
1355  qpc_eec_data.primary_address_path.ack_timeout,
1356  14 /* 4.096us * 2^(14) = 67ms */ );
1357  MLX_FILL_2 ( &qpctx, 30,
1358  qpc_eec_data.retry_count, HERMON_RETRY_MAX,
1359  qpc_eec_data.rnr_retry, HERMON_RETRY_MAX );
1360  MLX_FILL_1 ( &qpctx, 32,
1361  qpc_eec_data.next_send_psn, qp->send.psn );
1362  if ( ( rc = hermon_cmd_rtr2rts_qp ( hermon, qp->qpn,
1363  &qpctx ) ) != 0 ) {
1364  DBGC ( hermon, "Hermon %p QPN %#lx RTR2RTS_QP failed: "
1365  "%s\n", hermon, qp->qpn, strerror ( rc ) );
1366  return rc;
1367  }
1368  hermon_qp->state = HERMON_QP_ST_RTS;
1369  }
1370 
1371  /* Update parameters in RTS state */
1372  memset ( &qpctx, 0, sizeof ( qpctx ) );
1374  MLX_FILL_1 ( &qpctx, 44, qpc_eec_data.q_key, qp->qkey );
1375  if ( ( rc = hermon_cmd_rts2rts_qp ( hermon, qp->qpn, &qpctx ) ) != 0 ){
1376  DBGC ( hermon, "Hermon %p QPN %#lx RTS2RTS_QP failed: %s\n",
1377  hermon, qp->qpn, strerror ( rc ) );
1378  return rc;
1379  }
1380 
1381  return 0;
1382 }
1383 
1384 /**
1385  * Destroy queue pair
1386  *
1387  * @v ibdev Infiniband device
1388  * @v qp Queue pair
1389  */
1390 static void hermon_destroy_qp ( struct ib_device *ibdev,
1391  struct ib_queue_pair *qp ) {
1392  struct hermon *hermon = ib_get_drvdata ( ibdev );
1393  struct hermon_queue_pair *hermon_qp = ib_qp_get_drvdata ( qp );
1394  int rc;
1395 
1396  /* Take ownership back from hardware */
1397  if ( ( rc = hermon_cmd_2rst_qp ( hermon, qp->qpn ) ) != 0 ) {
1398  DBGC ( hermon, "Hermon %p QPN %#lx FATAL 2RST_QP failed: %s\n",
1399  hermon, qp->qpn, strerror ( rc ) );
1400  /* Leak memory and return; at least we avoid corruption */
1401  return;
1402  }
1403 
1404  /* Free MTT entries */
1405  hermon_free_mtt ( hermon, &hermon_qp->mtt );
1406 
1407  /* Free memory */
1408  free_phys ( hermon_qp->wqe, hermon_qp->wqe_size );
1409  free_phys ( hermon_qp->recv.doorbell,
1410  sizeof ( hermon_qp->recv.doorbell[0] ) );
1411  free ( hermon_qp );
1412 
1413  /* Mark queue number as free */
1414  hermon_free_qpn ( ibdev, qp );
1415 
1416  ib_qp_set_drvdata ( qp, NULL );
1417 }
1418 
1419 /***************************************************************************
1420  *
1421  * Work request operations
1422  *
1423  ***************************************************************************
1424  */
1425 
1426 /**
 * Construct NOP send work queue entry
1428  *
1429  * @v ibdev Infiniband device
1430  * @v qp Queue pair
1431  * @v dest Destination address vector
1432  * @v iobuf I/O buffer
1433  * @v wqe Send work queue entry
1434  * @ret opcode Control opcode
1435  */
1436 static __attribute__ (( unused )) unsigned int
1438  struct ib_queue_pair *qp __unused,
1440  struct io_buffer *iobuf __unused,
1441  union hermon_send_wqe *wqe ) {
1442 
1443  MLX_FILL_1 ( &wqe->ctrl, 1, ds, ( sizeof ( wqe->ctrl ) / 16 ) );
1444  MLX_FILL_1 ( &wqe->ctrl, 2, c, 0x03 /* generate completion */ );
1445  return HERMON_OPCODE_NOP;
1446 }
1447 
1448 /**
1449  * Construct UD send work queue entry
1450  *
1451  * @v ibdev Infiniband device
1452  * @v qp Queue pair
1453  * @v dest Destination address vector
1454  * @v iobuf I/O buffer
1455  * @v wqe Send work queue entry
1456  * @ret opcode Control opcode
1457  */
1458 static unsigned int
1460  struct ib_queue_pair *qp __unused,
1461  struct ib_address_vector *dest,
1462  struct io_buffer *iobuf,
1463  union hermon_send_wqe *wqe ) {
1464  struct hermon *hermon = ib_get_drvdata ( ibdev );
1465 
1466  MLX_FILL_1 ( &wqe->ud.ctrl, 1, ds,
1467  ( ( offsetof ( typeof ( wqe->ud ), data[1] ) / 16 ) ) );
1468  MLX_FILL_1 ( &wqe->ud.ctrl, 2, c, 0x03 /* generate completion */ );
1469  MLX_FILL_2 ( &wqe->ud.ud, 0,
1470  ud_address_vector.pd, HERMON_GLOBAL_PD,
1471  ud_address_vector.port_number, ibdev->port );
1472  MLX_FILL_2 ( &wqe->ud.ud, 1,
1473  ud_address_vector.rlid, dest->lid,
1474  ud_address_vector.g, dest->gid_present );
1475  MLX_FILL_1 ( &wqe->ud.ud, 2,
1476  ud_address_vector.max_stat_rate, hermon_rate ( dest ) );
1477  MLX_FILL_1 ( &wqe->ud.ud, 3, ud_address_vector.sl, dest->sl );
1478  memcpy ( &wqe->ud.ud.u.dwords[4], &dest->gid, sizeof ( dest->gid ) );
1479  MLX_FILL_1 ( &wqe->ud.ud, 8, destination_qp, dest->qpn );
1480  MLX_FILL_1 ( &wqe->ud.ud, 9, q_key, dest->qkey );
1481  MLX_FILL_1 ( &wqe->ud.data[0], 0, byte_count, iob_len ( iobuf ) );
1482  MLX_FILL_1 ( &wqe->ud.data[0], 1, l_key, hermon->lkey );
1483  MLX_FILL_H ( &wqe->ud.data[0], 2,
1484  local_address_h, virt_to_bus ( iobuf->data ) );
1485  MLX_FILL_1 ( &wqe->ud.data[0], 3,
1486  local_address_l, virt_to_bus ( iobuf->data ) );
1487  return HERMON_OPCODE_SEND;
1488 }
1489 
1490 /**
1491  * Construct MLX send work queue entry
1492  *
1493  * @v ibdev Infiniband device
1494  * @v qp Queue pair
1495  * @v dest Destination address vector
1496  * @v iobuf I/O buffer
1497  * @v wqe Send work queue entry
1498  * @ret opcode Control opcode
1499  */
1500 static unsigned int
1502  struct ib_queue_pair *qp,
1503  struct ib_address_vector *dest,
1504  struct io_buffer *iobuf,
1505  union hermon_send_wqe *wqe ) {
1506  struct hermon *hermon = ib_get_drvdata ( ibdev );
1507  struct io_buffer headers;
1508 
1509  /* Construct IB headers */
1510  iob_populate ( &headers, &wqe->mlx.headers, 0,
1511  sizeof ( wqe->mlx.headers ) );
1512  iob_reserve ( &headers, sizeof ( wqe->mlx.headers ) );
1513  ib_push ( ibdev, &headers, qp, iob_len ( iobuf ), dest );
1514 
1515  /* Fill work queue entry */
1516  MLX_FILL_1 ( &wqe->mlx.ctrl, 1, ds,
1517  ( ( offsetof ( typeof ( wqe->mlx ), data[2] ) / 16 ) ) );
1518  MLX_FILL_5 ( &wqe->mlx.ctrl, 2,
1519  c, 0x03 /* generate completion */,
1520  icrc, 0 /* generate ICRC */,
1521  max_statrate, hermon_rate ( dest ),
1522  slr, 0,
1523  v15, ( ( qp->ext_qpn == IB_QPN_SMI ) ? 1 : 0 ) );
1524  MLX_FILL_1 ( &wqe->mlx.ctrl, 3, rlid, dest->lid );
1525  MLX_FILL_1 ( &wqe->mlx.data[0], 0,
1526  byte_count, iob_len ( &headers ) );
1527  MLX_FILL_1 ( &wqe->mlx.data[0], 1, l_key, hermon->lkey );
1528  MLX_FILL_H ( &wqe->mlx.data[0], 2,
1529  local_address_h, virt_to_bus ( headers.data ) );
1530  MLX_FILL_1 ( &wqe->mlx.data[0], 3,
1531  local_address_l, virt_to_bus ( headers.data ) );
1532  MLX_FILL_1 ( &wqe->mlx.data[1], 0,
1533  byte_count, ( iob_len ( iobuf ) + 4 /* ICRC */ ) );
1534  MLX_FILL_1 ( &wqe->mlx.data[1], 1, l_key, hermon->lkey );
1535  MLX_FILL_H ( &wqe->mlx.data[1], 2,
1536  local_address_h, virt_to_bus ( iobuf->data ) );
1537  MLX_FILL_1 ( &wqe->mlx.data[1], 3,
1538  local_address_l, virt_to_bus ( iobuf->data ) );
1539  return HERMON_OPCODE_SEND;
1540 }
1541 
1542 /**
1543  * Construct RC send work queue entry
1544  *
1545  * @v ibdev Infiniband device
1546  * @v qp Queue pair
1547  * @v dest Destination address vector
1548  * @v iobuf I/O buffer
1549  * @v wqe Send work queue entry
1550  * @ret opcode Control opcode
1551  */
1552 static unsigned int
1554  struct ib_queue_pair *qp __unused,
1556  struct io_buffer *iobuf,
1557  union hermon_send_wqe *wqe ) {
1558  struct hermon *hermon = ib_get_drvdata ( ibdev );
1559 
1560  MLX_FILL_1 ( &wqe->rc.ctrl, 1, ds,
1561  ( ( offsetof ( typeof ( wqe->rc ), data[1] ) / 16 ) ) );
1562  MLX_FILL_1 ( &wqe->rc.ctrl, 2, c, 0x03 /* generate completion */ );
1563  MLX_FILL_1 ( &wqe->rc.data[0], 0, byte_count, iob_len ( iobuf ) );
1564  MLX_FILL_1 ( &wqe->rc.data[0], 1, l_key, hermon->lkey );
1565  MLX_FILL_H ( &wqe->rc.data[0], 2,
1566  local_address_h, virt_to_bus ( iobuf->data ) );
1567  MLX_FILL_1 ( &wqe->rc.data[0], 3,
1568  local_address_l, virt_to_bus ( iobuf->data ) );
1569  return HERMON_OPCODE_SEND;
1570 }
1571 
1572 /**
1573  * Construct Ethernet send work queue entry
1574  *
1575  * @v ibdev Infiniband device
1576  * @v qp Queue pair
1577  * @v dest Destination address vector
1578  * @v iobuf I/O buffer
1579  * @v wqe Send work queue entry
1580  * @ret opcode Control opcode
1581  */
1582 static unsigned int
1584  struct ib_queue_pair *qp __unused,
1586  struct io_buffer *iobuf,
1587  union hermon_send_wqe *wqe ) {
1588  struct hermon *hermon = ib_get_drvdata ( ibdev );
1589 
1590  /* Fill work queue entry */
1591  MLX_FILL_1 ( &wqe->eth.ctrl, 1, ds,
1592  ( ( offsetof ( typeof ( wqe->mlx ), data[1] ) / 16 ) ) );
1593  MLX_FILL_2 ( &wqe->eth.ctrl, 2,
1594  c, 0x03 /* generate completion */,
1595  s, 1 /* inhibit ICRC */ );
1596  MLX_FILL_1 ( &wqe->eth.data[0], 0,
1597  byte_count, iob_len ( iobuf ) );
1598  MLX_FILL_1 ( &wqe->eth.data[0], 1, l_key, hermon->lkey );
1599  MLX_FILL_H ( &wqe->eth.data[0], 2,
1600  local_address_h, virt_to_bus ( iobuf->data ) );
1601  MLX_FILL_1 ( &wqe->eth.data[0], 3,
1602  local_address_l, virt_to_bus ( iobuf->data ) );
1603  return HERMON_OPCODE_SEND;
1604 }
1605 
1606 /** Work queue entry constructors */
1607 static unsigned int
1608 ( * hermon_fill_send_wqe[] ) ( struct ib_device *ibdev,
1609  struct ib_queue_pair *qp,
1610  struct ib_address_vector *dest,
1611  struct io_buffer *iobuf,
1612  union hermon_send_wqe *wqe ) = {
1618 };
1619 
1620 /**
1621  * Post send work queue entry
1622  *
1623  * @v ibdev Infiniband device
1624  * @v qp Queue pair
1625  * @v dest Destination address vector
1626  * @v iobuf I/O buffer
1627  * @ret rc Return status code
1628  */
static int hermon_post_send ( struct ib_device *ibdev,
			      struct ib_queue_pair *qp,
			      struct ib_address_vector *dest,
			      struct io_buffer *iobuf ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermon_queue_pair *hermon_qp = ib_qp_get_drvdata ( qp );
	struct ib_work_queue *wq = &qp->send;
	struct hermon_send_work_queue *hermon_send_wq = &hermon_qp->send;
	union hermon_send_wqe *wqe;
	union hermonprm_doorbell_register db_reg;
	unsigned long wqe_idx_mask;
	unsigned long wqe_idx;
	unsigned int owner;
	unsigned int opcode;

	/* Allocate work queue entry.  The hardware ring
	 * (hermon_send_wq->num_wqes) may be larger than the software
	 * ring (wq->num_wqes); the owner bit flips on each wraparound
	 * of the hardware ring.
	 */
	wqe_idx = ( wq->next_idx & ( hermon_send_wq->num_wqes - 1 ) );
	owner = ( ( wq->next_idx & hermon_send_wq->num_wqes ) ? 1 : 0 );
	wqe_idx_mask = ( wq->num_wqes - 1 );
	if ( wq->iobufs[ wqe_idx & wqe_idx_mask ] ) {
		/* Software ring slot still holds an uncompleted buffer */
		DBGC ( hermon, "Hermon %p QPN %#lx send queue full",
		       hermon, qp->qpn );
		return -ENOBUFS;
	}
	wq->iobufs[ wqe_idx & wqe_idx_mask ] = iobuf;
	wqe = &hermon_send_wq->wqe[wqe_idx];

	/* Construct work queue entry.  The first four bytes contain
	 * ctrl.owner and must not be cleared while hardware may still
	 * be inspecting them.
	 */
	memset ( ( ( ( void * ) wqe ) + 4 /* avoid ctrl.owner */ ), 0,
		 ( sizeof ( *wqe ) - 4 ) );
	assert ( qp->type < ( sizeof ( hermon_fill_send_wqe ) /
			      sizeof ( hermon_fill_send_wqe[0] ) ) );
	assert ( hermon_fill_send_wqe[qp->type] != NULL );
	opcode = hermon_fill_send_wqe[qp->type] ( ibdev, qp, dest, iobuf, wqe );
	/* Ensure the rest of the WQE is visible before handing
	 * ownership to hardware via the opcode/owner dword.
	 */
	barrier();
	MLX_FILL_2 ( &wqe->ctrl, 0,
		     opcode, opcode,
		     owner, owner );
	DBGCP ( hermon, "Hermon %p QPN %#lx posting send WQE %#lx:\n",
		hermon, qp->qpn, wqe_idx );
	DBGCP_HDA ( hermon, virt_to_phys ( wqe ), wqe, sizeof ( *wqe ) );

	/* Ring doorbell register (after the WQE write is ordered) */
	MLX_FILL_1 ( &db_reg.send, 0, qn, qp->qpn );
	barrier();
	writel ( db_reg.dword[0], hermon_send_wq->doorbell );

	/* Update work queue's index */
	wq->next_idx++;

	return 0;
}
1681 
1682 /**
1683  * Post receive work queue entry
1684  *
1685  * @v ibdev Infiniband device
1686  * @v qp Queue pair
1687  * @v iobuf I/O buffer
1688  * @ret rc Return status code
1689  */
static int hermon_post_recv ( struct ib_device *ibdev,
			      struct ib_queue_pair *qp,
			      struct io_buffer *iobuf ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermon_queue_pair *hermon_qp = ib_qp_get_drvdata ( qp );
	struct ib_work_queue *wq = &qp->recv;
	struct hermon_recv_work_queue *hermon_recv_wq = &hermon_qp->recv;
	struct hermonprm_recv_wqe *wqe;
	struct hermonprm_wqe_segment_data_ptr *data;
	struct ib_global_route_header *grh;
	unsigned int wqe_idx_mask;

	/* Allocate work queue entry; a non-NULL slot means the ring
	 * entry is still awaiting completion.
	 */
	wqe_idx_mask = ( wq->num_wqes - 1 );
	if ( wq->iobufs[wq->next_idx & wqe_idx_mask] ) {
		DBGC ( hermon, "Hermon %p QPN %#lx receive queue full",
		       hermon, qp->qpn );
		return -ENOBUFS;
	}
	wq->iobufs[wq->next_idx & wqe_idx_mask] = iobuf;
	wqe = &hermon_recv_wq->wqe[wq->next_idx & wqe_idx_mask].recv;

	/* Construct work queue entry */
	data = &wqe->data[0];
	if ( hermon_qp->recv.grh ) {
		/* First scatter entry receives the GRH into the
		 * dedicated per-WQE GRH buffer (datagram QPs only).
		 */
		grh = &hermon_qp->recv.grh[wq->next_idx & wqe_idx_mask];
		MLX_FILL_1 ( data, 0, byte_count, sizeof ( *grh ) );
		MLX_FILL_1 ( data, 1, l_key, hermon->lkey );
		MLX_FILL_H ( data, 2, local_address_h, virt_to_bus ( grh ) );
		MLX_FILL_1 ( data, 3, local_address_l, virt_to_bus ( grh ) );
		data++;
	}
	/* Remaining scatter entry receives the payload into the I/O
	 * buffer's tailroom.
	 */
	MLX_FILL_1 ( data, 0, byte_count, iob_tailroom ( iobuf ) );
	MLX_FILL_1 ( data, 1, l_key, hermon->lkey );
	MLX_FILL_H ( data, 2, local_address_h, virt_to_bus ( iobuf->data ) );
	MLX_FILL_1 ( data, 3, local_address_l, virt_to_bus ( iobuf->data ) );

	/* Update work queue's index */
	wq->next_idx++;

	/* Update doorbell record; the barrier ensures the WQE is
	 * visible before hardware sees the new counter value.
	 */
	barrier();
	MLX_FILL_1 ( hermon_recv_wq->doorbell, 0, receive_wqe_counter,
		     ( wq->next_idx & 0xffff ) );

	return 0;
}
1737 
1738 /**
1739  * Handle completion
1740  *
1741  * @v ibdev Infiniband device
1742  * @v cq Completion queue
1743  * @v cqe Hardware completion queue entry
1744  * @ret rc Return status code
1745  */
1746 static int hermon_complete ( struct ib_device *ibdev,
1747  struct ib_completion_queue *cq,
1748  union hermonprm_completion_entry *cqe ) {
1749  struct hermon *hermon = ib_get_drvdata ( ibdev );
1750  struct hermon_queue_pair *hermon_qp;
1751  struct ib_work_queue *wq;
1752  struct ib_queue_pair *qp;
1753  struct io_buffer *iobuf;
1754  struct ib_address_vector recv_dest;
1755  struct ib_address_vector recv_source;
1756  struct ib_global_route_header *grh;
1757  struct ib_address_vector *source;
1758  unsigned int opcode;
1759  unsigned long qpn;
1760  int is_send;
1761  unsigned long wqe_idx;
1762  unsigned long wqe_idx_mask;
1763  size_t len;
1764  int rc = 0;
1765 
1766  /* Parse completion */
1767  qpn = MLX_GET ( &cqe->normal, qpn );
1768  is_send = MLX_GET ( &cqe->normal, s_r );
1769  opcode = MLX_GET ( &cqe->normal, opcode );
1770  if ( opcode >= HERMON_OPCODE_RECV_ERROR ) {
1771  /* "s" field is not valid for error opcodes */
1772  is_send = ( opcode == HERMON_OPCODE_SEND_ERROR );
1773  DBGC ( hermon, "Hermon %p CQN %#lx syndrome %x vendor %x\n",
1774  hermon, cq->cqn, MLX_GET ( &cqe->error, syndrome ),
1775  MLX_GET ( &cqe->error, vendor_error_syndrome ) );
1776  rc = -EIO;
1777  /* Don't return immediately; propagate error to completer */
1778  }
1779 
1780  /* Identify work queue */
1781  wq = ib_find_wq ( cq, qpn, is_send );
1782  if ( ! wq ) {
1783  DBGC ( hermon, "Hermon %p CQN %#lx unknown %s QPN %#lx\n",
1784  hermon, cq->cqn, ( is_send ? "send" : "recv" ), qpn );
1785  return -EIO;
1786  }
1787  qp = wq->qp;
1788  hermon_qp = ib_qp_get_drvdata ( qp );
1789 
1790  /* Identify work queue entry */
1791  wqe_idx = MLX_GET ( &cqe->normal, wqe_counter );
1792  wqe_idx_mask = ( wq->num_wqes - 1 );
1793  DBGCP ( hermon, "Hermon %p CQN %#lx QPN %#lx %s WQE %#lx completed:\n",
1794  hermon, cq->cqn, qp->qpn, ( is_send ? "send" : "recv" ),
1795  wqe_idx );
1796  DBGCP_HDA ( hermon, virt_to_phys ( cqe ), cqe, sizeof ( *cqe ) );
1797 
1798  /* Identify I/O buffer */
1799  iobuf = wq->iobufs[ wqe_idx & wqe_idx_mask ];
1800  if ( ! iobuf ) {
1801  DBGC ( hermon, "Hermon %p CQN %#lx QPN %#lx empty %s WQE "
1802  "%#lx\n", hermon, cq->cqn, qp->qpn,
1803  ( is_send ? "send" : "recv" ), wqe_idx );
1804  return -EIO;
1805  }
1806  wq->iobufs[ wqe_idx & wqe_idx_mask ] = NULL;
1807 
1808  if ( is_send ) {
1809  /* Hand off to completion handler */
1810  ib_complete_send ( ibdev, qp, iobuf, rc );
1811  } else if ( rc != 0 ) {
1812  /* Dump queue state (for debugging) */
1814  /* Hand off to completion handler */
1815  ib_complete_recv ( ibdev, qp, NULL, NULL, iobuf, rc );
1816  } else {
1817  /* Set received length */
1818  len = MLX_GET ( &cqe->normal, byte_cnt );
1819  memset ( &recv_dest, 0, sizeof ( recv_dest ) );
1820  recv_dest.qpn = qpn;
1821  memset ( &recv_source, 0, sizeof ( recv_source ) );
1822  switch ( qp->type ) {
1823  case IB_QPT_SMI:
1824  case IB_QPT_GSI:
1825  case IB_QPT_UD:
1826  /* Locate corresponding GRH */
1827  assert ( hermon_qp->recv.grh != NULL );
1828  grh = &hermon_qp->recv.grh[ wqe_idx & wqe_idx_mask ];
1829  len -= sizeof ( *grh );
1830  /* Construct address vector */
1831  source = &recv_source;
1832  source->qpn = MLX_GET ( &cqe->normal, srq_rqpn );
1833  source->lid = MLX_GET ( &cqe->normal, slid_smac47_32 );
1834  source->sl = MLX_GET ( &cqe->normal, sl );
1835  recv_dest.gid_present = source->gid_present =
1836  MLX_GET ( &cqe->normal, g );
1837  memcpy ( &recv_dest.gid, &grh->dgid,
1838  sizeof ( recv_dest.gid ) );
1839  memcpy ( &source->gid, &grh->sgid,
1840  sizeof ( source->gid ) );
1841  break;
1842  case IB_QPT_RC:
1843  source = &qp->av;
1844  break;
1845  case IB_QPT_ETH:
1846  /* Construct address vector */
1847  source = &recv_source;
1848  source->vlan_present = MLX_GET ( &cqe->normal, vlan );
1849  source->vlan = MLX_GET ( &cqe->normal, vid );
1850  break;
1851  default:
1852  assert ( 0 );
1853  return -EINVAL;
1854  }
1855  assert ( len <= iob_tailroom ( iobuf ) );
1856  iob_put ( iobuf, len );
1857  /* Hand off to completion handler */
1858  ib_complete_recv ( ibdev, qp, &recv_dest, source, iobuf, 0 );
1859  }
1860 
1861  return rc;
1862 }
1863 
1864 /**
1865  * Poll completion queue
1866  *
1867  * @v ibdev Infiniband device
1868  * @v cq Completion queue
1869  */
1870 static void hermon_poll_cq ( struct ib_device *ibdev,
1871  struct ib_completion_queue *cq ) {
1872  struct hermon *hermon = ib_get_drvdata ( ibdev );
1873  struct hermon_completion_queue *hermon_cq = ib_cq_get_drvdata ( cq );
1874  union hermonprm_completion_entry *cqe;
1875  unsigned int cqe_idx_mask;
1876  int rc;
1877 
1878  while ( 1 ) {
1879  /* Look for completion entry */
1880  cqe_idx_mask = ( cq->num_cqes - 1 );
1881  cqe = &hermon_cq->cqe[cq->next_idx & cqe_idx_mask];
1882  if ( MLX_GET ( &cqe->normal, owner ) ^
1883  ( ( cq->next_idx & cq->num_cqes ) ? 1 : 0 ) ) {
1884  /* Entry still owned by hardware; end of poll */
1885  break;
1886  }
1887 
1888  /* Handle completion */
1889  if ( ( rc = hermon_complete ( ibdev, cq, cqe ) ) != 0 ) {
1890  DBGC ( hermon, "Hermon %p CQN %#lx failed to complete:"
1891  " %s\n", hermon, cq->cqn, strerror ( rc ) );
1892  DBGC_HDA ( hermon, virt_to_phys ( cqe ),
1893  cqe, sizeof ( *cqe ) );
1894  }
1895 
1896  /* Update completion queue's index */
1897  cq->next_idx++;
1898 
1899  /* Update doorbell record */
1900  MLX_FILL_1 ( hermon_cq->doorbell, 0, update_ci,
1901  ( cq->next_idx & 0x00ffffffUL ) );
1902  }
1903 }
1904 
1905 /***************************************************************************
1906  *
1907  * Event queues
1908  *
1909  ***************************************************************************
1910  */
1911 
1912 /**
1913  * Dump event queue context (for debugging only)
1914  *
1915  * @v hermon Hermon device
1916  * @v hermon_eq Event queue
1917  * @ret rc Return status code
1918  */
1919 static __attribute__ (( unused )) int
1921  struct hermon_event_queue *hermon_eq ) {
1922  struct hermonprm_eqc eqctx;
1923  int rc;
1924 
1925  /* Do nothing unless debugging is enabled */
1926  if ( ! DBG_LOG )
1927  return 0;
1928 
1929  /* Dump event queue context */
1930  memset ( &eqctx, 0, sizeof ( eqctx ) );
1931  if ( ( rc = hermon_cmd_query_eq ( hermon, hermon_eq->eqn,
1932  &eqctx ) ) != 0 ) {
1933  DBGC ( hermon, "Hermon %p EQN %#lx QUERY_EQ failed: %s\n",
1934  hermon, hermon_eq->eqn, strerror ( rc ) );
1935  return rc;
1936  }
1937  DBGC ( hermon, "Hermon %p EQN %#lx context:\n",
1938  hermon, hermon_eq->eqn );
1939  DBGC_HDA ( hermon, 0, &eqctx, sizeof ( eqctx ) );
1940 
1941  return 0;
1942 }
1943 
1944 /**
1945  * Dump unconsumed event queue entries (for debugging only)
1946  *
1947  * @v hermon Hermon device
1948  * @v hermon_eq Event queue
1949  * @ret rc Return status code
1950  */
1951 static __attribute__ (( unused )) int
1953  struct hermon_event_queue *hermon_eq ) {
1954  struct hermonprm_eqc eqctx;
1955  union hermonprm_event_entry *eqe;
1956  unsigned int mask;
1957  unsigned int prod;
1958  unsigned int cons;
1959  unsigned int idx;
1960  int rc;
1961 
1962  /* Do nothing unless debugging is enabled */
1963  if ( ! DBG_LOG )
1964  return 0;
1965 
1966  /* Dump event queue entries */
1967  memset ( &eqctx, 0, sizeof ( eqctx ) );
1968  if ( ( rc = hermon_cmd_query_eq ( hermon, hermon_eq->eqn,
1969  &eqctx ) ) != 0 ) {
1970  DBGC ( hermon, "Hermon %p EQN %#lx QUERY_EQ failed: %s\n",
1971  hermon, hermon_eq->eqn, strerror ( rc ) );
1972  return rc;
1973  }
1974  mask = ( HERMON_NUM_EQES - 1 );
1975  prod = MLX_GET ( &eqctx, producer_counter ) & mask;
1976  cons = MLX_GET ( &eqctx, consumer_counter ) & mask;
1977  idx = hermon_eq->next_idx;
1978  if ( ( idx & mask ) != ( cons & mask ) ) {
1979  DBGC ( hermon, "Hermon %p EQN %#lx mismatch: SW %#x != HW "
1980  "%#x\n", hermon, hermon_eq->eqn, idx, cons );
1981  }
1982  for ( ; ( idx & mask ) != ( prod & mask ) ; idx++ ) {
1983  eqe = &hermon_eq->eqe[idx & mask];
1984  DBGC ( hermon, "Hermon %p EQN %#lx event %#x owner %d type "
1985  "%#02x:%#02x\n", hermon, hermon_eq->eqn, idx,
1986  MLX_GET ( &eqe->generic, owner ),
1987  MLX_GET ( &eqe->generic, event_type ),
1988  MLX_GET ( &eqe->generic, event_sub_type ) );
1989  DBGC_HDA ( hermon, 0, eqe, sizeof ( *eqe ) );
1990  }
1991 
1992  return 0;
1993 }
1994 
1995 /**
1996  * Create event queue
1997  *
1998  * @v hermon Hermon device
1999  * @ret rc Return status code
2000  */
static int hermon_create_eq ( struct hermon *hermon ) {
	struct hermon_event_queue *hermon_eq = &hermon->eq;
	struct hermonprm_eqc eqctx;
	struct hermonprm_event_mask mask;
	unsigned int i;
	int rc;

	/* Select event queue number; must lie beyond both the
	 * reserved UARs (four EQ doorbells per UAR page) and the
	 * reserved EQs.
	 */
	hermon_eq->eqn = ( 4 * hermon->cap.reserved_uars );
	if ( hermon_eq->eqn < hermon->cap.reserved_eqs )
		hermon_eq->eqn = hermon->cap.reserved_eqs;

	/* Calculate doorbell address within the UAR region */
	hermon_eq->doorbell =
		( hermon->uar + HERMON_DB_EQ_OFFSET ( hermon_eq->eqn ) );

	/* Allocate event queue itself */
	hermon_eq->eqe_size =
		( HERMON_NUM_EQES * sizeof ( hermon_eq->eqe[0] ) );
	hermon_eq->eqe = malloc_phys ( hermon_eq->eqe_size,
				       sizeof ( hermon_eq->eqe[0] ) );
	if ( ! hermon_eq->eqe ) {
		DBGC ( hermon, "Hermon %p EQN %#lx could not allocate EQEs\n",
		       hermon, hermon_eq->eqn );
		rc = -ENOMEM;
		goto err_eqe;
	}
	/* Set the owner bit on every entry before handover, so that
	 * all entries initially appear hardware-owned.
	 */
	memset ( hermon_eq->eqe, 0, hermon_eq->eqe_size );
	for ( i = 0 ; i < HERMON_NUM_EQES ; i++ ) {
		MLX_FILL_1 ( &hermon_eq->eqe[i].generic, 7, owner, 1 );
	}
	barrier();

	/* Allocate MTT entries mapping the event queue buffer */
	if ( ( rc = hermon_alloc_mtt ( hermon, hermon_eq->eqe,
				       hermon_eq->eqe_size,
				       &hermon_eq->mtt ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p EQN %#lx could not allocate MTTs: "
		       "%s\n", hermon, hermon_eq->eqn, strerror ( rc ) );
		goto err_alloc_mtt;
	}

	/* Hand queue over to hardware */
	memset ( &eqctx, 0, sizeof ( eqctx ) );
	MLX_FILL_2 ( &eqctx, 0,
		     st, 0xa /* "Fired" */,
		     oi, 1 );
	MLX_FILL_1 ( &eqctx, 2,
		     page_offset, ( hermon_eq->mtt.page_offset >> 5 ) );
	MLX_FILL_1 ( &eqctx, 3, log_eq_size, fls ( HERMON_NUM_EQES - 1 ) );
	MLX_FILL_H ( &eqctx, 6, mtt_base_addr_h,
		     hermon_eq->mtt.mtt_base_addr );
	MLX_FILL_1 ( &eqctx, 7, mtt_base_addr_l,
		     ( hermon_eq->mtt.mtt_base_addr >> 3 ) );
	if ( ( rc = hermon_cmd_sw2hw_eq ( hermon, hermon_eq->eqn,
					  &eqctx ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p EQN %#lx SW2HW_EQ failed: %s\n",
		       hermon, hermon_eq->eqn, strerror ( rc ) );
		goto err_sw2hw_eq;
	}

	/* Map all events to this event queue (all mask bits set) */
	memset ( &mask, 0xff, sizeof ( mask ) );
	if ( ( rc = hermon_cmd_map_eq ( hermon,
					( HERMON_MAP_EQ | hermon_eq->eqn ),
					&mask ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p EQN %#lx MAP_EQ failed: %s\n",
		       hermon, hermon_eq->eqn, strerror ( rc ) );
		goto err_map_eq;
	}

	DBGC ( hermon, "Hermon %p EQN %#lx ring [%08lx,%08lx), doorbell "
	       "%08lx\n", hermon, hermon_eq->eqn,
	       virt_to_phys ( hermon_eq->eqe ),
	       ( virt_to_phys ( hermon_eq->eqe ) + hermon_eq->eqe_size ),
	       virt_to_phys ( hermon_eq->doorbell ) );
	return 0;

 err_map_eq:
	hermon_cmd_hw2sw_eq ( hermon, hermon_eq->eqn, &eqctx );
 err_sw2hw_eq:
	hermon_free_mtt ( hermon, &hermon_eq->mtt );
 err_alloc_mtt:
	free_phys ( hermon_eq->eqe, hermon_eq->eqe_size );
 err_eqe:
	memset ( hermon_eq, 0, sizeof ( *hermon_eq ) );
	return rc;
}
2089 
2090 /**
2091  * Destroy event queue
2092  *
2093  * @v hermon Hermon device
2094  */
2095 static void hermon_destroy_eq ( struct hermon *hermon ) {
2096  struct hermon_event_queue *hermon_eq = &hermon->eq;
2097  struct hermonprm_eqc eqctx;
2098  struct hermonprm_event_mask mask;
2099  int rc;
2100 
2101  /* Unmap events from event queue */
2102  memset ( &mask, 0xff, sizeof ( mask ) );
2103  if ( ( rc = hermon_cmd_map_eq ( hermon,
2104  ( HERMON_UNMAP_EQ | hermon_eq->eqn ),
2105  &mask ) ) != 0 ) {
2106  DBGC ( hermon, "Hermon %p EQN %#lx FATAL MAP_EQ failed to "
2107  "unmap: %s\n", hermon, hermon_eq->eqn, strerror ( rc ) );
2108  /* Continue; HCA may die but system should survive */
2109  }
2110 
2111  /* Take ownership back from hardware */
2112  if ( ( rc = hermon_cmd_hw2sw_eq ( hermon, hermon_eq->eqn,
2113  &eqctx ) ) != 0 ) {
2114  DBGC ( hermon, "Hermon %p EQN %#lx FATAL HW2SW_EQ failed: %s\n",
2115  hermon, hermon_eq->eqn, strerror ( rc ) );
2116  /* Leak memory and return; at least we avoid corruption */
2117  return;
2118  }
2119 
2120  /* Free MTT entries */
2121  hermon_free_mtt ( hermon, &hermon_eq->mtt );
2122 
2123  /* Free memory */
2124  free_phys ( hermon_eq->eqe, hermon_eq->eqe_size );
2125  memset ( hermon_eq, 0, sizeof ( *hermon_eq ) );
2126 }
2127 
2128 /**
2129  * Handle port state event
2130  *
2131  * @v hermon Hermon device
2132  * @v eqe Port state change event queue entry
2133  */
2135  union hermonprm_event_entry *eqe){
2136  unsigned int port;
2137  int link_up;
2138 
2139  /* Get port and link status */
2140  port = ( MLX_GET ( &eqe->port_state_change, data.p ) - 1 );
2141  link_up = ( MLX_GET ( &eqe->generic, event_sub_type ) & 0x04 );
2142  DBGC ( hermon, "Hermon %p port %d link %s\n", hermon, ( port + 1 ),
2143  ( link_up ? "up" : "down" ) );
2144 
2145  /* Sanity check */
2146  if ( port >= hermon->cap.num_ports ) {
2147  DBGC ( hermon, "Hermon %p port %d does not exist!\n",
2148  hermon, ( port + 1 ) );
2149  return;
2150  }
2151 
2152  /* Notify device of port state change */
2154  link_up );
2155 }
2156 
2157 /**
2158  * Handle port management event
2159  *
2160  * @v hermon Hermon device
2161  * @v eqe Port management change event queue entry
2162  */
2164  union hermonprm_event_entry *eqe){
2165  unsigned int port;
2166 
2167  /* Get port */
2168  port = ( MLX_GET ( &eqe->port_mgmnt_change, port ) - 1 );
2169  DBGC ( hermon, "Hermon %p port %d management change\n",
2170  hermon, ( port + 1 ) );
2171 
2172  /* Sanity check */
2173  if ( port >= hermon->cap.num_ports ) {
2174  DBGC ( hermon, "Hermon %p port %d does not exist!\n",
2175  hermon, ( port + 1 ) );
2176  return;
2177  }
2178 
2179  /* Update MAD parameters */
2181 }
2182 
2183 /**
2184  * Poll event queue
2185  *
2186  * @v ibdev Infiniband device
2187  */
2188 static void hermon_poll_eq ( struct ib_device *ibdev ) {
2189  struct hermon *hermon = ib_get_drvdata ( ibdev );
2190  struct hermon_event_queue *hermon_eq = &hermon->eq;
2191  union hermonprm_event_entry *eqe;
2192  union hermonprm_doorbell_register db_reg;
2193  unsigned long now;
2194  unsigned long elapsed;
2195  unsigned int eqe_idx_mask;
2196  unsigned int event_type;
2197 
2198  /* No event is generated upon reaching INIT, so we must poll
2199  * separately for link state changes while we remain DOWN.
2200  */
2201  if ( ib_is_open ( ibdev ) &&
2202  ( ibdev->port_state == IB_PORT_STATE_DOWN ) ) {
2203  now = currticks();
2204  elapsed = ( now - hermon->last_poll );
2205  if ( elapsed >= HERMON_LINK_POLL_INTERVAL ) {
2206  hermon->last_poll = now;
2207  ib_smc_update ( ibdev, hermon_mad );
2208  }
2209  }
2210 
2211  /* Poll event queue */
2212  while ( 1 ) {
2213  /* Look for event entry */
2214  eqe_idx_mask = ( HERMON_NUM_EQES - 1 );
2215  eqe = &hermon_eq->eqe[hermon_eq->next_idx & eqe_idx_mask];
2216  if ( MLX_GET ( &eqe->generic, owner ) ^
2217  ( ( hermon_eq->next_idx & HERMON_NUM_EQES ) ? 1 : 0 ) ) {
2218  /* Entry still owned by hardware; end of poll */
2219  break;
2220  }
2221  DBGCP ( hermon, "Hermon %p EQN %#lx event:\n",
2222  hermon, hermon_eq->eqn );
2223  DBGCP_HDA ( hermon, virt_to_phys ( eqe ),
2224  eqe, sizeof ( *eqe ) );
2225 
2226  /* Handle event */
2227  event_type = MLX_GET ( &eqe->generic, event_type );
2228  switch ( event_type ) {
2231  break;
2234  break;
2235  default:
2236  DBGC ( hermon, "Hermon %p EQN %#lx unrecognised event "
2237  "type %#02x:%#02x\n",
2238  hermon, hermon_eq->eqn, event_type,
2239  MLX_GET ( &eqe->generic, event_sub_type ) );
2240  DBGC_HDA ( hermon, virt_to_phys ( eqe ),
2241  eqe, sizeof ( *eqe ) );
2242  break;
2243  }
2244 
2245  /* Update event queue's index */
2246  hermon_eq->next_idx++;
2247 
2248  /* Ring doorbell */
2249  MLX_FILL_1 ( &db_reg.event, 0,
2250  ci, ( hermon_eq->next_idx & 0x00ffffffUL ) );
2251  writel ( db_reg.dword[0], hermon_eq->doorbell );
2252  }
2253 }
2254 
2255 /***************************************************************************
2256  *
2257  * Firmware control
2258  *
2259  ***************************************************************************
2260  */
2261 
2262 /**
2263  * Map virtual to physical address for firmware usage
2264  *
2265  * @v hermon Hermon device
2266  * @v map Mapping function
2267  * @v va Virtual address
2268  * @v pa Physical address
2269  * @v len Length of region
2270  * @ret rc Return status code
2271  */
2272 static int hermon_map_vpm ( struct hermon *hermon,
2273  int ( *map ) ( struct hermon *hermon,
2274  const struct hermonprm_virtual_physical_mapping* ),
2275  uint64_t va, physaddr_t pa, size_t len ) {
2276  struct hermonprm_virtual_physical_mapping mapping;
2277  physaddr_t start;
2278  physaddr_t low;
2279  physaddr_t high;
2280  physaddr_t end;
2281  size_t size;
2282  int rc;
2283 
2284  /* Sanity checks */
2285  assert ( ( va & ( HERMON_PAGE_SIZE - 1 ) ) == 0 );
2286  assert ( ( pa & ( HERMON_PAGE_SIZE - 1 ) ) == 0 );
2287  assert ( ( len & ( HERMON_PAGE_SIZE - 1 ) ) == 0 );
2288  assert ( len != 0 );
2289 
2290  /* Calculate starting points */
2291  start = pa;
2292  end = ( start + len );
2293  size = ( 1UL << ( fls ( start ^ end ) - 1 ) );
2294  low = high = ( end & ~( size - 1 ) );
2295  assert ( start < low );
2296  assert ( high <= end );
2297 
2298  /* These mappings tend to generate huge volumes of
2299  * uninteresting debug data, which basically makes it
2300  * impossible to use debugging otherwise.
2301  */
2303 
2304  /* Map blocks in descending order of size */
2305  while ( size >= HERMON_PAGE_SIZE ) {
2306 
2307  /* Find the next candidate block */
2308  if ( ( low - size ) >= start ) {
2309  low -= size;
2310  pa = low;
2311  } else if ( high <= ( end - size ) ) {
2312  pa = high;
2313  high += size;
2314  } else {
2315  size >>= 1;
2316  continue;
2317  }
2318  assert ( ( va & ( size - 1 ) ) == 0 );
2319  assert ( ( pa & ( size - 1 ) ) == 0 );
2320 
2321  /* Map this block */
2322  memset ( &mapping, 0, sizeof ( mapping ) );
2323  MLX_FILL_1 ( &mapping, 0, va_h, ( va >> 32 ) );
2324  MLX_FILL_1 ( &mapping, 1, va_l, ( va >> 12 ) );
2325  MLX_FILL_H ( &mapping, 2, pa_h, pa );
2326  MLX_FILL_2 ( &mapping, 3,
2327  log2size, ( ( fls ( size ) - 1 ) - 12 ),
2328  pa_l, ( pa >> 12 ) );
2329  if ( ( rc = map ( hermon, &mapping ) ) != 0 ) {
2331  DBGC ( hermon, "Hermon %p could not map %08llx+%zx to "
2332  "%08lx: %s\n",
2333  hermon, va, size, pa, strerror ( rc ) );
2334  return rc;
2335  }
2336  va += size;
2337  }
2338  assert ( low == start );
2339  assert ( high == end );
2340 
2342  return 0;
2343 }
2344 
2345 /**
2346  * Start firmware running
2347  *
2348  * @v hermon Hermon device
2349  * @ret rc Return status code
2350  */
2351 static int hermon_start_firmware ( struct hermon *hermon ) {
2352  struct hermonprm_query_fw fw;
2353  unsigned int fw_pages;
2354  size_t fw_len;
2355  physaddr_t fw_base;
2356  int rc;
2357 
2358  /* Get firmware parameters */
2359  if ( ( rc = hermon_cmd_query_fw ( hermon, &fw ) ) != 0 ) {
2360  DBGC ( hermon, "Hermon %p could not query firmware: %s\n",
2361  hermon, strerror ( rc ) );
2362  goto err_query_fw;
2363  }
2364  DBGC ( hermon, "Hermon %p firmware version %d.%d.%d\n", hermon,
2365  MLX_GET ( &fw, fw_rev_major ), MLX_GET ( &fw, fw_rev_minor ),
2366  MLX_GET ( &fw, fw_rev_subminor ) );
2367  fw_pages = MLX_GET ( &fw, fw_pages );
2368  DBGC ( hermon, "Hermon %p requires %d pages (%d kB) for firmware\n",
2369  hermon, fw_pages, ( fw_pages * 4 ) );
2370 
2371  /* Allocate firmware pages and map firmware area */
2372  fw_len = ( fw_pages * HERMON_PAGE_SIZE );
2373  if ( ! hermon->firmware_area ) {
2374  hermon->firmware_len = fw_len;
2376  if ( ! hermon->firmware_area ) {
2377  DBGC ( hermon, "Hermon %p could not allocate firmware "
2378  "area\n", hermon );
2379  rc = -ENOMEM;
2380  goto err_alloc_fa;
2381  }
2382  } else {
2383  assert ( hermon->firmware_len == fw_len );
2384  }
2385  fw_base = user_to_phys ( hermon->firmware_area, 0 );
2386  DBGC ( hermon, "Hermon %p firmware area at physical [%08lx,%08lx)\n",
2387  hermon, fw_base, ( fw_base + fw_len ) );
2389  0, fw_base, fw_len ) ) != 0 ) {
2390  DBGC ( hermon, "Hermon %p could not map firmware: %s\n",
2391  hermon, strerror ( rc ) );
2392  goto err_map_fa;
2393  }
2394 
2395  /* Start firmware */
2396  if ( ( rc = hermon_cmd_run_fw ( hermon ) ) != 0 ) {
2397  DBGC ( hermon, "Hermon %p could not run firmware: %s\n",
2398  hermon, strerror ( rc ) );
2399  goto err_run_fw;
2400  }
2401 
2402  DBGC ( hermon, "Hermon %p firmware started\n", hermon );
2403  return 0;
2404 
2405  err_run_fw:
2406  err_map_fa:
2408  err_alloc_fa:
2409  err_query_fw:
2410  return rc;
2411 }
2412 
2413 /**
2414  * Stop firmware running
2415  *
2416  * @v hermon Hermon device
2417  */
2418 static void hermon_stop_firmware ( struct hermon *hermon ) {
2419  int rc;
2420 
2421  if ( ( rc = hermon_cmd_unmap_fa ( hermon ) ) != 0 ) {
2422  DBGC ( hermon, "Hermon %p FATAL could not stop firmware: %s\n",
2423  hermon, strerror ( rc ) );
2424  /* Leak memory and return; at least we avoid corruption */
2426  return;
2427  }
2428 }
2429 
2430 /***************************************************************************
2431  *
2432  * Infinihost Context Memory management
2433  *
2434  ***************************************************************************
2435  */
2436 
2437 /**
2438  * Get device limits
2439  *
2440  * @v hermon Hermon device
2441  * @ret rc Return status code
2442  */
2443 static int hermon_get_cap ( struct hermon *hermon ) {
2444  struct hermonprm_query_dev_cap dev_cap;
2445  int rc;
2446 
2447  if ( ( rc = hermon_cmd_query_dev_cap ( hermon, &dev_cap ) ) != 0 ) {
2448  DBGC ( hermon, "Hermon %p could not get device limits: %s\n",
2449  hermon, strerror ( rc ) );
2450  return rc;
2451  }
2452 
2453  hermon->cap.cmpt_entry_size = MLX_GET ( &dev_cap, c_mpt_entry_sz );
2455  ( 1 << MLX_GET ( &dev_cap, log2_rsvd_qps ) );
2456  hermon->cap.qpc_entry_size = MLX_GET ( &dev_cap, qpc_entry_sz );
2457  hermon->cap.altc_entry_size = MLX_GET ( &dev_cap, altc_entry_sz );
2458  hermon->cap.auxc_entry_size = MLX_GET ( &dev_cap, aux_entry_sz );
2460  ( 1 << MLX_GET ( &dev_cap, log2_rsvd_srqs ) );
2461  hermon->cap.srqc_entry_size = MLX_GET ( &dev_cap, srq_entry_sz );
2463  ( 1 << MLX_GET ( &dev_cap, log2_rsvd_cqs ) );
2464  hermon->cap.cqc_entry_size = MLX_GET ( &dev_cap, cqc_entry_sz );
2465  hermon->cap.reserved_eqs = MLX_GET ( &dev_cap, num_rsvd_eqs );
2466  if ( hermon->cap.reserved_eqs == 0 ) {
2467  /* Backward compatibility */
2469  ( 1 << MLX_GET ( &dev_cap, log2_rsvd_eqs ) );
2470  }
2471  hermon->cap.eqc_entry_size = MLX_GET ( &dev_cap, eqc_entry_sz );
2473  ( 1 << MLX_GET ( &dev_cap, log2_rsvd_mtts ) );
2474  hermon->cap.mtt_entry_size = MLX_GET ( &dev_cap, mtt_entry_sz );
2476  ( 1 << MLX_GET ( &dev_cap, log2_rsvd_mrws ) );
2477  hermon->cap.dmpt_entry_size = MLX_GET ( &dev_cap, d_mpt_entry_sz );
2478  hermon->cap.reserved_uars = MLX_GET ( &dev_cap, num_rsvd_uars );
2479  hermon->cap.num_ports = MLX_GET ( &dev_cap, num_ports );
2480  hermon->cap.dpdp = MLX_GET ( &dev_cap, dpdp );
2481 
2482  /* Sanity check */
2483  if ( hermon->cap.num_ports > HERMON_MAX_PORTS ) {
2484  DBGC ( hermon, "Hermon %p has %d ports (only %d supported)\n",
2487  }
2488 
2489  return 0;
2490 }
2491 
/**
 * Align ICM table
 *
 * @v icm_offset	Current ICM offset
 * @v len		ICM table length
 * @ret icm_offset	ICM offset
 */
static uint64_t icm_align ( uint64_t icm_offset, size_t len ) {
	uint64_t mask = ( ( ( uint64_t ) len ) - 1 );

	/* Table length must be a power of two */
	assert ( len == ( 1UL << ( fls ( len ) - 1 ) ) );

	/* Round offset up to the next multiple of the table length */
	return ( ( icm_offset + mask ) & ~mask );
}
2505 
2506 /**
2507  * Map ICM (allocating if necessary)
2508  *
2509  * @v hermon Hermon device
2510  * @v init_hca INIT_HCA structure to fill in
2511  * @ret rc Return status code
2512  */
2513 static int hermon_map_icm ( struct hermon *hermon,
2514  struct hermonprm_init_hca *init_hca ) {
2515  struct hermonprm_scalar_parameter icm_size;
2516  struct hermonprm_scalar_parameter icm_aux_size;
2517  uint64_t icm_offset = 0;
2518  unsigned int log_num_qps, log_num_srqs, log_num_cqs, log_num_eqs;
2519  unsigned int log_num_mtts, log_num_mpts, log_num_mcs;
2520  size_t cmpt_max_len;
2521  size_t icm_len, icm_aux_len;
2522  size_t len;
2523  physaddr_t icm_phys;
2524  int i;
2525  int rc;
2526 
2527  /*
2528  * Start by carving up the ICM virtual address space
2529  *
2530  */
2531 
2532  /* Calculate number of each object type within ICM */
2533  log_num_qps = fls ( hermon->cap.reserved_qps +
2535  log_num_srqs = fls ( hermon->cap.reserved_srqs - 1 );
2536  log_num_cqs = fls ( hermon->cap.reserved_cqs + HERMON_MAX_CQS - 1 );
2537  log_num_eqs = fls ( hermon->cap.reserved_eqs + HERMON_MAX_EQS - 1 );
2538  log_num_mtts = fls ( hermon->cap.reserved_mtts + HERMON_MAX_MTTS - 1 );
2539  log_num_mpts = fls ( hermon->cap.reserved_mrws + 1 - 1 );
2540  log_num_mcs = HERMON_LOG_MULTICAST_HASH_SIZE;
2541 
2542  /* ICM starts with the cMPT tables, which are sparse */
2543  cmpt_max_len = ( HERMON_CMPT_MAX_ENTRIES *
2544  ( ( uint64_t ) hermon->cap.cmpt_entry_size ) );
2545  len = ( ( ( ( 1 << log_num_qps ) * hermon->cap.cmpt_entry_size ) +
2546  HERMON_PAGE_SIZE - 1 ) & ~( HERMON_PAGE_SIZE - 1 ) );
2547  hermon->icm_map[HERMON_ICM_QP_CMPT].offset = icm_offset;
2549  icm_offset += cmpt_max_len;
2550  len = ( ( ( ( 1 << log_num_srqs ) * hermon->cap.cmpt_entry_size ) +
2551  HERMON_PAGE_SIZE - 1 ) & ~( HERMON_PAGE_SIZE - 1 ) );
2552  hermon->icm_map[HERMON_ICM_SRQ_CMPT].offset = icm_offset;
2554  icm_offset += cmpt_max_len;
2555  len = ( ( ( ( 1 << log_num_cqs ) * hermon->cap.cmpt_entry_size ) +
2556  HERMON_PAGE_SIZE - 1 ) & ~( HERMON_PAGE_SIZE - 1 ) );
2557  hermon->icm_map[HERMON_ICM_CQ_CMPT].offset = icm_offset;
2559  icm_offset += cmpt_max_len;
2560  len = ( ( ( ( 1 << log_num_eqs ) * hermon->cap.cmpt_entry_size ) +
2561  HERMON_PAGE_SIZE - 1 ) & ~( HERMON_PAGE_SIZE - 1 ) );
2562  hermon->icm_map[HERMON_ICM_EQ_CMPT].offset = icm_offset;
2564  icm_offset += cmpt_max_len;
2565 
2566  hermon->icm_map[HERMON_ICM_OTHER].offset = icm_offset;
2567 
2568  /* Queue pair contexts */
2569  len = ( ( 1 << log_num_qps ) * hermon->cap.qpc_entry_size );
2570  icm_offset = icm_align ( icm_offset, len );
2571  MLX_FILL_1 ( init_hca, 12,
2572  qpc_eec_cqc_eqc_rdb_parameters.qpc_base_addr_h,
2573  ( icm_offset >> 32 ) );
2574  MLX_FILL_2 ( init_hca, 13,
2575  qpc_eec_cqc_eqc_rdb_parameters.qpc_base_addr_l,
2576  ( icm_offset >> 5 ),
2577  qpc_eec_cqc_eqc_rdb_parameters.log_num_of_qp,
2578  log_num_qps );
2579  DBGC ( hermon, "Hermon %p ICM QPC is %d x %#zx at [%08llx,%08llx)\n",
2580  hermon, ( 1 << log_num_qps ), hermon->cap.qpc_entry_size,
2581  icm_offset, ( icm_offset + len ) );
2582  icm_offset += len;
2583 
2584  /* Extended alternate path contexts */
2585  len = ( ( 1 << log_num_qps ) * hermon->cap.altc_entry_size );
2586  icm_offset = icm_align ( icm_offset, len );
2587  MLX_FILL_1 ( init_hca, 24,
2588  qpc_eec_cqc_eqc_rdb_parameters.altc_base_addr_h,
2589  ( icm_offset >> 32 ) );
2590  MLX_FILL_1 ( init_hca, 25,
2591  qpc_eec_cqc_eqc_rdb_parameters.altc_base_addr_l,
2592  icm_offset );
2593  DBGC ( hermon, "Hermon %p ICM ALTC is %d x %#zx at [%08llx,%08llx)\n",
2594  hermon, ( 1 << log_num_qps ), hermon->cap.altc_entry_size,
2595  icm_offset, ( icm_offset + len ) );
2596  icm_offset += len;
2597 
2598  /* Extended auxiliary contexts */
2599  len = ( ( 1 << log_num_qps ) * hermon->cap.auxc_entry_size );
2600  icm_offset = icm_align ( icm_offset, len );
2601  MLX_FILL_1 ( init_hca, 28,
2602  qpc_eec_cqc_eqc_rdb_parameters.auxc_base_addr_h,
2603  ( icm_offset >> 32 ) );
2604  MLX_FILL_1 ( init_hca, 29,
2605  qpc_eec_cqc_eqc_rdb_parameters.auxc_base_addr_l,
2606  icm_offset );
2607  DBGC ( hermon, "Hermon %p ICM AUXC is %d x %#zx at [%08llx,%08llx)\n",
2608  hermon, ( 1 << log_num_qps ), hermon->cap.auxc_entry_size,
2609  icm_offset, ( icm_offset + len ) );
2610  icm_offset += len;
2611 
2612  /* Shared receive queue contexts */
2613  len = ( ( 1 << log_num_srqs ) * hermon->cap.srqc_entry_size );
2614  icm_offset = icm_align ( icm_offset, len );
2615  MLX_FILL_1 ( init_hca, 18,
2616  qpc_eec_cqc_eqc_rdb_parameters.srqc_base_addr_h,
2617  ( icm_offset >> 32 ) );
2618  MLX_FILL_2 ( init_hca, 19,
2619  qpc_eec_cqc_eqc_rdb_parameters.srqc_base_addr_l,
2620  ( icm_offset >> 5 ),
2621  qpc_eec_cqc_eqc_rdb_parameters.log_num_of_srq,
2622  log_num_srqs );
2623  DBGC ( hermon, "Hermon %p ICM SRQC is %d x %#zx at [%08llx,%08llx)\n",
2624  hermon, ( 1 << log_num_srqs ), hermon->cap.srqc_entry_size,
2625  icm_offset, ( icm_offset + len ) );
2626  icm_offset += len;
2627 
2628  /* Completion queue contexts */
2629  len = ( ( 1 << log_num_cqs ) * hermon->cap.cqc_entry_size );
2630  icm_offset = icm_align ( icm_offset, len );
2631  MLX_FILL_1 ( init_hca, 20,
2632  qpc_eec_cqc_eqc_rdb_parameters.cqc_base_addr_h,
2633  ( icm_offset >> 32 ) );
2634  MLX_FILL_2 ( init_hca, 21,
2635  qpc_eec_cqc_eqc_rdb_parameters.cqc_base_addr_l,
2636  ( icm_offset >> 5 ),
2637  qpc_eec_cqc_eqc_rdb_parameters.log_num_of_cq,
2638  log_num_cqs );
2639  DBGC ( hermon, "Hermon %p ICM CQC is %d x %#zx at [%08llx,%08llx)\n",
2640  hermon, ( 1 << log_num_cqs ), hermon->cap.cqc_entry_size,
2641  icm_offset, ( icm_offset + len ) );
2642  icm_offset += len;
2643 
2644  /* Event queue contexts */
2645  len = ( ( 1 << log_num_eqs ) * hermon->cap.eqc_entry_size );
2646  icm_offset = icm_align ( icm_offset, len );
2647  MLX_FILL_1 ( init_hca, 32,
2648  qpc_eec_cqc_eqc_rdb_parameters.eqc_base_addr_h,
2649  ( icm_offset >> 32 ) );
2650  MLX_FILL_2 ( init_hca, 33,
2651  qpc_eec_cqc_eqc_rdb_parameters.eqc_base_addr_l,
2652  ( icm_offset >> 5 ),
2653  qpc_eec_cqc_eqc_rdb_parameters.log_num_of_eq,
2654  log_num_eqs );
2655  DBGC ( hermon, "Hermon %p ICM EQC is %d x %#zx at [%08llx,%08llx)\n",
2656  hermon, ( 1 << log_num_eqs ), hermon->cap.eqc_entry_size,
2657  icm_offset, ( icm_offset + len ) );
2658  icm_offset += len;
2659 
2660  /* Memory translation table */
2661  len = ( ( 1 << log_num_mtts ) * hermon->cap.mtt_entry_size );
2662  icm_offset = icm_align ( icm_offset, len );
2663  MLX_FILL_1 ( init_hca, 64,
2664  tpt_parameters.mtt_base_addr_h, ( icm_offset >> 32 ) );
2665  MLX_FILL_1 ( init_hca, 65,
2666  tpt_parameters.mtt_base_addr_l, icm_offset );
2667  DBGC ( hermon, "Hermon %p ICM MTT is %d x %#zx at [%08llx,%08llx)\n",
2668  hermon, ( 1 << log_num_mtts ), hermon->cap.mtt_entry_size,
2669  icm_offset, ( icm_offset + len ) );
2670  icm_offset += len;
2671 
2672  /* Memory protection table */
2673  len = ( ( 1 << log_num_mpts ) * hermon->cap.dmpt_entry_size );
2674  icm_offset = icm_align ( icm_offset, len );
2675  MLX_FILL_1 ( init_hca, 60,
2676  tpt_parameters.dmpt_base_adr_h, ( icm_offset >> 32 ) );
2677  MLX_FILL_1 ( init_hca, 61,
2678  tpt_parameters.dmpt_base_adr_l, icm_offset );
2679  MLX_FILL_1 ( init_hca, 62,
2680  tpt_parameters.log_dmpt_sz, log_num_mpts );
2681  DBGC ( hermon, "Hermon %p ICM DMPT is %d x %#zx at [%08llx,%08llx)\n",
2682  hermon, ( 1 << log_num_mpts ), hermon->cap.dmpt_entry_size,
2683  icm_offset, ( icm_offset + len ) );
2684  icm_offset += len;
2685 
2686  /* Multicast table */
2687  len = ( ( 1 << log_num_mcs ) * sizeof ( struct hermonprm_mcg_entry ) );
2688  icm_offset = icm_align ( icm_offset, len );
2689  MLX_FILL_1 ( init_hca, 48,
2690  multicast_parameters.mc_base_addr_h,
2691  ( icm_offset >> 32 ) );
2692  MLX_FILL_1 ( init_hca, 49,
2693  multicast_parameters.mc_base_addr_l, icm_offset );
2694  MLX_FILL_1 ( init_hca, 52,
2695  multicast_parameters.log_mc_table_entry_sz,
2696  fls ( sizeof ( struct hermonprm_mcg_entry ) - 1 ) );
2697  MLX_FILL_1 ( init_hca, 53,
2698  multicast_parameters.log_mc_table_hash_sz, log_num_mcs );
2699  MLX_FILL_1 ( init_hca, 54,
2700  multicast_parameters.log_mc_table_sz, log_num_mcs );
2701  DBGC ( hermon, "Hermon %p ICM MC is %d x %#zx at [%08llx,%08llx)\n",
2702  hermon, ( 1 << log_num_mcs ),
2703  sizeof ( struct hermonprm_mcg_entry ),
2704  icm_offset, ( icm_offset + len ) );
2705  icm_offset += len;
2706 
2707 
2709  ( icm_offset - hermon->icm_map[HERMON_ICM_OTHER].offset );
2710 
2711  /*
2712  * Allocate and map physical memory for (portions of) ICM
2713  *
2714  * Map is:
2715  * ICM AUX area (aligned to its own size)
2716  * cMPT areas
2717  * Other areas
2718  */
2719 
2720  /* Calculate physical memory required for ICM */
2721  icm_len = 0;
2722  for ( i = 0 ; i < HERMON_ICM_NUM_REGIONS ; i++ ) {
2723  icm_len += hermon->icm_map[i].len;
2724  }
2725 
2726  /* Get ICM auxiliary area size */
2727  memset ( &icm_size, 0, sizeof ( icm_size ) );
2728  MLX_FILL_1 ( &icm_size, 0, value_hi, ( icm_offset >> 32 ) );
2729  MLX_FILL_1 ( &icm_size, 1, value, icm_offset );
2730  if ( ( rc = hermon_cmd_set_icm_size ( hermon, &icm_size,
2731  &icm_aux_size ) ) != 0 ) {
2732  DBGC ( hermon, "Hermon %p could not set ICM size: %s\n",
2733  hermon, strerror ( rc ) );
2734  goto err_set_icm_size;
2735  }
2736  icm_aux_len = ( MLX_GET ( &icm_aux_size, value ) * HERMON_PAGE_SIZE );
2737 
2738  /* Allocate ICM data and auxiliary area */
2739  DBGC ( hermon, "Hermon %p requires %zd kB ICM and %zd kB AUX ICM\n",
2740  hermon, ( icm_len / 1024 ), ( icm_aux_len / 1024 ) );
2741  if ( ! hermon->icm ) {
2742  hermon->icm_len = icm_len;
2743  hermon->icm_aux_len = icm_aux_len;
2745  if ( ! hermon->icm ) {
2746  DBGC ( hermon, "Hermon %p could not allocate ICM\n",
2747  hermon );
2748  rc = -ENOMEM;
2749  goto err_alloc;
2750  }
2751  } else {
2752  assert ( hermon->icm_len == icm_len );
2753  assert ( hermon->icm_aux_len == icm_aux_len );
2754  }
2755  icm_phys = user_to_phys ( hermon->icm, 0 );
2756 
2757  /* Map ICM auxiliary area */
2758  DBGC ( hermon, "Hermon %p mapping ICM AUX => %08lx\n",
2759  hermon, icm_phys );
2761  0, icm_phys, icm_aux_len ) ) != 0 ) {
2762  DBGC ( hermon, "Hermon %p could not map AUX ICM: %s\n",
2763  hermon, strerror ( rc ) );
2764  goto err_map_icm_aux;
2765  }
2766  icm_phys += icm_aux_len;
2767 
2768  /* MAP ICM area */
2769  for ( i = 0 ; i < HERMON_ICM_NUM_REGIONS ; i++ ) {
2770  DBGC ( hermon, "Hermon %p mapping ICM %llx+%zx => %08lx\n",
2771  hermon, hermon->icm_map[i].offset,
2772  hermon->icm_map[i].len, icm_phys );
2774  hermon->icm_map[i].offset,
2775  icm_phys,
2776  hermon->icm_map[i].len ) ) != 0 ){
2777  DBGC ( hermon, "Hermon %p could not map ICM: %s\n",
2778  hermon, strerror ( rc ) );
2779  goto err_map_icm;
2780  }
2781  icm_phys += hermon->icm_map[i].len;
2782  }
2783 
2784  return 0;
2785 
2786  err_map_icm:
2787  assert ( i == 0 ); /* We don't handle partial failure at present */
2788  err_map_icm_aux:
2790  err_alloc:
2791  err_set_icm_size:
2792  return rc;
2793 }
2794 
2795 /**
2796  * Unmap ICM
2797  *
2798  * @v hermon Hermon device
2799  */
2800 static void hermon_unmap_icm ( struct hermon *hermon ) {
2801  struct hermonprm_scalar_parameter unmap_icm;
2802  int i;
2803 
2804  for ( i = ( HERMON_ICM_NUM_REGIONS - 1 ) ; i >= 0 ; i-- ) {
2805  memset ( &unmap_icm, 0, sizeof ( unmap_icm ) );
2806  MLX_FILL_1 ( &unmap_icm, 0, value_hi,
2807  ( hermon->icm_map[i].offset >> 32 ) );
2808  MLX_FILL_1 ( &unmap_icm, 1, value,
2809  hermon->icm_map[i].offset );
2811  ( 1 << fls ( ( hermon->icm_map[i].len /
2812  HERMON_PAGE_SIZE ) - 1)),
2813  &unmap_icm );
2814  }
2816 }
2817 
2818 /***************************************************************************
2819  *
2820  * Initialisation and teardown
2821  *
2822  ***************************************************************************
2823  */
2824 
2825 /**
2826  * Reset device
2827  *
2828  * @v hermon Hermon device
2829  * @ret rc Return status code
2830  */
2831 static int hermon_reset ( struct hermon *hermon ) {
2832  struct pci_device *pci = hermon->pci;
2833  struct pci_config_backup backup;
2834  static const uint8_t backup_exclude[] =
2835  PCI_CONFIG_BACKUP_EXCLUDE ( 0x58, 0x5c );
2836  uint16_t vendor;
2837  unsigned int i;
2838 
2839  /* Reset command interface toggle */
2840  hermon->toggle = 0;
2841 
2842  /* Perform device reset and preserve PCI configuration */
2843  pci_backup ( pci, &backup, backup_exclude );
2846 
2847  /* Wait until device starts responding to configuration cycles */
2848  for ( i = 0 ; i < HERMON_RESET_MAX_WAIT_MS ; i++ ) {
2849 
2850  /* Read PCI vendor ID */
2852  if ( vendor == pci->vendor ) {
2853 
2854  /* Restore PCI configuration */
2855  pci_restore ( pci, &backup, backup_exclude );
2856 
2857  DBGC ( hermon, "Hermon %p reset after %dms\n",
2858  hermon, i );
2859  return 0;
2860  }
2861 
2862  /* Delay */
2863  mdelay ( 1 );
2864  }
2865 
2866  DBGC ( hermon, "Hermon %p timed out waiting for reset\n", hermon );
2867  return -ETIMEDOUT;
2868 }
2869 
2870 /**
2871  * Set up memory protection table
2872  *
2873  * @v hermon Hermon device
2874  * @ret rc Return status code
2875  */
2876 static int hermon_setup_mpt ( struct hermon *hermon ) {
2877  struct hermonprm_mpt mpt;
2878  uint32_t key;
2879  int rc;
2880 
2881  /* Derive key */
2883  hermon->lkey = ( ( key << 8 ) | ( key >> 24 ) );
2884 
2885  /* Initialise memory protection table */
2886  memset ( &mpt, 0, sizeof ( mpt ) );
2887  MLX_FILL_7 ( &mpt, 0,
2888  atomic, 1,
2889  rw, 1,
2890  rr, 1,
2891  lw, 1,
2892  lr, 1,
2893  pa, 1,
2894  r_w, 1 );
2895  MLX_FILL_1 ( &mpt, 2, mem_key, key );
2896  MLX_FILL_1 ( &mpt, 3,
2897  pd, HERMON_GLOBAL_PD );
2898  MLX_FILL_1 ( &mpt, 10, len64, 1 );
2899  if ( ( rc = hermon_cmd_sw2hw_mpt ( hermon,
2901  &mpt ) ) != 0 ) {
2902  DBGC ( hermon, "Hermon %p could not set up MPT: %s\n",
2903  hermon, strerror ( rc ) );
2904  return rc;
2905  }
2906 
2907  return 0;
2908 }
2909 
2910 /**
2911  * Unmap memory protection table
2912  *
2913  * @v hermon Hermon device
2914  * @ret rc Return status code
2915  */
2916 static int hermon_unmap_mpt ( struct hermon *hermon ) {
2917  int rc;
2918 
2919  if ( ( rc = hermon_cmd_hw2sw_mpt ( hermon,
2920  hermon->cap.reserved_mrws ) ) != 0 ){
2921  DBGC ( hermon, "Hermon %p could not unmap MPT: %s\n",
2922  hermon, strerror ( rc ) );
2923  return rc;
2924  }
2925 
2926  return 0;
2927 }
2928 
2929 /**
2930  * Configure special queue pairs
2931  *
2932  * @v hermon Hermon device
2933  * @ret rc Return status code
2934  */
2936  int rc;
2937 
2938  /* Special QP block must be aligned on its own size */
2941  & ~( HERMON_NUM_SPECIAL_QPS - 1 ) );
2944  DBGC ( hermon, "Hermon %p special QPs at [%lx,%lx]\n", hermon,
2945  hermon->special_qpn_base, ( hermon->qpn_base - 1 ) );
2946 
2947  /* Issue command to configure special QPs */
2948  if ( ( rc = hermon_cmd_conf_special_qp ( hermon, 0x00,
2949  hermon->special_qpn_base ) ) != 0 ) {
2950  DBGC ( hermon, "Hermon %p could not configure special QPs: "
2951  "%s\n", hermon, strerror ( rc ) );
2952  return rc;
2953  }
2954 
2955  return 0;
2956 }
2957 
2958 /**
2959  * Start Hermon device
2960  *
2961  * @v hermon Hermon device
2962  * @v running Firmware is already running
2963  * @ret rc Return status code
2964  */
2965 static int hermon_start ( struct hermon *hermon, int running ) {
2966  struct hermonprm_init_hca init_hca;
2967  unsigned int i;
2968  int rc;
2969 
2970  /* Start firmware if not already running */
2971  if ( ! running ) {
2972  if ( ( rc = hermon_start_firmware ( hermon ) ) != 0 )
2973  goto err_start_firmware;
2974  }
2975 
2976  /* Allocate and map ICM */
2977  memset ( &init_hca, 0, sizeof ( init_hca ) );
2978  if ( ( rc = hermon_map_icm ( hermon, &init_hca ) ) != 0 )
2979  goto err_map_icm;
2980 
2981  /* Initialise HCA */
2982  MLX_FILL_1 ( &init_hca, 0, version, 0x02 /* "Must be 0x02" */ );
2983  MLX_FILL_1 ( &init_hca, 5, udp, 1 );
2984  MLX_FILL_1 ( &init_hca, 74, uar_parameters.log_max_uars, 8 );
2985  if ( ( rc = hermon_cmd_init_hca ( hermon, &init_hca ) ) != 0 ) {
2986  DBGC ( hermon, "Hermon %p could not initialise HCA: %s\n",
2987  hermon, strerror ( rc ) );
2988  goto err_init_hca;
2989  }
2990 
2991  /* Set up memory protection */
2992  if ( ( rc = hermon_setup_mpt ( hermon ) ) != 0 )
2993  goto err_setup_mpt;
2994  for ( i = 0 ; i < hermon->cap.num_ports ; i++ )
2995  hermon->port[i].ibdev->rdma_key = hermon->lkey;
2996 
2997  /* Set up event queue */
2998  if ( ( rc = hermon_create_eq ( hermon ) ) != 0 )
2999  goto err_create_eq;
3000 
3001  /* Configure special QPs */
3002  if ( ( rc = hermon_configure_special_qps ( hermon ) ) != 0 )
3003  goto err_conf_special_qps;
3004 
3005  DBGC ( hermon, "Hermon %p device started\n", hermon );
3006  return 0;
3007 
3008  err_conf_special_qps:
3010  err_create_eq:
3012  err_setup_mpt:
3014  err_init_hca:
3016  err_map_icm:
3018  err_start_firmware:
3019  return rc;
3020 }
3021 
/**
 * Stop Hermon device
 *
 * Tears down all resources in reverse order of hermon_start() and
 * finally resets the device.
 *
 * @v hermon		Hermon device
 */
static void hermon_stop ( struct hermon *hermon ) {
	hermon_destroy_eq ( hermon );
	hermon_unmap_mpt ( hermon );
	hermon_cmd_close_hca ( hermon );
	hermon_unmap_icm ( hermon );
	hermon_stop_firmware ( hermon );
	hermon_reset ( hermon );
}
3035 
3036 /**
3037  * Open Hermon device
3038  *
3039  * @v hermon Hermon device
3040  * @ret rc Return status code
3041  */
3042 static int hermon_open ( struct hermon *hermon ) {
3043  int rc;
3044 
3045  /* Start device if applicable */
3046  if ( hermon->open_count == 0 ) {
3047  if ( ( rc = hermon_start ( hermon, 0 ) ) != 0 )
3048  return rc;
3049  }
3050 
3051  /* Increment open counter */
3052  hermon->open_count++;
3053 
3054  return 0;
3055 }
3056 
3057 /**
3058  * Close Hermon device
3059  *
3060  * @v hermon Hermon device
3061  */
3062 static void hermon_close ( struct hermon *hermon ) {
3063 
3064  /* Decrement open counter */
3065  assert ( hermon->open_count != 0 );
3066  hermon->open_count--;
3067 
3068  /* Stop device if applicable */
3069  if ( hermon->open_count == 0 )
3070  hermon_stop ( hermon );
3071 }
3072 
3073 /***************************************************************************
3074  *
3075  * Infiniband link-layer operations
3076  *
3077  ***************************************************************************
3078  */
3079 
3080 /**
3081  * Initialise Infiniband link
3082  *
3083  * @v ibdev Infiniband device
3084  * @ret rc Return status code
3085  */
static int hermon_ib_open ( struct ib_device *ibdev ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	union hermonprm_set_port set_port;
	int rc;

	/* Open hardware; on the first opener this also starts the
	 * firmware and brings up the device.
	 */
	if ( ( rc = hermon_open ( hermon ) ) != 0 )
		goto err_open;

	/* Set port parameters: advertise a 2048-byte MTU capability,
	 * VL0, a single pkey/GID table entry and one supported link
	 * speed.  NOTE(review): mmc/mvc/mp/mg/rcm/lss appear to be
	 * per-field change-enable bits — confirm against the ConnectX
	 * PRM SET_PORT description.
	 */
	memset ( &set_port, 0, sizeof ( set_port ) );
	MLX_FILL_8 ( &set_port.ib, 0,
		     mmc, 1,
		     mvc, 1,
		     mp, 1,
		     mg, 1,
		     mtu_cap, IB_MTU_2048,
		     vl_cap, IB_VL_0,
		     rcm, 1,
		     lss, 1 );
	MLX_FILL_2 ( &set_port.ib, 10,
		     max_pkey, 1,
		     max_gid, 1 );
	MLX_FILL_1 ( &set_port.ib, 28,
		     link_speed_supported, 1 );
	if ( ( rc = hermon_cmd_set_port ( hermon, 0, ibdev->port,
					  &set_port ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p port %d could not set port: %s\n",
		       hermon, ibdev->port, strerror ( rc ) );
		goto err_set_port;
	}

	/* Initialise port via the INIT_PORT firmware command */
	if ( ( rc = hermon_cmd_init_port ( hermon, ibdev->port ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p port %d could not initialise port: "
		       "%s\n", hermon, ibdev->port, strerror ( rc ) );
		goto err_init_port;
	}

	/* Update software copies of the MAD parameters via the
	 * embedded subnet management agent.
	 */
	ib_smc_update ( ibdev, hermon_mad );

	return 0;

 err_init_port:
 err_set_port:
	hermon_close ( hermon );
 err_open:
	return rc;
}
3136 
3137 /**
3138  * Close Infiniband link
3139  *
3140  * @v ibdev Infiniband device
3141  */
3142 static void hermon_ib_close ( struct ib_device *ibdev ) {
3143  struct hermon *hermon = ib_get_drvdata ( ibdev );
3144  int rc;
3145 
3146  /* Close port */
3147  if ( ( rc = hermon_cmd_close_port ( hermon, ibdev->port ) ) != 0 ) {
3148  DBGC ( hermon, "Hermon %p port %d could not close port: %s\n",
3149  hermon, ibdev->port, strerror ( rc ) );
3150  /* Nothing we can do about this */
3151  }
3152 
3153  /* Close hardware */
3154  hermon_close ( hermon );
3155 }
3156 
3157 /**
3158  * Inform embedded subnet management agent of a received MAD
3159  *
3160  * @v ibdev Infiniband device
3161  * @v mad MAD
3162  * @ret rc Return status code
3163  */
static int hermon_inform_sma ( struct ib_device *ibdev,
			       union ib_mad *mad ) {
	int rc;

	/* Hand the MAD to the embedded subnet management agent */
	rc = hermon_mad ( ibdev, mad );
	if ( rc != 0 )
		return rc;

	/* Refresh the software copies of the port parameters */
	ib_smc_update ( ibdev, hermon_mad );

	return 0;
}
3177 
3178 /***************************************************************************
3179  *
3180  * Multicast group operations
3181  *
3182  ***************************************************************************
3183  */
3184 
3185 /**
3186  * Attach to multicast group
3187  *
3188  * @v ibdev Infiniband device
3189  * @v qp Queue pair
3190  * @v gid Multicast GID
3191  * @ret rc Return status code
3192  */
3193 static int hermon_mcast_attach ( struct ib_device *ibdev,
3194  struct ib_queue_pair *qp,
3195  union ib_gid *gid ) {
3196  struct hermon *hermon = ib_get_drvdata ( ibdev );
3197  struct hermonprm_mgm_hash hash;
3198  struct hermonprm_mcg_entry mcg;
3199  unsigned int index;
3200  int rc;
3201 
3202  /* Generate hash table index */
3203  if ( ( rc = hermon_cmd_mgid_hash ( hermon, gid, &hash ) ) != 0 ) {
3204  DBGC ( hermon, "Hermon %p could not hash GID: %s\n",
3205  hermon, strerror ( rc ) );
3206  return rc;
3207  }
3208  index = MLX_GET ( &hash, hash );
3209 
3210  /* Check for existing hash table entry */
3211  if ( ( rc = hermon_cmd_read_mcg ( hermon, index, &mcg ) ) != 0 ) {
3212  DBGC ( hermon, "Hermon %p could not read MCG %#x: %s\n",
3213  hermon, index, strerror ( rc ) );
3214  return rc;
3215  }
3216  if ( MLX_GET ( &mcg, hdr.members_count ) != 0 ) {
3217  /* FIXME: this implementation allows only a single QP
3218  * per multicast group, and doesn't handle hash
3219  * collisions. Sufficient for IPoIB but may need to
3220  * be extended in future.
3221  */
3222  DBGC ( hermon, "Hermon %p MGID index %#x already in use\n",
3223  hermon, index );
3224  return -EBUSY;
3225  }
3226 
3227  /* Update hash table entry */
3228  MLX_FILL_1 ( &mcg, 1, hdr.members_count, 1 );
3229  MLX_FILL_1 ( &mcg, 8, qp[0].qpn, qp->qpn );
3230  memcpy ( &mcg.u.dwords[4], gid, sizeof ( *gid ) );
3231  if ( ( rc = hermon_cmd_write_mcg ( hermon, index, &mcg ) ) != 0 ) {
3232  DBGC ( hermon, "Hermon %p could not write MCG %#x: %s\n",
3233  hermon, index, strerror ( rc ) );
3234  return rc;
3235  }
3236 
3237  return 0;
3238 }
3239 
3240 /**
3241  * Detach from multicast group
3242  *
3243  * @v ibdev Infiniband device
3244  * @v qp Queue pair
3245  * @v gid Multicast GID
3246  */
3247 static void hermon_mcast_detach ( struct ib_device *ibdev,
3248  struct ib_queue_pair *qp __unused,
3249  union ib_gid *gid ) {
3250  struct hermon *hermon = ib_get_drvdata ( ibdev );
3251  struct hermonprm_mgm_hash hash;
3252  struct hermonprm_mcg_entry mcg;
3253  unsigned int index;
3254  int rc;
3255 
3256  /* Generate hash table index */
3257  if ( ( rc = hermon_cmd_mgid_hash ( hermon, gid, &hash ) ) != 0 ) {
3258  DBGC ( hermon, "Hermon %p could not hash GID: %s\n",
3259  hermon, strerror ( rc ) );
3260  return;
3261  }
3262  index = MLX_GET ( &hash, hash );
3263 
3264  /* Clear hash table entry */
3265  memset ( &mcg, 0, sizeof ( mcg ) );
3266  if ( ( rc = hermon_cmd_write_mcg ( hermon, index, &mcg ) ) != 0 ) {
3267  DBGC ( hermon, "Hermon %p could not write MCG %#x: %s\n",
3268  hermon, index, strerror ( rc ) );
3269  return;
3270  }
3271 }
3272 
3273 /** Hermon Infiniband operations */
3276  .destroy_cq = hermon_destroy_cq,
3277  .create_qp = hermon_create_qp,
3278  .modify_qp = hermon_modify_qp,
3279  .destroy_qp = hermon_destroy_qp,
3280  .post_send = hermon_post_send,
3281  .post_recv = hermon_post_recv,
3282  .poll_cq = hermon_poll_cq,
3283  .poll_eq = hermon_poll_eq,
3284  .open = hermon_ib_open,
3285  .close = hermon_ib_close,
3286  .mcast_attach = hermon_mcast_attach,
3287  .mcast_detach = hermon_mcast_detach,
3288  .set_port_info = hermon_inform_sma,
3289  .set_pkey_table = hermon_inform_sma,
3290 };
3291 
3292 /**
3293  * Register Hermon Infiniband device
3294  *
3295  * @v hermon Hermon device
3296  * @v port Hermon port
3297  * @ret rc Return status code
3298  */
3299 static int hermon_register_ibdev ( struct hermon *hermon,
3300  struct hermon_port *port ) {
3301  struct ib_device *ibdev = port->ibdev;
3302  int rc;
3303 
3304  /* Use Ethernet MAC as eIPoIB local EMAC */
3305  memcpy ( ibdev->lemac, port->eth_mac.raw, ETH_ALEN );
3306 
3307  /* Initialise parameters using SMC */
3308  ib_smc_init ( ibdev, hermon_mad );
3309 
3310  /* Register Infiniband device */
3311  if ( ( rc = register_ibdev ( ibdev ) ) != 0 ) {
3312  DBGC ( hermon, "Hermon %p port %d could not register IB "
3313  "device: %s\n", hermon, ibdev->port, strerror ( rc ) );
3314  return rc;
3315  }
3316 
3317  return 0;
3318 }
3319 
3320 /**
3321  * Handle Hermon Infiniband device port state change
3322  *
3323  * @v hermon Hermon device
3324  * @v port Hermon port
3325  * @v link_up Link is up
3326  */
3328  struct hermon_port *port,
3329  int link_up __unused ) {
3330  struct ib_device *ibdev = port->ibdev;
3331 
3332  /* Update MAD parameters */
3333  ib_smc_update ( ibdev, hermon_mad );
3334 }
3335 
3336 /**
3337  * Unregister Hermon Infiniband device
3338  *
3339  * @v hermon Hermon device
3340  * @v port Hermon port
3341  */
3343  struct hermon_port *port ) {
3344  struct ib_device *ibdev = port->ibdev;
3345 
3346  unregister_ibdev ( ibdev );
3347 }
3348 
3349 /** Hermon Infiniband port type */
3352  .state_change = hermon_state_change_ibdev,
3353  .unregister_dev = hermon_unregister_ibdev,
3354 };
3355 
3356 /***************************************************************************
3357  *
3358  * Ethernet operation
3359  *
3360  ***************************************************************************
3361  */
3362 
3363 /** Number of Hermon Ethernet send work queue entries */
3364 #define HERMON_ETH_NUM_SEND_WQES 16
3365 
3366 /** Number of Hermon Ethernet receive work queue entries */
3367 #define HERMON_ETH_NUM_RECV_WQES 8
3368 
3369 /** Number of Hermon Ethernet completion entries */
3370 #define HERMON_ETH_NUM_CQES 32
3371 
3372 /**
3373  * Transmit packet via Hermon Ethernet device
3374  *
3375  * @v netdev Network device
3376  * @v iobuf I/O buffer
3377  * @ret rc Return status code
3378  */
3380  struct io_buffer *iobuf ) {
3381  struct hermon_port *port = netdev->priv;
3382  struct ib_device *ibdev = port->ibdev;
3383  struct hermon *hermon = ib_get_drvdata ( ibdev );
3384  int rc;
3385 
3386  /* Transmit packet */
3387  if ( ( rc = ib_post_send ( ibdev, port->eth_qp, NULL,
3388  iobuf ) ) != 0 ) {
3389  DBGC ( hermon, "Hermon %p port %d could not transmit: %s\n",
3390  hermon, ibdev->port, strerror ( rc ) );
3391  return rc;
3392  }
3393 
3394  return 0;
3395 }
3396 
3397 /** Hermon Ethernet queue pair operations */
3399  .alloc_iob = alloc_iob,
3400 };
3401 
3402 /**
3403  * Handle Hermon Ethernet device send completion
3404  *
3405  * @v ibdev Infiniband device
3406  * @v qp Queue pair
3407  * @v iobuf I/O buffer
3408  * @v rc Completion status code
3409  */
3410 static void hermon_eth_complete_send ( struct ib_device *ibdev __unused,
3411  struct ib_queue_pair *qp,
3412  struct io_buffer *iobuf, int rc ) {
3413  struct net_device *netdev = ib_qp_get_ownerdata ( qp );
3414 
3415  netdev_tx_complete_err ( netdev, iobuf, rc );
3416 }
3417 
3418 /**
3419  * Handle Hermon Ethernet device receive completion
3420  *
3421  * @v ibdev Infiniband device
3422  * @v qp Queue pair
3423  * @v dest Destination address vector, or NULL
3424  * @v source Source address vector, or NULL
3425  * @v iobuf I/O buffer
3426  * @v rc Completion status code
3427  */
3428 static void hermon_eth_complete_recv ( struct ib_device *ibdev __unused,
3429  struct ib_queue_pair *qp,
3431  struct ib_address_vector *source,
3432  struct io_buffer *iobuf, int rc ) {
3433  struct net_device *netdev = ib_qp_get_ownerdata ( qp );
3434  unsigned int tag;
3435 
3436  /* Identify VLAN tag, if applicable */
3437  tag = ( source->vlan_present ? source->vlan : 0 );
3438 
3439  /* Hand off to network layer */
3440  if ( rc == 0 ) {
3441  vlan_netdev_rx ( netdev, tag, iobuf );
3442  } else {
3443  vlan_netdev_rx_err ( netdev, tag, iobuf, rc );
3444  }
3445 }
3446 
3447 /** Hermon Ethernet device completion operations */
3450  .complete_recv = hermon_eth_complete_recv,
3451 };
3452 
3453 /**
3454  * Poll Hermon Ethernet device
3455  *
3456  * @v netdev Network device
3457  */
3458 static void hermon_eth_poll ( struct net_device *netdev ) {
3459  struct hermon_port *port = netdev->priv;
3460  struct ib_device *ibdev = port->ibdev;
3461 
3462  ib_poll_eq ( ibdev );
3463 }
3464 
3465 /**
3466  * Open Hermon Ethernet device
3467  *
3468  * @v netdev Network device
3469  * @ret rc Return status code
3470  */
3471 static int hermon_eth_open ( struct net_device *netdev ) {
3472  struct hermon_port *port = netdev->priv;
3473  struct ib_device *ibdev = port->ibdev;
3474  struct hermon *hermon = ib_get_drvdata ( ibdev );
3475  union hermonprm_set_port set_port;
3476  int rc;
3477 
3478  /* Open hardware */
3479  if ( ( rc = hermon_open ( hermon ) ) != 0 )
3480  goto err_open;
3481 
3482  /* Allocate completion queue */
3483  if ( ( rc = ib_create_cq ( ibdev, HERMON_ETH_NUM_CQES,
3484  &hermon_eth_cq_op, &port->eth_cq ) ) != 0 ) {
3485  DBGC ( hermon, "Hermon %p port %d could not create completion "
3486  "queue: %s\n", hermon, ibdev->port, strerror ( rc ) );
3487  goto err_create_cq;
3488  }
3489 
3490  /* Allocate queue pair */
3492  port->eth_cq, HERMON_ETH_NUM_RECV_WQES,
3493  port->eth_cq, &hermon_eth_qp_op,
3494  netdev->name, &port->eth_qp ) ) != 0 ) {
3495  DBGC ( hermon, "Hermon %p port %d could not create queue "
3496  "pair: %s\n", hermon, ibdev->port, strerror ( rc ) );
3497  goto err_create_qp;
3498  }
3499  ib_qp_set_ownerdata ( port->eth_qp, netdev );
3500 
3501  /* Activate queue pair */
3502  if ( ( rc = ib_modify_qp ( ibdev, port->eth_qp ) ) != 0 ) {
3503  DBGC ( hermon, "Hermon %p port %d could not modify queue "
3504  "pair: %s\n", hermon, ibdev->port, strerror ( rc ) );
3505  goto err_modify_qp;
3506  }
3507 
3508  /* Fill receive rings */
3509  ib_refill_recv ( ibdev, port->eth_qp );
3510 
3511  /* Set port general parameters */
3512  memset ( &set_port, 0, sizeof ( set_port ) );
3513  MLX_FILL_3 ( &set_port.general, 0,
3514  v_mtu, 1,
3515  v_pprx, 1,
3516  v_pptx, 1 );
3517  MLX_FILL_1 ( &set_port.general, 1,
3518  mtu, ( ETH_FRAME_LEN + 40 /* Used by card */ ) );
3519  MLX_FILL_1 ( &set_port.general, 2,
3520  pfctx, ( 1 << FCOE_VLAN_PRIORITY ) );
3521  MLX_FILL_1 ( &set_port.general, 3,
3522  pfcrx, ( 1 << FCOE_VLAN_PRIORITY ) );
3523  if ( ( rc = hermon_cmd_set_port ( hermon, 1,
3525  ibdev->port ),
3526  &set_port ) ) != 0 ) {
3527  DBGC ( hermon, "Hermon %p port %d could not set port general "
3528  "parameters: %s\n",
3529  hermon, ibdev->port, strerror ( rc ) );
3530  goto err_set_port_general_params;
3531  }
3532 
3533  /* Set port receive QP */
3534  memset ( &set_port, 0, sizeof ( set_port ) );
3535  MLX_FILL_1 ( &set_port.rqp_calc, 0, base_qpn, port->eth_qp->qpn );
3536  MLX_FILL_1 ( &set_port.rqp_calc, 2,
3537  mac_miss_index, 128 /* MAC misses go to promisc QP */ );
3538  MLX_FILL_2 ( &set_port.rqp_calc, 3,
3539  vlan_miss_index, 127 /* VLAN misses go to promisc QP */,
3540  no_vlan_index, 126 /* VLAN-free go to promisc QP */ );
3541  MLX_FILL_2 ( &set_port.rqp_calc, 5,
3542  promisc_qpn, port->eth_qp->qpn,
3543  en_uc_promisc, 1 );
3544  MLX_FILL_2 ( &set_port.rqp_calc, 6,
3545  def_mcast_qpn, port->eth_qp->qpn,
3546  mc_promisc_mode, 2 /* Receive all multicasts */ );
3547  if ( ( rc = hermon_cmd_set_port ( hermon, 1,
3549  ibdev->port ),
3550  &set_port ) ) != 0 ) {
3551  DBGC ( hermon, "Hermon %p port %d could not set port receive "
3552  "QP: %s\n", hermon, ibdev->port, strerror ( rc ) );
3553  goto err_set_port_receive_qp;
3554  }
3555 
3556  /* Initialise port */
3557  if ( ( rc = hermon_cmd_init_port ( hermon, ibdev->port ) ) != 0 ) {
3558  DBGC ( hermon, "Hermon %p port %d could not initialise port: "
3559  "%s\n", hermon, ibdev->port, strerror ( rc ) );
3560  goto err_init_port;
3561  }
3562 
3563  return 0;
3564 
3565  err_init_port:
3566  err_set_port_receive_qp:
3567  err_set_port_general_params:
3568  err_modify_qp:
3569  ib_destroy_qp ( ibdev, port->eth_qp );
3570  err_create_qp:
3571  ib_destroy_cq ( ibdev, port->eth_cq );
3572  err_create_cq:
3573  hermon_close ( hermon );
3574  err_open:
3575  return rc;
3576 }
3577 
3578 /**
3579  * Close Hermon Ethernet device
3580  *
3581  * @v netdev Network device
3582  */
3583 static void hermon_eth_close ( struct net_device *netdev ) {
3584  struct hermon_port *port = netdev->priv;
3585  struct ib_device *ibdev = port->ibdev;
3586  struct hermon *hermon = ib_get_drvdata ( ibdev );
3587  int rc;
3588 
3589  /* Close port */
3590  if ( ( rc = hermon_cmd_close_port ( hermon, ibdev->port ) ) != 0 ) {
3591  DBGC ( hermon, "Hermon %p port %d could not close port: %s\n",
3592  hermon, ibdev->port, strerror ( rc ) );
3593  /* Nothing we can do about this */
3594  }
3595 
3596  /* Tear down the queues */
3597  ib_destroy_qp ( ibdev, port->eth_qp );
3598  ib_destroy_cq ( ibdev, port->eth_cq );
3599 
3600  /* Close hardware */
3601  hermon_close ( hermon );
3602 }
3603 
3604 /** Hermon Ethernet network device operations */
3606  .open = hermon_eth_open,
3607  .close = hermon_eth_close,
3608  .transmit = hermon_eth_transmit,
3609  .poll = hermon_eth_poll,
3610 };
3611 
3612 /**
3613  * Register Hermon Ethernet device
3614  *
3615  * @v hermon Hermon device
3616  * @v port Hermon port
3617  * @ret rc Return status code
3618  */
3619 static int hermon_register_netdev ( struct hermon *hermon,
3620  struct hermon_port *port ) {
3621  struct net_device *netdev = port->netdev;
3622  struct ib_device *ibdev = port->ibdev;
3623  int rc;
3624 
3625  /* Set MAC address */
3626  memcpy ( netdev->hw_addr, port->eth_mac.raw, ETH_ALEN );
3627 
3628  /* Register network device */
3629  if ( ( rc = register_netdev ( netdev ) ) != 0 ) {
3630  DBGC ( hermon, "Hermon %p port %d could not register network "
3631  "device: %s\n", hermon, ibdev->port, strerror ( rc ) );
3632  goto err_register_netdev;
3633  }
3634 
3635  /* Register non-volatile options */
3636  if ( ( rc = register_nvo ( &port->nvo,
3637  netdev_settings ( netdev ) ) ) != 0 ) {
3638  DBGC ( hermon, "Hermon %p port %d could not register non-"
3639  "volatile options: %s\n",
3640  hermon, ibdev->port, strerror ( rc ) );
3641  goto err_register_nvo;
3642  }
3643 
3644  return 0;
3645 
3646  unregister_nvo ( &port->nvo );
3647  err_register_nvo:
3649  err_register_netdev:
3650  return rc;
3651 }
3652 
3653 /**
3654  * Handle Hermon Ethernet device port state change
3655  *
3656  * @v hermon Hermon device
3657  * @v port Hermon port
3658  * @v link_up Link is up
3659  */
3661  struct hermon_port *port,
3662  int link_up ) {
3663  struct net_device *netdev = port->netdev;
3664 
3665  if ( link_up ) {
3666  netdev_link_up ( netdev );
3667  } else {
3669  }
3670 }
3671 
3672 /**
3673  * Unregister Hermon Ethernet device
3674  *
3675  * @v hermon Hermon device
3676  * @v port Hermon port
3677  */
3679  struct hermon_port *port ) {
3680  struct net_device *netdev = port->netdev;
3681 
3682  unregister_nvo ( &port->nvo );
3684 }
3685 
3686 /** Hermon Ethernet port type */
3689  .state_change = hermon_state_change_netdev,
3690  .unregister_dev = hermon_unregister_netdev,
3691 };
3692 
3693 /***************************************************************************
3694  *
3695  * Port type detection
3696  *
3697  ***************************************************************************
3698  */
3699 
3700 /** Timeout for port sensing */
3701 #define HERMON_SENSE_PORT_TIMEOUT ( TICKS_PER_SEC / 2 )
3702 
3703 /**
3704  * Name port type
3705  *
3706  * @v port_type Port type
3707  * @v port_type_name Port type name
3708  */
3709 static inline const char * hermon_name_port_type ( unsigned int port_type ) {
3710  switch ( port_type ) {
3711  case HERMON_PORT_TYPE_UNKNOWN: return "unknown";
3712  case HERMON_PORT_TYPE_IB: return "Infiniband";
3713  case HERMON_PORT_TYPE_ETH: return "Ethernet";
3714  default: return "INVALID";
3715  }
3716 }
3717 
3718 /**
3719  * Sense port type
3720  *
3721  * @v hermon Hermon device
3722  * @v port Hermon port
3723  * @ret port_type Port type, or negative error
3724  */
3725 static int hermon_sense_port_type ( struct hermon *hermon,
3726  struct hermon_port *port ) {
3727  struct ib_device *ibdev = port->ibdev;
3728  struct hermonprm_sense_port sense_port;
3729  int port_type;
3730  int rc;
3731 
3732  /* If DPDP is not supported, always assume Infiniband */
3733  if ( ! hermon->cap.dpdp ) {
3734  port_type = HERMON_PORT_TYPE_IB;
3735  DBGC ( hermon, "Hermon %p port %d does not support DPDP; "
3736  "assuming an %s network\n", hermon, ibdev->port,
3737  hermon_name_port_type ( port_type ) );
3738  return port_type;
3739  }
3740 
3741  /* Sense the port type */
3742  if ( ( rc = hermon_cmd_sense_port ( hermon, ibdev->port,
3743  &sense_port ) ) != 0 ) {
3744  DBGC ( hermon, "Hermon %p port %d sense failed: %s\n",
3745  hermon, ibdev->port, strerror ( rc ) );
3746  return rc;
3747  }
3748  port_type = MLX_GET ( &sense_port, port_type );
3749 
3750  DBGC ( hermon, "Hermon %p port %d sensed an %s network\n",
3751  hermon, ibdev->port, hermon_name_port_type ( port_type ) );
3752  return port_type;
3753 }
3754 
3755 /**
3756  * Set port type
3757  *
3758  * @v hermon Hermon device
3759  * @v port Hermon port
3760  * @ret rc Return status code
3761  */
3762 static int hermon_set_port_type ( struct hermon *hermon,
3763  struct hermon_port *port ) {
3764  struct ib_device *ibdev = port->ibdev;
3765  struct hermonprm_query_port_cap query_port;
3766  int ib_supported;
3767  int eth_supported;
3768  int port_type;
3769  unsigned long start;
3770  unsigned long elapsed;
3771  int rc;
3772 
3773  /* Check to see which types are supported */
3774  if ( ( rc = hermon_cmd_query_port ( hermon, ibdev->port,
3775  &query_port ) ) != 0 ) {
3776  DBGC ( hermon, "Hermon %p port %d could not query port: %s\n",
3777  hermon, ibdev->port, strerror ( rc ) );
3778  return rc;
3779  }
3780  ib_supported = MLX_GET ( &query_port, ib );
3781  eth_supported = MLX_GET ( &query_port, eth );
3782  DBGC ( hermon, "Hermon %p port %d supports%s%s%s\n",
3783  hermon, ibdev->port, ( ib_supported ? " Infiniband" : "" ),
3784  ( ( ib_supported && eth_supported ) ? " and" : "" ),
3785  ( eth_supported ? " Ethernet" : "" ) );
3786 
3787  /* Record Ethernet MAC address */
3788  port->eth_mac.part.h = htons ( MLX_GET ( &query_port, mac_47_32 ) );
3789  port->eth_mac.part.l = htonl ( MLX_GET ( &query_port, mac_31_0 ) );
3790 
3791  /* Sense network, if applicable */
3792  if ( ib_supported && eth_supported ) {
3793 
3794  /* Both types are supported; try sensing network */
3795  start = currticks();
3796  do {
3797  /* Try sensing port */
3798  port_type = hermon_sense_port_type ( hermon, port );
3799  if ( port_type < 0 ) {
3800  rc = port_type;
3801  return rc;
3802  }
3803 
3804  /* Avoid spamming debug output */
3805  mdelay ( 50 );
3806  } while ( ( port_type == HERMON_PORT_TYPE_UNKNOWN ) &&
3807  ( ( elapsed = ( currticks() - start ) ) <
3809 
3810  /* Set port type based on sensed network, defaulting
3811  * to Infiniband if nothing was sensed.
3812  */
3813  switch ( port_type ) {
3814  case HERMON_PORT_TYPE_ETH:
3815  port->type = &hermon_port_type_eth;
3816  break;
3817  case HERMON_PORT_TYPE_IB:
3819  port->type = &hermon_port_type_ib;
3820  break;
3821  default:
3822  return -EINVAL;
3823  }
3824 
3825  } else if ( eth_supported ) {
3826  port->type = &hermon_port_type_eth;
3827  } else {
3828  port->type = &hermon_port_type_ib;
3829  }
3830 
3831  assert ( port->type != NULL );
3832  return 0;
3833 }
3834 
3835 /***************************************************************************
3836  *
3837  * BOFM interface
3838  *
3839  ***************************************************************************
3840  */
3841 
3842 /**
3843  * Harvest Ethernet MAC for BOFM
3844  *
3845  * @v bofm BOFM device
3846  * @v mport Multi-port index
3847  * @v mac MAC to fill in
3848  * @ret rc Return status code
3849  */
3850 static int hermon_bofm_harvest ( struct bofm_device *bofm, unsigned int mport,
3851  uint8_t *mac ) {
3852  struct hermon *hermon = container_of ( bofm, struct hermon, bofm );
3853  struct hermonprm_mod_stat_cfg stat_cfg;
3854  union {
3855  uint8_t bytes[8];
3856  uint32_t dwords[2];
3857  } buf;
3858  int rc;
3859 
3860  /* Query static configuration */
3861  if ( ( rc = hermon_mod_stat_cfg ( hermon, mport,
3863  HERMON_MOD_STAT_CFG_OFFSET ( mac_m ),
3864  &stat_cfg ) ) != 0 ) {
3865  DBGC ( hermon, "Hermon %p port %d could not query "
3866  "configuration: %s\n", hermon, mport, strerror ( rc ) );
3867  return rc;
3868  }
3869 
3870  /* Retrieve MAC address */
3871  buf.dwords[0] = htonl ( MLX_GET ( &stat_cfg, mac_high ) );
3872  buf.dwords[1] = htonl ( MLX_GET ( &stat_cfg, mac_low ) );
3873  memcpy ( mac, &buf.bytes[ sizeof ( buf.bytes ) - ETH_ALEN ],
3874  ETH_ALEN );
3875 
3876  DBGC ( hermon, "Hermon %p port %d harvested MAC address %s\n",
3877  hermon, mport, eth_ntoa ( mac ) );
3878 
3879  return 0;
3880 }
3881 
3882 /**
3883  * Update Ethernet MAC for BOFM
3884  *
3885  * @v bofm BOFM device
3886  * @v mport Multi-port index
3887  * @v mac MAC to fill in
3888  * @ret rc Return status code
3889  */
3890 static int hermon_bofm_update ( struct bofm_device *bofm, unsigned int mport,
3891  const uint8_t *mac ) {
3892  struct hermon *hermon = container_of ( bofm, struct hermon, bofm );
3893  struct hermonprm_mod_stat_cfg stat_cfg;
3894  union {
3895  uint8_t bytes[8];
3896  uint32_t dwords[2];
3897  } buf;
3898  int rc;
3899 
3900  /* Prepare MAC address */
3901  memset ( &buf, 0, sizeof ( buf ) );
3902  memcpy ( &buf.bytes[ sizeof ( buf.bytes ) - ETH_ALEN ], mac,
3903  ETH_ALEN );
3904 
3905  /* Modify static configuration */
3906  memset ( &stat_cfg, 0, sizeof ( stat_cfg ) );
3907  MLX_FILL_2 ( &stat_cfg, 36,
3908  mac_m, 1,
3909  mac_high, ntohl ( buf.dwords[0] ) );
3910  MLX_FILL_1 ( &stat_cfg, 37, mac_low, ntohl ( buf.dwords[1] ) );
3911  if ( ( rc = hermon_mod_stat_cfg ( hermon, mport,
3913  HERMON_MOD_STAT_CFG_OFFSET ( mac_m ),
3914  &stat_cfg ) ) != 0 ) {
3915  DBGC ( hermon, "Hermon %p port %d could not modify "
3916  "configuration: %s\n", hermon, mport, strerror ( rc ) );
3917  return rc;
3918  }
3919 
3920  DBGC ( hermon, "Hermon %p port %d updated MAC address to %s\n",
3921  hermon, mport, eth_ntoa ( mac ) );
3922 
3923  return 0;
3924 }
3925 
3926 /** Hermon BOFM operations */
3929  .update = hermon_bofm_update,
3930 };
3931 
3932 /***************************************************************************
3933  *
3934  * PCI interface
3935  *
3936  ***************************************************************************
3937  */
3938 
3939 /**
3940  * Allocate Hermon device
3941  *
3942  * @v pci PCI device
3943  * @v id PCI ID
3944  * @ret rc Return status code
3945  */
3946 static struct hermon * hermon_alloc ( void ) {
3947  struct hermon *hermon;
3948 
3949  /* Allocate Hermon device */
3950  hermon = zalloc ( sizeof ( *hermon ) );
3951  if ( ! hermon )
3952  goto err_hermon;
3953 
3954  /* Allocate space for mailboxes */
3957  if ( ! hermon->mailbox_in )
3958  goto err_mailbox_in;
3961  if ( ! hermon->mailbox_out )
3962  goto err_mailbox_out;
3963 
3964  return hermon;
3965 
3967  err_mailbox_out:
3969  err_mailbox_in:
3970  free ( hermon );
3971  err_hermon:
3972  return NULL;
3973 }
3974 
3975 /**
3976  * Free Hermon device
3977  *
3978  * @v hermon Hermon device
3979  */
3980 static void hermon_free ( struct hermon *hermon ) {
3981 
3982  ufree ( hermon->icm );
3983  ufree ( hermon->firmware_area );
3986  free ( hermon );
3987 }
3988 
3989 /**
3990  * Probe PCI device
3991  *
3992  * @v pci PCI device
3993  * @v id PCI ID
3994  * @ret rc Return status code
3995  */
3996 static int hermon_probe ( struct pci_device *pci ) {
3997  struct hermon *hermon;
3998  struct ib_device *ibdev;
3999  struct net_device *netdev;
4000  struct hermon_port *port;
4001  unsigned long config;
4002  unsigned long uar;
4003  unsigned int i;
4004  int rc;
4005 
4006  /* Allocate Hermon device */
4007  hermon = hermon_alloc();
4008  if ( ! hermon ) {
4009  rc = -ENOMEM;
4010  goto err_alloc;
4011  }
4012  pci_set_drvdata ( pci, hermon );
4013  hermon->pci = pci;
4014 
4015  /* Fix up PCI device */
4016  adjust_pci_device ( pci );
4017 
4018  /* Map PCI BARs */
4019  config = pci_bar_start ( pci, HERMON_PCI_CONFIG_BAR );
4020  hermon->config = pci_ioremap ( pci, config,
4022  uar = pci_bar_start ( pci, HERMON_PCI_UAR_BAR );
4023  hermon->uar = pci_ioremap ( pci, uar,
4025 
4026  /* Reset device */
4027  if ( ( rc = hermon_reset ( hermon ) ) != 0 )
4028  goto err_reset;
4029 
4030  /* Start firmware */
4031  if ( ( rc = hermon_start_firmware ( hermon ) ) != 0 )
4032  goto err_start_firmware;
4033 
4034  /* Get device limits */
4035  if ( ( rc = hermon_get_cap ( hermon ) ) != 0 )
4036  goto err_get_cap;
4037 
4038  /* Allocate Infiniband devices */
4039  for ( i = 0 ; i < hermon->cap.num_ports ; i++ ) {
4040  ibdev = alloc_ibdev ( 0 );
4041  if ( ! ibdev ) {
4042  rc = -ENOMEM;
4043  goto err_alloc_ibdev;
4044  }
4045  hermon->port[i].ibdev = ibdev;
4047  ibdev->dev = &pci->dev;
4048  ibdev->port = ( HERMON_PORT_BASE + i );
4051  }
4052 
4053  /* Allocate network devices */
4054  for ( i = 0 ; i < hermon->cap.num_ports ; i++ ) {
4055  netdev = alloc_etherdev ( 0 );
4056  if ( ! netdev ) {
4057  rc = -ENOMEM;
4058  goto err_alloc_netdev;
4059  }
4060  hermon->port[i].netdev = netdev;
4062  netdev->dev = &pci->dev;
4063  netdev->priv = &hermon->port[i];
4064  }
4065 
4066  /* Start device */
4067  if ( ( rc = hermon_start ( hermon, 1 ) ) != 0 )
4068  goto err_start;
4069 
4070  /* Determine port types */
4071  for ( i = 0 ; i < hermon->cap.num_ports ; i++ ) {
4072  port = &hermon->port[i];
4073  if ( ( rc = hermon_set_port_type ( hermon, port ) ) != 0 )
4074  goto err_set_port_type;
4075  }
4076 
4077  /* Initialise non-volatile storage */
4078  nvs_vpd_init ( &hermon->nvsvpd, pci );
4079  for ( i = 0 ; i < hermon->cap.num_ports ; i++ ) {
4080  port = &hermon->port[i];
4082  HERMON_VPD_FIELD ( port->ibdev->port ),
4083  &port->nvo, NULL );
4084  }
4085 
4086  /* Register devices */
4087  for ( i = 0 ; i < hermon->cap.num_ports ; i++ ) {
4088  port = &hermon->port[i];
4089  if ( ( rc = port->type->register_dev ( hermon, port ) ) != 0 )
4090  goto err_register;
4091  }
4092 
4093  /* Leave device quiescent until opened */
4094  if ( hermon->open_count == 0 )
4095  hermon_stop ( hermon );
4096 
4097  return 0;
4098 
4099  i = hermon->cap.num_ports;
4100  err_register:
4101  for ( i-- ; ( signed int ) i >= 0 ; i-- ) {
4102  port = &hermon->port[i];
4103  port->type->unregister_dev ( hermon, port );
4104  }
4105  err_set_port_type:
4106  hermon_stop ( hermon );
4107  err_start:
4108  i = hermon->cap.num_ports;
4109  err_alloc_netdev:
4110  for ( i-- ; ( signed int ) i >= 0 ; i-- ) {
4111  netdev_nullify ( hermon->port[i].netdev );
4112  netdev_put ( hermon->port[i].netdev );
4113  }
4114  i = hermon->cap.num_ports;
4115  err_alloc_ibdev:
4116  for ( i-- ; ( signed int ) i >= 0 ; i-- )
4117  ibdev_put ( hermon->port[i].ibdev );
4118  err_get_cap:
4120  err_start_firmware:
4121  err_reset:
4122  iounmap ( hermon->uar );
4123  iounmap ( hermon->config );
4124  hermon_free ( hermon );
4125  err_alloc:
4126  return rc;
4127 }
4128 
4129 /**
4130  * Remove PCI device
4131  *
4132  * @v pci PCI device
4133  */
4134 static void hermon_remove ( struct pci_device *pci ) {
4135  struct hermon *hermon = pci_get_drvdata ( pci );
4136  struct hermon_port *port;
4137  int i;
4138 
4139  for ( i = ( hermon->cap.num_ports - 1 ) ; i >= 0 ; i-- ) {
4140  port = &hermon->port[i];
4141  port->type->unregister_dev ( hermon, port );
4142  }
4143  for ( i = ( hermon->cap.num_ports - 1 ) ; i >= 0 ; i-- ) {
4144  netdev_nullify ( hermon->port[i].netdev );
4145  netdev_put ( hermon->port[i].netdev );
4146  }
4147  for ( i = ( hermon->cap.num_ports - 1 ) ; i >= 0 ; i-- )
4148  ibdev_put ( hermon->port[i].ibdev );
4149  iounmap ( hermon->uar );
4150  iounmap ( hermon->config );
4151  hermon_free ( hermon );
4152 }
4153 
4154 /**
4155  * Probe PCI device for BOFM
4156  *
4157  * @v pci PCI device
4158  * @v id PCI ID
4159  * @ret rc Return status code
4160  */
4161 static int hermon_bofm_probe ( struct pci_device *pci ) {
4162  struct hermon *hermon;
4163  unsigned long config;
4164  int rc;
4165 
4166  /* Allocate Hermon device */
4167  hermon = hermon_alloc();
4168  if ( ! hermon ) {
4169  rc = -ENOMEM;
4170  goto err_alloc;
4171  }
4172  pci_set_drvdata ( pci, hermon );
4173  hermon->pci = pci;
4174 
4175  /* Fix up PCI device */
4176  adjust_pci_device ( pci );
4177 
4178  /* Map PCI BAR */
4182 
4183  /* Initialise BOFM device */
4185 
4186  /* Register BOFM device */
4187  if ( ( rc = bofm_register ( &hermon->bofm ) ) != 0 ) {
4188  DBGC ( hermon, "Hermon %p could not register BOFM device: "
4189  "%s\n", hermon, strerror ( rc ) );
4190  goto err_bofm_register;
4191  }
4192 
4193  return 0;
4194 
4195  err_bofm_register:
4196  iounmap ( hermon->config );
4197  hermon_free ( hermon );
4198  err_alloc:
4199  return rc;
4200 }
4201 
4202 /**
4203  * Remove PCI device for BOFM
4204  *
4205  * @v pci PCI device
4206  */
4207 static void hermon_bofm_remove ( struct pci_device *pci ) {
4208  struct hermon *hermon = pci_get_drvdata ( pci );
4209 
4210  bofm_unregister ( &hermon->bofm );
4211  iounmap ( hermon->config );
4212  hermon_free ( hermon );
4213 }
4214 
/** PCI device IDs handled by this driver (all Mellanox, vendor 0x15b3) */
static struct pci_device_id hermon_nics[] = {
	/* Mellanox ConnectX VPI (ethernet + infiniband) */
	PCI_ROM ( 0x15b3, 0x6340, "mt25408", "MT25408 HCA driver", 0 ),
	PCI_ROM ( 0x15b3, 0x634a, "mt25418", "MT25418 HCA driver", 0 ),

	/* Mellanox ConnectX EN (ethernet only) */
	PCI_ROM ( 0x15b3, 0x6368, "mt25448", "MT25448 HCA driver", 0 ),
	PCI_ROM ( 0x15b3, 0x6372, "mt25458", "MT25458 HCA driver", 0 ),

	/* Mellanox ConnectX-2 VPI (ethernet + infiniband) */
	PCI_ROM ( 0x15b3, 0x6732, "mt26418", "MT26418 HCA driver", 0 ),
	PCI_ROM ( 0x15b3, 0x673c, "mt26428", "MT26428 HCA driver", 0 ),
	PCI_ROM ( 0x15b3, 0x6746, "mt26438", "MT26438 HCA driver", 0 ),
	PCI_ROM ( 0x15b3, 0x6778, "mt26488", "MT26488 HCA driver", 0 ),

	/* Mellanox ConnectX-2 EN (ethernet only) */
	PCI_ROM ( 0x15b3, 0x6750, "mt26448", "MT26448 HCA driver", 0 ),
	PCI_ROM ( 0x15b3, 0x675a, "mt26458", "MT26458 HCA driver", 0 ),
	PCI_ROM ( 0x15b3, 0x6764, "mt26468", "MT26468 HCA driver", 0 ),
	PCI_ROM ( 0x15b3, 0x676e, "mt26478", "MT26478 HCA driver", 0 ),

	/* Mellanox ConnectX-3 VPI (ethernet + infiniband) */
	PCI_ROM ( 0x15b3, 0x1003, "mt4099", "ConnectX-3 HCA driver", 0 ),
	PCI_ROM ( 0x15b3, 0x1007, "mt4103", "ConnectX-3 Pro HCA driver", 0 ),
};
4240 
/** Hermon PCI driver */
struct pci_driver hermon_driver __pci_driver = {
	.ids = hermon_nics,
	/* Number of entries in the ID table above */
	.id_count = ( sizeof ( hermon_nics ) / sizeof ( hermon_nics[0] ) ),
	.probe = hermon_probe,
	.remove = hermon_remove,
};
4247 
4248 struct pci_driver hermon_bofm_driver __bofm_driver = {
4249  .ids = hermon_nics,
4250  .id_count = ( sizeof ( hermon_nics ) / sizeof ( hermon_nics[0] ) ),
4253 };
void unregister_ibdev(struct ib_device *ibdev)
Unregister Infiniband device.
Definition: infiniband.c:985
#define HERMON_MTU_ETH
Definition: hermon.h:101
static int hermon_cmd_query_dev_cap(struct hermon *hermon, struct hermonprm_query_dev_cap *dev_cap)
Definition: hermon.c:269
size_t eqe_size
Size of event queue.
Definition: hermon.h:780
#define HERMON_PORT_BASE
Definition: hermon.h:28
static void hermon_ib_close(struct ib_device *ibdev)
Close Infiniband link.
Definition: hermon.c:3142
uint32_t c
Definition: md4.c:30
#define __attribute__(x)
Definition: compiler.h:10
struct hermonprm_port_mgmnt_change_event port_mgmnt_change
Definition: hermon.h:542
#define EINVAL
Invalid argument.
Definition: errno.h:428
static __always_inline void ib_set_drvdata(struct ib_device *ibdev, void *priv)
Set Infiniband device driver-private data.
Definition: infiniband.h:697
#define HERMON_SCHED_DEFAULT
Definition: hermon.h:129
void ib_poll_eq(struct ib_device *ibdev)
Poll event queue.
Definition: infiniband.c:878
iPXE I/O API
static void hermon_event_port_state_change(struct hermon *hermon, union hermonprm_event_entry *eqe)
Handle port state event.
Definition: hermon.c:2134
struct arbelprm_rc_send_wqe rc
Definition: arbel.h:14
#define HERMON_HCR_OUT_LEN(_command)
Definition: hermon.h:978
struct ib_device * ibdev
Infiniband device.
Definition: hermon.h:848
Infiniband protocol.
#define MLX_FILL_7(_ptr, _index,...)
Definition: mlx_bitops.h:191
struct net_device * netdev
Network device.
Definition: hermon.h:850
unsigned short uint16_t
Definition: stdint.h:11
uint32_t low
Low 16 bits of address.
Definition: myson.h:19
static int hermon_cmd_unmap_icm_aux(struct hermon *hermon)
Definition: hermon.c:566
struct hermonprm_set_port_rqp_calc rqp_calc
Definition: hermon.h:559
#define MLX_FILL_2(_ptr, _index,...)
Definition: mlx_bitops.h:171
struct hermon_send_work_queue send
Send work queue.
Definition: hermon.h:744
static int hermon_cmd_sw2hw_cq(struct hermon *hermon, unsigned long cqn, const struct hermonprm_completion_queue_context *cqctx)
Definition: hermon.c:387
struct hermon_mtt mtt
MTT descriptor.
Definition: hermon.h:764
static int hermon_cmd_sw2hw_eq(struct hermon *hermon, unsigned int index, const struct hermonprm_eqc *eqctx)
Definition: hermon.c:360
#define iob_put(iobuf, len)
Definition: iobuf.h:120
static int hermon_post_send(struct ib_device *ibdev, struct ib_queue_pair *qp, struct ib_address_vector *dest, struct io_buffer *iobuf)
Post send work queue entry.
Definition: hermon.c:1629
#define MLX_FILL_4(_ptr, _index,...)
Definition: mlx_bitops.h:179
static void hermon_unmap_icm(struct hermon *hermon)
Unmap ICM.
Definition: hermon.c:2800
#define IB_QPN_SMI
Subnet management interface QPN.
Definition: infiniband.h:21
#define HERMON_PCI_CONFIG_BAR_SIZE
Definition: hermon.h:32
static void bofm_init(struct bofm_device *bofm, struct pci_device *pci, struct bofm_operations *op)
Initialise BOFM device.
Definition: bofm.h:339
static void hermon_eth_complete_send(struct ib_device *ibdev __unused, struct ib_queue_pair *qp, struct io_buffer *iobuf, int rc)
Handle Hermon Ethernet device send completion.
Definition: hermon.c:3410
static int hermon_sense_port_type(struct hermon *hermon, struct hermon_port *port)
Sense port type.
Definition: hermon.c:3725
#define HERMON_CMPT_MAX_ENTRIES
Number of cMPT entries of each type.
Definition: hermon.h:610
#define HERMON_HCR_BASE
Definition: hermon.h:957
void nvs_vpd_nvo_init(struct nvs_vpd_device *nvsvpd, unsigned int field, struct nvo_block *nvo, struct refcnt *refcnt)
Initialise non-volatile option storage within NVS VPD device.
Definition: nvsvpd.c:220
#define IB_MTU_2048
Definition: ib_mad.h:162
static int hermon_cmd_rst2init_qp(struct hermon *hermon, unsigned long qpn, const struct hermonprm_qp_ee_state_transitions *ctx)
Definition: hermon.c:414
struct hermonprm_wqe_segment_data_ptr data[HERMON_MAX_GATHER]
Definition: hermon.h:520
#define HERMON_HCR_READ_MCG
Definition: hermon.h:72
struct hermonprm_event_db_register event
Definition: hermon.h:547
static int hermon_cmd_wait(struct hermon *hermon, struct hermonprm_hca_command_register *hcr)
Wait for Hermon command completion.
Definition: hermon.c:136
int nvs_vpd_init(struct nvs_vpd_device *nvsvpd, struct pci_device *pci)
Initialise NVS VPD device.
Definition: nvsvpd.c:178
A PCI driver.
Definition: pci.h:230
#define EBUSY
Device or resource busy.
Definition: errno.h:338
#define HERMON_OPCODE_NOP
Definition: hermon.h:41
size_t auxc_entry_size
Auxiliary context entry size.
Definition: hermon.h:580
static int ib_is_open(struct ib_device *ibdev)
Check whether or not Infiniband device is open.
Definition: infiniband.h:576
#define MLX_FILL_8(_ptr, _index,...)
Definition: mlx_bitops.h:195
struct hermon_recv_work_queue recv
Receive work queue.
Definition: hermon.h:746
#define HERMON_HCR_INIT_PORT
Definition: hermon.h:51
Infiniband device operations.
Definition: infiniband.h:254
#define HERMON_MOD_STAT_CFG_OFFSET(field)
Calculate offset within static configuration.
Definition: hermon.c:723
__be32 in[4]
Definition: CIB_PRM.h:35
static int hermon_alloc_mtt(struct hermon *hermon, const void *memory, size_t len, struct hermon_mtt *mtt)
Allocate MTT entries.
Definition: hermon.c:624
uint8_t state
State.
Definition: eth_slow.h:47
static int hermon_cmd_query_fw(struct hermon *hermon, struct hermonprm_query_fw *fw)
Definition: hermon.c:278
void * doorbell
Doorbell register.
Definition: hermon.h:682
#define HERMON_MOD_STAT_CFG_SET
Definition: hermon.h:139
static unsigned int unsigned int bit
Definition: bigint.h:208
struct hermonprm_send_db_register send
Definition: hermon.h:546
static struct ib_completion_queue_operations hermon_eth_cq_op
Hermon Ethernet device completion operations.
Definition: hermon.c:3448
int(* open)(struct net_device *netdev)
Open network device.
Definition: netdevice.h:222
struct hermonprm_recv_wqe recv
Definition: hermon.h:690
uint8_t opcode
Opcode.
Definition: ena.h:16
#define HERMON_MKEY_PREFIX
Memory key prefix.
Definition: hermon.h:943
static int hermon_dump_eqctx(struct hermon *hermon, struct hermon_event_queue *hermon_eq)
Dump event queue context (for debugging only)
Definition: hermon.c:1920
#define HERMON_ETH_NUM_SEND_WQES
Number of Hermon Ethernet send work queue entries.
Definition: hermon.c:3364
static int hermon_cmd_write_mtt(struct hermon *hermon, const struct hermonprm_write_mtt *write_mtt)
Definition: hermon.c:342
static int hermon_cmd_set_icm_size(struct hermon *hermon, const struct hermonprm_scalar_parameter *icm_size, struct hermonprm_scalar_parameter *icm_aux_size)
Definition: hermon.c:582
#define HERMON_OPCODE_SEND
Definition: hermon.h:42
size_t cqc_entry_size
CQ context entry size.
Definition: hermon.h:588
static int hermon_mcast_attach(struct ib_device *ibdev, struct ib_queue_pair *qp, union ib_gid *gid)
Attach to multicast group.
Definition: hermon.c:3193
static int hermon_cmd_query_qp(struct hermon *hermon, unsigned long qpn, struct hermonprm_qp_ee_state_transitions *ctx)
Definition: hermon.c:457
Error codes.
#define HERMON_PORT_TYPE_ETH
Definition: hermon.h:97
void * mailbox_in
Command input mailbox.
Definition: hermon.h:875
struct golan_inbox_hdr hdr
Message header.
Definition: CIB_PRM.h:28
u8 owner
Definition: CIB_PRM.h:36
#define HERMON_SET_PORT_RECEIVE_QP
Definition: hermon.h:119
static int hermon_modify_qp(struct ib_device *ibdev, struct ib_queue_pair *qp)
Modify queue pair.
Definition: hermon.c:1312
static void hermon_event_port_mgmnt_change(struct hermon *hermon, union hermonprm_event_entry *eqe)
Handle port management event.
Definition: hermon.c:2163
A command-line command.
Definition: command.h:9
#define HERMON_MAX_EQS
Maximum number of allocatable event queues.
Definition: hermon.h:773
I/O buffers.
#define HERMON_LOG_MULTICAST_HASH_SIZE
Definition: hermon.h:131
#define DBG_ENABLE(level)
Definition: compiler.h:313
struct pci_device_id * ids
PCI ID table.
Definition: pci.h:232
Non-Volatile Storage using Vital Product Data.
int register_nvo(struct nvo_block *nvo, struct settings *parent)
Register non-volatile stored options.
Definition: nvo.c:293
size_t mtt_entry_size
MTT entry size.
Definition: hermon.h:596
uint32_t g
Definition: sha256.c:34
uint32_t readl(volatile uint32_t *io_addr)
Read 32-bit dword from memory-mapped device.
int ib_modify_qp(struct ib_device *ibdev, struct ib_queue_pair *qp)
Modify queue pair.
Definition: infiniband.c:294
static void hermon_destroy_eq(struct hermon *hermon)
Destroy event queue.
Definition: hermon.c:2095
unsigned long user_to_phys(userptr_t userptr, off_t offset)
Convert user pointer to physical address.
int ib_create_cq(struct ib_device *ibdev, unsigned int num_cqes, struct ib_completion_queue_operations *op, struct ib_completion_queue **new_cq)
Create completion queue.
Definition: infiniband.c:98
#define FCOE_VLAN_PRIORITY
FCoE VLAN priority.
Definition: fcoe.h:90
void ib_refill_recv(struct ib_device *ibdev, struct ib_queue_pair *qp)
Refill receive work queue.
Definition: infiniband.c:556
#define DBGC(...)
Definition: compiler.h:505
unsigned long last_poll
Last unsolicited link state poll.
Definition: hermon.h:908
static struct hermon_port_type hermon_port_type_ib
Hermon Infiniband port type.
Definition: hermon.c:3350
__be32 byte_count
Definition: CIB_PRM.h:28
static int hermon_cmd_mod_stat_cfg(struct hermon *hermon, unsigned int mode, unsigned int input_mod, struct hermonprm_scalar_parameter *portion)
Definition: hermon.c:512
struct pci_driver hermon_bofm_driver __bofm_driver
Definition: hermon.c:4248
struct io_buffer *(* alloc_iob)(size_t len)
Allocate receive I/O buffer.
Definition: infiniband.h:153
static int hermon_cmd_query_eq(struct hermon *hermon, unsigned int index, struct hermonprm_eqc *eqctx)
Definition: hermon.c:378
static void hermon_poll_eq(struct ib_device *ibdev)
Poll event queue.
Definition: hermon.c:2188
struct device * dev
Underlying device.
Definition: infiniband.h:410
static int hermon_start(struct hermon *hermon, int running)
Start Hermon device.
Definition: hermon.c:2965
unsigned long long uint64_t
Definition: stdint.h:13
#define DBG_DISABLE(level)
Definition: compiler.h:312
static void *__malloc malloc_phys(size_t size, size_t phys_align)
Allocate memory with specified physical alignment.
Definition: malloc.h:62
unsigned long special_qpn_base
Special QPN base.
Definition: hermon.h:925
static __always_inline void * ib_qp_get_drvdata(struct ib_queue_pair *qp)
Get Infiniband queue pair driver-private data.
Definition: infiniband.h:642
#define ntohl(value)
Definition: byteswap.h:134
#define HERMON_PAGE_SIZE
Definition: hermon.h:105
static void hermon_stop_firmware(struct hermon *hermon)
Stop firmware running.
Definition: hermon.c:2418
#define HERMON_RETRY_MAX
Definition: hermon.h:137
void pci_backup(struct pci_device *pci, struct pci_config_backup *backup, const uint8_t *exclude)
Back up PCI configuration space.
Definition: pcibackup.c:66
static void hermon_destroy_cq(struct ib_device *ibdev, struct ib_completion_queue *cq)
Destroy completion queue.
Definition: hermon.c:965
int pci_read_config_word(struct pci_device *pci, unsigned int where, uint16_t *value)
Read 16-bit word from PCI configuration space.
void netdev_link_down(struct net_device *netdev)
Mark network device as having link down.
Definition: netdevice.c:186
struct ib_global_route_header grh
Definition: ib_packet.h:16
A Hermon send work queue entry.
Definition: hermon.h:659
#define HERMON_HCR_QUERY_PORT
Definition: hermon.h:76
#define ntohs(value)
Definition: byteswap.h:136
static int hermon_set_port_type(struct hermon *hermon, struct hermon_port *port)
Set port type.
Definition: hermon.c:3762
static int hermon_cmd_map_icm(struct hermon *hermon, const struct hermonprm_virtual_physical_mapping *map)
Definition: hermon.c:557
#define HERMON_DB_POST_SND_OFFSET
Definition: hermon.h:107
#define HERMON_OPCODE_RECV_ERROR
Definition: hermon.h:43
#define HERMON_PORT_TYPE_IB
Definition: hermon.h:96
A Hermon completion queue.
Definition: hermon.h:758
#define offsetof(type, field)
Get offset of a field within a structure.
Definition: stddef.h:24
uint8_t mac[ETH_ALEN]
MAC address.
Definition: ena.h:24
struct golan_eq_context ctx
Definition: CIB_PRM.h:28
unsigned int gid_present
GID is present.
Definition: infiniband.h:90
#define HERMON_HCR_MAP_ICM
Definition: hermon.h:82
static int hermon_cmd_hw2sw_eq(struct hermon *hermon, unsigned int index, struct hermonprm_eqc *eqctx)
Definition: hermon.c:369
unsigned int vlan
VLAN, if present.
Definition: infiniband.h:96
static void iob_populate(struct io_buffer *iobuf, void *data, size_t len, size_t max_len)
Create a temporary I/O buffer.
Definition: iobuf.h:190
union hermonprm_event_entry * eqe
Event queue entries.
Definition: hermon.h:778
struct hermonprm_wqe_segment_ctrl_mlx ctrl
Definition: hermon.h:513
#define HERMON_HCR_UNMAP_ICM_AUX
Definition: hermon.h:83
unsigned int reserved_cqs
Number of reserved CQs.
Definition: hermon.h:586
#define HERMON_ETH_NUM_CQES
Number of Hermon Ethernet completion entries.
Definition: hermon.c:3370
userptr_t firmware_area
Firmware area in external memory.
Definition: hermon.h:890
#define HERMON_PCI_UAR_BAR
Definition: hermon.h:33
size_t cqe_size
Size of completion queue.
Definition: hermon.h:762
#define HERMON_HCR_IN_LEN(_command)
Definition: hermon.h:977
static void hermon_free_qpn(struct ib_device *ibdev, struct ib_queue_pair *qp)
Free queue pair number.
Definition: hermon.c:1053
void adjust_pci_device(struct pci_device *pci)
Enable PCI device.
Definition: pci.c:154
struct io_buffer * alloc_iob(size_t len)
Allocate I/O buffer.
Definition: iobuf.c:129
An Infiniband Global Identifier.
Definition: ib_packet.h:33
#define HERMON_HCR_RTR2RTS_QP
Definition: hermon.h:66
static __always_inline unsigned long virt_to_phys(volatile const void *addr)
Convert virtual address to a physical address.
Definition: uaccess.h:287
struct hermonprm_qp_db_record * doorbell
Doorbell record.
Definition: hermon.h:705
__be32 qpn
Definition: CIB_PRM.h:29
enum hermon_queue_pair_state state
Queue state.
Definition: hermon.h:748
#define htonl(value)
Definition: byteswap.h:133
struct device dev
Generic device.
Definition: pci.h:193
static unsigned int hermon_fill_rc_send_wqe(struct ib_device *ibdev, struct ib_queue_pair *qp __unused, struct ib_address_vector *dest __unused, struct io_buffer *iobuf, union hermon_send_wqe *wqe)
Construct RC send work queue entry.
Definition: hermon.c:1553
static struct settings * netdev_settings(struct net_device *netdev)
Get per-netdevice configuration settings block.
Definition: netdevice.h:589
static int hermon_reset(struct hermon *hermon)
Reset device.
Definition: hermon.c:2831
struct hermonprm_set_port_general_context general
Definition: hermon.h:558
A Hermon port type.
Definition: hermon.h:809
union ib_gid dgid
Destination GID.
Definition: ib_packet.h:106
#define ENOTSUP
Operation not supported.
Definition: errno.h:589
void ib_destroy_cq(struct ib_device *ibdev, struct ib_completion_queue *cq)
Destroy completion queue.
Definition: infiniband.c:145
static int hermon_dump_cqctx(struct hermon *hermon, struct ib_completion_queue *cq)
Dump completion queue context (for debugging only)
Definition: hermon.c:822
#define HERMON_SET_PORT_GENERAL_PARAM
Definition: hermon.h:118
static unsigned int hermon_fill_ud_send_wqe(struct ib_device *ibdev, struct ib_queue_pair *qp __unused, struct ib_address_vector *dest, struct io_buffer *iobuf, union hermon_send_wqe *wqe)
Construct UD send work queue entry.
Definition: hermon.c:1459
#define HERMON_INVALID_LKEY
Definition: hermon.h:103
enum ib_rate rate
Rate.
Definition: infiniband.h:86
Dynamic memory allocation.
#define HERMON_HCR_RTS2RTS_QP
Definition: hermon.h:67
#define HERMON_HCR_INOUT_CMD(_opcode, _in_mbox, _in_len, _out_mbox, _out_len)
Build HCR command from component parts.
Definition: hermon.h:981
struct bofm_device bofm
BOFM device.
Definition: hermon.h:936
Definition: hermon.h:534
struct sockaddr_tcpip st
Definition: syslog.c:56
union hermon_recv_wqe * wqe
Work queue entries.
Definition: hermon.h:697
struct hermonprm_wqe_segment_data_ptr data[HERMON_MAX_GATHER]
Definition: hermon.h:525
uint32_t start
Starting offset.
Definition: netvsc.h:12
struct hermonprm_wqe_segment_ctrl_send ctrl
Definition: hermon.h:524
static int hermon_cmd_write_mcg(struct hermon *hermon, unsigned int index, const struct hermonprm_mcg_entry *mcg)
Definition: hermon.c:493
Fibre Channel over Ethernet.
An Infiniband device.
Definition: infiniband.h:398
uint8_t status
Status.
Definition: ena.h:16
struct hermon_mtt mtt
MTT descriptor.
Definition: hermon.h:782
static void netdev_init(struct net_device *netdev, struct net_device_operations *op)
Initialise a network device.
Definition: netdevice.h:510
pseudo_bit_t ci[0x00020]
Definition: arbel.h:11
#define DBGCP_HDA(...)
Definition: compiler.h:540
#define MLX_FILL_3(_ptr, _index,...)
Definition: mlx_bitops.h:175
static void pci_set_drvdata(struct pci_device *pci, void *priv)
Set PCI driver-private data.
Definition: pci.h:344
static int hermon_cmd_rts2rts_qp(struct hermon *hermon, unsigned long qpn, const struct hermonprm_qp_ee_state_transitions *ctx)
Definition: hermon.c:441
#define HERMON_HCR_INIT_HCA
Definition: hermon.h:49
#define HERMON_ST_RC
Definition: hermon.h:90
#define ENOMEM
Not enough space.
Definition: errno.h:534
static int hermon_map_icm(struct hermon *hermon, struct hermonprm_init_hca *init_hca)
Map ICM (allocating if necessary)
Definition: hermon.c:2513
unsigned int mtt_base_addr
MTT base address.
Definition: hermon.h:650
static void hermon_free(struct hermon *hermon)
Free Hermon device.
Definition: hermon.c:3980
Infiniband completion queue operations.
Definition: infiniband.h:194
void * memcpy(void *dest, const void *src, size_t len) __nonnull
__be32 producer_counter
Definition: CIB_PRM.h:44
#define HERMON_HCR_IN_MBOX
Definition: hermon.h:974
Infiniband queue pair operations.
Definition: infiniband.h:147
hermon_bitmask_t cq_inuse[HERMON_BITMASK_SIZE(HERMON_MAX_CQS)]
Completion queue in-use bitmask.
Definition: hermon.h:916
unsigned int num_ports
Number of ports.
Definition: hermon.h:604
static int hermon_cmd_sw2hw_mpt(struct hermon *hermon, unsigned int index, const struct hermonprm_mpt *mpt)
Definition: hermon.c:326
static int hermon_cmd_rtr2rts_qp(struct hermon *hermon, unsigned long qpn, const struct hermonprm_qp_ee_state_transitions *ctx)
Definition: hermon.c:432
struct hermonprm_wqe_segment_data_ptr data[HERMON_MAX_GATHER]
Definition: hermon.h:514
static int hermon_open(struct hermon *hermon)
Open Hermon device.
Definition: hermon.c:3042
u8 port
Port number.
Definition: CIB_PRM.h:31
void * mailbox_out
Command output mailbox.
Definition: hermon.h:877
static __always_inline void * ib_get_drvdata(struct ib_device *ibdev)
Get Infiniband device driver-private data.
Definition: infiniband.h:708
u32 version
Version number.
Definition: ath9k_hw.c:1983
static int hermon_start_firmware(struct hermon *hermon)
Start firmware running.
Definition: hermon.c:2351
uint32_t hermon_bitmask_t
A Hermon resource bitmask.
Definition: hermon.h:798
IBM BladeCenter Open Fabric Manager (BOFM)
static int hermon_cmd(struct hermon *hermon, unsigned long command, unsigned int op_mod, const void *in, unsigned int in_mod, void *out)
Issue HCA command.
Definition: hermon.c:162
int ib_smc_init(struct ib_device *ibdev, ib_local_mad_t local_mad)
Initialise Infiniband parameters using SMC.
Definition: ib_smc.c:232
static __always_inline unsigned long virt_to_bus(volatile const void *addr)
Convert virtual address to a bus address.
Definition: io.h:183
size_t wqe_size
Size of work queue buffer.
Definition: hermon.h:740
static int hermon_configure_special_qps(struct hermon *hermon)
Configure special queue pairs.
Definition: hermon.c:2935
struct hermonprm_eth_send_wqe eth
Definition: hermon.h:664
#define HERMON_MAP_EQ
Definition: hermon.h:115
userptr_t icm
ICM area.
Definition: hermon.h:903
#define be32_to_cpu(value)
Definition: byteswap.h:116
assert((readw(&hdr->flags) &(GTF_reading|GTF_writing))==0)
static void netdev_put(struct net_device *netdev)
Drop reference to network device.
Definition: netdevice.h:567
#define HERMON_SENSE_PORT_TIMEOUT
Timeout for port sensing.
Definition: hermon.c:3701
#define container_of(ptr, type, field)
Get containing structure.
Definition: stddef.h:35
#define HERMON_HCR_INIT2RTR_QP
Definition: hermon.h:65
Ethernet protocol.
struct hermonprm_rc_send_wqe rc
Definition: hermon.h:663
struct ib_device_operations * op
Infiniband operations.
Definition: infiniband.h:416
#define DBGLVL_LOG
Definition: compiler.h:316
pseudo_bit_t p[0x00002]
Definition: MT25218_PRM.h:3017
struct hermonprm_wqe_segment_ctrl_send ctrl
Definition: hermon.h:507
uint64_t offset
Offset (virtual address within ICM)
Definition: hermon.h:615
struct hermonprm_event_queue_entry generic
Definition: hermon.h:540
An Infiniband Work Queue.
Definition: infiniband.h:100
#define ETH_FRAME_LEN
Definition: if_ether.h:11
static int hermon_bofm_update(struct bofm_device *bofm, unsigned int mport, const uint8_t *mac)
Update Ethernet MAC for BOFM.
Definition: hermon.c:3890
void * priv
Driver private data.
Definition: netdevice.h:431
#define DBGC_HDA(...)
Definition: compiler.h:506
size_t wqe_size
Size of work queue.
Definition: hermon.h:699
void * wqe
Work queue buffer.
Definition: hermon.h:738
static void netdev_link_up(struct net_device *netdev)
Mark network device as having link up.
Definition: netdevice.h:780
void ib_complete_send(struct ib_device *ibdev, struct ib_queue_pair *qp, struct io_buffer *iobuf, int rc)
Complete send work queue entry.
Definition: infiniband.c:515