hermon.c
1 /*
2  * Copyright (C) 2008 Michael Brown <mbrown@fensystems.co.uk>.
3  * Copyright (C) 2008 Mellanox Technologies Ltd.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation; either version 2 of the
8  * License, or any later version.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13  * General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write to the Free Software
17  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18  * 02110-1301, USA.
19  */
20 
21 FILE_LICENCE ( GPL2_OR_LATER );
22 
23 #include <stdint.h>
24 #include <stdlib.h>
25 #include <stdio.h>
26 #include <string.h>
27 #include <strings.h>
28 #include <unistd.h>
29 #include <errno.h>
30 #include <byteswap.h>
31 #include <ipxe/io.h>
32 #include <ipxe/pci.h>
33 #include <ipxe/pcibackup.h>
34 #include <ipxe/malloc.h>
35 #include <ipxe/umalloc.h>
36 #include <ipxe/iobuf.h>
37 #include <ipxe/netdevice.h>
38 #include <ipxe/infiniband.h>
39 #include <ipxe/ib_smc.h>
40 #include <ipxe/if_ether.h>
41 #include <ipxe/ethernet.h>
42 #include <ipxe/fcoe.h>
43 #include <ipxe/vlan.h>
44 #include <ipxe/bofm.h>
45 #include <ipxe/nvsvpd.h>
46 #include <ipxe/nvo.h>
47 #include "hermon.h"
48 
49 /**
50  * @file
51  *
52  * Mellanox Hermon Infiniband HCA
53  *
54  */
55 
56 /***************************************************************************
57  *
58  * Queue number allocation
59  *
60  ***************************************************************************
61  */
62 
63 /**
64  * Allocate offsets within usage bitmask
65  *
66  * @v bits Usage bitmask
67  * @v bits_len Length of usage bitmask
68  * @v num_bits Number of contiguous bits to allocate within bitmask
69  * @ret bit First free bit within bitmask, or negative error
70  */
71 static int hermon_bitmask_alloc ( hermon_bitmask_t *bits,
72  unsigned int bits_len,
73  unsigned int num_bits ) {
74  unsigned int bit = 0;
75  hermon_bitmask_t mask = 1;
76  unsigned int found = 0;
77 
78  /* Search bits for num_bits contiguous free bits */
79  while ( bit < bits_len ) {
80  if ( ( mask & *bits ) == 0 ) {
81  if ( ++found == num_bits )
82  goto found;
83  } else {
84  found = 0;
85  }
86  bit++;
87  mask = ( mask << 1 ) | ( mask >> ( 8 * sizeof ( mask ) - 1 ) );
88  if ( mask == 1 )
89  bits++;
90  }
91  return -ENFILE;
92 
93  found:
94  /* Mark bits as in-use */
95  do {
96  *bits |= mask;
97  if ( mask == 1 )
98  bits--;
99  mask = ( mask >> 1 ) | ( mask << ( 8 * sizeof ( mask ) - 1 ) );
100  } while ( --found );
101 
102  return ( bit - num_bits + 1 );
103 }
104 
105 /**
106  * Free offsets within usage bitmask
107  *
108  * @v bits Usage bitmask
109  * @v bit Starting bit within bitmask
110  * @v num_bits Number of contiguous bits to free within bitmask
111  */
112 static void hermon_bitmask_free ( hermon_bitmask_t *bits,
113  int bit, unsigned int num_bits ) {
114  hermon_bitmask_t mask;
115 
116  for ( ; num_bits ; bit++, num_bits-- ) {
117  mask = ( 1 << ( bit % ( 8 * sizeof ( mask ) ) ) );
118  bits[ ( bit / ( 8 * sizeof ( mask ) ) ) ] &= ~mask;
119  }
120 }
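
The two helpers above implement a first-fit allocator over a packed usage bitmask. As a rough standalone sketch of the same idea (a simplified word-indexed form, not the driver's rolling-mask implementation; all names here are illustrative):

#include <stdint.h>

typedef uint32_t bitmask_t;
#define BITS_PER_WORD ( 8 * sizeof ( bitmask_t ) )

/* Find and mark num_bits contiguous zero bits; return first bit or -1 */
static int bitmask_alloc ( bitmask_t *bits, unsigned int bits_len,
                           unsigned int num_bits ) {
	unsigned int bit, found = 0;

	for ( bit = 0 ; bit < bits_len ; bit++ ) {
		if ( ! ( bits[ bit / BITS_PER_WORD ] &
			 ( 1U << ( bit % BITS_PER_WORD ) ) ) ) {
			if ( ++found == num_bits ) {
				/* Mark the run ending at "bit" as in use */
				for ( ; found ; found--, bit-- )
					bits[ bit / BITS_PER_WORD ] |=
						( 1U << ( bit % BITS_PER_WORD ) );
				return ( bit + 1 );
			}
		} else {
			found = 0;
		}
	}
	return -1;	/* no free run large enough */
}
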
121 
122 /***************************************************************************
123  *
124  * HCA commands
125  *
126  ***************************************************************************
127  */
128 
129 /**
130  * Wait for Hermon command completion
131  *
132  * @v hermon Hermon device
133  * @v hcr HCA command registers
134  * @ret rc Return status code
135  */
136 static int hermon_cmd_wait ( struct hermon *hermon,
137  struct hermonprm_hca_command_register *hcr ) {
138  unsigned int wait;
139 
140  for ( wait = ( 100 * HERMON_HCR_MAX_WAIT_MS ) ; wait ; wait-- ) {
141  hcr->u.dwords[6] =
142  readl ( hermon->config + HERMON_HCR_REG ( 6 ) );
143  if ( ( MLX_GET ( hcr, go ) == 0 ) &&
144  ( MLX_GET ( hcr, t ) == hermon->toggle ) )
145  return 0;
146  udelay ( 10 );
147  }
148  return -EBUSY;
149 }
150 
151 /**
152  * Issue HCA command
153  *
154  * @v hermon Hermon device
155  * @v command Command opcode, flags and input/output lengths
156  * @v op_mod Opcode modifier (0 if no modifier applicable)
157  * @v in Input parameters
158  * @v in_mod Input modifier (0 if no modifier applicable)
159  * @v out Output parameters
160  * @ret rc Return status code
161  */
162 static int hermon_cmd ( struct hermon *hermon, unsigned long command,
163  unsigned int op_mod, const void *in,
164  unsigned int in_mod, void *out ) {
165  struct hermonprm_hca_command_register hcr;
166  unsigned int opcode = HERMON_HCR_OPCODE ( command );
167  size_t in_len = HERMON_HCR_IN_LEN ( command );
168  size_t out_len = HERMON_HCR_OUT_LEN ( command );
169  void *in_buffer;
170  void *out_buffer;
171  unsigned int status;
172  unsigned int i;
173  int rc;
174 
175  assert ( in_len <= HERMON_MBOX_SIZE );
176  assert ( out_len <= HERMON_MBOX_SIZE );
177 
178  DBGC2 ( hermon, "Hermon %p command %04x in %zx%s out %zx%s\n",
179  hermon, opcode, in_len,
180  ( ( command & HERMON_HCR_IN_MBOX ) ? "(mbox)" : "" ), out_len,
181  ( ( command & HERMON_HCR_OUT_MBOX ) ? "(mbox)" : "" ) );
182 
183  /* Check that HCR is free */
184  if ( ( rc = hermon_cmd_wait ( hermon, &hcr ) ) != 0 ) {
185  DBGC ( hermon, "Hermon %p command interface locked\n",
186  hermon );
187  return rc;
188  }
189 
190  /* Flip HCR toggle */
191  hermon->toggle = ( 1 - hermon->toggle );
192 
193  /* Prepare HCR */
194  memset ( &hcr, 0, sizeof ( hcr ) );
195  in_buffer = &hcr.u.dwords[0];
196  if ( in_len && ( command & HERMON_HCR_IN_MBOX ) ) {
197  memset ( hermon->mailbox_in, 0, HERMON_MBOX_SIZE );
198  in_buffer = hermon->mailbox_in;
199  MLX_FILL_H ( &hcr, 0, in_param_h, virt_to_bus ( in_buffer ) );
200  MLX_FILL_1 ( &hcr, 1, in_param_l, virt_to_bus ( in_buffer ) );
201  }
202  memcpy ( in_buffer, in, in_len );
203  MLX_FILL_1 ( &hcr, 2, input_modifier, in_mod );
204  out_buffer = &hcr.u.dwords[3];
205  if ( out_len && ( command & HERMON_HCR_OUT_MBOX ) ) {
206  out_buffer = hermon->mailbox_out;
207  MLX_FILL_H ( &hcr, 3, out_param_h,
208  virt_to_bus ( out_buffer ) );
209  MLX_FILL_1 ( &hcr, 4, out_param_l,
210  virt_to_bus ( out_buffer ) );
211  }
212  MLX_FILL_4 ( &hcr, 6,
213  opcode, opcode,
214  opcode_modifier, op_mod,
215  go, 1,
216  t, hermon->toggle );
217  DBGC2_HDA ( hermon, virt_to_phys ( hermon->config + HERMON_HCR_BASE ),
218  &hcr, sizeof ( hcr ) );
219  if ( in_len && ( command & HERMON_HCR_IN_MBOX ) ) {
220  DBGC2 ( hermon, "Input mailbox:\n" );
221  DBGC2_HDA ( hermon, virt_to_phys ( in_buffer ), in_buffer,
222  ( ( in_len < 512 ) ? in_len : 512 ) );
223  }
224 
225  /* Issue command */
226  for ( i = 0 ; i < ( sizeof ( hcr ) / sizeof ( hcr.u.dwords[0] ) ) ;
227  i++ ) {
228  writel ( hcr.u.dwords[i],
229  hermon->config + HERMON_HCR_REG ( i ) );
230  barrier();
231  }
232 
233  /* Wait for command completion */
234  if ( ( rc = hermon_cmd_wait ( hermon, &hcr ) ) != 0 ) {
235  DBGC ( hermon, "Hermon %p timed out waiting for command "
236  "%04x:\n", hermon, opcode );
237  DBGC_HDA ( hermon,
238  virt_to_phys ( hermon->config + HERMON_HCR_BASE ),
239  &hcr, sizeof ( hcr ) );
240  return rc;
241  }
242 
243  /* Check command status */
244  status = MLX_GET ( &hcr, status );
245  if ( status != 0 ) {
246  DBGC ( hermon, "Hermon %p command %04x failed with status "
247  "%02x:\n", hermon, opcode, status );
248  DBGC_HDA ( hermon,
249  virt_to_phys ( hermon->config + HERMON_HCR_BASE ),
250  &hcr, sizeof ( hcr ) );
251  return -EIO;
252  }
253 
254  /* Read output parameters, if any */
255  hcr.u.dwords[3] = readl ( hermon->config + HERMON_HCR_REG ( 3 ) );
256  hcr.u.dwords[4] = readl ( hermon->config + HERMON_HCR_REG ( 4 ) );
257  memcpy ( out, out_buffer, out_len );
258  if ( out_len ) {
259  DBGC2 ( hermon, "Output%s:\n",
260  ( command & HERMON_HCR_OUT_MBOX ) ? " mailbox" : "" );
261  DBGC2_HDA ( hermon, virt_to_phys ( out_buffer ), out_buffer,
262  ( ( out_len < 512 ) ? out_len : 512 ) );
263  }
264 
265  return 0;
266 }
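
hermon_cmd() and hermon_cmd_wait() implement a "go bit plus toggle" handshake: the driver flips its toggle on every command, sets the go bit when it posts, and the command is complete once the hardware has cleared go and the toggle it reports matches the driver's. A minimal sketch of that pattern (the bit positions and register layout below are made up for illustration, not the real HCR encoding):

#include <stdint.h>

#define FAKE_GO		( 1U << 23 )	/* made-up "go" bit */
#define FAKE_TOGGLE	( 1U << 21 )	/* made-up toggle bit */

/* Post a command word, then poll until the device clears "go" and echoes
 * our toggle; returns 0 on completion, -1 on timeout */
static int post_and_wait ( volatile uint32_t *ctrl, unsigned int *toggle,
			   uint32_t opcode, unsigned int max_polls ) {
	uint32_t status;

	*toggle ^= 1;
	*ctrl = ( opcode | FAKE_GO | ( *toggle ? FAKE_TOGGLE : 0 ) );

	while ( max_polls-- ) {
		status = *ctrl;
		if ( ( ! ( status & FAKE_GO ) ) &&
		     ( ( !! ( status & FAKE_TOGGLE ) ) == *toggle ) )
			return 0;
	}
	return -1;
}
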
267 
268 static inline int
269 hermon_cmd_query_dev_cap ( struct hermon *hermon,
270  struct hermonprm_query_dev_cap *dev_cap ) {
271  return hermon_cmd ( hermon,
272  HERMON_HCR_OUT_CMD ( HERMON_HCR_QUERY_DEV_CAP,
273  1, sizeof ( *dev_cap ) ),
274  0, NULL, 0, dev_cap );
275 }
276 
277 static inline int
278 hermon_cmd_query_fw ( struct hermon *hermon, struct hermonprm_query_fw *fw ) {
279  return hermon_cmd ( hermon,
280  HERMON_HCR_OUT_CMD ( HERMON_HCR_QUERY_FW,
281  1, sizeof ( *fw ) ),
282  0, NULL, 0, fw );
283 }
284 
285 static inline int
286 hermon_cmd_init_hca ( struct hermon *hermon,
287  const struct hermonprm_init_hca *init_hca ) {
288  return hermon_cmd ( hermon,
289  HERMON_HCR_IN_CMD ( HERMON_HCR_INIT_HCA,
290  1, sizeof ( *init_hca ) ),
291  0, init_hca, 0, NULL );
292 }
293 
294 static inline int
295 hermon_cmd_close_hca ( struct hermon *hermon ) {
296  return hermon_cmd ( hermon,
297  HERMON_HCR_VOID_CMD ( HERMON_HCR_CLOSE_HCA ),
298  0, NULL, 0, NULL );
299 }
300 
301 static inline int
302 hermon_cmd_init_port ( struct hermon *hermon, unsigned int port ) {
303  return hermon_cmd ( hermon,
305  0, NULL, port, NULL );
306 }
307 
308 static inline int
309 hermon_cmd_close_port ( struct hermon *hermon, unsigned int port ) {
310  return hermon_cmd ( hermon,
312  0, NULL, port, NULL );
313 }
314 
315 static inline int
316 hermon_cmd_set_port ( struct hermon *hermon, int is_ethernet,
317  unsigned int port_selector,
318  const union hermonprm_set_port *set_port ) {
319  return hermon_cmd ( hermon,
321  1, sizeof ( *set_port ) ),
322  is_ethernet, set_port, port_selector, NULL );
323 }
324 
325 static inline int
326 hermon_cmd_sw2hw_mpt ( struct hermon *hermon, unsigned int index,
327  const struct hermonprm_mpt *mpt ) {
328  return hermon_cmd ( hermon,
330  1, sizeof ( *mpt ) ),
331  0, mpt, index, NULL );
332 }
333 
334 static inline int
335 hermon_cmd_hw2sw_mpt ( struct hermon *hermon, unsigned int index ) {
336  return hermon_cmd ( hermon,
338  0, NULL, index, NULL );
339 }
340 
341 static inline int
342 hermon_cmd_write_mtt ( struct hermon *hermon,
343  const struct hermonprm_write_mtt *write_mtt ) {
344  return hermon_cmd ( hermon,
345  HERMON_HCR_IN_CMD ( HERMON_HCR_WRITE_MTT,
346  1, sizeof ( *write_mtt ) ),
347  0, write_mtt, 1, NULL );
348 }
349 
350 static inline int
351 hermon_cmd_map_eq ( struct hermon *hermon, unsigned long index_map,
352  const struct hermonprm_event_mask *mask ) {
353  return hermon_cmd ( hermon,
355  0, sizeof ( *mask ) ),
356  0, mask, index_map, NULL );
357 }
358 
359 static inline int
360 hermon_cmd_sw2hw_eq ( struct hermon *hermon, unsigned int index,
361  const struct hermonprm_eqc *eqctx ) {
362  return hermon_cmd ( hermon,
364  1, sizeof ( *eqctx ) ),
365  0, eqctx, index, NULL );
366 }
367 
368 static inline int
369 hermon_cmd_hw2sw_eq ( struct hermon *hermon, unsigned int index,
370  struct hermonprm_eqc *eqctx ) {
371  return hermon_cmd ( hermon,
373  1, sizeof ( *eqctx ) ),
374  1, NULL, index, eqctx );
375 }
376 
377 static inline int
378 hermon_cmd_query_eq ( struct hermon *hermon, unsigned int index,
379  struct hermonprm_eqc *eqctx ) {
380  return hermon_cmd ( hermon,
382  1, sizeof ( *eqctx ) ),
383  0, NULL, index, eqctx );
384 }
385 
386 static inline int
387 hermon_cmd_sw2hw_cq ( struct hermon *hermon, unsigned long cqn,
388  const struct hermonprm_completion_queue_context *cqctx ){
389  return hermon_cmd ( hermon,
391  1, sizeof ( *cqctx ) ),
392  0, cqctx, cqn, NULL );
393 }
394 
395 static inline int
396 hermon_cmd_hw2sw_cq ( struct hermon *hermon, unsigned long cqn,
397  struct hermonprm_completion_queue_context *cqctx ) {
398  return hermon_cmd ( hermon,
400  1, sizeof ( *cqctx ) ),
401  0, NULL, cqn, cqctx );
402 }
403 
404 static inline int
405 hermon_cmd_query_cq ( struct hermon *hermon, unsigned long cqn,
406  struct hermonprm_completion_queue_context *cqctx ) {
407  return hermon_cmd ( hermon,
409  1, sizeof ( *cqctx ) ),
410  0, NULL, cqn, cqctx );
411 }
412 
413 static inline int
414 hermon_cmd_rst2init_qp ( struct hermon *hermon, unsigned long qpn,
415  const struct hermonprm_qp_ee_state_transitions *ctx ){
416  return hermon_cmd ( hermon,
418  1, sizeof ( *ctx ) ),
419  0, ctx, qpn, NULL );
420 }
421 
422 static inline int
423 hermon_cmd_init2rtr_qp ( struct hermon *hermon, unsigned long qpn,
424  const struct hermonprm_qp_ee_state_transitions *ctx ){
425  return hermon_cmd ( hermon,
427  1, sizeof ( *ctx ) ),
428  0, ctx, qpn, NULL );
429 }
430 
431 static inline int
432 hermon_cmd_rtr2rts_qp ( struct hermon *hermon, unsigned long qpn,
433  const struct hermonprm_qp_ee_state_transitions *ctx ) {
434  return hermon_cmd ( hermon,
436  1, sizeof ( *ctx ) ),
437  0, ctx, qpn, NULL );
438 }
439 
440 static inline int
441 hermon_cmd_rts2rts_qp ( struct hermon *hermon, unsigned long qpn,
442  const struct hermonprm_qp_ee_state_transitions *ctx ) {
443  return hermon_cmd ( hermon,
445  1, sizeof ( *ctx ) ),
446  0, ctx, qpn, NULL );
447 }
448 
449 static inline int
450 hermon_cmd_2rst_qp ( struct hermon *hermon, unsigned long qpn ) {
451  return hermon_cmd ( hermon,
453  0x03, NULL, qpn, NULL );
454 }
455 
456 static inline int
457 hermon_cmd_query_qp ( struct hermon *hermon, unsigned long qpn,
458  struct hermonprm_qp_ee_state_transitions *ctx ) {
459  return hermon_cmd ( hermon,
461  1, sizeof ( *ctx ) ),
462  0, NULL, qpn, ctx );
463 }
464 
465 static inline int
466 hermon_cmd_conf_special_qp ( struct hermon *hermon, unsigned int internal_qps,
467  unsigned long base_qpn ) {
468  return hermon_cmd ( hermon,
470  internal_qps, NULL, base_qpn, NULL );
471 }
472 
473 static inline int
474 hermon_cmd_mad_ifc ( struct hermon *hermon, unsigned int port,
475  union hermonprm_mad *mad ) {
476  return hermon_cmd ( hermon,
478  1, sizeof ( *mad ),
479  1, sizeof ( *mad ) ),
480  0x03, mad, port, mad );
481 }
482 
483 static inline int
484 hermon_cmd_read_mcg ( struct hermon *hermon, unsigned int index,
485  struct hermonprm_mcg_entry *mcg ) {
486  return hermon_cmd ( hermon,
488  1, sizeof ( *mcg ) ),
489  0, NULL, index, mcg );
490 }
491 
492 static inline int
493 hermon_cmd_write_mcg ( struct hermon *hermon, unsigned int index,
494  const struct hermonprm_mcg_entry *mcg ) {
495  return hermon_cmd ( hermon,
497  1, sizeof ( *mcg ) ),
498  0, mcg, index, NULL );
499 }
500 
501 static inline int
502 hermon_cmd_mgid_hash ( struct hermon *hermon, const union ib_gid *gid,
503  struct hermonprm_mgm_hash *hash ) {
504  return hermon_cmd ( hermon,
506  1, sizeof ( *gid ),
507  0, sizeof ( *hash ) ),
508  0, gid, 0, hash );
509 }
510 
511 static inline int
512 hermon_cmd_mod_stat_cfg ( struct hermon *hermon, unsigned int mode,
513  unsigned int input_mod,
514  struct hermonprm_scalar_parameter *portion ) {
515  return hermon_cmd ( hermon,
517  0, sizeof ( *portion ),
518  0, sizeof ( *portion ) ),
519  mode, portion, input_mod, portion );
520 }
521 
522 static inline int
523 hermon_cmd_query_port ( struct hermon *hermon, unsigned int port,
524  struct hermonprm_query_port_cap *query_port ) {
525  return hermon_cmd ( hermon,
527  1, sizeof ( *query_port ) ),
528  0, NULL, port, query_port );
529 }
530 
531 static inline int
532 hermon_cmd_sense_port ( struct hermon *hermon, unsigned int port,
533  struct hermonprm_sense_port *port_type ) {
534  return hermon_cmd ( hermon,
536  0, sizeof ( *port_type ) ),
537  0, NULL, port, port_type );
538 }
539 
540 static inline int
541 hermon_cmd_run_fw ( struct hermon *hermon ) {
542  return hermon_cmd ( hermon,
543  HERMON_HCR_VOID_CMD ( HERMON_HCR_RUN_FW ),
544  0, NULL, 0, NULL );
545 }
546 
547 static inline int
548 hermon_cmd_unmap_icm ( struct hermon *hermon, unsigned int page_count,
549  const struct hermonprm_scalar_parameter *offset ) {
550  return hermon_cmd ( hermon,
552  0, sizeof ( *offset ) ),
553  0, offset, page_count, NULL );
554 }
555 
556 static inline int
557 hermon_cmd_map_icm ( struct hermon *hermon,
558  const struct hermonprm_virtual_physical_mapping *map ) {
559  return hermon_cmd ( hermon,
560  HERMON_HCR_IN_CMD ( HERMON_HCR_MAP_ICM,
561  1, sizeof ( *map ) ),
562  0, map, 1, NULL );
563 }
564 
565 static inline int
566 hermon_cmd_unmap_icm_aux ( struct hermon *hermon ) {
567  return hermon_cmd ( hermon,
568  HERMON_HCR_VOID_CMD ( HERMON_HCR_UNMAP_ICM_AUX ),
569  0, NULL, 0, NULL );
570 }
571 
572 static inline int
573 hermon_cmd_map_icm_aux ( struct hermon *hermon,
574  const struct hermonprm_virtual_physical_mapping *map ) {
575  return hermon_cmd ( hermon,
576  HERMON_HCR_IN_CMD ( HERMON_HCR_MAP_ICM_AUX,
577  1, sizeof ( *map ) ),
578  0, map, 1, NULL );
579 }
580 
581 static inline int
582 hermon_cmd_set_icm_size ( struct hermon *hermon,
583  const struct hermonprm_scalar_parameter *icm_size,
584  struct hermonprm_scalar_parameter *icm_aux_size ) {
585  return hermon_cmd ( hermon,
586  HERMON_HCR_INOUT_CMD ( HERMON_HCR_SET_ICM_SIZE,
587  0, sizeof ( *icm_size ),
588  0, sizeof (*icm_aux_size) ),
589  0, icm_size, 0, icm_aux_size );
590 }
591 
592 static inline int
593 hermon_cmd_unmap_fa ( struct hermon *hermon ) {
594  return hermon_cmd ( hermon,
595  HERMON_HCR_VOID_CMD ( HERMON_HCR_UNMAP_FA ),
596  0, NULL, 0, NULL );
597 }
598 
599 static inline int
600 hermon_cmd_map_fa ( struct hermon *hermon,
601  const struct hermonprm_virtual_physical_mapping *map ) {
602  return hermon_cmd ( hermon,
603  HERMON_HCR_IN_CMD ( HERMON_HCR_MAP_FA,
604  1, sizeof ( *map ) ),
605  0, map, 1, NULL );
606 }
607 
608 /***************************************************************************
609  *
610  * Memory translation table operations
611  *
612  ***************************************************************************
613  */
614 
615 /**
616  * Allocate MTT entries
617  *
618  * @v hermon Hermon device
619  * @v memory Memory to map into MTT
620  * @v len Length of memory to map
621  * @v mtt MTT descriptor to fill in
622  * @ret rc Return status code
623  */
624 static int hermon_alloc_mtt ( struct hermon *hermon,
625  const void *memory, size_t len,
626  struct hermon_mtt *mtt ) {
627  struct hermonprm_write_mtt write_mtt;
628  physaddr_t start;
629  physaddr_t addr;
630  unsigned int page_offset;
631  unsigned int num_pages;
632  int mtt_offset;
633  unsigned int mtt_base_addr;
634  unsigned int i;
635  int rc;
636 
637  /* Find available MTT entries */
638  start = virt_to_phys ( memory );
639  page_offset = ( start & ( HERMON_PAGE_SIZE - 1 ) );
640  start -= page_offset;
641  len += page_offset;
642  num_pages = ( ( len + HERMON_PAGE_SIZE - 1 ) / HERMON_PAGE_SIZE );
643  mtt_offset = hermon_bitmask_alloc ( hermon->mtt_inuse, HERMON_MAX_MTTS,
644  num_pages );
645  if ( mtt_offset < 0 ) {
646  rc = mtt_offset;
647  DBGC ( hermon, "Hermon %p could not allocate %d MTT entries: "
648  "%s\n", hermon, num_pages, strerror ( rc ) );
649  goto err_mtt_offset;
650  }
651  mtt_base_addr = ( ( hermon->cap.reserved_mtts + mtt_offset ) *
652  hermon->cap.mtt_entry_size );
653  addr = start;
654 
655  /* Fill in MTT structure */
656  mtt->mtt_offset = mtt_offset;
657  mtt->num_pages = num_pages;
658  mtt->mtt_base_addr = mtt_base_addr;
659  mtt->page_offset = page_offset;
660 
661  /* Construct and issue WRITE_MTT commands */
662  for ( i = 0 ; i < num_pages ; i++ ) {
663  memset ( &write_mtt, 0, sizeof ( write_mtt ) );
664  MLX_FILL_1 ( &write_mtt.mtt_base_addr, 1,
665  value, mtt_base_addr );
666  MLX_FILL_H ( &write_mtt.mtt, 0, ptag_h, addr );
667  MLX_FILL_2 ( &write_mtt.mtt, 1,
668  p, 1,
669  ptag_l, ( addr >> 3 ) );
670  if ( ( rc = hermon_cmd_write_mtt ( hermon,
671  &write_mtt ) ) != 0 ) {
672  DBGC ( hermon, "Hermon %p could not write MTT at %x: "
673  "%s\n", hermon, mtt_base_addr,
674  strerror ( rc ) );
675  goto err_write_mtt;
676  }
677  addr += HERMON_PAGE_SIZE;
678  mtt_base_addr += sizeof ( write_mtt.mtt );
679  }
680 
681  DBGC ( hermon, "Hermon %p MTT entries [%#x,%#x] for "
682  "[%08lx,%08lx,%08lx,%08lx)\n", hermon, mtt->mtt_offset,
683  ( mtt->mtt_offset + mtt->num_pages - 1 ), start,
684  ( start + page_offset ), ( start + len ), addr );
685 
686  return 0;
687 
688  err_write_mtt:
689  hermon_bitmask_free ( hermon->mtt_inuse, mtt_offset, num_pages );
690  err_mtt_offset:
691  return rc;
692 }
693 
694 /**
695  * Free MTT entries
696  *
697  * @v hermon Hermon device
698  * @v mtt MTT descriptor
699  */
700 static void hermon_free_mtt ( struct hermon *hermon,
701  struct hermon_mtt *mtt ) {
702 
703  DBGC ( hermon, "Hermon %p MTT entries [%#x,%#x] freed\n",
704  hermon, mtt->mtt_offset,
705  ( mtt->mtt_offset + mtt->num_pages - 1 ) );
706  hermon_bitmask_free ( hermon->mtt_inuse, mtt->mtt_offset,
707  mtt->num_pages );
708 }
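
For reference, the page arithmetic in hermon_alloc_mtt() above is the usual align-down/round-up dance. A tiny standalone check (PAGE_SIZE stands in for HERMON_PAGE_SIZE; the values are arbitrary):

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main ( void ) {
	unsigned long start = 0x12345678UL;	/* example buffer address */
	unsigned long len = 6000UL;		/* example buffer length */
	unsigned long page_offset = ( start & ( PAGE_SIZE - 1 ) );
	unsigned long num_pages;

	start -= page_offset;			/* align start down to a page */
	len += page_offset;			/* grow length by the same amount */
	num_pages = ( ( len + PAGE_SIZE - 1 ) / PAGE_SIZE );

	/* 0x12345678 starts 0x678 bytes into a page, so 6000+0x678 bytes
	 * span 2 pages */
	printf ( "%lu pages starting at %#lx\n", num_pages, start );
	return 0;
}
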
709 
710 /***************************************************************************
711  *
712  * Static configuration operations
713  *
714  ***************************************************************************
715  */
716 
717 /**
718  * Calculate offset within static configuration
719  *
720  * @v field Field
721  * @ret offset Offset
722  */
723 #define HERMON_MOD_STAT_CFG_OFFSET( field ) \
724  ( ( MLX_BIT_OFFSET ( struct hermonprm_mod_stat_cfg_st, field ) / 8 ) \
725  & ~( sizeof ( struct hermonprm_scalar_parameter ) - 1 ) )
726 
727 /**
728  * Query or modify static configuration
729  *
730  * @v hermon Hermon device
731  * @v port Port
732  * @v mode Command mode
733  * @v offset Offset within static configuration
734  * @v stat_cfg Static configuration
735  * @ret rc Return status code
736  */
737 static int hermon_mod_stat_cfg ( struct hermon *hermon, unsigned int port,
738  unsigned int mode, unsigned int offset,
739  struct hermonprm_mod_stat_cfg *stat_cfg ) {
740  struct hermonprm_scalar_parameter *portion =
741  ( ( void * ) &stat_cfg->u.bytes[offset] );
742  struct hermonprm_mod_stat_cfg_input_mod mod;
743  int rc;
744 
745  /* Sanity check */
746  assert ( ( offset % sizeof ( *portion ) ) == 0 );
747 
748  /* Construct input modifier */
749  memset ( &mod, 0, sizeof ( mod ) );
750  MLX_FILL_2 ( &mod, 0,
751  portnum, port,
752  offset, offset );
753 
754  /* Issue command */
755  if ( ( rc = hermon_cmd_mod_stat_cfg ( hermon, mode,
756  be32_to_cpu ( mod.u.dwords[0] ),
757  portion ) ) != 0 )
758  return rc;
759 
760  return 0;
761 }
762 
763 /***************************************************************************
764  *
765  * MAD operations
766  *
767  ***************************************************************************
768  */
769 
770 /**
771  * Issue management datagram
772  *
773  * @v ibdev Infiniband device
774  * @v mad Management datagram
775  * @ret rc Return status code
776  */
777 static int hermon_mad ( struct ib_device *ibdev, union ib_mad *mad ) {
778  struct hermon *hermon = ib_get_drvdata ( ibdev );
779  union hermonprm_mad mad_ifc;
780  int rc;
781 
782  /* Sanity check */
783  static_assert ( sizeof ( *mad ) == sizeof ( mad_ifc.mad ) );
784 
785  /* Copy in request packet */
786  memcpy ( &mad_ifc.mad, mad, sizeof ( mad_ifc.mad ) );
787 
788  /* Issue MAD */
789  if ( ( rc = hermon_cmd_mad_ifc ( hermon, ibdev->port,
790  &mad_ifc ) ) != 0 ) {
791  DBGC ( hermon, "Hermon %p port %d could not issue MAD IFC: "
792  "%s\n", hermon, ibdev->port, strerror ( rc ) );
793  return rc;
794  }
795 
796  /* Copy out reply packet */
797  memcpy ( mad, &mad_ifc.mad, sizeof ( *mad ) );
798 
799  if ( mad->hdr.status != 0 ) {
800  DBGC ( hermon, "Hermon %p port %d MAD IFC status %04x\n",
801  hermon, ibdev->port, ntohs ( mad->hdr.status ) );
802  return -EIO;
803  }
804  return 0;
805 }
806 
807 /***************************************************************************
808  *
809  * Completion queue operations
810  *
811  ***************************************************************************
812  */
813 
814 /**
815  * Dump completion queue context (for debugging only)
816  *
817  * @v hermon Hermon device
818  * @v cq Completion queue
819  * @ret rc Return status code
820  */
821 static __attribute__ (( unused )) int
822 hermon_dump_cqctx ( struct hermon *hermon, struct ib_completion_queue *cq ) {
823  struct hermonprm_completion_queue_context cqctx;
824  int rc;
825 
826  /* Do nothing unless debugging is enabled */
827  if ( ! DBG_LOG )
828  return 0;
829 
830  /* Dump completion queue context */
831  memset ( &cqctx, 0, sizeof ( cqctx ) );
832  if ( ( rc = hermon_cmd_query_cq ( hermon, cq->cqn, &cqctx ) ) != 0 ) {
833  DBGC ( hermon, "Hermon %p CQN %#lx QUERY_CQ failed: %s\n",
834  hermon, cq->cqn, strerror ( rc ) );
835  return rc;
836  }
837  DBGC ( hermon, "Hermon %p CQN %#lx context:\n", hermon, cq->cqn );
838  DBGC_HDA ( hermon, 0, &cqctx, sizeof ( cqctx ) );
839 
840  return 0;
841 }
842 
843 /**
844  * Create completion queue
845  *
846  * @v ibdev Infiniband device
847  * @v cq Completion queue
848  * @ret rc Return status code
849  */
850 static int hermon_create_cq ( struct ib_device *ibdev,
851  struct ib_completion_queue *cq ) {
852  struct hermon *hermon = ib_get_drvdata ( ibdev );
853  struct hermon_completion_queue *hermon_cq;
854  struct hermonprm_completion_queue_context cqctx;
855  int cqn_offset;
856  unsigned int i;
857  int rc;
858 
859  /* Find a free completion queue number */
860  cqn_offset = hermon_bitmask_alloc ( hermon->cq_inuse,
861  HERMON_MAX_CQS, 1 );
862  if ( cqn_offset < 0 ) {
863  DBGC ( hermon, "Hermon %p out of completion queues\n",
864  hermon );
865  rc = cqn_offset;
866  goto err_cqn_offset;
867  }
868  cq->cqn = ( hermon->cap.reserved_cqs + cqn_offset );
869 
870  /* Allocate control structures */
871  hermon_cq = zalloc ( sizeof ( *hermon_cq ) );
872  if ( ! hermon_cq ) {
873  DBGC ( hermon, "Hermon %p CQN %#lx could not allocate CQ\n",
874  hermon, cq->cqn );
875  rc = -ENOMEM;
876  goto err_hermon_cq;
877  }
878 
879  /* Allocate doorbell */
880  hermon_cq->doorbell = malloc_phys ( sizeof ( hermon_cq->doorbell[0] ),
881  sizeof ( hermon_cq->doorbell[0] ) );
882  if ( ! hermon_cq->doorbell ) {
883  DBGC ( hermon, "Hermon %p CQN %#lx could not allocate "
884  "doorbell\n", hermon, cq->cqn );
885  rc = -ENOMEM;
886  goto err_doorbell;
887  }
888  memset ( hermon_cq->doorbell, 0, sizeof ( hermon_cq->doorbell[0] ) );
889 
890  /* Allocate completion queue itself */
891  hermon_cq->cqe_size = ( cq->num_cqes * sizeof ( hermon_cq->cqe[0] ) );
892  hermon_cq->cqe = malloc_phys ( hermon_cq->cqe_size,
893  sizeof ( hermon_cq->cqe[0] ) );
894  if ( ! hermon_cq->cqe ) {
895  DBGC ( hermon, "Hermon %p CQN %#lx could not allocate CQEs\n",
896  hermon, cq->cqn );
897  rc = -ENOMEM;
898  goto err_cqe;
899  }
900  memset ( hermon_cq->cqe, 0, hermon_cq->cqe_size );
901  for ( i = 0 ; i < cq->num_cqes ; i++ ) {
902  MLX_FILL_1 ( &hermon_cq->cqe[i].normal, 7, owner, 1 );
903  }
904  barrier();
905 
906  /* Allocate MTT entries */
907  if ( ( rc = hermon_alloc_mtt ( hermon, hermon_cq->cqe,
908  hermon_cq->cqe_size,
909  &hermon_cq->mtt ) ) != 0 ) {
910  DBGC ( hermon, "Hermon %p CQN %#lx could not allocate MTTs: "
911  "%s\n", hermon, cq->cqn, strerror ( rc ) );
912  goto err_alloc_mtt;
913  }
914 
915  /* Hand queue over to hardware */
916  memset ( &cqctx, 0, sizeof ( cqctx ) );
917  MLX_FILL_1 ( &cqctx, 0, st, 0xa /* "Event fired" */ );
918  MLX_FILL_1 ( &cqctx, 2,
919  page_offset, ( hermon_cq->mtt.page_offset >> 5 ) );
920  MLX_FILL_2 ( &cqctx, 3,
921  usr_page, HERMON_UAR_NON_EQ_PAGE,
922  log_cq_size, fls ( cq->num_cqes - 1 ) );
923  MLX_FILL_1 ( &cqctx, 5, c_eqn, hermon->eq.eqn );
924  MLX_FILL_H ( &cqctx, 6, mtt_base_addr_h,
925  hermon_cq->mtt.mtt_base_addr );
926  MLX_FILL_1 ( &cqctx, 7, mtt_base_addr_l,
927  ( hermon_cq->mtt.mtt_base_addr >> 3 ) );
928  MLX_FILL_H ( &cqctx, 14, db_record_addr_h,
929  virt_to_phys ( hermon_cq->doorbell ) );
930  MLX_FILL_1 ( &cqctx, 15, db_record_addr_l,
931  ( virt_to_phys ( hermon_cq->doorbell ) >> 3 ) );
932  if ( ( rc = hermon_cmd_sw2hw_cq ( hermon, cq->cqn, &cqctx ) ) != 0 ) {
933  DBGC ( hermon, "Hermon %p CQN %#lx SW2HW_CQ failed: %s\n",
934  hermon, cq->cqn, strerror ( rc ) );
935  goto err_sw2hw_cq;
936  }
937 
938  DBGC ( hermon, "Hermon %p CQN %#lx ring [%08lx,%08lx), doorbell "
939  "%08lx\n", hermon, cq->cqn, virt_to_phys ( hermon_cq->cqe ),
940  ( virt_to_phys ( hermon_cq->cqe ) + hermon_cq->cqe_size ),
941  virt_to_phys ( hermon_cq->doorbell ) );
942  ib_cq_set_drvdata ( cq, hermon_cq );
943  return 0;
944 
945  err_sw2hw_cq:
946  hermon_free_mtt ( hermon, &hermon_cq->mtt );
947  err_alloc_mtt:
948  free_phys ( hermon_cq->cqe, hermon_cq->cqe_size );
949  err_cqe:
950  free_phys ( hermon_cq->doorbell, sizeof ( hermon_cq->doorbell[0] ) );
951  err_doorbell:
952  free ( hermon_cq );
953  err_hermon_cq:
954  hermon_bitmask_free ( hermon->cq_inuse, cqn_offset, 1 );
955  err_cqn_offset:
956  return rc;
957 }
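
Several context fields above (log_cq_size here, log_rq_size/log_sq_size later) want log2 of a power-of-two queue depth, which the driver obtains as fls ( n - 1 ); the same expression also gives the round-up exponent used for the send ring in hermon_create_qp(). A quick standalone check of the idiom (fls_ is a local stand-in for iPXE's fls()):

#include <stdio.h>

/* Stand-in for fls(): 1-based index of the highest set bit (0 for x == 0) */
static int fls_ ( unsigned int x ) {
	return ( x ? ( 32 - __builtin_clz ( x ) ) : 0 );
}

int main ( void ) {
	unsigned int n;

	for ( n = 1 ; n <= 64 ; n <<= 1 )
		printf ( "depth %2u -> log size %d\n", n, fls_ ( n - 1 ) );
	/* Round-up form used for the send ring: 1 << fls ( n - 1 ) */
	printf ( "depth 5 rounds up to %d\n", 1 << fls_ ( 5 - 1 ) );
	return 0;
}
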
958 
959 /**
960  * Destroy completion queue
961  *
962  * @v ibdev Infiniband device
963  * @v cq Completion queue
964  */
965 static void hermon_destroy_cq ( struct ib_device *ibdev,
966  struct ib_completion_queue *cq ) {
967  struct hermon *hermon = ib_get_drvdata ( ibdev );
968  struct hermon_completion_queue *hermon_cq = ib_cq_get_drvdata ( cq );
969  struct hermonprm_completion_queue_context cqctx;
970  int cqn_offset;
971  int rc;
972 
973  /* Take ownership back from hardware */
974  if ( ( rc = hermon_cmd_hw2sw_cq ( hermon, cq->cqn, &cqctx ) ) != 0 ) {
975  DBGC ( hermon, "Hermon %p CQN %#lx FATAL HW2SW_CQ failed: "
976  "%s\n", hermon, cq->cqn, strerror ( rc ) );
977  /* Leak memory and return; at least we avoid corruption */
978  return;
979  }
980 
981  /* Free MTT entries */
982  hermon_free_mtt ( hermon, &hermon_cq->mtt );
983 
984  /* Free memory */
985  free_phys ( hermon_cq->cqe, hermon_cq->cqe_size );
986  free_phys ( hermon_cq->doorbell, sizeof ( hermon_cq->doorbell[0] ) );
987  free ( hermon_cq );
988 
989  /* Mark queue number as free */
990  cqn_offset = ( cq->cqn - hermon->cap.reserved_cqs );
991  hermon_bitmask_free ( hermon->cq_inuse, cqn_offset, 1 );
992 
993  ib_cq_set_drvdata ( cq, NULL );
994 }
995 
996 /***************************************************************************
997  *
998  * Queue pair operations
999  *
1000  ***************************************************************************
1001  */
1002 
1003 /**
1004  * Assign queue pair number
1005  *
1006  * @v ibdev Infiniband device
1007  * @v qp Queue pair
1008  * @ret rc Return status code
1009  */
1010 static int hermon_alloc_qpn ( struct ib_device *ibdev,
1011  struct ib_queue_pair *qp ) {
1012  struct hermon *hermon = ib_get_drvdata ( ibdev );
1013  unsigned int port_offset;
1014  int qpn_offset;
1015 
1016  /* Calculate queue pair number */
1017  port_offset = ( ibdev->port - HERMON_PORT_BASE );
1018 
1019  switch ( qp->type ) {
1020  case IB_QPT_SMI:
1021  qp->qpn = ( hermon->special_qpn_base + port_offset );
1022  return 0;
1023  case IB_QPT_GSI:
1024  qp->qpn = ( hermon->special_qpn_base + 2 + port_offset );
1025  return 0;
1026  case IB_QPT_UD:
1027  case IB_QPT_RC:
1028  case IB_QPT_ETH:
1029  /* Find a free queue pair number */
1030  qpn_offset = hermon_bitmask_alloc ( hermon->qp_inuse,
1031  HERMON_MAX_QPS, 1 );
1032  if ( qpn_offset < 0 ) {
1033  DBGC ( hermon, "Hermon %p out of queue pairs\n",
1034  hermon );
1035  return qpn_offset;
1036  }
1037  qp->qpn = ( ( random() & HERMON_QPN_RANDOM_MASK ) |
1038  ( hermon->qpn_base + qpn_offset ) );
1039  return 0;
1040  default:
1041  DBGC ( hermon, "Hermon %p unsupported QP type %d\n",
1042  hermon, qp->type );
1043  return -ENOTSUP;
1044  }
1045 }
1046 
1047 /**
1048  * Free queue pair number
1049  *
1050  * @v ibdev Infiniband device
1051  * @v qp Queue pair
1052  */
1053 static void hermon_free_qpn ( struct ib_device *ibdev,
1054  struct ib_queue_pair *qp ) {
1055  struct hermon *hermon = ib_get_drvdata ( ibdev );
1056  int qpn_offset;
1057 
1058  qpn_offset = ( ( qp->qpn & ~HERMON_QPN_RANDOM_MASK )
1059  - hermon->qpn_base );
1060  if ( qpn_offset >= 0 )
1061  hermon_bitmask_free ( hermon->qp_inuse, qpn_offset, 1 );
1062 }
1063 
1064 /**
1065  * Calculate transmission rate
1066  *
1067  * @v av Address vector
1068  * @ret hermon_rate Hermon rate
1069  */
1070 static unsigned int hermon_rate ( struct ib_address_vector *av ) {
1071  return ( ( ( av->rate >= IB_RATE_2_5 ) && ( av->rate <= IB_RATE_120 ) )
1072  ? ( av->rate + 5 ) : 0 );
1073 }
1074 
1075 /**
1076  * Calculate schedule queue
1077  *
1078  * @v ibdev Infiniband device
1079  * @v qp Queue pair
1080  * @ret sched_queue Schedule queue
1081  */
1082 static unsigned int hermon_sched_queue ( struct ib_device *ibdev,
1083  struct ib_queue_pair *qp ) {
1084  return ( ( ( qp->type == IB_QPT_SMI ) ?
1085  HERMON_SCHED_QP0 : HERMON_SCHED_DEFAULT ) |
1086  ( ( ibdev->port - 1 ) << 6 ) );
1087 }
1088 
1089 /** Queue pair transport service type map */
1090 static uint8_t hermon_qp_st[] = {
1091  [IB_QPT_SMI] = HERMON_ST_MLX,
1092  [IB_QPT_GSI] = HERMON_ST_MLX,
1093  [IB_QPT_UD] = HERMON_ST_UD,
1094  [IB_QPT_RC] = HERMON_ST_RC,
1095  [IB_QPT_ETH] = HERMON_ST_MLX,
1096 };
1097 
1098 /**
1099  * Dump queue pair context (for debugging only)
1100  *
1101  * @v hermon Hermon device
1102  * @v qp Queue pair
1103  * @ret rc Return status code
1104  */
1105 static __attribute__ (( unused )) int
1106 hermon_dump_qpctx ( struct hermon *hermon, struct ib_queue_pair *qp ) {
1107  struct hermon_queue_pair *hermon_qp = ib_qp_get_drvdata ( qp );
1108  struct hermonprm_qp_ee_state_transitions qpctx;
1109  unsigned int state;
1110  int rc;
1111 
1112  /* Do nothing unless debugging is enabled */
1113  if ( ! DBG_LOG )
1114  return 0;
1115 
1116  /* Dump queue pair context */
1117  memset ( &qpctx, 0, sizeof ( qpctx ) );
1118  if ( ( rc = hermon_cmd_query_qp ( hermon, qp->qpn, &qpctx ) ) != 0 ) {
1119  DBGC ( hermon, "Hermon %p QPN %#lx QUERY_QP failed: %s\n",
1120  hermon, qp->qpn, strerror ( rc ) );
1121  return rc;
1122  }
1123  state = MLX_GET ( &qpctx, qpc_eec_data.state );
1124  if ( state != hermon_qp->state ) {
1125  DBGC ( hermon, "Hermon %p QPN %#lx state %d unexpected "
1126  "(should be %d)\n",
1127  hermon, qp->qpn, state, hermon_qp->state );
1128  }
1129  DBGC ( hermon, "Hermon %p QPN %#lx state %d context:\n",
1130  hermon, qp->qpn, state );
1131  DBGC_HDA ( hermon, 0, &qpctx.u.dwords[2], ( sizeof ( qpctx ) - 8 ) );
1132 
1133  return 0;
1134 }
1135 
1136 /**
1137  * Create queue pair
1138  *
1139  * @v ibdev Infiniband device
1140  * @v qp Queue pair
1141  * @ret rc Return status code
1142  */
1143 static int hermon_create_qp ( struct ib_device *ibdev,
1144  struct ib_queue_pair *qp ) {
1145  struct hermon *hermon = ib_get_drvdata ( ibdev );
1146  struct hermon_queue_pair *hermon_qp;
1147  struct hermonprm_qp_ee_state_transitions qpctx;
1148  struct hermonprm_wqe_segment_data_ptr *data;
1149  unsigned int i;
1150  int rc;
1151 
1152  /* Calculate queue pair number */
1153  if ( ( rc = hermon_alloc_qpn ( ibdev, qp ) ) != 0 )
1154  goto err_alloc_qpn;
1155 
1156  /* Allocate control structures */
1157  hermon_qp = zalloc ( sizeof ( *hermon_qp ) );
1158  if ( ! hermon_qp ) {
1159  DBGC ( hermon, "Hermon %p QPN %#lx could not allocate QP\n",
1160  hermon, qp->qpn );
1161  rc = -ENOMEM;
1162  goto err_hermon_qp;
1163  }
1164 
1165  /* Allocate doorbells */
1166  hermon_qp->recv.doorbell =
1167  malloc_phys ( sizeof ( hermon_qp->recv.doorbell[0] ),
1168  sizeof ( hermon_qp->recv.doorbell[0] ) );
1169  if ( ! hermon_qp->recv.doorbell ) {
1170  DBGC ( hermon, "Hermon %p QPN %#lx could not allocate "
1171  "doorbell\n", hermon, qp->qpn );
1172  rc = -ENOMEM;
1173  goto err_recv_doorbell;
1174  }
1175  memset ( hermon_qp->recv.doorbell, 0,
1176  sizeof ( hermon_qp->recv.doorbell[0] ) );
1177  hermon_qp->send.doorbell =
1178  ( hermon->uar + HERMON_UAR_NON_EQ_PAGE * HERMON_PAGE_SIZE +
1179  HERMON_DB_POST_SND_OFFSET );
1180 
1181  /* Allocate work queue buffer */
1182  hermon_qp->send.num_wqes = ( qp->send.num_wqes /* headroom */ + 1 +
1183  ( 2048 / sizeof ( hermon_qp->send.wqe[0] ) ) );
1184  hermon_qp->send.num_wqes =
1185  ( 1 << fls ( hermon_qp->send.num_wqes - 1 ) ); /* round up */
1186  hermon_qp->send.wqe_size = ( hermon_qp->send.num_wqes *
1187  sizeof ( hermon_qp->send.wqe[0] ) );
1188  hermon_qp->recv.wqe_size = ( qp->recv.num_wqes *
1189  sizeof ( hermon_qp->recv.wqe[0] ) );
1190  if ( ( qp->type == IB_QPT_SMI ) || ( qp->type == IB_QPT_GSI ) ||
1191  ( qp->type == IB_QPT_UD ) ) {
1192  hermon_qp->recv.grh_size = ( qp->recv.num_wqes *
1193  sizeof ( hermon_qp->recv.grh[0] ));
1194  }
1195  hermon_qp->wqe_size = ( hermon_qp->send.wqe_size +
1196  hermon_qp->recv.wqe_size +
1197  hermon_qp->recv.grh_size );
1198  hermon_qp->wqe = malloc_phys ( hermon_qp->wqe_size,
1199  sizeof ( hermon_qp->send.wqe[0] ) );
1200  if ( ! hermon_qp->wqe ) {
1201  DBGC ( hermon, "Hermon %p QPN %#lx could not allocate WQEs\n",
1202  hermon, qp->qpn );
1203  rc = -ENOMEM;
1204  goto err_alloc_wqe;
1205  }
1206  hermon_qp->send.wqe = hermon_qp->wqe;
1207  hermon_qp->recv.wqe = ( hermon_qp->wqe + hermon_qp->send.wqe_size );
1208  if ( hermon_qp->recv.grh_size ) {
1209  hermon_qp->recv.grh = ( hermon_qp->wqe +
1210  hermon_qp->send.wqe_size +
1211  hermon_qp->recv.wqe_size );
1212  }
1213 
1214  /* Initialise work queue entries */
1215  memset ( hermon_qp->send.wqe, 0xff, hermon_qp->send.wqe_size );
1216  memset ( hermon_qp->recv.wqe, 0, hermon_qp->recv.wqe_size );
1217  data = &hermon_qp->recv.wqe[0].recv.data[0];
1218  for ( i = 0 ; i < ( hermon_qp->recv.wqe_size / sizeof ( *data ) ); i++){
1219  MLX_FILL_1 ( data, 1, l_key, HERMON_INVALID_LKEY );
1220  data++;
1221  }
1222 
1223  /* Allocate MTT entries */
1224  if ( ( rc = hermon_alloc_mtt ( hermon, hermon_qp->wqe,
1225  hermon_qp->wqe_size,
1226  &hermon_qp->mtt ) ) != 0 ) {
1227  DBGC ( hermon, "Hermon %p QPN %#lx could not allocate MTTs: "
1228  "%s\n", hermon, qp->qpn, strerror ( rc ) );
1229  goto err_alloc_mtt;
1230  }
1231 
1232  /* Transition queue to INIT state */
1233  memset ( &qpctx, 0, sizeof ( qpctx ) );
1234  MLX_FILL_2 ( &qpctx, 2,
1235  qpc_eec_data.pm_state, HERMON_PM_STATE_MIGRATED,
1236  qpc_eec_data.st, hermon_qp_st[qp->type] );
1237  MLX_FILL_1 ( &qpctx, 3, qpc_eec_data.pd, HERMON_GLOBAL_PD );
1238  MLX_FILL_4 ( &qpctx, 4,
1239  qpc_eec_data.log_rq_size, fls ( qp->recv.num_wqes - 1 ),
1240  qpc_eec_data.log_rq_stride,
1241  ( fls ( sizeof ( hermon_qp->recv.wqe[0] ) - 1 ) - 4 ),
1242  qpc_eec_data.log_sq_size,
1243  fls ( hermon_qp->send.num_wqes - 1 ),
1244  qpc_eec_data.log_sq_stride,
1245  ( fls ( sizeof ( hermon_qp->send.wqe[0] ) - 1 ) - 4 ) );
1246  MLX_FILL_1 ( &qpctx, 5,
1247  qpc_eec_data.usr_page, HERMON_UAR_NON_EQ_PAGE );
1248  MLX_FILL_1 ( &qpctx, 33, qpc_eec_data.cqn_snd, qp->send.cq->cqn );
1249  MLX_FILL_4 ( &qpctx, 38,
1250  qpc_eec_data.rre, 1,
1251  qpc_eec_data.rwe, 1,
1252  qpc_eec_data.rae, 1,
1253  qpc_eec_data.page_offset,
1254  ( hermon_qp->mtt.page_offset >> 6 ) );
1255  MLX_FILL_1 ( &qpctx, 41, qpc_eec_data.cqn_rcv, qp->recv.cq->cqn );
1256  MLX_FILL_H ( &qpctx, 42, qpc_eec_data.db_record_addr_h,
1257  virt_to_phys ( hermon_qp->recv.doorbell ) );
1258  MLX_FILL_1 ( &qpctx, 43, qpc_eec_data.db_record_addr_l,
1259  ( virt_to_phys ( hermon_qp->recv.doorbell ) >> 2 ) );
1260  MLX_FILL_H ( &qpctx, 52, qpc_eec_data.mtt_base_addr_h,
1261  hermon_qp->mtt.mtt_base_addr );
1262  MLX_FILL_1 ( &qpctx, 53, qpc_eec_data.mtt_base_addr_l,
1263  ( hermon_qp->mtt.mtt_base_addr >> 3 ) );
1264  if ( ( rc = hermon_cmd_rst2init_qp ( hermon, qp->qpn,
1265  &qpctx ) ) != 0 ) {
1266  DBGC ( hermon, "Hermon %p QPN %#lx RST2INIT_QP failed: %s\n",
1267  hermon, qp->qpn, strerror ( rc ) );
1268  goto err_rst2init_qp;
1269  }
1270  hermon_qp->state = HERMON_QP_ST_INIT;
1271 
1272  DBGC ( hermon, "Hermon %p QPN %#lx send ring [%08lx,%08lx), doorbell "
1273  "%08lx\n", hermon, qp->qpn,
1274  virt_to_phys ( hermon_qp->send.wqe ),
1275  ( virt_to_phys ( hermon_qp->send.wqe ) +
1276  hermon_qp->send.wqe_size ),
1277  virt_to_phys ( hermon_qp->send.doorbell ) );
1278  DBGC ( hermon, "Hermon %p QPN %#lx receive ring [%08lx,%08lx), "
1279  "doorbell %08lx\n", hermon, qp->qpn,
1280  virt_to_phys ( hermon_qp->recv.wqe ),
1281  ( virt_to_phys ( hermon_qp->recv.wqe ) +
1282  hermon_qp->recv.wqe_size ),
1283  virt_to_phys ( hermon_qp->recv.doorbell ) );
1284  DBGC ( hermon, "Hermon %p QPN %#lx send CQN %#lx receive CQN %#lx\n",
1285  hermon, qp->qpn, qp->send.cq->cqn, qp->recv.cq->cqn );
1286  ib_qp_set_drvdata ( qp, hermon_qp );
1287  return 0;
1288 
1289  hermon_cmd_2rst_qp ( hermon, qp->qpn );
1290  err_rst2init_qp:
1291  hermon_free_mtt ( hermon, &hermon_qp->mtt );
1292  err_alloc_mtt:
1293  free_phys ( hermon_qp->wqe, hermon_qp->wqe_size );
1294  err_alloc_wqe:
1295  free_phys ( hermon_qp->recv.doorbell,
1296  sizeof ( hermon_qp->recv.doorbell[0] ) );
1297  err_recv_doorbell:
1298  free ( hermon_qp );
1299  err_hermon_qp:
1300  hermon_free_qpn ( ibdev, qp );
1301  err_alloc_qpn:
1302  return rc;
1303 }
1304 
1305 /**
1306  * Modify queue pair
1307  *
1308  * @v ibdev Infiniband device
1309  * @v qp Queue pair
1310  * @ret rc Return status code
1311  */
1312 static int hermon_modify_qp ( struct ib_device *ibdev,
1313  struct ib_queue_pair *qp ) {
1314  struct hermon *hermon = ib_get_drvdata ( ibdev );
1315  struct hermon_queue_pair *hermon_qp = ib_qp_get_drvdata ( qp );
1316  struct hermonprm_qp_ee_state_transitions qpctx;
1317  int rc;
1318 
1319  /* Transition queue to RTR state, if applicable */
1320  if ( hermon_qp->state < HERMON_QP_ST_RTR ) {
1321  memset ( &qpctx, 0, sizeof ( qpctx ) );
1322  MLX_FILL_2 ( &qpctx, 4,
1323  qpc_eec_data.mtu,
1324  ( ( qp->type == IB_QPT_ETH ) ?
1325  HERMON_MTU_ETH : HERMON_MTU_2048 ),
1326  qpc_eec_data.msg_max, 31 );
1327  MLX_FILL_1 ( &qpctx, 7,
1328  qpc_eec_data.remote_qpn_een, qp->av.qpn );
1329  MLX_FILL_1 ( &qpctx, 9,
1330  qpc_eec_data.primary_address_path.rlid,
1331  qp->av.lid );
1332  MLX_FILL_1 ( &qpctx, 10,
1333  qpc_eec_data.primary_address_path.max_stat_rate,
1334  hermon_rate ( &qp->av ) );
1335  memcpy ( &qpctx.u.dwords[12], &qp->av.gid,
1336  sizeof ( qp->av.gid ) );
1337  MLX_FILL_1 ( &qpctx, 16,
1338  qpc_eec_data.primary_address_path.sched_queue,
1339  hermon_sched_queue ( ibdev, qp ) );
1340  MLX_FILL_1 ( &qpctx, 39,
1341  qpc_eec_data.next_rcv_psn, qp->recv.psn );
1342  if ( ( rc = hermon_cmd_init2rtr_qp ( hermon, qp->qpn,
1343  &qpctx ) ) != 0 ) {
1344  DBGC ( hermon, "Hermon %p QPN %#lx INIT2RTR_QP failed:"
1345  " %s\n", hermon, qp->qpn, strerror ( rc ) );
1346  return rc;
1347  }
1348  hermon_qp->state = HERMON_QP_ST_RTR;
1349  }
1350 
1351  /* Transition queue to RTS state */
1352  if ( hermon_qp->state < HERMON_QP_ST_RTS ) {
1353  memset ( &qpctx, 0, sizeof ( qpctx ) );
1354  MLX_FILL_1 ( &qpctx, 10,
1355  qpc_eec_data.primary_address_path.ack_timeout,
1356  14 /* 4.096us * 2^(14) = 67ms */ );
1357  MLX_FILL_2 ( &qpctx, 30,
1358  qpc_eec_data.retry_count, HERMON_RETRY_MAX,
1359  qpc_eec_data.rnr_retry, HERMON_RETRY_MAX );
1360  MLX_FILL_1 ( &qpctx, 32,
1361  qpc_eec_data.next_send_psn, qp->send.psn );
1362  if ( ( rc = hermon_cmd_rtr2rts_qp ( hermon, qp->qpn,
1363  &qpctx ) ) != 0 ) {
1364  DBGC ( hermon, "Hermon %p QPN %#lx RTR2RTS_QP failed: "
1365  "%s\n", hermon, qp->qpn, strerror ( rc ) );
1366  return rc;
1367  }
1368  hermon_qp->state = HERMON_QP_ST_RTS;
1369  }
1370 
1371  /* Update parameters in RTS state */
1372  memset ( &qpctx, 0, sizeof ( qpctx ) );
1373  MLX_FILL_1 ( &qpctx, 0, opt_param_mask, HERMON_QP_OPT_PARAM_QKEY );
1374  MLX_FILL_1 ( &qpctx, 44, qpc_eec_data.q_key, qp->qkey );
1375  if ( ( rc = hermon_cmd_rts2rts_qp ( hermon, qp->qpn, &qpctx ) ) != 0 ){
1376  DBGC ( hermon, "Hermon %p QPN %#lx RTS2RTS_QP failed: %s\n",
1377  hermon, qp->qpn, strerror ( rc ) );
1378  return rc;
1379  }
1380 
1381  return 0;
1382 }
1383 
1384 /**
1385  * Destroy queue pair
1386  *
1387  * @v ibdev Infiniband device
1388  * @v qp Queue pair
1389  */
1390 static void hermon_destroy_qp ( struct ib_device *ibdev,
1391  struct ib_queue_pair *qp ) {
1392  struct hermon *hermon = ib_get_drvdata ( ibdev );
1393  struct hermon_queue_pair *hermon_qp = ib_qp_get_drvdata ( qp );
1394  int rc;
1395 
1396  /* Take ownership back from hardware */
1397  if ( ( rc = hermon_cmd_2rst_qp ( hermon, qp->qpn ) ) != 0 ) {
1398  DBGC ( hermon, "Hermon %p QPN %#lx FATAL 2RST_QP failed: %s\n",
1399  hermon, qp->qpn, strerror ( rc ) );
1400  /* Leak memory and return; at least we avoid corruption */
1401  return;
1402  }
1403 
1404  /* Free MTT entries */
1405  hermon_free_mtt ( hermon, &hermon_qp->mtt );
1406 
1407  /* Free memory */
1408  free_phys ( hermon_qp->wqe, hermon_qp->wqe_size );
1409  free_phys ( hermon_qp->recv.doorbell,
1410  sizeof ( hermon_qp->recv.doorbell[0] ) );
1411  free ( hermon_qp );
1412 
1413  /* Mark queue number as free */
1414  hermon_free_qpn ( ibdev, qp );
1415 
1416  ib_qp_set_drvdata ( qp, NULL );
1417 }
1418 
1419 /***************************************************************************
1420  *
1421  * Work request operations
1422  *
1423  ***************************************************************************
1424  */
1425 
1426 /**
1427  * Construct NOP send work queue entry
1428  *
1429  * @v ibdev Infiniband device
1430  * @v qp Queue pair
1431  * @v dest Destination address vector
1432  * @v iobuf I/O buffer
1433  * @v wqe Send work queue entry
1434  * @ret opcode Control opcode
1435  */
1436 static __attribute__ (( unused )) unsigned int
1437 hermon_fill_nop_send_wqe ( struct ib_device *ibdev __unused,
1438  struct ib_queue_pair *qp __unused,
1439  struct ib_address_vector *dest __unused,
1440  struct io_buffer *iobuf __unused,
1441  union hermon_send_wqe *wqe ) {
1442 
1443  MLX_FILL_1 ( &wqe->ctrl, 1, ds, ( sizeof ( wqe->ctrl ) / 16 ) );
1444  MLX_FILL_1 ( &wqe->ctrl, 2, c, 0x03 /* generate completion */ );
1445  return HERMON_OPCODE_NOP;
1446 }
1447 
1448 /**
1449  * Construct UD send work queue entry
1450  *
1451  * @v ibdev Infiniband device
1452  * @v qp Queue pair
1453  * @v dest Destination address vector
1454  * @v iobuf I/O buffer
1455  * @v wqe Send work queue entry
1456  * @ret opcode Control opcode
1457  */
1458 static unsigned int
1459 hermon_fill_ud_send_wqe ( struct ib_device *ibdev,
1460  struct ib_queue_pair *qp __unused,
1461  struct ib_address_vector *dest,
1462  struct io_buffer *iobuf,
1463  union hermon_send_wqe *wqe ) {
1464  struct hermon *hermon = ib_get_drvdata ( ibdev );
1465 
1466  MLX_FILL_1 ( &wqe->ud.ctrl, 1, ds,
1467  ( ( offsetof ( typeof ( wqe->ud ), data[1] ) / 16 ) ) );
1468  MLX_FILL_1 ( &wqe->ud.ctrl, 2, c, 0x03 /* generate completion */ );
1469  MLX_FILL_2 ( &wqe->ud.ud, 0,
1470  ud_address_vector.pd, HERMON_GLOBAL_PD,
1471  ud_address_vector.port_number, ibdev->port );
1472  MLX_FILL_2 ( &wqe->ud.ud, 1,
1473  ud_address_vector.rlid, dest->lid,
1474  ud_address_vector.g, dest->gid_present );
1475  MLX_FILL_1 ( &wqe->ud.ud, 2,
1476  ud_address_vector.max_stat_rate, hermon_rate ( dest ) );
1477  MLX_FILL_1 ( &wqe->ud.ud, 3, ud_address_vector.sl, dest->sl );
1478  memcpy ( &wqe->ud.ud.u.dwords[4], &dest->gid, sizeof ( dest->gid ) );
1479  MLX_FILL_1 ( &wqe->ud.ud, 8, destination_qp, dest->qpn );
1480  MLX_FILL_1 ( &wqe->ud.ud, 9, q_key, dest->qkey );
1481  MLX_FILL_1 ( &wqe->ud.data[0], 0, byte_count, iob_len ( iobuf ) );
1482  MLX_FILL_1 ( &wqe->ud.data[0], 1, l_key, hermon->lkey );
1483  MLX_FILL_H ( &wqe->ud.data[0], 2,
1484  local_address_h, virt_to_bus ( iobuf->data ) );
1485  MLX_FILL_1 ( &wqe->ud.data[0], 3,
1486  local_address_l, virt_to_bus ( iobuf->data ) );
1487  return HERMON_OPCODE_SEND;
1488 }
1489 
1490 /**
1491  * Construct MLX send work queue entry
1492  *
1493  * @v ibdev Infiniband device
1494  * @v qp Queue pair
1495  * @v dest Destination address vector
1496  * @v iobuf I/O buffer
1497  * @v wqe Send work queue entry
1498  * @ret opcode Control opcode
1499  */
1500 static unsigned int
1501 hermon_fill_mlx_send_wqe ( struct ib_device *ibdev,
1502  struct ib_queue_pair *qp,
1503  struct ib_address_vector *dest,
1504  struct io_buffer *iobuf,
1505  union hermon_send_wqe *wqe ) {
1506  struct hermon *hermon = ib_get_drvdata ( ibdev );
1507  struct io_buffer headers;
1508 
1509  /* Construct IB headers */
1510  iob_populate ( &headers, &wqe->mlx.headers, 0,
1511  sizeof ( wqe->mlx.headers ) );
1512  iob_reserve ( &headers, sizeof ( wqe->mlx.headers ) );
1513  ib_push ( ibdev, &headers, qp, iob_len ( iobuf ), dest );
1514 
1515  /* Fill work queue entry */
1516  MLX_FILL_1 ( &wqe->mlx.ctrl, 1, ds,
1517  ( ( offsetof ( typeof ( wqe->mlx ), data[2] ) / 16 ) ) );
1518  MLX_FILL_5 ( &wqe->mlx.ctrl, 2,
1519  c, 0x03 /* generate completion */,
1520  icrc, 0 /* generate ICRC */,
1521  max_statrate, hermon_rate ( dest ),
1522  slr, 0,
1523  v15, ( ( qp->ext_qpn == IB_QPN_SMI ) ? 1 : 0 ) );
1524  MLX_FILL_1 ( &wqe->mlx.ctrl, 3, rlid, dest->lid );
1525  MLX_FILL_1 ( &wqe->mlx.data[0], 0,
1526  byte_count, iob_len ( &headers ) );
1527  MLX_FILL_1 ( &wqe->mlx.data[0], 1, l_key, hermon->lkey );
1528  MLX_FILL_H ( &wqe->mlx.data[0], 2,
1529  local_address_h, virt_to_bus ( headers.data ) );
1530  MLX_FILL_1 ( &wqe->mlx.data[0], 3,
1531  local_address_l, virt_to_bus ( headers.data ) );
1532  MLX_FILL_1 ( &wqe->mlx.data[1], 0,
1533  byte_count, ( iob_len ( iobuf ) + 4 /* ICRC */ ) );
1534  MLX_FILL_1 ( &wqe->mlx.data[1], 1, l_key, hermon->lkey );
1535  MLX_FILL_H ( &wqe->mlx.data[1], 2,
1536  local_address_h, virt_to_bus ( iobuf->data ) );
1537  MLX_FILL_1 ( &wqe->mlx.data[1], 3,
1538  local_address_l, virt_to_bus ( iobuf->data ) );
1539  return HERMON_OPCODE_SEND;
1540 }
1541 
1542 /**
1543  * Construct RC send work queue entry
1544  *
1545  * @v ibdev Infiniband device
1546  * @v qp Queue pair
1547  * @v dest Destination address vector
1548  * @v iobuf I/O buffer
1549  * @v wqe Send work queue entry
1550  * @ret opcode Control opcode
1551  */
1552 static unsigned int
1553 hermon_fill_rc_send_wqe ( struct ib_device *ibdev,
1554  struct ib_queue_pair *qp __unused,
1555  struct ib_address_vector *dest __unused,
1556  struct io_buffer *iobuf,
1557  union hermon_send_wqe *wqe ) {
1558  struct hermon *hermon = ib_get_drvdata ( ibdev );
1559 
1560  MLX_FILL_1 ( &wqe->rc.ctrl, 1, ds,
1561  ( ( offsetof ( typeof ( wqe->rc ), data[1] ) / 16 ) ) );
1562  MLX_FILL_1 ( &wqe->rc.ctrl, 2, c, 0x03 /* generate completion */ );
1563  MLX_FILL_1 ( &wqe->rc.data[0], 0, byte_count, iob_len ( iobuf ) );
1564  MLX_FILL_1 ( &wqe->rc.data[0], 1, l_key, hermon->lkey );
1565  MLX_FILL_H ( &wqe->rc.data[0], 2,
1566  local_address_h, virt_to_bus ( iobuf->data ) );
1567  MLX_FILL_1 ( &wqe->rc.data[0], 3,
1568  local_address_l, virt_to_bus ( iobuf->data ) );
1569  return HERMON_OPCODE_SEND;
1570 }
1571 
1572 /**
1573  * Construct Ethernet send work queue entry
1574  *
1575  * @v ibdev Infiniband device
1576  * @v qp Queue pair
1577  * @v dest Destination address vector
1578  * @v iobuf I/O buffer
1579  * @v wqe Send work queue entry
1580  * @ret opcode Control opcode
1581  */
1582 static unsigned int
1583 hermon_fill_eth_send_wqe ( struct ib_device *ibdev,
1584  struct ib_queue_pair *qp __unused,
1585  struct ib_address_vector *dest __unused,
1586  struct io_buffer *iobuf,
1587  union hermon_send_wqe *wqe ) {
1588  struct hermon *hermon = ib_get_drvdata ( ibdev );
1589 
1590  /* Fill work queue entry */
1591  MLX_FILL_1 ( &wqe->eth.ctrl, 1, ds,
1592  ( ( offsetof ( typeof ( wqe->mlx ), data[1] ) / 16 ) ) );
1593  MLX_FILL_2 ( &wqe->eth.ctrl, 2,
1594  c, 0x03 /* generate completion */,
1595  s, 1 /* inhibit ICRC */ );
1596  MLX_FILL_1 ( &wqe->eth.data[0], 0,
1597  byte_count, iob_len ( iobuf ) );
1598  MLX_FILL_1 ( &wqe->eth.data[0], 1, l_key, hermon->lkey );
1599  MLX_FILL_H ( &wqe->eth.data[0], 2,
1600  local_address_h, virt_to_bus ( iobuf->data ) );
1601  MLX_FILL_1 ( &wqe->eth.data[0], 3,
1602  local_address_l, virt_to_bus ( iobuf->data ) );
1603  return HERMON_OPCODE_SEND;
1604 }
1605 
1606 /** Work queue entry constructors */
1607 static unsigned int
1608 ( * hermon_fill_send_wqe[] ) ( struct ib_device *ibdev,
1609  struct ib_queue_pair *qp,
1610  struct ib_address_vector *dest,
1611  struct io_buffer *iobuf,
1612  union hermon_send_wqe *wqe ) = {
1613  [IB_QPT_SMI] = hermon_fill_mlx_send_wqe,
1614  [IB_QPT_GSI] = hermon_fill_mlx_send_wqe,
1615  [IB_QPT_UD] = hermon_fill_ud_send_wqe,
1616  [IB_QPT_RC] = hermon_fill_rc_send_wqe,
1617  [IB_QPT_ETH] = hermon_fill_eth_send_wqe,
1618 };
1619 
1620 /**
1621  * Post send work queue entry
1622  *
1623  * @v ibdev Infiniband device
1624  * @v qp Queue pair
1625  * @v dest Destination address vector
1626  * @v iobuf I/O buffer
1627  * @ret rc Return status code
1628  */
1629 static int hermon_post_send ( struct ib_device *ibdev,
1630  struct ib_queue_pair *qp,
1631  struct ib_address_vector *dest,
1632  struct io_buffer *iobuf ) {
1633  struct hermon *hermon = ib_get_drvdata ( ibdev );
1634  struct hermon_queue_pair *hermon_qp = ib_qp_get_drvdata ( qp );
1635  struct ib_work_queue *wq = &qp->send;
1636  struct hermon_send_work_queue *hermon_send_wq = &hermon_qp->send;
1637  union hermon_send_wqe *wqe;
1638  union hermonprm_doorbell_register db_reg;
1639  unsigned long wqe_idx_mask;
1640  unsigned long wqe_idx;
1641  unsigned int owner;
1642  unsigned int opcode;
1643 
1644  /* Allocate work queue entry */
1645  wqe_idx = ( wq->next_idx & ( hermon_send_wq->num_wqes - 1 ) );
1646  owner = ( ( wq->next_idx & hermon_send_wq->num_wqes ) ? 1 : 0 );
1647  wqe_idx_mask = ( wq->num_wqes - 1 );
1648  if ( wq->iobufs[ wqe_idx & wqe_idx_mask ] ) {
1649  DBGC ( hermon, "Hermon %p QPN %#lx send queue full",
1650  hermon, qp->qpn );
1651  return -ENOBUFS;
1652  }
1653  wq->iobufs[ wqe_idx & wqe_idx_mask ] = iobuf;
1654  wqe = &hermon_send_wq->wqe[wqe_idx];
1655 
1656  /* Construct work queue entry */
1657  memset ( ( ( ( void * ) wqe ) + 4 /* avoid ctrl.owner */ ), 0,
1658  ( sizeof ( *wqe ) - 4 ) );
1659  assert ( qp->type < ( sizeof ( hermon_fill_send_wqe ) /
1660  sizeof ( hermon_fill_send_wqe[0] ) ) );
1661  assert ( hermon_fill_send_wqe[qp->type] != NULL );
1662  opcode = hermon_fill_send_wqe[qp->type] ( ibdev, qp, dest, iobuf, wqe );
1663  barrier();
1664  MLX_FILL_2 ( &wqe->ctrl, 0,
1665  opcode, opcode,
1666  owner, owner );
1667  DBGCP ( hermon, "Hermon %p QPN %#lx posting send WQE %#lx:\n",
1668  hermon, qp->qpn, wqe_idx );
1669  DBGCP_HDA ( hermon, virt_to_phys ( wqe ), wqe, sizeof ( *wqe ) );
1670 
1671  /* Ring doorbell register */
1672  MLX_FILL_1 ( &db_reg.send, 0, qn, qp->qpn );
1673  barrier();
1674  writel ( db_reg.dword[0], hermon_send_wq->doorbell );
1675 
1676  /* Update work queue's index */
1677  wq->next_idx++;
1678 
1679  return 0;
1680 }
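
The send ring is sized to a power of two that may exceed the software ring, so the hardware slot, the ownership bit and the iobuf slot are all derived from the same running next_idx counter, as above. A standalone sketch of that indexing (the ring sizes are arbitrary illustrative values):

#include <stdio.h>

int main ( void ) {
	unsigned long next_idx;
	unsigned int hw_wqes = 8;	/* hardware ring size (power of two) */
	unsigned int sw_wqes = 4;	/* software ring size (power of two) */

	for ( next_idx = 0 ; next_idx < 20 ; next_idx++ ) {
		unsigned long wqe_idx = ( next_idx & ( hw_wqes - 1 ) );
		unsigned int owner = ( ( next_idx & hw_wqes ) ? 1 : 0 );
		unsigned long sw_idx = ( wqe_idx & ( sw_wqes - 1 ) );
		/* owner flips every time the hardware ring wraps */
		printf ( "idx %2lu -> hw slot %lu, owner %u, iobuf slot %lu\n",
			 next_idx, wqe_idx, owner, sw_idx );
	}
	return 0;
}
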
1681 
1682 /**
1683  * Post receive work queue entry
1684  *
1685  * @v ibdev Infiniband device
1686  * @v qp Queue pair
1687  * @v iobuf I/O buffer
1688  * @ret rc Return status code
1689  */
1690 static int hermon_post_recv ( struct ib_device *ibdev,
1691  struct ib_queue_pair *qp,
1692  struct io_buffer *iobuf ) {
1693  struct hermon *hermon = ib_get_drvdata ( ibdev );
1694  struct hermon_queue_pair *hermon_qp = ib_qp_get_drvdata ( qp );
1695  struct ib_work_queue *wq = &qp->recv;
1696  struct hermon_recv_work_queue *hermon_recv_wq = &hermon_qp->recv;
1697  struct hermonprm_recv_wqe *wqe;
1698  struct hermonprm_wqe_segment_data_ptr *data;
1699  struct ib_global_route_header *grh;
1700  unsigned int wqe_idx_mask;
1701 
1702  /* Allocate work queue entry */
1703  wqe_idx_mask = ( wq->num_wqes - 1 );
1704  if ( wq->iobufs[wq->next_idx & wqe_idx_mask] ) {
1705  DBGC ( hermon, "Hermon %p QPN %#lx receive queue full",
1706  hermon, qp->qpn );
1707  return -ENOBUFS;
1708  }
1709  wq->iobufs[wq->next_idx & wqe_idx_mask] = iobuf;
1710  wqe = &hermon_recv_wq->wqe[wq->next_idx & wqe_idx_mask].recv;
1711 
1712  /* Construct work queue entry */
1713  data = &wqe->data[0];
1714  if ( hermon_qp->recv.grh ) {
1715  grh = &hermon_qp->recv.grh[wq->next_idx & wqe_idx_mask];
1716  MLX_FILL_1 ( data, 0, byte_count, sizeof ( *grh ) );
1717  MLX_FILL_1 ( data, 1, l_key, hermon->lkey );
1718  MLX_FILL_H ( data, 2, local_address_h, virt_to_bus ( grh ) );
1719  MLX_FILL_1 ( data, 3, local_address_l, virt_to_bus ( grh ) );
1720  data++;
1721  }
1722  MLX_FILL_1 ( data, 0, byte_count, iob_tailroom ( iobuf ) );
1723  MLX_FILL_1 ( data, 1, l_key, hermon->lkey );
1724  MLX_FILL_H ( data, 2, local_address_h, virt_to_bus ( iobuf->data ) );
1725  MLX_FILL_1 ( data, 3, local_address_l, virt_to_bus ( iobuf->data ) );
1726 
1727  /* Update work queue's index */
1728  wq->next_idx++;
1729 
1730  /* Update doorbell record */
1731  barrier();
1732  MLX_FILL_1 ( hermon_recv_wq->doorbell, 0, receive_wqe_counter,
1733  ( wq->next_idx & 0xffff ) );
1734 
1735  return 0;
1736 }
1737 
1738 /**
1739  * Handle completion
1740  *
1741  * @v ibdev Infiniband device
1742  * @v cq Completion queue
1743  * @v cqe Hardware completion queue entry
1744  * @ret rc Return status code
1745  */
1746 static int hermon_complete ( struct ib_device *ibdev,
1747  struct ib_completion_queue *cq,
1748  union hermonprm_completion_entry *cqe ) {
1749  struct hermon *hermon = ib_get_drvdata ( ibdev );
1750  struct hermon_queue_pair *hermon_qp;
1751  struct ib_work_queue *wq;
1752  struct ib_queue_pair *qp;
1753  struct io_buffer *iobuf;
1754  struct ib_address_vector recv_dest;
1755  struct ib_address_vector recv_source;
1756  struct ib_global_route_header *grh;
1757  struct ib_address_vector *source;
1758  unsigned int opcode;
1759  unsigned long qpn;
1760  int is_send;
1761  unsigned long wqe_idx;
1762  unsigned long wqe_idx_mask;
1763  size_t len;
1764  int rc = 0;
1765 
1766  /* Parse completion */
1767  qpn = MLX_GET ( &cqe->normal, qpn );
1768  is_send = MLX_GET ( &cqe->normal, s_r );
1769  opcode = MLX_GET ( &cqe->normal, opcode );
1770  if ( opcode >= HERMON_OPCODE_RECV_ERROR ) {
1771  /* "s" field is not valid for error opcodes */
1772  is_send = ( opcode == HERMON_OPCODE_SEND_ERROR );
1773  DBGC ( hermon, "Hermon %p CQN %#lx syndrome %x vendor %x\n",
1774  hermon, cq->cqn, MLX_GET ( &cqe->error, syndrome ),
1775  MLX_GET ( &cqe->error, vendor_error_syndrome ) );
1776  rc = -EIO;
1777  /* Don't return immediately; propagate error to completer */
1778  }
1779 
1780  /* Identify work queue */
1781  wq = ib_find_wq ( cq, qpn, is_send );
1782  if ( ! wq ) {
1783  DBGC ( hermon, "Hermon %p CQN %#lx unknown %s QPN %#lx\n",
1784  hermon, cq->cqn, ( is_send ? "send" : "recv" ), qpn );
1785  return -EIO;
1786  }
1787  qp = wq->qp;
1788  hermon_qp = ib_qp_get_drvdata ( qp );
1789 
1790  /* Identify work queue entry */
1791  wqe_idx = MLX_GET ( &cqe->normal, wqe_counter );
1792  wqe_idx_mask = ( wq->num_wqes - 1 );
1793  DBGCP ( hermon, "Hermon %p CQN %#lx QPN %#lx %s WQE %#lx completed:\n",
1794  hermon, cq->cqn, qp->qpn, ( is_send ? "send" : "recv" ),
1795  wqe_idx );
1796  DBGCP_HDA ( hermon, virt_to_phys ( cqe ), cqe, sizeof ( *cqe ) );
1797 
1798  /* Identify I/O buffer */
1799  iobuf = wq->iobufs[ wqe_idx & wqe_idx_mask ];
1800  if ( ! iobuf ) {
1801  DBGC ( hermon, "Hermon %p CQN %#lx QPN %#lx empty %s WQE "
1802  "%#lx\n", hermon, cq->cqn, qp->qpn,
1803  ( is_send ? "send" : "recv" ), wqe_idx );
1804  return -EIO;
1805  }
1806  wq->iobufs[ wqe_idx & wqe_idx_mask ] = NULL;
1807 
1808  if ( is_send ) {
1809  /* Hand off to completion handler */
1810  ib_complete_send ( ibdev, qp, iobuf, rc );
1811  } else if ( rc != 0 ) {
1812  /* Dump queue state (for debugging) */
1813  hermon_dump_qpctx ( hermon, qp );
1814  /* Hand off to completion handler */
1815  ib_complete_recv ( ibdev, qp, NULL, NULL, iobuf, rc );
1816  } else {
1817  /* Set received length */
1818  len = MLX_GET ( &cqe->normal, byte_cnt );
1819  memset ( &recv_dest, 0, sizeof ( recv_dest ) );
1820  recv_dest.qpn = qpn;
1821  memset ( &recv_source, 0, sizeof ( recv_source ) );
1822  switch ( qp->type ) {
1823  case IB_QPT_SMI:
1824  case IB_QPT_GSI:
1825  case IB_QPT_UD:
1826  /* Locate corresponding GRH */
1827  assert ( hermon_qp->recv.grh != NULL );
1828  grh = &hermon_qp->recv.grh[ wqe_idx & wqe_idx_mask ];
1829  len -= sizeof ( *grh );
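 /* The GRH is scattered by hardware into the dedicated GRH ring
  * filled in by hermon_post_recv(), so it is not present in the
  * I/O buffer and is excluded from the payload length here. */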
1830  /* Construct address vector */
1831  source = &recv_source;
1832  source->qpn = MLX_GET ( &cqe->normal, srq_rqpn );
1833  source->lid = MLX_GET ( &cqe->normal, slid_smac47_32 );
1834  source->sl = MLX_GET ( &cqe->normal, sl );
1835  recv_dest.gid_present = source->gid_present =
1836  MLX_GET ( &cqe->normal, g );
1837  memcpy ( &recv_dest.gid, &grh->dgid,
1838  sizeof ( recv_dest.gid ) );
1839  memcpy ( &source->gid, &grh->sgid,
1840  sizeof ( source->gid ) );
1841  break;
1842  case IB_QPT_RC:
1843  source = &qp->av;
1844  break;
1845  case IB_QPT_ETH:
1846  /* Construct address vector */
1847  source = &recv_source;
1848  source->vlan_present = MLX_GET ( &cqe->normal, vlan );
1849  source->vlan = MLX_GET ( &cqe->normal, vid );
1850  break;
1851  default:
1852  assert ( 0 );
1853  return -EINVAL;
1854  }
1855  assert ( len <= iob_tailroom ( iobuf ) );
1856  iob_put ( iobuf, len );
1857  /* Hand off to completion handler */
1858  ib_complete_recv ( ibdev, qp, &recv_dest, source, iobuf, 0 );
1859  }
1860 
1861  return rc;
1862 }
1863 
1864 /**
1865  * Poll completion queue
1866  *
1867  * @v ibdev Infiniband device
1868  * @v cq Completion queue
1869  */
1870 static void hermon_poll_cq ( struct ib_device *ibdev,
1871  struct ib_completion_queue *cq ) {
1872  struct hermon *hermon = ib_get_drvdata ( ibdev );
1873  struct hermon_completion_queue *hermon_cq = ib_cq_get_drvdata ( cq );
1874  union hermonprm_completion_entry *cqe;
1875  unsigned int cqe_idx_mask;
1876  int rc;
1877 
1878  while ( 1 ) {
1879  /* Look for completion entry */
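 /* A CQE belongs to software only while its owner bit matches the
  * wrap parity of the ring ( cq->next_idx & cq->num_cqes ); the bit
  * written by hardware toggles on every pass through the ring. */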
1880  cqe_idx_mask = ( cq->num_cqes - 1 );
1881  cqe = &hermon_cq->cqe[cq->next_idx & cqe_idx_mask];
1882  if ( MLX_GET ( &cqe->normal, owner ) ^
1883  ( ( cq->next_idx & cq->num_cqes ) ? 1 : 0 ) ) {
1884  /* Entry still owned by hardware; end of poll */
1885  break;
1886  }
1887 
1888  /* Handle completion */
1889  if ( ( rc = hermon_complete ( ibdev, cq, cqe ) ) != 0 ) {
1890  DBGC ( hermon, "Hermon %p CQN %#lx failed to complete:"
1891  " %s\n", hermon, cq->cqn, strerror ( rc ) );
1892  DBGC_HDA ( hermon, virt_to_phys ( cqe ),
1893  cqe, sizeof ( *cqe ) );
1894  }
1895 
1896  /* Update completion queue's index */
1897  cq->next_idx++;
1898 
1899  /* Update doorbell record */
1900  MLX_FILL_1 ( hermon_cq->doorbell, 0, update_ci,
1901  ( cq->next_idx & 0x00ffffffUL ) );
1902  }
1903 }
1904 
1905 /***************************************************************************
1906  *
1907  * Event queues
1908  *
1909  ***************************************************************************
1910  */
1911 
1912 /**
1913  * Dump event queue context (for debugging only)
1914  *
1915  * @v hermon Hermon device
1916  * @v hermon_eq Event queue
1917  * @ret rc Return status code
1918  */
1919 static __attribute__ (( unused )) int
1920 hermon_dump_eqctx ( struct hermon *hermon,
1921  struct hermon_event_queue *hermon_eq ) {
1922  struct hermonprm_eqc eqctx;
1923  int rc;
1924 
1925  /* Do nothing unless debugging is enabled */
1926  if ( ! DBG_LOG )
1927  return 0;
1928 
1929  /* Dump event queue context */
1930  memset ( &eqctx, 0, sizeof ( eqctx ) );
1931  if ( ( rc = hermon_cmd_query_eq ( hermon, hermon_eq->eqn,
1932  &eqctx ) ) != 0 ) {
1933  DBGC ( hermon, "Hermon %p EQN %#lx QUERY_EQ failed: %s\n",
1934  hermon, hermon_eq->eqn, strerror ( rc ) );
1935  return rc;
1936  }
1937  DBGC ( hermon, "Hermon %p EQN %#lx context:\n",
1938  hermon, hermon_eq->eqn );
1939  DBGC_HDA ( hermon, 0, &eqctx, sizeof ( eqctx ) );
1940 
1941  return 0;
1942 }
1943 
1944 /**
1945  * Dump unconsumed event queue entries (for debugging only)
1946  *
1947  * @v hermon Hermon device
1948  * @v hermon_eq Event queue
1949  * @ret rc Return status code
1950  */
1951 static __attribute__ (( unused )) int
1952 hermon_dump_eqes ( struct hermon *hermon,
1953  struct hermon_event_queue *hermon_eq ) {
1954  struct hermonprm_eqc eqctx;
1955  union hermonprm_event_entry *eqe;
1956  unsigned int mask;
1957  unsigned int prod;
1958  unsigned int cons;
1959  unsigned int idx;
1960  int rc;
1961 
1962  /* Do nothing unless debugging is enabled */
1963  if ( ! DBG_LOG )
1964  return 0;
1965 
1966  /* Dump event queue entries */
1967  memset ( &eqctx, 0, sizeof ( eqctx ) );
1968  if ( ( rc = hermon_cmd_query_eq ( hermon, hermon_eq->eqn,
1969  &eqctx ) ) != 0 ) {
1970  DBGC ( hermon, "Hermon %p EQN %#lx QUERY_EQ failed: %s\n",
1971  hermon, hermon_eq->eqn, strerror ( rc ) );
1972  return rc;
1973  }
1974  mask = ( HERMON_NUM_EQES - 1 );
1975  prod = MLX_GET ( &eqctx, producer_counter ) & mask;
1976  cons = MLX_GET ( &eqctx, consumer_counter ) & mask;
1977  idx = hermon_eq->next_idx;
1978  if ( ( idx & mask ) != ( cons & mask ) ) {
1979  DBGC ( hermon, "Hermon %p EQN %#lx mismatch: SW %#x != HW "
1980  "%#x\n", hermon, hermon_eq->eqn, idx, cons );
1981  }
1982  for ( ; ( idx & mask ) != ( prod & mask ) ; idx++ ) {
1983  eqe = &hermon_eq->eqe[idx & mask];
1984  DBGC ( hermon, "Hermon %p EQN %#lx event %#x owner %d type "
1985  "%#02x:%#02x\n", hermon, hermon_eq->eqn, idx,
1986  MLX_GET ( &eqe->generic, owner ),
1987  MLX_GET ( &eqe->generic, event_type ),
1988  MLX_GET ( &eqe->generic, event_sub_type ) );
1989  DBGC_HDA ( hermon, 0, eqe, sizeof ( *eqe ) );
1990  }
1991 
1992  return 0;
1993 }
1994 
1995 /**
1996  * Create event queue
1997  *
1998  * @v hermon Hermon device
1999  * @ret rc Return status code
2000  */
2001 static int hermon_create_eq ( struct hermon *hermon ) {
2002  struct hermon_event_queue *hermon_eq = &hermon->eq;
2003  struct hermonprm_eqc eqctx;
2004  struct hermonprm_event_mask mask;
2005  unsigned int i;
2006  int rc;
2007 
2008  /* Select event queue number */
2009  hermon_eq->eqn = ( 4 * hermon->cap.reserved_uars );
2010  if ( hermon_eq->eqn < hermon->cap.reserved_eqs )
2011  hermon_eq->eqn = hermon->cap.reserved_eqs;
2012 
2013  /* Calculate doorbell address */
2014  hermon_eq->doorbell =
2015  ( hermon->uar + HERMON_DB_EQ_OFFSET ( hermon_eq->eqn ) );
2016 
2017  /* Allocate event queue itself */
2018  hermon_eq->eqe_size =
2019  ( HERMON_NUM_EQES * sizeof ( hermon_eq->eqe[0] ) );
2020  hermon_eq->eqe = malloc_phys ( hermon_eq->eqe_size,
2021  sizeof ( hermon_eq->eqe[0] ) );
2022  if ( ! hermon_eq->eqe ) {
2023  DBGC ( hermon, "Hermon %p EQN %#lx could not allocate EQEs\n",
2024  hermon, hermon_eq->eqn );
2025  rc = -ENOMEM;
2026  goto err_eqe;
2027  }
2028  memset ( hermon_eq->eqe, 0, hermon_eq->eqe_size );
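 /* Setting every owner bit to 1 marks all entries as hardware-owned
  * on the first pass through the ring, since hermon_poll_eq()
  * initially expects owner parity 0. */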
2029  for ( i = 0 ; i < HERMON_NUM_EQES ; i++ ) {
2030  MLX_FILL_1 ( &hermon_eq->eqe[i].generic, 7, owner, 1 );
2031  }
2032  barrier();
2033 
2034  /* Allocate MTT entries */
2035  if ( ( rc = hermon_alloc_mtt ( hermon, hermon_eq->eqe,
2036  hermon_eq->eqe_size,
2037  &hermon_eq->mtt ) ) != 0 ) {
2038  DBGC ( hermon, "Hermon %p EQN %#lx could not allocate MTTs: "
2039  "%s\n", hermon, hermon_eq->eqn, strerror ( rc ) );
2040  goto err_alloc_mtt;
2041  }
2042 
2043  /* Hand queue over to hardware */
2044  memset ( &eqctx, 0, sizeof ( eqctx ) );
2045  MLX_FILL_2 ( &eqctx, 0,
2046  st, 0xa /* "Fired" */,
2047  oi, 1 );
2048  MLX_FILL_1 ( &eqctx, 2,
2049  page_offset, ( hermon_eq->mtt.page_offset >> 5 ) );
2050  MLX_FILL_1 ( &eqctx, 3, log_eq_size, fls ( HERMON_NUM_EQES - 1 ) );
2051  MLX_FILL_H ( &eqctx, 6, mtt_base_addr_h,
2052  hermon_eq->mtt.mtt_base_addr );
2053  MLX_FILL_1 ( &eqctx, 7, mtt_base_addr_l,
2054  ( hermon_eq->mtt.mtt_base_addr >> 3 ) );
2055  if ( ( rc = hermon_cmd_sw2hw_eq ( hermon, hermon_eq->eqn,
2056  &eqctx ) ) != 0 ) {
2057  DBGC ( hermon, "Hermon %p EQN %#lx SW2HW_EQ failed: %s\n",
2058  hermon, hermon_eq->eqn, strerror ( rc ) );
2059  goto err_sw2hw_eq;
2060  }
2061 
2062  /* Map all events to this event queue */
2063  memset ( &mask, 0xff, sizeof ( mask ) );
2064  if ( ( rc = hermon_cmd_map_eq ( hermon,
2065  ( HERMON_MAP_EQ | hermon_eq->eqn ),
2066  &mask ) ) != 0 ) {
2067  DBGC ( hermon, "Hermon %p EQN %#lx MAP_EQ failed: %s\n",
2068  hermon, hermon_eq->eqn, strerror ( rc ) );
2069  goto err_map_eq;
2070  }
2071 
2072  DBGC ( hermon, "Hermon %p EQN %#lx ring [%08lx,%08lx), doorbell "
2073  "%08lx\n", hermon, hermon_eq->eqn,
2074  virt_to_phys ( hermon_eq->eqe ),
2075  ( virt_to_phys ( hermon_eq->eqe ) + hermon_eq->eqe_size ),
2076  virt_to_phys ( hermon_eq->doorbell ) );
2077  return 0;
2078 
2079  err_map_eq:
2080  hermon_cmd_hw2sw_eq ( hermon, hermon_eq->eqn, &eqctx );
2081  err_sw2hw_eq:
2082  hermon_free_mtt ( hermon, &hermon_eq->mtt );
2083  err_alloc_mtt:
2084  free_phys ( hermon_eq->eqe, hermon_eq->eqe_size );
2085  err_eqe:
2086  memset ( hermon_eq, 0, sizeof ( *hermon_eq ) );
2087  return rc;
2088 }
2089 
2090 /**
2091  * Destroy event queue
2092  *
2093  * @v hermon Hermon device
2094  */
2095 static void hermon_destroy_eq ( struct hermon *hermon ) {
2096  struct hermon_event_queue *hermon_eq = &hermon->eq;
2097  struct hermonprm_eqc eqctx;
2098  struct hermonprm_event_mask mask;
2099  int rc;
2100 
2101  /* Unmap events from event queue */
2102  memset ( &mask, 0xff, sizeof ( mask ) );
2103  if ( ( rc = hermon_cmd_map_eq ( hermon,
2104  ( HERMON_UNMAP_EQ | hermon_eq->eqn ),
2105  &mask ) ) != 0 ) {
2106  DBGC ( hermon, "Hermon %p EQN %#lx FATAL MAP_EQ failed to "
2107  "unmap: %s\n", hermon, hermon_eq->eqn, strerror ( rc ) );
2108  /* Continue; HCA may die but system should survive */
2109  }
2110 
2111  /* Take ownership back from hardware */
2112  if ( ( rc = hermon_cmd_hw2sw_eq ( hermon, hermon_eq->eqn,
2113  &eqctx ) ) != 0 ) {
2114  DBGC ( hermon, "Hermon %p EQN %#lx FATAL HW2SW_EQ failed: %s\n",
2115  hermon, hermon_eq->eqn, strerror ( rc ) );
2116  /* Leak memory and return; at least we avoid corruption */
2117  return;
2118  }
2119 
2120  /* Free MTT entries */
2121  hermon_free_mtt ( hermon, &hermon_eq->mtt );
2122 
2123  /* Free memory */
2124  free_phys ( hermon_eq->eqe, hermon_eq->eqe_size );
2125  memset ( hermon_eq, 0, sizeof ( *hermon_eq ) );
2126 }
2127 
2128 /**
2129  * Handle port state event
2130  *
2131  * @v hermon Hermon device
2132  * @v eqe Port state change event queue entry
2133  */
2134 static void hermon_event_port_state_change ( struct hermon *hermon,
2135  union hermonprm_event_entry *eqe){
2136  unsigned int port;
2137  int link_up;
2138 
2139  /* Get port and link status */
2140  port = ( MLX_GET ( &eqe->port_state_change, data.p ) - 1 );
2141  link_up = ( MLX_GET ( &eqe->generic, event_sub_type ) & 0x04 );
2142  DBGC ( hermon, "Hermon %p port %d link %s\n", hermon, ( port + 1 ),
2143  ( link_up ? "up" : "down" ) );
2144 
2145  /* Sanity check */
2146  if ( port >= hermon->cap.num_ports ) {
2147  DBGC ( hermon, "Hermon %p port %d does not exist!\n",
2148  hermon, ( port + 1 ) );
2149  return;
2150  }
2151 
2152  /* Notify device of port state change */
2153  hermon->port[port].type->state_change ( hermon, &hermon->port[port],
2154  link_up );
2155 }
2156 
2157 /**
2158  * Handle port management event
2159  *
2160  * @v hermon Hermon device
2161  * @v eqe Port management change event queue entry
2162  */
2163 static void hermon_event_port_mgmnt_change ( struct hermon *hermon,
2164  union hermonprm_event_entry *eqe){
2165  unsigned int port;
2166 
2167  /* Get port */
2168  port = ( MLX_GET ( &eqe->port_mgmnt_change, port ) - 1 );
2169  DBGC ( hermon, "Hermon %p port %d management change\n",
2170  hermon, ( port + 1 ) );
2171 
2172  /* Sanity check */
2173  if ( port >= hermon->cap.num_ports ) {
2174  DBGC ( hermon, "Hermon %p port %d does not exist!\n",
2175  hermon, ( port + 1 ) );
2176  return;
2177  }
2178 
2179  /* Update MAD parameters */
2180  ib_smc_update ( hermon->port[port].ibdev, hermon_mad );
2181 }
2182 
2183 /**
2184  * Poll event queue
2185  *
2186  * @v ibdev Infiniband device
2187  */
2188 static void hermon_poll_eq ( struct ib_device *ibdev ) {
2189  struct hermon *hermon = ib_get_drvdata ( ibdev );
2190  struct hermon_event_queue *hermon_eq = &hermon->eq;
2191  union hermonprm_event_entry *eqe;
2192  union hermonprm_doorbell_register db_reg;
2193  unsigned long now;
2194  unsigned long elapsed;
2195  unsigned int eqe_idx_mask;
2196  unsigned int event_type;
2197 
2198  /* No event is generated upon reaching INIT, so we must poll
2199  * separately for link state changes while we remain DOWN.
2200  */
2201  if ( ib_is_open ( ibdev ) &&
2202  ( ibdev->port_state == IB_PORT_STATE_DOWN ) ) {
2203  now = currticks();
2204  elapsed = ( now - hermon->last_poll );
2205  if ( elapsed >= HERMON_LINK_POLL_INTERVAL ) {
2206  hermon->last_poll = now;
2207  ib_smc_update ( ibdev, hermon_mad );
2208  }
2209  }
2210 
2211  /* Poll event queue */
2212  while ( 1 ) {
2213  /* Look for event entry */
2214  eqe_idx_mask = ( HERMON_NUM_EQES - 1 );
2215  eqe = &hermon_eq->eqe[hermon_eq->next_idx & eqe_idx_mask];
2216  if ( MLX_GET ( &eqe->generic, owner ) ^
2217  ( ( hermon_eq->next_idx & HERMON_NUM_EQES ) ? 1 : 0 ) ) {
2218  /* Entry still owned by hardware; end of poll */
2219  break;
2220  }
2221  DBGCP ( hermon, "Hermon %p EQN %#lx event:\n",
2222  hermon, hermon_eq->eqn );
2223  DBGCP_HDA ( hermon, virt_to_phys ( eqe ),
2224  eqe, sizeof ( *eqe ) );
2225 
2226  /* Handle event */
2227  event_type = MLX_GET ( &eqe->generic, event_type );
2228  switch ( event_type ) {
2229  case HERMON_EV_PORT_STATE_CHANGE:
2230  hermon_event_port_state_change ( hermon, eqe );
2231  break;
2232  case HERMON_EV_PORT_MGMNT_CHANGE:
2233  hermon_event_port_mgmnt_change ( hermon, eqe );
2234  break;
2235  default:
2236  DBGC ( hermon, "Hermon %p EQN %#lx unrecognised event "
2237  "type %#02x:%#02x\n",
2238  hermon, hermon_eq->eqn, event_type,
2239  MLX_GET ( &eqe->generic, event_sub_type ) );
2240  DBGC_HDA ( hermon, virt_to_phys ( eqe ),
2241  eqe, sizeof ( *eqe ) );
2242  break;
2243  }
2244 
2245  /* Update event queue's index */
2246  hermon_eq->next_idx++;
2247 
2248  /* Ring doorbell */
2249  MLX_FILL_1 ( &db_reg.event, 0,
2250  ci, ( hermon_eq->next_idx & 0x00ffffffUL ) );
2251  writel ( db_reg.dword[0], hermon_eq->doorbell );
2252  }
2253 }
2254 
2255 /***************************************************************************
2256  *
2257  * Firmware control
2258  *
2259  ***************************************************************************
2260  */
2261 
2262 /**
2263  * Map virtual to physical address for firmware usage
2264  *
2265  * @v hermon Hermon device
2266  * @v map Mapping function
2267  * @v va Virtual address
2268  * @v pa Physical address
2269  * @v len Length of region
2270  * @ret rc Return status code
2271  */
2272 static int hermon_map_vpm ( struct hermon *hermon,
2273  int ( *map ) ( struct hermon *hermon,
2274  const struct hermonprm_virtual_physical_mapping* ),
2275  uint64_t va, physaddr_t pa, size_t len ) {
2276  struct hermonprm_virtual_physical_mapping mapping;
2277  physaddr_t start;
2278  physaddr_t low;
2279  physaddr_t high;
2280  physaddr_t end;
2281  size_t size;
2282  int rc;
2283 
2284  /* Sanity checks */
2285  assert ( ( va & ( HERMON_PAGE_SIZE - 1 ) ) == 0 );
2286  assert ( ( pa & ( HERMON_PAGE_SIZE - 1 ) ) == 0 );
2287  assert ( ( len & ( HERMON_PAGE_SIZE - 1 ) ) == 0 );
2288  assert ( len != 0 );
2289 
2290  /* Calculate starting points */
2291  start = pa;
2292  end = ( start + len );
2293  size = ( 1UL << ( fls ( start ^ end ) - 1 ) );
2294  low = high = ( end & ~( size - 1 ) );
2295  assert ( start < low );
2296  assert ( high <= end );
2297 
2298  /* These mappings tend to generate huge volumes of
2299  * uninteresting debug data, which basically makes it
2300  * impossible to use debugging otherwise.
2301  */
2302  DBG_DISABLE ( DBGLVL_LOG );
2303 
2304  /* Map blocks in descending order of size */
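 /* "low" grows downwards towards start and "high" grows upwards
  * towards end; the block size is halved whenever neither direction
  * can accept a full naturally-aligned block. */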
2305  while ( size >= HERMON_PAGE_SIZE ) {
2306 
2307  /* Find the next candidate block */
2308  if ( ( low - size ) >= start ) {
2309  low -= size;
2310  pa = low;
2311  } else if ( high <= ( end - size ) ) {
2312  pa = high;
2313  high += size;
2314  } else {
2315  size >>= 1;
2316  continue;
2317  }
2318  assert ( ( va & ( size - 1 ) ) == 0 );
2319  assert ( ( pa & ( size - 1 ) ) == 0 );
2320 
2321  /* Map this block */
2322  memset ( &mapping, 0, sizeof ( mapping ) );
2323  MLX_FILL_1 ( &mapping, 0, va_h, ( va >> 32 ) );
2324  MLX_FILL_1 ( &mapping, 1, va_l, ( va >> 12 ) );
2325  MLX_FILL_H ( &mapping, 2, pa_h, pa );
2326  MLX_FILL_2 ( &mapping, 3,
2327  log2size, ( ( fls ( size ) - 1 ) - 12 ),
2328  pa_l, ( pa >> 12 ) );
2329  if ( ( rc = map ( hermon, &mapping ) ) != 0 ) {
2330  DBG_ENABLE ( DBGLVL_LOG );
2331  DBGC ( hermon, "Hermon %p could not map %08llx+%zx to "
2332  "%08lx: %s\n",
2333  hermon, va, size, pa, strerror ( rc ) );
2334  return rc;
2335  }
2336  va += size;
2337  }
2338  assert ( low == start );
2339  assert ( high == end );
2340 
2341  DBG_ENABLE ( DBGLVL_LOG );
2342  return 0;
2343 }
2344 
2345 /**
2346  * Start firmware running
2347  *
2348  * @v hermon Hermon device
2349  * @ret rc Return status code
2350  */
2351 static int hermon_start_firmware ( struct hermon *hermon ) {
2352  struct hermonprm_query_fw fw;
2353  unsigned int fw_pages;
2354  size_t fw_len;
2355  physaddr_t fw_base;
2356  int rc;
2357 
2358  /* Get firmware parameters */
2359  if ( ( rc = hermon_cmd_query_fw ( hermon, &fw ) ) != 0 ) {
2360  DBGC ( hermon, "Hermon %p could not query firmware: %s\n",
2361  hermon, strerror ( rc ) );
2362  goto err_query_fw;
2363  }
2364  DBGC ( hermon, "Hermon %p firmware version %d.%d.%d\n", hermon,
2365  MLX_GET ( &fw, fw_rev_major ), MLX_GET ( &fw, fw_rev_minor ),
2366  MLX_GET ( &fw, fw_rev_subminor ) );
2367  fw_pages = MLX_GET ( &fw, fw_pages );
2368  DBGC ( hermon, "Hermon %p requires %d pages (%d kB) for firmware\n",
2369  hermon, fw_pages, ( fw_pages * 4 ) );
2370 
2371  /* Allocate firmware pages and map firmware area */
2372  fw_len = ( fw_pages * HERMON_PAGE_SIZE );
2373  if ( ! hermon->firmware_area ) {
2374  hermon->firmware_len = fw_len;
2375  hermon->firmware_area = umalloc ( hermon->firmware_len );
2376  if ( ! hermon->firmware_area ) {
2377  DBGC ( hermon, "Hermon %p could not allocate firmware "
2378  "area\n", hermon );
2379  rc = -ENOMEM;
2380  goto err_alloc_fa;
2381  }
2382  } else {
2383  assert ( hermon->firmware_len == fw_len );
2384  }
2385  fw_base = user_to_phys ( hermon->firmware_area, 0 );
2386  DBGC ( hermon, "Hermon %p firmware area at physical [%08lx,%08lx)\n",
2387  hermon, fw_base, ( fw_base + fw_len ) );
2388  if ( ( rc = hermon_map_vpm ( hermon, hermon_cmd_map_fa,
2389  0, fw_base, fw_len ) ) != 0 ) {
2390  DBGC ( hermon, "Hermon %p could not map firmware: %s\n",
2391  hermon, strerror ( rc ) );
2392  goto err_map_fa;
2393  }
2394 
2395  /* Start firmware */
2396  if ( ( rc = hermon_cmd_run_fw ( hermon ) ) != 0 ) {
2397  DBGC ( hermon, "Hermon %p could not run firmware: %s\n",
2398  hermon, strerror ( rc ) );
2399  goto err_run_fw;
2400  }
2401 
2402  DBGC ( hermon, "Hermon %p firmware started\n", hermon );
2403  return 0;
2404 
2405  err_run_fw:
2406  err_map_fa:
2407  hermon_cmd_unmap_fa ( hermon );
2408  err_alloc_fa:
2409  err_query_fw:
2410  return rc;
2411 }
2412 
2413 /**
2414  * Stop firmware running
2415  *
2416  * @v hermon Hermon device
2417  */
2418 static void hermon_stop_firmware ( struct hermon *hermon ) {
2419  int rc;
2420 
2421  if ( ( rc = hermon_cmd_unmap_fa ( hermon ) ) != 0 ) {
2422  DBGC ( hermon, "Hermon %p FATAL could not stop firmware: %s\n",
2423  hermon, strerror ( rc ) );
2424  /* Leak memory and return; at least we avoid corruption */
2425  hermon->firmware_area = UNULL;
2426  return;
2427  }
2428 }
2429 
2430 /***************************************************************************
2431  *
2432  * Infinihost Context Memory management
2433  *
2434  ***************************************************************************
2435  */
2436 
2437 /**
2438  * Get device limits
2439  *
2440  * @v hermon Hermon device
2441  * @ret rc Return status code
2442  */
2443 static int hermon_get_cap ( struct hermon *hermon ) {
2444  struct hermonprm_query_dev_cap dev_cap;
2445  int rc;
2446 
2447  if ( ( rc = hermon_cmd_query_dev_cap ( hermon, &dev_cap ) ) != 0 ) {
2448  DBGC ( hermon, "Hermon %p could not get device limits: %s\n",
2449  hermon, strerror ( rc ) );
2450  return rc;
2451  }
2452 
2453  hermon->cap.cmpt_entry_size = MLX_GET ( &dev_cap, c_mpt_entry_sz );
2454  hermon->cap.reserved_qps =
2455  ( 1 << MLX_GET ( &dev_cap, log2_rsvd_qps ) );
2456  hermon->cap.qpc_entry_size = MLX_GET ( &dev_cap, qpc_entry_sz );
2457  hermon->cap.altc_entry_size = MLX_GET ( &dev_cap, altc_entry_sz );
2458  hermon->cap.auxc_entry_size = MLX_GET ( &dev_cap, aux_entry_sz );
2459  hermon->cap.reserved_srqs =
2460  ( 1 << MLX_GET ( &dev_cap, log2_rsvd_srqs ) );
2461  hermon->cap.srqc_entry_size = MLX_GET ( &dev_cap, srq_entry_sz );
2462  hermon->cap.reserved_cqs =
2463  ( 1 << MLX_GET ( &dev_cap, log2_rsvd_cqs ) );
2464  hermon->cap.cqc_entry_size = MLX_GET ( &dev_cap, cqc_entry_sz );
2465  hermon->cap.reserved_eqs = MLX_GET ( &dev_cap, num_rsvd_eqs );
2466  if ( hermon->cap.reserved_eqs == 0 ) {
2467  /* Backward compatibility */
2468  hermon->cap.reserved_eqs =
2469  ( 1 << MLX_GET ( &dev_cap, log2_rsvd_eqs ) );
2470  }
2471  hermon->cap.eqc_entry_size = MLX_GET ( &dev_cap, eqc_entry_sz );
2472  hermon->cap.reserved_mtts =
2473  ( 1 << MLX_GET ( &dev_cap, log2_rsvd_mtts ) );
2474  hermon->cap.mtt_entry_size = MLX_GET ( &dev_cap, mtt_entry_sz );
2475  hermon->cap.reserved_mrws =
2476  ( 1 << MLX_GET ( &dev_cap, log2_rsvd_mrws ) );
2477  hermon->cap.dmpt_entry_size = MLX_GET ( &dev_cap, d_mpt_entry_sz );
2478  hermon->cap.reserved_uars = MLX_GET ( &dev_cap, num_rsvd_uars );
2479  hermon->cap.num_ports = MLX_GET ( &dev_cap, num_ports );
2480  hermon->cap.dpdp = MLX_GET ( &dev_cap, dpdp );
2481 
2482  /* Sanity check */
2483  if ( hermon->cap.num_ports > HERMON_MAX_PORTS ) {
2484  DBGC ( hermon, "Hermon %p has %d ports (only %d supported)\n",
2485  hermon, hermon->cap.num_ports, HERMON_MAX_PORTS );
2486  hermon->cap.num_ports = HERMON_MAX_PORTS;
2487  }
2488 
2489  return 0;
2490 }
2491 
2492 /**
2493  * Align ICM table
2494  *
2495  * @v icm_offset Current ICM offset
2496  * @v len ICM table length
2497  * @ret icm_offset ICM offset
2498  */
2499 static uint64_t icm_align ( uint64_t icm_offset, size_t len ) {
2500 
2501  /* Round up to a multiple of the table size */
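 /* For example, icm_align ( 0x5000, 0x4000 ) returns 0x8000 */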
2502  assert ( len == ( 1UL << ( fls ( len ) - 1 ) ) );
2503  return ( ( icm_offset + len - 1 ) & ~( ( ( uint64_t ) len ) - 1 ) );
2504 }
2505 
2506 /**
2507  * Map ICM (allocating if necessary)
2508  *
2509  * @v hermon Hermon device
2510  * @v init_hca INIT_HCA structure to fill in
2511  * @ret rc Return status code
2512  */
2513 static int hermon_map_icm ( struct hermon *hermon,
2514  struct hermonprm_init_hca *init_hca ) {
2515  struct hermonprm_scalar_parameter icm_size;
2516  struct hermonprm_scalar_parameter icm_aux_size;
2517  uint64_t icm_offset = 0;
2518  unsigned int log_num_qps, log_num_srqs, log_num_cqs, log_num_eqs;
2519  unsigned int log_num_mtts, log_num_mpts, log_num_mcs;
2520  size_t cmpt_max_len;
2521  size_t icm_len, icm_aux_len;
2522  size_t len;
2523  physaddr_t icm_phys;
2524  int i;
2525  int rc;
2526 
2527  /*
2528  * Start by carving up the ICM virtual address space
2529  *
2530  */
2531 
2532  /* Calculate number of each object type within ICM */
2533  log_num_qps = fls ( hermon->cap.reserved_qps +
2534  HERMON_RSVD_SPECIAL_QPS + HERMON_MAX_QPS - 1 );
2535  log_num_srqs = fls ( hermon->cap.reserved_srqs - 1 );
2536  log_num_cqs = fls ( hermon->cap.reserved_cqs + HERMON_MAX_CQS - 1 );
2537  log_num_eqs = fls ( hermon->cap.reserved_eqs + HERMON_MAX_EQS - 1 );
2538  log_num_mtts = fls ( hermon->cap.reserved_mtts + HERMON_MAX_MTTS - 1 );
2539  log_num_mpts = fls ( hermon->cap.reserved_mrws + 1 - 1 );
2540  log_num_mcs = HERMON_LOG_MULTICAST_HASH_SIZE;
2541 
2542  /* ICM starts with the cMPT tables, which are sparse */
2543  cmpt_max_len = ( HERMON_CMPT_MAX_ENTRIES *
2544  ( ( uint64_t ) hermon->cap.cmpt_entry_size ) );
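 /* Virtual ICM space is reserved for the maximum possible number of
  * cMPT entries per table, but only the in-use prefix of each table
  * (rounded up to a page) is ever backed by physical memory. */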
2545  len = ( ( ( ( 1 << log_num_qps ) * hermon->cap.cmpt_entry_size ) +
2546  HERMON_PAGE_SIZE - 1 ) & ~( HERMON_PAGE_SIZE - 1 ) );
2547  hermon->icm_map[HERMON_ICM_QP_CMPT].offset = icm_offset;
2548  hermon->icm_map[HERMON_ICM_QP_CMPT].len = len;
2549  icm_offset += cmpt_max_len;
2550  len = ( ( ( ( 1 << log_num_srqs ) * hermon->cap.cmpt_entry_size ) +
2551  HERMON_PAGE_SIZE - 1 ) & ~( HERMON_PAGE_SIZE - 1 ) );
2552  hermon->icm_map[HERMON_ICM_SRQ_CMPT].offset = icm_offset;
2553  hermon->icm_map[HERMON_ICM_SRQ_CMPT].len = len;
2554  icm_offset += cmpt_max_len;
2555  len = ( ( ( ( 1 << log_num_cqs ) * hermon->cap.cmpt_entry_size ) +
2556  HERMON_PAGE_SIZE - 1 ) & ~( HERMON_PAGE_SIZE - 1 ) );
2557  hermon->icm_map[HERMON_ICM_CQ_CMPT].offset = icm_offset;
2558  hermon->icm_map[HERMON_ICM_CQ_CMPT].len = len;
2559  icm_offset += cmpt_max_len;
2560  len = ( ( ( ( 1 << log_num_eqs ) * hermon->cap.cmpt_entry_size ) +
2561  HERMON_PAGE_SIZE - 1 ) & ~( HERMON_PAGE_SIZE - 1 ) );
2562  hermon->icm_map[HERMON_ICM_EQ_CMPT].offset = icm_offset;
2563  hermon->icm_map[HERMON_ICM_EQ_CMPT].len = len;
2564  icm_offset += cmpt_max_len;
2565 
2566  hermon->icm_map[HERMON_ICM_OTHER].offset = icm_offset;
2567 
2568  /* Queue pair contexts */
2569  len = ( ( 1 << log_num_qps ) * hermon->cap.qpc_entry_size );
2570  icm_offset = icm_align ( icm_offset, len );
2571  MLX_FILL_1 ( init_hca, 12,
2572  qpc_eec_cqc_eqc_rdb_parameters.qpc_base_addr_h,
2573  ( icm_offset >> 32 ) );
2574  MLX_FILL_2 ( init_hca, 13,
2575  qpc_eec_cqc_eqc_rdb_parameters.qpc_base_addr_l,
2576  ( icm_offset >> 5 ),
2577  qpc_eec_cqc_eqc_rdb_parameters.log_num_of_qp,
2578  log_num_qps );
2579  DBGC ( hermon, "Hermon %p ICM QPC is %d x %#zx at [%08llx,%08llx)\n",
2580  hermon, ( 1 << log_num_qps ), hermon->cap.qpc_entry_size,
2581  icm_offset, ( icm_offset + len ) );
2582  icm_offset += len;
2583 
2584  /* Extended alternate path contexts */
2585  len = ( ( 1 << log_num_qps ) * hermon->cap.altc_entry_size );
2586  icm_offset = icm_align ( icm_offset, len );
2587  MLX_FILL_1 ( init_hca, 24,
2588  qpc_eec_cqc_eqc_rdb_parameters.altc_base_addr_h,
2589  ( icm_offset >> 32 ) );
2590  MLX_FILL_1 ( init_hca, 25,
2591  qpc_eec_cqc_eqc_rdb_parameters.altc_base_addr_l,
2592  icm_offset );
2593  DBGC ( hermon, "Hermon %p ICM ALTC is %d x %#zx at [%08llx,%08llx)\n",
2594  hermon, ( 1 << log_num_qps ), hermon->cap.altc_entry_size,
2595  icm_offset, ( icm_offset + len ) );
2596  icm_offset += len;
2597 
2598  /* Extended auxiliary contexts */
2599  len = ( ( 1 << log_num_qps ) * hermon->cap.auxc_entry_size );
2600  icm_offset = icm_align ( icm_offset, len );
2601  MLX_FILL_1 ( init_hca, 28,
2602  qpc_eec_cqc_eqc_rdb_parameters.auxc_base_addr_h,
2603  ( icm_offset >> 32 ) );
2604  MLX_FILL_1 ( init_hca, 29,
2605  qpc_eec_cqc_eqc_rdb_parameters.auxc_base_addr_l,
2606  icm_offset );
2607  DBGC ( hermon, "Hermon %p ICM AUXC is %d x %#zx at [%08llx,%08llx)\n",
2608  hermon, ( 1 << log_num_qps ), hermon->cap.auxc_entry_size,
2609  icm_offset, ( icm_offset + len ) );
2610  icm_offset += len;
2611 
2612  /* Shared receive queue contexts */
2613  len = ( ( 1 << log_num_srqs ) * hermon->cap.srqc_entry_size );
2614  icm_offset = icm_align ( icm_offset, len );
2615  MLX_FILL_1 ( init_hca, 18,
2616  qpc_eec_cqc_eqc_rdb_parameters.srqc_base_addr_h,
2617  ( icm_offset >> 32 ) );
2618  MLX_FILL_2 ( init_hca, 19,
2619  qpc_eec_cqc_eqc_rdb_parameters.srqc_base_addr_l,
2620  ( icm_offset >> 5 ),
2621  qpc_eec_cqc_eqc_rdb_parameters.log_num_of_srq,
2622  log_num_srqs );
2623  DBGC ( hermon, "Hermon %p ICM SRQC is %d x %#zx at [%08llx,%08llx)\n",
2624  hermon, ( 1 << log_num_srqs ), hermon->cap.srqc_entry_size,
2625  icm_offset, ( icm_offset + len ) );
2626  icm_offset += len;
2627 
2628  /* Completion queue contexts */
2629  len = ( ( 1 << log_num_cqs ) * hermon->cap.cqc_entry_size );
2630  icm_offset = icm_align ( icm_offset, len );
2631  MLX_FILL_1 ( init_hca, 20,
2632  qpc_eec_cqc_eqc_rdb_parameters.cqc_base_addr_h,
2633  ( icm_offset >> 32 ) );
2634  MLX_FILL_2 ( init_hca, 21,
2635  qpc_eec_cqc_eqc_rdb_parameters.cqc_base_addr_l,
2636  ( icm_offset >> 5 ),
2637  qpc_eec_cqc_eqc_rdb_parameters.log_num_of_cq,
2638  log_num_cqs );
2639  DBGC ( hermon, "Hermon %p ICM CQC is %d x %#zx at [%08llx,%08llx)\n",
2640  hermon, ( 1 << log_num_cqs ), hermon->cap.cqc_entry_size,
2641  icm_offset, ( icm_offset + len ) );
2642  icm_offset += len;
2643 
2644  /* Event queue contexts */
2645  len = ( ( 1 << log_num_eqs ) * hermon->cap.eqc_entry_size );
2646  icm_offset = icm_align ( icm_offset, len );
2647  MLX_FILL_1 ( init_hca, 32,
2648  qpc_eec_cqc_eqc_rdb_parameters.eqc_base_addr_h,
2649  ( icm_offset >> 32 ) );
2650  MLX_FILL_2 ( init_hca, 33,
2651  qpc_eec_cqc_eqc_rdb_parameters.eqc_base_addr_l,
2652  ( icm_offset >> 5 ),
2653  qpc_eec_cqc_eqc_rdb_parameters.log_num_of_eq,
2654  log_num_eqs );
2655  DBGC ( hermon, "Hermon %p ICM EQC is %d x %#zx at [%08llx,%08llx)\n",
2656  hermon, ( 1 << log_num_eqs ), hermon->cap.eqc_entry_size,
2657  icm_offset, ( icm_offset + len ) );
2658  icm_offset += len;
2659 
2660  /* Memory translation table */
2661  len = ( ( 1 << log_num_mtts ) * hermon->cap.mtt_entry_size );
2662  icm_offset = icm_align ( icm_offset, len );
2663  MLX_FILL_1 ( init_hca, 64,
2664  tpt_parameters.mtt_base_addr_h, ( icm_offset >> 32 ) );
2665  MLX_FILL_1 ( init_hca, 65,
2666  tpt_parameters.mtt_base_addr_l, icm_offset );
2667  DBGC ( hermon, "Hermon %p ICM MTT is %d x %#zx at [%08llx,%08llx)\n",
2668  hermon, ( 1 << log_num_mtts ), hermon->cap.mtt_entry_size,
2669  icm_offset, ( icm_offset + len ) );
2670  icm_offset += len;
2671 
2672  /* Memory protection table */
2673  len = ( ( 1 << log_num_mpts ) * hermon->cap.dmpt_entry_size );
2674  icm_offset = icm_align ( icm_offset, len );
2675  MLX_FILL_1 ( init_hca, 60,
2676  tpt_parameters.dmpt_base_adr_h, ( icm_offset >> 32 ) );
2677  MLX_FILL_1 ( init_hca, 61,
2678  tpt_parameters.dmpt_base_adr_l, icm_offset );
2679  MLX_FILL_1 ( init_hca, 62,
2680  tpt_parameters.log_dmpt_sz, log_num_mpts );
2681  DBGC ( hermon, "Hermon %p ICM DMPT is %d x %#zx at [%08llx,%08llx)\n",
2682  hermon, ( 1 << log_num_mpts ), hermon->cap.dmpt_entry_size,
2683  icm_offset, ( icm_offset + len ) );
2684  icm_offset += len;
2685 
2686  /* Multicast table */
2687  len = ( ( 1 << log_num_mcs ) * sizeof ( struct hermonprm_mcg_entry ) );
2688  icm_offset = icm_align ( icm_offset, len );
2689  MLX_FILL_1 ( init_hca, 48,
2690  multicast_parameters.mc_base_addr_h,
2691  ( icm_offset >> 32 ) );
2692  MLX_FILL_1 ( init_hca, 49,
2693  multicast_parameters.mc_base_addr_l, icm_offset );
2694  MLX_FILL_1 ( init_hca, 52,
2695  multicast_parameters.log_mc_table_entry_sz,
2696  fls ( sizeof ( struct hermonprm_mcg_entry ) - 1 ) );
2697  MLX_FILL_1 ( init_hca, 53,
2698  multicast_parameters.log_mc_table_hash_sz, log_num_mcs );
2699  MLX_FILL_1 ( init_hca, 54,
2700  multicast_parameters.log_mc_table_sz, log_num_mcs );
2701  DBGC ( hermon, "Hermon %p ICM MC is %d x %#zx at [%08llx,%08llx)\n",
2702  hermon, ( 1 << log_num_mcs ),
2703  sizeof ( struct hermonprm_mcg_entry ),
2704  icm_offset, ( icm_offset + len ) );
2705  icm_offset += len;
2706 
2707 
2708  hermon->icm_map[HERMON_ICM_OTHER].len =
2709  ( icm_offset - hermon->icm_map[HERMON_ICM_OTHER].offset );
2710 
2711  /*
2712  * Allocate and map physical memory for (portions of) ICM
2713  *
2714  * Map is:
2715  * ICM AUX area (aligned to its own size)
2716  * cMPT areas
2717  * Other areas
2718  */
2719 
2720  /* Calculate physical memory required for ICM */
2721  icm_len = 0;
2722  for ( i = 0 ; i < HERMON_ICM_NUM_REGIONS ; i++ ) {
2723  icm_len += hermon->icm_map[i].len;
2724  }
2725 
2726  /* Get ICM auxiliary area size */
2727  memset ( &icm_size, 0, sizeof ( icm_size ) );
2728  MLX_FILL_1 ( &icm_size, 0, value_hi, ( icm_offset >> 32 ) );
2729  MLX_FILL_1 ( &icm_size, 1, value, icm_offset );
2730  if ( ( rc = hermon_cmd_set_icm_size ( hermon, &icm_size,
2731  &icm_aux_size ) ) != 0 ) {
2732  DBGC ( hermon, "Hermon %p could not set ICM size: %s\n",
2733  hermon, strerror ( rc ) );
2734  goto err_set_icm_size;
2735  }
2736  icm_aux_len = ( MLX_GET ( &icm_aux_size, value ) * HERMON_PAGE_SIZE );
2737 
2738  /* Allocate ICM data and auxiliary area */
2739  DBGC ( hermon, "Hermon %p requires %zd kB ICM and %zd kB AUX ICM\n",
2740  hermon, ( icm_len / 1024 ), ( icm_aux_len / 1024 ) );
2741  if ( ! hermon->icm ) {
2742  hermon->icm_len = icm_len;
2743  hermon->icm_aux_len = icm_aux_len;
2744  hermon->icm = umalloc ( hermon->icm_aux_len + hermon->icm_len );
2745  if ( ! hermon->icm ) {
2746  DBGC ( hermon, "Hermon %p could not allocate ICM\n",
2747  hermon );
2748  rc = -ENOMEM;
2749  goto err_alloc;
2750  }
2751  } else {
2752  assert ( hermon->icm_len == icm_len );
2753  assert ( hermon->icm_aux_len == icm_aux_len );
2754  }
2755  icm_phys = user_to_phys ( hermon->icm, 0 );
2756 
2757  /* Map ICM auxiliary area */
2758  DBGC ( hermon, "Hermon %p mapping ICM AUX => %08lx\n",
2759  hermon, icm_phys );
2760  if ( ( rc = hermon_map_vpm ( hermon, hermon_cmd_map_icm_aux,
2761  0, icm_phys, icm_aux_len ) ) != 0 ) {
2762  DBGC ( hermon, "Hermon %p could not map AUX ICM: %s\n",
2763  hermon, strerror ( rc ) );
2764  goto err_map_icm_aux;
2765  }
2766  icm_phys += icm_aux_len;
2767 
2768  /* MAP ICM area */
2769  for ( i = 0 ; i < HERMON_ICM_NUM_REGIONS ; i++ ) {
2770  DBGC ( hermon, "Hermon %p mapping ICM %llx+%zx => %08lx\n",
2771  hermon, hermon->icm_map[i].offset,
2772  hermon->icm_map[i].len, icm_phys );
2773  if ( ( rc = hermon_map_vpm ( hermon, hermon_cmd_map_icm,
2774  hermon->icm_map[i].offset,
2775  icm_phys,
2776  hermon->icm_map[i].len ) ) != 0 ){
2777  DBGC ( hermon, "Hermon %p could not map ICM: %s\n",
2778  hermon, strerror ( rc ) );
2779  goto err_map_icm;
2780  }
2781  icm_phys += hermon->icm_map[i].len;
2782  }
2783 
2784  return 0;
2785 
2786  err_map_icm:
2787  assert ( i == 0 ); /* We don't handle partial failure at present */
2788  err_map_icm_aux:
2789  hermon_cmd_unmap_icm_aux ( hermon );
2790  err_alloc:
2791  err_set_icm_size:
2792  return rc;
2793 }
2794 
2795 /**
2796  * Unmap ICM
2797  *
2798  * @v hermon Hermon device
2799  */
2800 static void hermon_unmap_icm ( struct hermon *hermon ) {
2801  struct hermonprm_scalar_parameter unmap_icm;
2802  int i;
2803 
2804  for ( i = ( HERMON_ICM_NUM_REGIONS - 1 ) ; i >= 0 ; i-- ) {
2805  memset ( &unmap_icm, 0, sizeof ( unmap_icm ) );
2806  MLX_FILL_1 ( &unmap_icm, 0, value_hi,
2807  ( hermon->icm_map[i].offset >> 32 ) );
2808  MLX_FILL_1 ( &unmap_icm, 1, value,
2809  hermon->icm_map[i].offset );
2810  hermon_cmd_unmap_icm ( hermon,
2811  ( 1 << fls ( ( hermon->icm_map[i].len /
2812  HERMON_PAGE_SIZE ) - 1)),
2813  &unmap_icm );
2814  }
2815  hermon_cmd_unmap_icm_aux ( hermon );
2816 }
2817 
2818 /***************************************************************************
2819  *
2820  * Initialisation and teardown
2821  *
2822  ***************************************************************************
2823  */
2824 
2825 /**
2826  * Reset device
2827  *
2828  * @v hermon Hermon device
2829  * @ret rc Return status code
2830  */
2831 static int hermon_reset ( struct hermon *hermon ) {
2832  struct pci_device *pci = hermon->pci;
2833  struct pci_config_backup backup;
2834  static const uint8_t backup_exclude[] =
2835  PCI_CONFIG_BACKUP_EXCLUDE ( 0x58, 0x5c );
2836  uint16_t vendor;
2837  unsigned int i;
2838 
2839  /* Reset command interface toggle */
2840  hermon->toggle = 0;
2841 
2842  /* Perform device reset and preserve PCI configuration */
2843  pci_backup ( pci, &backup, PCI_CONFIG_BACKUP_ALL, backup_exclude );
2844  writel ( HERMON_RESET_MAGIC,
2845  ( hermon->config + HERMON_RESET_OFFSET ) );
2846 
2847  /* Wait until device starts responding to configuration cycles */
2848  for ( i = 0 ; i < HERMON_RESET_MAX_WAIT_MS ; i++ ) {
2849 
2850  /* Read PCI vendor ID */
2851  pci_read_config_word ( pci, PCI_VENDOR_ID, &vendor );
2852  if ( vendor == pci->vendor ) {
2853 
2854  /* Restore PCI configuration */
2855  pci_restore ( pci, &backup, PCI_CONFIG_BACKUP_ALL,
2856  backup_exclude );
2857 
2858  DBGC ( hermon, "Hermon %p reset after %dms\n",
2859  hermon, i );
2860  return 0;
2861  }
2862 
2863  /* Delay */
2864  mdelay ( 1 );
2865  }
2866 
2867  DBGC ( hermon, "Hermon %p timed out waiting for reset\n", hermon );
2868  return -ETIMEDOUT;
2869 }
2870 
2871 /**
2872  * Set up memory protection table
2873  *
2874  * @v hermon Hermon device
2875  * @ret rc Return status code
2876  */
2877 static int hermon_setup_mpt ( struct hermon *hermon ) {
2878  struct hermonprm_mpt mpt;
2879  uint32_t key;
2880  int rc;
2881 
2882  /* Derive key */
2883  key = ( hermon->cap.reserved_mrws | HERMON_MKEY_PREFIX );
2884  hermon->lkey = ( ( key << 8 ) | ( key >> 24 ) );
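 /* hermon->lkey is the key value as used by the rest of the driver;
  * the MPT entry below is programmed with the unrotated value. */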
2885 
2886  /* Initialise memory protection table */
2887  memset ( &mpt, 0, sizeof ( mpt ) );
2888  MLX_FILL_7 ( &mpt, 0,
2889  atomic, 1,
2890  rw, 1,
2891  rr, 1,
2892  lw, 1,
2893  lr, 1,
2894  pa, 1,
2895  r_w, 1 );
2896  MLX_FILL_1 ( &mpt, 2, mem_key, key );
2897  MLX_FILL_1 ( &mpt, 3,
2898  pd, HERMON_GLOBAL_PD );
2899  MLX_FILL_1 ( &mpt, 10, len64, 1 );
2900  if ( ( rc = hermon_cmd_sw2hw_mpt ( hermon,
2901  hermon->cap.reserved_mrws,
2902  &mpt ) ) != 0 ) {
2903  DBGC ( hermon, "Hermon %p could not set up MPT: %s\n",
2904  hermon, strerror ( rc ) );
2905  return rc;
2906  }
2907 
2908  return 0;
2909 }
2910 
2911 /**
2912  * Unmap memory protection table
2913  *
2914  * @v hermon Hermon device
2915  * @ret rc Return status code
2916  */
2917 static int hermon_unmap_mpt ( struct hermon *hermon ) {
2918  int rc;
2919 
2920  if ( ( rc = hermon_cmd_hw2sw_mpt ( hermon,
2921  hermon->cap.reserved_mrws ) ) != 0 ){
2922  DBGC ( hermon, "Hermon %p could not unmap MPT: %s\n",
2923  hermon, strerror ( rc ) );
2924  return rc;
2925  }
2926 
2927  return 0;
2928 }
2929 
2930 /**
2931  * Configure special queue pairs
2932  *
2933  * @v hermon Hermon device
2934  * @ret rc Return status code
2935  */
2936 static int hermon_configure_special_qps ( struct hermon *hermon ) {
2937  int rc;
2938 
2939  /* Special QP block must be aligned on its own size */
2940  hermon->special_qpn_base = ( ( hermon->cap.reserved_qps +
2941  HERMON_NUM_SPECIAL_QPS - 1 )
2942  & ~( HERMON_NUM_SPECIAL_QPS - 1 ) );
2943  hermon->qpn_base = ( hermon->special_qpn_base +
2944  HERMON_NUM_SPECIAL_QPS );
2945  DBGC ( hermon, "Hermon %p special QPs at [%lx,%lx]\n", hermon,
2946  hermon->special_qpn_base, ( hermon->qpn_base - 1 ) );
2947 
2948  /* Issue command to configure special QPs */
2949  if ( ( rc = hermon_cmd_conf_special_qp ( hermon, 0x00,
2950  hermon->special_qpn_base ) ) != 0 ) {
2951  DBGC ( hermon, "Hermon %p could not configure special QPs: "
2952  "%s\n", hermon, strerror ( rc ) );
2953  return rc;
2954  }
2955 
2956  return 0;
2957 }
2958 
2959 /**
2960  * Start Hermon device
2961  *
2962  * @v hermon Hermon device
2963  * @v running Firmware is already running
2964  * @ret rc Return status code
2965  */
2966 static int hermon_start ( struct hermon *hermon, int running ) {
2967  struct hermonprm_init_hca init_hca;
2968  unsigned int i;
2969  int rc;
2970 
2971  /* Start firmware if not already running */
2972  if ( ! running ) {
2973  if ( ( rc = hermon_start_firmware ( hermon ) ) != 0 )
2974  goto err_start_firmware;
2975  }
2976 
2977  /* Allocate and map ICM */
2978  memset ( &init_hca, 0, sizeof ( init_hca ) );
2979  if ( ( rc = hermon_map_icm ( hermon, &init_hca ) ) != 0 )
2980  goto err_map_icm;
2981 
2982  /* Initialise HCA */
2983  MLX_FILL_1 ( &init_hca, 0, version, 0x02 /* "Must be 0x02" */ );
2984  MLX_FILL_1 ( &init_hca, 5, udp, 1 );
2985  MLX_FILL_1 ( &init_hca, 74, uar_parameters.log_max_uars, 8 );
2986  if ( ( rc = hermon_cmd_init_hca ( hermon, &init_hca ) ) != 0 ) {
2987  DBGC ( hermon, "Hermon %p could not initialise HCA: %s\n",
2988  hermon, strerror ( rc ) );
2989  goto err_init_hca;
2990  }
2991 
2992  /* Set up memory protection */
2993  if ( ( rc = hermon_setup_mpt ( hermon ) ) != 0 )
2994  goto err_setup_mpt;
2995  for ( i = 0 ; i < hermon->cap.num_ports ; i++ )
2996  hermon->port[i].ibdev->rdma_key = hermon->lkey;
2997 
2998  /* Set up event queue */
2999  if ( ( rc = hermon_create_eq ( hermon ) ) != 0 )
3000  goto err_create_eq;
3001 
3002  /* Configure special QPs */
3003  if ( ( rc = hermon_configure_special_qps ( hermon ) ) != 0 )
3004  goto err_conf_special_qps;
3005 
3006  DBGC ( hermon, "Hermon %p device started\n", hermon );
3007  return 0;
3008 
3009  err_conf_special_qps:
3010  hermon_destroy_eq ( hermon );
3011  err_create_eq:
3012  hermon_unmap_mpt ( hermon );
3013  err_setup_mpt:
3014  hermon_cmd_close_hca ( hermon );
3015  err_init_hca:
3016  hermon_unmap_icm ( hermon );
3017  err_map_icm:
3018  hermon_stop_firmware ( hermon );
3019  err_start_firmware:
3020  return rc;
3021 }
3022 
3023 /**
3024  * Stop Hermon device
3025  *
3026  * @v hermon Hermon device
3027  */
3028 static void hermon_stop ( struct hermon *hermon ) {
3029  hermon_destroy_eq ( hermon );
3030  hermon_unmap_mpt ( hermon );
3031  hermon_cmd_close_hca ( hermon );
3032  hermon_unmap_icm ( hermon );
3033  hermon_stop_firmware ( hermon );
3034  hermon_reset ( hermon );
3035 }
3036 
3037 /**
3038  * Open Hermon device
3039  *
3040  * @v hermon Hermon device
3041  * @ret rc Return status code
3042  */
3043 static int hermon_open ( struct hermon *hermon ) {
3044  int rc;
3045 
3046  /* Start device if applicable */
3047  if ( hermon->open_count == 0 ) {
3048  if ( ( rc = hermon_start ( hermon, 0 ) ) != 0 )
3049  return rc;
3050  }
3051 
3052  /* Increment open counter */
3053  hermon->open_count++;
3054 
3055  return 0;
3056 }
3057 
3058 /**
3059  * Close Hermon device
3060  *
3061  * @v hermon Hermon device
3062  */
3063 static void hermon_close ( struct hermon *hermon ) {
3064 
3065  /* Decrement open counter */
3066  assert ( hermon->open_count != 0 );
3067  hermon->open_count--;
3068 
3069  /* Stop device if applicable */
3070  if ( hermon->open_count == 0 )
3071  hermon_stop ( hermon );
3072 }
3073 
3074 /***************************************************************************
3075  *
3076  * Infiniband link-layer operations
3077  *
3078  ***************************************************************************
3079  */
3080 
3081 /**
3082  * Initialise Infiniband link
3083  *
3084  * @v ibdev Infiniband device
3085  * @ret rc Return status code
3086  */
3087 static int hermon_ib_open ( struct ib_device *ibdev ) {
3088  struct hermon *hermon = ib_get_drvdata ( ibdev );
3089  union hermonprm_set_port set_port;
3090  int rc;
3091 
3092  /* Open hardware */
3093  if ( ( rc = hermon_open ( hermon ) ) != 0 )
3094  goto err_open;
3095 
3096  /* Set port parameters */
3097  memset ( &set_port, 0, sizeof ( set_port ) );
3098  MLX_FILL_8 ( &set_port.ib, 0,
3099  mmc, 1,
3100  mvc, 1,
3101  mp, 1,
3102  mg, 1,
3103  mtu_cap, IB_MTU_2048,
3104  vl_cap, IB_VL_0,
3105  rcm, 1,
3106  lss, 1 );
3107  MLX_FILL_2 ( &set_port.ib, 10,
3108  max_pkey, 1,
3109  max_gid, 1 );
3110  MLX_FILL_1 ( &set_port.ib, 28,
3111  link_speed_supported, 1 );
3112  if ( ( rc = hermon_cmd_set_port ( hermon, 0, ibdev->port,
3113  &set_port ) ) != 0 ) {
3114  DBGC ( hermon, "Hermon %p port %d could not set port: %s\n",
3115  hermon, ibdev->port, strerror ( rc ) );
3116  goto err_set_port;
3117  }
3118 
3119  /* Initialise port */
3120  if ( ( rc = hermon_cmd_init_port ( hermon, ibdev->port ) ) != 0 ) {
3121  DBGC ( hermon, "Hermon %p port %d could not initialise port: "
3122  "%s\n", hermon, ibdev->port, strerror ( rc ) );
3123  goto err_init_port;
3124  }
3125 
3126  /* Update MAD parameters */
3127  ib_smc_update ( ibdev, hermon_mad );
3128 
3129  return 0;
3130 
3131  err_init_port:
3132  err_set_port:
3133  hermon_close ( hermon );
3134  err_open:
3135  return rc;
3136 }
3137 
3138 /**
3139  * Close Infiniband link
3140  *
3141  * @v ibdev Infiniband device
3142  */
3143 static void hermon_ib_close ( struct ib_device *ibdev ) {
3144  struct hermon *hermon = ib_get_drvdata ( ibdev );
3145  int rc;
3146 
3147  /* Close port */
3148  if ( ( rc = hermon_cmd_close_port ( hermon, ibdev->port ) ) != 0 ) {
3149  DBGC ( hermon, "Hermon %p port %d could not close port: %s\n",
3150  hermon, ibdev->port, strerror ( rc ) );
3151  /* Nothing we can do about this */
3152  }
3153 
3154  /* Close hardware */
3155  hermon_close ( hermon );
3156 }
3157 
3158 /**
3159  * Inform embedded subnet management agent of a received MAD
3160  *
3161  * @v ibdev Infiniband device
3162  * @v mad MAD
3163  * @ret rc Return status code
3164  */
3165 static int hermon_inform_sma ( struct ib_device *ibdev,
3166  union ib_mad *mad ) {
3167  int rc;
3168 
3169  /* Send the MAD to the embedded SMA */
3170  if ( ( rc = hermon_mad ( ibdev, mad ) ) != 0 )
3171  return rc;
3172 
3173  /* Update parameters held in software */
3174  ib_smc_update ( ibdev, hermon_mad );
3175 
3176  return 0;
3177 }
3178 
3179 /***************************************************************************
3180  *
3181  * Multicast group operations
3182  *
3183  ***************************************************************************
3184  */
3185 
3186 /**
3187  * Attach to multicast group
3188  *
3189  * @v ibdev Infiniband device
3190  * @v qp Queue pair
3191  * @v gid Multicast GID
3192  * @ret rc Return status code
3193  */
3194 static int hermon_mcast_attach ( struct ib_device *ibdev,
3195  struct ib_queue_pair *qp,
3196  union ib_gid *gid ) {
3197  struct hermon *hermon = ib_get_drvdata ( ibdev );
3198  struct hermonprm_mgm_hash hash;
3199  struct hermonprm_mcg_entry mcg;
3200  unsigned int index;
3201  int rc;
3202 
3203  /* Generate hash table index */
3204  if ( ( rc = hermon_cmd_mgid_hash ( hermon, gid, &hash ) ) != 0 ) {
3205  DBGC ( hermon, "Hermon %p could not hash GID: %s\n",
3206  hermon, strerror ( rc ) );
3207  return rc;
3208  }
3209  index = MLX_GET ( &hash, hash );
3210 
3211  /* Check for existing hash table entry */
3212  if ( ( rc = hermon_cmd_read_mcg ( hermon, index, &mcg ) ) != 0 ) {
3213  DBGC ( hermon, "Hermon %p could not read MCG %#x: %s\n",
3214  hermon, index, strerror ( rc ) );
3215  return rc;
3216  }
3217  if ( MLX_GET ( &mcg, hdr.members_count ) != 0 ) {
3218  /* FIXME: this implementation allows only a single QP
3219  * per multicast group, and doesn't handle hash
3220  * collisions. Sufficient for IPoIB but may need to
3221  * be extended in future.
3222  */
3223  DBGC ( hermon, "Hermon %p MGID index %#x already in use\n",
3224  hermon, index );
3225  return -EBUSY;
3226  }
3227 
3228  /* Update hash table entry */
3229  MLX_FILL_1 ( &mcg, 1, hdr.members_count, 1 );
3230  MLX_FILL_1 ( &mcg, 8, qp[0].qpn, qp->qpn );
3231  memcpy ( &mcg.u.dwords[4], gid, sizeof ( *gid ) );
3232  if ( ( rc = hermon_cmd_write_mcg ( hermon, index, &mcg ) ) != 0 ) {
3233  DBGC ( hermon, "Hermon %p could not write MCG %#x: %s\n",
3234  hermon, index, strerror ( rc ) );
3235  return rc;
3236  }
3237 
3238  return 0;
3239 }
3240 
3241 /**
3242  * Detach from multicast group
3243  *
3244  * @v ibdev Infiniband device
3245  * @v qp Queue pair
3246  * @v gid Multicast GID
3247  */
3248 static void hermon_mcast_detach ( struct ib_device *ibdev,
3249  struct ib_queue_pair *qp __unused,
3250  union ib_gid *gid ) {
3251  struct hermon *hermon = ib_get_drvdata ( ibdev );
3252  struct hermonprm_mgm_hash hash;
3253  struct hermonprm_mcg_entry mcg;
3254  unsigned int index;
3255  int rc;
3256 
3257  /* Generate hash table index */
3258  if ( ( rc = hermon_cmd_mgid_hash ( hermon, gid, &hash ) ) != 0 ) {
3259  DBGC ( hermon, "Hermon %p could not hash GID: %s\n",
3260  hermon, strerror ( rc ) );
3261  return;
3262  }
3263  index = MLX_GET ( &hash, hash );
3264 
3265  /* Clear hash table entry */
3266  memset ( &mcg, 0, sizeof ( mcg ) );
3267  if ( ( rc = hermon_cmd_write_mcg ( hermon, index, &mcg ) ) != 0 ) {
3268  DBGC ( hermon, "Hermon %p could not write MCG %#x: %s\n",
3269  hermon, index, strerror ( rc ) );
3270  return;
3271  }
3272 }
3273 
3274 /** Hermon Infiniband operations */
3275 static struct ib_device_operations hermon_ib_operations = {
3276  .create_cq = hermon_create_cq,
3277  .destroy_cq = hermon_destroy_cq,
3278  .create_qp = hermon_create_qp,
3279  .modify_qp = hermon_modify_qp,
3280  .destroy_qp = hermon_destroy_qp,
3281  .post_send = hermon_post_send,
3282  .post_recv = hermon_post_recv,
3283  .poll_cq = hermon_poll_cq,
3284  .poll_eq = hermon_poll_eq,
3285  .open = hermon_ib_open,
3286  .close = hermon_ib_close,
3287  .mcast_attach = hermon_mcast_attach,
3288  .mcast_detach = hermon_mcast_detach,
3289  .set_port_info = hermon_inform_sma,
3290  .set_pkey_table = hermon_inform_sma,
3291 };
3292 
3293 /**
3294  * Register Hermon Infiniband device
3295  *
3296  * @v hermon Hermon device
3297  * @v port Hermon port
3298  * @ret rc Return status code
3299  */
3300 static int hermon_register_ibdev ( struct hermon *hermon,
3301  struct hermon_port *port ) {
3302  struct ib_device *ibdev = port->ibdev;
3303  int rc;
3304 
3305  /* Use Ethernet MAC as eIPoIB local EMAC */
3306  memcpy ( ibdev->lemac, port->eth_mac.raw, ETH_ALEN );
3307 
3308  /* Initialise parameters using SMC */
3309  ib_smc_init ( ibdev, hermon_mad );
3310 
3311  /* Register Infiniband device */
3312  if ( ( rc = register_ibdev ( ibdev ) ) != 0 ) {
3313  DBGC ( hermon, "Hermon %p port %d could not register IB "
3314  "device: %s\n", hermon, ibdev->port, strerror ( rc ) );
3315  return rc;
3316  }
3317 
3318  return 0;
3319 }
3320 
3321 /**
3322  * Handle Hermon Infiniband device port state change
3323  *
3324  * @v hermon Hermon device
3325  * @v port Hermon port
3326  * @v link_up Link is up
3327  */
3328 static void hermon_state_change_ibdev ( struct hermon *hermon __unused,
3329  struct hermon_port *port,
3330  int link_up __unused ) {
3331  struct ib_device *ibdev = port->ibdev;
3332 
3333  /* Update MAD parameters */
3334  ib_smc_update ( ibdev, hermon_mad );
3335 }
3336 
3337 /**
3338  * Unregister Hermon Infiniband device
3339  *
3340  * @v hermon Hermon device
3341  * @v port Hermon port
3342  */
3343 static void hermon_unregister_ibdev ( struct hermon *hermon __unused,
3344  struct hermon_port *port ) {
3345  struct ib_device *ibdev = port->ibdev;
3346 
3347  unregister_ibdev ( ibdev );
3348 }
3349 
3350 /** Hermon Infiniband port type */
3351 static struct hermon_port_type hermon_port_type_ib = {
3352  .register_dev = hermon_register_ibdev,
3353  .state_change = hermon_state_change_ibdev,
3354  .unregister_dev = hermon_unregister_ibdev,
3355 };
3356 
3357 /***************************************************************************
3358  *
3359  * Ethernet operation
3360  *
3361  ***************************************************************************
3362  */
3363 
3364 /** Number of Hermon Ethernet send work queue entries */
3365 #define HERMON_ETH_NUM_SEND_WQES 16
3366 
3367 /** Number of Hermon Ethernet receive work queue entries */
3368 #define HERMON_ETH_NUM_RECV_WQES 8
3369 
3370 /** Number of Hermon Ethernet completion entries */
3371 #define HERMON_ETH_NUM_CQES 32
3372 
3373 /**
3374  * Transmit packet via Hermon Ethernet device
3375  *
3376  * @v netdev Network device
3377  * @v iobuf I/O buffer
3378  * @ret rc Return status code
3379  */
3380 static int hermon_eth_transmit ( struct net_device *netdev,
3381  struct io_buffer *iobuf ) {
3382  struct hermon_port *port = netdev->priv;
3383  struct ib_device *ibdev = port->ibdev;
3384  struct hermon *hermon = ib_get_drvdata ( ibdev );
3385  int rc;
3386 
3387  /* Transmit packet */
3388  if ( ( rc = ib_post_send ( ibdev, port->eth_qp, NULL,
3389  iobuf ) ) != 0 ) {
3390  DBGC ( hermon, "Hermon %p port %d could not transmit: %s\n",
3391  hermon, ibdev->port, strerror ( rc ) );
3392  return rc;
3393  }
3394 
3395  return 0;
3396 }
3397 
3398 /** Hermon Ethernet queue pair operations */
3399 static struct ib_queue_pair_operations hermon_eth_qp_op = {
3400  .alloc_iob = alloc_iob,
3401 };
3402 
3403 /**
3404  * Handle Hermon Ethernet device send completion
3405  *
3406  * @v ibdev Infiniband device
3407  * @v qp Queue pair
3408  * @v iobuf I/O buffer
3409  * @v rc Completion status code
3410  */
3411 static void hermon_eth_complete_send ( struct ib_device *ibdev __unused,
3412  struct ib_queue_pair *qp,
3413  struct io_buffer *iobuf, int rc ) {
3414  struct net_device *netdev = ib_qp_get_ownerdata ( qp );
3415 
3416  netdev_tx_complete_err ( netdev, iobuf, rc );
3417 }
3418 
3419 /**
3420  * Handle Hermon Ethernet device receive completion
3421  *
3422  * @v ibdev Infiniband device
3423  * @v qp Queue pair
3424  * @v dest Destination address vector, or NULL
3425  * @v source Source address vector, or NULL
3426  * @v iobuf I/O buffer
3427  * @v rc Completion status code
3428  */
3429 static void hermon_eth_complete_recv ( struct ib_device *ibdev __unused,
3430  struct ib_queue_pair *qp,
3431  struct ib_address_vector *dest __unused,
3432  struct ib_address_vector *source,
3433  struct io_buffer *iobuf, int rc ) {
3434  struct net_device *netdev = ib_qp_get_ownerdata ( qp );
3435  unsigned int tag;
3436 
3437  /* Identify VLAN tag, if applicable */
3438  tag = ( source->vlan_present ? source->vlan : 0 );
3439 
3440  /* Hand off to network layer */
3441  if ( rc == 0 ) {
3442  vlan_netdev_rx ( netdev, tag, iobuf );
3443  } else {
3444  vlan_netdev_rx_err ( netdev, tag, iobuf, rc );
3445  }
3446 }
3447 
3448 /** Hermon Ethernet device completion operations */
3449 static struct ib_completion_queue_operations hermon_eth_cq_op = {
3450  .complete_send = hermon_eth_complete_send,
3451  .complete_recv = hermon_eth_complete_recv,
3452 };
3453 
3454 /**
3455  * Poll Hermon Ethernet device
3456  *
3457  * @v netdev Network device
3458  */
3459 static void hermon_eth_poll ( struct net_device *netdev ) {
3460  struct hermon_port *port = netdev->priv;
3461  struct ib_device *ibdev = port->ibdev;
3462 
3463  ib_poll_eq ( ibdev );
3464 }
3465 
3466 /**
3467  * Open Hermon Ethernet device
3468  *
3469  * @v netdev Network device
3470  * @ret rc Return status code
3471  */
3472 static int hermon_eth_open ( struct net_device *netdev ) {
3473  struct hermon_port *port = netdev->priv;
3474  struct ib_device *ibdev = port->ibdev;
3475  struct hermon *hermon = ib_get_drvdata ( ibdev );
3476  union hermonprm_set_port set_port;
3477  int rc;
3478 
3479  /* Open hardware */
3480  if ( ( rc = hermon_open ( hermon ) ) != 0 )
3481  goto err_open;
3482 
3483  /* Allocate completion queue */
3484  if ( ( rc = ib_create_cq ( ibdev, HERMON_ETH_NUM_CQES,
3485  &hermon_eth_cq_op, &port->eth_cq ) ) != 0 ) {
3486  DBGC ( hermon, "Hermon %p port %d could not create completion "
3487  "queue: %s\n", hermon, ibdev->port, strerror ( rc ) );
3488  goto err_create_cq;
3489  }
3490 
3491  /* Allocate queue pair */
3492  if ( ( rc = ib_create_qp ( ibdev, IB_QPT_ETH, HERMON_ETH_NUM_SEND_WQES,
3493  port->eth_cq, HERMON_ETH_NUM_RECV_WQES,
3494  port->eth_cq, &hermon_eth_qp_op,
3495  netdev->name, &port->eth_qp ) ) != 0 ) {
3496  DBGC ( hermon, "Hermon %p port %d could not create queue "
3497  "pair: %s\n", hermon, ibdev->port, strerror ( rc ) );
3498  goto err_create_qp;
3499  }
3500  ib_qp_set_ownerdata ( port->eth_qp, netdev );
3501 
3502  /* Activate queue pair */
3503  if ( ( rc = ib_modify_qp ( ibdev, port->eth_qp ) ) != 0 ) {
3504  DBGC ( hermon, "Hermon %p port %d could not modify queue "
3505  "pair: %s\n", hermon, ibdev->port, strerror ( rc ) );
3506  goto err_modify_qp;
3507  }
3508 
3509  /* Fill receive rings */
3510  ib_refill_recv ( ibdev, port->eth_qp );
3511 
3512  /* Set port general parameters */
3513  memset ( &set_port, 0, sizeof ( set_port ) );
3514  MLX_FILL_3 ( &set_port.general, 0,
3515  v_mtu, 1,
3516  v_pprx, 1,
3517  v_pptx, 1 );
3518  MLX_FILL_1 ( &set_port.general, 1,
3519  mtu, ( ETH_FRAME_LEN + 40 /* Used by card */ ) );
3520  MLX_FILL_1 ( &set_port.general, 2,
3521  pfctx, ( 1 << FCOE_VLAN_PRIORITY ) );
3522  MLX_FILL_1 ( &set_port.general, 3,
3523  pfcrx, ( 1 << FCOE_VLAN_PRIORITY ) );
3524  if ( ( rc = hermon_cmd_set_port ( hermon, 1,
3525  ( HERMON_SET_PORT_GENERAL_PARAM |
3526  ibdev->port ),
3527  &set_port ) ) != 0 ) {
3528  DBGC ( hermon, "Hermon %p port %d could not set port general "
3529  "parameters: %s\n",
3530  hermon, ibdev->port, strerror ( rc ) );
3531  goto err_set_port_general_params;
3532  }
3533 
3534  /* Set port receive QP */
3535  memset ( &set_port, 0, sizeof ( set_port ) );
3536  MLX_FILL_1 ( &set_port.rqp_calc, 0, base_qpn, port->eth_qp->qpn );
3537  MLX_FILL_1 ( &set_port.rqp_calc, 2,
3538  mac_miss_index, 128 /* MAC misses go to promisc QP */ );
3539  MLX_FILL_2 ( &set_port.rqp_calc, 3,
3540  vlan_miss_index, 127 /* VLAN misses go to promisc QP */,
3541  no_vlan_index, 126 /* VLAN-free go to promisc QP */ );
3542  MLX_FILL_2 ( &set_port.rqp_calc, 5,
3543  promisc_qpn, port->eth_qp->qpn,
3544  en_uc_promisc, 1 );
3545  MLX_FILL_2 ( &set_port.rqp_calc, 6,
3546  def_mcast_qpn, port->eth_qp->qpn,
3547  mc_promisc_mode, 2 /* Receive all multicasts */ );
3548  if ( ( rc = hermon_cmd_set_port ( hermon, 1,
3549  ( HERMON_SET_PORT_RECEIVE_QP |
3550  ibdev->port ),
3551  &set_port ) ) != 0 ) {
3552  DBGC ( hermon, "Hermon %p port %d could not set port receive "
3553  "QP: %s\n", hermon, ibdev->port, strerror ( rc ) );
3554  goto err_set_port_receive_qp;
3555  }
3556 
3557  /* Initialise port */
3558  if ( ( rc = hermon_cmd_init_port ( hermon, ibdev->port ) ) != 0 ) {
3559  DBGC ( hermon, "Hermon %p port %d could not initialise port: "
3560  "%s\n", hermon, ibdev->port, strerror ( rc ) );
3561  goto err_init_port;
3562  }
3563 
3564  return 0;
3565 
3566  err_init_port:
3567  err_set_port_receive_qp:
3568  err_set_port_general_params:
3569  err_modify_qp:
3570  ib_destroy_qp ( ibdev, port->eth_qp );
3571  err_create_qp:
3572  ib_destroy_cq ( ibdev, port->eth_cq );
3573  err_create_cq:
3574  hermon_close ( hermon );
3575  err_open:
3576  return rc;
3577 }
3578 
3579 /**
3580  * Close Hermon Ethernet device
3581  *
3582  * @v netdev Network device
3583  */
3584 static void hermon_eth_close ( struct net_device *netdev ) {
3585  struct hermon_port *port = netdev->priv;
3586  struct ib_device *ibdev = port->ibdev;
3587  struct hermon *hermon = ib_get_drvdata ( ibdev );
3588  int rc;
3589 
3590  /* Close port */
3591  if ( ( rc = hermon_cmd_close_port ( hermon, ibdev->port ) ) != 0 ) {
3592  DBGC ( hermon, "Hermon %p port %d could not close port: %s\n",
3593  hermon, ibdev->port, strerror ( rc ) );
3594  /* Nothing we can do about this */
3595  }
3596 
3597  /* Tear down the queues */
3598  ib_destroy_qp ( ibdev, port->eth_qp );
3599  ib_destroy_cq ( ibdev, port->eth_cq );
3600 
3601  /* Close hardware */
3602  hermon_close ( hermon );
3603 }
3604 
3605 /** Hermon Ethernet network device operations */
3606 static struct net_device_operations hermon_eth_operations = {
3607  .open = hermon_eth_open,
3608  .close = hermon_eth_close,
3609  .transmit = hermon_eth_transmit,
3610  .poll = hermon_eth_poll,
3611 };
3612 
3613 /**
3614  * Register Hermon Ethernet device
3615  *
3616  * @v hermon Hermon device
3617  * @v port Hermon port
3618  * @ret rc Return status code
3619  */
3620 static int hermon_register_netdev ( struct hermon *hermon,
3621  struct hermon_port *port ) {
3622  struct net_device *netdev = port->netdev;
3623  struct ib_device *ibdev = port->ibdev;
3624  int rc;
3625 
3626  /* Set MAC address */
3627  memcpy ( netdev->hw_addr, port->eth_mac.raw, ETH_ALEN );
3628 
3629  /* Register network device */
3630  if ( ( rc = register_netdev ( netdev ) ) != 0 ) {
3631  DBGC ( hermon, "Hermon %p port %d could not register network "
3632  "device: %s\n", hermon, ibdev->port, strerror ( rc ) );
3633  goto err_register_netdev;
3634  }
3635 
3636  /* Register non-volatile options */
3637  if ( ( rc = register_nvo ( &port->nvo,
3638  netdev_settings ( netdev ) ) ) != 0 ) {
3639  DBGC ( hermon, "Hermon %p port %d could not register non-"
3640  "volatile options: %s\n",
3641  hermon, ibdev->port, strerror ( rc ) );
3642  goto err_register_nvo;
3643  }
3644 
3645  return 0;
3646 
3647  unregister_nvo ( &port->nvo );
3648  err_register_nvo:
3649  unregister_netdev ( netdev );
3650  err_register_netdev:
3651  return rc;
3652 }
3653 
3654 /**
3655  * Handle Hermon Ethernet device port state change
3656  *
3657  * @v hermon Hermon device
3658  * @v port Hermon port
3659  * @v link_up Link is up
3660  */
3661 static void hermon_state_change_netdev ( struct hermon *hermon __unused,
3662  struct hermon_port *port,
3663  int link_up ) {
3664  struct net_device *netdev = port->netdev;
3665 
3666  if ( link_up ) {
3667  netdev_link_up ( netdev );
3668  } else {
3669  netdev_link_down ( netdev );
3670  }
3671 }
3672 
3673 /**
3674  * Unregister Hermon Ethernet device
3675  *
3676  * @v hermon Hermon device
3677  * @v port Hermon port
3678  */
3679 static void hermon_unregister_netdev ( struct hermon *hermon __unused,
3680  struct hermon_port *port ) {
3681  struct net_device *netdev = port->netdev;
3682 
3683  unregister_nvo ( &port->nvo );
3684  unregister_netdev ( netdev );
3685 }
3686 
3687 /** Hermon Ethernet port type */
3688 static struct hermon_port_type hermon_port_type_eth = {
3689  .register_dev = hermon_register_netdev,
3690  .state_change = hermon_state_change_netdev,
3691  .unregister_dev = hermon_unregister_netdev,
3692 };
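A port type here is effectively a small vtable: hermon_probe() and hermon_remove() later call port->type->register_dev() and port->type->unregister_dev() without caring whether the port ended up as Ethernet or Infiniband. A minimal standalone sketch of that dispatch pattern follows; the demo_* names are hypothetical and are not the iPXE structures.

#include <stdio.h>

struct demo_port;

struct demo_port_type {
	int ( * register_dev ) ( struct demo_port *port );
	void ( * unregister_dev ) ( struct demo_port *port );
};

struct demo_port {
	const char *name;
	struct demo_port_type *type;
};

static int eth_register ( struct demo_port *port ) {
	printf ( "%s: registered as Ethernet\n", port->name );
	return 0;
}

static void eth_unregister ( struct demo_port *port ) {
	printf ( "%s: unregistered\n", port->name );
}

static struct demo_port_type demo_port_type_eth = {
	.register_dev = eth_register,
	.unregister_dev = eth_unregister,
};

int main ( void ) {
	struct demo_port port = { .name = "port1", .type = &demo_port_type_eth };

	/* Dispatch through the port type, as hermon_probe() does */
	if ( port.type->register_dev ( &port ) == 0 )
		port.type->unregister_dev ( &port );
	return 0;
}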
3693 
3694 /***************************************************************************
3695  *
3696  * Port type detection
3697  *
3698  ***************************************************************************
3699  */
3700 
3701 /** Timeout for port sensing */
3702 #define HERMON_SENSE_PORT_TIMEOUT ( TICKS_PER_SEC / 2 )
3703 
3704 /**
3705  * Name port type
3706  *
3707  * @v port_type Port type
3708  * @ret port_type_name Port type name
3709  */
3710 static inline const char * hermon_name_port_type ( unsigned int port_type ) {
3711  switch ( port_type ) {
3712  case HERMON_PORT_TYPE_UNKNOWN: return "unknown";
3713  case HERMON_PORT_TYPE_IB: return "Infiniband";
3714  case HERMON_PORT_TYPE_ETH: return "Ethernet";
3715  default: return "INVALID";
3716  }
3717 }
3718 
3719 /**
3720  * Sense port type
3721  *
3722  * @v hermon Hermon device
3723  * @v port Hermon port
3724  * @ret port_type Port type, or negative error
3725  */
3726 static int hermon_sense_port_type ( struct hermon *hermon,
3727  struct hermon_port *port ) {
3728  struct ib_device *ibdev = port->ibdev;
3729  struct hermonprm_sense_port sense_port;
3730  int port_type;
3731  int rc;
3732 
3733  /* If DPDP is not supported, always assume Infiniband */
3734  if ( ! hermon->cap.dpdp ) {
3735  port_type = HERMON_PORT_TYPE_IB;
3736  DBGC ( hermon, "Hermon %p port %d does not support DPDP; "
3737  "assuming an %s network\n", hermon, ibdev->port,
3738  hermon_name_port_type ( port_type ) );
3739  return port_type;
3740  }
3741 
3742  /* Sense the port type */
3743  if ( ( rc = hermon_cmd_sense_port ( hermon, ibdev->port,
3744  &sense_port ) ) != 0 ) {
3745  DBGC ( hermon, "Hermon %p port %d sense failed: %s\n",
3746  hermon, ibdev->port, strerror ( rc ) );
3747  return rc;
3748  }
3749  port_type = MLX_GET ( &sense_port, port_type );
3750 
3751  DBGC ( hermon, "Hermon %p port %d sensed an %s network\n",
3752  hermon, ibdev->port, hermon_name_port_type ( port_type ) );
3753  return port_type;
3754 }
3755 
3756 /**
3757  * Set port type
3758  *
3759  * @v hermon Hermon device
3760  * @v port Hermon port
3761  * @ret rc Return status code
3762  */
3763 static int hermon_set_port_type ( struct hermon *hermon,
3764  struct hermon_port *port ) {
3765  struct ib_device *ibdev = port->ibdev;
3766  struct hermonprm_query_port_cap query_port;
3767  int ib_supported;
3768  int eth_supported;
3769  int port_type;
3770  unsigned long start;
3771  unsigned long elapsed;
3772  int rc;
3773 
3774  /* Check to see which types are supported */
3775  if ( ( rc = hermon_cmd_query_port ( hermon, ibdev->port,
3776  &query_port ) ) != 0 ) {
3777  DBGC ( hermon, "Hermon %p port %d could not query port: %s\n",
3778  hermon, ibdev->port, strerror ( rc ) );
3779  return rc;
3780  }
3781  ib_supported = MLX_GET ( &query_port, ib );
3782  eth_supported = MLX_GET ( &query_port, eth );
3783  DBGC ( hermon, "Hermon %p port %d supports%s%s%s\n",
3784  hermon, ibdev->port, ( ib_supported ? " Infiniband" : "" ),
3785  ( ( ib_supported && eth_supported ) ? " and" : "" ),
3786  ( eth_supported ? " Ethernet" : "" ) );
3787 
3788  /* Record Ethernet MAC address */
3789  port->eth_mac.part.h = htons ( MLX_GET ( &query_port, mac_47_32 ) );
3790  port->eth_mac.part.l = htonl ( MLX_GET ( &query_port, mac_31_0 ) );
3791 
3792  /* Sense network, if applicable */
3793  if ( ib_supported && eth_supported ) {
3794 
3795  /* Both types are supported; try sensing network */
3796  start = currticks();
3797  do {
3798  /* Try sensing port */
3799  port_type = hermon_sense_port_type ( hermon, port );
3800  if ( port_type < 0 ) {
3801  rc = port_type;
3802  return rc;
3803  }
3804 
3805  /* Avoid spamming debug output */
3806  mdelay ( 50 );
3807  } while ( ( port_type == HERMON_PORT_TYPE_UNKNOWN ) &&
3808  ( ( elapsed = ( currticks() - start ) ) <
3809  HERMON_SENSE_PORT_TIMEOUT ) );
3810 
3811  /* Set port type based on sensed network, defaulting
3812  * to Infiniband if nothing was sensed.
3813  */
3814  switch ( port_type ) {
3815  case HERMON_PORT_TYPE_ETH:
3816  port->type = &hermon_port_type_eth;
3817  break;
3818  case HERMON_PORT_TYPE_IB:
3819  case HERMON_PORT_TYPE_UNKNOWN:
3820  port->type = &hermon_port_type_ib;
3821  break;
3822  default:
3823  return -EINVAL;
3824  }
3825 
3826  } else if ( eth_supported ) {
3827  port->type = &hermon_port_type_eth;
3828  } else {
3829  port->type = &hermon_port_type_ib;
3830  }
3831 
3832  assert ( port->type != NULL );
3833  return 0;
3834 }
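The sensing loop above retries roughly every 50 ms until HERMON_SENSE_PORT_TIMEOUT (half a second of ticks) expires, so an unknown result is retried only a handful of times before the code falls back to Infiniband. A standalone sketch of the same bounded-retry pattern is shown below; currticks(), mdelay() and TICKS_PER_SEC are host-side stand-ins for the iPXE timer API, and sense_port() is hypothetical.

#include <stdio.h>
#include <time.h>

#define TICKS_PER_SEC   1000UL
#define SENSE_TIMEOUT   ( TICKS_PER_SEC / 2 )	/* mirrors HERMON_SENSE_PORT_TIMEOUT */

/* Host-side stand-ins for the iPXE tick counter and millisecond delay */
static unsigned long currticks ( void ) {
	struct timespec ts;

	clock_gettime ( CLOCK_MONOTONIC, &ts );
	return ( ts.tv_sec * TICKS_PER_SEC + ts.tv_nsec / 1000000UL );
}

static void mdelay ( unsigned long msecs ) {
	struct timespec ts = { 0, ( long ) msecs * 1000000L };

	nanosleep ( &ts, NULL );
}

/* Hypothetical sensing command; 0 plays the role of HERMON_PORT_TYPE_UNKNOWN */
static int sense_port ( void ) {
	return 0;
}

int main ( void ) {
	unsigned long start = currticks();
	unsigned long elapsed = 0;
	int port_type;

	do {
		port_type = sense_port();
		mdelay ( 50 );	/* avoid hammering the command interface */
	} while ( ( port_type == 0 ) &&
		  ( ( elapsed = ( currticks() - start ) ) < SENSE_TIMEOUT ) );

	printf ( "sensed type %d after %lu ticks\n", port_type, elapsed );
	return 0;
}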
3835 
3836 /***************************************************************************
3837  *
3838  * BOFM interface
3839  *
3840  ***************************************************************************
3841  */
3842 
3843 /**
3844  * Harvest Ethernet MAC for BOFM
3845  *
3846  * @v bofm BOFM device
3847  * @v mport Multi-port index
3848  * @v mac MAC to fill in
3849  * @ret rc Return status code
3850  */
3851 static int hermon_bofm_harvest ( struct bofm_device *bofm, unsigned int mport,
3852  uint8_t *mac ) {
3853  struct hermon *hermon = container_of ( bofm, struct hermon, bofm );
3854  struct hermonprm_mod_stat_cfg stat_cfg;
3855  union {
3856  uint8_t bytes[8];
3857  uint32_t dwords[2];
3858  } buf;
3859  int rc;
3860 
3861  /* Query static configuration */
3862  if ( ( rc = hermon_mod_stat_cfg ( hermon, mport,
3863  HERMON_MOD_STAT_CFG_QUERY,
3864  HERMON_MOD_STAT_CFG_OFFSET ( mac_m ),
3865  &stat_cfg ) ) != 0 ) {
3866  DBGC ( hermon, "Hermon %p port %d could not query "
3867  "configuration: %s\n", hermon, mport, strerror ( rc ) );
3868  return rc;
3869  }
3870 
3871  /* Retrieve MAC address */
3872  buf.dwords[0] = htonl ( MLX_GET ( &stat_cfg, mac_high ) );
3873  buf.dwords[1] = htonl ( MLX_GET ( &stat_cfg, mac_low ) );
3874  memcpy ( mac, &buf.bytes[ sizeof ( buf.bytes ) - ETH_ALEN ],
3875  ETH_ALEN );
3876 
3877  DBGC ( hermon, "Hermon %p port %d harvested MAC address %s\n",
3878  hermon, mport, eth_ntoa ( mac ) );
3879 
3880  return 0;
3881 }
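Both BOFM helpers use the same 8-byte buffer trick: the firmware exposes the station address as two 32-bit fields (assumed here to hold the upper 16 bits in mac_high and the lower 32 bits in mac_low), so writing both dwords big-endian into an 8-byte buffer leaves the 6-byte MAC in the last ETH_ALEN bytes. A standalone sketch with made-up register values:

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>	/* htonl() */

#define ETH_ALEN 6

int main ( void ) {
	/* Hypothetical register contents for MAC 02:00:c9:12:34:56 */
	uint32_t mac_high = 0x0200;		/* upper 16 bits */
	uint32_t mac_low = 0xc9123456;		/* lower 32 bits */
	union {
		uint8_t bytes[8];
		uint32_t dwords[2];
	} buf;
	uint8_t mac[ETH_ALEN];
	unsigned int i;

	/* Store both halves big-endian, then keep the last six bytes */
	buf.dwords[0] = htonl ( mac_high );
	buf.dwords[1] = htonl ( mac_low );
	memcpy ( mac, &buf.bytes[ sizeof ( buf.bytes ) - ETH_ALEN ], ETH_ALEN );

	for ( i = 0 ; i < ETH_ALEN ; i++ )
		printf ( "%02x%s", mac[i], ( i < ETH_ALEN - 1 ) ? ":" : "\n" );
	return 0;
}

Running this prints 02:00:c9:12:34:56; hermon_bofm_update() simply performs the same packing in reverse before writing the configuration back.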
3882 
3883 /**
3884  * Update Ethernet MAC for BOFM
3885  *
3886  * @v bofm BOFM device
3887  * @v mport Multi-port index
3888  * @v mac New MAC to set
3889  * @ret rc Return status code
3890  */
3891 static int hermon_bofm_update ( struct bofm_device *bofm, unsigned int mport,
3892  const uint8_t *mac ) {
3893  struct hermon *hermon = container_of ( bofm, struct hermon, bofm );
3894  struct hermonprm_mod_stat_cfg stat_cfg;
3895  union {
3896  uint8_t bytes[8];
3897  uint32_t dwords[2];
3898  } buf;
3899  int rc;
3900 
3901  /* Prepare MAC address */
3902  memset ( &buf, 0, sizeof ( buf ) );
3903  memcpy ( &buf.bytes[ sizeof ( buf.bytes ) - ETH_ALEN ], mac,
3904  ETH_ALEN );
3905 
3906  /* Modify static configuration */
3907  memset ( &stat_cfg, 0, sizeof ( stat_cfg ) );
3908  MLX_FILL_2 ( &stat_cfg, 36,
3909  mac_m, 1,
3910  mac_high, ntohl ( buf.dwords[0] ) );
3911  MLX_FILL_1 ( &stat_cfg, 37, mac_low, ntohl ( buf.dwords[1] ) );
3912  if ( ( rc = hermon_mod_stat_cfg ( hermon, mport,
3913  HERMON_MOD_STAT_CFG_SET,
3914  HERMON_MOD_STAT_CFG_OFFSET ( mac_m ),
3915  &stat_cfg ) ) != 0 ) {
3916  DBGC ( hermon, "Hermon %p port %d could not modify "
3917  "configuration: %s\n", hermon, mport, strerror ( rc ) );
3918  return rc;
3919  }
3920 
3921  DBGC ( hermon, "Hermon %p port %d updated MAC address to %s\n",
3922  hermon, mport, eth_ntoa ( mac ) );
3923 
3924  return 0;
3925 }
3926 
3927 /** Hermon BOFM operations */
3928 static struct bofm_operations hermon_bofm_operations = {
3929  .harvest = hermon_bofm_harvest,
3930  .update = hermon_bofm_update,
3931 };
3932 
3933 /***************************************************************************
3934  *
3935  * PCI interface
3936  *
3937  ***************************************************************************
3938  */
3939 
3940 /**
3941  * Allocate Hermon device
3942  *
3943  * @ret hermon Hermon device, or NULL on allocation failure
3946  */
3947 static struct hermon * hermon_alloc ( void ) {
3948  struct hermon *hermon;
3949 
3950  /* Allocate Hermon device */
3951  hermon = zalloc ( sizeof ( *hermon ) );
3952  if ( ! hermon )
3953  goto err_hermon;
3954 
3955  /* Allocate space for mailboxes */
3956  hermon->mailbox_in = malloc_phys ( HERMON_MBOX_SIZE,
3957  HERMON_MBOX_ALIGN );
3958  if ( ! hermon->mailbox_in )
3959  goto err_mailbox_in;
3960  hermon->mailbox_out = malloc_phys ( HERMON_MBOX_SIZE,
3961  HERMON_MBOX_ALIGN );
3962  if ( ! hermon->mailbox_out )
3963  goto err_mailbox_out;
3964 
3965  return hermon;
3966 
3967  free_phys ( hermon->mailbox_out, HERMON_MBOX_SIZE );
3968  err_mailbox_out:
3969  free_phys ( hermon->mailbox_in, HERMON_MBOX_SIZE );
3970  err_mailbox_in:
3971  free ( hermon );
3972  err_hermon:
3973  return NULL;
3974 }
3975 
3976 /**
3977  * Free Hermon device
3978  *
3979  * @v hermon Hermon device
3980  */
3981 static void hermon_free ( struct hermon *hermon ) {
3982 
3983  ufree ( hermon->icm );
3984  ufree ( hermon->firmware_area );
3985  free_phys ( hermon->mailbox_out, HERMON_MBOX_SIZE );
3986  free_phys ( hermon->mailbox_in, HERMON_MBOX_SIZE );
3987  free ( hermon );
3988 }
3989 
3990 /**
3991  * Probe PCI device
3992  *
3993  * @v pci PCI device
3995  * @ret rc Return status code
3996  */
3997 static int hermon_probe ( struct pci_device *pci ) {
3998  struct hermon *hermon;
3999  struct ib_device *ibdev;
4000  struct net_device *netdev;
4001  struct hermon_port *port;
4002  unsigned long config;
4003  unsigned long uar;
4004  unsigned int i;
4005  int rc;
4006 
4007  /* Allocate Hermon device */
4008  hermon = hermon_alloc();
4009  if ( ! hermon ) {
4010  rc = -ENOMEM;
4011  goto err_alloc;
4012  }
4013  pci_set_drvdata ( pci, hermon );
4014  hermon->pci = pci;
4015 
4016  /* Fix up PCI device */
4017  adjust_pci_device ( pci );
4018 
4019  /* Map PCI BARs */
4020  config = pci_bar_start ( pci, HERMON_PCI_CONFIG_BAR );
4021  hermon->config = pci_ioremap ( pci, config,
4022  HERMON_PCI_CONFIG_BAR_SIZE );
4023  uar = pci_bar_start ( pci, HERMON_PCI_UAR_BAR );
4024  hermon->uar = pci_ioremap ( pci, uar,
4025  HERMON_UAR_NON_EQ_PAGE * HERMON_PAGE_SIZE );
4026 
4027  /* Reset device */
4028  if ( ( rc = hermon_reset ( hermon ) ) != 0 )
4029  goto err_reset;
4030 
4031  /* Start firmware */
4032  if ( ( rc = hermon_start_firmware ( hermon ) ) != 0 )
4033  goto err_start_firmware;
4034 
4035  /* Get device limits */
4036  if ( ( rc = hermon_get_cap ( hermon ) ) != 0 )
4037  goto err_get_cap;
4038 
4039  /* Allocate Infiniband devices */
4040  for ( i = 0 ; i < hermon->cap.num_ports ; i++ ) {
4041  ibdev = alloc_ibdev ( 0 );
4042  if ( ! ibdev ) {
4043  rc = -ENOMEM;
4044  goto err_alloc_ibdev;
4045  }
4046  hermon->port[i].ibdev = ibdev;
4047  ibdev->op = &hermon_ib_operations;
4048  ibdev->dev = &pci->dev;
4049  ibdev->port = ( HERMON_PORT_BASE + i );
4050  ibdev->ports = hermon->cap.num_ports;
4051  ib_set_drvdata ( ibdev, hermon );
4052  }
4053 
4054  /* Allocate network devices */
4055  for ( i = 0 ; i < hermon->cap.num_ports ; i++ ) {
4056  netdev = alloc_etherdev ( 0 );
4057  if ( ! netdev ) {
4058  rc = -ENOMEM;
4059  goto err_alloc_netdev;
4060  }
4061  hermon->port[i].netdev = netdev;
4062  netdev_init ( netdev, &hermon_eth_operations );
4063  netdev->dev = &pci->dev;
4064  netdev->priv = &hermon->port[i];
4065  }
4066 
4067  /* Start device */
4068  if ( ( rc = hermon_start ( hermon, 1 ) ) != 0 )
4069  goto err_start;
4070 
4071  /* Determine port types */
4072  for ( i = 0 ; i < hermon->cap.num_ports ; i++ ) {
4073  port = &hermon->port[i];
4074  if ( ( rc = hermon_set_port_type ( hermon, port ) ) != 0 )
4075  goto err_set_port_type;
4076  }
4077 
4078  /* Initialise non-volatile storage */
4079  nvs_vpd_init ( &hermon->nvsvpd, pci );
4080  for ( i = 0 ; i < hermon->cap.num_ports ; i++ ) {
4081  port = &hermon->port[i];
4082  nvs_vpd_nvo_init ( &hermon->nvsvpd,
4083  HERMON_VPD_FIELD ( port->ibdev->port ),
4084  &port->nvo, NULL );
4085  }
4086 
4087  /* Register devices */
4088  for ( i = 0 ; i < hermon->cap.num_ports ; i++ ) {
4089  port = &hermon->port[i];
4090  if ( ( rc = port->type->register_dev ( hermon, port ) ) != 0 )
4091  goto err_register;
4092  }
4093 
4094  /* Leave device quiescent until opened */
4095  if ( hermon->open_count == 0 )
4096  hermon_stop ( hermon );
4097 
4098  return 0;
4099 
4100  i = hermon->cap.num_ports;
4101  err_register:
4102  for ( i-- ; ( signed int ) i >= 0 ; i-- ) {
4103  port = &hermon->port[i];
4104  port->type->unregister_dev ( hermon, port );
4105  }
4106  err_set_port_type:
4107  hermon_stop ( hermon );
4108  err_start:
4109  i = hermon->cap.num_ports;
4110  err_alloc_netdev:
4111  for ( i-- ; ( signed int ) i >= 0 ; i-- ) {
4112  netdev_nullify ( hermon->port[i].netdev );
4113  netdev_put ( hermon->port[i].netdev );
4114  }
4115  i = hermon->cap.num_ports;
4116  err_alloc_ibdev:
4117  for ( i-- ; ( signed int ) i >= 0 ; i-- )
4118  ibdev_put ( hermon->port[i].ibdev );
4119  err_get_cap:
4120  hermon_stop_firmware ( hermon );
4121  err_start_firmware:
4122  err_reset:
4123  iounmap ( hermon->uar );
4124  iounmap ( hermon->config );
4125  hermon_free ( hermon );
4126  err_alloc:
4127  return rc;
4128 }
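The cleanup loops in the error path above decrement an unsigned counter and compare with ( signed int ) i >= 0. The cast matters: for an unsigned i the plain comparison i >= 0 is always true, so once i wrapped past zero the loop would never terminate. A minimal standalone demonstration of the same reverse-cleanup pattern, assuming the usual two's-complement conversion that the driver also relies on:

#include <stdio.h>

#define NUM_PORTS 2

int main ( void ) {
	unsigned int i;

	/* Pretend all NUM_PORTS registrations succeeded, then unwind */
	i = NUM_PORTS;
	for ( i-- ; ( signed int ) i >= 0 ; i-- )
		printf ( "unregister port %u\n", i );

	/* Without the cast, ( i >= 0 ) would be true even after the
	 * decrement from 0 wraps the unsigned counter to UINT_MAX, and
	 * the loop would never terminate.
	 */
	return 0;
}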
4129 
4130 /**
4131  * Remove PCI device
4132  *
4133  * @v pci PCI device
4134  */
4135 static void hermon_remove ( struct pci_device *pci ) {
4136  struct hermon *hermon = pci_get_drvdata ( pci );
4137  struct hermon_port *port;
4138  int i;
4139 
4140  for ( i = ( hermon->cap.num_ports - 1 ) ; i >= 0 ; i-- ) {
4141  port = &hermon->port[i];
4142  port->type->unregister_dev ( hermon, port );
4143  }
4144  for ( i = ( hermon->cap.num_ports - 1 ) ; i >= 0 ; i-- ) {
4145  netdev_nullify ( hermon->port[i].netdev );
4146  netdev_put ( hermon->port[i].netdev );
4147  }
4148  for ( i = ( hermon->cap.num_ports - 1 ) ; i >= 0 ; i-- )
4149  ibdev_put ( hermon->port[i].ibdev );
4150  iounmap ( hermon->uar );
4151  iounmap ( hermon->config );
4152  hermon_free ( hermon );
4153 }
4154 
4155 /**
4156  * Probe PCI device for BOFM
4157  *
4158  * @v pci PCI device
4160  * @ret rc Return status code
4161  */
4162 static int hermon_bofm_probe ( struct pci_device *pci ) {
4163  struct hermon *hermon;
4164  unsigned long config;
4165  int rc;
4166 
4167  /* Allocate Hermon device */
4168  hermon = hermon_alloc();
4169  if ( ! hermon ) {
4170  rc = -ENOMEM;
4171  goto err_alloc;
4172  }
4173  pci_set_drvdata ( pci, hermon );
4174  hermon->pci = pci;
4175 
4176  /* Fix up PCI device */
4177  adjust_pci_device ( pci );
4178 
4179  /* Map PCI BAR */
4180  config = pci_bar_start ( pci, HERMON_PCI_CONFIG_BAR );
4181  hermon->config = pci_ioremap ( pci, config,
4182  HERMON_PCI_CONFIG_BAR_SIZE );
4183 
4184  /* Initialise BOFM device */
4185  bofm_init ( &hermon->bofm, pci, &hermon_bofm_operations );
4186 
4187  /* Register BOFM device */
4188  if ( ( rc = bofm_register ( &hermon->bofm ) ) != 0 ) {
4189  DBGC ( hermon, "Hermon %p could not register BOFM device: "
4190  "%s\n", hermon, strerror ( rc ) );
4191  goto err_bofm_register;
4192  }
4193 
4194  return 0;
4195 
4196  err_bofm_register:
4197  iounmap ( hermon->config );
4198  hermon_free ( hermon );
4199  err_alloc:
4200  return rc;
4201 }
4202 
4203 /**
4204  * Remove PCI device for BOFM
4205  *
4206  * @v pci PCI device
4207  */
4208 static void hermon_bofm_remove ( struct pci_device *pci ) {
4209  struct hermon *hermon = pci_get_drvdata ( pci );
4210 
4211  bofm_unregister ( &hermon->bofm );
4212  iounmap ( hermon->config );
4213  hermon_free ( hermon );
4214 }
4215 
4216 static struct pci_device_id hermon_nics[] = {
4217  /* Mellanox ConnectX-3 VPI (ethernet + infiniband) */
4218  PCI_ROM ( 0x15b3, 0x1003, "mt4099", "ConnectX-3 HCA driver", 0 ),
4219  PCI_ROM ( 0x15b3, 0x1007, "mt4103", "ConnectX-3 Pro HCA driver", 0 ),
4220  /* Mellanox ConnectX VPI (ethernet + infiniband) */
4221  PCI_ROM ( 0x15b3, 0x6340, "mt25408", "MT25408 HCA driver", 0 ),
4222  PCI_ROM ( 0x15b3, 0x634a, "mt25418", "MT25418 HCA driver", 0 ),
4223 
4224  /* Mellanox ConnectX EN (ethernet only) */
4225  PCI_ROM ( 0x15b3, 0x6368, "mt25448", "MT25448 HCA driver", 0 ),
4226  PCI_ROM ( 0x15b3, 0x6372, "mt25458", "MT25458 HCA driver", 0 ),
4227 
4228  /* Mellanox ConnectX-2 VPI (ethernet + infiniband) */
4229  PCI_ROM ( 0x15b3, 0x6732, "mt26418", "MT26418 HCA driver", 0 ),
4230  PCI_ROM ( 0x15b3, 0x673c, "mt26428", "MT26428 HCA driver", 0 ),
4231  PCI_ROM ( 0x15b3, 0x6746, "mt26438", "MT26438 HCA driver", 0 ),
4232 
4233  /* Mellanox ConnectX-2 EN (ethernet only) */
4234  PCI_ROM ( 0x15b3, 0x6750, "mt26448", "MT26448 HCA driver", 0 ),
4235  PCI_ROM ( 0x15b3, 0x675a, "mt26458", "MT26458 HCA driver", 0 ),
4236  PCI_ROM ( 0x15b3, 0x6764, "mt26468", "MT26468 HCA driver", 0 ),
4237  PCI_ROM ( 0x15b3, 0x676e, "mt26478", "MT26478 HCA driver", 0 ),
4238  PCI_ROM ( 0x15b3, 0x6778, "mt26488", "MT26488 HCA driver", 0 ),
4239 };
4240 
4241 struct pci_driver hermon_driver __pci_driver = {
4242  .ids = hermon_nics,
4243  .id_count = ( sizeof ( hermon_nics ) / sizeof ( hermon_nics[0] ) ),
4244  .probe = hermon_probe,
4245  .remove = hermon_remove,
4246 };
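hermon_driver simply points the PCI core at hermon_nics[] and at hermon_probe()/hermon_remove(); the core walks the ID table and calls the probe method for any matching vendor/device pair. A cut-down standalone sketch of that table match is shown below; the demo_* names are hypothetical, and the IDs are taken from the table above.

#include <stdio.h>
#include <stdint.h>

struct demo_pci_id {
	uint16_t vendor;
	uint16_t device;
	const char *name;
};

static struct demo_pci_id demo_ids[] = {
	{ 0x15b3, 0x1003, "mt4099" },
	{ 0x15b3, 0x6340, "mt25408" },
};

/* Return the ROM name for a matching (vendor, device) pair, or NULL */
static const char * demo_match ( uint16_t vendor, uint16_t device ) {
	unsigned int i;

	for ( i = 0 ; i < ( sizeof ( demo_ids ) / sizeof ( demo_ids[0] ) ) ; i++ ) {
		if ( ( demo_ids[i].vendor == vendor ) &&
		     ( demo_ids[i].device == device ) )
			return demo_ids[i].name;
	}
	return NULL;
}

int main ( void ) {
	const char *name = demo_match ( 0x15b3, 0x6340 );

	printf ( "match: %s\n", name ? name : "none" );
	return 0;
}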
4247 
4248 struct pci_driver hermon_bofm_driver __bofm_driver = {
4249  .ids = hermon_nics,
4250  .id_count = ( sizeof ( hermon_nics ) / sizeof ( hermon_nics[0] ) ),
4251  .probe = hermon_bofm_probe,
4252  .remove = hermon_bofm_remove,
4253 };
void unregister_ibdev(struct ib_device *ibdev)
Unregister Infiniband device.
Definition: infiniband.c:985
#define HERMON_MTU_ETH
Definition: hermon.h:101
static int hermon_cmd_query_dev_cap(struct hermon *hermon, struct hermonprm_query_dev_cap *dev_cap)
Definition: hermon.c:269
size_t eqe_size
Size of event queue.
Definition: hermon.h:780
#define HERMON_PORT_BASE
Definition: hermon.h:28
static void hermon_ib_close(struct ib_device *ibdev)
Close Infiniband link.
Definition: hermon.c:3143
uint32_t c
Definition: md4.c:30
#define __attribute__(x)
Definition: compiler.h:10
struct hermonprm_port_mgmnt_change_event port_mgmnt_change
Definition: hermon.h:542
#define EINVAL
Invalid argument.
Definition: errno.h:428
static __always_inline void ib_set_drvdata(struct ib_device *ibdev, void *priv)
Set Infiniband device driver-private data.
Definition: infiniband.h:697
#define HERMON_SCHED_DEFAULT
Definition: hermon.h:129
void ib_poll_eq(struct ib_device *ibdev)
Poll event queue.
Definition: infiniband.c:878
iPXE I/O API
static void hermon_event_port_state_change(struct hermon *hermon, union hermonprm_event_entry *eqe)
Handle port state event.
Definition: hermon.c:2134
struct arbelprm_rc_send_wqe rc
Definition: arbel.h:14
#define HERMON_HCR_OUT_LEN(_command)
Definition: hermon.h:978
struct ib_device * ibdev
Infiniband device.
Definition: hermon.h:848
Infiniband protocol.
#define MLX_FILL_7(_ptr, _index,...)
Definition: mlx_bitops.h:191
struct net_device * netdev
Network device.
Definition: hermon.h:850
void pci_restore(struct pci_device *pci, struct pci_config_backup *backup, unsigned int limit, const uint8_t *exclude)
Restore PCI configuration space.
Definition: pcibackup.c:87
unsigned short uint16_t
Definition: stdint.h:11
uint32_t low
Low 16 bits of address.
Definition: myson.h:19
static int hermon_cmd_unmap_icm_aux(struct hermon *hermon)
Definition: hermon.c:566
struct hermonprm_set_port_rqp_calc rqp_calc
Definition: hermon.h:559
#define MLX_FILL_2(_ptr, _index,...)
Definition: mlx_bitops.h:171
struct hermon_send_work_queue send
Send work queue.
Definition: hermon.h:744
static int hermon_cmd_sw2hw_cq(struct hermon *hermon, unsigned long cqn, const struct hermonprm_completion_queue_context *cqctx)
Definition: hermon.c:387
struct hermon_mtt mtt
MTT descriptor.
Definition: hermon.h:764
static int hermon_cmd_sw2hw_eq(struct hermon *hermon, unsigned int index, const struct hermonprm_eqc *eqctx)
Definition: hermon.c:360
#define iob_put(iobuf, len)
Definition: iobuf.h:120
static int hermon_post_send(struct ib_device *ibdev, struct ib_queue_pair *qp, struct ib_address_vector *dest, struct io_buffer *iobuf)
Post send work queue entry.
Definition: hermon.c:1629
#define MLX_FILL_4(_ptr, _index,...)
Definition: mlx_bitops.h:179
static void hermon_unmap_icm(struct hermon *hermon)
Unmap ICM.
Definition: hermon.c:2800
#define IB_QPN_SMI
Subnet management interface QPN.
Definition: infiniband.h:21
#define HERMON_PCI_CONFIG_BAR_SIZE
Definition: hermon.h:32
static void bofm_init(struct bofm_device *bofm, struct pci_device *pci, struct bofm_operations *op)
Initialise BOFM device.
Definition: bofm.h:339
static void hermon_eth_complete_send(struct ib_device *ibdev __unused, struct ib_queue_pair *qp, struct io_buffer *iobuf, int rc)
Handle Hermon Ethernet device send completion.
Definition: hermon.c:3411
static int hermon_sense_port_type(struct hermon *hermon, struct hermon_port *port)
Sense port type.
Definition: hermon.c:3726
#define HERMON_CMPT_MAX_ENTRIES
Number of cMPT entries of each type.
Definition: hermon.h:610
#define HERMON_HCR_BASE
Definition: hermon.h:957
void nvs_vpd_nvo_init(struct nvs_vpd_device *nvsvpd, unsigned int field, struct nvo_block *nvo, struct refcnt *refcnt)
Initialise non-volatile option storage within NVS VPD device.
Definition: nvsvpd.c:220
#define IB_MTU_2048
Definition: ib_mad.h:162
static int hermon_cmd_rst2init_qp(struct hermon *hermon, unsigned long qpn, const struct hermonprm_qp_ee_state_transitions *ctx)
Definition: hermon.c:414
struct hermonprm_wqe_segment_data_ptr data[HERMON_MAX_GATHER]
Definition: hermon.h:520
#define HERMON_HCR_READ_MCG
Definition: hermon.h:72
struct hermonprm_event_db_register event
Definition: hermon.h:547
static int hermon_cmd_wait(struct hermon *hermon, struct hermonprm_hca_command_register *hcr)
Wait for Hermon command completion.
Definition: hermon.c:136
int nvs_vpd_init(struct nvs_vpd_device *nvsvpd, struct pci_device *pci)
Initialise NVS VPD device.
Definition: nvsvpd.c:178
A PCI driver.
Definition: pci.h:247
#define EBUSY
Device or resource busy.
Definition: errno.h:338
#define HERMON_OPCODE_NOP
Definition: hermon.h:41
size_t auxc_entry_size
Auxiliary context entry size.
Definition: hermon.h:580
static int ib_is_open(struct ib_device *ibdev)
Check whether or not Infiniband device is open.
Definition: infiniband.h:576
#define MLX_FILL_8(_ptr, _index,...)
Definition: mlx_bitops.h:195
struct hermon_recv_work_queue recv
Receive work queue.
Definition: hermon.h:746
#define HERMON_HCR_INIT_PORT
Definition: hermon.h:51
Infiniband device operations.
Definition: infiniband.h:254
#define HERMON_MOD_STAT_CFG_OFFSET(field)
Calculate offset within static configuration.
Definition: hermon.c:723
__be32 in[4]
Definition: CIB_PRM.h:35
static int hermon_alloc_mtt(struct hermon *hermon, const void *memory, size_t len, struct hermon_mtt *mtt)
Allocate MTT entries.
Definition: hermon.c:624
uint8_t state
State.
Definition: eth_slow.h:47
static int hermon_cmd_query_fw(struct hermon *hermon, struct hermonprm_query_fw *fw)
Definition: hermon.c:278
void * doorbell
Doorbell register.
Definition: hermon.h:682
#define HERMON_MOD_STAT_CFG_SET
Definition: hermon.h:139
static unsigned int unsigned int bit
Definition: bigint.h:208
struct hermonprm_send_db_register send
Definition: hermon.h:546
static struct ib_completion_queue_operations hermon_eth_cq_op
Hermon Ethernet device completion operations.
Definition: hermon.c:3449
int(* open)(struct net_device *netdev)
Open network device.
Definition: netdevice.h:222
struct hermonprm_recv_wqe recv
Definition: hermon.h:690
uint8_t opcode
Opcode.
Definition: ena.h:16
#define HERMON_MKEY_PREFIX
Memory key prefix.
Definition: hermon.h:943
static int hermon_dump_eqctx(struct hermon *hermon, struct hermon_event_queue *hermon_eq)
Dump event queue context (for debugging only)
Definition: hermon.c:1920
#define HERMON_ETH_NUM_SEND_WQES
Number of Hermon Ethernet send work queue entries.
Definition: hermon.c:3365
static int hermon_cmd_write_mtt(struct hermon *hermon, const struct hermonprm_write_mtt *write_mtt)
Definition: hermon.c:342
static int hermon_cmd_set_icm_size(struct hermon *hermon, const struct hermonprm_scalar_parameter *icm_size, struct hermonprm_scalar_parameter *icm_aux_size)
Definition: hermon.c:582
#define HERMON_OPCODE_SEND
Definition: hermon.h:42
size_t cqc_entry_size
CQ context entry size.
Definition: hermon.h:588
static int hermon_mcast_attach(struct ib_device *ibdev, struct ib_queue_pair *qp, union ib_gid *gid)
Attach to multicast group.
Definition: hermon.c:3194
static int hermon_cmd_query_qp(struct hermon *hermon, unsigned long qpn, struct hermonprm_qp_ee_state_transitions *ctx)
Definition: hermon.c:457
Error codes.
#define HERMON_PORT_TYPE_ETH
Definition: hermon.h:97
void * mailbox_in
Command input mailbox.
Definition: hermon.h:875
struct golan_inbox_hdr hdr
Message header.
Definition: CIB_PRM.h:28
u8 owner
Definition: CIB_PRM.h:36
#define HERMON_SET_PORT_RECEIVE_QP
Definition: hermon.h:119
static int hermon_modify_qp(struct ib_device *ibdev, struct ib_queue_pair *qp)
Modify queue pair.
Definition: hermon.c:1312
static void hermon_event_port_mgmnt_change(struct hermon *hermon, union hermonprm_event_entry *eqe)
Handle port management event.
Definition: hermon.c:2163
A command-line command.
Definition: command.h:9
#define HERMON_MAX_EQS
Maximum number of allocatable event queues.
Definition: hermon.h:773
I/O buffers.
#define HERMON_LOG_MULTICAST_HASH_SIZE
Definition: hermon.h:131
#define DBG_ENABLE(level)
Definition: compiler.h:313
struct pci_device_id * ids
PCI ID table.
Definition: pci.h:249
static unsigned short vendor
Definition: davicom.c:128
Non-Volatile Storage using Vital Product Data.
int register_nvo(struct nvo_block *nvo, struct settings *parent)
Register non-volatile stored options.
Definition: nvo.c:293
size_t mtt_entry_size
MTT entry size.
Definition: hermon.h:596
uint32_t g
Definition: sha256.c:34
uint32_t readl(volatile uint32_t *io_addr)
Read 32-bit dword from memory-mapped device.
int ib_modify_qp(struct ib_device *ibdev, struct ib_queue_pair *qp)
Modify queue pair.
Definition: infiniband.c:294
static void hermon_destroy_eq(struct hermon *hermon)
Destroy event queue.
Definition: hermon.c:2095
unsigned long user_to_phys(userptr_t userptr, off_t offset)
Convert user pointer to physical address.
int ib_create_cq(struct ib_device *ibdev, unsigned int num_cqes, struct ib_completion_queue_operations *op, struct ib_completion_queue **new_cq)
Create completion queue.
Definition: infiniband.c:98
#define FCOE_VLAN_PRIORITY
FCoE VLAN priority.
Definition: fcoe.h:90
void ib_refill_recv(struct ib_device *ibdev, struct ib_queue_pair *qp)
Refill receive work queue.
Definition: infiniband.c:556
#define DBGC(...)
Definition: compiler.h:505
unsigned long last_poll
Last unsolicited link state poll.
Definition: hermon.h:908
static struct hermon_port_type hermon_port_type_ib
Hermon Infiniband port type.
Definition: hermon.c:3351
__be32 byte_count
Definition: CIB_PRM.h:28
static int hermon_cmd_mod_stat_cfg(struct hermon *hermon, unsigned int mode, unsigned int input_mod, struct hermonprm_scalar_parameter *portion)
Definition: hermon.c:512
struct pci_driver hermon_bofm_driver __bofm_driver
Definition: hermon.c:4248
struct io_buffer *(* alloc_iob)(size_t len)
Allocate receive I/O buffer.
Definition: infiniband.h:153
static int hermon_cmd_query_eq(struct hermon *hermon, unsigned int index, struct hermonprm_eqc *eqctx)
Definition: hermon.c:378
static void hermon_poll_eq(struct ib_device *ibdev)
Poll event queue.
Definition: hermon.c:2188
struct device * dev
Underlying device.
Definition: infiniband.h:410
static int hermon_start(struct hermon *hermon, int running)
Start Hermon device.
Definition: hermon.c:2966
unsigned long long uint64_t
Definition: stdint.h:13
#define DBG_DISABLE(level)
Definition: compiler.h:312
static void *__malloc malloc_phys(size_t size, size_t phys_align)
Allocate memory with specified physical alignment.
Definition: malloc.h:62
unsigned long special_qpn_base
Special QPN base.
Definition: hermon.h:925
static __always_inline void * ib_qp_get_drvdata(struct ib_queue_pair *qp)
Get Infiniband queue pair driver-private data.
Definition: infiniband.h:642
#define ntohl(value)
Definition: byteswap.h:134
#define HERMON_PAGE_SIZE
Definition: hermon.h:105
static void hermon_stop_firmware(struct hermon *hermon)
Stop firmware running.
Definition: hermon.c:2418
#define HERMON_RETRY_MAX
Definition: hermon.h:137
static void hermon_destroy_cq(struct ib_device *ibdev, struct ib_completion_queue *cq)
Destroy completion queue.
Definition: hermon.c:965
int pci_read_config_word(struct pci_device *pci, unsigned int where, uint16_t *value)
Read 16-bit word from PCI configuration space.
void netdev_link_down(struct net_device *netdev)
Mark network device as having link down.
Definition: netdevice.c:230
struct ib_global_route_header grh
Definition: ib_packet.h:16
A Hermon send work queue entry.
Definition: hermon.h:659
#define HERMON_HCR_QUERY_PORT
Definition: hermon.h:76
#define ntohs(value)
Definition: byteswap.h:136
static int hermon_set_port_type(struct hermon *hermon, struct hermon_port *port)
Set port type.
Definition: hermon.c:3763
static int hermon_cmd_map_icm(struct hermon *hermon, const struct hermonprm_virtual_physical_mapping *map)
Definition: hermon.c:557
#define HERMON_DB_POST_SND_OFFSET
Definition: hermon.h:107
#define HERMON_OPCODE_RECV_ERROR
Definition: hermon.h:43
#define HERMON_PORT_TYPE_IB
Definition: hermon.h:96
A Hermon completion queue.
Definition: hermon.h:758
#define offsetof(type, field)
Get offset of a field within a structure.
Definition: stddef.h:24
uint8_t mac[ETH_ALEN]
MAC address.
Definition: ena.h:24
unsigned int gid_present
GID is present.
Definition: infiniband.h:90
#define HERMON_HCR_MAP_ICM
Definition: hermon.h:82
#define static_assert(x)
Assert a condition at build time.
Definition: assert.h:65
static int hermon_cmd_hw2sw_eq(struct hermon *hermon, unsigned int index, struct hermonprm_eqc *eqctx)
Definition: hermon.c:369
unsigned int vlan
VLAN, if present.
Definition: infiniband.h:96
static void iob_populate(struct io_buffer *iobuf, void *data, size_t len, size_t max_len)
Create a temporary I/O buffer.
Definition: iobuf.h:190
union hermonprm_event_entry * eqe
Event queue entries.
Definition: hermon.h:778
struct hermonprm_wqe_segment_ctrl_mlx ctrl
Definition: hermon.h:513
#define PCI_CONFIG_BACKUP_ALL
Limit of PCI configuration space.
Definition: pcibackup.h:15
#define HERMON_HCR_UNMAP_ICM_AUX
Definition: hermon.h:83
unsigned int reserved_cqs
Number of reserved CQs.
Definition: hermon.h:586
#define HERMON_ETH_NUM_CQES
Number of Hermon Ethernet completion entries.
Definition: hermon.c:3371
userptr_t firmware_area
Firmware area in external memory.
Definition: hermon.h:890
#define HERMON_PCI_UAR_BAR
Definition: hermon.h:33
size_t cqe_size
Size of completion queue.
Definition: hermon.h:762
#define HERMON_HCR_IN_LEN(_command)
Definition: hermon.h:977
static void hermon_free_qpn(struct ib_device *ibdev, struct ib_queue_pair *qp)
Free queue pair number.
Definition: hermon.c:1053
void adjust_pci_device(struct pci_device *pci)
Enable PCI device.
Definition: pci.c:154
struct io_buffer * alloc_iob(size_t len)
Allocate I/O buffer.
Definition: iobuf.c:129
An Infiniband Global Identifier.
Definition: ib_packet.h:33
#define HERMON_HCR_RTR2RTS_QP
Definition: hermon.h:66
static __always_inline unsigned long virt_to_phys(volatile const void *addr)
Convert virtual address to a physical address.
Definition: uaccess.h:287
struct hermonprm_qp_db_record * doorbell
Doorbell record.
Definition: hermon.h:705
__be32 qpn
Definition: CIB_PRM.h:29
enum hermon_queue_pair_state state
Queue state.
Definition: hermon.h:748
#define htonl(value)
Definition: byteswap.h:133
struct device dev
Generic device.
Definition: pci.h:208
static unsigned int hermon_fill_rc_send_wqe(struct ib_device *ibdev, struct ib_queue_pair *qp __unused, struct ib_address_vector *dest __unused, struct io_buffer *iobuf, union hermon_send_wqe *wqe)
Construct RC send work queue entry.
Definition: hermon.c:1553
static struct settings * netdev_settings(struct net_device *netdev)
Get per-netdevice configuration settings block.
Definition: netdevice.h:583
static int hermon_reset(struct hermon *hermon)
Reset device.
Definition: hermon.c:2831
struct hermonprm_set_port_general_context general
Definition: hermon.h:558
A Hermon port type.
Definition: hermon.h:809
__be32 out[4]
Definition: CIB_PRM.h:36
union ib_gid dgid
Destiniation GID.
Definition: ib_packet.h:106
#define ENOTSUP
Operation not supported.
Definition: errno.h:589
void ib_destroy_cq(struct ib_device *ibdev, struct ib_completion_queue *cq)
Destroy completion queue.
Definition: infiniband.c:145
static int hermon_dump_cqctx(struct hermon *hermon, struct ib_completion_queue *cq)
Dump completion queue context (for debugging only)
Definition: hermon.c:822
#define HERMON_SET_PORT_GENERAL_PARAM
Definition: hermon.h:118
static unsigned int hermon_fill_ud_send_wqe(struct ib_device *ibdev, struct ib_queue_pair *qp __unused, struct ib_address_vector *dest, struct io_buffer *iobuf, union hermon_send_wqe *wqe)
Construct UD send work queue entry.
Definition: hermon.c:1459
#define HERMON_INVALID_LKEY
Definition: hermon.h:103
enum ib_rate rate
Rate.
Definition: infiniband.h:86
Dynamic memory allocation.
#define HERMON_HCR_RTS2RTS_QP
Definition: hermon.h:67
#define HERMON_HCR_INOUT_CMD(_opcode, _in_mbox, _in_len, _out_mbox, _out_len)
Build HCR command from component parts.
Definition: hermon.h:981
struct bofm_device bofm
BOFM device.
Definition: hermon.h:936
Definition: hermon.h:534
struct sockaddr_tcpip st
Definition: syslog.c:56
union hermon_recv_wqe * wqe
Work queue entries.
Definition: hermon.h:697
struct hermonprm_wqe_segment_data_ptr data[HERMON_MAX_GATHER]
Definition: hermon.h:525
uint32_t start
Starting offset.
Definition: netvsc.h:12
struct hermonprm_wqe_segment_ctrl_send ctrl
Definition: hermon.h:524
static int hermon_cmd_write_mcg(struct hermon *hermon, unsigned int index, const struct hermonprm_mcg_entry *mcg)
Definition: hermon.c:493
Fibre Channel over Ethernet.
An Infiniband device.
Definition: infiniband.h:398
uint8_t status
Status.
Definition: ena.h:16
struct hermon_mtt mtt
MTT descriptor.
Definition: hermon.h:782
static void netdev_init(struct net_device *netdev, struct net_device_operations *op)
Initialise a network device.
Definition: netdevice.h:515
pseudo_bit_t ci[0x00020]
Definition: arbel.h:11
#define DBGCP_HDA(...)
Definition: compiler.h:540
#define MLX_FILL_3(_ptr, _index,...)
Definition: mlx_bitops.h:175
static void pci_set_drvdata(struct pci_device *pci, void *priv)
Set PCI driver-private data.
Definition: pci.h:359
static int hermon_cmd_rts2rts_qp(struct hermon *hermon, unsigned long qpn, const struct hermonprm_qp_ee_state_transitions *ctx)
Definition: hermon.c:441
#define HERMON_HCR_INIT_HCA
Definition: hermon.h:49
#define HERMON_ST_RC
Definition: hermon.h:90
#define ENOMEM
Not enough space.
Definition: errno.h:534
static int hermon_map_icm(struct hermon *hermon, struct hermonprm_init_hca *init_hca)
Map ICM (allocating if necessary)
Definition: hermon.c:2513
unsigned int mtt_base_addr
MTT base address.
Definition: hermon.h:650
static void hermon_free(struct hermon *hermon)
Free Hermon device.
Definition: hermon.c:3981
Infiniband completion queue operations.
Definition: infiniband.h:194
void * memcpy(void *dest, const void *src, size_t len) __nonnull
__be32 producer_counter
Definition: CIB_PRM.h:44
#define HERMON_HCR_IN_MBOX
Definition: hermon.h:974
Infiniband queue pair operations.
Definition: infiniband.h:147
hermon_bitmask_t cq_inuse[HERMON_BITMASK_SIZE(HERMON_MAX_CQS)]
Completion queue in-use bitmask.
Definition: hermon.h:916
unsigned int num_ports
Number of ports.
Definition: hermon.h:604
static int hermon_cmd_sw2hw_mpt(struct hermon *hermon, unsigned int index, const struct hermonprm_mpt *mpt)
Definition: hermon.c:326
static int hermon_cmd_rtr2rts_qp(struct hermon *hermon, unsigned long qpn, const struct hermonprm_qp_ee_state_transitions *ctx)
Definition: hermon.c:432
struct hermonprm_wqe_segment_data_ptr data[HERMON_MAX_GATHER]
Definition: hermon.h:514
static int hermon_open(struct hermon *hermon)
Open Hermon device.
Definition: hermon.c:3043
u8 port
Port number.
Definition: CIB_PRM.h:31
void * mailbox_out
Command output mailbox.
Definition: hermon.h:877
static __always_inline void * ib_get_drvdata(struct ib_device *ibdev)
Get Infiniband device driver-private data.
Definition: infiniband.h:708
static int hermon_start_firmware(struct hermon *hermon)
Start firmware running.
Definition: hermon.c:2351
uint32_t hermon_bitmask_t
A Hermon resource bitmask.
Definition: hermon.h:798
IBM BladeCenter Open Fabric Manager (BOFM)
static int hermon_cmd(struct hermon *hermon, unsigned long command, unsigned int op_mod, const void *in, unsigned int in_mod, void *out)
Issue HCA command.
Definition: hermon.c:162
int ib_smc_init(struct ib_device *ibdev, ib_local_mad_t local_mad)
Initialise Infiniband parameters using SMC.
Definition: ib_smc.c:232
static __always_inline unsigned long virt_to_bus(volatile const void *addr)
Convert virtual address to a bus address.
Definition: io.h:183
size_t wqe_size
Size of work queue buffer.
Definition: hermon.h:740
static int hermon_configure_special_qps(struct hermon *hermon)
Configure special queue pairs.
Definition: hermon.c:2936
struct hermonprm_eth_send_wqe eth
Definition: hermon.h:664
#define HERMON_MAP_EQ
Definition: hermon.h:115
userptr_t icm
ICM area.
Definition: hermon.h:903
#define be32_to_cpu(value)
Definition: byteswap.h:116
assert((readw(&hdr->flags) &(GTF_reading|GTF_writing))==0)
static void netdev_put(struct net_device *netdev)
Drop reference to network device.
Definition: netdevice.h:572
#define HERMON_SENSE_PORT_TIMEOUT
Timeout for port sensing.
Definition: hermon.c:3702
#define container_of(ptr, type, field)
Get containing structure.
Definition: stddef.h:35
#define HERMON_HCR_INIT2RTR_QP
Definition: hermon.h:65
Ethernet protocol.
struct hermonprm_rc_send_wqe rc
Definition: hermon.h:663
struct ib_device_operations * op
Infiniband operations.
Definition: infiniband.h:416
#define DBGLVL_LOG
Definition: compiler.h:316
struct hermonprm_wqe_segment_ctrl_send ctrl
Definition: hermon.h:507
uint64_t offset
Offset (virtual address within ICM)
Definition: hermon.h:615
struct hermonprm_event_queue_entry generic
Definition: hermon.h:540
An Infiniband Work Queue.
Definition: infiniband.h:100
#define ETH_FRAME_LEN
Definition: if_ether.h:11
static int hermon_bofm_update(struct bofm_device *bofm, unsigned int mport, const uint8_t *mac)
Update Ethernet MAC for BOFM.
Definition: hermon.c:3891
void * priv
Driver private data.
Definition: netdevice.h:431
#define DBGC_HDA(...)
Definition: compiler.h:506
size_t wqe_size
Size of work queue.
Definition: hermon.h:699
void * wqe
Work queue buffer.
Definition: hermon.h:738
static void netdev_link_up(struct net_device *netdev)
Mark network device as having link up.
Definition: netdevice.h:774
void ib_complete_send(struct ib_device *ibdev, struct ib_queue_pair *qp, struct io_buffer *iobuf, int rc)
Complete send work queue entry.
Definition: infiniband.c:515
void writel(uint32_t data, volatile uint32_t *io_addr)
Write 32-bit dword to memory-mapped device.
static int hermon_cmd_init_port(struct hermon *hermon, unsigned int port)
Definition: hermon.c:302
#define HERMON_PM_STATE_MIGRATED
Definition: hermon.h:135
static struct ib_device_operations hermon_ib_operations
Hermon Infiniband operations.
Definition: hermon.c:3275
__be16 wqe_counter
Definition: CIB_PRM.h:36
#define HERMON_SCHED_QP0
Definition: hermon.h:128
void udelay(unsigned long usecs)
Delay for a fixed number of microseconds.
Definition: timer.c:60
#define HERMON_HCR_MAD_IFC
Definition: hermon.h:71
static void hermon_destroy_qp(struct ib_device *ibdev, struct ib_queue_pair *qp)
Destroy queue pair.
Definition: hermon.c:1390
static int hermon_cmd_unmap_fa(struct hermon *hermon)
Definition: hermon.c:593
static userptr_t size_t offset
Offset of the first segment within the content.
Definition: deflate.h:259
static struct net_device * netdev
Definition: gdbudp.c:52
static int hermon_bofm_probe(struct pci_device *pci)
Probe PCI device for BOFM.
Definition: hermon.c:4162
u8 num_ports
Definition: CIB_PRM.h:61
static void hermon_eth_complete_recv(struct ib_device *ibdev __unused, struct ib_queue_pair *qp, struct ib_address_vector *dest __unused, struct ib_address_vector *source, struct io_buffer *iobuf, int rc)
Handle Hermon Ethernet device receive completion.
Definition: hermon.c:3429
__be32 num_pages
Definition: CIB_PRM.h:31
#define HERMON_HCR_UNMAP_ICM
Definition: hermon.h:81
unsigned long pci_bar_start(struct pci_device *pci, unsigned int reg)
Find the start of a PCI BAR.
Definition: pci.c:96
static int hermon_cmd_set_port(struct hermon *hermon, int is_ethernet, unsigned int port_selector, const union hermonprm_set_port *set_port)
Definition: hermon.c:316
static int hermon_mad(struct ib_device *ibdev, union ib_mad *mad)
Issue management datagram.
Definition: hermon.c:777
static const char * hermon_name_port_type(unsigned int port_type)
Name port type.
Definition: hermon.c:3710
An Infiniband Global Route Header.
Definition: ib_packet.h:89
#define HERMON_HCR_MOD_STAT_CFG
Definition: hermon.h:75
#define HERMON_RSVD_SPECIAL_QPS
Number of queue pairs reserved for the "special QP" block.
Definition: hermon.h:716
struct hermonprm_wqe_segment_ctrl_send ctrl
Definition: hermon.h:519
struct ib_work_queue * ib_find_wq(struct ib_completion_queue *cq, unsigned long qpn, int is_send)
Find work queue belonging to completion queue.
Definition: infiniband.c:396
void unregister_netdev(struct net_device *netdev)
Unregister network device.
Definition: netdevice.c:941
#define HERMON_PORT_TYPE_UNKNOWN
Definition: hermon.h:95
void unregister_nvo(struct nvo_block *nvo)
Unregister non-volatile stored options.
Definition: nvo.c:324
struct hermon_port_type * type
Port type.
Definition: hermon.h:858
A Hermon receive work queue.
Definition: hermon.h:695
unsigned int num_wqes
Number of work queue entries.
Definition: infiniband.h:112
static void * dest
Definition: strings.h:176
#define HERMON_HCR_QUERY_CQ
Definition: hermon.h:63
struct hermonprm_scalar_parameter mtt_base_addr
Definition: hermon.h:499
#define HERMON_QPN_RANDOM_MASK
Queue pair number randomisation mask.
Definition: hermon.h:725
#define HERMON_VPD_FIELD(port)
Definition: hermon.h:142
pseudo_bit_t value[0x00020]
Definition: arbel.h:13
struct hermonprm_completion_with_error error
Definition: hermon.h:536
uint32_t high
High 32 bits of address.
Definition: myson.h:20
#define HERMON_HCR_QUERY_EQ
Definition: hermon.h:60
static int hermon_cmd_hw2sw_cq(struct hermon *hermon, unsigned long cqn, struct hermonprm_completion_queue_context *cqctx)
Definition: hermon.c:396
#define DBGC2_HDA(...)
Definition: compiler.h:523
uint32_t rdma_key
RDMA key.
Definition: infiniband.h:456
#define HERMON_HCR_REG(x)
Definition: hermon.h:958
static int hermon_cmd_sense_port(struct hermon *hermon, unsigned int port, struct hermonprm_sense_port *port_type)
Definition: hermon.c:532
pseudo_bit_t hash[0x00010]
Hash algorithm.
Definition: arbel.h:13
Non-volatile stored options.
unsigned int port
Port number.
Definition: infiniband.h:418
static __always_inline void ibdev_put(struct ib_device *ibdev)
Drop reference to Infiniband device.
Definition: infiniband.h:598
static __always_inline void ib_cq_set_drvdata(struct ib_completion_queue *cq, void *priv)
Set Infiniband completion queue driver-private data.
Definition: infiniband.h:675
char * strerror(int errno)
Retrieve string representation of error number.
Definition: strerror.c:78
FILE_LICENCE(GPL2_OR_LATER)
union ib_gid sgid
Source GID.
Definition: ib_packet.h:104
unsigned int reserved_qps
Number of reserved QPs.
Definition: hermon.h:574
static void(* free)(struct refcnt *refcnt))
Definition: refcnt.h:54
struct hermon_event_queue eq
Event queue.
Definition: hermon.h:906
static __always_inline void * ib_qp_get_ownerdata(struct ib_queue_pair *qp)
Get Infiniband queue pair owner-private data.
Definition: infiniband.h:664
void * zalloc(size_t size)
Allocate cleared memory.
Definition: malloc.c:624
__be16 rlid
Definition: CIB_PRM.h:38
uint16_t cons
Consumer index.
Definition: ena.h:22
#define HERMON_HCR_QUERY_DEV_CAP
Definition: hermon.h:47
PCI bus.
static struct hermon_port_type hermon_port_type_eth
Hermon Ethernet port type.
Definition: hermon.c:3688
unsigned int reserved_mtts
Number of reserved MTTs.
Definition: hermon.h:594
A PCI device.
Definition: pci.h:206
int register_netdev(struct net_device *netdev)
Register network device.
Definition: netdevice.c:759
static size_t iob_len(struct io_buffer *iobuf)
Calculate length of data in an I/O buffer.
Definition: iobuf.h:155
const char * eth_ntoa(const void *ll_addr)
Transcribe Ethernet address.
Definition: ethernet.c:175
#define HERMON_UNMAP_EQ
Definition: hermon.h:116
struct pci_device * pci
PCI device.
Definition: hermon.h:866
static int hermon_eth_transmit(struct net_device *netdev, struct io_buffer *iobuf)
Transmit packet via Hermon Ethernet device.
Definition: hermon.c:3380
static int hermon_post_recv(struct ib_device *ibdev, struct ib_queue_pair *qp, struct io_buffer *iobuf)
Post receive work queue entry.
Definition: hermon.c:1690
#define ENFILE
Too many open files in system.
Definition: errno.h:493
struct ib_device * alloc_ibdev(size_t priv_size)
Allocate Infiniband device.
Definition: infiniband.c:917
#define HERMON_HCR_MGID_HASH
Definition: hermon.h:74
static void hermon_state_change_ibdev(struct hermon *hermon __unused, struct hermon_port *port, int link_up __unused)
Handle Hermon Infiniband device port state change.
Definition: hermon.c:3328
struct golan_eq_context ctx
Definition: CIB_PRM.h:28
A PCI configuration space backup.
Definition: pcibackup.h:21
User memory allocation.
size_t firmware_len
Firmware size.
Definition: hermon.h:883
size_t icm_len
ICM size.
Definition: hermon.h:894
Definition: hermon.h:539
#define MLX_GET(_ptr, _field)
Definition: mlx_bitops.h:222
#define HERMON_MTU_2048
Definition: hermon.h:100
#define HERMON_HCR_MAP_FA
Definition: hermon.h:87
A network device.
Definition: netdevice.h:352
#define HERMON_HCR_CLOSE_PORT
Definition: hermon.h:52
u8 sl
Definition: CIB_PRM.h:42
#define HERMON_MAX_PORTS
Definition: hermon.h:27
long int random(void)
Generate a pseudo-random number between 0 and 2147483647L or 2147483562?
Definition: random.c:31
A BOFM device.
Definition: bofm.h:286
static size_t iob_tailroom(struct io_buffer *iobuf)
Calculate available space at end of an I/O buffer.
Definition: iobuf.h:175
size_t len
Length.
Definition: hermon.h:617
static void hermon_free_mtt(struct hermon *hermon, struct hermon_mtt *mtt)
Free MTT entries.
Definition: hermon.c:700
union hermon_send_wqe * wqe
Work queue entries.
Definition: hermon.h:678
An Infiniband Completion Queue.
Definition: infiniband.h:224
static void netdev_nullify(struct net_device *netdev)
Stop using a network device.
Definition: netdevice.h:528
static struct ib_queue_pair_operations hermon_eth_qp_op
Hermon Ethernet queue pair operations.
Definition: hermon.c:3399
#define HERMON_UAR_NON_EQ_PAGE
UAR page for doorbell accesses.
Definition: hermon.h:635
int ib_smc_update(struct ib_device *ibdev, ib_local_mad_t local_mad)
Update Infiniband parameters using SMC.
Definition: ib_smc.c:249
#define PCI_VENDOR_ID
PCI vendor ID.
Definition: pci.h:19
static void hermon_eth_close(struct net_device *netdev)
Close Hermon Ethernet device.
Definition: hermon.c:3584
#define MLX_FILL_1(_ptr, _index,...)
Definition: mlx_bitops.h:167
void * uar
PCI user Access Region.
Definition: hermon.h:870
u32 addr
Definition: sky2.h:8
struct hermonprm_wqe_segment_data_ptr data[HERMON_MAX_SCATTER]
Definition: hermon.h:531
#define MLX_FILL_H(_structure_st, _index, _field, _address)
Definition: mlx_bitops.h:240
static struct bofm_operations hermon_bofm_operations
Hermon BOFM operations.
Definition: hermon.c:3928
unsigned char uint8_t
Definition: stdint.h:10
struct hermonprm_wqe_segment_ud ud
Definition: hermon.h:508
static int hermon_bofm_harvest(struct bofm_device *bofm, unsigned int mport, uint8_t *mac)
Harvest Ethernet MAC for BOFM.
Definition: hermon.c:3851
unsigned int reserved_eqs
Number of reserved EQs.
Definition: hermon.h:590
int dpdp
Dual-port different protocol.
Definition: hermon.h:606
void ib_destroy_qp(struct ib_device *ibdev, struct ib_queue_pair *qp)
Destroy queue pair.
Definition: infiniband.c:314
static int hermon_register_ibdev(struct hermon *hermon, struct hermon_port *port)
Register Hermon Infiniband device.
Definition: hermon.c:3300
Mellanox Hermon Infiniband HCA driver.
#define HERMON_MOD_STAT_CFG_QUERY
Definition: hermon.h:140
#define HERMON_LINK_POLL_INTERVAL
Link poll interval.
Definition: hermon.h:950
size_t icm_aux_len
ICM AUX size.
Definition: hermon.h:896
#define HERMON_HCR_VOID_CMD(_opcode)
Definition: hermon.h:995
#define HERMON_RESET_OFFSET
Definition: hermon.h:36
#define HERMON_HCR_IN_CMD(_opcode, _in_mbox, _in_len)
Definition: hermon.h:989
struct hermon_dev_cap cap
Device capabilities.
Definition: hermon.h:923
size_t eqc_entry_size
EQ context entry size.
Definition: hermon.h:592
size_t wqe_size
Size of work queue.
Definition: hermon.h:680
#define HERMON_HCR_CLOSE_HCA
Definition: hermon.h:50
unsigned long qpn
Queue Pair Number.
Definition: infiniband.h:74
struct hermonprm_mtt mtt
Definition: hermon.h:501
int bofm_register(struct bofm_device *bofm)
Register BOFM device.
Definition: bofm.c:49
int register_ibdev(struct ib_device *ibdev)
Register Infiniband device.
Definition: infiniband.c:944
#define ETH_ALEN
Definition: if_ether.h:8
#define HERMON_HCR_OUT_MBOX
Definition: hermon.h:975
static void hermon_remove(struct pci_device *pci)
Remove PCI device.
Definition: hermon.c:4135
#define IB_PORT_STATE_DOWN
Definition: ib_mad.h:151
static __always_inline int struct dma_mapping * map
Definition: dma.h:181
#define HERMON_HCR_OUT_CMD(_opcode, _out_mbox, _out_len)
Definition: hermon.h:992
A PCI device ID list entry.
Definition: pci.h:170
struct ib_queue_pair * qp
Containing queue pair.
Definition: infiniband.h:102
uint8_t headers[IB_MAX_HEADER_SIZE]
Definition: arbel.h:14
static int hermon_cmd_mgid_hash(struct hermon *hermon, const union ib_gid *gid, struct hermonprm_mgm_hash *hash)
Definition: hermon.c:502
#define HERMON_HCR_SW2HW_EQ
Definition: hermon.h:58
#define HERMON_HCR_RUN_FW
Definition: hermon.h:78
unsigned int ports
Total ports on device.
Definition: infiniband.h:420
static void hermon_bitmask_free(hermon_bitmask_t *bits, int bit, unsigned int num_bits)
Free offsets within usage bitmask.
Definition: hermon.c:112
static int hermon_inform_sma(struct ib_device *ibdev, union ib_mad *mad)
Inform embedded subnet management agent of a received MAD.
Definition: hermon.c:3165
unsigned int uint32_t
Definition: stdint.h:12
static int hermon_cmd_map_fa(struct hermon *hermon, const struct hermonprm_virtual_physical_mapping *map)
Definition: hermon.c:600
__be16 c_eqn
Definition: CIB_PRM.h:38
#define HERMON_HCR_SW2HW_CQ
Definition: hermon.h:61
unsigned long next_idx
Next work queue entry index.
Definition: infiniband.h:122
struct hermonprm_port_state_change_event port_state_change
Definition: hermon.h:541
#define HERMON_OPCODE_SEND_ERROR
Definition: hermon.h:44
uint16_t syndrome
ID of event.
Definition: ena.h:14
uint32_t ds
Definition: librm.h:254
static void hermon_state_change_netdev(struct hermon *hermon __unused, struct hermon_port *port, int link_up)
Handle Hermon Ethernet device port state change.
Definition: hermon.c:3661
static struct xen_remove_from_physmap * remove
Definition: xenmem.h:39
uint16_t vendor
Vendor ID.
Definition: pci.h:223
u32 version
Driver version.
Definition: ath9k_hw.c:1983
#define IB_VL_0
Definition: ib_mad.h:165
unsigned long next_idx
Next completion queue entry index.
Definition: infiniband.h:240
static int hermon_dump_eqes(struct hermon *hermon, struct hermon_event_queue *hermon_eq)
Dump unconsumed event queue entries (for debugging only)
Definition: hermon.c:1952
__be32 consumer_counter
Definition: CIB_PRM.h:38
struct hermon_port port[HERMON_MAX_PORTS]
Ports.
Definition: hermon.h:933
static int hermon_register_netdev(struct hermon *hermon, struct hermon_port *port)
Register Hermon Ethernet device.
Definition: hermon.c:3620
static int hermon_cmd_init2rtr_qp(struct hermon *hermon, unsigned long qpn, const struct hermonprm_qp_ee_state_transitions *ctx)
Definition: hermon.c:423
#define HERMON_ST_UD
Definition: hermon.h:91
Network device operations.
Definition: netdevice.h:213
#define HERMON_MAX_CQS
Maximum number of allocatable completion queues.
Definition: hermon.h:755
struct device * dev
Underlying hardware device.
Definition: netdevice.h:364
An Infiniband Queue Pair.
Definition: infiniband.h:157
#define HERMON_HCR_SET_PORT
Definition: hermon.h:53
unsigned int sl
Service level.
Definition: infiniband.h:88
struct hermonprm_mlx_send_wqe mlx
Definition: hermon.h:662
#define HERMON_EV_PORT_STATE_CHANGE
Definition: hermon.h:125
#define HERMON_EV_PORT_MGMNT_CHANGE
Definition: hermon.h:126
static int hermon_cmd_close_port(struct hermon *hermon, unsigned int port)
Definition: hermon.c:309
Network device management.
unsigned long physaddr_t
Definition: stdint.h:20
#define HERMON_ST_MLX
Definition: hermon.h:92
#define __unused
Declare a variable or data structure as unused.
Definition: compiler.h:573
void * config
PCI configuration registers.
Definition: hermon.h:868
unsigned int toggle
Command toggle.
Definition: hermon.h:873
static void * pci_get_drvdata(struct pci_device *pci)
Get PCI driver-private data.
Definition: pci.h:369
static void hermon_unregister_ibdev(struct hermon *hermon __unused, struct hermon_port *port)
Unregister Hermon Infiniband device.
Definition: hermon.c:3343
struct hermonprm_wqe_segment_data_ptr data[HERMON_MAX_GATHER]
Definition: hermon.h:509
struct pci_driver hermon_driver __pci_driver
Definition: hermon.c:4241
struct arbelprm_qp_db_record qp
Definition: arbel.h:13
static int hermon_cmd_conf_special_qp(struct hermon *hermon, unsigned int internal_qps, unsigned long base_qpn)
Definition: hermon.c:466
static unsigned int hermon_fill_nop_send_wqe(struct ib_device *ibdev __unused, struct ib_queue_pair *qp __unused, struct ib_address_vector *dest __unused, struct io_buffer *iobuf __unused, union hermon_send_wqe *wqe)
Construct UD send work queue entry.
Definition: hermon.c:1437
int ib_post_send(struct ib_device *ibdev, struct ib_queue_pair *qp, struct ib_address_vector *dest, struct io_buffer *iobuf)
Post send work queue entry.
Definition: infiniband.c:416
static struct hermon * hermon_alloc(void)
Allocate Hermon device.
Definition: hermon.c:3947
void mdelay(unsigned long msecs)
Delay for a fixed number of milliseconds.
Definition: timer.c:78
static int hermon_eth_open(struct net_device *netdev)
Open Hermon Ethernet device.
Definition: hermon.c:3472
#define iob_reserve(iobuf, len)
Definition: iobuf.h:67
static int hermon_cmd_read_mcg(struct hermon *hermon, unsigned int index, struct hermonprm_mcg_entry *mcg)
Definition: hermon.c:484
#define HERMON_MBOX_ALIGN
Definition: hermon.h:960
static unsigned int hermon_fill_mlx_send_wqe(struct ib_device *ibdev, struct ib_queue_pair *qp, struct ib_address_vector *dest, struct io_buffer *iobuf, union hermon_send_wqe *wqe)
Construct MLX send work queue entry.
Definition: hermon.c:1501
int(* harvest)(struct bofm_device *bofm, unsigned int mport, uint8_t *mac)
Harvest Ethernet MAC.
Definition: bofm.h:304
unsigned long qpn_base
QPN base.
Definition: hermon.h:927
static struct net_device_operations hermon_eth_operations
Hermon Ethernet network device operations.
Definition: hermon.c:3606
static int hermon_probe(struct pci_device *pci)
Probe PCI device.
Definition: hermon.c:3997
void netdev_tx_complete_err(struct net_device *netdev, struct io_buffer *iobuf, int rc)
Complete network transmission.
Definition: netdevice.c:470
void * doorbell
Doorbell register.
Definition: hermon.h:788
#define HERMON_HCR_MAP_ICM_AUX
Definition: hermon.h:84
char name[NETDEV_NAME_LEN]
Name of this network device.
Definition: netdevice.h:362
void(* state_change)(struct hermon *hermon, struct hermon_port *port, int link_up)
Port state changed.
Definition: hermon.h:824
struct ib_mad_hdr hdr
Definition: ib_mad.h:611
size_t srqc_entry_size
SRQ context entry size.
Definition: hermon.h:584
#define UNULL
Equivalent of NULL for user pointers.
Definition: uaccess.h:36
__weak void vlan_netdev_rx(struct net_device *netdev, unsigned int tag, struct io_buffer *iobuf)
Add VLAN tag-stripped packet to queue (when VLAN support is not present)
Definition: netdevice.c:1209
static volatile void * bits
Definition: bitops.h:27
static __always_inline void ufree(userptr_t userptr)
Free external memory.
Definition: umalloc.h:65
uint32_t len
Length.
Definition: ena.h:14
uint8_t unused[32]
Unused.
Definition: eltorito.h:15
static __always_inline userptr_t umalloc(size_t size)
Allocate external memory.
Definition: umalloc.h:54
#define ENOBUFS
No buffer space available.
Definition: errno.h:498
static int hermon_create_eq(struct hermon *hermon)
Create event queue.
Definition: hermon.c:2001
#define DBGC2(...)
Definition: compiler.h:522
void pci_backup(struct pci_device *pci, struct pci_config_backup *backup, unsigned int limit, const uint8_t *exclude)
Back up PCI configuration space.
Definition: pcibackup.c:67
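A hedged sketch of using pci_backup() around a device-level reset: the 0x40 limit (standard header only) and the excluded offsets are illustrative assumptions rather than the values hermon.c uses, and pci_restore() is assumed to take the same arguments as pci_backup().

#include <stdint.h>
#include <ipxe/pci.h>
#include <ipxe/pcibackup.h>

static const uint8_t reset_exclude[] =
	PCI_CONFIG_BACKUP_EXCLUDE ( 0x58, 0x5c );	/* assumed offsets */

static void reset_with_backup ( struct pci_device *pci ) {
	struct pci_config_backup backup;

	pci_backup ( pci, &backup, 0x40, reset_exclude );
	/* ... trigger the hardware reset and wait for completion ... */
	pci_restore ( pci, &backup, 0x40, reset_exclude );
}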
int bofm(userptr_t bofmtab, struct pci_device *pci)
Process BOFM table.
Definition: bofm.c:238
int(* probe)(struct pci_device *pci)
Probe device.
Definition: pci.h:260
struct hermon_icm_map icm_map[HERMON_ICM_NUM_REGIONS]
ICM map.
Definition: hermon.h:892
#define HERMON_MAX_MTTS
Maximum number of allocatable MTT entries.
Definition: hermon.h:641
#define PCI_CONFIG_BACKUP_EXCLUDE(...)
Define a PCI configuration space backup exclusion list.
Definition: pcibackup.h:29
int ib_create_qp(struct ib_device *ibdev, enum ib_queue_pair_type type, unsigned int num_send_wqes, struct ib_completion_queue *send_cq, unsigned int num_recv_wqes, struct ib_completion_queue *recv_cq, struct ib_queue_pair_operations *op, const char *name, struct ib_queue_pair **new_qp)
Create queue pair.
Definition: infiniband.c:199
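As a hedged illustration of ib_create_qp(): creating a small UD queue pair over a pre-existing completion queue, with placeholder WQE counts, a trivial queue-pair operations table and an assumed name. ib_destroy_qp() would undo the allocation.

#include <ipxe/iobuf.h>
#include <ipxe/infiniband.h>

static struct ib_queue_pair_operations example_qp_op = {
	.alloc_iob = alloc_iob,
};

static int make_ud_qp ( struct ib_device *ibdev,
			struct ib_completion_queue *cq,
			struct ib_queue_pair **new_qp ) {
	return ib_create_qp ( ibdev, IB_QPT_UD, 8, cq, 16, cq,
			      &example_qp_op, "example", new_qp );
}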
static int hermon_create_cq(struct ib_device *ibdev, struct ib_completion_queue *cq)
Create completion queue.
Definition: hermon.c:850
static int hermon_cmd_map_icm_aux(struct hermon *hermon, const struct hermonprm_virtual_physical_mapping *map)
Definition: hermon.c:573
uint32_t mtu
Maximum MTU.
Definition: ena.h:28
struct hermonprm_cq_db_record * doorbell
Doorbell record.
Definition: hermon.h:766
#define HERMON_HCR_UNMAP_FA
Definition: hermon.h:86
static unsigned int hermon_fill_eth_send_wqe(struct ib_device *ibdev, struct ib_queue_pair *qp __unused, struct ib_address_vector *dest __unused, struct io_buffer *iobuf, union hermon_send_wqe *wqe)
Construct Ethernet send work queue entry.
Definition: hermon.c:1583
#define HERMON_HCR_OPCODE(_command)
Definition: hermon.h:976
void * data
Start of data.
Definition: iobuf.h:48
union hermonprm_completion_entry * cqe
Completion queue entries.
Definition: hermon.h:760
#define barrier()
Optimisation barrier.
Definition: compiler.h:655
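The usual role of barrier() in a driver fast path is to stop the compiler from reordering the store that hands a descriptor to hardware with the doorbell write that tells the hardware to look at it. The descriptor layout and doorbell value below are assumptions, not the Hermon WQE format.

#include <stdint.h>
#include <ipxe/io.h>

struct fake_desc {
	uint32_t addr;
	uint32_t ctrl;		/* ownership flag lives here */
};

static void post_desc ( struct fake_desc *desc, void *doorbell,
			uint32_t ctrl ) {
	desc->ctrl = ctrl;	/* hand descriptor to hardware last */
	barrier();		/* no compiler reordering past this point */
	writel ( 1, doorbell );	/* ring the (assumed) doorbell register */
}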
unsigned int page_offset
Offset within page.
Definition: hermon.h:652
#define HERMON_HCR_RST2INIT_QP
Definition: hermon.h:64
struct hermon_mtt mtt
MTT descriptor.
Definition: hermon.h:742
#define EIO
Input/output error.
Definition: errno.h:433
#define HERMON_HCR_QUERY_FW
Definition: hermon.h:48
#define HERMON_HCR_SW2HW_MPT
Definition: hermon.h:54
A Hermon port.
Definition: hermon.h:846
#define HERMON_DB_EQ_OFFSET(_eqn)
Definition: hermon.h:108
static int hermon_alloc_qpn(struct ib_device *ibdev, struct ib_queue_pair *qp)
Assign queue pair number.
Definition: hermon.c:1010
__weak void vlan_netdev_rx_err(struct net_device *netdev, unsigned int tag __unused, struct io_buffer *iobuf, int rc)
Discard received VLAN tag-stripped packet (when VLAN support is not present)
Definition: netdevice.c:1227
union ib_gid gid
GID, if present.
Definition: infiniband.h:92
struct net_device * alloc_etherdev(size_t priv_size)
Allocate Ethernet device.
Definition: ethernet.c:264
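A minimal sketch of the registration sequence built around alloc_etherdev(): the example_priv structure and example_operations table are placeholders, and the reference taken by alloc_etherdev() is dropped with netdev_nullify()/netdev_put() on failure.

#include <errno.h>
#include <ipxe/netdevice.h>
#include <ipxe/ethernet.h>

struct example_priv {
	int dummy;			/* placeholder private state */
};

extern struct net_device_operations example_operations;	/* assumed */

static int example_register ( void ) {
	struct net_device *netdev;
	int rc;

	netdev = alloc_etherdev ( sizeof ( struct example_priv ) );
	if ( ! netdev )
		return -ENOMEM;
	netdev_init ( netdev, &example_operations );

	if ( ( rc = register_netdev ( netdev ) ) != 0 ) {
		netdev_nullify ( netdev );
		netdev_put ( netdev );
		return rc;
	}
	return 0;
}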
hermon_bitmask_t mtt_inuse[HERMON_BITMASK_SIZE(HERMON_MAX_MTTS)]
MTT entry in-use bitmask.
Definition: hermon.h:920
static unsigned int hermon_sched_queue(struct ib_device *ibdev, struct ib_queue_pair *qp)
Calculate schedule queue.
Definition: hermon.c:1082
static int hermon_bitmask_alloc(hermon_bitmask_t *bits, unsigned int bits_len, unsigned int num_bits)
Allocate offsets within usage bitmask.
Definition: hermon.c:71
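The in-use bitmasks listed elsewhere in this index (qp_inuse[], mtt_inuse[]) are sized with HERMON_BITMASK_SIZE() and carved up with hermon_bitmask_alloc() and its companion hermon_bitmask_free(). A hedged sketch, meaningful only inside hermon.c since both helpers are static, using an arbitrary 64-entry mask:

static hermon_bitmask_t example_inuse[ HERMON_BITMASK_SIZE ( 64 ) ];

static int reserve_four ( void ) {
	int offset;

	/* Find four contiguous free entries; negative means none left */
	offset = hermon_bitmask_alloc ( example_inuse, 64, 4 );
	if ( offset < 0 )
		return offset;
	/* ... entries [offset, offset+3] are now marked in use ... */
	hermon_bitmask_free ( example_inuse, offset, 4 );
	return 0;
}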
static int hermon_dump_qpctx(struct hermon *hermon, struct ib_queue_pair *qp)
Dump queue pair context (for debugging only)
Definition: hermon.c:1106
#define HERMON_RESET_MAGIC
Definition: hermon.h:37
#define HERMON_HCR_HW2SW_MPT
Definition: hermon.h:55
uint8_t port_state
Port state.
Definition: infiniband.h:425
#define HERMON_HCR_SENSE_PORT
Definition: hermon.h:77
struct hermonprm_ud_send_wqe ud
Definition: hermon.h:661
void(* complete_send)(struct ib_device *ibdev, struct ib_queue_pair *qp, struct io_buffer *iobuf, int rc)
Complete Send WQE.
Definition: infiniband.h:203
unsigned long cqn
Completion queue number.
Definition: infiniband.h:230
struct hermonprm_completion_queue_entry normal
Definition: hermon.h:535
uint32_t end
Ending offset.
Definition: netvsc.h:18
uint8_t size
Entry size (in 32-bit words)
Definition: ena.h:16
static void hermon_eth_poll(struct net_device *netdev)
Poll Hermon Ethernet device.
Definition: hermon.c:3459
void iounmap(volatile const void *io_addr)
Unmap I/O address.
uint8_t data[48]
Additional event data.
Definition: ena.h:22
unsigned int num_cqes
Number of completion queue entries.
Definition: infiniband.h:232
Virtual LANs.
hermon_bitmask_t qp_inuse[HERMON_BITMASK_SIZE(HERMON_MAX_QPS)]
Queue pair in-use bitmask.
Definition: hermon.h:918
static void hermon_close(struct hermon *hermon)
Close Hermon device.
Definition: hermon.c:3063
A management datagram.
Definition: ib_mad.h:610
size_t dmpt_entry_size
DMPT entry size.
Definition: hermon.h:600
#define DBGCP(...)
Definition: compiler.h:539
#define HERMON_PCI_CONFIG_BAR
Definition: hermon.h:31
static int hermon_cmd_init_hca(struct hermon *hermon, const struct hermonprm_init_hca *init_hca)
Definition: hermon.c:286
#define HERMON_HCR_HW2SW_CQ
Definition: hermon.h:62
#define HERMON_HCR_SET_ICM_SIZE
Definition: hermon.h:85
static int hermon_create_qp(struct ib_device *ibdev, struct ib_queue_pair *qp)
Create queue pair.
Definition: hermon.c:1143
void ib_complete_recv(struct ib_device *ibdev, struct ib_queue_pair *qp, struct ib_address_vector *dest, struct ib_address_vector *source, struct io_buffer *iobuf, int rc)
Complete receive work queue entry.
Definition: infiniband.c:536
A Hermon device.
Definition: hermon.h:864
static int hermon_cmd_unmap_icm(struct hermon *hermon, unsigned int page_count, const struct hermonprm_scalar_parameter *offset)
Definition: hermon.c:548
static void free_phys(void *ptr, size_t size)
Free memory allocated with malloc_phys()
Definition: malloc.h:77
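free_phys() releases memory obtained from malloc_phys(), which returns physically contiguous, alignment-guaranteed memory suitable for queue rings and other DMA structures. A hedged sketch with an arbitrary 4kB size and alignment; virt_to_bus() yields the address handed to the device.

#include <errno.h>
#include <ipxe/io.h>
#include <ipxe/malloc.h>

static int make_ring ( void ) {
	void *ring;
	physaddr_t bus;

	ring = malloc_phys ( 4096, 4096 );
	if ( ! ring )
		return -ENOMEM;
	bus = virt_to_bus ( ring );
	( void ) bus;			/* would be written to the device */
	free_phys ( ring, 4096 );
	return 0;
}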
static int hermon_cmd_mad_ifc(struct hermon *hermon, unsigned int port, union hermonprm_mad *mad)
Definition: hermon.c:474
An Infiniband Address Vector.
Definition: infiniband.h:72
A Hermon send work queue.
Definition: hermon.h:669
struct nvs_vpd_device nvsvpd
Non-volatile storage in PCI VPD.
Definition: hermon.h:930
size_t cmpt_entry_size
CMPT entry size.
Definition: hermon.h:572
static int hermon_unmap_mpt(struct hermon *hermon)
Unmap memory protection table.
Definition: hermon.c:2917
__be32 opt_param_mask
Definition: CIB_PRM.h:28
void bofm_unregister(struct bofm_device *bofm)
Unregister BOFM device.
Definition: bofm.c:62
#define MLX_FILL_5(_ptr, _index,...)
Definition: mlx_bitops.h:183
__be16 page_offset
Definition: CIB_PRM.h:33
static int hermon_cmd_query_cq(struct hermon *hermon, unsigned long cqn, struct hermonprm_completion_queue_context *cqctx)
Definition: hermon.c:405
Infiniband Subnet Management Client.
A Hermon queue pair.
Definition: hermon.h:736
typeof(acpi_finder=acpi_find)
ACPI table finder.
Definition: acpi.c:45
unsigned long lkey
Unrestricted LKey.
Definition: hermon.h:913
unsigned int lid
Local ID.
Definition: infiniband.h:81
#define HERMON_RESET_MAX_WAIT_MS
Definition: hermon.h:38
unsigned long next_idx
Next event queue entry index.
Definition: hermon.h:786
static uint8_t hermon_qp_st[]
Queue pair transport service type map.
Definition: hermon.c:1090
A Hermon MTT descriptor.
Definition: hermon.h:644
static int hermon_setup_mpt(struct hermon *hermon)
Set up memory protection table.
Definition: hermon.c:2877
union ib_mad mad
Definition: hermon.h:553
static void hermon_bofm_remove(struct pci_device *pci)
Remove PCI device for BOFM.
Definition: hermon.c:4208
uint64_t index
Index of the first segment within the content.
Definition: pccrc.h:21
#define HERMON_QP_OPT_PARAM_QKEY
Definition: hermon.h:112
unsigned long currticks(void)
Get current system time in ticks.
Definition: timer.c:42
struct hermonprm_set_port_ib ib
Definition: hermon.h:557
static void hermon_poll_cq(struct ib_device *ibdev, struct ib_completion_queue *cq)
Poll completion queue.
Definition: hermon.c:1870
unsigned int vlan_present
VLAN is present.
Definition: infiniband.h:94
unsigned int reserved_uars
Number of reserved UARs.
Definition: hermon.h:602
uint8_t headers[IB_MAX_HEADER_SIZE]
Definition: hermon.h:515
static struct pci_device_id hermon_nics[]
Definition: hermon.c:4216
#define HERMON_HCR_2RST_QP
Definition: hermon.h:68
__be32 cqn
Definition: CIB_PRM.h:29
void * pci_ioremap(struct pci_device *pci, unsigned long bus_addr, size_t len)
Map PCI bus address as an I/O address.
static int hermon_map_vpm(struct hermon *hermon, int(*map)(struct hermon *hermon, const struct hermonprm_virtual_physical_mapping *), uint64_t va, physaddr_t pa, size_t len)
Map virtual to physical address for firmware usage.
Definition: hermon.c:2272
#define HERMON_HCR_CONF_SPECIAL_QP
Definition: hermon.h:70
#define fls(x)
Find last (i.e. most significant) set bit.
Definition: strings.h:166
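fls() is typically used to turn an entry count into the log2-sized field that hardware context entries expect; for a count that is already a power of two, fls ( count - 1 ) yields log2(count). A one-line sketch:

#include <strings.h>

static unsigned int log2_entries ( unsigned int count ) {
	/* e.g. count == 8: fls ( 7 ) == 3 == log2 ( 8 ) */
	return fls ( count - 1 );
}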
uint8_t bytes[64]
Definition: ib_mad.h:16
#define DBGLVL_EXTRA
Definition: compiler.h:318
u8 gid[16]
Definition: CIB_PRM.h:31
static int hermon_cmd_query_port(struct hermon *hermon, unsigned int port, struct hermonprm_query_port_cap *query_port)
Definition: hermon.c:523
uint64_t tag
Identity tag.
Definition: edd.h:30
unsigned int reserved_mrws
Number of reserved MRWs.
Definition: hermon.h:598
static int hermon_cmd_map_eq(struct hermon *hermon, unsigned long index_map, const struct hermonprm_event_mask *mask)
Definition: hermon.c:351
unsigned long eqn
Event queue number.
Definition: hermon.h:784
static unsigned int(* hermon_fill_send_wqe[])(struct ib_device *ibdev, struct ib_queue_pair *qp, struct ib_address_vector *dest, struct io_buffer *iobuf, union hermon_send_wqe *wqe)
Work queue entry constructors.
Definition: hermon.c:1608
#define DBG_LOG
Definition: compiler.h:317
#define HERMON_HCR_QUERY_QP
Definition: hermon.h:69
size_t altc_entry_size
Alternate path context entry size.
Definition: hermon.h:578
uint8_t lemac[ETH_ALEN]
IPoIB LEMAC (if non-default)
Definition: infiniband.h:464
static int hermon_ib_open(struct ib_device *ibdev)
Initialise Infiniband link.
Definition: hermon.c:3087
int(* register_dev)(struct hermon *hermon, struct hermon_port *port)
Register port.
Definition: hermon.h:816
static __always_inline void * ib_cq_get_drvdata(struct ib_completion_queue *cq)
Get Infiniband completion queue driver-private data.
Definition: infiniband.h:686
uint8_t hw_addr[MAX_HW_ADDR_LEN]
Hardware address.
Definition: netdevice.h:381
BOFM device operations.
Definition: bofm.h:296
#define NULL
NULL pointer (VOID *)
Definition: Base.h:321
static int hermon_cmd_2rst_qp(struct hermon *hermon, unsigned long qpn)
Definition: hermon.c:450
static int hermon_cmd_run_fw(struct hermon *hermon)
Definition: hermon.c:541
#define HERMON_HCR_WRITE_MCG
Definition: hermon.h:73
#define ETIMEDOUT
Connection timed out.
Definition: errno.h:669
static __always_inline void ib_qp_set_ownerdata(struct ib_queue_pair *qp, void *priv)
Set Infiniband queue pair owner-private data.
Definition: infiniband.h:653
#define HERMON_HCR_MAP_EQ
Definition: hermon.h:57
unsigned int open_count
Device open request counter.
Definition: hermon.h:880
int ib_push(struct ib_device *ibdev, struct io_buffer *iobuf, struct ib_queue_pair *qp, size_t payload_len, const struct ib_address_vector *dest)
Add IB headers.
Definition: ib_packet.c:52
String functions.
#define PCI_ROM(_vendor, _device, _name, _description, _data)
Definition: pci.h:303
__be32 byte_cnt
Definition: CIB_PRM.h:37
static void hermon_unregister_netdev(struct hermon *hermon __unused, struct hermon_port *port)
Unregister Hermon Ethernet device.
Definition: hermon.c:3679
#define htons(value)
Definition: byteswap.h:135
#define HERMON_MAX_QPS
Maximum number of allocatable queue pairs.
Definition: hermon.h:722
static int hermon_mod_stat_cfg(struct hermon *hermon, unsigned int port, unsigned int mode, unsigned int offset, struct hermonprm_mod_stat_cfg *stat_cfg)
Query or modify static configuration.
Definition: hermon.c:737
static void hermon_mcast_detach(struct ib_device *ibdev, struct ib_queue_pair *qp __unused, union ib_gid *gid)
Detach from multicast group.
Definition: hermon.c:3248
union @382 key
Sense key.
Definition: crypto.h:284
static int hermon_cmd_close_hca(struct hermon *hermon)
Definition: hermon.c:295
union ib_mad mad
Definition: arbel.h:12
#define HERMON_NUM_SPECIAL_QPS
Number of special queue pairs.
Definition: hermon.h:709
static __always_inline void ib_qp_set_drvdata(struct ib_queue_pair *qp, void *priv)
Set Infiniband queue pair driver-private data.
Definition: infiniband.h:631
int(* create_cq)(struct ib_device *ibdev, struct ib_completion_queue *cq)
Create completion queue.
Definition: infiniband.h:261
size_t qpc_entry_size
QP context entry size.
Definition: hermon.h:576
uint16_t status
Definition: ib_mad.h:543
unsigned int reserved_srqs
Number of reserved SRQs.
Definition: hermon.h:582
unsigned int num_wqes
Number of work queue entries, including headroom.
Definition: hermon.h:676
#define HERMON_HCR_WRITE_MTT
Definition: hermon.h:56
#define HERMON_HCR_MAX_WAIT_MS
Definition: hermon.h:959
struct ib_global_route_header * grh
GRH buffers (if applicable)
Definition: hermon.h:701
static uint64_t icm_align(uint64_t icm_offset, size_t len)
Align ICM table.
Definition: hermon.c:2499
static unsigned int hermon_rate(struct ib_address_vector *av)
Calculate transmission rate.
Definition: hermon.c:1070
#define HERMON_ETH_NUM_RECV_WQES
Number of Hermon Ethernet receive work queue entries.
Definition: hermon.c:3368
static int hermon_complete(struct ib_device *ibdev, struct ib_completion_queue *cq, union hermonprm_completion_entry *cqe)
Handle completion.
Definition: hermon.c:1746
String functions.
static void hermon_stop(struct hermon *hermon)
Stop Hermon device.
Definition: hermon.c:3028
#define HERMON_NUM_EQES
Number of event queue entries.
Definition: hermon.h:795
static int hermon_cmd_hw2sw_mpt(struct hermon *hermon, unsigned int index)
Definition: hermon.c:335
size_t grh_size
Size of GRH buffers.
Definition: hermon.h:703
void * memset(void *dest, int character, size_t len) __nonnull
A Hermon event queue.
Definition: hermon.h:776
static int hermon_get_cap(struct hermon *hermon)
Get device limits.
Definition: hermon.c:2443
A persistent I/O buffer.
Definition: iobuf.h:33
PCI configuration space backup and restoration.
#define HERMON_MBOX_SIZE
Definition: hermon.h:961
#define HERMON_HCR_HW2SW_EQ
Definition: hermon.h:59
#define HERMON_GLOBAL_PD
Global protection domain.
Definition: hermon.h:940
struct io_buffer ** iobufs
I/O buffers assigned to work queue.
Definition: infiniband.h:124