iPXE
hermon.c
Go to the documentation of this file.
1 /*
2  * Copyright (C) 2008 Michael Brown <mbrown@fensystems.co.uk>.
3  * Copyright (C) 2008 Mellanox Technologies Ltd.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation; either version 2 of the
8  * License, or any later version.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13  * General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write to the Free Software
17  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18  * 02110-1301, USA.
19  */
20 
21 FILE_LICENCE ( GPL2_OR_LATER );
22 
23 #include <stdint.h>
24 #include <stdlib.h>
25 #include <stdio.h>
26 #include <string.h>
27 #include <strings.h>
28 #include <unistd.h>
29 #include <errno.h>
30 #include <byteswap.h>
31 #include <ipxe/io.h>
32 #include <ipxe/pci.h>
33 #include <ipxe/pcibackup.h>
34 #include <ipxe/malloc.h>
35 #include <ipxe/umalloc.h>
36 #include <ipxe/iobuf.h>
37 #include <ipxe/netdevice.h>
38 #include <ipxe/infiniband.h>
39 #include <ipxe/ib_smc.h>
40 #include <ipxe/if_ether.h>
41 #include <ipxe/ethernet.h>
42 #include <ipxe/fcoe.h>
43 #include <ipxe/vlan.h>
44 #include <ipxe/bofm.h>
45 #include <ipxe/nvsvpd.h>
46 #include <ipxe/nvo.h>
47 #include "hermon.h"
48 
49 /**
50  * @file
51  *
52  * Mellanox Hermon Infiniband HCA
53  *
54  */
55 
56 /***************************************************************************
57  *
58  * Queue number allocation
59  *
60  ***************************************************************************
61  */
62 
63 /**
64  * Allocate offsets within usage bitmask
65  *
66  * @v bits Usage bitmask
67  * @v bits_len Length of usage bitmask
68  * @v num_bits Number of contiguous bits to allocate within bitmask
69  * @ret bit First free bit within bitmask, or negative error
70  */
72  unsigned int bits_len,
73  unsigned int num_bits ) {
74  unsigned int bit = 0;
75  hermon_bitmask_t mask = 1;
76  unsigned int found = 0;
77 
78  /* Search bits for num_bits contiguous free bits */
79  while ( bit < bits_len ) {
80  if ( ( mask & *bits ) == 0 ) {
81  if ( ++found == num_bits )
82  goto found;
83  } else {
84  found = 0;
85  }
86  bit++;
87  mask = ( mask << 1 ) | ( mask >> ( 8 * sizeof ( mask ) - 1 ) );
88  if ( mask == 1 )
89  bits++;
90  }
91  return -ENFILE;
92 
93  found:
94  /* Mark bits as in-use */
95  do {
96  *bits |= mask;
97  if ( mask == 1 )
98  bits--;
99  mask = ( mask >> 1 ) | ( mask << ( 8 * sizeof ( mask ) - 1 ) );
100  } while ( --found );
101 
102  return ( bit - num_bits + 1 );
103 }
104 
105 /**
106  * Free offsets within usage bitmask
107  *
108  * @v bits Usage bitmask
109  * @v bit Starting bit within bitmask
110  * @v num_bits Number of contiguous bits to free within bitmask
111  */
113  int bit, unsigned int num_bits ) {
114  hermon_bitmask_t mask;
115 
116  for ( ; num_bits ; bit++, num_bits-- ) {
117  mask = ( 1 << ( bit % ( 8 * sizeof ( mask ) ) ) );
118  bits[ ( bit / ( 8 * sizeof ( mask ) ) ) ] &= ~mask;
119  }
120 }
121 
122 /***************************************************************************
123  *
124  * HCA commands
125  *
126  ***************************************************************************
127  */
128 
129 /**
130  * Wait for Hermon command completion
131  *
132  * @v hermon Hermon device
133  * @v hcr HCA command registers
134  * @ret rc Return status code
135  */
136 static int hermon_cmd_wait ( struct hermon *hermon,
137  struct hermonprm_hca_command_register *hcr ) {
138  unsigned int wait;
139 
140  for ( wait = HERMON_HCR_MAX_WAIT_MS ; wait ; wait-- ) {
141  hcr->u.dwords[6] =
142  readl ( hermon->config + HERMON_HCR_REG ( 6 ) );
143  if ( ( MLX_GET ( hcr, go ) == 0 ) &&
144  ( MLX_GET ( hcr, t ) == hermon->toggle ) )
145  return 0;
146  mdelay ( 1 );
147  }
148  return -EBUSY;
149 }
150 
151 /**
152  * Issue HCA command
153  *
154  * @v hermon Hermon device
155  * @v command Command opcode, flags and input/output lengths
156  * @v op_mod Opcode modifier (0 if no modifier applicable)
157  * @v in Input parameters
158  * @v in_mod Input modifier (0 if no modifier applicable)
159  * @v out Output parameters
160  * @ret rc Return status code
161  */
162 static int hermon_cmd ( struct hermon *hermon, unsigned long command,
163  unsigned int op_mod, const void *in,
164  unsigned int in_mod, void *out ) {
165  struct hermonprm_hca_command_register hcr;
166  unsigned int opcode = HERMON_HCR_OPCODE ( command );
167  size_t in_len = HERMON_HCR_IN_LEN ( command );
168  size_t out_len = HERMON_HCR_OUT_LEN ( command );
169  void *in_buffer;
170  void *out_buffer;
171  unsigned int status;
172  unsigned int i;
173  int rc;
174 
175  assert ( in_len <= HERMON_MBOX_SIZE );
176  assert ( out_len <= HERMON_MBOX_SIZE );
177 
178  DBGC2 ( hermon, "Hermon %p command %02x in %zx%s out %zx%s\n",
179  hermon, opcode, in_len,
180  ( ( command & HERMON_HCR_IN_MBOX ) ? "(mbox)" : "" ), out_len,
181  ( ( command & HERMON_HCR_OUT_MBOX ) ? "(mbox)" : "" ) );
182 
183  /* Check that HCR is free */
184  if ( ( rc = hermon_cmd_wait ( hermon, &hcr ) ) != 0 ) {
185  DBGC ( hermon, "Hermon %p command interface locked\n",
186  hermon );
187  return rc;
188  }
189 
190  /* Flip HCR toggle */
191  hermon->toggle = ( 1 - hermon->toggle );
192 
193  /* Prepare HCR */
194  memset ( &hcr, 0, sizeof ( hcr ) );
195  in_buffer = &hcr.u.dwords[0];
196  if ( in_len && ( command & HERMON_HCR_IN_MBOX ) ) {
198  in_buffer = hermon->mailbox_in;
199  MLX_FILL_H ( &hcr, 0, in_param_h, virt_to_bus ( in_buffer ) );
200  MLX_FILL_1 ( &hcr, 1, in_param_l, virt_to_bus ( in_buffer ) );
201  }
202  memcpy ( in_buffer, in, in_len );
203  MLX_FILL_1 ( &hcr, 2, input_modifier, in_mod );
204  out_buffer = &hcr.u.dwords[3];
205  if ( out_len && ( command & HERMON_HCR_OUT_MBOX ) ) {
206  out_buffer = hermon->mailbox_out;
207  MLX_FILL_H ( &hcr, 3, out_param_h,
208  virt_to_bus ( out_buffer ) );
209  MLX_FILL_1 ( &hcr, 4, out_param_l,
210  virt_to_bus ( out_buffer ) );
211  }
212  MLX_FILL_4 ( &hcr, 6,
213  opcode, opcode,
214  opcode_modifier, op_mod,
215  go, 1,
216  t, hermon->toggle );
217  DBGC ( hermon, "Hermon %p issuing command %04x\n",
218  hermon, opcode );
220  &hcr, sizeof ( hcr ) );
221  if ( in_len && ( command & HERMON_HCR_IN_MBOX ) ) {
222  DBGC2 ( hermon, "Input mailbox:\n" );
223  DBGC2_HDA ( hermon, virt_to_phys ( in_buffer ), in_buffer,
224  ( ( in_len < 512 ) ? in_len : 512 ) );
225  }
226 
227  /* Issue command */
228  for ( i = 0 ; i < ( sizeof ( hcr ) / sizeof ( hcr.u.dwords[0] ) ) ;
229  i++ ) {
230  writel ( hcr.u.dwords[i],
231  hermon->config + HERMON_HCR_REG ( i ) );
232  barrier();
233  }
234 
235  /* Wait for command completion */
236  if ( ( rc = hermon_cmd_wait ( hermon, &hcr ) ) != 0 ) {
237  DBGC ( hermon, "Hermon %p timed out waiting for command:\n",
238  hermon );
239  DBGC_HDA ( hermon,
241  &hcr, sizeof ( hcr ) );
242  return rc;
243  }
244 
245  /* Check command status */
246  status = MLX_GET ( &hcr, status );
247  if ( status != 0 ) {
248  DBGC ( hermon, "Hermon %p command failed with status %02x:\n",
249  hermon, status );
250  DBGC_HDA ( hermon,
252  &hcr, sizeof ( hcr ) );
253  return -EIO;
254  }
255 
256  /* Read output parameters, if any */
257  hcr.u.dwords[3] = readl ( hermon->config + HERMON_HCR_REG ( 3 ) );
258  hcr.u.dwords[4] = readl ( hermon->config + HERMON_HCR_REG ( 4 ) );
259  memcpy ( out, out_buffer, out_len );
260  if ( out_len ) {
261  DBGC2 ( hermon, "Output%s:\n",
262  ( command & HERMON_HCR_OUT_MBOX ) ? " mailbox" : "" );
263  DBGC2_HDA ( hermon, virt_to_phys ( out_buffer ), out_buffer,
264  ( ( out_len < 512 ) ? out_len : 512 ) );
265  }
266 
267  return 0;
268 }
269 
270 static inline int
272  struct hermonprm_query_dev_cap *dev_cap ) {
273  return hermon_cmd ( hermon,
275  1, sizeof ( *dev_cap ) ),
276  0, NULL, 0, dev_cap );
277 }
278 
279 static inline int
280 hermon_cmd_query_fw ( struct hermon *hermon, struct hermonprm_query_fw *fw ) {
281  return hermon_cmd ( hermon,
283  1, sizeof ( *fw ) ),
284  0, NULL, 0, fw );
285 }
286 
287 static inline int
289  const struct hermonprm_init_hca *init_hca ) {
290  return hermon_cmd ( hermon,
292  1, sizeof ( *init_hca ) ),
293  0, init_hca, 0, NULL );
294 }
295 
296 static inline int
298  return hermon_cmd ( hermon,
300  0, NULL, 0, NULL );
301 }
302 
303 static inline int
304 hermon_cmd_init_port ( struct hermon *hermon, unsigned int port ) {
305  return hermon_cmd ( hermon,
307  0, NULL, port, NULL );
308 }
309 
310 static inline int
311 hermon_cmd_close_port ( struct hermon *hermon, unsigned int port ) {
312  return hermon_cmd ( hermon,
314  0, NULL, port, NULL );
315 }
316 
317 static inline int
318 hermon_cmd_set_port ( struct hermon *hermon, int is_ethernet,
319  unsigned int port_selector,
320  const union hermonprm_set_port *set_port ) {
321  return hermon_cmd ( hermon,
323  1, sizeof ( *set_port ) ),
324  is_ethernet, set_port, port_selector, NULL );
325 }
326 
327 static inline int
328 hermon_cmd_sw2hw_mpt ( struct hermon *hermon, unsigned int index,
329  const struct hermonprm_mpt *mpt ) {
330  return hermon_cmd ( hermon,
332  1, sizeof ( *mpt ) ),
333  0, mpt, index, NULL );
334 }
335 
336 static inline int
338  const struct hermonprm_write_mtt *write_mtt ) {
339  return hermon_cmd ( hermon,
341  1, sizeof ( *write_mtt ) ),
342  0, write_mtt, 1, NULL );
343 }
344 
345 static inline int
346 hermon_cmd_map_eq ( struct hermon *hermon, unsigned long index_map,
347  const struct hermonprm_event_mask *mask ) {
348  return hermon_cmd ( hermon,
350  0, sizeof ( *mask ) ),
351  0, mask, index_map, NULL );
352 }
353 
354 static inline int
355 hermon_cmd_sw2hw_eq ( struct hermon *hermon, unsigned int index,
356  const struct hermonprm_eqc *eqctx ) {
357  return hermon_cmd ( hermon,
359  1, sizeof ( *eqctx ) ),
360  0, eqctx, index, NULL );
361 }
362 
363 static inline int
364 hermon_cmd_hw2sw_eq ( struct hermon *hermon, unsigned int index,
365  struct hermonprm_eqc *eqctx ) {
366  return hermon_cmd ( hermon,
368  1, sizeof ( *eqctx ) ),
369  1, NULL, index, eqctx );
370 }
371 
372 static inline int
373 hermon_cmd_query_eq ( struct hermon *hermon, unsigned int index,
374  struct hermonprm_eqc *eqctx ) {
375  return hermon_cmd ( hermon,
377  1, sizeof ( *eqctx ) ),
378  0, NULL, index, eqctx );
379 }
380 
381 static inline int
382 hermon_cmd_sw2hw_cq ( struct hermon *hermon, unsigned long cqn,
383  const struct hermonprm_completion_queue_context *cqctx ){
384  return hermon_cmd ( hermon,
386  1, sizeof ( *cqctx ) ),
387  0, cqctx, cqn, NULL );
388 }
389 
390 static inline int
391 hermon_cmd_hw2sw_cq ( struct hermon *hermon, unsigned long cqn,
392  struct hermonprm_completion_queue_context *cqctx ) {
393  return hermon_cmd ( hermon,
395  1, sizeof ( *cqctx ) ),
396  0, NULL, cqn, cqctx );
397 }
398 
399 static inline int
400 hermon_cmd_query_cq ( struct hermon *hermon, unsigned long cqn,
401  struct hermonprm_completion_queue_context *cqctx ) {
402  return hermon_cmd ( hermon,
404  1, sizeof ( *cqctx ) ),
405  0, NULL, cqn, cqctx );
406 }
407 
408 static inline int
409 hermon_cmd_rst2init_qp ( struct hermon *hermon, unsigned long qpn,
410  const struct hermonprm_qp_ee_state_transitions *ctx ){
411  return hermon_cmd ( hermon,
413  1, sizeof ( *ctx ) ),
414  0, ctx, qpn, NULL );
415 }
416 
417 static inline int
418 hermon_cmd_init2rtr_qp ( struct hermon *hermon, unsigned long qpn,
419  const struct hermonprm_qp_ee_state_transitions *ctx ){
420  return hermon_cmd ( hermon,
422  1, sizeof ( *ctx ) ),
423  0, ctx, qpn, NULL );
424 }
425 
426 static inline int
427 hermon_cmd_rtr2rts_qp ( struct hermon *hermon, unsigned long qpn,
428  const struct hermonprm_qp_ee_state_transitions *ctx ) {
429  return hermon_cmd ( hermon,
431  1, sizeof ( *ctx ) ),
432  0, ctx, qpn, NULL );
433 }
434 
435 static inline int
436 hermon_cmd_rts2rts_qp ( struct hermon *hermon, unsigned long qpn,
437  const struct hermonprm_qp_ee_state_transitions *ctx ) {
438  return hermon_cmd ( hermon,
440  1, sizeof ( *ctx ) ),
441  0, ctx, qpn, NULL );
442 }
443 
444 static inline int
445 hermon_cmd_2rst_qp ( struct hermon *hermon, unsigned long qpn ) {
446  return hermon_cmd ( hermon,
448  0x03, NULL, qpn, NULL );
449 }
450 
451 static inline int
452 hermon_cmd_query_qp ( struct hermon *hermon, unsigned long qpn,
453  struct hermonprm_qp_ee_state_transitions *ctx ) {
454  return hermon_cmd ( hermon,
456  1, sizeof ( *ctx ) ),
457  0, NULL, qpn, ctx );
458 }
459 
460 static inline int
461 hermon_cmd_conf_special_qp ( struct hermon *hermon, unsigned int internal_qps,
462  unsigned long base_qpn ) {
463  return hermon_cmd ( hermon,
465  internal_qps, NULL, base_qpn, NULL );
466 }
467 
468 static inline int
469 hermon_cmd_mad_ifc ( struct hermon *hermon, unsigned int port,
470  union hermonprm_mad *mad ) {
471  return hermon_cmd ( hermon,
473  1, sizeof ( *mad ),
474  1, sizeof ( *mad ) ),
475  0x03, mad, port, mad );
476 }
477 
478 static inline int
479 hermon_cmd_read_mcg ( struct hermon *hermon, unsigned int index,
480  struct hermonprm_mcg_entry *mcg ) {
481  return hermon_cmd ( hermon,
483  1, sizeof ( *mcg ) ),
484  0, NULL, index, mcg );
485 }
486 
487 static inline int
488 hermon_cmd_write_mcg ( struct hermon *hermon, unsigned int index,
489  const struct hermonprm_mcg_entry *mcg ) {
490  return hermon_cmd ( hermon,
492  1, sizeof ( *mcg ) ),
493  0, mcg, index, NULL );
494 }
495 
496 static inline int
497 hermon_cmd_mgid_hash ( struct hermon *hermon, const union ib_gid *gid,
498  struct hermonprm_mgm_hash *hash ) {
499  return hermon_cmd ( hermon,
501  1, sizeof ( *gid ),
502  0, sizeof ( *hash ) ),
503  0, gid, 0, hash );
504 }
505 
506 static inline int
507 hermon_cmd_mod_stat_cfg ( struct hermon *hermon, unsigned int mode,
508  unsigned int input_mod,
509  struct hermonprm_scalar_parameter *portion ) {
510  return hermon_cmd ( hermon,
512  0, sizeof ( *portion ),
513  0, sizeof ( *portion ) ),
514  mode, portion, input_mod, portion );
515 }
516 
517 static inline int
518 hermon_cmd_query_port ( struct hermon *hermon, unsigned int port,
519  struct hermonprm_query_port_cap *query_port ) {
520  return hermon_cmd ( hermon,
522  1, sizeof ( *query_port ) ),
523  0, NULL, port, query_port );
524 }
525 
526 static inline int
527 hermon_cmd_sense_port ( struct hermon *hermon, unsigned int port,
528  struct hermonprm_sense_port *port_type ) {
529  return hermon_cmd ( hermon,
531  0, sizeof ( *port_type ) ),
532  0, NULL, port, port_type );
533 }
534 
535 static inline int
537  return hermon_cmd ( hermon,
539  0, NULL, 0, NULL );
540 }
541 
542 static inline int
543 hermon_cmd_unmap_icm ( struct hermon *hermon, unsigned int page_count,
544  const struct hermonprm_scalar_parameter *offset ) {
545  return hermon_cmd ( hermon,
547  0, sizeof ( *offset ) ),
548  0, offset, page_count, NULL );
549 }
550 
551 static inline int
553  const struct hermonprm_virtual_physical_mapping *map ) {
554  return hermon_cmd ( hermon,
556  1, sizeof ( *map ) ),
557  0, map, 1, NULL );
558 }
559 
560 static inline int
562  return hermon_cmd ( hermon,
564  0, NULL, 0, NULL );
565 }
566 
567 static inline int
569  const struct hermonprm_virtual_physical_mapping *map ) {
570  return hermon_cmd ( hermon,
572  1, sizeof ( *map ) ),
573  0, map, 1, NULL );
574 }
575 
576 static inline int
578  const struct hermonprm_scalar_parameter *icm_size,
579  struct hermonprm_scalar_parameter *icm_aux_size ) {
580  return hermon_cmd ( hermon,
582  0, sizeof ( *icm_size ),
583  0, sizeof (*icm_aux_size) ),
584  0, icm_size, 0, icm_aux_size );
585 }
586 
587 static inline int
589  return hermon_cmd ( hermon,
591  0, NULL, 0, NULL );
592 }
593 
594 static inline int
596  const struct hermonprm_virtual_physical_mapping *map ) {
597  return hermon_cmd ( hermon,
599  1, sizeof ( *map ) ),
600  0, map, 1, NULL );
601 }
602 
603 /***************************************************************************
604  *
605  * Memory translation table operations
606  *
607  ***************************************************************************
608  */
609 
610 /**
611  * Allocate MTT entries
612  *
613  * @v hermon Hermon device
614  * @v memory Memory to map into MTT
615  * @v len Length of memory to map
616  * @v mtt MTT descriptor to fill in
617  * @ret rc Return status code
618  */
619 static int hermon_alloc_mtt ( struct hermon *hermon,
620  const void *memory, size_t len,
621  struct hermon_mtt *mtt ) {
622  struct hermonprm_write_mtt write_mtt;
625  unsigned int page_offset;
626  unsigned int num_pages;
627  int mtt_offset;
628  unsigned int mtt_base_addr;
629  unsigned int i;
630  int rc;
631 
632  /* Find available MTT entries */
633  start = virt_to_phys ( memory );
634  page_offset = ( start & ( HERMON_PAGE_SIZE - 1 ) );
635  start -= page_offset;
636  len += page_offset;
639  num_pages );
640  if ( mtt_offset < 0 ) {
641  DBGC ( hermon, "Hermon %p could not allocate %d MTT entries\n",
642  hermon, num_pages );
643  rc = mtt_offset;
644  goto err_mtt_offset;
645  }
646  mtt_base_addr = ( ( hermon->cap.reserved_mtts + mtt_offset ) *
648  addr = start;
649 
650  /* Fill in MTT structure */
651  mtt->mtt_offset = mtt_offset;
652  mtt->num_pages = num_pages;
653  mtt->mtt_base_addr = mtt_base_addr;
654  mtt->page_offset = page_offset;
655 
656  /* Construct and issue WRITE_MTT commands */
657  for ( i = 0 ; i < num_pages ; i++ ) {
658  memset ( &write_mtt, 0, sizeof ( write_mtt ) );
659  MLX_FILL_1 ( &write_mtt.mtt_base_addr, 1,
660  value, mtt_base_addr );
661  MLX_FILL_H ( &write_mtt.mtt, 0, ptag_h, addr );
662  MLX_FILL_2 ( &write_mtt.mtt, 1,
663  p, 1,
664  ptag_l, ( addr >> 3 ) );
665  if ( ( rc = hermon_cmd_write_mtt ( hermon,
666  &write_mtt ) ) != 0 ) {
667  DBGC ( hermon, "Hermon %p could not write MTT at %x\n",
669  goto err_write_mtt;
670  }
673  }
674 
675  DBGC ( hermon, "Hermon %p MTT entries [%#x,%#x] for "
676  "[%08lx,%08lx,%08lx,%08lx)\n", hermon, mtt->mtt_offset,
677  ( mtt->mtt_offset + mtt->num_pages - 1 ), start,
678  ( start + page_offset ), ( start + len ), addr );
679 
680  return 0;
681 
682  err_write_mtt:
683  hermon_bitmask_free ( hermon->mtt_inuse, mtt_offset, num_pages );
684  err_mtt_offset:
685  return rc;
686 }
687 
688 /**
689  * Free MTT entries
690  *
691  * @v hermon Hermon device
692  * @v mtt MTT descriptor
693  */
694 static void hermon_free_mtt ( struct hermon *hermon,
695  struct hermon_mtt *mtt ) {
696 
697  DBGC ( hermon, "Hermon %p MTT entries [%#x,%#x] freed\n",
698  hermon, mtt->mtt_offset,
699  ( mtt->mtt_offset + mtt->num_pages - 1 ) );
700  hermon_bitmask_free ( hermon->mtt_inuse, mtt->mtt_offset,
701  mtt->num_pages );
702 }
703 
704 /***************************************************************************
705  *
706  * Static configuration operations
707  *
708  ***************************************************************************
709  */
710 
711 /**
712  * Calculate offset within static configuration
713  *
714  * @v field Field
715  * @ret offset Offset
716  */
717 #define HERMON_MOD_STAT_CFG_OFFSET( field ) \
718  ( ( MLX_BIT_OFFSET ( struct hermonprm_mod_stat_cfg_st, field ) / 8 ) \
719  & ~( sizeof ( struct hermonprm_scalar_parameter ) - 1 ) )
720 
721 /**
722  * Query or modify static configuration
723  *
724  * @v hermon Hermon device
725  * @v port Port
726  * @v mode Command mode
727  * @v offset Offset within static configuration
728  * @v stat_cfg Static configuration
729  * @ret rc Return status code
730  */
731 static int hermon_mod_stat_cfg ( struct hermon *hermon, unsigned int port,
732  unsigned int mode, unsigned int offset,
733  struct hermonprm_mod_stat_cfg *stat_cfg ) {
734  struct hermonprm_scalar_parameter *portion =
735  ( ( void * ) &stat_cfg->u.bytes[offset] );
736  struct hermonprm_mod_stat_cfg_input_mod mod;
737  int rc;
738 
739  /* Sanity check */
740  assert ( ( offset % sizeof ( *portion ) ) == 0 );
741 
742  /* Construct input modifier */
743  memset ( &mod, 0, sizeof ( mod ) );
744  MLX_FILL_2 ( &mod, 0,
745  portnum, port,
746  offset, offset );
747 
748  /* Issue command */
749  if ( ( rc = hermon_cmd_mod_stat_cfg ( hermon, mode,
750  be32_to_cpu ( mod.u.dwords[0] ),
751  portion ) ) != 0 )
752  return rc;
753 
754  return 0;
755 }
756 
757 /***************************************************************************
758  *
759  * MAD operations
760  *
761  ***************************************************************************
762  */
763 
764 /**
765  * Issue management datagram
766  *
767  * @v ibdev Infiniband device
768  * @v mad Management datagram
769  * @ret rc Return status code
770  */
771 static int hermon_mad ( struct ib_device *ibdev, union ib_mad *mad ) {
772  struct hermon *hermon = ib_get_drvdata ( ibdev );
773  union hermonprm_mad mad_ifc;
774  int rc;
775 
776  linker_assert ( sizeof ( *mad ) == sizeof ( mad_ifc.mad ),
777  mad_size_mismatch );
778 
779  /* Copy in request packet */
780  memcpy ( &mad_ifc.mad, mad, sizeof ( mad_ifc.mad ) );
781 
782  /* Issue MAD */
783  if ( ( rc = hermon_cmd_mad_ifc ( hermon, ibdev->port,
784  &mad_ifc ) ) != 0 ) {
785  DBGC ( hermon, "Hermon %p port %d could not issue MAD IFC: "
786  "%s\n", hermon, ibdev->port, strerror ( rc ) );
787  return rc;
788  }
789 
790  /* Copy out reply packet */
791  memcpy ( mad, &mad_ifc.mad, sizeof ( *mad ) );
792 
793  if ( mad->hdr.status != 0 ) {
794  DBGC ( hermon, "Hermon %p port %d MAD IFC status %04x\n",
795  hermon, ibdev->port, ntohs ( mad->hdr.status ) );
796  return -EIO;
797  }
798  return 0;
799 }
800 
801 /***************************************************************************
802  *
803  * Completion queue operations
804  *
805  ***************************************************************************
806  */
807 
808 /**
809  * Dump completion queue context (for debugging only)
810  *
811  * @v hermon Hermon device
812  * @v cq Completion queue
813  * @ret rc Return status code
814  */
815 static __attribute__ (( unused )) int
817  struct hermonprm_completion_queue_context cqctx;
818  int rc;
819 
820  memset ( &cqctx, 0, sizeof ( cqctx ) );
821  if ( ( rc = hermon_cmd_query_cq ( hermon, cq->cqn, &cqctx ) ) != 0 ) {
822  DBGC ( hermon, "Hermon %p CQN %#lx QUERY_CQ failed: %s\n",
823  hermon, cq->cqn, strerror ( rc ) );
824  return rc;
825  }
826  DBGC ( hermon, "Hermon %p CQN %#lx context:\n", hermon, cq->cqn );
827  DBGC_HDA ( hermon, 0, &cqctx, sizeof ( cqctx ) );
828 
829  return 0;
830 }
831 
832 /**
833  * Create completion queue
834  *
835  * @v ibdev Infiniband device
836  * @v cq Completion queue
837  * @ret rc Return status code
838  */
static int hermon_create_cq ( struct ib_device *ibdev,
			      struct ib_completion_queue *cq ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermon_completion_queue *hermon_cq;
	struct hermonprm_completion_queue_context cqctx;
	int cqn_offset;
	unsigned int i;
	int rc;

	/* Find a free completion queue number (offset into the region
	 * above the hardware-reserved CQNs) */
	cqn_offset = hermon_bitmask_alloc ( hermon->cq_inuse,
					    HERMON_MAX_CQS, 1 );
	if ( cqn_offset < 0 ) {
		DBGC ( hermon, "Hermon %p out of completion queues\n",
		       hermon );
		rc = cqn_offset;
		goto err_cqn_offset;
	}
	cq->cqn = ( hermon->cap.reserved_cqs + cqn_offset );

	/* Allocate control structures */
	hermon_cq = zalloc ( sizeof ( *hermon_cq ) );
	if ( ! hermon_cq ) {
		rc = -ENOMEM;
		goto err_hermon_cq;
	}

	/* Allocate doorbell (DMA-coherent, self-aligned) and zero it */
	hermon_cq->doorbell = malloc_dma ( sizeof ( hermon_cq->doorbell[0] ),
					   sizeof ( hermon_cq->doorbell[0] ) );
	if ( ! hermon_cq->doorbell ) {
		rc = -ENOMEM;
		goto err_doorbell;
	}
	memset ( hermon_cq->doorbell, 0, sizeof ( hermon_cq->doorbell[0] ) );

	/* Allocate completion queue ring itself */
	hermon_cq->cqe_size = ( cq->num_cqes * sizeof ( hermon_cq->cqe[0] ) );
	hermon_cq->cqe = malloc_dma ( hermon_cq->cqe_size,
				      sizeof ( hermon_cq->cqe[0] ) );
	if ( ! hermon_cq->cqe ) {
		rc = -ENOMEM;
		goto err_cqe;
	}
	memset ( hermon_cq->cqe, 0, hermon_cq->cqe_size );
	/* Set the owner bit on every entry before handing the ring to
	 * hardware.  NOTE(review): ownership polarity follows the
	 * ConnectX CQE ownership convention — confirm against the PRM. */
	for ( i = 0 ; i < cq->num_cqes ; i++ ) {
		MLX_FILL_1 ( &hermon_cq->cqe[i].normal, 7, owner, 1 );
	}
	/* Ensure CQE initialisation is visible before SW2HW_CQ */
	barrier();

	/* Allocate MTT entries mapping the ring for the HCA */
	if ( ( rc = hermon_alloc_mtt ( hermon, hermon_cq->cqe,
				       hermon_cq->cqe_size,
				       &hermon_cq->mtt ) ) != 0 )
		goto err_alloc_mtt;

	/* Hand queue over to hardware via SW2HW_CQ, describing the
	 * ring (MTT base), the doorbell record and the event queue */
	memset ( &cqctx, 0, sizeof ( cqctx ) );
	MLX_FILL_1 ( &cqctx, 0, st, 0xa /* "Event fired" */ );
	MLX_FILL_1 ( &cqctx, 2,
		     page_offset, ( hermon_cq->mtt.page_offset >> 5 ) );
	MLX_FILL_2 ( &cqctx, 3,
		     usr_page, HERMON_UAR_NON_EQ_PAGE,
		     log_cq_size, fls ( cq->num_cqes - 1 ) );
	MLX_FILL_1 ( &cqctx, 5, c_eqn, hermon->eq.eqn );
	MLX_FILL_H ( &cqctx, 6, mtt_base_addr_h,
		     hermon_cq->mtt.mtt_base_addr );
	MLX_FILL_1 ( &cqctx, 7, mtt_base_addr_l,
		     ( hermon_cq->mtt.mtt_base_addr >> 3 ) );
	MLX_FILL_H ( &cqctx, 14, db_record_addr_h,
		     virt_to_phys ( hermon_cq->doorbell ) );
	MLX_FILL_1 ( &cqctx, 15, db_record_addr_l,
		     ( virt_to_phys ( hermon_cq->doorbell ) >> 3 ) );
	if ( ( rc = hermon_cmd_sw2hw_cq ( hermon, cq->cqn, &cqctx ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p CQN %#lx SW2HW_CQ failed: %s\n",
		       hermon, cq->cqn, strerror ( rc ) );
		goto err_sw2hw_cq;
	}

	DBGC ( hermon, "Hermon %p CQN %#lx ring [%08lx,%08lx), doorbell "
	       "%08lx\n", hermon, cq->cqn, virt_to_phys ( hermon_cq->cqe ),
	       ( virt_to_phys ( hermon_cq->cqe ) + hermon_cq->cqe_size ),
	       virt_to_phys ( hermon_cq->doorbell ) );
	ib_cq_set_drvdata ( cq, hermon_cq );
	return 0;

	/* Unwind in reverse order of construction */
 err_sw2hw_cq:
	hermon_free_mtt ( hermon, &hermon_cq->mtt );
 err_alloc_mtt:
	free_dma ( hermon_cq->cqe, hermon_cq->cqe_size );
 err_cqe:
	free_dma ( hermon_cq->doorbell, sizeof ( hermon_cq->doorbell[0] ) );
 err_doorbell:
	free ( hermon_cq );
 err_hermon_cq:
	hermon_bitmask_free ( hermon->cq_inuse, cqn_offset, 1 );
 err_cqn_offset:
	return rc;
}
938 
939 /**
940  * Destroy completion queue
941  *
942  * @v ibdev Infiniband device
943  * @v cq Completion queue
944  */
945 static void hermon_destroy_cq ( struct ib_device *ibdev,
946  struct ib_completion_queue *cq ) {
947  struct hermon *hermon = ib_get_drvdata ( ibdev );
948  struct hermon_completion_queue *hermon_cq = ib_cq_get_drvdata ( cq );
949  struct hermonprm_completion_queue_context cqctx;
950  int cqn_offset;
951  int rc;
952 
953  /* Take ownership back from hardware */
954  if ( ( rc = hermon_cmd_hw2sw_cq ( hermon, cq->cqn, &cqctx ) ) != 0 ) {
955  DBGC ( hermon, "Hermon %p CQN %#lx FATAL HW2SW_CQ failed: "
956  "%s\n", hermon, cq->cqn, strerror ( rc ) );
957  /* Leak memory and return; at least we avoid corruption */
958  return;
959  }
960 
961  /* Free MTT entries */
962  hermon_free_mtt ( hermon, &hermon_cq->mtt );
963 
964  /* Free memory */
965  free_dma ( hermon_cq->cqe, hermon_cq->cqe_size );
966  free_dma ( hermon_cq->doorbell, sizeof ( hermon_cq->doorbell[0] ) );
967  free ( hermon_cq );
968 
969  /* Mark queue number as free */
970  cqn_offset = ( cq->cqn - hermon->cap.reserved_cqs );
971  hermon_bitmask_free ( hermon->cq_inuse, cqn_offset, 1 );
972 
973  ib_cq_set_drvdata ( cq, NULL );
974 }
975 
976 /***************************************************************************
977  *
978  * Queue pair operations
979  *
980  ***************************************************************************
981  */
982 
983 /**
984  * Assign queue pair number
985  *
986  * @v ibdev Infiniband device
987  * @v qp Queue pair
988  * @ret rc Return status code
989  */
990 static int hermon_alloc_qpn ( struct ib_device *ibdev,
991  struct ib_queue_pair *qp ) {
992  struct hermon *hermon = ib_get_drvdata ( ibdev );
993  unsigned int port_offset;
994  int qpn_offset;
995 
996  /* Calculate queue pair number */
997  port_offset = ( ibdev->port - HERMON_PORT_BASE );
998 
999  switch ( qp->type ) {
1000  case IB_QPT_SMI:
1001  qp->qpn = ( hermon->special_qpn_base + port_offset );
1002  return 0;
1003  case IB_QPT_GSI:
1004  qp->qpn = ( hermon->special_qpn_base + 2 + port_offset );
1005  return 0;
1006  case IB_QPT_UD:
1007  case IB_QPT_RC:
1008  case IB_QPT_ETH:
1009  /* Find a free queue pair number */
1010  qpn_offset = hermon_bitmask_alloc ( hermon->qp_inuse,
1011  HERMON_MAX_QPS, 1 );
1012  if ( qpn_offset < 0 ) {
1013  DBGC ( hermon, "Hermon %p out of queue pairs\n",
1014  hermon );
1015  return qpn_offset;
1016  }
1017  qp->qpn = ( ( random() & HERMON_QPN_RANDOM_MASK ) |
1018  ( hermon->qpn_base + qpn_offset ) );
1019  return 0;
1020  default:
1021  DBGC ( hermon, "Hermon %p unsupported QP type %d\n",
1022  hermon, qp->type );
1023  return -ENOTSUP;
1024  }
1025 }
1026 
1027 /**
1028  * Free queue pair number
1029  *
1030  * @v ibdev Infiniband device
1031  * @v qp Queue pair
1032  */
1033 static void hermon_free_qpn ( struct ib_device *ibdev,
1034  struct ib_queue_pair *qp ) {
1035  struct hermon *hermon = ib_get_drvdata ( ibdev );
1036  int qpn_offset;
1037 
1038  qpn_offset = ( ( qp->qpn & ~HERMON_QPN_RANDOM_MASK )
1039  - hermon->qpn_base );
1040  if ( qpn_offset >= 0 )
1041  hermon_bitmask_free ( hermon->qp_inuse, qpn_offset, 1 );
1042 }
1043 
1044 /**
1045  * Calculate transmission rate
1046  *
1047  * @v av Address vector
1048  * @ret hermon_rate Hermon rate
1049  */
1050 static unsigned int hermon_rate ( struct ib_address_vector *av ) {
1051  return ( ( ( av->rate >= IB_RATE_2_5 ) && ( av->rate <= IB_RATE_120 ) )
1052  ? ( av->rate + 5 ) : 0 );
1053 }
1054 
1055 /**
1056  * Calculate schedule queue
1057  *
1058  * @v ibdev Infiniband device
1059  * @v qp Queue pair
1060  * @ret sched_queue Schedule queue
1061  */
1062 static unsigned int hermon_sched_queue ( struct ib_device *ibdev,
1063  struct ib_queue_pair *qp ) {
1064  return ( ( ( qp->type == IB_QPT_SMI ) ?
1066  ( ( ibdev->port - 1 ) << 6 ) );
1067 }
1068 
1069 /** Queue pair transport service type map */
1070 static uint8_t hermon_qp_st[] = {
1073  [IB_QPT_UD] = HERMON_ST_UD,
1074  [IB_QPT_RC] = HERMON_ST_RC,
1076 };
1077 
1078 /**
1079  * Dump queue pair context (for debugging only)
1080  *
1081  * @v hermon Hermon device
1082  * @v qp Queue pair
1083  * @ret rc Return status code
1084  */
1085 static __attribute__ (( unused )) int
1087  struct hermonprm_qp_ee_state_transitions qpctx;
1088  int rc;
1089 
1090  memset ( &qpctx, 0, sizeof ( qpctx ) );
1091  if ( ( rc = hermon_cmd_query_qp ( hermon, qp->qpn, &qpctx ) ) != 0 ) {
1092  DBGC ( hermon, "Hermon %p QPN %#lx QUERY_QP failed: %s\n",
1093  hermon, qp->qpn, strerror ( rc ) );
1094  return rc;
1095  }
1096  DBGC ( hermon, "Hermon %p QPN %#lx context:\n", hermon, qp->qpn );
1097  DBGC_HDA ( hermon, 0, &qpctx.u.dwords[2], ( sizeof ( qpctx ) - 8 ) );
1098 
1099  return 0;
1100 }
1101 
1102 /**
1103  * Create queue pair
1104  *
1105  * @v ibdev Infiniband device
1106  * @v qp Queue pair
1107  * @ret rc Return status code
1108  */
1109 static int hermon_create_qp ( struct ib_device *ibdev,
1110  struct ib_queue_pair *qp ) {
1111  struct hermon *hermon = ib_get_drvdata ( ibdev );
1112  struct hermon_queue_pair *hermon_qp;
1113  struct hermonprm_qp_ee_state_transitions qpctx;
1114  struct hermonprm_wqe_segment_data_ptr *data;
1115  unsigned int i;
1116  int rc;
1117 
1118  /* Calculate queue pair number */
1119  if ( ( rc = hermon_alloc_qpn ( ibdev, qp ) ) != 0 )
1120  goto err_alloc_qpn;
1121 
1122  /* Allocate control structures */
1123  hermon_qp = zalloc ( sizeof ( *hermon_qp ) );
1124  if ( ! hermon_qp ) {
1125  rc = -ENOMEM;
1126  goto err_hermon_qp;
1127  }
1128 
1129  /* Allocate doorbells */
1130  hermon_qp->recv.doorbell =
1131  malloc_dma ( sizeof ( hermon_qp->recv.doorbell[0] ),
1132  sizeof ( hermon_qp->recv.doorbell[0] ) );
1133  if ( ! hermon_qp->recv.doorbell ) {
1134  rc = -ENOMEM;
1135  goto err_recv_doorbell;
1136  }
1137  memset ( hermon_qp->recv.doorbell, 0,
1138  sizeof ( hermon_qp->recv.doorbell[0] ) );
1139  hermon_qp->send.doorbell =
 /* NOTE(review): the right-hand side of this assignment (orig lines
  * 1140-1141, computing the send doorbell address within the UAR) is
  * missing from this extract — restore from upstream hermon.c.
  */
1142 
1143  /* Allocate work queue buffer */
1144  hermon_qp->send.num_wqes = ( qp->send.num_wqes /* headroom */ + 1 +
1145  ( 2048 / sizeof ( hermon_qp->send.wqe[0] ) ) );
1146  hermon_qp->send.num_wqes =
1147  ( 1 << fls ( hermon_qp->send.num_wqes - 1 ) ); /* round up */
1148  hermon_qp->send.wqe_size = ( hermon_qp->send.num_wqes *
1149  sizeof ( hermon_qp->send.wqe[0] ) );
1150  hermon_qp->recv.wqe_size = ( qp->recv.num_wqes *
1151  sizeof ( hermon_qp->recv.wqe[0] ) );
 /* Datagram-style QPs additionally need per-WQE GRH buffers */
1152  if ( ( qp->type == IB_QPT_SMI ) || ( qp->type == IB_QPT_GSI ) ||
1153  ( qp->type == IB_QPT_UD ) ) {
1154  hermon_qp->recv.grh_size = ( qp->recv.num_wqes *
1155  sizeof ( hermon_qp->recv.grh[0] ));
1156  }
 /* Single buffer holds send ring, then receive ring, then GRHs */
1157  hermon_qp->wqe_size = ( hermon_qp->send.wqe_size +
1158  hermon_qp->recv.wqe_size +
1159  hermon_qp->recv.grh_size );
1160  hermon_qp->wqe = malloc_dma ( hermon_qp->wqe_size,
1161  sizeof ( hermon_qp->send.wqe[0] ) );
1162  if ( ! hermon_qp->wqe ) {
1163  rc = -ENOMEM;
1164  goto err_alloc_wqe;
1165  }
1166  hermon_qp->send.wqe = hermon_qp->wqe;
1167  hermon_qp->recv.wqe = ( hermon_qp->wqe + hermon_qp->send.wqe_size );
1168  if ( hermon_qp->recv.grh_size ) {
1169  hermon_qp->recv.grh = ( hermon_qp->wqe +
1170  hermon_qp->send.wqe_size +
1171  hermon_qp->recv.wqe_size );
1172  }
1173 
1174  /* Initialise work queue entries */
1175  memset ( hermon_qp->send.wqe, 0xff, hermon_qp->send.wqe_size );
1176  memset ( hermon_qp->recv.wqe, 0, hermon_qp->recv.wqe_size );
 /* Mark every receive scatter entry with the invalid lkey */
1177  data = &hermon_qp->recv.wqe[0].recv.data[0];
1178  for ( i = 0 ; i < ( hermon_qp->recv.wqe_size / sizeof ( *data ) ); i++){
1179  MLX_FILL_1 ( data, 1, l_key, HERMON_INVALID_LKEY );
1180  data++;
1181  }
1182 
1183  /* Allocate MTT entries */
1184  if ( ( rc = hermon_alloc_mtt ( hermon, hermon_qp->wqe,
1185  hermon_qp->wqe_size,
1186  &hermon_qp->mtt ) ) != 0 ) {
1187  goto err_alloc_mtt;
1188  }
1189 
1190  /* Transition queue to INIT state */
1191  memset ( &qpctx, 0, sizeof ( qpctx ) );
1192  MLX_FILL_2 ( &qpctx, 2,
1193  qpc_eec_data.pm_state, HERMON_PM_STATE_MIGRATED,
1194  qpc_eec_data.st, hermon_qp_st[qp->type] );
1195  MLX_FILL_1 ( &qpctx, 3, qpc_eec_data.pd, HERMON_GLOBAL_PD );
 /* Queue sizes and strides are encoded as log2 values */
1196  MLX_FILL_4 ( &qpctx, 4,
1197  qpc_eec_data.log_rq_size, fls ( qp->recv.num_wqes - 1 ),
1198  qpc_eec_data.log_rq_stride,
1199  ( fls ( sizeof ( hermon_qp->recv.wqe[0] ) - 1 ) - 4 ),
1200  qpc_eec_data.log_sq_size,
1201  fls ( hermon_qp->send.num_wqes - 1 ),
1202  qpc_eec_data.log_sq_stride,
1203  ( fls ( sizeof ( hermon_qp->send.wqe[0] ) - 1 ) - 4 ) );
1204  MLX_FILL_1 ( &qpctx, 5,
1205  qpc_eec_data.usr_page, HERMON_UAR_NON_EQ_PAGE );
1206  MLX_FILL_1 ( &qpctx, 33, qpc_eec_data.cqn_snd, qp->send.cq->cqn );
1207  MLX_FILL_4 ( &qpctx, 38,
1208  qpc_eec_data.rre, 1,
1209  qpc_eec_data.rwe, 1,
1210  qpc_eec_data.rae, 1,
1211  qpc_eec_data.page_offset,
1212  ( hermon_qp->mtt.page_offset >> 6 ) );
1213  MLX_FILL_1 ( &qpctx, 41, qpc_eec_data.cqn_rcv, qp->recv.cq->cqn );
1214  MLX_FILL_H ( &qpctx, 42, qpc_eec_data.db_record_addr_h,
1215  virt_to_phys ( hermon_qp->recv.doorbell ) );
1216  MLX_FILL_1 ( &qpctx, 43, qpc_eec_data.db_record_addr_l,
1217  ( virt_to_phys ( hermon_qp->recv.doorbell ) >> 2 ) );
1218  MLX_FILL_H ( &qpctx, 52, qpc_eec_data.mtt_base_addr_h,
1219  hermon_qp->mtt.mtt_base_addr );
1220  MLX_FILL_1 ( &qpctx, 53, qpc_eec_data.mtt_base_addr_l,
1221  ( hermon_qp->mtt.mtt_base_addr >> 3 ) );
1222  if ( ( rc = hermon_cmd_rst2init_qp ( hermon, qp->qpn,
1223  &qpctx ) ) != 0 ) {
1224  DBGC ( hermon, "Hermon %p QPN %#lx RST2INIT_QP failed: %s\n",
1225  hermon, qp->qpn, strerror ( rc ) );
1226  goto err_rst2init_qp;
1227  }
1228  hermon_qp->state = HERMON_QP_ST_INIT;
1229 
1230  DBGC ( hermon, "Hermon %p QPN %#lx send ring [%08lx,%08lx), doorbell "
1231  "%08lx\n", hermon, qp->qpn,
1232  virt_to_phys ( hermon_qp->send.wqe ),
1233  ( virt_to_phys ( hermon_qp->send.wqe ) +
1234  hermon_qp->send.wqe_size ),
1235  virt_to_phys ( hermon_qp->send.doorbell ) );
1236  DBGC ( hermon, "Hermon %p QPN %#lx receive ring [%08lx,%08lx), "
1237  "doorbell %08lx\n", hermon, qp->qpn,
1238  virt_to_phys ( hermon_qp->recv.wqe ),
1239  ( virt_to_phys ( hermon_qp->recv.wqe ) +
1240  hermon_qp->recv.wqe_size ),
1241  virt_to_phys ( hermon_qp->recv.doorbell ) );
1242  DBGC ( hermon, "Hermon %p QPN %#lx send CQN %#lx receive CQN %#lx\n",
1243  hermon, qp->qpn, qp->send.cq->cqn, qp->recv.cq->cqn );
1244  ib_qp_set_drvdata ( qp, hermon_qp );
1245  return 0;
1246 
 /* This statement follows an unconditional return and precedes the
  * first error label, so it is unreachable as the code stands; it
  * would only run if a later failure path were added that needed to
  * undo RST2INIT.
  */
1247  hermon_cmd_2rst_qp ( hermon, qp->qpn );
1248  err_rst2init_qp:
1249  hermon_free_mtt ( hermon, &hermon_qp->mtt );
1250  err_alloc_mtt:
1251  free_dma ( hermon_qp->wqe, hermon_qp->wqe_size );
1252  err_alloc_wqe:
1253  free_dma ( hermon_qp->recv.doorbell,
1254  sizeof ( hermon_qp->recv.doorbell[0] ) );
1255  err_recv_doorbell:
1256  free ( hermon_qp );
1257  err_hermon_qp:
1258  hermon_free_qpn ( ibdev, qp );
1259  err_alloc_qpn:
1260  return rc;
1261 }
1262 
1263 /**
1264  * Modify queue pair
1265  *
1266  * @v ibdev Infiniband device
1267  * @v qp Queue pair
1268  * @ret rc Return status code
1269  */
1270 static int hermon_modify_qp ( struct ib_device *ibdev,
1271  struct ib_queue_pair *qp ) {
1272  struct hermon *hermon = ib_get_drvdata ( ibdev );
1273  struct hermon_queue_pair *hermon_qp = ib_qp_get_drvdata ( qp );
1274  struct hermonprm_qp_ee_state_transitions qpctx;
1275  int rc;
1276 
1277  /* Transition queue to RTR state, if applicable */
1278  if ( hermon_qp->state < HERMON_QP_ST_RTR ) {
1279  memset ( &qpctx, 0, sizeof ( qpctx ) );
1280  MLX_FILL_2 ( &qpctx, 4,
1281  qpc_eec_data.mtu,
1282  ( ( qp->type == IB_QPT_ETH ) ?
 /* NOTE(review): the two MTU operands of this ternary (orig line
  * 1283) are missing from this extract — restore from upstream.
  */
1284  qpc_eec_data.msg_max, 31 );
1285  MLX_FILL_1 ( &qpctx, 7,
1286  qpc_eec_data.remote_qpn_een, qp->av.qpn );
1287  MLX_FILL_1 ( &qpctx, 9,
1288  qpc_eec_data.primary_address_path.rlid,
1289  qp->av.lid );
1290  MLX_FILL_1 ( &qpctx, 10,
1291  qpc_eec_data.primary_address_path.max_stat_rate,
1292  hermon_rate ( &qp->av ) );
 /* GID is copied raw into dwords 12-15 of the context */
1293  memcpy ( &qpctx.u.dwords[12], &qp->av.gid,
1294  sizeof ( qp->av.gid ) );
1295  MLX_FILL_1 ( &qpctx, 16,
1296  qpc_eec_data.primary_address_path.sched_queue,
1297  hermon_sched_queue ( ibdev, qp ) );
1298  MLX_FILL_1 ( &qpctx, 39,
1299  qpc_eec_data.next_rcv_psn, qp->recv.psn );
1300  if ( ( rc = hermon_cmd_init2rtr_qp ( hermon, qp->qpn,
1301  &qpctx ) ) != 0 ) {
1302  DBGC ( hermon, "Hermon %p QPN %#lx INIT2RTR_QP failed:"
1303  " %s\n", hermon, qp->qpn, strerror ( rc ) );
1304  return rc;
1305  }
1306  hermon_qp->state = HERMON_QP_ST_RTR;
1307  }
1308 
1309  /* Transition queue to RTS state */
1310  if ( hermon_qp->state < HERMON_QP_ST_RTS ) {
1311  memset ( &qpctx, 0, sizeof ( qpctx ) );
1312  MLX_FILL_1 ( &qpctx, 10,
1313  qpc_eec_data.primary_address_path.ack_timeout,
1314  14 /* 4.096us * 2^(14) = 67ms */ );
1315  MLX_FILL_2 ( &qpctx, 30,
1316  qpc_eec_data.retry_count, HERMON_RETRY_MAX,
1317  qpc_eec_data.rnr_retry, HERMON_RETRY_MAX );
1318  MLX_FILL_1 ( &qpctx, 32,
1319  qpc_eec_data.next_send_psn, qp->send.psn );
1320  if ( ( rc = hermon_cmd_rtr2rts_qp ( hermon, qp->qpn,
1321  &qpctx ) ) != 0 ) {
1322  DBGC ( hermon, "Hermon %p QPN %#lx RTR2RTS_QP failed: "
1323  "%s\n", hermon, qp->qpn, strerror ( rc ) );
1324  return rc;
1325  }
1326  hermon_qp->state = HERMON_QP_ST_RTS;
1327  }
1328 
1329  /* Update parameters in RTS state */
1330  memset ( &qpctx, 0, sizeof ( qpctx ) );
 /* NOTE(review): a line is missing here (orig line 1331), presumably
  * the MLX_FILL setting the optional-parameter mask for the queue key
  * update — restore from upstream hermon.c.
  */
1332  MLX_FILL_1 ( &qpctx, 44, qpc_eec_data.q_key, qp->qkey );
1333  if ( ( rc = hermon_cmd_rts2rts_qp ( hermon, qp->qpn, &qpctx ) ) != 0 ){
1334  DBGC ( hermon, "Hermon %p QPN %#lx RTS2RTS_QP failed: %s\n",
1335  hermon, qp->qpn, strerror ( rc ) );
1336  return rc;
1337  }
1338 
1339  return 0;
1340 }
1341 
1342 /**
1343  * Destroy queue pair
1344  *
1345  * @v ibdev Infiniband device
1346  * @v qp Queue pair
1347  */
1348 static void hermon_destroy_qp ( struct ib_device *ibdev,
1349  struct ib_queue_pair *qp ) {
1350  struct hermon *hermon = ib_get_drvdata ( ibdev );
1351  struct hermon_queue_pair *hermon_qp = ib_qp_get_drvdata ( qp );
1352  int rc;
1353 
1354  /* Take ownership back from hardware */
1355  if ( ( rc = hermon_cmd_2rst_qp ( hermon, qp->qpn ) ) != 0 ) {
1356  DBGC ( hermon, "Hermon %p QPN %#lx FATAL 2RST_QP failed: %s\n",
1357  hermon, qp->qpn, strerror ( rc ) );
1358  /* Leak memory and return; at least we avoid corruption */
1359  return;
1360  }
1361 
1362  /* Free MTT entries */
1363  hermon_free_mtt ( hermon, &hermon_qp->mtt );
1364 
1365  /* Free memory */
1366  free_dma ( hermon_qp->wqe, hermon_qp->wqe_size );
1367  free_dma ( hermon_qp->recv.doorbell,
1368  sizeof ( hermon_qp->recv.doorbell[0] ) );
1369  free ( hermon_qp );
1370 
1371  /* Mark queue number as free */
1372  hermon_free_qpn ( ibdev, qp );
1373 
1374  ib_qp_set_drvdata ( qp, NULL );
1375 }
1376 
1377 /***************************************************************************
1378  *
1379  * Work request operations
1380  *
1381  ***************************************************************************
1382  */
1383 
1384 /**
1385  * Construct NOP send work queue entry
1386  *
1387  * @v ibdev Infiniband device
1388  * @v qp Queue pair
1389  * @v dest Destination address vector
1390  * @v iobuf I/O buffer
1391  * @v wqe Send work queue entry
1392  * @ret opcode Control opcode
1393  */
1394 static __attribute__ (( unused )) unsigned int
 /* NOTE(review): the function name/signature line (orig line 1395,
  * presumably "hermon_fill_nop_send_wqe ( struct ib_device *ibdev
  * __unused,") and the destination parameter line (orig line 1397)
  * are missing from this extract.
  */
1396  struct ib_queue_pair *qp __unused,
1398  struct io_buffer *iobuf __unused,
1399  union hermon_send_wqe *wqe ) {
1400 
 /* Minimal WQE: control segment only, completion requested */
1401  MLX_FILL_1 ( &wqe->ctrl, 1, ds, ( sizeof ( wqe->ctrl ) / 16 ) );
1402  MLX_FILL_1 ( &wqe->ctrl, 2, c, 0x03 /* generate completion */ );
1403  return HERMON_OPCODE_NOP;
1404 }
1405 
1406 /**
1407  * Construct UD send work queue entry
1408  *
1409  * @v ibdev Infiniband device
1410  * @v qp Queue pair
1411  * @v dest Destination address vector
1412  * @v iobuf I/O buffer
1413  * @v wqe Send work queue entry
1414  * @ret opcode Control opcode
1415  */
1416 static unsigned int
 /* NOTE(review): the function name line (orig line 1417, presumably
  * "hermon_fill_ud_send_wqe ( struct ib_device *ibdev,") is missing
  * from this extract.
  */
1418  struct ib_queue_pair *qp __unused,
1419  struct ib_address_vector *dest,
1420  struct io_buffer *iobuf,
1421  union hermon_send_wqe *wqe ) {
1422  struct hermon *hermon = ib_get_drvdata ( ibdev );
1423 
 /* Descriptor size covers ctrl + UD segment + one data segment,
  * in units of 16 bytes.
  */
1424  MLX_FILL_1 ( &wqe->ud.ctrl, 1, ds,
1425  ( ( offsetof ( typeof ( wqe->ud ), data[1] ) / 16 ) ) );
1426  MLX_FILL_1 ( &wqe->ud.ctrl, 2, c, 0x03 /* generate completion */ );
 /* UD address vector: port, destination LID/GID, rate, SL */
1427  MLX_FILL_2 ( &wqe->ud.ud, 0,
1428  ud_address_vector.pd, HERMON_GLOBAL_PD,
1429  ud_address_vector.port_number, ibdev->port );
1430  MLX_FILL_2 ( &wqe->ud.ud, 1,
1431  ud_address_vector.rlid, dest->lid,
1432  ud_address_vector.g, dest->gid_present );
1433  MLX_FILL_1 ( &wqe->ud.ud, 2,
1434  ud_address_vector.max_stat_rate, hermon_rate ( dest ) );
1435  MLX_FILL_1 ( &wqe->ud.ud, 3, ud_address_vector.sl, dest->sl );
1436  memcpy ( &wqe->ud.ud.u.dwords[4], &dest->gid, sizeof ( dest->gid ) );
1437  MLX_FILL_1 ( &wqe->ud.ud, 8, destination_qp, dest->qpn );
1438  MLX_FILL_1 ( &wqe->ud.ud, 9, q_key, dest->qkey );
 /* Single gather entry pointing at the payload */
1439  MLX_FILL_1 ( &wqe->ud.data[0], 0, byte_count, iob_len ( iobuf ) );
1440  MLX_FILL_1 ( &wqe->ud.data[0], 1, l_key, hermon->lkey );
1441  MLX_FILL_H ( &wqe->ud.data[0], 2,
1442  local_address_h, virt_to_bus ( iobuf->data ) );
1443  MLX_FILL_1 ( &wqe->ud.data[0], 3,
1444  local_address_l, virt_to_bus ( iobuf->data ) );
1445  return HERMON_OPCODE_SEND;
1446 }
1447 
1448 /**
1449  * Construct MLX send work queue entry
1450  *
1451  * @v ibdev Infiniband device
1452  * @v qp Queue pair
1453  * @v dest Destination address vector
1454  * @v iobuf I/O buffer
1455  * @v wqe Send work queue entry
1456  * @ret opcode Control opcode
1457  */
1458 static unsigned int
 /* NOTE(review): the function name line (orig line 1459, presumably
  * "hermon_fill_mlx_send_wqe ( struct ib_device *ibdev,") is missing
  * from this extract.
  */
1460  struct ib_queue_pair *qp,
1461  struct ib_address_vector *dest,
1462  struct io_buffer *iobuf,
1463  union hermon_send_wqe *wqe ) {
1464  struct hermon *hermon = ib_get_drvdata ( ibdev );
1465  struct io_buffer headers;
1466 
1467  /* Construct IB headers */
1468  iob_populate ( &headers, &wqe->mlx.headers, 0,
1469  sizeof ( wqe->mlx.headers ) );
1470  iob_reserve ( &headers, sizeof ( wqe->mlx.headers ) );
1471  ib_push ( ibdev, &headers, qp, iob_len ( iobuf ), dest );
1472 
1473  /* Fill work queue entry */
1474  MLX_FILL_1 ( &wqe->mlx.ctrl, 1, ds,
1475  ( ( offsetof ( typeof ( wqe->mlx ), data[2] ) / 16 ) ) );
 /* VL15 is set only for the SMI queue pair */
1476  MLX_FILL_5 ( &wqe->mlx.ctrl, 2,
1477  c, 0x03 /* generate completion */,
1478  icrc, 0 /* generate ICRC */,
1479  max_statrate, hermon_rate ( dest ),
1480  slr, 0,
1481  v15, ( ( qp->ext_qpn == IB_QPN_SMI ) ? 1 : 0 ) );
1482  MLX_FILL_1 ( &wqe->mlx.ctrl, 3, rlid, dest->lid );
 /* Gather entry 0: the locally constructed IB headers */
1483  MLX_FILL_1 ( &wqe->mlx.data[0], 0,
1484  byte_count, iob_len ( &headers ) );
1485  MLX_FILL_1 ( &wqe->mlx.data[0], 1, l_key, hermon->lkey );
1486  MLX_FILL_H ( &wqe->mlx.data[0], 2,
1487  local_address_h, virt_to_bus ( headers.data ) );
1488  MLX_FILL_1 ( &wqe->mlx.data[0], 3,
1489  local_address_l, virt_to_bus ( headers.data ) );
 /* Gather entry 1: payload, with 4 extra bytes reserved for ICRC */
1490  MLX_FILL_1 ( &wqe->mlx.data[1], 0,
1491  byte_count, ( iob_len ( iobuf ) + 4 /* ICRC */ ) );
1492  MLX_FILL_1 ( &wqe->mlx.data[1], 1, l_key, hermon->lkey );
1493  MLX_FILL_H ( &wqe->mlx.data[1], 2,
1494  local_address_h, virt_to_bus ( iobuf->data ) );
1495  MLX_FILL_1 ( &wqe->mlx.data[1], 3,
1496  local_address_l, virt_to_bus ( iobuf->data ) );
1497  return HERMON_OPCODE_SEND;
1498 }
1499 
1500 /**
1501  * Construct RC send work queue entry
1502  *
1503  * @v ibdev Infiniband device
1504  * @v qp Queue pair
1505  * @v dest Destination address vector
1506  * @v iobuf I/O buffer
1507  * @v wqe Send work queue entry
1508  * @ret opcode Control opcode
1509  */
1510 static unsigned int
 /* NOTE(review): the function name line (orig line 1511, presumably
  * "hermon_fill_rc_send_wqe ( struct ib_device *ibdev,") and the
  * destination parameter line (orig line 1513) are missing from this
  * extract.
  */
1512  struct ib_queue_pair *qp __unused,
1514  struct io_buffer *iobuf,
1515  union hermon_send_wqe *wqe ) {
1516  struct hermon *hermon = ib_get_drvdata ( ibdev );
1517 
 /* RC sends need no address vector; ctrl + one data segment */
1518  MLX_FILL_1 ( &wqe->rc.ctrl, 1, ds,
1519  ( ( offsetof ( typeof ( wqe->rc ), data[1] ) / 16 ) ) );
1520  MLX_FILL_1 ( &wqe->rc.ctrl, 2, c, 0x03 /* generate completion */ );
1521  MLX_FILL_1 ( &wqe->rc.data[0], 0, byte_count, iob_len ( iobuf ) );
1522  MLX_FILL_1 ( &wqe->rc.data[0], 1, l_key, hermon->lkey );
1523  MLX_FILL_H ( &wqe->rc.data[0], 2,
1524  local_address_h, virt_to_bus ( iobuf->data ) );
1525  MLX_FILL_1 ( &wqe->rc.data[0], 3,
1526  local_address_l, virt_to_bus ( iobuf->data ) );
1527  return HERMON_OPCODE_SEND;
1528 }
1529 
1530 /**
1531  * Construct Ethernet send work queue entry
1532  *
1533  * @v ibdev Infiniband device
1534  * @v qp Queue pair
1535  * @v dest Destination address vector
1536  * @v iobuf I/O buffer
1537  * @v wqe Send work queue entry
1538  * @ret opcode Control opcode
1539  */
1540 static unsigned int
 /* NOTE(review): the function name line (orig line 1541, presumably
  * "hermon_fill_eth_send_wqe ( struct ib_device *ibdev,") and the
  * destination parameter line (orig line 1543) are missing from this
  * extract.
  */
1542  struct ib_queue_pair *qp __unused,
1544  struct io_buffer *iobuf,
1545  union hermon_send_wqe *wqe ) {
1546  struct hermon *hermon = ib_get_drvdata ( ibdev );
1547 
1548  /* Fill work queue entry */
 /* NOTE(review): the descriptor size is computed from the layout of
  * wqe->mlx rather than wqe->eth — presumably the ctrl+data layouts
  * coincide; confirm against the union definition in hermon.h.
  */
1549  MLX_FILL_1 ( &wqe->eth.ctrl, 1, ds,
1550  ( ( offsetof ( typeof ( wqe->mlx ), data[1] ) / 16 ) ) );
1551  MLX_FILL_2 ( &wqe->eth.ctrl, 2,
1552  c, 0x03 /* generate completion */,
1553  s, 1 /* inhibit ICRC */ );
 /* Single gather entry pointing at the Ethernet frame */
1554  MLX_FILL_1 ( &wqe->eth.data[0], 0,
1555  byte_count, iob_len ( iobuf ) );
1556  MLX_FILL_1 ( &wqe->eth.data[0], 1, l_key, hermon->lkey );
1557  MLX_FILL_H ( &wqe->eth.data[0], 2,
1558  local_address_h, virt_to_bus ( iobuf->data ) );
1559  MLX_FILL_1 ( &wqe->eth.data[0], 3,
1560  local_address_l, virt_to_bus ( iobuf->data ) );
1561  return HERMON_OPCODE_SEND;
1562 }
1563 
1564 /** Work queue entry constructors */
1565 static unsigned int
1566 ( * hermon_fill_send_wqe[] ) ( struct ib_device *ibdev,
1567  struct ib_queue_pair *qp,
1568  struct ib_address_vector *dest,
1569  struct io_buffer *iobuf,
1570  union hermon_send_wqe *wqe ) = {
 /* NOTE(review): the per-QP-type initializer entries (orig lines
  * 1571-1575, mapping IB_QPT_* to the hermon_fill_*_send_wqe
  * constructors above) are missing from this extract — restore from
  * upstream hermon.c.
  */
1576 };
1577 
1578 /**
1579  * Post send work queue entry
1580  *
1581  * @v ibdev Infiniband device
1582  * @v qp Queue pair
1583  * @v dest Destination address vector
1584  * @v iobuf I/O buffer
1585  * @ret rc Return status code
1586  */
1587 static int hermon_post_send ( struct ib_device *ibdev,
1588  struct ib_queue_pair *qp,
1589  struct ib_address_vector *dest,
1590  struct io_buffer *iobuf ) {
1591  struct hermon *hermon = ib_get_drvdata ( ibdev );
1592  struct hermon_queue_pair *hermon_qp = ib_qp_get_drvdata ( qp );
1593  struct ib_work_queue *wq = &qp->send;
1594  struct hermon_send_work_queue *hermon_send_wq = &hermon_qp->send;
1595  union hermon_send_wqe *wqe;
1596  union hermonprm_doorbell_register db_reg;
1597  unsigned long wqe_idx_mask;
1598  unsigned long wqe_idx;
1599  unsigned int owner;
1600  unsigned int opcode;
1601 
1602  /* Allocate work queue entry */
 /* Hardware ring index; the ownership bit toggles each time the
  * index wraps past num_wqes.
  */
1603  wqe_idx = ( wq->next_idx & ( hermon_send_wq->num_wqes - 1 ) );
1604  owner = ( ( wq->next_idx & hermon_send_wq->num_wqes ) ? 1 : 0 );
1605  wqe_idx_mask = ( wq->num_wqes - 1 );
1606  if ( wq->iobufs[ wqe_idx & wqe_idx_mask ] ) {
1607  DBGC ( hermon, "Hermon %p QPN %#lx send queue full",
1608  hermon, qp->qpn );
1609  return -ENOBUFS;
1610  }
1611  wq->iobufs[ wqe_idx & wqe_idx_mask ] = iobuf;
1612  wqe = &hermon_send_wq->wqe[wqe_idx];
1613 
1614  /* Construct work queue entry */
1615  memset ( ( ( ( void * ) wqe ) + 4 /* avoid ctrl.owner */ ), 0,
1616  ( sizeof ( *wqe ) - 4 ) );
1617  assert ( qp->type < ( sizeof ( hermon_fill_send_wqe ) /
1618  sizeof ( hermon_fill_send_wqe[0] ) ) );
1619  assert ( hermon_fill_send_wqe[qp->type] != NULL );
 /* Dispatch to the per-QP-type WQE constructor */
1620  opcode = hermon_fill_send_wqe[qp->type] ( ibdev, qp, dest, iobuf, wqe );
 /* Complete the WQE body before flipping the ownership bit */
1621  barrier();
1622  MLX_FILL_2 ( &wqe->ctrl, 0,
1623  opcode, opcode,
1624  owner, owner );
1625  DBGCP ( hermon, "Hermon %p QPN %#lx posting send WQE %#lx:\n",
1626  hermon, qp->qpn, wqe_idx );
1627  DBGCP_HDA ( hermon, virt_to_phys ( wqe ), wqe, sizeof ( *wqe ) );
1628 
1629  /* Ring doorbell register */
1630  MLX_FILL_1 ( &db_reg.send, 0, qn, qp->qpn );
1631  barrier();
1632  writel ( db_reg.dword[0], hermon_send_wq->doorbell );
1633 
1634  /* Update work queue's index */
1635  wq->next_idx++;
1636 
1637  return 0;
1638 }
1639 
1640 /**
1641  * Post receive work queue entry
1642  *
1643  * @v ibdev Infiniband device
1644  * @v qp Queue pair
1645  * @v iobuf I/O buffer
1646  * @ret rc Return status code
1647  */
1648 static int hermon_post_recv ( struct ib_device *ibdev,
1649  struct ib_queue_pair *qp,
1650  struct io_buffer *iobuf ) {
1651  struct hermon *hermon = ib_get_drvdata ( ibdev );
1652  struct hermon_queue_pair *hermon_qp = ib_qp_get_drvdata ( qp );
1653  struct ib_work_queue *wq = &qp->recv;
1654  struct hermon_recv_work_queue *hermon_recv_wq = &hermon_qp->recv;
1655  struct hermonprm_recv_wqe *wqe;
1656  struct hermonprm_wqe_segment_data_ptr *data;
1657  struct ib_global_route_header *grh;
1658  unsigned int wqe_idx_mask;
1659 
1660  /* Allocate work queue entry */
1661  wqe_idx_mask = ( wq->num_wqes - 1 );
1662  if ( wq->iobufs[wq->next_idx & wqe_idx_mask] ) {
1663  DBGC ( hermon, "Hermon %p QPN %#lx receive queue full",
1664  hermon, qp->qpn );
1665  return -ENOBUFS;
1666  }
1667  wq->iobufs[wq->next_idx & wqe_idx_mask] = iobuf;
1668  wqe = &hermon_recv_wq->wqe[wq->next_idx & wqe_idx_mask].recv;
1669 
1670  /* Construct work queue entry */
1671  data = &wqe->data[0];
 /* Datagram QPs scatter the GRH into its own per-WQE buffer,
  * ahead of the payload scatter entry.
  */
1672  if ( hermon_qp->recv.grh ) {
1673  grh = &hermon_qp->recv.grh[wq->next_idx & wqe_idx_mask];
1674  MLX_FILL_1 ( data, 0, byte_count, sizeof ( *grh ) );
1675  MLX_FILL_1 ( data, 1, l_key, hermon->lkey );
1676  MLX_FILL_H ( data, 2, local_address_h, virt_to_bus ( grh ) );
1677  MLX_FILL_1 ( data, 3, local_address_l, virt_to_bus ( grh ) );
1678  data++;
1679  }
 /* Payload scatter entry covering the I/O buffer's tailroom */
1680  MLX_FILL_1 ( data, 0, byte_count, iob_tailroom ( iobuf ) );
1681  MLX_FILL_1 ( data, 1, l_key, hermon->lkey );
1682  MLX_FILL_H ( data, 2, local_address_h, virt_to_bus ( iobuf->data ) );
1683  MLX_FILL_1 ( data, 3, local_address_l, virt_to_bus ( iobuf->data ) );
1684 
1685  /* Update work queue's index */
1686  wq->next_idx++;
1687 
1688  /* Update doorbell record */
 /* Ensure the WQE is visible before publishing the new counter */
1689  barrier();
1690  MLX_FILL_1 ( hermon_recv_wq->doorbell, 0, receive_wqe_counter,
1691  ( wq->next_idx & 0xffff ) );
1692 
1693  return 0;
1694 }
1695 
1696 /**
1697  * Handle completion
1698  *
1699  * @v ibdev Infiniband device
1700  * @v cq Completion queue
1701  * @v cqe Hardware completion queue entry
1702  * @ret rc Return status code
1703  */
1704 static int hermon_complete ( struct ib_device *ibdev,
1705  struct ib_completion_queue *cq,
1706  union hermonprm_completion_entry *cqe ) {
1707  struct hermon *hermon = ib_get_drvdata ( ibdev );
1708  struct hermon_queue_pair *hermon_qp;
1709  struct ib_work_queue *wq;
1710  struct ib_queue_pair *qp;
1711  struct io_buffer *iobuf;
1712  struct ib_address_vector recv_dest;
1713  struct ib_address_vector recv_source;
1714  struct ib_global_route_header *grh;
1715  struct ib_address_vector *source;
1716  unsigned int opcode;
1717  unsigned long qpn;
1718  int is_send;
1719  unsigned long wqe_idx;
1720  unsigned long wqe_idx_mask;
1721  size_t len;
1722  int rc = 0;
1723 
1724  /* Parse completion */
1725  qpn = MLX_GET ( &cqe->normal, qpn );
1726  is_send = MLX_GET ( &cqe->normal, s_r );
1727  opcode = MLX_GET ( &cqe->normal, opcode );
1728  if ( opcode >= HERMON_OPCODE_RECV_ERROR ) {
1729  /* "s" field is not valid for error opcodes */
1730  is_send = ( opcode == HERMON_OPCODE_SEND_ERROR );
1731  DBGC ( hermon, "Hermon %p CQN %#lx syndrome %x vendor %x\n",
1732  hermon, cq->cqn, MLX_GET ( &cqe->error, syndrome ),
1733  MLX_GET ( &cqe->error, vendor_error_syndrome ) );
1734  rc = -EIO;
1735  /* Don't return immediately; propagate error to completer */
1736  }
1737 
1738  /* Identify work queue */
1739  wq = ib_find_wq ( cq, qpn, is_send );
1740  if ( ! wq ) {
1741  DBGC ( hermon, "Hermon %p CQN %#lx unknown %s QPN %#lx\n",
1742  hermon, cq->cqn, ( is_send ? "send" : "recv" ), qpn );
1743  return -EIO;
1744  }
1745  qp = wq->qp;
1746  hermon_qp = ib_qp_get_drvdata ( qp );
1747 
1748  /* Identify work queue entry */
1749  wqe_idx = MLX_GET ( &cqe->normal, wqe_counter );
1750  wqe_idx_mask = ( wq->num_wqes - 1 );
1751  DBGCP ( hermon, "Hermon %p CQN %#lx QPN %#lx %s WQE %#lx completed:\n",
1752  hermon, cq->cqn, qp->qpn, ( is_send ? "send" : "recv" ),
1753  wqe_idx );
1754  DBGCP_HDA ( hermon, virt_to_phys ( cqe ), cqe, sizeof ( *cqe ) );
1755 
1756  /* Identify I/O buffer */
1757  iobuf = wq->iobufs[ wqe_idx & wqe_idx_mask ];
1758  if ( ! iobuf ) {
1759  DBGC ( hermon, "Hermon %p CQN %#lx QPN %#lx empty %s WQE "
1760  "%#lx\n", hermon, cq->cqn, qp->qpn,
1761  ( is_send ? "send" : "recv" ), wqe_idx );
1762  return -EIO;
1763  }
 /* Slot is released here; ownership of iobuf passes to the
  * completion handler below.
  */
1764  wq->iobufs[ wqe_idx & wqe_idx_mask ] = NULL;
1765 
1766  if ( is_send ) {
1767  /* Hand off to completion handler */
1768  ib_complete_send ( ibdev, qp, iobuf, rc );
1769  } else {
1770  /* Set received length */
1771  len = MLX_GET ( &cqe->normal, byte_cnt );
1772  memset ( &recv_dest, 0, sizeof ( recv_dest ) );
1773  recv_dest.qpn = qpn;
1774  memset ( &recv_source, 0, sizeof ( recv_source ) );
1775  switch ( qp->type ) {
1776  case IB_QPT_SMI:
1777  case IB_QPT_GSI:
1778  case IB_QPT_UD:
1779  /* Locate corresponding GRH */
1780  assert ( hermon_qp->recv.grh != NULL );
1781  grh = &hermon_qp->recv.grh[ wqe_idx & wqe_idx_mask ];
 /* GRH was scattered into its own buffer; exclude it
  * from the payload length.
  */
1782  len -= sizeof ( *grh );
1783  /* Construct address vector */
1784  source = &recv_source;
1785  source->qpn = MLX_GET ( &cqe->normal, srq_rqpn );
1786  source->lid = MLX_GET ( &cqe->normal, slid_smac47_32 );
1787  source->sl = MLX_GET ( &cqe->normal, sl );
1788  recv_dest.gid_present = source->gid_present =
1789  MLX_GET ( &cqe->normal, g );
1790  memcpy ( &recv_dest.gid, &grh->dgid,
1791  sizeof ( recv_dest.gid ) );
1792  memcpy ( &source->gid, &grh->sgid,
1793  sizeof ( source->gid ) );
1794  break;
1795  case IB_QPT_RC:
 /* Connected QP: source is the stored address vector */
1796  source = &qp->av;
1797  break;
1798  case IB_QPT_ETH:
1799  /* Construct address vector */
1800  source = &recv_source;
1801  source->vlan_present = MLX_GET ( &cqe->normal, vlan );
1802  source->vlan = MLX_GET ( &cqe->normal, vid );
1803  break;
1804  default:
1805  assert ( 0 );
1806  return -EINVAL;
1807  }
1808  assert ( len <= iob_tailroom ( iobuf ) );
1809  iob_put ( iobuf, len );
1810  /* Hand off to completion handler */
1811  ib_complete_recv ( ibdev, qp, &recv_dest, source, iobuf, rc );
1812  }
1813 
1814  return rc;
1815 }
1816 
1817 /**
1818  * Poll completion queue
1819  *
1820  * @v ibdev Infiniband device
1821  * @v cq Completion queue
1822  */
1823 static void hermon_poll_cq ( struct ib_device *ibdev,
1824  struct ib_completion_queue *cq ) {
1825  struct hermon *hermon = ib_get_drvdata ( ibdev );
1826  struct hermon_completion_queue *hermon_cq = ib_cq_get_drvdata ( cq );
1827  union hermonprm_completion_entry *cqe;
1828  unsigned int cqe_idx_mask;
1829  int rc;
1830 
1831  while ( 1 ) {
1832  /* Look for completion entry */
1833  cqe_idx_mask = ( cq->num_cqes - 1 );
1834  cqe = &hermon_cq->cqe[cq->next_idx & cqe_idx_mask];
1835  if ( MLX_GET ( &cqe->normal, owner ) ^
1836  ( ( cq->next_idx & cq->num_cqes ) ? 1 : 0 ) ) {
1837  /* Entry still owned by hardware; end of poll */
1838  break;
1839  }
1840 
1841  /* Handle completion */
1842  if ( ( rc = hermon_complete ( ibdev, cq, cqe ) ) != 0 ) {
1843  DBGC ( hermon, "Hermon %p CQN %#lx failed to complete:"
1844  " %s\n", hermon, cq->cqn, strerror ( rc ) );
1845  DBGC_HDA ( hermon, virt_to_phys ( cqe ),
1846  cqe, sizeof ( *cqe ) );
1847  }
1848 
1849  /* Update completion queue's index */
1850  cq->next_idx++;
1851 
1852  /* Update doorbell record */
1853  MLX_FILL_1 ( hermon_cq->doorbell, 0, update_ci,
1854  ( cq->next_idx & 0x00ffffffUL ) );
1855  }
1856 }
1857 
1858 /***************************************************************************
1859  *
1860  * Event queues
1861  *
1862  ***************************************************************************
1863  */
1864 
1865 /**
1866  * Create event queue
1867  *
1868  * @v hermon Hermon device
1869  * @ret rc Return status code
1870  */
1871 static int hermon_create_eq ( struct hermon *hermon ) {
1872  struct hermon_event_queue *hermon_eq = &hermon->eq;
1873  struct hermonprm_eqc eqctx;
1874  struct hermonprm_event_mask mask;
1875  unsigned int i;
1876  int rc;
1877 
1878  /* Select event queue number */
 /* Must avoid both the reserved UAR range and the reserved EQs */
1879  hermon_eq->eqn = ( 4 * hermon->cap.reserved_uars );
1880  if ( hermon_eq->eqn < hermon->cap.reserved_eqs )
1881  hermon_eq->eqn = hermon->cap.reserved_eqs;
1882 
1883  /* Calculate doorbell address */
1884  hermon_eq->doorbell =
1885  ( hermon->uar + HERMON_DB_EQ_OFFSET ( hermon_eq->eqn ) );
1886 
1887  /* Allocate event queue itself */
1888  hermon_eq->eqe_size =
1889  ( HERMON_NUM_EQES * sizeof ( hermon_eq->eqe[0] ) );
1890  hermon_eq->eqe = malloc_dma ( hermon_eq->eqe_size,
1891  sizeof ( hermon_eq->eqe[0] ) );
1892  if ( ! hermon_eq->eqe ) {
1893  rc = -ENOMEM;
1894  goto err_eqe;
1895  }
1896  memset ( hermon_eq->eqe, 0, hermon_eq->eqe_size );
 /* Set the ownership bit on every entry so that none is treated
  * as a valid event before hardware has written it.
  */
1897  for ( i = 0 ; i < HERMON_NUM_EQES ; i++ ) {
1898  MLX_FILL_1 ( &hermon_eq->eqe[i].generic, 7, owner, 1 );
1899  }
1900  barrier();
1901 
1902  /* Allocate MTT entries */
1903  if ( ( rc = hermon_alloc_mtt ( hermon, hermon_eq->eqe,
1904  hermon_eq->eqe_size,
1905  &hermon_eq->mtt ) ) != 0 )
1906  goto err_alloc_mtt;
1907 
1908  /* Hand queue over to hardware */
1909  memset ( &eqctx, 0, sizeof ( eqctx ) );
1910  MLX_FILL_2 ( &eqctx, 0,
1911  st, 0xa /* "Fired" */,
1912  oi, 1 );
1913  MLX_FILL_1 ( &eqctx, 2,
1914  page_offset, ( hermon_eq->mtt.page_offset >> 5 ) );
1915  MLX_FILL_1 ( &eqctx, 3, log_eq_size, fls ( HERMON_NUM_EQES - 1 ) );
1916  MLX_FILL_H ( &eqctx, 6, mtt_base_addr_h,
1917  hermon_eq->mtt.mtt_base_addr );
1918  MLX_FILL_1 ( &eqctx, 7, mtt_base_addr_l,
1919  ( hermon_eq->mtt.mtt_base_addr >> 3 ) );
1920  if ( ( rc = hermon_cmd_sw2hw_eq ( hermon, hermon_eq->eqn,
1921  &eqctx ) ) != 0 ) {
1922  DBGC ( hermon, "Hermon %p EQN %#lx SW2HW_EQ failed: %s\n",
1923  hermon, hermon_eq->eqn, strerror ( rc ) );
1924  goto err_sw2hw_eq;
1925  }
1926 
1927  /* Map all events to this event queue */
 /* All-ones mask subscribes every event class */
1928  memset ( &mask, 0xff, sizeof ( mask ) );
1929  if ( ( rc = hermon_cmd_map_eq ( hermon,
1930  ( HERMON_MAP_EQ | hermon_eq->eqn ),
1931  &mask ) ) != 0 ) {
1932  DBGC ( hermon, "Hermon %p EQN %#lx MAP_EQ failed: %s\n",
1933  hermon, hermon_eq->eqn, strerror ( rc ) );
1934  goto err_map_eq;
1935  }
1936 
1937  DBGC ( hermon, "Hermon %p EQN %#lx ring [%08lx,%08lx), doorbell "
1938  "%08lx\n", hermon, hermon_eq->eqn,
1939  virt_to_phys ( hermon_eq->eqe ),
1940  ( virt_to_phys ( hermon_eq->eqe ) + hermon_eq->eqe_size ),
1941  virt_to_phys ( hermon_eq->doorbell ) );
1942  return 0;
1943 
1944  err_map_eq:
1945  hermon_cmd_hw2sw_eq ( hermon, hermon_eq->eqn, &eqctx );
1946  err_sw2hw_eq:
1947  hermon_free_mtt ( hermon, &hermon_eq->mtt );
1948  err_alloc_mtt:
1949  free_dma ( hermon_eq->eqe, hermon_eq->eqe_size );
1950  err_eqe:
1951  memset ( hermon_eq, 0, sizeof ( *hermon_eq ) );
1952  return rc;
1953 }
1954 
1955 /**
1956  * Destroy event queue
1957  *
1958  * @v hermon Hermon device
1959  */
1960 static void hermon_destroy_eq ( struct hermon *hermon ) {
1961  struct hermon_event_queue *hermon_eq = &hermon->eq;
1962  struct hermonprm_eqc eqctx;
1963  struct hermonprm_event_mask mask;
1964  int rc;
1965 
1966  /* Unmap events from event queue */
1967  memset ( &mask, 0xff, sizeof ( mask ) );
1968  if ( ( rc = hermon_cmd_map_eq ( hermon,
1969  ( HERMON_UNMAP_EQ | hermon_eq->eqn ),
1970  &mask ) ) != 0 ) {
1971  DBGC ( hermon, "Hermon %p EQN %#lx FATAL MAP_EQ failed to "
1972  "unmap: %s\n", hermon, hermon_eq->eqn, strerror ( rc ) );
1973  /* Continue; HCA may die but system should survive */
1974  }
1975 
1976  /* Take ownership back from hardware */
1977  if ( ( rc = hermon_cmd_hw2sw_eq ( hermon, hermon_eq->eqn,
1978  &eqctx ) ) != 0 ) {
1979  DBGC ( hermon, "Hermon %p EQN %#lx FATAL HW2SW_EQ failed: %s\n",
1980  hermon, hermon_eq->eqn, strerror ( rc ) );
1981  /* Leak memory and return; at least we avoid corruption */
1982  return;
1983  }
1984 
1985  /* Free MTT entries */
1986  hermon_free_mtt ( hermon, &hermon_eq->mtt );
1987 
1988  /* Free memory */
1989  free_dma ( hermon_eq->eqe, hermon_eq->eqe_size );
1990  memset ( hermon_eq, 0, sizeof ( *hermon_eq ) );
1991 }
1992 
1993 /**
1994  * Handle port state event
1995  *
1996  * @v hermon Hermon device
1997  * @v eqe Port state change event queue entry
1998  */
 /* NOTE(review): the function signature's first line (orig line 1999,
  * presumably "static void hermon_event_port_state_change ( struct
  * hermon *hermon,") is missing from this extract.
  */
2000  union hermonprm_event_entry *eqe){
2001  unsigned int port;
2002  int link_up;
2003 
2004  /* Get port and link status */
 /* Hardware reports ports 1-based; convert to 0-based index */
2005  port = ( MLX_GET ( &eqe->port_state_change, data.p ) - 1 );
2006  link_up = ( MLX_GET ( &eqe->generic, event_sub_type ) & 0x04 );
2007  DBGC ( hermon, "Hermon %p port %d link %s\n", hermon, ( port + 1 ),
2008  ( link_up ? "up" : "down" ) );
2009 
2010  /* Sanity check */
2011  if ( port >= hermon->cap.num_ports ) {
2012  DBGC ( hermon, "Hermon %p port %d does not exist!\n",
2013  hermon, ( port + 1 ) );
2014  return;
2015  }
2016 
2017  /* Notify device of port state change */
 /* NOTE(review): the notification call's first line (orig line 2018)
  * is missing from this extract — it takes link_up as its final
  * argument; restore from upstream hermon.c.
  */
2019  link_up );
2020 }
2021 
2022 /**
2023  * Poll event queue
2024  *
2025  * @v ibdev Infiniband device
2026  */
2027 static void hermon_poll_eq ( struct ib_device *ibdev ) {
2028  struct hermon *hermon = ib_get_drvdata ( ibdev );
2029  struct hermon_event_queue *hermon_eq = &hermon->eq;
2030  union hermonprm_event_entry *eqe;
2031  union hermonprm_doorbell_register db_reg;
2032  unsigned int eqe_idx_mask;
2033  unsigned int event_type;
2034 
2035  /* No event is generated upon reaching INIT, so we must poll
2036  * separately for link state changes while we remain DOWN.
2037  */
2038  if ( ib_is_open ( ibdev ) &&
2039  ( ibdev->port_state == IB_PORT_STATE_DOWN ) ) {
2040  ib_smc_update ( ibdev, hermon_mad );
2041  }
2042 
2043  /* Poll event queue */
2044  while ( 1 ) {
2045  /* Look for event entry */
2046  eqe_idx_mask = ( HERMON_NUM_EQES - 1 );
2047  eqe = &hermon_eq->eqe[hermon_eq->next_idx & eqe_idx_mask];
 /* Expected ownership bit toggles each pass around the ring */
2048  if ( MLX_GET ( &eqe->generic, owner ) ^
2049  ( ( hermon_eq->next_idx & HERMON_NUM_EQES ) ? 1 : 0 ) ) {
2050  /* Entry still owned by hardware; end of poll */
2051  break;
2052  }
2053  DBGCP ( hermon, "Hermon %p EQN %#lx event:\n",
2054  hermon, hermon_eq->eqn );
2055  DBGCP_HDA ( hermon, virt_to_phys ( eqe ),
2056  eqe, sizeof ( *eqe ) );
2057 
2058  /* Handle event */
2059  event_type = MLX_GET ( &eqe->generic, event_type );
2060  switch ( event_type ) {
 /* NOTE(review): the case label and handler call for the port
  * state change event (orig lines 2061-2062, presumably
  * dispatching to hermon_event_port_state_change) are missing
  * from this extract — restore from upstream hermon.c.
  */
2063  break;
2064  default:
2065  DBGC ( hermon, "Hermon %p EQN %#lx unrecognised event "
2066  "type %#x:\n",
2067  hermon, hermon_eq->eqn, event_type );
2068  DBGC_HDA ( hermon, virt_to_phys ( eqe ),
2069  eqe, sizeof ( *eqe ) );
2070  break;
2071  }
2072 
2073  /* Update event queue's index */
2074  hermon_eq->next_idx++;
2075 
2076  /* Ring doorbell */
2077  MLX_FILL_1 ( &db_reg.event, 0,
2078  ci, ( hermon_eq->next_idx & 0x00ffffffUL ) );
2079  writel ( db_reg.dword[0], hermon_eq->doorbell );
2080  }
2081 }
2082 
2083 /***************************************************************************
2084  *
2085  * Firmware control
2086  *
2087  ***************************************************************************
2088  */
2089 
2090 /**
2091  * Map virtual to physical address for firmware usage
2092  *
2093  * @v hermon Hermon device
2094  * @v map Mapping function
2095  * @v va Virtual address
2096  * @v pa Physical address
2097  * @v len Length of region
2098  * @ret rc Return status code
2099  */
2100 static int hermon_map_vpm ( struct hermon *hermon,
2101  int ( *map ) ( struct hermon *hermon,
2102  const struct hermonprm_virtual_physical_mapping* ),
2103  uint64_t va, physaddr_t pa, size_t len ) {
2104  struct hermonprm_virtual_physical_mapping mapping;
2105  physaddr_t start;
2106  physaddr_t low;
2107  physaddr_t high;
2108  physaddr_t end;
2109  size_t size;
2110  int rc;
2111 
2112  /* Sanity checks */
2113  assert ( ( va & ( HERMON_PAGE_SIZE - 1 ) ) == 0 );
2114  assert ( ( pa & ( HERMON_PAGE_SIZE - 1 ) ) == 0 );
2115  assert ( ( len & ( HERMON_PAGE_SIZE - 1 ) ) == 0 );
2116  assert ( len != 0 );
2117 
2118  /* Calculate starting points */
2119  start = pa;
2120  end = ( start + len );
2121  size = ( 1UL << ( fls ( start ^ end ) - 1 ) );
2122  low = high = ( end & ~( size - 1 ) );
2123  assert ( start < low );
2124  assert ( high <= end );
2125 
2126  /* These mappings tend to generate huge volumes of
2127  * uninteresting debug data, which basically makes it
2128  * impossible to use debugging otherwise.
2129  */
2131 
2132  /* Map blocks in descending order of size */
2133  while ( size >= HERMON_PAGE_SIZE ) {
2134 
2135  /* Find the next candidate block */
2136  if ( ( low - size ) >= start ) {
2137  low -= size;
2138  pa = low;
2139  } else if ( high <= ( end - size ) ) {
2140  pa = high;
2141  high += size;
2142  } else {
2143  size >>= 1;
2144  continue;
2145  }
2146  assert ( ( va & ( size - 1 ) ) == 0 );
2147  assert ( ( pa & ( size - 1 ) ) == 0 );
2148 
2149  /* Map this block */
2150  memset ( &mapping, 0, sizeof ( mapping ) );
2151  MLX_FILL_1 ( &mapping, 0, va_h, ( va >> 32 ) );
2152  MLX_FILL_1 ( &mapping, 1, va_l, ( va >> 12 ) );
2153  MLX_FILL_H ( &mapping, 2, pa_h, pa );
2154  MLX_FILL_2 ( &mapping, 3,
2155  log2size, ( ( fls ( size ) - 1 ) - 12 ),
2156  pa_l, ( pa >> 12 ) );
2157  if ( ( rc = map ( hermon, &mapping ) ) != 0 ) {
2159  DBGC ( hermon, "Hermon %p could not map %08llx+%zx to "
2160  "%08lx: %s\n",
2161  hermon, va, size, pa, strerror ( rc ) );
2162  return rc;
2163  }
2164  va += size;
2165  }
2166  assert ( low == start );
2167  assert ( high == end );
2168 
2170  return 0;
2171 }
2172 
2173 /**
2174  * Start firmware running
2175  *
2176  * @v hermon Hermon device
2177  * @ret rc Return status code
2178  */
2179 static int hermon_start_firmware ( struct hermon *hermon ) {
2180  struct hermonprm_query_fw fw;
2181  unsigned int fw_pages;
2182  size_t fw_len;
2183  physaddr_t fw_base;
2184  int rc;
2185 
2186  /* Get firmware parameters */
2187  if ( ( rc = hermon_cmd_query_fw ( hermon, &fw ) ) != 0 ) {
2188  DBGC ( hermon, "Hermon %p could not query firmware: %s\n",
2189  hermon, strerror ( rc ) );
2190  goto err_query_fw;
2191  }
2192  DBGC ( hermon, "Hermon %p firmware version %d.%d.%d\n", hermon,
2193  MLX_GET ( &fw, fw_rev_major ), MLX_GET ( &fw, fw_rev_minor ),
2194  MLX_GET ( &fw, fw_rev_subminor ) );
2195  fw_pages = MLX_GET ( &fw, fw_pages );
2196  DBGC ( hermon, "Hermon %p requires %d pages (%d kB) for firmware\n",
2197  hermon, fw_pages, ( fw_pages * 4 ) );
2198 
2199  /* Allocate firmware pages and map firmware area */
2200  fw_len = ( fw_pages * HERMON_PAGE_SIZE );
2201  if ( ! hermon->firmware_area ) {
2202  hermon->firmware_len = fw_len;
2204  if ( ! hermon->firmware_area ) {
2205  rc = -ENOMEM;
2206  goto err_alloc_fa;
2207  }
2208  } else {
2209  assert ( hermon->firmware_len == fw_len );
2210  }
2211  fw_base = user_to_phys ( hermon->firmware_area, 0 );
2212  DBGC ( hermon, "Hermon %p firmware area at physical [%08lx,%08lx)\n",
2213  hermon, fw_base, ( fw_base + fw_len ) );
2215  0, fw_base, fw_len ) ) != 0 ) {
2216  DBGC ( hermon, "Hermon %p could not map firmware: %s\n",
2217  hermon, strerror ( rc ) );
2218  goto err_map_fa;
2219  }
2220 
2221  /* Start firmware */
2222  if ( ( rc = hermon_cmd_run_fw ( hermon ) ) != 0 ) {
2223  DBGC ( hermon, "Hermon %p could not run firmware: %s\n",
2224  hermon, strerror ( rc ) );
2225  goto err_run_fw;
2226  }
2227 
2228  DBGC ( hermon, "Hermon %p firmware started\n", hermon );
2229  return 0;
2230 
2231  err_run_fw:
2232  err_map_fa:
2234  err_alloc_fa:
2235  err_query_fw:
2236  return rc;
2237 }
2238 
2239 /**
2240  * Stop firmware running
2241  *
2242  * @v hermon Hermon device
2243  */
2244 static void hermon_stop_firmware ( struct hermon *hermon ) {
2245  int rc;
2246 
2247  if ( ( rc = hermon_cmd_unmap_fa ( hermon ) ) != 0 ) {
2248  DBGC ( hermon, "Hermon %p FATAL could not stop firmware: %s\n",
2249  hermon, strerror ( rc ) );
2250  /* Leak memory and return; at least we avoid corruption */
2252  return;
2253  }
2254 }
2255 
2256 /***************************************************************************
2257  *
2258  * Infinihost Context Memory management
2259  *
2260  ***************************************************************************
2261  */
2262 
2263 /**
2264  * Get device limits
2265  *
2266  * @v hermon Hermon device
2267  * @ret rc Return status code
2268  */
2269 static int hermon_get_cap ( struct hermon *hermon ) {
2270  struct hermonprm_query_dev_cap dev_cap;
2271  int rc;
2272 
2273  if ( ( rc = hermon_cmd_query_dev_cap ( hermon, &dev_cap ) ) != 0 ) {
2274  DBGC ( hermon, "Hermon %p could not get device limits: %s\n",
2275  hermon, strerror ( rc ) );
2276  return rc;
2277  }
2278 
2279  hermon->cap.cmpt_entry_size = MLX_GET ( &dev_cap, c_mpt_entry_sz );
2281  ( 1 << MLX_GET ( &dev_cap, log2_rsvd_qps ) );
2282  hermon->cap.qpc_entry_size = MLX_GET ( &dev_cap, qpc_entry_sz );
2283  hermon->cap.altc_entry_size = MLX_GET ( &dev_cap, altc_entry_sz );
2284  hermon->cap.auxc_entry_size = MLX_GET ( &dev_cap, aux_entry_sz );
2286  ( 1 << MLX_GET ( &dev_cap, log2_rsvd_srqs ) );
2287  hermon->cap.srqc_entry_size = MLX_GET ( &dev_cap, srq_entry_sz );
2289  ( 1 << MLX_GET ( &dev_cap, log2_rsvd_cqs ) );
2290  hermon->cap.cqc_entry_size = MLX_GET ( &dev_cap, cqc_entry_sz );
2291  hermon->cap.reserved_eqs = MLX_GET ( &dev_cap, num_rsvd_eqs );
2292  if ( hermon->cap.reserved_eqs == 0 ) {
2293  /* Backward compatibility */
2295  ( 1 << MLX_GET ( &dev_cap, log2_rsvd_eqs ) );
2296  }
2297  hermon->cap.eqc_entry_size = MLX_GET ( &dev_cap, eqc_entry_sz );
2299  ( 1 << MLX_GET ( &dev_cap, log2_rsvd_mtts ) );
2300  hermon->cap.mtt_entry_size = MLX_GET ( &dev_cap, mtt_entry_sz );
2302  ( 1 << MLX_GET ( &dev_cap, log2_rsvd_mrws ) );
2303  hermon->cap.dmpt_entry_size = MLX_GET ( &dev_cap, d_mpt_entry_sz );
2304  hermon->cap.reserved_uars = MLX_GET ( &dev_cap, num_rsvd_uars );
2305  hermon->cap.num_ports = MLX_GET ( &dev_cap, num_ports );
2306  hermon->cap.dpdp = MLX_GET ( &dev_cap, dpdp );
2307 
2308  /* Sanity check */
2309  if ( hermon->cap.num_ports > HERMON_MAX_PORTS ) {
2310  DBGC ( hermon, "Hermon %p has %d ports (only %d supported)\n",
2313  }
2314 
2315  return 0;
2316 }
2317 
2318 /**
2319  * Align ICM table
2320  *
2321  * @v icm_offset Current ICM offset
2322  * @v len ICM table length
2323  * @ret icm_offset ICM offset
2324  */
static uint64_t icm_align ( uint64_t icm_offset, size_t len ) {
	uint64_t mask = ( ( ( uint64_t ) len ) - 1 );

	/* Table length must be a power of two */
	assert ( len == ( 1UL << ( fls ( len ) - 1 ) ) );

	/* Round up to the next multiple of the table size */
	return ( ( icm_offset + mask ) & ~mask );
}
2331 
2332 /**
2333  * Map ICM (allocating if necessary)
2334  *
2335  * @v hermon Hermon device
2336  * @v init_hca INIT_HCA structure to fill in
2337  * @ret rc Return status code
2338  */
2339 static int hermon_map_icm ( struct hermon *hermon,
2340  struct hermonprm_init_hca *init_hca ) {
2341  struct hermonprm_scalar_parameter icm_size;
2342  struct hermonprm_scalar_parameter icm_aux_size;
2343  uint64_t icm_offset = 0;
2344  unsigned int log_num_qps, log_num_srqs, log_num_cqs, log_num_eqs;
2345  unsigned int log_num_mtts, log_num_mpts, log_num_mcs;
2346  size_t cmpt_max_len;
2347  size_t icm_len, icm_aux_len;
2348  size_t len;
2349  physaddr_t icm_phys;
2350  int i;
2351  int rc;
2352 
2353  /*
2354  * Start by carving up the ICM virtual address space
2355  *
2356  */
2357 
2358  /* Calculate number of each object type within ICM */
2359  log_num_qps = fls ( hermon->cap.reserved_qps +
2361  log_num_srqs = fls ( hermon->cap.reserved_srqs - 1 );
2362  log_num_cqs = fls ( hermon->cap.reserved_cqs + HERMON_MAX_CQS - 1 );
2363  log_num_eqs = fls ( hermon->cap.reserved_eqs + HERMON_MAX_EQS - 1 );
2364  log_num_mtts = fls ( hermon->cap.reserved_mtts + HERMON_MAX_MTTS - 1 );
2365  log_num_mpts = fls ( hermon->cap.reserved_mrws + 1 - 1 );
2366  log_num_mcs = HERMON_LOG_MULTICAST_HASH_SIZE;
2367 
2368  /* ICM starts with the cMPT tables, which are sparse */
2369  cmpt_max_len = ( HERMON_CMPT_MAX_ENTRIES *
2370  ( ( uint64_t ) hermon->cap.cmpt_entry_size ) );
2371  len = ( ( ( ( 1 << log_num_qps ) * hermon->cap.cmpt_entry_size ) +
2372  HERMON_PAGE_SIZE - 1 ) & ~( HERMON_PAGE_SIZE - 1 ) );
2373  hermon->icm_map[HERMON_ICM_QP_CMPT].offset = icm_offset;
2375  icm_offset += cmpt_max_len;
2376  len = ( ( ( ( 1 << log_num_srqs ) * hermon->cap.cmpt_entry_size ) +
2377  HERMON_PAGE_SIZE - 1 ) & ~( HERMON_PAGE_SIZE - 1 ) );
2378  hermon->icm_map[HERMON_ICM_SRQ_CMPT].offset = icm_offset;
2380  icm_offset += cmpt_max_len;
2381  len = ( ( ( ( 1 << log_num_cqs ) * hermon->cap.cmpt_entry_size ) +
2382  HERMON_PAGE_SIZE - 1 ) & ~( HERMON_PAGE_SIZE - 1 ) );
2383  hermon->icm_map[HERMON_ICM_CQ_CMPT].offset = icm_offset;
2385  icm_offset += cmpt_max_len;
2386  len = ( ( ( ( 1 << log_num_eqs ) * hermon->cap.cmpt_entry_size ) +
2387  HERMON_PAGE_SIZE - 1 ) & ~( HERMON_PAGE_SIZE - 1 ) );
2388  hermon->icm_map[HERMON_ICM_EQ_CMPT].offset = icm_offset;
2390  icm_offset += cmpt_max_len;
2391 
2392  hermon->icm_map[HERMON_ICM_OTHER].offset = icm_offset;
2393 
2394  /* Queue pair contexts */
2395  len = ( ( 1 << log_num_qps ) * hermon->cap.qpc_entry_size );
2396  icm_offset = icm_align ( icm_offset, len );
2397  MLX_FILL_1 ( init_hca, 12,
2398  qpc_eec_cqc_eqc_rdb_parameters.qpc_base_addr_h,
2399  ( icm_offset >> 32 ) );
2400  MLX_FILL_2 ( init_hca, 13,
2401  qpc_eec_cqc_eqc_rdb_parameters.qpc_base_addr_l,
2402  ( icm_offset >> 5 ),
2403  qpc_eec_cqc_eqc_rdb_parameters.log_num_of_qp,
2404  log_num_qps );
2405  DBGC ( hermon, "Hermon %p ICM QPC is %d x %#zx at [%08llx,%08llx)\n",
2406  hermon, ( 1 << log_num_qps ), hermon->cap.qpc_entry_size,
2407  icm_offset, ( icm_offset + len ) );
2408  icm_offset += len;
2409 
2410  /* Extended alternate path contexts */
2411  len = ( ( 1 << log_num_qps ) * hermon->cap.altc_entry_size );
2412  icm_offset = icm_align ( icm_offset, len );
2413  MLX_FILL_1 ( init_hca, 24,
2414  qpc_eec_cqc_eqc_rdb_parameters.altc_base_addr_h,
2415  ( icm_offset >> 32 ) );
2416  MLX_FILL_1 ( init_hca, 25,
2417  qpc_eec_cqc_eqc_rdb_parameters.altc_base_addr_l,
2418  icm_offset );
2419  DBGC ( hermon, "Hermon %p ICM ALTC is %d x %#zx at [%08llx,%08llx)\n",
2420  hermon, ( 1 << log_num_qps ), hermon->cap.altc_entry_size,
2421  icm_offset, ( icm_offset + len ) );
2422  icm_offset += len;
2423 
2424  /* Extended auxiliary contexts */
2425  len = ( ( 1 << log_num_qps ) * hermon->cap.auxc_entry_size );
2426  icm_offset = icm_align ( icm_offset, len );
2427  MLX_FILL_1 ( init_hca, 28,
2428  qpc_eec_cqc_eqc_rdb_parameters.auxc_base_addr_h,
2429  ( icm_offset >> 32 ) );
2430  MLX_FILL_1 ( init_hca, 29,
2431  qpc_eec_cqc_eqc_rdb_parameters.auxc_base_addr_l,
2432  icm_offset );
2433  DBGC ( hermon, "Hermon %p ICM AUXC is %d x %#zx at [%08llx,%08llx)\n",
2434  hermon, ( 1 << log_num_qps ), hermon->cap.auxc_entry_size,
2435  icm_offset, ( icm_offset + len ) );
2436  icm_offset += len;
2437 
2438  /* Shared receive queue contexts */
2439  len = ( ( 1 << log_num_srqs ) * hermon->cap.srqc_entry_size );
2440  icm_offset = icm_align ( icm_offset, len );
2441  MLX_FILL_1 ( init_hca, 18,
2442  qpc_eec_cqc_eqc_rdb_parameters.srqc_base_addr_h,
2443  ( icm_offset >> 32 ) );
2444  MLX_FILL_2 ( init_hca, 19,
2445  qpc_eec_cqc_eqc_rdb_parameters.srqc_base_addr_l,
2446  ( icm_offset >> 5 ),
2447  qpc_eec_cqc_eqc_rdb_parameters.log_num_of_srq,
2448  log_num_srqs );
2449  DBGC ( hermon, "Hermon %p ICM SRQC is %d x %#zx at [%08llx,%08llx)\n",
2450  hermon, ( 1 << log_num_srqs ), hermon->cap.srqc_entry_size,
2451  icm_offset, ( icm_offset + len ) );
2452  icm_offset += len;
2453 
2454  /* Completion queue contexts */
2455  len = ( ( 1 << log_num_cqs ) * hermon->cap.cqc_entry_size );
2456  icm_offset = icm_align ( icm_offset, len );
2457  MLX_FILL_1 ( init_hca, 20,
2458  qpc_eec_cqc_eqc_rdb_parameters.cqc_base_addr_h,
2459  ( icm_offset >> 32 ) );
2460  MLX_FILL_2 ( init_hca, 21,
2461  qpc_eec_cqc_eqc_rdb_parameters.cqc_base_addr_l,
2462  ( icm_offset >> 5 ),
2463  qpc_eec_cqc_eqc_rdb_parameters.log_num_of_cq,
2464  log_num_cqs );
2465  DBGC ( hermon, "Hermon %p ICM CQC is %d x %#zx at [%08llx,%08llx)\n",
2466  hermon, ( 1 << log_num_cqs ), hermon->cap.cqc_entry_size,
2467  icm_offset, ( icm_offset + len ) );
2468  icm_offset += len;
2469 
2470  /* Event queue contexts */
2471  len = ( ( 1 << log_num_eqs ) * hermon->cap.eqc_entry_size );
2472  icm_offset = icm_align ( icm_offset, len );
2473  MLX_FILL_1 ( init_hca, 32,
2474  qpc_eec_cqc_eqc_rdb_parameters.eqc_base_addr_h,
2475  ( icm_offset >> 32 ) );
2476  MLX_FILL_2 ( init_hca, 33,
2477  qpc_eec_cqc_eqc_rdb_parameters.eqc_base_addr_l,
2478  ( icm_offset >> 5 ),
2479  qpc_eec_cqc_eqc_rdb_parameters.log_num_of_eq,
2480  log_num_eqs );
2481  DBGC ( hermon, "Hermon %p ICM EQC is %d x %#zx at [%08llx,%08llx)\n",
2482  hermon, ( 1 << log_num_eqs ), hermon->cap.eqc_entry_size,
2483  icm_offset, ( icm_offset + len ) );
2484  icm_offset += len;
2485 
2486  /* Memory translation table */
2487  len = ( ( 1 << log_num_mtts ) * hermon->cap.mtt_entry_size );
2488  icm_offset = icm_align ( icm_offset, len );
2489  MLX_FILL_1 ( init_hca, 64,
2490  tpt_parameters.mtt_base_addr_h, ( icm_offset >> 32 ) );
2491  MLX_FILL_1 ( init_hca, 65,
2492  tpt_parameters.mtt_base_addr_l, icm_offset );
2493  DBGC ( hermon, "Hermon %p ICM MTT is %d x %#zx at [%08llx,%08llx)\n",
2494  hermon, ( 1 << log_num_mtts ), hermon->cap.mtt_entry_size,
2495  icm_offset, ( icm_offset + len ) );
2496  icm_offset += len;
2497 
2498  /* Memory protection table */
2499  len = ( ( 1 << log_num_mpts ) * hermon->cap.dmpt_entry_size );
2500  icm_offset = icm_align ( icm_offset, len );
2501  MLX_FILL_1 ( init_hca, 60,
2502  tpt_parameters.dmpt_base_adr_h, ( icm_offset >> 32 ) );
2503  MLX_FILL_1 ( init_hca, 61,
2504  tpt_parameters.dmpt_base_adr_l, icm_offset );
2505  MLX_FILL_1 ( init_hca, 62,
2506  tpt_parameters.log_dmpt_sz, log_num_mpts );
2507  DBGC ( hermon, "Hermon %p ICM DMPT is %d x %#zx at [%08llx,%08llx)\n",
2508  hermon, ( 1 << log_num_mpts ), hermon->cap.dmpt_entry_size,
2509  icm_offset, ( icm_offset + len ) );
2510  icm_offset += len;
2511 
2512  /* Multicast table */
2513  len = ( ( 1 << log_num_mcs ) * sizeof ( struct hermonprm_mcg_entry ) );
2514  icm_offset = icm_align ( icm_offset, len );
2515  MLX_FILL_1 ( init_hca, 48,
2516  multicast_parameters.mc_base_addr_h,
2517  ( icm_offset >> 32 ) );
2518  MLX_FILL_1 ( init_hca, 49,
2519  multicast_parameters.mc_base_addr_l, icm_offset );
2520  MLX_FILL_1 ( init_hca, 52,
2521  multicast_parameters.log_mc_table_entry_sz,
2522  fls ( sizeof ( struct hermonprm_mcg_entry ) - 1 ) );
2523  MLX_FILL_1 ( init_hca, 53,
2524  multicast_parameters.log_mc_table_hash_sz, log_num_mcs );
2525  MLX_FILL_1 ( init_hca, 54,
2526  multicast_parameters.log_mc_table_sz, log_num_mcs );
2527  DBGC ( hermon, "Hermon %p ICM MC is %d x %#zx at [%08llx,%08llx)\n",
2528  hermon, ( 1 << log_num_mcs ),
2529  sizeof ( struct hermonprm_mcg_entry ),
2530  icm_offset, ( icm_offset + len ) );
2531  icm_offset += len;
2532 
2533 
2535  ( icm_offset - hermon->icm_map[HERMON_ICM_OTHER].offset );
2536 
2537  /*
2538  * Allocate and map physical memory for (portions of) ICM
2539  *
2540  * Map is:
2541  * ICM AUX area (aligned to its own size)
2542  * cMPT areas
2543  * Other areas
2544  */
2545 
2546  /* Calculate physical memory required for ICM */
2547  icm_len = 0;
2548  for ( i = 0 ; i < HERMON_ICM_NUM_REGIONS ; i++ ) {
2549  icm_len += hermon->icm_map[i].len;
2550  }
2551 
2552  /* Get ICM auxiliary area size */
2553  memset ( &icm_size, 0, sizeof ( icm_size ) );
2554  MLX_FILL_1 ( &icm_size, 0, value_hi, ( icm_offset >> 32 ) );
2555  MLX_FILL_1 ( &icm_size, 1, value, icm_offset );
2556  if ( ( rc = hermon_cmd_set_icm_size ( hermon, &icm_size,
2557  &icm_aux_size ) ) != 0 ) {
2558  DBGC ( hermon, "Hermon %p could not set ICM size: %s\n",
2559  hermon, strerror ( rc ) );
2560  goto err_set_icm_size;
2561  }
2562  icm_aux_len = ( MLX_GET ( &icm_aux_size, value ) * HERMON_PAGE_SIZE );
2563 
2564  /* Allocate ICM data and auxiliary area */
2565  DBGC ( hermon, "Hermon %p requires %zd kB ICM and %zd kB AUX ICM\n",
2566  hermon, ( icm_len / 1024 ), ( icm_aux_len / 1024 ) );
2567  if ( ! hermon->icm ) {
2568  hermon->icm_len = icm_len;
2569  hermon->icm_aux_len = icm_aux_len;
2571  if ( ! hermon->icm ) {
2572  rc = -ENOMEM;
2573  goto err_alloc;
2574  }
2575  } else {
2576  assert ( hermon->icm_len == icm_len );
2577  assert ( hermon->icm_aux_len == icm_aux_len );
2578  }
2579  icm_phys = user_to_phys ( hermon->icm, 0 );
2580 
2581  /* Map ICM auxiliary area */
2582  DBGC ( hermon, "Hermon %p mapping ICM AUX => %08lx\n",
2583  hermon, icm_phys );
2585  0, icm_phys, icm_aux_len ) ) != 0 ) {
2586  DBGC ( hermon, "Hermon %p could not map AUX ICM: %s\n",
2587  hermon, strerror ( rc ) );
2588  goto err_map_icm_aux;
2589  }
2590  icm_phys += icm_aux_len;
2591 
2592  /* MAP ICM area */
2593  for ( i = 0 ; i < HERMON_ICM_NUM_REGIONS ; i++ ) {
2594  DBGC ( hermon, "Hermon %p mapping ICM %llx+%zx => %08lx\n",
2595  hermon, hermon->icm_map[i].offset,
2596  hermon->icm_map[i].len, icm_phys );
2598  hermon->icm_map[i].offset,
2599  icm_phys,
2600  hermon->icm_map[i].len ) ) != 0 ){
2601  DBGC ( hermon, "Hermon %p could not map ICM: %s\n",
2602  hermon, strerror ( rc ) );
2603  goto err_map_icm;
2604  }
2605  icm_phys += hermon->icm_map[i].len;
2606  }
2607 
2608  return 0;
2609 
2610  err_map_icm:
2611  assert ( i == 0 ); /* We don't handle partial failure at present */
2612  err_map_icm_aux:
2614  err_alloc:
2615  err_set_icm_size:
2616  return rc;
2617 }
2618 
2619 /**
2620  * Unmap ICM
2621  *
2622  * @v hermon Hermon device
2623  */
2624 static void hermon_unmap_icm ( struct hermon *hermon ) {
2625  struct hermonprm_scalar_parameter unmap_icm;
2626  int i;
2627 
2628  for ( i = ( HERMON_ICM_NUM_REGIONS - 1 ) ; i >= 0 ; i-- ) {
2629  memset ( &unmap_icm, 0, sizeof ( unmap_icm ) );
2630  MLX_FILL_1 ( &unmap_icm, 0, value_hi,
2631  ( hermon->icm_map[i].offset >> 32 ) );
2632  MLX_FILL_1 ( &unmap_icm, 1, value,
2633  hermon->icm_map[i].offset );
2635  ( 1 << fls ( ( hermon->icm_map[i].len /
2636  HERMON_PAGE_SIZE ) - 1)),
2637  &unmap_icm );
2638  }
2640 }
2641 
2642 /***************************************************************************
2643  *
2644  * Initialisation and teardown
2645  *
2646  ***************************************************************************
2647  */
2648 
2649 /**
2650  * Reset device
2651  *
2652  * @v hermon Hermon device
2653  */
2654 static void hermon_reset ( struct hermon *hermon ) {
2655  struct pci_device *pci = hermon->pci;
2656  struct pci_config_backup backup;
2657  static const uint8_t backup_exclude[] =
2658  PCI_CONFIG_BACKUP_EXCLUDE ( 0x58, 0x5c );
2659 
2660  /* Perform device reset and preserve PCI configuration */
2661  pci_backup ( pci, &backup, backup_exclude );
2665  pci_restore ( pci, &backup, backup_exclude );
2666 
2667  /* Reset command interface toggle */
2668  hermon->toggle = 0;
2669 }
2670 
2671 /**
2672  * Set up memory protection table
2673  *
2674  * @v hermon Hermon device
2675  * @ret rc Return status code
2676  */
2677 static int hermon_setup_mpt ( struct hermon *hermon ) {
2678  struct hermonprm_mpt mpt;
2679  uint32_t key;
2680  int rc;
2681 
2682  /* Derive key */
2684  hermon->lkey = ( ( key << 8 ) | ( key >> 24 ) );
2685 
2686  /* Initialise memory protection table */
2687  memset ( &mpt, 0, sizeof ( mpt ) );
2688  MLX_FILL_7 ( &mpt, 0,
2689  atomic, 1,
2690  rw, 1,
2691  rr, 1,
2692  lw, 1,
2693  lr, 1,
2694  pa, 1,
2695  r_w, 1 );
2696  MLX_FILL_1 ( &mpt, 2, mem_key, key );
2697  MLX_FILL_1 ( &mpt, 3,
2698  pd, HERMON_GLOBAL_PD );
2699  MLX_FILL_1 ( &mpt, 10, len64, 1 );
2700  if ( ( rc = hermon_cmd_sw2hw_mpt ( hermon,
2702  &mpt ) ) != 0 ) {
2703  DBGC ( hermon, "Hermon %p could not set up MPT: %s\n",
2704  hermon, strerror ( rc ) );
2705  return rc;
2706  }
2707 
2708  return 0;
2709 }
2710 
2711 /**
2712  * Configure special queue pairs
2713  *
2714  * @v hermon Hermon device
2715  * @ret rc Return status code
2716  */
2718  int rc;
2719 
2720  /* Special QP block must be aligned on its own size */
2723  & ~( HERMON_NUM_SPECIAL_QPS - 1 ) );
2726  DBGC ( hermon, "Hermon %p special QPs at [%lx,%lx]\n", hermon,
2727  hermon->special_qpn_base, ( hermon->qpn_base - 1 ) );
2728 
2729  /* Issue command to configure special QPs */
2730  if ( ( rc = hermon_cmd_conf_special_qp ( hermon, 0x00,
2731  hermon->special_qpn_base ) ) != 0 ) {
2732  DBGC ( hermon, "Hermon %p could not configure special QPs: "
2733  "%s\n", hermon, strerror ( rc ) );
2734  return rc;
2735  }
2736 
2737  return 0;
2738 }
2739 
2740 /**
2741  * Start Hermon device
2742  *
2743  * @v hermon Hermon device
2744  * @v running Firmware is already running
2745  * @ret rc Return status code
2746  */
2747 static int hermon_start ( struct hermon *hermon, int running ) {
2748  struct hermonprm_init_hca init_hca;
2749  unsigned int i;
2750  int rc;
2751 
2752  /* Start firmware if not already running */
2753  if ( ! running ) {
2754  if ( ( rc = hermon_start_firmware ( hermon ) ) != 0 )
2755  goto err_start_firmware;
2756  }
2757 
2758  /* Allocate and map ICM */
2759  memset ( &init_hca, 0, sizeof ( init_hca ) );
2760  if ( ( rc = hermon_map_icm ( hermon, &init_hca ) ) != 0 )
2761  goto err_map_icm;
2762 
2763  /* Initialise HCA */
2764  MLX_FILL_1 ( &init_hca, 0, version, 0x02 /* "Must be 0x02" */ );
2765  MLX_FILL_1 ( &init_hca, 5, udp, 1 );
2766  MLX_FILL_1 ( &init_hca, 74, uar_parameters.log_max_uars, 8 );
2767  if ( ( rc = hermon_cmd_init_hca ( hermon, &init_hca ) ) != 0 ) {
2768  DBGC ( hermon, "Hermon %p could not initialise HCA: %s\n",
2769  hermon, strerror ( rc ) );
2770  goto err_init_hca;
2771  }
2772 
2773  /* Set up memory protection */
2774  if ( ( rc = hermon_setup_mpt ( hermon ) ) != 0 )
2775  goto err_setup_mpt;
2776  for ( i = 0 ; i < hermon->cap.num_ports ; i++ )
2777  hermon->port[i].ibdev->rdma_key = hermon->lkey;
2778 
2779  /* Set up event queue */
2780  if ( ( rc = hermon_create_eq ( hermon ) ) != 0 )
2781  goto err_create_eq;
2782 
2783  /* Configure special QPs */
2784  if ( ( rc = hermon_configure_special_qps ( hermon ) ) != 0 )
2785  goto err_conf_special_qps;
2786 
2787  return 0;
2788 
2789  err_conf_special_qps:
2791  err_create_eq:
2792  err_setup_mpt:
2794  err_init_hca:
2796  err_map_icm:
2798  err_start_firmware:
2799  return rc;
2800 }
2801 
2802 /**
2803  * Stop Hermon device
2804  *
2805  * @v hermon Hermon device
2806  */
static void hermon_stop ( struct hermon *hermon ) {
	/* Tear down in reverse order of hermon_start() */
	hermon_destroy_eq ( hermon );
	hermon_cmd_close_hca ( hermon );
	hermon_unmap_icm ( hermon );
	hermon_stop_firmware ( hermon );
	hermon_reset ( hermon );
}
2814 
2815 /**
2816  * Open Hermon device
2817  *
2818  * @v hermon Hermon device
2819  * @ret rc Return status code
2820  */
2821 static int hermon_open ( struct hermon *hermon ) {
2822  int rc;
2823 
2824  /* Start device if applicable */
2825  if ( hermon->open_count == 0 ) {
2826  if ( ( rc = hermon_start ( hermon, 0 ) ) != 0 )
2827  return rc;
2828  }
2829 
2830  /* Increment open counter */
2831  hermon->open_count++;
2832 
2833  return 0;
2834 }
2835 
2836 /**
2837  * Close Hermon device
2838  *
2839  * @v hermon Hermon device
2840  */
2841 static void hermon_close ( struct hermon *hermon ) {
2842 
2843  /* Decrement open counter */
2844  assert ( hermon->open_count != 0 );
2845  hermon->open_count--;
2846 
2847  /* Stop device if applicable */
2848  if ( hermon->open_count == 0 )
2849  hermon_stop ( hermon );
2850 }
2851 
2852 /***************************************************************************
2853  *
2854  * Infiniband link-layer operations
2855  *
2856  ***************************************************************************
2857  */
2858 
2859 /**
2860  * Initialise Infiniband link
2861  *
2862  * @v ibdev Infiniband device
2863  * @ret rc Return status code
2864  */
2865 static int hermon_ib_open ( struct ib_device *ibdev ) {
2866  struct hermon *hermon = ib_get_drvdata ( ibdev );
2867  union hermonprm_set_port set_port;
2868  int rc;
2869 
2870  /* Open hardware */
2871  if ( ( rc = hermon_open ( hermon ) ) != 0 )
2872  goto err_open;
2873 
2874  /* Set port parameters */
2875  memset ( &set_port, 0, sizeof ( set_port ) );
2876  MLX_FILL_8 ( &set_port.ib, 0,
2877  mmc, 1,
2878  mvc, 1,
2879  mp, 1,
2880  mg, 1,
2881  mtu_cap, IB_MTU_2048,
2882  vl_cap, IB_VL_0,
2883  rcm, 1,
2884  lss, 1 );
2885  MLX_FILL_2 ( &set_port.ib, 10,
2886  max_pkey, 1,
2887  max_gid, 1 );
2888  MLX_FILL_1 ( &set_port.ib, 28,
2889  link_speed_supported, 1 );
2890  if ( ( rc = hermon_cmd_set_port ( hermon, 0, ibdev->port,
2891  &set_port ) ) != 0 ) {
2892  DBGC ( hermon, "Hermon %p port %d could not set port: %s\n",
2893  hermon, ibdev->port, strerror ( rc ) );
2894  goto err_set_port;
2895  }
2896 
2897  /* Initialise port */
2898  if ( ( rc = hermon_cmd_init_port ( hermon, ibdev->port ) ) != 0 ) {
2899  DBGC ( hermon, "Hermon %p port %d could not initialise port: "
2900  "%s\n", hermon, ibdev->port, strerror ( rc ) );
2901  goto err_init_port;
2902  }
2903 
2904  /* Update MAD parameters */
2905  ib_smc_update ( ibdev, hermon_mad );
2906 
2907  return 0;
2908 
2909  err_init_port:
2910  err_set_port:
2911  hermon_close ( hermon );
2912  err_open:
2913  return rc;
2914 }
2915 
2916 /**
2917  * Close Infiniband link
2918  *
2919  * @v ibdev Infiniband device
2920  */
2921 static void hermon_ib_close ( struct ib_device *ibdev ) {
2922  struct hermon *hermon = ib_get_drvdata ( ibdev );
2923  int rc;
2924 
2925  /* Close port */
2926  if ( ( rc = hermon_cmd_close_port ( hermon, ibdev->port ) ) != 0 ) {
2927  DBGC ( hermon, "Hermon %p port %d could not close port: %s\n",
2928  hermon, ibdev->port, strerror ( rc ) );
2929  /* Nothing we can do about this */
2930  }
2931 
2932  /* Close hardware */
2933  hermon_close ( hermon );
2934 }
2935 
2936 /**
2937  * Inform embedded subnet management agent of a received MAD
2938  *
2939  * @v ibdev Infiniband device
2940  * @v mad MAD
2941  * @ret rc Return status code
2942  */
static int hermon_inform_sma ( struct ib_device *ibdev,
			       union ib_mad *mad ) {
	int rc;

	/* Pass the MAD to the embedded subnet management agent */
	rc = hermon_mad ( ibdev, mad );
	if ( rc != 0 )
		return rc;

	/* Refresh the parameters held in software to match */
	ib_smc_update ( ibdev, hermon_mad );

	return 0;
}
2956 
2957 /***************************************************************************
2958  *
2959  * Multicast group operations
2960  *
2961  ***************************************************************************
2962  */
2963 
2964 /**
2965  * Attach to multicast group
2966  *
2967  * @v ibdev Infiniband device
2968  * @v qp Queue pair
2969  * @v gid Multicast GID
2970  * @ret rc Return status code
2971  */
2972 static int hermon_mcast_attach ( struct ib_device *ibdev,
2973  struct ib_queue_pair *qp,
2974  union ib_gid *gid ) {
2975  struct hermon *hermon = ib_get_drvdata ( ibdev );
2976  struct hermonprm_mgm_hash hash;
2977  struct hermonprm_mcg_entry mcg;
2978  unsigned int index;
2979  int rc;
2980 
2981  /* Generate hash table index */
2982  if ( ( rc = hermon_cmd_mgid_hash ( hermon, gid, &hash ) ) != 0 ) {
2983  DBGC ( hermon, "Hermon %p could not hash GID: %s\n",
2984  hermon, strerror ( rc ) );
2985  return rc;
2986  }
2987  index = MLX_GET ( &hash, hash );
2988 
2989  /* Check for existing hash table entry */
2990  if ( ( rc = hermon_cmd_read_mcg ( hermon, index, &mcg ) ) != 0 ) {
2991  DBGC ( hermon, "Hermon %p could not read MCG %#x: %s\n",
2992  hermon, index, strerror ( rc ) );
2993  return rc;
2994  }
2995  if ( MLX_GET ( &mcg, hdr.members_count ) != 0 ) {
2996  /* FIXME: this implementation allows only a single QP
2997  * per multicast group, and doesn't handle hash
2998  * collisions. Sufficient for IPoIB but may need to
2999  * be extended in future.
3000  */
3001  DBGC ( hermon, "Hermon %p MGID index %#x already in use\n",
3002  hermon, index );
3003  return -EBUSY;
3004  }
3005 
3006  /* Update hash table entry */
3007  MLX_FILL_1 ( &mcg, 1, hdr.members_count, 1 );
3008  MLX_FILL_1 ( &mcg, 8, qp[0].qpn, qp->qpn );
3009  memcpy ( &mcg.u.dwords[4], gid, sizeof ( *gid ) );
3010  if ( ( rc = hermon_cmd_write_mcg ( hermon, index, &mcg ) ) != 0 ) {
3011  DBGC ( hermon, "Hermon %p could not write MCG %#x: %s\n",
3012  hermon, index, strerror ( rc ) );
3013  return rc;
3014  }
3015 
3016  return 0;
3017 }
3018 
3019 /**
3020  * Detach from multicast group
3021  *
3022  * @v ibdev Infiniband device
3023  * @v qp Queue pair
3024  * @v gid Multicast GID
3025  */
3026 static void hermon_mcast_detach ( struct ib_device *ibdev,
3027  struct ib_queue_pair *qp __unused,
3028  union ib_gid *gid ) {
3029  struct hermon *hermon = ib_get_drvdata ( ibdev );
3030  struct hermonprm_mgm_hash hash;
3031  struct hermonprm_mcg_entry mcg;
3032  unsigned int index;
3033  int rc;
3034 
3035  /* Generate hash table index */
3036  if ( ( rc = hermon_cmd_mgid_hash ( hermon, gid, &hash ) ) != 0 ) {
3037  DBGC ( hermon, "Hermon %p could not hash GID: %s\n",
3038  hermon, strerror ( rc ) );
3039  return;
3040  }
3041  index = MLX_GET ( &hash, hash );
3042 
3043  /* Clear hash table entry */
3044  memset ( &mcg, 0, sizeof ( mcg ) );
3045  if ( ( rc = hermon_cmd_write_mcg ( hermon, index, &mcg ) ) != 0 ) {
3046  DBGC ( hermon, "Hermon %p could not write MCG %#x: %s\n",
3047  hermon, index, strerror ( rc ) );
3048  return;
3049  }
3050 }
3051 
3052 /** Hermon Infiniband operations */
3055  .destroy_cq = hermon_destroy_cq,
3056  .create_qp = hermon_create_qp,
3057  .modify_qp = hermon_modify_qp,
3058  .destroy_qp = hermon_destroy_qp,
3059  .post_send = hermon_post_send,
3060  .post_recv = hermon_post_recv,
3061  .poll_cq = hermon_poll_cq,
3062  .poll_eq = hermon_poll_eq,
3063  .open = hermon_ib_open,
3064  .close = hermon_ib_close,
3065  .mcast_attach = hermon_mcast_attach,
3066  .mcast_detach = hermon_mcast_detach,
3067  .set_port_info = hermon_inform_sma,
3068  .set_pkey_table = hermon_inform_sma,
3069 };
3070 
3071 /**
3072  * Register Hermon Infiniband device
3073  *
3074  * @v hermon Hermon device
3075  * @v port Hermon port
3076  * @ret rc Return status code
3077  */
3078 static int hermon_register_ibdev ( struct hermon *hermon,
3079  struct hermon_port *port ) {
3080  struct ib_device *ibdev = port->ibdev;
3081  int rc;
3082 
3083  /* Initialise parameters using SMC */
3084  ib_smc_init ( ibdev, hermon_mad );
3085 
3086  /* Register Infiniband device */
3087  if ( ( rc = register_ibdev ( ibdev ) ) != 0 ) {
3088  DBGC ( hermon, "Hermon %p port %d could not register IB "
3089  "device: %s\n", hermon, ibdev->port, strerror ( rc ) );
3090  return rc;
3091  }
3092 
3093  return 0;
3094 }
3095 
3096 /**
3097  * Handle Hermon Infiniband device port state change
3098  *
3099  * @v hermon Hermon device
3100  * @v port Hermon port
3101  * @v link_up Link is up
3102  */
3104  struct hermon_port *port,
3105  int link_up __unused ) {
3106  struct ib_device *ibdev = port->ibdev;
3107 
3108  /* Update MAD parameters */
3109  ib_smc_update ( ibdev, hermon_mad );
3110 }
3111 
3112 /**
3113  * Unregister Hermon Infiniband device
3114  *
3115  * @v hermon Hermon device
3116  * @v port Hermon port
3117  */
3119  struct hermon_port *port ) {
3120  struct ib_device *ibdev = port->ibdev;
3121 
3122  unregister_ibdev ( ibdev );
3123 }
3124 
3125 /** Hermon Infiniband port type */
3128  .state_change = hermon_state_change_ibdev,
3129  .unregister_dev = hermon_unregister_ibdev,
3130 };
3131 
3132 /***************************************************************************
3133  *
3134  * Ethernet operation
3135  *
3136  ***************************************************************************
3137  */
3138 
3139 /** Number of Hermon Ethernet send work queue entries */
3140 #define HERMON_ETH_NUM_SEND_WQES 2
3141 
3142 /** Number of Hermon Ethernet receive work queue entries */
3143 #define HERMON_ETH_NUM_RECV_WQES 4
3144 
3145 /** Number of Hermon Ethernet completion entries */
3146 #define HERMON_ETH_NUM_CQES 8
3147 
3148 /**
3149  * Transmit packet via Hermon Ethernet device
3150  *
3151  * @v netdev Network device
3152  * @v iobuf I/O buffer
3153  * @ret rc Return status code
3154  */
3156  struct io_buffer *iobuf ) {
3157  struct hermon_port *port = netdev->priv;
3158  struct ib_device *ibdev = port->ibdev;
3159  struct hermon *hermon = ib_get_drvdata ( ibdev );
3160  int rc;
3161 
3162  /* Transmit packet */
3163  if ( ( rc = ib_post_send ( ibdev, port->eth_qp, NULL,
3164  iobuf ) ) != 0 ) {
3165  DBGC ( hermon, "Hermon %p port %d could not transmit: %s\n",
3166  hermon, ibdev->port, strerror ( rc ) );
3167  return rc;
3168  }
3169 
3170  return 0;
3171 }
3172 
3173 /** Hermon Ethernet queue pair operations */
3175  .alloc_iob = alloc_iob,
3176 };
3177 
3178 /**
3179  * Handle Hermon Ethernet device send completion
3180  *
3181  * @v ibdev Infiniband device
3182  * @v qp Queue pair
3183  * @v iobuf I/O buffer
3184  * @v rc Completion status code
3185  */
3186 static void hermon_eth_complete_send ( struct ib_device *ibdev __unused,
3187  struct ib_queue_pair *qp,
3188  struct io_buffer *iobuf, int rc ) {
3189  struct net_device *netdev = ib_qp_get_ownerdata ( qp );
3190 
3191  netdev_tx_complete_err ( netdev, iobuf, rc );
3192 }
3193 
3194 /**
3195  * Handle Hermon Ethernet device receive completion
3196  *
3197  * @v ibdev Infiniband device
3198  * @v qp Queue pair
3199  * @v dest Destination address vector, or NULL
3200  * @v source Source address vector, or NULL
3201  * @v iobuf I/O buffer
3202  * @v rc Completion status code
3203  */
3204 static void hermon_eth_complete_recv ( struct ib_device *ibdev __unused,
3205  struct ib_queue_pair *qp,
3207  struct ib_address_vector *source,
3208  struct io_buffer *iobuf, int rc ) {
3209  struct net_device *netdev = ib_qp_get_ownerdata ( qp );
3210  unsigned int tag;
3211 
3212  /* Identify VLAN tag, if applicable */
3213  tag = ( source->vlan_present ? source->vlan : 0 );
3214 
3215  /* Hand off to network layer */
3216  if ( rc == 0 ) {
3217  vlan_netdev_rx ( netdev, tag, iobuf );
3218  } else {
3219  vlan_netdev_rx_err ( netdev, tag, iobuf, rc );
3220  }
3221 }
3222 
3223 /** Hermon Ethernet device completion operations */
3226  .complete_recv = hermon_eth_complete_recv,
3227 };
3228 
3229 /**
3230  * Poll Hermon Ethernet device
3231  *
3232  * @v netdev Network device
3233  */
3234 static void hermon_eth_poll ( struct net_device *netdev ) {
3235  struct hermon_port *port = netdev->priv;
3236  struct ib_device *ibdev = port->ibdev;
3237 
3238  ib_poll_eq ( ibdev );
3239 }
3240 
3241 /**
3242  * Open Hermon Ethernet device
3243  *
3244  * @v netdev Network device
3245  * @ret rc Return status code
3246  */
3247 static int hermon_eth_open ( struct net_device *netdev ) {
3248  struct hermon_port *port = netdev->priv;
3249  struct ib_device *ibdev = port->ibdev;
3250  struct hermon *hermon = ib_get_drvdata ( ibdev );
3251  union hermonprm_set_port set_port;
3252  int rc;
3253 
3254  /* Open hardware */
3255  if ( ( rc = hermon_open ( hermon ) ) != 0 )
3256  goto err_open;
3257 
3258  /* Allocate completion queue */
3259  if ( ( rc = ib_create_cq ( ibdev, HERMON_ETH_NUM_CQES,
3260  &hermon_eth_cq_op, &port->eth_cq ) ) != 0 ) {
3261  DBGC ( hermon, "Hermon %p port %d could not create completion "
3262  "queue: %s\n", hermon, ibdev->port, strerror ( rc ) );
3263  goto err_create_cq;
3264  }
3265 
3266  /* Allocate queue pair */
3268  port->eth_cq, HERMON_ETH_NUM_RECV_WQES,
3269  port->eth_cq, &hermon_eth_qp_op,
3270  netdev->name, &port->eth_qp ) ) != 0 ) {
3271  DBGC ( hermon, "Hermon %p port %d could not create queue "
3272  "pair: %s\n", hermon, ibdev->port, strerror ( rc ) );
3273  goto err_create_qp;
3274  }
3275  ib_qp_set_ownerdata ( port->eth_qp, netdev );
3276 
3277  /* Activate queue pair */
3278  if ( ( rc = ib_modify_qp ( ibdev, port->eth_qp ) ) != 0 ) {
3279  DBGC ( hermon, "Hermon %p port %d could not modify queue "
3280  "pair: %s\n", hermon, ibdev->port, strerror ( rc ) );
3281  goto err_modify_qp;
3282  }
3283 
3284  /* Fill receive rings */
3285  ib_refill_recv ( ibdev, port->eth_qp );
3286 
3287  /* Set port general parameters */
3288  memset ( &set_port, 0, sizeof ( set_port ) );
3289  MLX_FILL_3 ( &set_port.general, 0,
3290  v_mtu, 1,
3291  v_pprx, 1,
3292  v_pptx, 1 );
3293  MLX_FILL_1 ( &set_port.general, 1,
3294  mtu, ( ETH_FRAME_LEN + 40 /* Used by card */ ) );
3295  MLX_FILL_1 ( &set_port.general, 2,
3296  pfctx, ( 1 << FCOE_VLAN_PRIORITY ) );
3297  MLX_FILL_1 ( &set_port.general, 3,
3298  pfcrx, ( 1 << FCOE_VLAN_PRIORITY ) );
3299  if ( ( rc = hermon_cmd_set_port ( hermon, 1,
3301  ibdev->port ),
3302  &set_port ) ) != 0 ) {
3303  DBGC ( hermon, "Hermon %p port %d could not set port general "
3304  "parameters: %s\n",
3305  hermon, ibdev->port, strerror ( rc ) );
3306  goto err_set_port_general_params;
3307  }
3308 
3309  /* Set port receive QP */
3310  memset ( &set_port, 0, sizeof ( set_port ) );
3311  MLX_FILL_1 ( &set_port.rqp_calc, 0, base_qpn, port->eth_qp->qpn );
3312  MLX_FILL_1 ( &set_port.rqp_calc, 2,
3313  mac_miss_index, 128 /* MAC misses go to promisc QP */ );
3314  MLX_FILL_2 ( &set_port.rqp_calc, 3,
3315  vlan_miss_index, 127 /* VLAN misses go to promisc QP */,
3316  no_vlan_index, 126 /* VLAN-free go to promisc QP */ );
3317  MLX_FILL_2 ( &set_port.rqp_calc, 5,
3318  promisc_qpn, port->eth_qp->qpn,
3319  en_uc_promisc, 1 );
3320  MLX_FILL_2 ( &set_port.rqp_calc, 6,
3321  def_mcast_qpn, port->eth_qp->qpn,
3322  mc_promisc_mode, 2 /* Receive all multicasts */ );
3323  if ( ( rc = hermon_cmd_set_port ( hermon, 1,
3325  ibdev->port ),
3326  &set_port ) ) != 0 ) {
3327  DBGC ( hermon, "Hermon %p port %d could not set port receive "
3328  "QP: %s\n", hermon, ibdev->port, strerror ( rc ) );
3329  goto err_set_port_receive_qp;
3330  }
3331 
3332  /* Initialise port */
3333  if ( ( rc = hermon_cmd_init_port ( hermon, ibdev->port ) ) != 0 ) {
3334  DBGC ( hermon, "Hermon %p port %d could not initialise port: "
3335  "%s\n", hermon, ibdev->port, strerror ( rc ) );
3336  goto err_init_port;
3337  }
3338 
3339  return 0;
3340 
3341  err_init_port:
3342  err_set_port_receive_qp:
3343  err_set_port_general_params:
3344  err_modify_qp:
3345  ib_destroy_qp ( ibdev, port->eth_qp );
3346  err_create_qp:
3347  ib_destroy_cq ( ibdev, port->eth_cq );
3348  err_create_cq:
3349  hermon_close ( hermon );
3350  err_open:
3351  return rc;
3352 }
3353 
3354 /**
3355  * Close Hermon Ethernet device
3356  *
3357  * @v netdev Network device
3358  */
3359 static void hermon_eth_close ( struct net_device *netdev ) {
3360  struct hermon_port *port = netdev->priv;
3361  struct ib_device *ibdev = port->ibdev;
3362  struct hermon *hermon = ib_get_drvdata ( ibdev );
3363  int rc;
3364 
3365  /* Close port */
3366  if ( ( rc = hermon_cmd_close_port ( hermon, ibdev->port ) ) != 0 ) {
3367  DBGC ( hermon, "Hermon %p port %d could not close port: %s\n",
3368  hermon, ibdev->port, strerror ( rc ) );
3369  /* Nothing we can do about this */
3370  }
3371 
3372  /* Tear down the queues */
3373  ib_destroy_qp ( ibdev, port->eth_qp );
3374  ib_destroy_cq ( ibdev, port->eth_cq );
3375 
3376  /* Close hardware */
3377  hermon_close ( hermon );
3378 }
3379 
3380 /** Hermon Ethernet network device operations */
3382  .open = hermon_eth_open,
3383  .close = hermon_eth_close,
3384  .transmit = hermon_eth_transmit,
3385  .poll = hermon_eth_poll,
3386 };
3387 
3388 /**
3389  * Register Hermon Ethernet device
3390  *
3391  * @v hermon Hermon device
3392  * @v port Hermon port
3393  * @ret rc Return status code
3394  */
3395 static int hermon_register_netdev ( struct hermon *hermon,
3396  struct hermon_port *port ) {
3397  struct net_device *netdev = port->netdev;
3398  struct ib_device *ibdev = port->ibdev;
3399  struct hermonprm_query_port_cap query_port;
3400  union {
3401  uint8_t bytes[8];
3402  uint32_t dwords[2];
3403  } mac;
3404  int rc;
3405 
3406  /* Retrieve MAC address */
3407  if ( ( rc = hermon_cmd_query_port ( hermon, ibdev->port,
3408  &query_port ) ) != 0 ) {
3409  DBGC ( hermon, "Hermon %p port %d could not query port: %s\n",
3410  hermon, ibdev->port, strerror ( rc ) );
3411  goto err_query_port;
3412  }
3413  mac.dwords[0] = htonl ( MLX_GET ( &query_port, mac_47_32 ) );
3414  mac.dwords[1] = htonl ( MLX_GET ( &query_port, mac_31_0 ) );
3415  memcpy ( netdev->hw_addr,
3416  &mac.bytes[ sizeof ( mac.bytes ) - ETH_ALEN ], ETH_ALEN );
3417 
3418  /* Register network device */
3419  if ( ( rc = register_netdev ( netdev ) ) != 0 ) {
3420  DBGC ( hermon, "Hermon %p port %d could not register network "
3421  "device: %s\n", hermon, ibdev->port, strerror ( rc ) );
3422  goto err_register_netdev;
3423  }
3424 
3425  /* Register non-volatile options */
3426  if ( ( rc = register_nvo ( &port->nvo,
3427  netdev_settings ( netdev ) ) ) != 0 ) {
3428  DBGC ( hermon, "Hermon %p port %d could not register non-"
3429  "volatile options: %s\n",
3430  hermon, ibdev->port, strerror ( rc ) );
3431  goto err_register_nvo;
3432  }
3433 
3434  return 0;
3435 
3436  unregister_nvo ( &port->nvo );
3437  err_register_nvo:
3439  err_register_netdev:
3440  err_query_port:
3441  return rc;
3442 }
3443 
3444 /**
3445  * Handle Hermon Ethernet device port state change
3446  *
3447  * @v hermon Hermon device
3448  * @v port Hermon port
3449  * @v link_up Link is up
3450  */
3452  struct hermon_port *port,
3453  int link_up ) {
3454  struct net_device *netdev = port->netdev;
3455 
3456  if ( link_up ) {
3457  netdev_link_up ( netdev );
3458  } else {
3460  }
3461 }
3462 
3463 /**
3464  * Unregister Hermon Ethernet device
3465  *
3466  * @v hermon Hermon device
3467  * @v port Hermon port
3468  */
3470  struct hermon_port *port ) {
3471  struct net_device *netdev = port->netdev;
3472 
3473  unregister_nvo ( &port->nvo );
3475 }
3476 
3477 /** Hermon Ethernet port type */
3480  .state_change = hermon_state_change_netdev,
3481  .unregister_dev = hermon_unregister_netdev,
3482 };
3483 
3484 /***************************************************************************
3485  *
3486  * Port type detection
3487  *
3488  ***************************************************************************
3489  */
3490 
3491 /** Timeout for port sensing */
3492 #define HERMON_SENSE_PORT_TIMEOUT ( TICKS_PER_SEC / 2 )
3493 
3494 /**
3495  * Name port type
3496  *
3497  * @v port_type Port type
3498  * @v port_type_name Port type name
3499  */
3500 static inline const char * hermon_name_port_type ( unsigned int port_type ) {
3501  switch ( port_type ) {
3502  case HERMON_PORT_TYPE_UNKNOWN: return "unknown";
3503  case HERMON_PORT_TYPE_IB: return "Infiniband";
3504  case HERMON_PORT_TYPE_ETH: return "Ethernet";
3505  default: return "INVALID";
3506  }
3507 }
3508 
3509 /**
3510  * Sense port type
3511  *
3512  * @v hermon Hermon device
3513  * @v port Hermon port
3514  * @ret port_type Port type, or negative error
3515  */
3516 static int hermon_sense_port_type ( struct hermon *hermon,
3517  struct hermon_port *port ) {
3518  struct ib_device *ibdev = port->ibdev;
3519  struct hermonprm_sense_port sense_port;
3520  int port_type;
3521  int rc;
3522 
3523  /* If DPDP is not supported, always assume Infiniband */
3524  if ( ! hermon->cap.dpdp ) {
3525  port_type = HERMON_PORT_TYPE_IB;
3526  DBGC ( hermon, "Hermon %p port %d does not support DPDP; "
3527  "assuming an %s network\n", hermon, ibdev->port,
3528  hermon_name_port_type ( port_type ) );
3529  return port_type;
3530  }
3531 
3532  /* Sense the port type */
3533  if ( ( rc = hermon_cmd_sense_port ( hermon, ibdev->port,
3534  &sense_port ) ) != 0 ) {
3535  DBGC ( hermon, "Hermon %p port %d sense failed: %s\n",
3536  hermon, ibdev->port, strerror ( rc ) );
3537  return rc;
3538  }
3539  port_type = MLX_GET ( &sense_port, port_type );
3540 
3541  DBGC ( hermon, "Hermon %p port %d sensed an %s network\n",
3542  hermon, ibdev->port, hermon_name_port_type ( port_type ) );
3543  return port_type;
3544 }
3545 
3546 /**
3547  * Set port type
3548  *
3549  * @v hermon Hermon device
3550  * @v port Hermon port
3551  * @ret rc Return status code
3552  */
3553 static int hermon_set_port_type ( struct hermon *hermon,
3554  struct hermon_port *port ) {
3555  struct ib_device *ibdev = port->ibdev;
3556  struct hermonprm_query_port_cap query_port;
3557  int ib_supported;
3558  int eth_supported;
3559  int port_type;
3560  unsigned long start;
3561  unsigned long elapsed;
3562  int rc;
3563 
3564  /* Check to see which types are supported */
3565  if ( ( rc = hermon_cmd_query_port ( hermon, ibdev->port,
3566  &query_port ) ) != 0 ) {
3567  DBGC ( hermon, "Hermon %p port %d could not query port: %s\n",
3568  hermon, ibdev->port, strerror ( rc ) );
3569  return rc;
3570  }
3571  ib_supported = MLX_GET ( &query_port, ib );
3572  eth_supported = MLX_GET ( &query_port, eth );
3573  DBGC ( hermon, "Hermon %p port %d supports%s%s%s\n",
3574  hermon, ibdev->port, ( ib_supported ? " Infiniband" : "" ),
3575  ( ( ib_supported && eth_supported ) ? " and" : "" ),
3576  ( eth_supported ? " Ethernet" : "" ) );
3577 
3578  /* Sense network, if applicable */
3579  if ( ib_supported && eth_supported ) {
3580 
3581  /* Both types are supported; try sensing network */
3582  start = currticks();
3583  do {
3584  /* Try sensing port */
3585  port_type = hermon_sense_port_type ( hermon, port );
3586  if ( port_type < 0 ) {
3587  rc = port_type;
3588  return rc;
3589  }
3590  } while ( ( port_type == HERMON_PORT_TYPE_UNKNOWN ) &&
3591  ( ( elapsed = ( currticks() - start ) ) <
3593 
3594  /* Set port type based on sensed network, defaulting
3595  * to Infiniband if nothing was sensed.
3596  */
3597  switch ( port_type ) {
3598  case HERMON_PORT_TYPE_ETH:
3599  port->type = &hermon_port_type_eth;
3600  break;
3601  case HERMON_PORT_TYPE_IB:
3603  port->type = &hermon_port_type_ib;
3604  break;
3605  default:
3606  return -EINVAL;
3607  }
3608 
3609  } else if ( eth_supported ) {
3610  port->type = &hermon_port_type_eth;
3611  } else {
3612  port->type = &hermon_port_type_ib;
3613  }
3614 
3615  assert ( port->type != NULL );
3616  return 0;
3617 }
3618 
3619 /***************************************************************************
3620  *
3621  * BOFM interface
3622  *
3623  ***************************************************************************
3624  */
3625 
3626 /**
3627  * Harvest Ethernet MAC for BOFM
3628  *
3629  * @v bofm BOFM device
3630  * @v mport Multi-port index
3631  * @v mac MAC to fill in
3632  * @ret rc Return status code
3633  */
3634 static int hermon_bofm_harvest ( struct bofm_device *bofm, unsigned int mport,
3635  uint8_t *mac ) {
3636  struct hermon *hermon = container_of ( bofm, struct hermon, bofm );
3637  struct hermonprm_mod_stat_cfg stat_cfg;
3638  union {
3639  uint8_t bytes[8];
3640  uint32_t dwords[2];
3641  } buf;
3642  int rc;
3643 
3644  /* Query static configuration */
3645  if ( ( rc = hermon_mod_stat_cfg ( hermon, mport,
3647  HERMON_MOD_STAT_CFG_OFFSET ( mac_m ),
3648  &stat_cfg ) ) != 0 ) {
3649  DBGC ( hermon, "Hermon %p port %d could not query "
3650  "configuration: %s\n", hermon, mport, strerror ( rc ) );
3651  return rc;
3652  }
3653 
3654  /* Retrieve MAC address */
3655  buf.dwords[0] = htonl ( MLX_GET ( &stat_cfg, mac_high ) );
3656  buf.dwords[1] = htonl ( MLX_GET ( &stat_cfg, mac_low ) );
3657  memcpy ( mac, &buf.bytes[ sizeof ( buf.bytes ) - ETH_ALEN ],
3658  ETH_ALEN );
3659 
3660  DBGC ( hermon, "Hermon %p port %d harvested MAC address %s\n",
3661  hermon, mport, eth_ntoa ( mac ) );
3662 
3663  return 0;
3664 }
3665 
3666 /**
3667  * Update Ethernet MAC for BOFM
3668  *
3669  * @v bofm BOFM device
3670  * @v mport Multi-port index
3671  * @v mac MAC to fill in
3672  * @ret rc Return status code
3673  */
3674 static int hermon_bofm_update ( struct bofm_device *bofm, unsigned int mport,
3675  const uint8_t *mac ) {
3676  struct hermon *hermon = container_of ( bofm, struct hermon, bofm );
3677  struct hermonprm_mod_stat_cfg stat_cfg;
3678  union {
3679  uint8_t bytes[8];
3680  uint32_t dwords[2];
3681  } buf;
3682  int rc;
3683 
3684  /* Prepare MAC address */
3685  memset ( &buf, 0, sizeof ( buf ) );
3686  memcpy ( &buf.bytes[ sizeof ( buf.bytes ) - ETH_ALEN ], mac,
3687  ETH_ALEN );
3688 
3689  /* Modify static configuration */
3690  memset ( &stat_cfg, 0, sizeof ( stat_cfg ) );
3691  MLX_FILL_2 ( &stat_cfg, 36,
3692  mac_m, 1,
3693  mac_high, ntohl ( buf.dwords[0] ) );
3694  MLX_FILL_1 ( &stat_cfg, 37, mac_low, ntohl ( buf.dwords[1] ) );
3695  if ( ( rc = hermon_mod_stat_cfg ( hermon, mport,
3697  HERMON_MOD_STAT_CFG_OFFSET ( mac_m ),
3698  &stat_cfg ) ) != 0 ) {
3699  DBGC ( hermon, "Hermon %p port %d could not modify "
3700  "configuration: %s\n", hermon, mport, strerror ( rc ) );
3701  return rc;
3702  }
3703 
3704  DBGC ( hermon, "Hermon %p port %d updated MAC address to %s\n",
3705  hermon, mport, eth_ntoa ( mac ) );
3706 
3707  return 0;
3708 }
3709 
3710 /** Hermon BOFM operations */
3713  .update = hermon_bofm_update,
3714 };
3715 
3716 /***************************************************************************
3717  *
3718  * PCI interface
3719  *
3720  ***************************************************************************
3721  */
3722 
3723 /**
3724  * Allocate Hermon device
3725  *
3726  * @v pci PCI device
3727  * @v id PCI ID
3728  * @ret rc Return status code
3729  */
3730 static struct hermon * hermon_alloc ( void ) {
3731  struct hermon *hermon;
3732 
3733  /* Allocate Hermon device */
3734  hermon = zalloc ( sizeof ( *hermon ) );
3735  if ( ! hermon )
3736  goto err_hermon;
3737 
3738  /* Allocate space for mailboxes */
3741  if ( ! hermon->mailbox_in )
3742  goto err_mailbox_in;
3745  if ( ! hermon->mailbox_out )
3746  goto err_mailbox_out;
3747 
3748  return hermon;
3749 
3751  err_mailbox_out:
3753  err_mailbox_in:
3754  free ( hermon );
3755  err_hermon:
3756  return NULL;
3757 }
3758 
3759 /**
3760  * Free Hermon device
3761  *
3762  * @v hermon Hermon device
3763  */
3764 static void hermon_free ( struct hermon *hermon ) {
3765 
3766  ufree ( hermon->icm );
3767  ufree ( hermon->firmware_area );
3770  free ( hermon );
3771 }
3772 
3773 /**
3774  * Probe PCI device
3775  *
3776  * @v pci PCI device
3777  * @v id PCI ID
3778  * @ret rc Return status code
3779  */
3780 static int hermon_probe ( struct pci_device *pci ) {
3781  struct hermon *hermon;
3782  struct ib_device *ibdev;
3783  struct net_device *netdev;
3784  struct hermon_port *port;
3785  unsigned int i;
3786  int rc;
3787 
3788  /* Allocate Hermon device */
3789  hermon = hermon_alloc();
3790  if ( ! hermon ) {
3791  rc = -ENOMEM;
3792  goto err_alloc;
3793  }
3794  pci_set_drvdata ( pci, hermon );
3795  hermon->pci = pci;
3796 
3797  /* Fix up PCI device */
3798  adjust_pci_device ( pci );
3799 
3800  /* Map PCI BARs */
3805 
3806  /* Reset device */
3807  hermon_reset ( hermon );
3808 
3809  /* Start firmware */
3810  if ( ( rc = hermon_start_firmware ( hermon ) ) != 0 )
3811  goto err_start_firmware;
3812 
3813  /* Get device limits */
3814  if ( ( rc = hermon_get_cap ( hermon ) ) != 0 )
3815  goto err_get_cap;
3816 
3817  /* Allocate Infiniband devices */
3818  for ( i = 0 ; i < hermon->cap.num_ports ; i++ ) {
3819  ibdev = alloc_ibdev ( 0 );
3820  if ( ! ibdev ) {
3821  rc = -ENOMEM;
3822  goto err_alloc_ibdev;
3823  }
3824  hermon->port[i].ibdev = ibdev;
3826  ibdev->dev = &pci->dev;
3827  ibdev->port = ( HERMON_PORT_BASE + i );
3829  }
3830 
3831  /* Allocate network devices */
3832  for ( i = 0 ; i < hermon->cap.num_ports ; i++ ) {
3833  netdev = alloc_etherdev ( 0 );
3834  if ( ! netdev ) {
3835  rc = -ENOMEM;
3836  goto err_alloc_netdev;
3837  }
3838  hermon->port[i].netdev = netdev;
3840  netdev->dev = &pci->dev;
3841  netdev->priv = &hermon->port[i];
3842  }
3843 
3844  /* Start device */
3845  if ( ( rc = hermon_start ( hermon, 1 ) ) != 0 )
3846  goto err_start;
3847 
3848  /* Determine port types */
3849  for ( i = 0 ; i < hermon->cap.num_ports ; i++ ) {
3850  port = &hermon->port[i];
3851  if ( ( rc = hermon_set_port_type ( hermon, port ) ) != 0 )
3852  goto err_set_port_type;
3853  }
3854 
3855  /* Initialise non-volatile storage */
3856  nvs_vpd_init ( &hermon->nvsvpd, pci );
3857  for ( i = 0 ; i < hermon->cap.num_ports ; i++ ) {
3858  port = &hermon->port[i];
3860  HERMON_VPD_FIELD ( port->ibdev->port ),
3861  &port->nvo, NULL );
3862  }
3863 
3864  /* Register devices */
3865  for ( i = 0 ; i < hermon->cap.num_ports ; i++ ) {
3866  port = &hermon->port[i];
3867  if ( ( rc = port->type->register_dev ( hermon, port ) ) != 0 )
3868  goto err_register;
3869  }
3870 
3871  /* Leave device quiescent until opened */
3872  if ( hermon->open_count == 0 )
3873  hermon_stop ( hermon );
3874 
3875  return 0;
3876 
3877  i = hermon->cap.num_ports;
3878  err_register:
3879  for ( i-- ; ( signed int ) i >= 0 ; i-- ) {
3880  port = &hermon->port[i];
3881  port->type->unregister_dev ( hermon, port );
3882  }
3883  err_set_port_type:
3884  hermon_stop ( hermon );
3885  err_start:
3886  i = hermon->cap.num_ports;
3887  err_alloc_netdev:
3888  for ( i-- ; ( signed int ) i >= 0 ; i-- ) {
3889  netdev_nullify ( hermon->port[i].netdev );
3890  netdev_put ( hermon->port[i].netdev );
3891  }
3892  i = hermon->cap.num_ports;
3893  err_alloc_ibdev:
3894  for ( i-- ; ( signed int ) i >= 0 ; i-- )
3895  ibdev_put ( hermon->port[i].ibdev );
3896  err_get_cap:
3898  err_start_firmware:
3899  iounmap ( hermon->uar );
3900  iounmap ( hermon->config );
3901  hermon_free ( hermon );
3902  err_alloc:
3903  return rc;
3904 }
3905 
3906 /**
3907  * Remove PCI device
3908  *
3909  * @v pci PCI device
3910  */
3911 static void hermon_remove ( struct pci_device *pci ) {
3912  struct hermon *hermon = pci_get_drvdata ( pci );
3913  struct hermon_port *port;
3914  int i;
3915 
3916  for ( i = ( hermon->cap.num_ports - 1 ) ; i >= 0 ; i-- ) {
3917  port = &hermon->port[i];
3918  port->type->unregister_dev ( hermon, port );
3919  }
3920  for ( i = ( hermon->cap.num_ports - 1 ) ; i >= 0 ; i-- ) {
3921  netdev_nullify ( hermon->port[i].netdev );
3922  netdev_put ( hermon->port[i].netdev );
3923  }
3924  for ( i = ( hermon->cap.num_ports - 1 ) ; i >= 0 ; i-- )
3925  ibdev_put ( hermon->port[i].ibdev );
3926  iounmap ( hermon->uar );
3927  iounmap ( hermon->config );
3928  hermon_free ( hermon );
3929 }
3930 
3931 /**
3932  * Probe PCI device for BOFM
3933  *
3934  * @v pci PCI device
3935  * @v id PCI ID
3936  * @ret rc Return status code
3937  */
3938 static int hermon_bofm_probe ( struct pci_device *pci ) {
3939  struct hermon *hermon;
3940  int rc;
3941 
3942  /* Allocate Hermon device */
3943  hermon = hermon_alloc();
3944  if ( ! hermon ) {
3945  rc = -ENOMEM;
3946  goto err_alloc;
3947  }
3948  pci_set_drvdata ( pci, hermon );
3949  hermon->pci = pci;
3950 
3951  /* Fix up PCI device */
3952  adjust_pci_device ( pci );
3953 
3954  /* Map PCI BAR */
3957 
3958  /* Initialise BOFM device */
3960 
3961  /* Register BOFM device */
3962  if ( ( rc = bofm_register ( &hermon->bofm ) ) != 0 ) {
3963  DBGC ( hermon, "Hermon %p could not register BOFM device: "
3964  "%s\n", hermon, strerror ( rc ) );
3965  goto err_bofm_register;
3966  }
3967 
3968  return 0;
3969 
3970  err_bofm_register:
3971  iounmap ( hermon->config );
3972  hermon_free ( hermon );
3973  err_alloc:
3974  return rc;
3975 }
3976 
/**
 * Remove PCI device for BOFM
 *
 * Reverses hermon_bofm_probe(): unregisters the BOFM device, unmaps
 * the configuration space BAR, and frees the device structure.
 *
 * @v pci		PCI device
 */
static void hermon_bofm_remove ( struct pci_device *pci ) {
	struct hermon *hermon = pci_get_drvdata ( pci );

	/* Unregister before releasing the resources BOFM may use */
	bofm_unregister ( &hermon->bofm );
	iounmap ( hermon->config );
	hermon_free ( hermon );
}
3989 
/** Hermon PCI device IDs (all Mellanox, vendor 0x15b3) */
static struct pci_device_id hermon_nics[] = {
	PCI_ROM ( 0x15b3, 0x6340, "mt25408", "MT25408 HCA driver", 0 ),
	PCI_ROM ( 0x15b3, 0x634a, "mt25418", "MT25418 HCA driver", 0 ),
	PCI_ROM ( 0x15b3, 0x6732, "mt26418", "MT26418 HCA driver", 0 ),
	PCI_ROM ( 0x15b3, 0x673c, "mt26428", "MT26428 HCA driver", 0 ),
	PCI_ROM ( 0x15b3, 0x6746, "mt26438", "MT26438 HCA driver", 0 ),
	PCI_ROM ( 0x15b3, 0x6778, "mt26488", "MT26488 HCA driver", 0 ),
	PCI_ROM ( 0x15b3, 0x6368, "mt25448", "MT25448 HCA driver", 0 ),
	PCI_ROM ( 0x15b3, 0x6750, "mt26448", "MT26448 HCA driver", 0 ),
	PCI_ROM ( 0x15b3, 0x6372, "mt25458", "MT25458 HCA driver", 0 ),
	PCI_ROM ( 0x15b3, 0x675a, "mt26458", "MT26458 HCA driver", 0 ),
	PCI_ROM ( 0x15b3, 0x6764, "mt26468", "MT26468 HCA driver", 0 ),
	PCI_ROM ( 0x15b3, 0x676e, "mt26478", "MT26478 HCA driver", 0 ),
};
4004 
/** Hermon PCI driver */
struct pci_driver hermon_driver __pci_driver = {
	.ids = hermon_nics,
	.id_count = ( sizeof ( hermon_nics ) / sizeof ( hermon_nics[0] ) ),
	.probe = hermon_probe,
	.remove = hermon_remove,
};
4011 
4012 struct pci_driver hermon_bofm_driver __bofm_driver = {
4013  .ids = hermon_nics,
4014  .id_count = ( sizeof ( hermon_nics ) / sizeof ( hermon_nics[0] ) ),
4017 };
void unregister_ibdev(struct ib_device *ibdev)
Unregister Infiniband device.
Definition: infiniband.c:1005
#define HERMON_MTU_ETH
Definition: hermon.h:100
static int hermon_cmd_query_dev_cap(struct hermon *hermon, struct hermonprm_query_dev_cap *dev_cap)
Definition: hermon.c:271
size_t eqe_size
Size of event queue.
Definition: hermon.h:769
#define HERMON_PORT_BASE
Definition: hermon.h:28
static void hermon_ib_close(struct ib_device *ibdev)
Close Infiniband link.
Definition: hermon.c:2921
#define __attribute__(x)
Definition: compiler.h:10
#define EINVAL
Invalid argument.
Definition: errno.h:428
static __always_inline void ib_set_drvdata(struct ib_device *ibdev, void *priv)
Set Infiniband device driver-private data.
Definition: infiniband.h:696
#define HERMON_SCHED_DEFAULT
Definition: hermon.h:127
void ib_poll_eq(struct ib_device *ibdev)
Poll event queue.
Definition: infiniband.c:898
iPXE I/O API
static void hermon_event_port_state_change(struct hermon *hermon, union hermonprm_event_entry *eqe)
Handle port state event.
Definition: hermon.c:1999
struct arbelprm_rc_send_wqe rc
Definition: arbel.h:14
#define HERMON_HCR_OUT_LEN(_command)
Definition: hermon.h:947
struct ib_device * ibdev
Infiniband device.
Definition: hermon.h:828
Infiniband protocol.
#define MLX_FILL_7(_ptr, _index,...)
Definition: mlx_bitops.h:191
struct net_device * netdev
Network device.
Definition: hermon.h:830
static int hermon_cmd_unmap_icm_aux(struct hermon *hermon)
Definition: hermon.c:561
struct hermonprm_set_port_rqp_calc rqp_calc
Definition: hermon.h:548
#define MLX_FILL_2(_ptr, _index,...)
Definition: mlx_bitops.h:171
struct hermon_send_work_queue send
Send work queue.
Definition: hermon.h:733
static int hermon_cmd_sw2hw_cq(struct hermon *hermon, unsigned long cqn, const struct hermonprm_completion_queue_context *cqctx)
Definition: hermon.c:382
struct hermon_mtt mtt
MTT descriptor.
Definition: hermon.h:753
static int hermon_cmd_sw2hw_eq(struct hermon *hermon, unsigned int index, const struct hermonprm_eqc *eqctx)
Definition: hermon.c:355
#define iob_put(iobuf, len)
Definition: iobuf.h:116
static int hermon_post_send(struct ib_device *ibdev, struct ib_queue_pair *qp, struct ib_address_vector *dest, struct io_buffer *iobuf)
Post send work queue entry.
Definition: hermon.c:1587
#define MLX_FILL_4(_ptr, _index,...)
Definition: mlx_bitops.h:179
static void hermon_unmap_icm(struct hermon *hermon)
Unmap ICM.
Definition: hermon.c:2624
#define IB_QPN_SMI
Subnet management interface QPN.
Definition: infiniband.h:21
#define HERMON_PCI_CONFIG_BAR_SIZE
Definition: hermon.h:32
static void bofm_init(struct bofm_device *bofm, struct pci_device *pci, struct bofm_operations *op)
Initialise BOFM device.
Definition: bofm.h:339
static void hermon_eth_complete_send(struct ib_device *ibdev __unused, struct ib_queue_pair *qp, struct io_buffer *iobuf, int rc)
Handle Hermon Ethernet device send completion.
Definition: hermon.c:3186
static int hermon_sense_port_type(struct hermon *hermon, struct hermon_port *port)
Sense port type.
Definition: hermon.c:3516
#define HERMON_CMPT_MAX_ENTRIES
Number of cMPT entries of each type.
Definition: hermon.h:599
#define HERMON_HCR_BASE
Definition: hermon.h:926
void nvs_vpd_nvo_init(struct nvs_vpd_device *nvsvpd, unsigned int field, struct nvo_block *nvo, struct refcnt *refcnt)
Initialise non-volatile option storage within NVS VPD device.
Definition: nvsvpd.c:220
#define IB_MTU_2048
Definition: ib_mad.h:162
static int hermon_cmd_rst2init_qp(struct hermon *hermon, unsigned long qpn, const struct hermonprm_qp_ee_state_transitions *ctx)
Definition: hermon.c:409
struct hermonprm_wqe_segment_data_ptr data[HERMON_MAX_GATHER]
Definition: hermon.h:510
#define HERMON_HCR_READ_MCG
Definition: hermon.h:71
struct hermonprm_event_db_register event
Definition: hermon.h:536
static int hermon_cmd_wait(struct hermon *hermon, struct hermonprm_hca_command_register *hcr)
Wait for Hermon command completion.
Definition: hermon.c:136
int nvs_vpd_init(struct nvs_vpd_device *nvsvpd, struct pci_device *pci)
Initialise NVS VPD device.
Definition: nvsvpd.c:178
A PCI driver.
Definition: pci.h:224
#define EBUSY
Device or resource busy.
Definition: errno.h:338
#define HERMON_OPCODE_NOP
Definition: hermon.h:41
size_t auxc_entry_size
Auxiliary context entry size.
Definition: hermon.h:569
static int ib_is_open(struct ib_device *ibdev)
Check whether or not Infiniband device is open.
Definition: infiniband.h:575
#define MLX_FILL_8(_ptr, _index,...)
Definition: mlx_bitops.h:195
struct hermon_recv_work_queue recv
Receive work queue.
Definition: hermon.h:735
#define HERMON_HCR_INIT_PORT
Definition: hermon.h:51
Infiniband device operations.
Definition: infiniband.h:254
#define HERMON_MOD_STAT_CFG_OFFSET(field)
Calculate offset within static configuration.
Definition: hermon.c:717
__be32 in[4]
Definition: CIB_PRM.h:35
static int hermon_alloc_mtt(struct hermon *hermon, const void *memory, size_t len, struct hermon_mtt *mtt)
Allocate MTT entries.
Definition: hermon.c:619
static int hermon_cmd_query_fw(struct hermon *hermon, struct hermonprm_query_fw *fw)
Definition: hermon.c:280
void * doorbell
Doorbell register.
Definition: hermon.h:671
#define HERMON_MOD_STAT_CFG_SET
Definition: hermon.h:137
static unsigned int unsigned int bit
Definition: bigint.h:205
struct hermonprm_send_db_register send
Definition: hermon.h:535
static struct ib_completion_queue_operations hermon_eth_cq_op
Hermon Ethernet device completion operations.
Definition: hermon.c:3224
int(* open)(struct net_device *netdev)
Open network device.
Definition: netdevice.h:222
struct hermonprm_recv_wqe recv
Definition: hermon.h:679
uint8_t opcode
Opcode.
Definition: ena.h:16
#define HERMON_MKEY_PREFIX
Memory key prefix.
Definition: hermon.h:919
#define HERMON_ETH_NUM_SEND_WQES
Number of Hermon Ethernet send work queue entries.
Definition: hermon.c:3140
static int hermon_cmd_write_mtt(struct hermon *hermon, const struct hermonprm_write_mtt *write_mtt)
Definition: hermon.c:337
static int hermon_cmd_set_icm_size(struct hermon *hermon, const struct hermonprm_scalar_parameter *icm_size, struct hermonprm_scalar_parameter *icm_aux_size)
Definition: hermon.c:577
#define HERMON_OPCODE_SEND
Definition: hermon.h:42
size_t cqc_entry_size
CQ context entry size.
Definition: hermon.h:577
static int hermon_mcast_attach(struct ib_device *ibdev, struct ib_queue_pair *qp, union ib_gid *gid)
Attach to multicast group.
Definition: hermon.c:2972
static int hermon_cmd_query_qp(struct hermon *hermon, unsigned long qpn, struct hermonprm_qp_ee_state_transitions *ctx)
Definition: hermon.c:452
Error codes.
#define HERMON_PORT_TYPE_ETH
Definition: hermon.h:96
void * mailbox_in
Command input mailbox.
Definition: hermon.h:853
u8 owner
Definition: CIB_PRM.h:36
#define HERMON_SET_PORT_RECEIVE_QP
Definition: hermon.h:118
static int hermon_modify_qp(struct ib_device *ibdev, struct ib_queue_pair *qp)
Modify queue pair.
Definition: hermon.c:1270
A command-line command.
Definition: command.h:9
#define HERMON_MAX_EQS
Maximum number of allocatable event queues.
Definition: hermon.h:762
I/O buffers.
#define HERMON_LOG_MULTICAST_HASH_SIZE
Definition: hermon.h:129
#define DBG_ENABLE(level)
Definition: compiler.h:313
struct pci_device_id * ids
PCI ID table.
Definition: pci.h:226
Non-Volatile Storage using Vital Product Data.
int register_nvo(struct nvo_block *nvo, struct settings *parent)
Register non-volatile stored options.
Definition: nvo.c:293
size_t mtt_entry_size
MTT entry size.
Definition: hermon.h:585
uint32_t readl(volatile uint32_t *io_addr)
Read 32-bit dword from memory-mapped device.
int ib_modify_qp(struct ib_device *ibdev, struct ib_queue_pair *qp)
Modify queue pair.
Definition: infiniband.c:294
static void hermon_destroy_eq(struct hermon *hermon)
Destroy event queue.
Definition: hermon.c:1960
unsigned long user_to_phys(userptr_t userptr, off_t offset)
Convert user pointer to physical address.
int ib_create_cq(struct ib_device *ibdev, unsigned int num_cqes, struct ib_completion_queue_operations *op, struct ib_completion_queue **new_cq)
Create completion queue.
Definition: infiniband.c:98
#define FCOE_VLAN_PRIORITY
FCoE VLAN priority.
Definition: fcoe.h:90
void ib_refill_recv(struct ib_device *ibdev, struct ib_queue_pair *qp)
Refill receive work queue.
Definition: infiniband.c:556
#define DBGC(...)
Definition: compiler.h:505
static struct hermon_port_type hermon_port_type_ib
Hermon Infiniband port type.
Definition: hermon.c:3126
__be32 byte_count
Definition: CIB_PRM.h:28
static int hermon_cmd_mod_stat_cfg(struct hermon *hermon, unsigned int mode, unsigned int input_mod, struct hermonprm_scalar_parameter *portion)
Definition: hermon.c:507
struct pci_driver hermon_bofm_driver __bofm_driver
Definition: hermon.c:4012
struct io_buffer *(* alloc_iob)(size_t len)
Allocate receive I/O buffer.
Definition: infiniband.h:153
static int hermon_cmd_query_eq(struct hermon *hermon, unsigned int index, struct hermonprm_eqc *eqctx)
Definition: hermon.c:373
static void hermon_poll_eq(struct ib_device *ibdev)
Poll event queue.
Definition: hermon.c:2027
struct device * dev
Underlying device.
Definition: infiniband.h:410
static int hermon_start(struct hermon *hermon, int running)
Start Hermon device.
Definition: hermon.c:2747
unsigned long long uint64_t
Definition: stdint.h:13
#define DBG_DISABLE(level)
Definition: compiler.h:312
unsigned long special_qpn_base
Special QPN base.
Definition: hermon.h:901
static __always_inline void * ib_qp_get_drvdata(struct ib_queue_pair *qp)
Get Infiniband queue pair driver-private data.
Definition: infiniband.h:641
#define ntohl(value)
Definition: byteswap.h:134
#define HERMON_PAGE_SIZE
Definition: hermon.h:104
static void hermon_stop_firmware(struct hermon *hermon)
Stop firmware running.
Definition: hermon.c:2244
#define HERMON_RETRY_MAX
Definition: hermon.h:135
void pci_backup(struct pci_device *pci, struct pci_config_backup *backup, const uint8_t *exclude)
Back up PCI configuration space.
Definition: pcibackup.c:66
static void hermon_destroy_cq(struct ib_device *ibdev, struct ib_completion_queue *cq)
Destroy completion queue.
Definition: hermon.c:945
void netdev_link_down(struct net_device *netdev)
Mark network device as having link down.
Definition: netdevice.c:186
struct ib_global_route_header grh
Definition: ib_packet.h:16
A Hermon send work queue entry.
Definition: hermon.h:648
#define HERMON_HCR_QUERY_PORT
Definition: hermon.h:75
#define ntohs(value)
Definition: byteswap.h:136
static int hermon_set_port_type(struct hermon *hermon, struct hermon_port *port)
Set port type.
Definition: hermon.c:3553
static int hermon_cmd_map_icm(struct hermon *hermon, const struct hermonprm_virtual_physical_mapping *map)
Definition: hermon.c:552
#define HERMON_DB_POST_SND_OFFSET
Definition: hermon.h:106
#define HERMON_OPCODE_RECV_ERROR
Definition: hermon.h:43
#define HERMON_PORT_TYPE_IB
Definition: hermon.h:95
static __always_inline void off_t int c
Definition: efi_uaccess.h:87
A Hermon completion queue.
Definition: hermon.h:747
#define offsetof(type, field)
Get offset of a field within a structure.
Definition: stddef.h:24
uint8_t mac[ETH_ALEN]
MAC address.
Definition: ena.h:24
struct golan_eq_context ctx
Definition: CIB_PRM.h:28
unsigned int gid_present
GID is present.
Definition: infiniband.h:90
#define HERMON_HCR_MAP_ICM
Definition: hermon.h:81
static int hermon_cmd_hw2sw_eq(struct hermon *hermon, unsigned int index, struct hermonprm_eqc *eqctx)
Definition: hermon.c:364
unsigned int vlan
VLAN, if present.
Definition: infiniband.h:96
static void iob_populate(struct io_buffer *iobuf, void *data, size_t len, size_t max_len)
Create a temporary I/O buffer.
Definition: iobuf.h:186
union hermonprm_event_entry * eqe
Event queue entries.
Definition: hermon.h:767
struct hermonprm_wqe_segment_ctrl_mlx ctrl
Definition: hermon.h:503
#define HERMON_HCR_UNMAP_ICM_AUX
Definition: hermon.h:82
unsigned int reserved_cqs
Number of reserved CQs.
Definition: hermon.h:575
#define HERMON_ETH_NUM_CQES
Number of Hermon Ethernet completion entries.
Definition: hermon.c:3146
userptr_t firmware_area
Firmware area in external memory.
Definition: hermon.h:868
#define HERMON_PCI_UAR_BAR
Definition: hermon.h:33
size_t cqe_size
Size of completion queue.
Definition: hermon.h:751
#define HERMON_HCR_IN_LEN(_command)
Definition: hermon.h:946
static void hermon_free_qpn(struct ib_device *ibdev, struct ib_queue_pair *qp)
Free queue pair number.
Definition: hermon.c:1033
void adjust_pci_device(struct pci_device *pci)
Enable PCI device.
Definition: pci.c:149
struct io_buffer * alloc_iob(size_t len)
Allocate I/O buffer.
Definition: iobuf.c:128
An Infiniband Global Identifier.
Definition: ib_packet.h:33
#define HERMON_HCR_RTR2RTS_QP
Definition: hermon.h:65
static __always_inline unsigned long virt_to_phys(volatile const void *addr)
Convert virtual address to a physical address.
Definition: uaccess.h:287
struct hermonprm_qp_db_record * doorbell
Doorbell record.
Definition: hermon.h:694
__be32 qpn
Definition: CIB_PRM.h:29
enum hermon_queue_pair_state state
Queue state.
Definition: hermon.h:737
#define htonl(value)
Definition: byteswap.h:133
struct device dev
Generic device.
Definition: pci.h:189
static unsigned int hermon_fill_rc_send_wqe(struct ib_device *ibdev, struct ib_queue_pair *qp __unused, struct ib_address_vector *dest __unused, struct io_buffer *iobuf, union hermon_send_wqe *wqe)
Construct RC send work queue entry.
Definition: hermon.c:1511
static struct settings * netdev_settings(struct net_device *netdev)
Get per-netdevice configuration settings block.
Definition: netdevice.h:577
struct hermonprm_set_port_general_context general
Definition: hermon.h:547
A Hermon port type.
Definition: hermon.h:798
union ib_gid dgid
Destiniation GID.
Definition: ib_packet.h:106
#define ENOTSUP
Operation not supported.
Definition: errno.h:589
void ib_destroy_cq(struct ib_device *ibdev, struct ib_completion_queue *cq)
Destroy completion queue.
Definition: infiniband.c:145
static int hermon_dump_cqctx(struct hermon *hermon, struct ib_completion_queue *cq)
Dump completion queue context (for debugging only)
Definition: hermon.c:816
#define HERMON_SET_PORT_GENERAL_PARAM
Definition: hermon.h:117
static unsigned int hermon_fill_ud_send_wqe(struct ib_device *ibdev, struct ib_queue_pair *qp __unused, struct ib_address_vector *dest, struct io_buffer *iobuf, union hermon_send_wqe *wqe)
Construct UD send work queue entry.
Definition: hermon.c:1417
#define HERMON_INVALID_LKEY
Definition: hermon.h:102
enum ib_rate rate
Rate.
Definition: infiniband.h:86
Dynamic memory allocation.
#define HERMON_HCR_RTS2RTS_QP
Definition: hermon.h:66
#define HERMON_HCR_INOUT_CMD(_opcode, _in_mbox, _in_len, _out_mbox, _out_len)
Build HCR command from component parts.
Definition: hermon.h:950
struct bofm_device bofm
BOFM device.
Definition: hermon.h:912
Definition: hermon.h:524
union hermon_recv_wqe * wqe
Work queue entries.
Definition: hermon.h:686
struct hermonprm_wqe_segment_data_ptr data[HERMON_MAX_GATHER]
Definition: hermon.h:515
uint32_t start
Starting offset.
Definition: netvsc.h:12
struct hermonprm_wqe_segment_ctrl_send ctrl
Definition: hermon.h:514
static int hermon_cmd_write_mcg(struct hermon *hermon, unsigned int index, const struct hermonprm_mcg_entry *mcg)
Definition: hermon.c:488
Fibre Channel over Ethernet.
An Infiniband device.
Definition: infiniband.h:398
uint8_t status
Status.
Definition: ena.h:16
struct hermon_mtt mtt
MTT descriptor.
Definition: hermon.h:771
static void netdev_init(struct net_device *netdev, struct net_device_operations *op)
Initialise a network device.
Definition: netdevice.h:498
pseudo_bit_t ci[0x00020]
Definition: arbel.h:11
#define DBGCP_HDA(...)
Definition: compiler.h:540
#define MLX_FILL_3(_ptr, _index,...)
Definition: mlx_bitops.h:175
static void pci_set_drvdata(struct pci_device *pci, void *priv)
Set PCI driver-private data.
Definition: pci.h:338
static int hermon_cmd_rts2rts_qp(struct hermon *hermon, unsigned long qpn, const struct hermonprm_qp_ee_state_transitions *ctx)
Definition: hermon.c:436
#define HERMON_HCR_INIT_HCA
Definition: hermon.h:49
#define HERMON_ST_RC
Definition: hermon.h:89
#define ENOMEM
Not enough space.
Definition: errno.h:534
static int hermon_map_icm(struct hermon *hermon, struct hermonprm_init_hca *init_hca)
Map ICM (allocating if necessary)
Definition: hermon.c:2339
unsigned int mtt_base_addr
MTT base address.
Definition: hermon.h:639
static void hermon_free(struct hermon *hermon)
Free Hermon device.
Definition: hermon.c:3764
Infiniband completion queue operations.
Definition: infiniband.h:194
void * memcpy(void *dest, const void *src, size_t len) __nonnull
#define HERMON_HCR_IN_MBOX
Definition: hermon.h:943
Infiniband queue pair operations.
Definition: infiniband.h:147
hermon_bitmask_t cq_inuse[HERMON_BITMASK_SIZE(HERMON_MAX_CQS)]
Completion queue in-use bitmask.
Definition: hermon.h:892
unsigned int num_ports
Number of ports.
Definition: hermon.h:593
static int hermon_cmd_sw2hw_mpt(struct hermon *hermon, unsigned int index, const struct hermonprm_mpt *mpt)
Definition: hermon.c:328
static int hermon_cmd_rtr2rts_qp(struct hermon *hermon, unsigned long qpn, const struct hermonprm_qp_ee_state_transitions *ctx)
Definition: hermon.c:427
struct hermonprm_wqe_segment_data_ptr data[HERMON_MAX_GATHER]
Definition: hermon.h:504
static int hermon_open(struct hermon *hermon)
Open Hermon device.
Definition: hermon.c:2821
u8 port
Port number.
Definition: CIB_PRM.h:31
void * mailbox_out
Command output mailbox.
Definition: hermon.h:855
static __always_inline void * ib_get_drvdata(struct ib_device *ibdev)
Get Infiniband device driver-private data.
Definition: infiniband.h:707
u32 version
Version number.
Definition: ath9k_hw.c:1983
static int hermon_start_firmware(struct hermon *hermon)
Start firmware running.
Definition: hermon.c:2179
uint32_t hermon_bitmask_t
A Hermon resource bitmask.
Definition: hermon.h:787
IBM BladeCenter Open Fabric Manager (BOFM)
static int hermon_cmd(struct hermon *hermon, unsigned long command, unsigned int op_mod, const void *in, unsigned int in_mod, void *out)
Issue HCA command.
Definition: hermon.c:162
int ib_smc_init(struct ib_device *ibdev, ib_local_mad_t local_mad)
Initialise Infiniband parameters using SMC.
Definition: ib_smc.c:232
static __always_inline unsigned long virt_to_bus(volatile const void *addr)
Convert virtual address to a bus address.
Definition: io.h:183
size_t wqe_size
Size of work queue buffer.
Definition: hermon.h:729
static int hermon_configure_special_qps(struct hermon *hermon)
Configure special queue pairs.
Definition: hermon.c:2717
struct hermonprm_eth_send_wqe eth
Definition: hermon.h:653
#define HERMON_MAP_EQ
Definition: hermon.h:114
userptr_t icm
ICM area.
Definition: hermon.h:881
#define be32_to_cpu(value)
Definition: byteswap.h:116
assert((readw(&hdr->flags) &(GTF_reading|GTF_writing))==0)
static void netdev_put(struct net_device *netdev)
Drop reference to network device.
Definition: netdevice.h:555
#define HERMON_SENSE_PORT_TIMEOUT
Timeout for port sensing.
Definition: hermon.c:3492
#define container_of(ptr, type, field)
Get containing structure.
Definition: stddef.h:35
#define HERMON_HCR_INIT2RTR_QP
Definition: hermon.h:64
Ethernet protocol.
struct hermonprm_rc_send_wqe rc
Definition: hermon.h:652
struct ib_device_operations * op
Infiniband operations.
Definition: infiniband.h:416
#define DBGLVL_LOG
Definition: compiler.h:316
pseudo_bit_t p[0x00002]
Definition: MT25218_PRM.h:3017
struct hermonprm_wqe_segment_ctrl_send ctrl
Definition: hermon.h:497
uint64_t offset
Offset (virtual address within ICM)
Definition: hermon.h:604
struct hermonprm_event_queue_entry generic
Definition: hermon.h:530
An Infiniband Work Queue.
Definition: infiniband.h:100
#define ETH_FRAME_LEN
Definition: if_ether.h:11
static int hermon_bofm_update(struct bofm_device *bofm, unsigned int mport, const uint8_t *mac)
Update Ethernet MAC for BOFM.
Definition: hermon.c:3674
void * priv
Driver private data.
Definition: netdevice.h:425
struct sockaddr_tcpip st
Definition: dns.c:69
#define DBGC_HDA(...)
Definition: compiler.h:506
size_t wqe_size
Size of work queue.
Definition: hermon.h:688
void * wqe
Work queue buffer.
Definition: hermon.h:727
static void netdev_link_up(struct net_device *netdev)
Mark network device as having link up.
Definition: netdevice.h:768
void ib_complete_send(struct ib_device *ibdev, struct ib_queue_pair *qp, struct io_buffer *iobuf, int rc)
Complete send work queue entry.
Definition: infiniband.c:515
void writel(uint32_t data, volatile uint32_t *io_addr)
Write 32-bit dword to memory-mapped device.
static int hermon_cmd_init_port(struct hermon *hermon, unsigned int port)
Definition: hermon.c:304
#define HERMON_PM_STATE_MIGRATED
Definition: hermon.h:133
static struct ib_device_operations hermon_ib_operations
Hermon Infiniband operations.
Definition: hermon.c:3053
__be16 wqe_counter
Definition: CIB_PRM.h:36
#define HERMON_SCHED_QP0
Definition: hermon.h:126
#define HERMON_HCR_MAD_IFC
Definition: hermon.h:70
static void hermon_destroy_qp(struct ib_device *ibdev, struct ib_queue_pair *qp)
Destroy queue pair.
Definition: hermon.c:1348
static int hermon_cmd_unmap_fa(struct hermon *hermon)
Definition: hermon.c:588
static userptr_t size_t offset
Offset of the first segment within the content.
Definition: deflate.h:259
static struct net_device * netdev
Definition: gdbudp.c:52
static int hermon_bofm_probe(struct pci_device *pci)
Probe PCI device for BOFM.
Definition: hermon.c:3938
u8 num_ports
Definition: CIB_PRM.h:61
static void hermon_eth_complete_recv(struct ib_device *ibdev __unused, struct ib_queue_pair *qp, struct ib_address_vector *dest __unused, struct ib_address_vector *source, struct io_buffer *iobuf, int rc)
Handle Hermon Ethernet device receive completion.
Definition: hermon.c:3204
__be32 num_pages
Definition: CIB_PRM.h:31
__be32 out[4]
Definition: CIB_PRM.h:36
#define HERMON_HCR_UNMAP_ICM
Definition: hermon.h:80
unsigned long pci_bar_start(struct pci_device *pci, unsigned int reg)
Find the start of a PCI BAR.
Definition: pci.c:96
static int hermon_cmd_set_port(struct hermon *hermon, int is_ethernet, unsigned int port_selector, const union hermonprm_set_port *set_port)
Definition: hermon.c:318
static int hermon_mad(struct ib_device *ibdev, union ib_mad *mad)
Issue management datagram.
Definition: hermon.c:771
static const char * hermon_name_port_type(unsigned int port_type)
Name port type.
Definition: hermon.c:3500
An Infiniband Global Route Header.
Definition: ib_packet.h:89
#define HERMON_HCR_MOD_STAT_CFG
Definition: hermon.h:74
#define HERMON_RSVD_SPECIAL_QPS
Number of queue pairs reserved for the "special QP" block.
Definition: hermon.h:705
struct hermonprm_wqe_segment_ctrl_send ctrl
Definition: hermon.h:509
struct ib_work_queue * ib_find_wq(struct ib_completion_queue *cq, unsigned long qpn, int is_send)
Find work queue belonging to completion queue.
Definition: infiniband.c:396
void unregister_netdev(struct net_device *netdev)
Unregister network device.
Definition: netdevice.c:844
uint32_t low
Low 16 bits of address.
Definition: intel.h:21
#define HERMON_PORT_TYPE_UNKNOWN
Definition: hermon.h:94
void unregister_nvo(struct nvo_block *nvo)
Unregister non-volatile stored options.
Definition: nvo.c:324
struct hermon_port_type * type
Port type.
Definition: hermon.h:836
A Hermon receive work queue.
Definition: hermon.h:684
unsigned int num_wqes
Number of work queue entries.
Definition: infiniband.h:112
static void * dest
Definition: strings.h:176
#define HERMON_HCR_QUERY_CQ
Definition: hermon.h:62
struct hermonprm_scalar_parameter mtt_base_addr
Definition: hermon.h:489
#define HERMON_QPN_RANDOM_MASK
Queue pair number randomisation mask.
Definition: hermon.h:714
#define HERMON_VPD_FIELD(port)
Definition: hermon.h:140
pseudo_bit_t value[0x00020]
Definition: arbel.h:13
struct hermonprm_completion_with_error error
Definition: hermon.h:526
#define HERMON_HCR_QUERY_EQ
Definition: hermon.h:59
static int hermon_cmd_hw2sw_cq(struct hermon *hermon, unsigned long cqn, struct hermonprm_completion_queue_context *cqctx)
Definition: hermon.c:391
#define DBGC2_HDA(...)
Definition: compiler.h:523
uint32_t rdma_key
RDMA key.
Definition: infiniband.h:454
#define HERMON_HCR_REG(x)
Definition: hermon.h:927
static int hermon_cmd_sense_port(struct hermon *hermon, unsigned int port, struct hermonprm_sense_port *port_type)
Definition: hermon.c:527
pseudo_bit_t hash[0x00010]
Hash algorithm.
Definition: arbel.h:13
Non-volatile stored options.
unsigned int port
Port number.
Definition: infiniband.h:418
static __always_inline void ibdev_put(struct ib_device *ibdev)
Drop reference to Infiniband device.
Definition: infiniband.h:597
static __always_inline void ib_cq_set_drvdata(struct ib_completion_queue *cq, void *priv)
Set Infiniband completion queue driver-private data.
Definition: infiniband.h:674
char * strerror(int errno)
Retrieve string representation of error number.
Definition: strerror.c:78
FILE_LICENCE(GPL2_OR_LATER)
union ib_gid sgid
Source GID.
Definition: ib_packet.h:104
unsigned int reserved_qps
Number of reserved QPs.
Definition: hermon.h:563
static void(* free)(struct refcnt *refcnt))
Definition: refcnt.h:54
struct hermon_event_queue eq
Event queue.
Definition: hermon.h:884
static __always_inline void * ib_qp_get_ownerdata(struct ib_queue_pair *qp)
Get Infiniband queue pair owner-private data.
Definition: infiniband.h:663
__be32 syndrome
Definition: CIB_PRM.h:30
void * zalloc(size_t size)
Allocate cleared memory.
Definition: malloc.c:624
__be16 rlid
Definition: CIB_PRM.h:38
#define HERMON_HCR_QUERY_DEV_CAP
Definition: hermon.h:47
PCI bus.
static struct hermon_port_type hermon_port_type_eth
Hermon Ethernet port type.
Definition: hermon.c:3478
unsigned int reserved_mtts
Number of reserved MTTs.
Definition: hermon.h:583
A PCI device.
Definition: pci.h:187
int register_netdev(struct net_device *netdev)
Register network device.
Definition: netdevice.c:667
static size_t iob_len(struct io_buffer *iobuf)
Calculate length of data in an I/O buffer.
Definition: iobuf.h:151
const char * eth_ntoa(const void *ll_addr)
Transcribe Ethernet address.
Definition: ethernet.c:175
#define HERMON_UNMAP_EQ
Definition: hermon.h:115
struct pci_device * pci
PCI device.
Definition: hermon.h:844
static int hermon_eth_transmit(struct net_device *netdev, struct io_buffer *iobuf)
Transmit packet via Hermon Ethernet device.
Definition: hermon.c:3155
static int hermon_post_recv(struct ib_device *ibdev, struct ib_queue_pair *qp, struct io_buffer *iobuf)
Post receive work queue entry.
Definition: hermon.c:1648
#define ENFILE
Too many open files in system.
Definition: errno.h:493
struct ib_device * alloc_ibdev(size_t priv_size)
Allocate Infiniband device.
Definition: infiniband.c:937
#define HERMON_HCR_MGID_HASH
Definition: hermon.h:73
static void hermon_state_change_ibdev(struct hermon *hermon __unused, struct hermon_port *port, int link_up __unused)
Handle Hermon Infiniband device port state change.
Definition: hermon.c:3103
A PCI configuration space backup.
Definition: pcibackup.h:15
User memory allocation.
size_t firmware_len
Firmware size.
Definition: hermon.h:861
size_t icm_len
ICM size.
Definition: hermon.h:872
Definition: hermon.h:529
#define MLX_GET(_ptr, _field)
Definition: mlx_bitops.h:222
#define HERMON_MTU_2048
Definition: hermon.h:99
#define HERMON_HCR_MAP_FA
Definition: hermon.h:86
A network device.
Definition: netdevice.h:348
#define HERMON_HCR_CLOSE_PORT
Definition: hermon.h:52
u8 sl
Definition: CIB_PRM.h:42
#define HERMON_MAX_PORTS
Definition: hermon.h:27
long int random(void)
Generate a pseudo-random number between 0 and 2147483647L or 2147483562?
Definition: random.c:30
A BOFM device.
Definition: bofm.h:286
static size_t iob_tailroom(struct io_buffer *iobuf)
Calculate available space at end of an I/O buffer.
Definition: iobuf.h:171
size_t len
Length.
Definition: hermon.h:606
static void hermon_free_mtt(struct hermon *hermon, struct hermon_mtt *mtt)
Free MTT entries.
Definition: hermon.c:694
union hermon_send_wqe * wqe
Work queue entries.
Definition: hermon.h:667
An Infiniband Completion Queue.
Definition: infiniband.h:224
static void netdev_nullify(struct net_device *netdev)
Stop using a network device.
Definition: netdevice.h:511
static struct ib_queue_pair_operations hermon_eth_qp_op
Hermon Ethernet queue pair operations.
Definition: hermon.c:3174
#define HERMON_UAR_NON_EQ_PAGE
UAR page for doorbell accesses.
Definition: hermon.h:624
int ib_smc_update(struct ib_device *ibdev, ib_local_mad_t local_mad)
Update Infiniband parameters using SMC.
Definition: ib_smc.c:249
static void hermon_eth_close(struct net_device *netdev)
Close Hermon Ethernet device.
Definition: hermon.c:3359
#define MLX_FILL_1(_ptr, _index,...)
Definition: mlx_bitops.h:167
void * uar
PCI user Access Region.
Definition: hermon.h:848
u32 addr
Definition: sky2.h:8
struct hermonprm_wqe_segment_data_ptr data[HERMON_MAX_SCATTER]
Definition: hermon.h:521
#define MLX_FILL_H(_structure_st, _index, _field, _address)
Definition: mlx_bitops.h:240
static struct bofm_operations hermon_bofm_operations
Hermon BOFM operations.
Definition: hermon.c:3711
unsigned char uint8_t
Definition: stdint.h:10
struct hermonprm_wqe_segment_ud ud
Definition: hermon.h:498
static int hermon_bofm_harvest(struct bofm_device *bofm, unsigned int mport, uint8_t *mac)
Harvest Ethernet MAC for BOFM.
Definition: hermon.c:3634
unsigned int reserved_eqs
Number of reserved EQs.
Definition: hermon.h:579
int dpdp
Dual-port different protocol.
Definition: hermon.h:595
void ib_destroy_qp(struct ib_device *ibdev, struct ib_queue_pair *qp)
Destroy queue pair.
Definition: infiniband.c:314
static int hermon_register_ibdev(struct hermon *hermon, struct hermon_port *port)
Register Hermon Infiniband device.
Definition: hermon.c:3078
Mellanox Hermon Infiniband HCA driver.
#define HERMON_MOD_STAT_CFG_QUERY
Definition: hermon.h:138
size_t icm_aux_len
ICM AUX size.
Definition: hermon.h:874
#define HERMON_HCR_VOID_CMD(_opcode)
Definition: hermon.h:964
#define HERMON_RESET_OFFSET
Definition: hermon.h:36
#define HERMON_HCR_IN_CMD(_opcode, _in_mbox, _in_len)
Definition: hermon.h:958
struct hermon_dev_cap cap
Device capabilities.
Definition: hermon.h:899
size_t eqc_entry_size
EQ context entry size.
Definition: hermon.h:581
size_t wqe_size
Size of work queue.
Definition: hermon.h:669
#define HERMON_HCR_CLOSE_HCA
Definition: hermon.h:50
unsigned long qpn
Queue Pair Number.
Definition: infiniband.h:74
struct hermonprm_mtt mtt
Definition: hermon.h:491
int bofm_register(struct bofm_device *bofm)
Register BOFM device.
Definition: bofm.c:49
int register_ibdev(struct ib_device *ibdev)
Register Infiniband device.
Definition: infiniband.c:964
#define ETH_ALEN
Definition: if_ether.h:8
#define HERMON_HCR_OUT_MBOX
Definition: hermon.h:944
static void hermon_remove(struct pci_device *pci)
Remove PCI device.
Definition: hermon.c:3911
#define IB_PORT_STATE_DOWN
Definition: ib_mad.h:151
#define HERMON_HCR_OUT_CMD(_opcode, _out_mbox, _out_len)
Definition: hermon.h:961
A PCI device ID list entry.
Definition: pci.h:151
struct ib_queue_pair * qp
Containing queue pair.
Definition: infiniband.h:102
uint8_t headers[IB_MAX_HEADER_SIZE]
Definition: arbel.h:14
static int hermon_cmd_mgid_hash(struct hermon *hermon, const union ib_gid *gid, struct hermonprm_mgm_hash *hash)
Definition: hermon.c:497
#define HERMON_HCR_SW2HW_EQ
Definition: hermon.h:57
#define HERMON_HCR_RUN_FW
Definition: hermon.h:77
static void hermon_bitmask_free(hermon_bitmask_t *bits, int bit, unsigned int num_bits)
Free offsets within usage bitmask.
Definition: hermon.c:112
static int hermon_inform_sma(struct ib_device *ibdev, union ib_mad *mad)
Inform embedded subnet management agent of a received MAD.
Definition: hermon.c:2943
unsigned int uint32_t
Definition: stdint.h:12
static int hermon_cmd_map_fa(struct hermon *hermon, const struct hermonprm_virtual_physical_mapping *map)
Definition: hermon.c:595
__be16 c_eqn
Definition: CIB_PRM.h:38
#define HERMON_HCR_SW2HW_CQ
Definition: hermon.h:60
unsigned long next_idx
Next work queue entry index.
Definition: infiniband.h:122
struct hermonprm_port_state_change_event port_state_change
Definition: hermon.h:531
#define HERMON_OPCODE_SEND_ERROR
Definition: hermon.h:44
uint32_t ds
Definition: librm.h:254
static void hermon_state_change_netdev(struct hermon *hermon __unused, struct hermon_port *port, int link_up)
Handle Hermon Ethernet device port state change.
Definition: hermon.c:3451
static struct xen_remove_from_physmap * remove
Definition: xenmem.h:39
#define IB_VL_0
Definition: ib_mad.h:165
unsigned long next_idx
Next completion queue entry index.
Definition: infiniband.h:240
<