iPXE
arbel.c
/*
 * Copyright (C) 2007 Michael Brown <mbrown@fensystems.co.uk>.
 *
 * Based in part upon the original driver by Mellanox Technologies
 * Ltd.  Portions may be Copyright (c) Mellanox Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 * You can also choose to distribute this program under the terms of
 * the Unmodified Binary Distribution Licence (as given in the file
 * COPYING.UBDL), provided that you have satisfied its requirements.
 */

FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <errno.h>
#include <byteswap.h>
#include <ipxe/io.h>
#include <ipxe/pci.h>
#include <ipxe/pcibackup.h>
#include <ipxe/malloc.h>
#include <ipxe/umalloc.h>
#include <ipxe/iobuf.h>
#include <ipxe/netdevice.h>
#include <ipxe/infiniband.h>
#include <ipxe/ib_smc.h>
#include "arbel.h"

/**
 * @file
 *
 * Mellanox Arbel Infiniband HCA
 *
 */

/***************************************************************************
 *
 * Queue number allocation
 *
 ***************************************************************************
 */

/**
 * Allocate offset within usage bitmask
 *
 * @v bits		Usage bitmask
 * @v bits_len		Length of usage bitmask
 * @ret bit		First free bit within bitmask, or negative error
 */
static int arbel_bitmask_alloc ( arbel_bitmask_t *bits,
				 unsigned int bits_len ) {
	unsigned int bit = 0;
	arbel_bitmask_t mask = 1;

	while ( bit < bits_len ) {
		if ( ( mask & *bits ) == 0 ) {
			*bits |= mask;
			return bit;
		}
		bit++;
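		/* Rotate the mask left by one bit; when it wraps back
		 * around to bit 0, advance to the next bitmask word. */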
		mask = ( mask << 1 ) | ( mask >> ( 8 * sizeof ( mask ) - 1 ) );
		if ( mask == 1 )
			bits++;
	}
	return -ENFILE;
}

/**
 * Free offset within usage bitmask
 *
 * @v bits		Usage bitmask
 * @v bit		Bit within bitmask
 */
static void arbel_bitmask_free ( arbel_bitmask_t *bits, int bit ) {
	arbel_bitmask_t mask;

	mask = ( 1 << ( bit % ( 8 * sizeof ( mask ) ) ) );
	bits += ( bit / ( 8 * sizeof ( mask ) ) );
	*bits &= ~mask;
}

/***************************************************************************
 *
 * HCA commands
 *
 ***************************************************************************
 */

/**
 * Wait for Arbel command completion
 *
 * @v arbel		Arbel device
 * @v hcr		HCA command registers
 * @ret rc		Return status code
 */
static int arbel_cmd_wait ( struct arbel *arbel,
			    struct arbelprm_hca_command_register *hcr ) {
	unsigned int wait;

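	/* Poll the "go" bit in HCR dword 6; the command interface is
	 * free once the hardware has cleared it. */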
	for ( wait = ARBEL_HCR_MAX_WAIT_MS ; wait ; wait-- ) {
		hcr->u.dwords[6] =
			readl ( arbel->config + ARBEL_HCR_REG ( 6 ) );
		if ( MLX_GET ( hcr, go ) == 0 )
			return 0;
		mdelay ( 1 );
	}
	return -EBUSY;
}

/**
 * Issue HCA command
 *
 * @v arbel		Arbel device
 * @v command		Command opcode, flags and input/output lengths
 * @v op_mod		Opcode modifier (0 if no modifier applicable)
 * @v in		Input parameters
 * @v in_mod		Input modifier (0 if no modifier applicable)
 * @v out		Output parameters
 * @ret rc		Return status code
 */
static int arbel_cmd ( struct arbel *arbel, unsigned long command,
		       unsigned int op_mod, const void *in,
		       unsigned int in_mod, void *out ) {
	struct arbelprm_hca_command_register hcr;
	unsigned int opcode = ARBEL_HCR_OPCODE ( command );
	size_t in_len = ARBEL_HCR_IN_LEN ( command );
	size_t out_len = ARBEL_HCR_OUT_LEN ( command );
	void *in_buffer;
	void *out_buffer;
	unsigned int status;
	unsigned int i;
	int rc;

	assert ( in_len <= ARBEL_MBOX_SIZE );
	assert ( out_len <= ARBEL_MBOX_SIZE );

	DBGC2 ( arbel, "Arbel %p command %02x in %zx%s out %zx%s\n",
		arbel, opcode, in_len,
		( ( command & ARBEL_HCR_IN_MBOX ) ? "(mbox)" : "" ), out_len,
		( ( command & ARBEL_HCR_OUT_MBOX ) ? "(mbox)" : "" ) );

	/* Check that HCR is free */
	if ( ( rc = arbel_cmd_wait ( arbel, &hcr ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p command interface locked\n", arbel );
		return rc;
	}

	/* Prepare HCR */
	memset ( &hcr, 0, sizeof ( hcr ) );
	in_buffer = &hcr.u.dwords[0];
	if ( in_len && ( command & ARBEL_HCR_IN_MBOX ) ) {
		in_buffer = arbel->mailbox_in;
		MLX_FILL_H ( &hcr, 0, in_param_h, virt_to_bus ( in_buffer ) );
		MLX_FILL_1 ( &hcr, 1, in_param_l, virt_to_bus ( in_buffer ) );
	}
	memcpy ( in_buffer, in, in_len );
	MLX_FILL_1 ( &hcr, 2, input_modifier, in_mod );
	out_buffer = &hcr.u.dwords[3];
	if ( out_len && ( command & ARBEL_HCR_OUT_MBOX ) ) {
		out_buffer = arbel->mailbox_out;
		MLX_FILL_H ( &hcr, 3, out_param_h,
			     virt_to_bus ( out_buffer ) );
		MLX_FILL_1 ( &hcr, 4, out_param_l,
			     virt_to_bus ( out_buffer ) );
	}
	MLX_FILL_3 ( &hcr, 6,
		     opcode, opcode,
		     opcode_modifier, op_mod,
		     go, 1 );
	DBGC ( arbel, "Arbel %p issuing command %04x\n", arbel, opcode );
	DBGC2_HDA ( arbel, virt_to_phys ( arbel->config + ARBEL_HCR_BASE ),
		    &hcr, sizeof ( hcr ) );
	if ( in_len && ( command & ARBEL_HCR_IN_MBOX ) ) {
		DBGC2 ( arbel, "Input mailbox:\n" );
		DBGC2_HDA ( arbel, virt_to_phys ( in_buffer ), in_buffer,
			    ( ( in_len < 512 ) ? in_len : 512 ) );
	}

	/* Issue command */
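	/* (The HCR dwords are written in ascending order, so that
	 *  dword 6, which contains the "go" bit, is written last.) */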
	for ( i = 0 ; i < ( sizeof ( hcr ) / sizeof ( hcr.u.dwords[0] ) ) ;
	      i++ ) {
		writel ( hcr.u.dwords[i],
			 arbel->config + ARBEL_HCR_REG ( i ) );
		barrier();
	}

	/* Wait for command completion */
	if ( ( rc = arbel_cmd_wait ( arbel, &hcr ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p timed out waiting for command:\n",
		       arbel );
		DBGC_HD ( arbel, &hcr, sizeof ( hcr ) );
		return rc;
	}

	/* Check command status */
	status = MLX_GET ( &hcr, status );
	if ( status != 0 ) {
		DBGC ( arbel, "Arbel %p command failed with status %02x:\n",
		       arbel, status );
		DBGC_HD ( arbel, &hcr, sizeof ( hcr ) );
		return -EIO;
	}

	/* Read output parameters, if any */
	hcr.u.dwords[3] = readl ( arbel->config + ARBEL_HCR_REG ( 3 ) );
	hcr.u.dwords[4] = readl ( arbel->config + ARBEL_HCR_REG ( 4 ) );
	memcpy ( out, out_buffer, out_len );
	if ( out_len ) {
		DBGC2 ( arbel, "Output%s:\n",
			( command & ARBEL_HCR_OUT_MBOX ) ? " mailbox" : "" );
		DBGC2_HDA ( arbel, virt_to_phys ( out_buffer ), out_buffer,
			    ( ( out_len < 512 ) ? out_len : 512 ) );
	}

	return 0;
}

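/* Thin wrappers around arbel_cmd(): each encodes a single HCA
 * command's opcode, mailbox direction and parameter sizes via the
 * ARBEL_HCR_*_CMD() macros. */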
static inline int
arbel_cmd_query_dev_lim ( struct arbel *arbel,
			  struct arbelprm_query_dev_lim *dev_lim ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_OUT_CMD ( ARBEL_HCR_QUERY_DEV_LIM,
					       1, sizeof ( *dev_lim ) ),
			   0, NULL, 0, dev_lim );
}

static inline int
arbel_cmd_query_fw ( struct arbel *arbel, struct arbelprm_query_fw *fw ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_OUT_CMD ( ARBEL_HCR_QUERY_FW,
					       1, sizeof ( *fw ) ),
			   0, NULL, 0, fw );
}

static inline int
arbel_cmd_init_hca ( struct arbel *arbel,
		     const struct arbelprm_init_hca *init_hca ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_INIT_HCA,
					      1, sizeof ( *init_hca ) ),
			   0, init_hca, 0, NULL );
}

static inline int
arbel_cmd_close_hca ( struct arbel *arbel ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_VOID_CMD ( ARBEL_HCR_CLOSE_HCA ),
			   0, NULL, 0, NULL );
}

static inline int
arbel_cmd_init_ib ( struct arbel *arbel, unsigned int port,
		    const struct arbelprm_init_ib *init_ib ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_INIT_IB,
					      1, sizeof ( *init_ib ) ),
			   0, init_ib, port, NULL );
}

static inline int
arbel_cmd_close_ib ( struct arbel *arbel, unsigned int port ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_VOID_CMD ( ARBEL_HCR_CLOSE_IB ),
			   0, NULL, port, NULL );
}

static inline int
arbel_cmd_sw2hw_mpt ( struct arbel *arbel, unsigned int index,
		      const struct arbelprm_mpt *mpt ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_SW2HW_MPT,
					      1, sizeof ( *mpt ) ),
			   0, mpt, index, NULL );
}

static inline int
arbel_cmd_map_eq ( struct arbel *arbel, unsigned long index_map,
		   const struct arbelprm_event_mask *mask ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_MAP_EQ,
					      0, sizeof ( *mask ) ),
			   0, mask, index_map, NULL );
}

static inline int
arbel_cmd_sw2hw_eq ( struct arbel *arbel, unsigned int index,
		     const struct arbelprm_eqc *eqctx ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_SW2HW_EQ,
					      1, sizeof ( *eqctx ) ),
			   0, eqctx, index, NULL );
}

static inline int
arbel_cmd_hw2sw_eq ( struct arbel *arbel, unsigned int index,
		     struct arbelprm_eqc *eqctx ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_OUT_CMD ( ARBEL_HCR_HW2SW_EQ,
					       1, sizeof ( *eqctx ) ),
			   1, NULL, index, eqctx );
}

static inline int
arbel_cmd_sw2hw_cq ( struct arbel *arbel, unsigned long cqn,
		     const struct arbelprm_completion_queue_context *cqctx ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_SW2HW_CQ,
					      1, sizeof ( *cqctx ) ),
			   0, cqctx, cqn, NULL );
}

static inline int
arbel_cmd_hw2sw_cq ( struct arbel *arbel, unsigned long cqn,
		     struct arbelprm_completion_queue_context *cqctx ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_OUT_CMD ( ARBEL_HCR_HW2SW_CQ,
					       1, sizeof ( *cqctx ) ),
			   0, NULL, cqn, cqctx );
}

static inline int
arbel_cmd_query_cq ( struct arbel *arbel, unsigned long cqn,
		     struct arbelprm_completion_queue_context *cqctx ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_OUT_CMD ( ARBEL_HCR_QUERY_CQ,
					       1, sizeof ( *cqctx ) ),
			   0, NULL, cqn, cqctx );
}

static inline int
arbel_cmd_rst2init_qpee ( struct arbel *arbel, unsigned long qpn,
			  const struct arbelprm_qp_ee_state_transitions *ctx ){
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_RST2INIT_QPEE,
					      1, sizeof ( *ctx ) ),
			   0, ctx, qpn, NULL );
}

static inline int
arbel_cmd_init2rtr_qpee ( struct arbel *arbel, unsigned long qpn,
			  const struct arbelprm_qp_ee_state_transitions *ctx ){
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_INIT2RTR_QPEE,
					      1, sizeof ( *ctx ) ),
			   0, ctx, qpn, NULL );
}

static inline int
arbel_cmd_rtr2rts_qpee ( struct arbel *arbel, unsigned long qpn,
			 const struct arbelprm_qp_ee_state_transitions *ctx ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_RTR2RTS_QPEE,
					      1, sizeof ( *ctx ) ),
			   0, ctx, qpn, NULL );
}

static inline int
arbel_cmd_rts2rts_qpee ( struct arbel *arbel, unsigned long qpn,
			 const struct arbelprm_qp_ee_state_transitions *ctx ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_RTS2RTS_QPEE,
					      1, sizeof ( *ctx ) ),
			   0, ctx, qpn, NULL );
}

static inline int
arbel_cmd_2rst_qpee ( struct arbel *arbel, unsigned long qpn ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_VOID_CMD ( ARBEL_HCR_2RST_QPEE ),
			   0x03, NULL, qpn, NULL );
}

static inline int
arbel_cmd_query_qpee ( struct arbel *arbel, unsigned long qpn,
		       struct arbelprm_qp_ee_state_transitions *ctx ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_OUT_CMD ( ARBEL_HCR_QUERY_QPEE,
					       1, sizeof ( *ctx ) ),
			   0, NULL, qpn, ctx );
}

static inline int
arbel_cmd_conf_special_qp ( struct arbel *arbel, unsigned int qp_type,
			    unsigned long base_qpn ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_VOID_CMD ( ARBEL_HCR_CONF_SPECIAL_QP ),
			   qp_type, NULL, base_qpn, NULL );
}

static inline int
arbel_cmd_mad_ifc ( struct arbel *arbel, unsigned int port,
		    union arbelprm_mad *mad ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_INOUT_CMD ( ARBEL_HCR_MAD_IFC,
						 1, sizeof ( *mad ),
						 1, sizeof ( *mad ) ),
			   0x03, mad, port, mad );
}

static inline int
arbel_cmd_read_mgm ( struct arbel *arbel, unsigned int index,
		     struct arbelprm_mgm_entry *mgm ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_OUT_CMD ( ARBEL_HCR_READ_MGM,
					       1, sizeof ( *mgm ) ),
			   0, NULL, index, mgm );
}

static inline int
arbel_cmd_write_mgm ( struct arbel *arbel, unsigned int index,
		      const struct arbelprm_mgm_entry *mgm ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_WRITE_MGM,
					      1, sizeof ( *mgm ) ),
			   0, mgm, index, NULL );
}

static inline int
arbel_cmd_mgid_hash ( struct arbel *arbel, const union ib_gid *gid,
		      struct arbelprm_mgm_hash *hash ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_INOUT_CMD ( ARBEL_HCR_MGID_HASH,
						 1, sizeof ( *gid ),
						 0, sizeof ( *hash ) ),
			   0, gid, 0, hash );
}

static inline int
arbel_cmd_run_fw ( struct arbel *arbel ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_VOID_CMD ( ARBEL_HCR_RUN_FW ),
			   0, NULL, 0, NULL );
}

static inline int
arbel_cmd_disable_lam ( struct arbel *arbel ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_VOID_CMD ( ARBEL_HCR_DISABLE_LAM ),
			   0, NULL, 0, NULL );
}

static inline int
arbel_cmd_enable_lam ( struct arbel *arbel, struct arbelprm_access_lam *lam ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_OUT_CMD ( ARBEL_HCR_ENABLE_LAM,
					       1, sizeof ( *lam ) ),
			   1, NULL, 0, lam );
}

static inline int
arbel_cmd_unmap_icm ( struct arbel *arbel, unsigned int page_count,
		      const struct arbelprm_scalar_parameter *offset ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_UNMAP_ICM, 0,
					      sizeof ( *offset ) ),
			   0, offset, page_count, NULL );
}

static inline int
arbel_cmd_map_icm ( struct arbel *arbel,
		    const struct arbelprm_virtual_physical_mapping *map ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_MAP_ICM,
					      1, sizeof ( *map ) ),
			   0, map, 1, NULL );
}

static inline int
arbel_cmd_unmap_icm_aux ( struct arbel *arbel ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_VOID_CMD ( ARBEL_HCR_UNMAP_ICM_AUX ),
			   0, NULL, 0, NULL );
}

static inline int
arbel_cmd_map_icm_aux ( struct arbel *arbel,
			const struct arbelprm_virtual_physical_mapping *map ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_MAP_ICM_AUX,
					      1, sizeof ( *map ) ),
			   0, map, 1, NULL );
}

static inline int
arbel_cmd_set_icm_size ( struct arbel *arbel,
			 const struct arbelprm_scalar_parameter *icm_size,
			 struct arbelprm_scalar_parameter *icm_aux_size ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_INOUT_CMD ( ARBEL_HCR_SET_ICM_SIZE,
						 0, sizeof ( *icm_size ),
						 0, sizeof ( *icm_aux_size ) ),
			   0, icm_size, 0, icm_aux_size );
}

static inline int
arbel_cmd_unmap_fa ( struct arbel *arbel ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_VOID_CMD ( ARBEL_HCR_UNMAP_FA ),
			   0, NULL, 0, NULL );
}

static inline int
arbel_cmd_map_fa ( struct arbel *arbel,
		   const struct arbelprm_virtual_physical_mapping *map ) {
	return arbel_cmd ( arbel,
			   ARBEL_HCR_IN_CMD ( ARBEL_HCR_MAP_FA,
					      1, sizeof ( *map ) ),
			   0, map, 1, NULL );
}

/***************************************************************************
 *
 * MAD operations
 *
 ***************************************************************************
 */

/**
 * Issue management datagram
 *
 * @v ibdev		Infiniband device
 * @v mad		Management datagram
 * @ret rc		Return status code
 */
static int arbel_mad ( struct ib_device *ibdev, union ib_mad *mad ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	union arbelprm_mad mad_ifc;
	int rc;

	/* Sanity check */
	static_assert ( sizeof ( *mad ) == sizeof ( mad_ifc.mad ) );

	/* Copy in request packet */
	memcpy ( &mad_ifc.mad, mad, sizeof ( mad_ifc.mad ) );

	/* Issue MAD */
	if ( ( rc = arbel_cmd_mad_ifc ( arbel, ibdev->port,
					&mad_ifc ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p port %d could not issue MAD IFC: %s\n",
		       arbel, ibdev->port, strerror ( rc ) );
		return rc;
	}

	/* Copy out reply packet */
	memcpy ( mad, &mad_ifc.mad, sizeof ( *mad ) );

	if ( mad->hdr.status != 0 ) {
		DBGC ( arbel, "Arbel %p port %d MAD IFC status %04x\n",
		       arbel, ibdev->port, ntohs ( mad->hdr.status ) );
		return -EIO;
	}
	return 0;
}

/***************************************************************************
 *
 * Completion queue operations
 *
 ***************************************************************************
 */

/**
 * Dump completion queue context (for debugging only)
 *
 * @v arbel		Arbel device
 * @v cq		Completion queue
 * @ret rc		Return status code
 */
static __attribute__ (( unused )) int
arbel_dump_cqctx ( struct arbel *arbel, struct ib_completion_queue *cq ) {
	struct arbelprm_completion_queue_context cqctx;
	int rc;

	memset ( &cqctx, 0, sizeof ( cqctx ) );
	if ( ( rc = arbel_cmd_query_cq ( arbel, cq->cqn, &cqctx ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p CQN %#lx QUERY_CQ failed: %s\n",
		       arbel, cq->cqn, strerror ( rc ) );
		return rc;
	}
	DBGC ( arbel, "Arbel %p CQN %#lx context:\n", arbel, cq->cqn );
	DBGC_HDA ( arbel, 0, &cqctx, sizeof ( cqctx ) );

	return 0;
}

603 
604 /**
605  * Create completion queue
606  *
607  * @v ibdev Infiniband device
608  * @v cq Completion queue
609  * @ret rc Return status code
610  */
611 static int arbel_create_cq ( struct ib_device *ibdev,
612  struct ib_completion_queue *cq ) {
613  struct arbel *arbel = ib_get_drvdata ( ibdev );
614  struct arbel_completion_queue *arbel_cq;
615  struct arbelprm_completion_queue_context cqctx;
616  struct arbelprm_cq_ci_db_record *ci_db_rec;
617  struct arbelprm_cq_arm_db_record *arm_db_rec;
618  int cqn_offset;
619  unsigned int i;
620  int rc;
621 
622  /* Find a free completion queue number */
623  cqn_offset = arbel_bitmask_alloc ( arbel->cq_inuse, ARBEL_MAX_CQS );
624  if ( cqn_offset < 0 ) {
625  DBGC ( arbel, "Arbel %p out of completion queues\n", arbel );
626  rc = cqn_offset;
627  goto err_cqn_offset;
628  }
629  cq->cqn = ( arbel->limits.reserved_cqs + cqn_offset );
630 
631  /* Allocate control structures */
632  arbel_cq = zalloc ( sizeof ( *arbel_cq ) );
633  if ( ! arbel_cq ) {
634  rc = -ENOMEM;
635  goto err_arbel_cq;
636  }
637  arbel_cq->ci_doorbell_idx = arbel_cq_ci_doorbell_idx ( arbel, cq );
639 
640  /* Allocate completion queue itself */
641  arbel_cq->cqe_size = ( cq->num_cqes * sizeof ( arbel_cq->cqe[0] ) );
642  arbel_cq->cqe = malloc_phys ( arbel_cq->cqe_size,
643  sizeof ( arbel_cq->cqe[0] ) );
644  if ( ! arbel_cq->cqe ) {
645  rc = -ENOMEM;
646  goto err_cqe;
647  }
648  memset ( arbel_cq->cqe, 0, arbel_cq->cqe_size );
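	/* Set the ownership bit in every entry, so that all CQEs
	 * initially belong to the hardware. */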
	for ( i = 0 ; i < cq->num_cqes ; i++ ) {
		MLX_FILL_1 ( &arbel_cq->cqe[i].normal, 7, owner, 1 );
	}
	barrier();

	/* Initialise doorbell records */
	ci_db_rec = &arbel->db_rec[arbel_cq->ci_doorbell_idx].cq_ci;
	MLX_FILL_1 ( ci_db_rec, 0, counter, 0 );
	MLX_FILL_2 ( ci_db_rec, 1,
		     res, ARBEL_UAR_RES_CQ_CI,
		     cq_number, cq->cqn );
	arm_db_rec = &arbel->db_rec[arbel_cq->arm_doorbell_idx].cq_arm;
	MLX_FILL_1 ( arm_db_rec, 0, counter, 0 );
	MLX_FILL_2 ( arm_db_rec, 1,
		     res, ARBEL_UAR_RES_CQ_ARM,
		     cq_number, cq->cqn );

	/* Hand queue over to hardware */
	memset ( &cqctx, 0, sizeof ( cqctx ) );
	MLX_FILL_1 ( &cqctx, 0, st, 0xa /* "Event fired" */ );
	MLX_FILL_H ( &cqctx, 1, start_address_h,
		     virt_to_bus ( arbel_cq->cqe ) );
	MLX_FILL_1 ( &cqctx, 2, start_address_l,
		     virt_to_bus ( arbel_cq->cqe ) );
	MLX_FILL_2 ( &cqctx, 3,
		     usr_page, arbel->limits.reserved_uars,
		     log_cq_size, fls ( cq->num_cqes - 1 ) );
	MLX_FILL_1 ( &cqctx, 5, c_eqn, arbel->eq.eqn );
	MLX_FILL_1 ( &cqctx, 6, pd, ARBEL_GLOBAL_PD );
	MLX_FILL_1 ( &cqctx, 7, l_key, arbel->lkey );
	MLX_FILL_1 ( &cqctx, 12, cqn, cq->cqn );
	MLX_FILL_1 ( &cqctx, 13,
		     cq_ci_db_record, arbel_cq->ci_doorbell_idx );
	MLX_FILL_1 ( &cqctx, 14,
		     cq_state_db_record, arbel_cq->arm_doorbell_idx );
	if ( ( rc = arbel_cmd_sw2hw_cq ( arbel, cq->cqn, &cqctx ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p CQN %#lx SW2HW_CQ failed: %s\n",
		       arbel, cq->cqn, strerror ( rc ) );
		goto err_sw2hw_cq;
	}

	DBGC ( arbel, "Arbel %p CQN %#lx ring [%08lx,%08lx), doorbell %08lx\n",
	       arbel, cq->cqn, virt_to_phys ( arbel_cq->cqe ),
	       ( virt_to_phys ( arbel_cq->cqe ) + arbel_cq->cqe_size ),
	       virt_to_phys ( ci_db_rec ) );
	ib_cq_set_drvdata ( cq, arbel_cq );
	return 0;

 err_sw2hw_cq:
	MLX_FILL_1 ( ci_db_rec, 1, res, ARBEL_UAR_RES_NONE );
	MLX_FILL_1 ( arm_db_rec, 1, res, ARBEL_UAR_RES_NONE );
	free_phys ( arbel_cq->cqe, arbel_cq->cqe_size );
 err_cqe:
	free ( arbel_cq );
 err_arbel_cq:
	arbel_bitmask_free ( arbel->cq_inuse, cqn_offset );
 err_cqn_offset:
	return rc;
}

/**
 * Destroy completion queue
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 */
static void arbel_destroy_cq ( struct ib_device *ibdev,
			       struct ib_completion_queue *cq ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	struct arbel_completion_queue *arbel_cq = ib_cq_get_drvdata ( cq );
	struct arbelprm_completion_queue_context cqctx;
	struct arbelprm_cq_ci_db_record *ci_db_rec;
	struct arbelprm_cq_arm_db_record *arm_db_rec;
	int cqn_offset;
	int rc;

	/* Take ownership back from hardware */
	if ( ( rc = arbel_cmd_hw2sw_cq ( arbel, cq->cqn, &cqctx ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p CQN %#lx FATAL HW2SW_CQ failed: "
		       "%s\n", arbel, cq->cqn, strerror ( rc ) );
		/* Leak memory and return; at least we avoid corruption */
		return;
	}

	/* Clear doorbell records */
	ci_db_rec = &arbel->db_rec[arbel_cq->ci_doorbell_idx].cq_ci;
	arm_db_rec = &arbel->db_rec[arbel_cq->arm_doorbell_idx].cq_arm;
	MLX_FILL_1 ( ci_db_rec, 1, res, ARBEL_UAR_RES_NONE );
	MLX_FILL_1 ( arm_db_rec, 1, res, ARBEL_UAR_RES_NONE );

	/* Free memory */
	free_phys ( arbel_cq->cqe, arbel_cq->cqe_size );
	free ( arbel_cq );

	/* Mark queue number as free */
	cqn_offset = ( cq->cqn - arbel->limits.reserved_cqs );
	arbel_bitmask_free ( arbel->cq_inuse, cqn_offset );

	ib_cq_set_drvdata ( cq, NULL );
}

/***************************************************************************
 *
 * Queue pair operations
 *
 ***************************************************************************
 */

/**
 * Assign queue pair number
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @ret rc		Return status code
 */
static int arbel_alloc_qpn ( struct ib_device *ibdev,
			     struct ib_queue_pair *qp ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	unsigned int port_offset;
	int qpn_offset;

	/* Calculate queue pair number */
	port_offset = ( ibdev->port - ARBEL_PORT_BASE );

	switch ( qp->type ) {
	case IB_QPT_SMI:
		qp->qpn = ( arbel->special_qpn_base + port_offset );
		return 0;
	case IB_QPT_GSI:
		qp->qpn = ( arbel->special_qpn_base + 2 + port_offset );
		return 0;
	case IB_QPT_UD:
	case IB_QPT_RC:
		/* Find a free queue pair number */
		qpn_offset = arbel_bitmask_alloc ( arbel->qp_inuse,
						   ARBEL_MAX_QPS );
		if ( qpn_offset < 0 ) {
			DBGC ( arbel, "Arbel %p out of queue pairs\n",
			       arbel );
			return qpn_offset;
		}
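		/* Randomise the high-order bits of the QPN; this
		 * presumably reduces the chance of colliding with a
		 * recently-valid QPN after a restart. */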
		qp->qpn = ( ( random() & ARBEL_QPN_RANDOM_MASK ) |
			    ( arbel->qpn_base + qpn_offset ) );
		return 0;
	default:
		DBGC ( arbel, "Arbel %p unsupported QP type %d\n",
		       arbel, qp->type );
		return -ENOTSUP;
	}
}

/**
 * Free queue pair number
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 */
static void arbel_free_qpn ( struct ib_device *ibdev,
			     struct ib_queue_pair *qp ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	int qpn_offset;

	qpn_offset = ( ( qp->qpn & ~ARBEL_QPN_RANDOM_MASK ) - arbel->qpn_base );
	if ( qpn_offset >= 0 )
		arbel_bitmask_free ( arbel->qp_inuse, qpn_offset );
}

/**
 * Calculate transmission rate
 *
 * @v av		Address vector
 * @ret arbel_rate	Arbel rate
 */
static unsigned int arbel_rate ( struct ib_address_vector *av ) {
	return ( ( ( av->rate >= IB_RATE_2_5 ) && ( av->rate <= IB_RATE_120 ) )
		 ? ( av->rate + 5 ) : 0 );
}

/** Queue pair transport service type map */
static uint8_t arbel_qp_st[] = {
	[IB_QPT_SMI] = ARBEL_ST_MLX,
	[IB_QPT_GSI] = ARBEL_ST_MLX,
	[IB_QPT_UD] = ARBEL_ST_UD,
	[IB_QPT_RC] = ARBEL_ST_RC,
};

/**
 * Dump queue pair context (for debugging only)
 *
 * @v arbel		Arbel device
 * @v qp		Queue pair
 * @ret rc		Return status code
 */
static __attribute__ (( unused )) int
arbel_dump_qpctx ( struct arbel *arbel, struct ib_queue_pair *qp ) {
	struct arbelprm_qp_ee_state_transitions qpctx;
	int rc;

	memset ( &qpctx, 0, sizeof ( qpctx ) );
	if ( ( rc = arbel_cmd_query_qpee ( arbel, qp->qpn, &qpctx ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p QPN %#lx QUERY_QPEE failed: %s\n",
		       arbel, qp->qpn, strerror ( rc ) );
		return rc;
	}
	DBGC ( arbel, "Arbel %p QPN %#lx context:\n", arbel, qp->qpn );
	DBGC_HDA ( arbel, 0, &qpctx.u.dwords[2], ( sizeof ( qpctx ) - 8 ) );

	return 0;
}

/**
 * Create send work queue
 *
 * @v arbel_send_wq	Send work queue
 * @v num_wqes		Number of work queue entries
 * @ret rc		Return status code
 */
static int arbel_create_send_wq ( struct arbel_send_work_queue *arbel_send_wq,
				  unsigned int num_wqes ) {
	union arbel_send_wqe *wqe;
	union arbel_send_wqe *next_wqe;
	unsigned int wqe_idx_mask;
	unsigned int i;

	/* Allocate work queue */
	arbel_send_wq->wqe_size = ( num_wqes *
				    sizeof ( arbel_send_wq->wqe[0] ) );
	arbel_send_wq->wqe = malloc_phys ( arbel_send_wq->wqe_size,
					   sizeof ( arbel_send_wq->wqe[0] ) );
	if ( ! arbel_send_wq->wqe )
		return -ENOMEM;
	memset ( arbel_send_wq->wqe, 0, arbel_send_wq->wqe_size );

	/* Link work queue entries */
	wqe_idx_mask = ( num_wqes - 1 );
	for ( i = 0 ; i < num_wqes ; i++ ) {
		wqe = &arbel_send_wq->wqe[i];
		next_wqe = &arbel_send_wq->wqe[ ( i + 1 ) & wqe_idx_mask ];
		MLX_FILL_1 ( &wqe->next, 0, nda_31_6,
			     ( virt_to_bus ( next_wqe ) >> 6 ) );
		MLX_FILL_1 ( &wqe->next, 1, always1, 1 );
	}

	return 0;
}

/**
 * Create receive work queue
 *
 * @v arbel_recv_wq	Receive work queue
 * @v num_wqes		Number of work queue entries
 * @v type		Queue pair type
 * @ret rc		Return status code
 */
static int arbel_create_recv_wq ( struct arbel_recv_work_queue *arbel_recv_wq,
				  unsigned int num_wqes,
				  enum ib_queue_pair_type type ) {
	struct arbelprm_recv_wqe *wqe;
	struct arbelprm_recv_wqe *next_wqe;
	unsigned int wqe_idx_mask;
	size_t nds;
	unsigned int i;
	unsigned int j;
	int rc;

	/* Allocate work queue */
	arbel_recv_wq->wqe_size = ( num_wqes *
				    sizeof ( arbel_recv_wq->wqe[0] ) );
	arbel_recv_wq->wqe = malloc_phys ( arbel_recv_wq->wqe_size,
					   sizeof ( arbel_recv_wq->wqe[0] ) );
	if ( ! arbel_recv_wq->wqe ) {
		rc = -ENOMEM;
		goto err_alloc_wqe;
	}
	memset ( arbel_recv_wq->wqe, 0, arbel_recv_wq->wqe_size );

	/* Allocate GRH entries, if needed */
	if ( ( type == IB_QPT_SMI ) || ( type == IB_QPT_GSI ) ||
	     ( type == IB_QPT_UD ) ) {
		arbel_recv_wq->grh_size = ( num_wqes *
					    sizeof ( arbel_recv_wq->grh[0] ) );
		arbel_recv_wq->grh = malloc_phys ( arbel_recv_wq->grh_size,
						   sizeof ( void * ) );
		if ( ! arbel_recv_wq->grh ) {
			rc = -ENOMEM;
			goto err_alloc_grh;
		}
	}

	/* Link work queue entries */
	wqe_idx_mask = ( num_wqes - 1 );
	nds = ( ( offsetof ( typeof ( *wqe ), data ) +
		  sizeof ( wqe->data[0] ) ) >> 4 );
	for ( i = 0 ; i < num_wqes ; i++ ) {
		wqe = &arbel_recv_wq->wqe[i].recv;
		next_wqe = &arbel_recv_wq->wqe[( i + 1 ) & wqe_idx_mask].recv;
		MLX_FILL_1 ( &wqe->next, 0, nda_31_6,
			     ( virt_to_bus ( next_wqe ) >> 6 ) );
		MLX_FILL_1 ( &wqe->next, 1, nds, nds );
		for ( j = 0 ; ( ( ( void * ) &wqe->data[j] ) <
				( ( void * ) ( wqe + 1 ) ) ) ; j++ ) {
			MLX_FILL_1 ( &wqe->data[j], 1,
				     l_key, ARBEL_INVALID_LKEY );
		}
	}

	return 0;

	free_phys ( arbel_recv_wq->grh, arbel_recv_wq->grh_size );
 err_alloc_grh:
	free_phys ( arbel_recv_wq->wqe, arbel_recv_wq->wqe_size );
 err_alloc_wqe:
	return rc;
}

/**
 * Create queue pair
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @ret rc		Return status code
 */
static int arbel_create_qp ( struct ib_device *ibdev,
			     struct ib_queue_pair *qp ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	struct arbel_queue_pair *arbel_qp;
	struct arbelprm_qp_ee_state_transitions qpctx;
	struct arbelprm_qp_db_record *send_db_rec;
	struct arbelprm_qp_db_record *recv_db_rec;
	physaddr_t send_wqe_base_adr;
	physaddr_t recv_wqe_base_adr;
	physaddr_t wqe_base_adr;
	int rc;

	/* Warn about dysfunctional code
	 *
	 * Arbel seems to crash the system as soon as the first send
	 * WQE completes on an RC queue pair.  (NOPs complete
	 * successfully, so this is a problem specific to the work
	 * queue rather than the completion queue.)  The cause of this
	 * problem has remained unknown for over a year.  Patches to
	 * fix this are welcome.
	 */
	if ( qp->type == IB_QPT_RC )
		DBG ( "*** WARNING: Arbel RC support is non-functional ***\n" );

	/* Calculate queue pair number */
	if ( ( rc = arbel_alloc_qpn ( ibdev, qp ) ) != 0 )
		goto err_alloc_qpn;

	/* Allocate control structures */
	arbel_qp = zalloc ( sizeof ( *arbel_qp ) );
	if ( ! arbel_qp ) {
		rc = -ENOMEM;
		goto err_arbel_qp;
	}
	arbel_qp->send.doorbell_idx = arbel_send_doorbell_idx ( arbel, qp );
	arbel_qp->recv.doorbell_idx = arbel_recv_doorbell_idx ( arbel, qp );

	/* Create send and receive work queues */
	if ( ( rc = arbel_create_send_wq ( &arbel_qp->send,
					   qp->send.num_wqes ) ) != 0 )
		goto err_create_send_wq;
	if ( ( rc = arbel_create_recv_wq ( &arbel_qp->recv, qp->recv.num_wqes,
					   qp->type ) ) != 0 )
		goto err_create_recv_wq;

	/* Send and receive work queue entries must be within the same 4GB */
	send_wqe_base_adr = virt_to_bus ( arbel_qp->send.wqe );
	recv_wqe_base_adr = virt_to_bus ( arbel_qp->recv.wqe );
	if ( ( sizeof ( physaddr_t ) > sizeof ( uint32_t ) ) &&
	     ( ( ( ( uint64_t ) send_wqe_base_adr ) >> 32 ) !=
	       ( ( ( uint64_t ) recv_wqe_base_adr ) >> 32 ) ) ) {
		DBGC ( arbel, "Arbel %p QPN %#lx cannot support send %08lx "
		       "recv %08lx\n", arbel, qp->qpn,
		       send_wqe_base_adr, recv_wqe_base_adr );
		rc = -ENOTSUP;
		goto err_unsupported_address_split;
	}
	wqe_base_adr = send_wqe_base_adr;

	/* Initialise doorbell records */
	send_db_rec = &arbel->db_rec[arbel_qp->send.doorbell_idx].qp;
	MLX_FILL_1 ( send_db_rec, 0, counter, 0 );
	MLX_FILL_2 ( send_db_rec, 1,
		     res, ARBEL_UAR_RES_SQ,
		     qp_number, qp->qpn );
	recv_db_rec = &arbel->db_rec[arbel_qp->recv.doorbell_idx].qp;
	MLX_FILL_1 ( recv_db_rec, 0, counter, 0 );
	MLX_FILL_2 ( recv_db_rec, 1,
		     res, ARBEL_UAR_RES_RQ,
		     qp_number, qp->qpn );

	/* Transition queue to INIT state */
	memset ( &qpctx, 0, sizeof ( qpctx ) );
	MLX_FILL_3 ( &qpctx, 2,
		     qpc_eec_data.de, 1,
		     qpc_eec_data.pm_state, ARBEL_PM_STATE_MIGRATED,
		     qpc_eec_data.st, arbel_qp_st[qp->type] );
	MLX_FILL_4 ( &qpctx, 4,
		     qpc_eec_data.log_rq_size, fls ( qp->recv.num_wqes - 1 ),
		     qpc_eec_data.log_rq_stride,
		     ( fls ( sizeof ( arbel_qp->recv.wqe[0] ) - 1 ) - 4 ),
		     qpc_eec_data.log_sq_size, fls ( qp->send.num_wqes - 1 ),
		     qpc_eec_data.log_sq_stride,
		     ( fls ( sizeof ( arbel_qp->send.wqe[0] ) - 1 ) - 4 ) );
	MLX_FILL_1 ( &qpctx, 5,
		     qpc_eec_data.usr_page, arbel->limits.reserved_uars );
	MLX_FILL_1 ( &qpctx, 10, qpc_eec_data.primary_address_path.port_number,
		     ibdev->port );
	MLX_FILL_1 ( &qpctx, 27, qpc_eec_data.pd, ARBEL_GLOBAL_PD );
	MLX_FILL_H ( &qpctx, 28, qpc_eec_data.wqe_base_adr_h, wqe_base_adr );
	MLX_FILL_1 ( &qpctx, 29, qpc_eec_data.wqe_lkey, arbel->lkey );
	MLX_FILL_1 ( &qpctx, 30, qpc_eec_data.ssc, 1 );
	MLX_FILL_1 ( &qpctx, 33, qpc_eec_data.cqn_snd, qp->send.cq->cqn );
	MLX_FILL_1 ( &qpctx, 34, qpc_eec_data.snd_wqe_base_adr_l,
		     ( send_wqe_base_adr >> 6 ) );
	MLX_FILL_1 ( &qpctx, 35, qpc_eec_data.snd_db_record_index,
		     arbel_qp->send.doorbell_idx );
	MLX_FILL_4 ( &qpctx, 38,
		     qpc_eec_data.rre, 1,
		     qpc_eec_data.rwe, 1,
		     qpc_eec_data.rae, 1,
		     qpc_eec_data.rsc, 1 );
	MLX_FILL_1 ( &qpctx, 41, qpc_eec_data.cqn_rcv, qp->recv.cq->cqn );
	MLX_FILL_1 ( &qpctx, 42, qpc_eec_data.rcv_wqe_base_adr_l,
		     ( recv_wqe_base_adr >> 6 ) );
	MLX_FILL_1 ( &qpctx, 43, qpc_eec_data.rcv_db_record_index,
		     arbel_qp->recv.doorbell_idx );
	if ( ( rc = arbel_cmd_rst2init_qpee ( arbel, qp->qpn, &qpctx )) != 0 ){
		DBGC ( arbel, "Arbel %p QPN %#lx RST2INIT_QPEE failed: %s\n",
		       arbel, qp->qpn, strerror ( rc ) );
		goto err_rst2init_qpee;
	}
	arbel_qp->state = ARBEL_QP_ST_INIT;

	DBGC ( arbel, "Arbel %p QPN %#lx send ring [%08lx,%08lx), doorbell "
	       "%08lx\n", arbel, qp->qpn, virt_to_phys ( arbel_qp->send.wqe ),
	       ( virt_to_phys ( arbel_qp->send.wqe ) +
		 arbel_qp->send.wqe_size ),
	       virt_to_phys ( send_db_rec ) );
	DBGC ( arbel, "Arbel %p QPN %#lx receive ring [%08lx,%08lx), doorbell "
	       "%08lx\n", arbel, qp->qpn, virt_to_phys ( arbel_qp->recv.wqe ),
	       ( virt_to_phys ( arbel_qp->recv.wqe ) +
		 arbel_qp->recv.wqe_size ),
	       virt_to_phys ( recv_db_rec ) );
	DBGC ( arbel, "Arbel %p QPN %#lx send CQN %#lx receive CQN %#lx\n",
	       arbel, qp->qpn, qp->send.cq->cqn, qp->recv.cq->cqn );
	ib_qp_set_drvdata ( qp, arbel_qp );
	return 0;

	arbel_cmd_2rst_qpee ( arbel, qp->qpn );
 err_rst2init_qpee:
	MLX_FILL_1 ( send_db_rec, 1, res, ARBEL_UAR_RES_NONE );
	MLX_FILL_1 ( recv_db_rec, 1, res, ARBEL_UAR_RES_NONE );
 err_unsupported_address_split:
	free_phys ( arbel_qp->recv.grh, arbel_qp->recv.grh_size );
	free_phys ( arbel_qp->recv.wqe, arbel_qp->recv.wqe_size );
 err_create_recv_wq:
	free_phys ( arbel_qp->send.wqe, arbel_qp->send.wqe_size );
 err_create_send_wq:
	free ( arbel_qp );
 err_arbel_qp:
	arbel_free_qpn ( ibdev, qp );
 err_alloc_qpn:
	return rc;
}

/**
 * Modify queue pair
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @ret rc		Return status code
 */
static int arbel_modify_qp ( struct ib_device *ibdev,
			     struct ib_queue_pair *qp ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	struct arbel_queue_pair *arbel_qp = ib_qp_get_drvdata ( qp );
	struct arbelprm_qp_ee_state_transitions qpctx;
	int rc;

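	/* The queue pair is stepped through the INIT -> RTR -> RTS
	 * state machine; transitions already performed are skipped. */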
	/* Transition queue to RTR state, if applicable */
	if ( arbel_qp->state < ARBEL_QP_ST_RTR ) {
		memset ( &qpctx, 0, sizeof ( qpctx ) );
		MLX_FILL_2 ( &qpctx, 4,
			     qpc_eec_data.mtu, ARBEL_MTU_2048,
			     qpc_eec_data.msg_max, 31 );
		MLX_FILL_1 ( &qpctx, 7,
			     qpc_eec_data.remote_qpn_een, qp->av.qpn );
		MLX_FILL_2 ( &qpctx, 11,
			     qpc_eec_data.primary_address_path.rnr_retry,
			     ARBEL_RETRY_MAX,
			     qpc_eec_data.primary_address_path.rlid,
			     qp->av.lid );
		MLX_FILL_2 ( &qpctx, 12,
			     qpc_eec_data.primary_address_path.ack_timeout,
			     14 /* 4.096us * 2^(14) = 67ms */,
			     qpc_eec_data.primary_address_path.max_stat_rate,
			     arbel_rate ( &qp->av ) );
		memcpy ( &qpctx.u.dwords[14], &qp->av.gid,
			 sizeof ( qp->av.gid ) );
		MLX_FILL_1 ( &qpctx, 30,
			     qpc_eec_data.retry_count, ARBEL_RETRY_MAX );
		MLX_FILL_1 ( &qpctx, 39,
			     qpc_eec_data.next_rcv_psn, qp->recv.psn );
		MLX_FILL_1 ( &qpctx, 40,
			     qpc_eec_data.ra_buff_indx,
			     ( arbel->limits.reserved_rdbs +
			       ( ( qp->qpn & ~ARBEL_QPN_RANDOM_MASK ) -
				 arbel->special_qpn_base ) ) );
		if ( ( rc = arbel_cmd_init2rtr_qpee ( arbel, qp->qpn,
						      &qpctx ) ) != 0 ) {
			DBGC ( arbel, "Arbel %p QPN %#lx INIT2RTR_QPEE failed:"
			       " %s\n", arbel, qp->qpn, strerror ( rc ) );
			return rc;
		}
		arbel_qp->state = ARBEL_QP_ST_RTR;
	}

	/* Transition queue to RTS state, if applicable */
	if ( arbel_qp->state < ARBEL_QP_ST_RTS ) {
		memset ( &qpctx, 0, sizeof ( qpctx ) );
		MLX_FILL_1 ( &qpctx, 11,
			     qpc_eec_data.primary_address_path.rnr_retry,
			     ARBEL_RETRY_MAX );
		MLX_FILL_1 ( &qpctx, 12,
			     qpc_eec_data.primary_address_path.ack_timeout,
			     14 /* 4.096us * 2^(14) = 67ms */ );
		MLX_FILL_2 ( &qpctx, 30,
			     qpc_eec_data.retry_count, ARBEL_RETRY_MAX,
			     qpc_eec_data.sic, 1 );
		MLX_FILL_1 ( &qpctx, 32,
			     qpc_eec_data.next_send_psn, qp->send.psn );
		if ( ( rc = arbel_cmd_rtr2rts_qpee ( arbel, qp->qpn,
						     &qpctx ) ) != 0 ) {
			DBGC ( arbel, "Arbel %p QPN %#lx RTR2RTS_QPEE failed: "
			       "%s\n", arbel, qp->qpn, strerror ( rc ) );
			return rc;
		}
		arbel_qp->state = ARBEL_QP_ST_RTS;
	}

	/* Update parameters in RTS state */
	memset ( &qpctx, 0, sizeof ( qpctx ) );
	MLX_FILL_1 ( &qpctx, 0, opt_param_mask, ARBEL_QPEE_OPT_PARAM_QKEY );
	MLX_FILL_1 ( &qpctx, 44, qpc_eec_data.q_key, qp->qkey );
	if ( ( rc = arbel_cmd_rts2rts_qpee ( arbel, qp->qpn, &qpctx ) ) != 0 ){
		DBGC ( arbel, "Arbel %p QPN %#lx RTS2RTS_QPEE failed: %s\n",
		       arbel, qp->qpn, strerror ( rc ) );
		return rc;
	}

	return 0;
}

/**
 * Destroy queue pair
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 */
static void arbel_destroy_qp ( struct ib_device *ibdev,
			       struct ib_queue_pair *qp ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	struct arbel_queue_pair *arbel_qp = ib_qp_get_drvdata ( qp );
	struct arbelprm_qp_db_record *send_db_rec;
	struct arbelprm_qp_db_record *recv_db_rec;
	int rc;

	/* Take ownership back from hardware */
	if ( ( rc = arbel_cmd_2rst_qpee ( arbel, qp->qpn ) ) != 0 ) {
		DBGC ( arbel, "Arbel %p QPN %#lx FATAL 2RST_QPEE failed: "
		       "%s\n", arbel, qp->qpn, strerror ( rc ) );
		/* Leak memory and return; at least we avoid corruption */
		return;
	}

	/* Clear doorbell records */
	send_db_rec = &arbel->db_rec[arbel_qp->send.doorbell_idx].qp;
	recv_db_rec = &arbel->db_rec[arbel_qp->recv.doorbell_idx].qp;
	MLX_FILL_1 ( send_db_rec, 1, res, ARBEL_UAR_RES_NONE );
	MLX_FILL_1 ( recv_db_rec, 1, res, ARBEL_UAR_RES_NONE );

	/* Free memory */
	free_phys ( arbel_qp->recv.grh, arbel_qp->recv.grh_size );
	free_phys ( arbel_qp->recv.wqe, arbel_qp->recv.wqe_size );
	free_phys ( arbel_qp->send.wqe, arbel_qp->send.wqe_size );
	free ( arbel_qp );

	/* Mark queue number as free */
	arbel_free_qpn ( ibdev, qp );

	ib_qp_set_drvdata ( qp, NULL );
}

/***************************************************************************
 *
 * Work request operations
 *
 ***************************************************************************
 */

/**
 * Ring doorbell register in UAR
 *
 * @v arbel		Arbel device
 * @v db_reg		Doorbell register structure
 * @v offset		Address of doorbell
 */
static void arbel_ring_doorbell ( struct arbel *arbel,
				  union arbelprm_doorbell_register *db_reg,
				  unsigned int offset ) {

	DBGC2 ( arbel, "Arbel %p ringing doorbell %08x:%08x at %lx\n",
		arbel, ntohl ( db_reg->dword[0] ), ntohl ( db_reg->dword[1] ),
		virt_to_phys ( arbel->uar + offset ) );

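	/* The doorbell is one logical 64-bit value, issued as two
	 * 32-bit writes; the barriers order the writes with respect
	 * to each other and to preceding WQE updates. */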
	barrier();
	writel ( db_reg->dword[0], ( arbel->uar + offset + 0 ) );
	barrier();
	writel ( db_reg->dword[1], ( arbel->uar + offset + 4 ) );
}

/** GID used for GID-less send work queue entries */
static const union ib_gid arbel_no_gid = {
	.bytes = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0 },
};

/**
 * Construct UD send work queue entry
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v dest		Destination address vector
 * @v iobuf		I/O buffer
 * @v wqe		Send work queue entry
 * @ret nds		Work queue entry size
 */
static size_t arbel_fill_ud_send_wqe ( struct ib_device *ibdev,
				       struct ib_queue_pair *qp __unused,
				       struct ib_address_vector *dest,
				       struct io_buffer *iobuf,
				       union arbel_send_wqe *wqe ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	const union ib_gid *gid;

	/* Construct this work queue entry */
	MLX_FILL_1 ( &wqe->ud.ctrl, 0, always1, 1 );
	MLX_FILL_2 ( &wqe->ud.ud, 0,
		     ud_address_vector.pd, ARBEL_GLOBAL_PD,
		     ud_address_vector.port_number, ibdev->port );
	MLX_FILL_2 ( &wqe->ud.ud, 1,
		     ud_address_vector.rlid, dest->lid,
		     ud_address_vector.g, dest->gid_present );
	MLX_FILL_2 ( &wqe->ud.ud, 2,
		     ud_address_vector.max_stat_rate, arbel_rate ( dest ),
		     ud_address_vector.msg, 3 );
	MLX_FILL_1 ( &wqe->ud.ud, 3, ud_address_vector.sl, dest->sl );
	gid = ( dest->gid_present ? &dest->gid : &arbel_no_gid );
	memcpy ( &wqe->ud.ud.u.dwords[4], gid, sizeof ( *gid ) );
	MLX_FILL_1 ( &wqe->ud.ud, 8, destination_qp, dest->qpn );
	MLX_FILL_1 ( &wqe->ud.ud, 9, q_key, dest->qkey );
	MLX_FILL_1 ( &wqe->ud.data[0], 0, byte_count, iob_len ( iobuf ) );
	MLX_FILL_1 ( &wqe->ud.data[0], 1, l_key, arbel->lkey );
	MLX_FILL_H ( &wqe->ud.data[0], 2,
		     local_address_h, virt_to_bus ( iobuf->data ) );
	MLX_FILL_1 ( &wqe->ud.data[0], 3,
		     local_address_l, virt_to_bus ( iobuf->data ) );

	return ( offsetof ( typeof ( wqe->ud ), data[1] ) >> 4 );
}

/**
 * Construct MLX send work queue entry
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v dest		Destination address vector
 * @v iobuf		I/O buffer
 * @v wqe		Send work queue entry
 * @ret nds		Work queue entry size
 */
static size_t arbel_fill_mlx_send_wqe ( struct ib_device *ibdev,
					struct ib_queue_pair *qp,
					struct ib_address_vector *dest,
					struct io_buffer *iobuf,
					union arbel_send_wqe *wqe ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	struct io_buffer headers;

	/* Construct IB headers */
	iob_populate ( &headers, &wqe->mlx.headers, 0,
		       sizeof ( wqe->mlx.headers ) );
	iob_reserve ( &headers, sizeof ( wqe->mlx.headers ) );
	ib_push ( ibdev, &headers, qp, iob_len ( iobuf ), dest );

	/* Construct this work queue entry */
	MLX_FILL_5 ( &wqe->mlx.ctrl, 0,
		     c, 1 /* generate completion */,
		     icrc, 0 /* generate ICRC */,
		     max_statrate, arbel_rate ( dest ),
		     slr, 0,
		     v15, ( ( qp->ext_qpn == IB_QPN_SMI ) ? 1 : 0 ) );
	MLX_FILL_1 ( &wqe->mlx.ctrl, 1, rlid, dest->lid );
	MLX_FILL_1 ( &wqe->mlx.data[0], 0,
		     byte_count, iob_len ( &headers ) );
	MLX_FILL_1 ( &wqe->mlx.data[0], 1, l_key, arbel->lkey );
	MLX_FILL_H ( &wqe->mlx.data[0], 2,
		     local_address_h, virt_to_bus ( headers.data ) );
	MLX_FILL_1 ( &wqe->mlx.data[0], 3,
		     local_address_l, virt_to_bus ( headers.data ) );
	MLX_FILL_1 ( &wqe->mlx.data[1], 0,
		     byte_count, ( iob_len ( iobuf ) + 4 /* ICRC */ ) );
	MLX_FILL_1 ( &wqe->mlx.data[1], 1, l_key, arbel->lkey );
	MLX_FILL_H ( &wqe->mlx.data[1], 2,
		     local_address_h, virt_to_bus ( iobuf->data ) );
	MLX_FILL_1 ( &wqe->mlx.data[1], 3,
		     local_address_l, virt_to_bus ( iobuf->data ) );

	return ( offsetof ( typeof ( wqe->mlx ), data[2] ) >> 4 );
}

/**
 * Construct RC send work queue entry
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v dest		Destination address vector
 * @v iobuf		I/O buffer
 * @v wqe		Send work queue entry
 * @ret nds		Work queue entry size
 */
static size_t arbel_fill_rc_send_wqe ( struct ib_device *ibdev,
				       struct ib_queue_pair *qp __unused,
				       struct ib_address_vector *dest __unused,
				       struct io_buffer *iobuf,
				       union arbel_send_wqe *wqe ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );

	/* Construct this work queue entry */
	MLX_FILL_1 ( &wqe->rc.ctrl, 0, always1, 1 );
	MLX_FILL_1 ( &wqe->rc.data[0], 0, byte_count, iob_len ( iobuf ) );
	MLX_FILL_1 ( &wqe->rc.data[0], 1, l_key, arbel->lkey );
	MLX_FILL_H ( &wqe->rc.data[0], 2,
		     local_address_h, virt_to_bus ( iobuf->data ) );
	MLX_FILL_1 ( &wqe->rc.data[0], 3,
		     local_address_l, virt_to_bus ( iobuf->data ) );

	return ( offsetof ( typeof ( wqe->rc ), data[1] ) >> 4 );
}

/** Work queue entry constructors */
static size_t
( * arbel_fill_send_wqe[] ) ( struct ib_device *ibdev,
			      struct ib_queue_pair *qp,
			      struct ib_address_vector *dest,
			      struct io_buffer *iobuf,
			      union arbel_send_wqe *wqe ) = {
	[IB_QPT_SMI] = arbel_fill_mlx_send_wqe,
	[IB_QPT_GSI] = arbel_fill_mlx_send_wqe,
	[IB_QPT_UD] = arbel_fill_ud_send_wqe,
	[IB_QPT_RC] = arbel_fill_rc_send_wqe,
};

/**
 * Post send work queue entry
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v dest		Destination address vector
 * @v iobuf		I/O buffer
 * @ret rc		Return status code
 */
static int arbel_post_send ( struct ib_device *ibdev,
			     struct ib_queue_pair *qp,
			     struct ib_address_vector *dest,
			     struct io_buffer *iobuf ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	struct arbel_queue_pair *arbel_qp = ib_qp_get_drvdata ( qp );
	struct ib_work_queue *wq = &qp->send;
	struct arbel_send_work_queue *arbel_send_wq = &arbel_qp->send;
	union arbel_send_wqe *prev_wqe;
	union arbel_send_wqe *wqe;
	struct arbelprm_qp_db_record *qp_db_rec;
	union arbelprm_doorbell_register db_reg;
	unsigned long wqe_idx_mask;
	size_t nds;

	/* Allocate work queue entry */
	wqe_idx_mask = ( wq->num_wqes - 1 );
	if ( wq->iobufs[wq->next_idx & wqe_idx_mask] ) {
		DBGC ( arbel, "Arbel %p QPN %#lx send queue full",
		       arbel, qp->qpn );
		return -ENOBUFS;
	}
	wq->iobufs[wq->next_idx & wqe_idx_mask] = iobuf;
	prev_wqe = &arbel_send_wq->wqe[(wq->next_idx - 1) & wqe_idx_mask];
	wqe = &arbel_send_wq->wqe[wq->next_idx & wqe_idx_mask];

	/* Construct work queue entry */
	memset ( ( ( ( void * ) wqe ) + sizeof ( wqe->next ) ), 0,
		 ( sizeof ( *wqe ) - sizeof ( wqe->next ) ) );
	assert ( qp->type < ( sizeof ( arbel_fill_send_wqe ) /
			      sizeof ( arbel_fill_send_wqe[0] ) ) );
	assert ( arbel_fill_send_wqe[qp->type] != NULL );
	nds = arbel_fill_send_wqe[qp->type] ( ibdev, qp, dest, iobuf, wqe );
	DBGCP ( arbel, "Arbel %p QPN %#lx posting send WQE %#lx:\n",
		arbel, qp->qpn, ( wq->next_idx & wqe_idx_mask ) );
	DBGCP_HDA ( arbel, virt_to_phys ( wqe ), wqe, sizeof ( *wqe ) );

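	/* The hardware chains WQEs via each entry's "next" segment;
	 * filling in the previous WQE's opcode and size below is what
	 * actually hands the new entry over to the hardware. */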
	/* Update previous work queue entry's "next" field */
	MLX_SET ( &prev_wqe->next, nopcode, ARBEL_OPCODE_SEND );
	MLX_FILL_3 ( &prev_wqe->next, 1,
		     nds, nds,
		     f, 0,
		     always1, 1 );

	/* Update doorbell record */
	barrier();
	qp_db_rec = &arbel->db_rec[arbel_send_wq->doorbell_idx].qp;
	MLX_FILL_1 ( qp_db_rec, 0,
		     counter, ( ( wq->next_idx + 1 ) & 0xffff ) );

	/* Ring doorbell register */
	MLX_FILL_4 ( &db_reg.send, 0,
		     nopcode, ARBEL_OPCODE_SEND,
		     f, 0,
		     wqe_counter, ( wq->next_idx & 0xffff ),
		     wqe_cnt, 1 );
	MLX_FILL_2 ( &db_reg.send, 1,
		     nds, nds,
		     qpn, qp->qpn );
	arbel_ring_doorbell ( arbel, &db_reg, ARBEL_DB_POST_SND_OFFSET );

	/* Update work queue's index */
	wq->next_idx++;

	return 0;
}

/**
 * Post receive work queue entry
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v iobuf		I/O buffer
 * @ret rc		Return status code
 */
static int arbel_post_recv ( struct ib_device *ibdev,
			     struct ib_queue_pair *qp,
			     struct io_buffer *iobuf ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	struct arbel_queue_pair *arbel_qp = ib_qp_get_drvdata ( qp );
	struct ib_work_queue *wq = &qp->recv;
	struct arbel_recv_work_queue *arbel_recv_wq = &arbel_qp->recv;
	struct arbelprm_recv_wqe *wqe;
	struct arbelprm_wqe_segment_data_ptr *data;
	struct ib_global_route_header *grh;
	union arbelprm_doorbell_record *db_rec;
	unsigned int wqe_idx_mask;

	/* Allocate work queue entry */
	wqe_idx_mask = ( wq->num_wqes - 1 );
	if ( wq->iobufs[wq->next_idx & wqe_idx_mask] ) {
		DBGC ( arbel, "Arbel %p QPN %#lx receive queue full\n",
		       arbel, qp->qpn );
		return -ENOBUFS;
	}
	wq->iobufs[wq->next_idx & wqe_idx_mask] = iobuf;
	wqe = &arbel_recv_wq->wqe[wq->next_idx & wqe_idx_mask].recv;

	/* Construct work queue entry */
	data = &wqe->data[0];
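	/* For queue pair types that receive a GRH, the first scatter
	 * entry catches the 40-byte GRH so that it stays out of the
	 * I/O buffer itself. */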
	if ( arbel_recv_wq->grh ) {
		grh = &arbel_recv_wq->grh[wq->next_idx & wqe_idx_mask];
		MLX_FILL_1 ( data, 0, byte_count, sizeof ( *grh ) );
		MLX_FILL_1 ( data, 1, l_key, arbel->lkey );
		MLX_FILL_H ( data, 2, local_address_h, virt_to_bus ( grh ) );
		MLX_FILL_1 ( data, 3, local_address_l, virt_to_bus ( grh ) );
		data++;
	}
	MLX_FILL_1 ( data, 0, byte_count, iob_tailroom ( iobuf ) );
	MLX_FILL_1 ( data, 1, l_key, arbel->lkey );
	MLX_FILL_H ( data, 2, local_address_h, virt_to_bus ( iobuf->data ) );
	MLX_FILL_1 ( data, 3, local_address_l, virt_to_bus ( iobuf->data ) );

	/* Update doorbell record */
	barrier();
	db_rec = &arbel->db_rec[arbel_recv_wq->doorbell_idx];
	MLX_FILL_1 ( &db_rec->qp, 0,
		     counter, ( ( wq->next_idx + 1 ) & 0xffff ) );

	/* Update work queue's index */
	wq->next_idx++;

	return 0;
}

/**
 * Handle completion
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 * @v cqe		Hardware completion queue entry
 * @ret rc		Return status code
 */
static int arbel_complete ( struct ib_device *ibdev,
			    struct ib_completion_queue *cq,
			    union arbelprm_completion_entry *cqe ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	struct ib_work_queue *wq;
	struct ib_queue_pair *qp;
	struct arbel_queue_pair *arbel_qp;
	struct arbel_send_work_queue *arbel_send_wq;
	struct arbel_recv_work_queue *arbel_recv_wq;
	struct arbelprm_recv_wqe *recv_wqe;
	struct io_buffer *iobuf;
	struct ib_address_vector recv_dest;
	struct ib_address_vector recv_source;
	struct ib_global_route_header *grh;
	struct ib_address_vector *source;
	unsigned int opcode;
	unsigned long qpn;
	int is_send;
	unsigned long wqe_adr;
	unsigned long wqe_idx;
	size_t len;
	int rc = 0;

	/* Parse completion */
	qpn = MLX_GET ( &cqe->normal, my_qpn );
	is_send = MLX_GET ( &cqe->normal, s );
	wqe_adr = ( MLX_GET ( &cqe->normal, wqe_adr ) << 6 );
	opcode = MLX_GET ( &cqe->normal, opcode );
	if ( opcode >= ARBEL_OPCODE_RECV_ERROR ) {
		/* "s" field is not valid for error opcodes */
		is_send = ( opcode == ARBEL_OPCODE_SEND_ERROR );
		DBGC ( arbel, "Arbel %p CQN %#lx %s QPN %#lx syndrome %#x "
		       "vendor %#x\n", arbel, cq->cqn,
		       ( is_send ? "send" : "recv" ), qpn,
		       MLX_GET ( &cqe->error, syndrome ),
		       MLX_GET ( &cqe->error, vendor_code ) );
		DBGC_HDA ( arbel, virt_to_phys ( cqe ), cqe, sizeof ( *cqe ) );
		rc = -EIO;
		/* Don't return immediately; propagate error to completer */
	}

	/* Identify work queue */
	wq = ib_find_wq ( cq, qpn, is_send );
	if ( ! wq ) {
		DBGC ( arbel, "Arbel %p CQN %#lx unknown %s QPN %#lx\n",
		       arbel, cq->cqn, ( is_send ? "send" : "recv" ), qpn );
		return -EIO;
	}
	qp = wq->qp;
	arbel_qp = ib_qp_get_drvdata ( qp );
	arbel_send_wq = &arbel_qp->send;
	arbel_recv_wq = &arbel_qp->recv;

	/* Identify work queue entry index */
	if ( is_send ) {
		wqe_idx = ( ( wqe_adr - virt_to_bus ( arbel_send_wq->wqe ) ) /
			    sizeof ( arbel_send_wq->wqe[0] ) );
		assert ( wqe_idx < qp->send.num_wqes );
	} else {
		wqe_idx = ( ( wqe_adr - virt_to_bus ( arbel_recv_wq->wqe ) ) /
			    sizeof ( arbel_recv_wq->wqe[0] ) );
		assert ( wqe_idx < qp->recv.num_wqes );
	}

	DBGCP ( arbel, "Arbel %p CQN %#lx QPN %#lx %s WQE %#lx completed:\n",
		arbel, cq->cqn, qp->qpn, ( is_send ? "send" : "recv" ),
		wqe_idx );
	DBGCP_HDA ( arbel, virt_to_phys ( cqe ), cqe, sizeof ( *cqe ) );

	/* Identify I/O buffer */
	iobuf = wq->iobufs[wqe_idx];
	if ( ! iobuf ) {
		DBGC ( arbel, "Arbel %p CQN %#lx QPN %#lx empty %s WQE %#lx\n",
		       arbel, cq->cqn, qp->qpn, ( is_send ? "send" : "recv" ),
		       wqe_idx );
		return -EIO;
	}
	wq->iobufs[wqe_idx] = NULL;

	if ( is_send ) {
		/* Hand off to completion handler */
		ib_complete_send ( ibdev, qp, iobuf, rc );
	} else {
		/* Set received length */
		len = MLX_GET ( &cqe->normal, byte_cnt );
		recv_wqe = &arbel_recv_wq->wqe[wqe_idx].recv;
		assert ( MLX_GET ( &recv_wqe->data[0], local_address_l ) ==
			 virt_to_bus ( iobuf->data ) );
		assert ( MLX_GET ( &recv_wqe->data[0], byte_count ) ==
			 iob_tailroom ( iobuf ) );
		MLX_FILL_1 ( &recv_wqe->data[0], 0, byte_count, 0 );
		MLX_FILL_1 ( &recv_wqe->data[0], 1,
			     l_key, ARBEL_INVALID_LKEY );
		memset ( &recv_dest, 0, sizeof ( recv_dest ) );
		recv_dest.qpn = qpn;
		switch ( qp->type ) {
		case IB_QPT_SMI:
		case IB_QPT_GSI:
		case IB_QPT_UD:
			/* Locate corresponding GRH */
			assert ( arbel_recv_wq->grh != NULL );
			grh = &arbel_recv_wq->grh[wqe_idx];
			len -= sizeof ( *grh );
			/* Construct address vector */
			source = &recv_source;
			memset ( source, 0, sizeof ( *source ) );
			source->qpn = MLX_GET ( &cqe->normal, rqpn );
			source->lid = MLX_GET ( &cqe->normal, rlid );
			source->sl = MLX_GET ( &cqe->normal, sl );
			recv_dest.gid_present = source->gid_present =
				MLX_GET ( &cqe->normal, g );
			memcpy ( &recv_dest.gid, &grh->dgid,
				 sizeof ( recv_dest.gid ) );
			memcpy ( &source->gid, &grh->sgid,
				 sizeof ( source->gid ) );
			break;
		case IB_QPT_RC:
			source = &qp->av;
			break;
		default:
			assert ( 0 );
			return -EINVAL;
		}
		assert ( len <= iob_tailroom ( iobuf ) );
		iob_put ( iobuf, len );
		/* Hand off to completion handler */
		ib_complete_recv ( ibdev, qp, &recv_dest, source, iobuf, rc );
	}

	return rc;
}

/**
 * Poll completion queue
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 */
static void arbel_poll_cq ( struct ib_device *ibdev,
			    struct ib_completion_queue *cq ) {
	struct arbel *arbel = ib_get_drvdata ( ibdev );
	struct arbel_completion_queue *arbel_cq = ib_cq_get_drvdata ( cq );
	struct arbelprm_cq_ci_db_record *ci_db_rec;
	union arbelprm_completion_entry *cqe;
	unsigned int cqe_idx_mask;
	int rc;

	while ( 1 ) {
		/* Look for completion entry */
		cqe_idx_mask = ( cq->num_cqes - 1 );
		cqe = &arbel_cq->cqe[cq->next_idx & cqe_idx_mask];
		if ( MLX_GET ( &cqe->normal, owner ) != 0 ) {
			/* Entry still owned by hardware; end of poll */
			break;
		}

		/* Handle completion */
		if ( ( rc = arbel_complete ( ibdev, cq, cqe ) ) != 0 ) {
			DBGC ( arbel, "Arbel %p CQN %#lx failed to complete: "
			       "%s\n", arbel, cq->cqn, strerror ( rc ) );
			DBGC_HD ( arbel, cqe, sizeof ( *cqe ) );
		}

		/* Return ownership to hardware */
		MLX_FILL_1 ( &cqe->normal, 7, owner, 1 );
		barrier();
		/* Update completion queue's index */
		cq->next_idx++;
		/* Update doorbell record */
		ci_db_rec = &arbel->db_rec[arbel_cq->ci_doorbell_idx].cq_ci;
		MLX_FILL_1 ( ci_db_rec, 0,
			     counter, ( cq->next_idx & 0xffffffffUL ) );
	}
}

/***************************************************************************
 *
 * Event queues
 *
 ***************************************************************************
 */

/**
 * Create event queue
 *
 * @v arbel		Arbel device
 * @ret rc		Return status code
 */
static int arbel_create_eq ( struct arbel *arbel ) {
	struct arbel_event_queue *arbel_eq = &arbel->eq;
	struct arbelprm_eqc eqctx;
	struct arbelprm_event_mask mask;
	unsigned int i;
	int rc;

	/* Select event queue number */
	arbel_eq->eqn = arbel->limits.reserved_eqs;

	/* Calculate doorbell address */
	arbel_eq->doorbell = ( arbel->eq_ci_doorbells +
			       ARBEL_DB_EQ_OFFSET ( arbel_eq->eqn ) );

	/* Allocate event queue itself */
	arbel_eq->eqe_size =
		( ARBEL_NUM_EQES * sizeof ( arbel_eq->eqe[0] ) );
	arbel_eq->eqe = malloc_phys ( arbel_eq->eqe_size,
				      sizeof ( arbel_eq->eqe[0] ) );
	if ( ! arbel_eq->eqe ) {
		rc = -ENOMEM;
		goto err_eqe;
	}
	memset ( arbel_eq->eqe, 0, arbel_eq->eqe_size );
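	/* As with CQEs, set the ownership bit so that all EQEs start
	 * out owned by the hardware. */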
1768  for ( i = 0 ; i < ARBEL_NUM_EQES ; i++ ) {
1769  MLX_FILL_1 ( &arbel_eq->eqe[i].generic, 7, owner, 1 );
1770  }
1771  barrier();
1772 
1773  /* Hand queue over to hardware */
1774  memset ( &eqctx, 0, sizeof ( eqctx ) );
1775  MLX_FILL_1 ( &eqctx, 0, st, 0xa /* "Fired" */ );
1776  MLX_FILL_H ( &eqctx, 1,
1777  start_address_h, virt_to_phys ( arbel_eq->eqe ) );
1778  MLX_FILL_1 ( &eqctx, 2,
1779  start_address_l, virt_to_phys ( arbel_eq->eqe ) );
1780  MLX_FILL_1 ( &eqctx, 3, log_eq_size, fls ( ARBEL_NUM_EQES - 1 ) );
1781  MLX_FILL_1 ( &eqctx, 6, pd, ARBEL_GLOBAL_PD );
1782  MLX_FILL_1 ( &eqctx, 7, lkey, arbel->lkey );
1783  if ( ( rc = arbel_cmd_sw2hw_eq ( arbel, arbel_eq->eqn,
1784  &eqctx ) ) != 0 ) {
1785  DBGC ( arbel, "Arbel %p EQN %#lx SW2HW_EQ failed: %s\n",
1786  arbel, arbel_eq->eqn, strerror ( rc ) );
1787  goto err_sw2hw_eq;
1788  }
1789 
1790  /* Map events to this event queue */
1791  memset ( &mask, 0xff, sizeof ( mask ) );
1792  if ( ( rc = arbel_cmd_map_eq ( arbel,
1793  ( ARBEL_MAP_EQ | arbel_eq->eqn ),
1794  &mask ) ) != 0 ) {
1795  DBGC ( arbel, "Arbel %p EQN %#lx MAP_EQ failed: %s\n",
1796  arbel, arbel_eq->eqn, strerror ( rc ) );
1797  goto err_map_eq;
1798  }
1799 
1800  DBGC ( arbel, "Arbel %p EQN %#lx ring [%08lx,%08lx), doorbell %08lx\n",
1801  arbel, arbel_eq->eqn, virt_to_phys ( arbel_eq->eqe ),
1802  ( virt_to_phys ( arbel_eq->eqe ) + arbel_eq->eqe_size ),
1803  virt_to_phys ( arbel_eq->doorbell ) );
1804  return 0;
1805 
1806  err_map_eq:
1807  arbel_cmd_hw2sw_eq ( arbel, arbel_eq->eqn, &eqctx );
1808  err_sw2hw_eq:
1809  free_phys ( arbel_eq->eqe, arbel_eq->eqe_size );
1810  err_eqe:
1811  memset ( arbel_eq, 0, sizeof ( *arbel_eq ) );
1812  return rc;
1813 }
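
/* Illustrative sketch (editorial addition, not part of the original
 * driver): log_eq_size above is derived as fls ( ARBEL_NUM_EQES - 1 ),
 * and the same fls ( n - 1 ) idiom recurs throughout this file.  For
 * any n >= 2 it equals ceil(log2(n)), i.e. the log of the smallest
 * power of two that can hold n entries.  A portable stand-in for
 * fls() and a quick check (demo_* names invented for illustration):
 */
#include <assert.h>

static int demo_fls ( unsigned long x ) {
	int bit = 0;

	while ( x ) { /* 1-based index of the highest set bit */
		x >>= 1;
		bit++;
	}
	return bit;
}

static void demo_log2_sizing ( void ) {
	assert ( demo_fls ( 8 - 1 ) == 3 );     /* 8 EQEs -> log_eq_size 3 */
	assert ( demo_fls ( 1024 - 1 ) == 10 ); /* exact power of two */
	assert ( demo_fls ( 100 - 1 ) == 7 );   /* 100 rounds up to 128 */
}
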
1814 
1815 /**
1816  * Destroy event queue
1817  *
1818  * @v arbel Arbel device
1819  */
1820 static void arbel_destroy_eq ( struct arbel *arbel ) {
1821  struct arbel_event_queue *arbel_eq = &arbel->eq;
1822  struct arbelprm_eqc eqctx;
1823  struct arbelprm_event_mask mask;
1824  int rc;
1825 
1826  /* Unmap events from event queue */
1827  memset ( &mask, 0, sizeof ( mask ) );
1828  MLX_FILL_1 ( &mask, 1, port_state_change, 1 );
1829  if ( ( rc = arbel_cmd_map_eq ( arbel,
1830  ( ARBEL_UNMAP_EQ | arbel_eq->eqn ),
1831  &mask ) ) != 0 ) {
1832  DBGC ( arbel, "Arbel %p EQN %#lx FATAL MAP_EQ failed to "
1833  "unmap: %s\n", arbel, arbel_eq->eqn, strerror ( rc ) );
1834  /* Continue; HCA may die but system should survive */
1835  }
1836 
1837  /* Take ownership back from hardware */
1838  if ( ( rc = arbel_cmd_hw2sw_eq ( arbel, arbel_eq->eqn,
1839  &eqctx ) ) != 0 ) {
1840  DBGC ( arbel, "Arbel %p EQN %#lx FATAL HW2SW_EQ failed: %s\n",
1841  arbel, arbel_eq->eqn, strerror ( rc ) );
1842  /* Leak memory and return; at least we avoid corruption */
1843  return;
1844  }
1845 
1846  /* Free memory */
1847  free_phys ( arbel_eq->eqe, arbel_eq->eqe_size );
1848  memset ( arbel_eq, 0, sizeof ( *arbel_eq ) );
1849 }
1850 
1851 /**
1852  * Handle port state event
1853  *
1854  * @v arbel Arbel device
1855  * @v eqe Port state change event queue entry
1856  */
1857 static void arbel_event_port_state_change ( struct arbel *arbel,
1858  union arbelprm_event_entry *eqe){
1859  unsigned int port;
1860  int link_up;
1861 
1862  /* Get port and link status */
1863  port = ( MLX_GET ( &eqe->port_state_change, data.p ) - 1 );
1864  link_up = ( MLX_GET ( &eqe->generic, event_sub_type ) & 0x04 );
1865  DBGC ( arbel, "Arbel %p port %d link %s\n", arbel, ( port + 1 ),
1866  ( link_up ? "up" : "down" ) );
1867 
1868  /* Sanity check */
1869  if ( port >= ARBEL_NUM_PORTS ) {
1870  DBGC ( arbel, "Arbel %p port %d does not exist!\n",
1871  arbel, ( port + 1 ) );
1872  return;
1873  }
1874 
1875  /* Update MAD parameters */
1876  ib_smc_update ( arbel->ibdev[port], arbel_mad );
1877 }
1878 
1879 /**
1880  * Poll event queue
1881  *
1882  * @v ibdev Infiniband device
1883  */
1884 static void arbel_poll_eq ( struct ib_device *ibdev ) {
1885  struct arbel *arbel = ib_get_drvdata ( ibdev );
1886  struct arbel_event_queue *arbel_eq = &arbel->eq;
1887  union arbelprm_event_entry *eqe;
1888  union arbelprm_eq_doorbell_register db_reg;
1889  unsigned int eqe_idx_mask;
1890  unsigned int event_type;
1891 
1892  /* No event is generated upon reaching INIT, so we must poll
1893  * separately for link state changes while we remain DOWN.
1894  */
1895  if ( ib_is_open ( ibdev ) &&
1896  ( ibdev->port_state == IB_PORT_STATE_DOWN ) ) {
1897  ib_smc_update ( ibdev, arbel_mad );
1898  }
1899 
1900  /* Poll event queue */
1901  while ( 1 ) {
1902  /* Look for event entry */
1903  eqe_idx_mask = ( ARBEL_NUM_EQES - 1 );
1904  eqe = &arbel_eq->eqe[arbel_eq->next_idx & eqe_idx_mask];
1905  if ( MLX_GET ( &eqe->generic, owner ) != 0 ) {
1906  /* Entry still owned by hardware; end of poll */
1907  break;
1908  }
1909  DBGCP ( arbel, "Arbel %p EQN %#lx event:\n",
1910  arbel, arbel_eq->eqn );
1911  DBGCP_HDA ( arbel, virt_to_phys ( eqe ),
1912  eqe, sizeof ( *eqe ) );
1913 
1914  /* Handle event */
1915  event_type = MLX_GET ( &eqe->generic, event_type );
1916  switch ( event_type ) {
1917  case ARBEL_EV_PORT_STATE_CHANGE:
1918  arbel_event_port_state_change ( arbel, eqe );
1919  break;
1920  default:
1921  DBGC ( arbel, "Arbel %p EQN %#lx unrecognised event "
1922  "type %#x:\n",
1923  arbel, arbel_eq->eqn, event_type );
1924  DBGC_HDA ( arbel, virt_to_phys ( eqe ),
1925  eqe, sizeof ( *eqe ) );
1926  break;
1927  }
1928 
1929  /* Return ownership to hardware */
1930  MLX_FILL_1 ( &eqe->generic, 7, owner, 1 );
1931  barrier();
1932 
1933  /* Update event queue's index */
1934  arbel_eq->next_idx++;
1935 
1936  /* Ring doorbell */
1937  MLX_FILL_1 ( &db_reg.ci, 0, ci, arbel_eq->next_idx );
1938  writel ( db_reg.dword[0], arbel_eq->doorbell );
1939  }
1940 }
1941 
1942 /***************************************************************************
1943  *
1944  * Firmware control
1945  *
1946  ***************************************************************************
1947  */
1948 
1949 /**
1950  * Map virtual to physical address for firmware usage
1951  *
1952  * @v arbel Arbel device
1953  * @v map Mapping function
1954  * @v va Virtual address
1955  * @v pa Physical address
1956  * @v len Length of region
1957  * @ret rc Return status code
1958  */
1959 static int arbel_map_vpm ( struct arbel *arbel,
1960  int ( *map ) ( struct arbel *arbel,
1961  const struct arbelprm_virtual_physical_mapping* ),
1962  uint64_t va, physaddr_t pa, size_t len ) {
1963  struct arbelprm_virtual_physical_mapping mapping;
1964  physaddr_t start;
1965  physaddr_t low;
1966  physaddr_t high;
1967  physaddr_t end;
1968  size_t size;
1969  int rc;
1970 
1971  /* Sanity checks */
1972  assert ( ( va & ( ARBEL_PAGE_SIZE - 1 ) ) == 0 );
1973  assert ( ( pa & ( ARBEL_PAGE_SIZE - 1 ) ) == 0 );
1974  assert ( ( len & ( ARBEL_PAGE_SIZE - 1 ) ) == 0 );
1975  assert ( len != 0 );
1976 
1977  /* Calculate starting points */
1978  start = pa;
1979  end = ( start + len );
1980  size = ( 1UL << ( fls ( start ^ end ) - 1 ) );
1981  low = high = ( end & ~( size - 1 ) );
1982  assert ( start < low );
1983  assert ( high <= end );
1984 
1985  /* These mappings tend to generate huge volumes of
1986  * uninteresting debug data, which basically makes it
1987  * impossible to use debugging otherwise.
1988  */
1989  DBG_DISABLE ( DBGLVL_LOG );
1990 
1991  /* Map blocks in descending order of size */
1992  while ( size >= ARBEL_PAGE_SIZE ) {
1993 
1994  /* Find the next candidate block */
1995  if ( ( low - size ) >= start ) {
1996  low -= size;
1997  pa = low;
1998  } else if ( high <= ( end - size ) ) {
1999  pa = high;
2000  high += size;
2001  } else {
2002  size >>= 1;
2003  continue;
2004  }
2005  assert ( ( va & ( size - 1 ) ) == 0 );
2006  assert ( ( pa & ( size - 1 ) ) == 0 );
2007 
2008  /* Map this block */
2009  memset ( &mapping, 0, sizeof ( mapping ) );
2010  MLX_FILL_1 ( &mapping, 0, va_h, ( va >> 32 ) );
2011  MLX_FILL_1 ( &mapping, 1, va_l, ( va >> 12 ) );
2012  MLX_FILL_H ( &mapping, 2, pa_h, pa );
2013  MLX_FILL_2 ( &mapping, 3,
2014  log2size, ( ( fls ( size ) - 1 ) - 12 ),
2015  pa_l, ( pa >> 12 ) );
2016  if ( ( rc = map ( arbel, &mapping ) ) != 0 ) {
2017  DBG_ENABLE ( DBGLVL_LOG );
2018  DBGC ( arbel, "Arbel %p could not map %08llx+%zx to "
2019  "%08lx: %s\n",
2020  arbel, va, size, pa, strerror ( rc ) );
2021  return rc;
2022  }
2023  va += size;
2024  }
2025  assert ( low == start );
2026  assert ( high == end );
2027 
2028  DBG_ENABLE ( DBGLVL_LOG );
2029  return 0;
2030 }
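
/* Illustrative sketch (editorial addition, not part of the original
 * driver): the block decomposition performed by arbel_map_vpm()
 * above, extracted into a standalone program.  Firmware mappings must
 * use naturally-aligned power-of-two blocks, so the routine splits
 * [start,end) around an aligned pivot and covers it in descending
 * block sizes, growing downwards towards start and upwards towards
 * end.  The printed blocks tile the range exactly; all demo_* names
 * and the sample addresses are invented for illustration.
 */
#include <stdio.h>

#define DEMO_PAGE_SIZE 0x1000UL

static int demo_fls ( unsigned long x ) {
	int bit = 0;

	while ( x ) {
		x >>= 1;
		bit++;
	}
	return bit;
}

int main ( void ) {
	unsigned long start = 0x9000;
	unsigned long end = 0x12000;
	unsigned long size = ( 1UL << ( demo_fls ( start ^ end ) - 1 ) );
	unsigned long low = ( end & ~( size - 1 ) );
	unsigned long high = low;
	unsigned long pa;

	while ( size >= DEMO_PAGE_SIZE ) {
		/* Take the largest aligned block that still fits */
		if ( ( low - size ) >= start ) {
			low -= size;
			pa = low;
		} else if ( high <= ( end - size ) ) {
			pa = high;
			high += size;
		} else {
			size >>= 1;
			continue;
		}
		printf ( "map block [%#lx,%#lx) size %#lx\n",
			 pa, ( pa + size ), size );
	}
	return 0;
}
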
2031 
2032 /**
2033  * Start firmware running
2034  *
2035  * @v arbel Arbel device
2036  * @ret rc Return status code
2037  */
2038 static int arbel_start_firmware ( struct arbel *arbel ) {
2039  struct arbelprm_query_fw fw;
2040  struct arbelprm_access_lam lam;
2041  unsigned int fw_pages;
2042  size_t fw_len;
2043  physaddr_t fw_base;
2044  uint64_t eq_set_ci_base_addr;
2045  int rc;
2046 
2047  /* Get firmware parameters */
2048  if ( ( rc = arbel_cmd_query_fw ( arbel, &fw ) ) != 0 ) {
2049  DBGC ( arbel, "Arbel %p could not query firmware: %s\n",
2050  arbel, strerror ( rc ) );
2051  goto err_query_fw;
2052  }
2053  DBGC ( arbel, "Arbel %p firmware version %d.%d.%d\n", arbel,
2054  MLX_GET ( &fw, fw_rev_major ), MLX_GET ( &fw, fw_rev_minor ),
2055  MLX_GET ( &fw, fw_rev_subminor ) );
2056  fw_pages = MLX_GET ( &fw, fw_pages );
2057  DBGC ( arbel, "Arbel %p requires %d kB for firmware\n",
2058  arbel, ( fw_pages * 4 ) );
2059  eq_set_ci_base_addr =
2060  ( ( (uint64_t) MLX_GET ( &fw, eq_set_ci_base_addr_h ) << 32 ) |
2061  ( (uint64_t) MLX_GET ( &fw, eq_set_ci_base_addr_l ) ) );
2062  arbel->eq_ci_doorbells = pci_ioremap ( arbel->pci, eq_set_ci_base_addr,
2063  0x200 );
2064 
2065  /* Enable locally-attached memory. Ignore failure; there may
2066  * be no attached memory.
2067  */
2068  arbel_cmd_enable_lam ( arbel, &lam );
2069 
2070  /* Allocate firmware pages and map firmware area */
2071  fw_len = ( fw_pages * ARBEL_PAGE_SIZE );
2072  if ( ! arbel->firmware_area ) {
2073  arbel->firmware_len = fw_len;
2074  arbel->firmware_area = umalloc ( arbel->firmware_len );
2075  if ( ! arbel->firmware_area ) {
2076  rc = -ENOMEM;
2077  goto err_alloc_fa;
2078  }
2079  } else {
2080  assert ( arbel->firmware_len == fw_len );
2081  }
2082  fw_base = user_to_phys ( arbel->firmware_area, 0 );
2083  DBGC ( arbel, "Arbel %p firmware area at [%08lx,%08lx)\n",
2084  arbel, fw_base, ( fw_base + fw_len ) );
2085  if ( ( rc = arbel_map_vpm ( arbel, arbel_cmd_map_fa,
2086  0, fw_base, fw_len ) ) != 0 ) {
2087  DBGC ( arbel, "Arbel %p could not map firmware: %s\n",
2088  arbel, strerror ( rc ) );
2089  goto err_map_fa;
2090  }
2091 
2092  /* Start firmware */
2093  if ( ( rc = arbel_cmd_run_fw ( arbel ) ) != 0 ) {
2094  DBGC ( arbel, "Arbel %p could not run firmware: %s\n",
2095  arbel, strerror ( rc ) );
2096  goto err_run_fw;
2097  }
2098 
2099  DBGC ( arbel, "Arbel %p firmware started\n", arbel );
2100  return 0;
2101 
2102  err_run_fw:
2103  arbel_cmd_unmap_fa ( arbel );
2104  err_map_fa:
2105  err_alloc_fa:
2106  err_query_fw:
2107  return rc;
2108 }
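
/* Illustrative sketch (editorial addition, not part of the original
 * driver): eq_set_ci_base_addr above is assembled from two 32-bit
 * register halves.  The (uint64_t) casts are essential: shifting a
 * 32-bit value left by 32 is undefined behaviour in C.  Standalone
 * check (demo_* names invented for illustration):
 */
#include <assert.h>
#include <stdint.h>

static uint64_t demo_join64 ( uint32_t addr_h, uint32_t addr_l ) {
	return ( ( ( (uint64_t) addr_h ) << 32 ) | ( (uint64_t) addr_l ) );
}

static void demo_join64_check ( void ) {
	assert ( demo_join64 ( 0x1, 0x80000000UL ) == 0x180000000ULL );
}
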
2109 
2110 /**
2111  * Stop firmware running
2112  *
2113  * @v arbel Arbel device
2114  */
2115 static void arbel_stop_firmware ( struct arbel *arbel ) {
2116  int rc;
2117 
2118  if ( ( rc = arbel_cmd_unmap_fa ( arbel ) ) != 0 ) {
2119  DBGC ( arbel, "Arbel %p FATAL could not stop firmware: %s\n",
2120  arbel, strerror ( rc ) );
2121  /* Leak memory and return; at least we avoid corruption */
2122  arbel->firmware_area = UNULL;
2123  return;
2124  }
2125 }
2126 
2127 /***************************************************************************
2128  *
2129  * Infinihost Context Memory management
2130  *
2131  ***************************************************************************
2132  */
2133 
2134 /**
2135  * Get device limits
2136  *
2137  * @v arbel Arbel device
2138  * @ret rc Return status code
2139  */
2140 static int arbel_get_limits ( struct arbel *arbel ) {
2141  struct arbelprm_query_dev_lim dev_lim;
2142  int rc;
2143 
2144  if ( ( rc = arbel_cmd_query_dev_lim ( arbel, &dev_lim ) ) != 0 ) {
2145  DBGC ( arbel, "Arbel %p could not get device limits: %s\n",
2146  arbel, strerror ( rc ) );
2147  return rc;
2148  }
2149 
2150  arbel->limits.reserved_qps =
2151  ( 1 << MLX_GET ( &dev_lim, log2_rsvd_qps ) );
2152  arbel->limits.qpc_entry_size = MLX_GET ( &dev_lim, qpc_entry_sz );
2153  arbel->limits.eqpc_entry_size = MLX_GET ( &dev_lim, eqpc_entry_sz );
2154  arbel->limits.reserved_srqs =
2155  ( 1 << MLX_GET ( &dev_lim, log2_rsvd_srqs ) );
2156  arbel->limits.srqc_entry_size = MLX_GET ( &dev_lim, srq_entry_sz );
2157  arbel->limits.reserved_ees =
2158  ( 1 << MLX_GET ( &dev_lim, log2_rsvd_ees ) );
2159  arbel->limits.eec_entry_size = MLX_GET ( &dev_lim, eec_entry_sz );
2160  arbel->limits.eeec_entry_size = MLX_GET ( &dev_lim, eeec_entry_sz );
2161  arbel->limits.reserved_cqs =
2162  ( 1 << MLX_GET ( &dev_lim, log2_rsvd_cqs ) );
2163  arbel->limits.cqc_entry_size = MLX_GET ( &dev_lim, cqc_entry_sz );
2164  arbel->limits.reserved_mtts =
2165  ( 1 << MLX_GET ( &dev_lim, log2_rsvd_mtts ) );
2166  arbel->limits.mtt_entry_size = MLX_GET ( &dev_lim, mtt_entry_sz );
2167  arbel->limits.reserved_mrws =
2168  ( 1 << MLX_GET ( &dev_lim, log2_rsvd_mrws ) );
2169  arbel->limits.mpt_entry_size = MLX_GET ( &dev_lim, mpt_entry_sz );
2170  arbel->limits.reserved_rdbs =
2171  ( 1 << MLX_GET ( &dev_lim, log2_rsvd_rdbs ) );
2172  arbel->limits.reserved_eqs = MLX_GET ( &dev_lim, num_rsvd_eqs );
2173  arbel->limits.eqc_entry_size = MLX_GET ( &dev_lim, eqc_entry_sz );
2174  arbel->limits.reserved_uars = MLX_GET ( &dev_lim, num_rsvd_uars );
2175  arbel->limits.uar_scratch_entry_size =
2176  MLX_GET ( &dev_lim, uar_scratch_entry_sz );
2177 
2178  DBGC ( arbel, "Arbel %p reserves %d x %#zx QPC, %d x %#zx EQPC, "
2179  "%d x %#zx SRQC\n", arbel,
2180  arbel->limits.reserved_qps, arbel->limits.qpc_entry_size,
2181  arbel->limits.reserved_qps, arbel->limits.eqpc_entry_size,
2182  arbel->limits.reserved_srqs, arbel->limits.srqc_entry_size );
2183  DBGC ( arbel, "Arbel %p reserves %d x %#zx EEC, %d x %#zx EEEC, "
2184  "%d x %#zx CQC\n", arbel,
2185  arbel->limits.reserved_ees, arbel->limits.eec_entry_size,
2186  arbel->limits.reserved_ees, arbel->limits.eeec_entry_size,
2187  arbel->limits.reserved_cqs, arbel->limits.cqc_entry_size );
2188  DBGC ( arbel, "Arbel %p reserves %d x %#zx EQC, %d x %#zx MTT, "
2189  "%d x %#zx MPT\n", arbel,
2190  arbel->limits.reserved_eqs, arbel->limits.eqc_entry_size,
2191  arbel->limits.reserved_mtts, arbel->limits.mtt_entry_size,
2192  arbel->limits.reserved_mrws, arbel->limits.mpt_entry_size );
2193  DBGC ( arbel, "Arbel %p reserves %d x %#zx RDB, %d x %#zx UAR, "
2194  "%d x %#zx UAR scratchpad\n", arbel,
2195  arbel->limits.reserved_rdbs, ARBEL_RDB_ENTRY_SIZE,
2196  arbel->limits.reserved_uars, ARBEL_PAGE_SIZE,
2197  arbel->limits.reserved_uars,
2198  arbel->limits.uar_scratch_entry_size );
2199 
2200  return 0;
2201 }
2202 
2203 /**
2204  * Align ICM table
2205  *
2206  * @v icm_offset Current ICM offset
2207  * @v len ICM table length
2208  * @ret icm_offset ICM offset
2209  */
2210 static size_t icm_align ( size_t icm_offset, size_t len ) {
2211 
2212  /* Round up to a multiple of the table size */
2213  assert ( len == ( 1UL << ( fls ( len ) - 1 ) ) );
2214  return ( ( icm_offset + len - 1 ) & ~( len - 1 ) );
2215 }
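
/* Illustrative sketch (editorial addition, not part of the original
 * driver): icm_align() rounds icm_offset up to the next multiple of
 * len, relying on len being a power of two (which the assert above
 * enforces).  A standalone check of the bit trick (demo_* names
 * invented for illustration):
 */
#include <assert.h>
#include <stddef.h>

static size_t demo_icm_align ( size_t icm_offset, size_t len ) {
	return ( ( icm_offset + len - 1 ) & ~( len - 1 ) );
}

static void demo_icm_align_check ( void ) {
	assert ( demo_icm_align ( 0x1234, 0x1000 ) == 0x2000 );
	assert ( demo_icm_align ( 0x2000, 0x1000 ) == 0x2000 ); /* aligned */
	assert ( demo_icm_align ( 0, 0x40 ) == 0 );
}
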
2216 
2217 /**
2218  * Allocate ICM
2219  *
2220  * @v arbel Arbel device
2221  * @v init_hca INIT_HCA structure to fill in
2222  * @ret rc Return status code
2223  */
2224 static int arbel_alloc_icm ( struct arbel *arbel,
2225  struct arbelprm_init_hca *init_hca ) {
2226  struct arbelprm_scalar_parameter icm_size;
2227  struct arbelprm_scalar_parameter icm_aux_size;
2228  struct arbelprm_scalar_parameter unmap_icm;
2229  union arbelprm_doorbell_record *db_rec;
2230  size_t icm_offset = 0;
2231  unsigned int log_num_uars, log_num_qps, log_num_srqs, log_num_ees;
2232  unsigned int log_num_cqs, log_num_mtts, log_num_mpts, log_num_rdbs;
2233  unsigned int log_num_eqs, log_num_mcs;
2234  size_t icm_len, icm_aux_len;
2235  size_t len;
2236  physaddr_t icm_phys;
2237  int rc;
2238 
2239  /* Calculate number of each object type within ICM */
2240  log_num_qps = fls ( arbel->limits.reserved_qps +
2241  ARBEL_RSVD_SPECIAL_QPS + ARBEL_MAX_QPS - 1 );
2242  log_num_srqs = fls ( arbel->limits.reserved_srqs - 1 );
2243  log_num_ees = fls ( arbel->limits.reserved_ees - 1 );
2244  log_num_cqs = fls ( arbel->limits.reserved_cqs + ARBEL_MAX_CQS - 1 );
2245  log_num_eqs = fls ( arbel->limits.reserved_eqs + ARBEL_MAX_EQS - 1 );
2246  log_num_mtts = fls ( arbel->limits.reserved_mtts - 1 );
2247  log_num_mpts = fls ( arbel->limits.reserved_mrws + 1 - 1 );
2248  log_num_rdbs = fls ( arbel->limits.reserved_rdbs +
2249  ARBEL_RSVD_SPECIAL_QPS + ARBEL_MAX_QPS - 1 );
2250  log_num_uars = fls ( arbel->limits.reserved_uars +
2251  1 /* single UAR used */ - 1 );
2252  log_num_mcs = ARBEL_LOG_MULTICAST_HASH_SIZE;
2253 
2254  /* Queue pair contexts */
2255  len = ( ( 1 << log_num_qps ) * arbel->limits.qpc_entry_size );
2256  icm_offset = icm_align ( icm_offset, len );
2257  MLX_FILL_2 ( init_hca, 13,
2258  qpc_eec_cqc_eqc_rdb_parameters.qpc_base_addr_l,
2259  ( icm_offset >> 7 ),
2260  qpc_eec_cqc_eqc_rdb_parameters.log_num_of_qp,
2261  log_num_qps );
2262  DBGC ( arbel, "Arbel %p ICM QPC is %d x %#zx at [%zx,%zx)\n",
2263  arbel, ( 1 << log_num_qps ), arbel->limits.qpc_entry_size,
2264  icm_offset, ( icm_offset + len ) );
2265  icm_offset += len;
2266 
2267  /* Extended queue pair contexts */
2268  len = ( ( 1 << log_num_qps ) * arbel->limits.eqpc_entry_size );
2269  icm_offset = icm_align ( icm_offset, len );
2270  MLX_FILL_1 ( init_hca, 25,
2271  qpc_eec_cqc_eqc_rdb_parameters.eqpc_base_addr_l,
2272  icm_offset );
2273  DBGC ( arbel, "Arbel %p ICM EQPC is %d x %#zx at [%zx,%zx)\n",
2274  arbel, ( 1 << log_num_qps ), arbel->limits.eqpc_entry_size,
2275  icm_offset, ( icm_offset + len ) );
2276  icm_offset += len;
2277 
2278  /* Completion queue contexts */
2279  len = ( ( 1 << log_num_cqs ) * arbel->limits.cqc_entry_size );
2280  icm_offset = icm_align ( icm_offset, len );
2281  MLX_FILL_2 ( init_hca, 21,
2282  qpc_eec_cqc_eqc_rdb_parameters.cqc_base_addr_l,
2283  ( icm_offset >> 6 ),
2284  qpc_eec_cqc_eqc_rdb_parameters.log_num_of_cq,
2285  log_num_cqs );
2286  DBGC ( arbel, "Arbel %p ICM CQC is %d x %#zx at [%zx,%zx)\n",
2287  arbel, ( 1 << log_num_cqs ), arbel->limits.cqc_entry_size,
2288  icm_offset, ( icm_offset + len ) );
2289  icm_offset += len;
2290 
2291  /* Event queue contexts */
2292  len = ( ( 1 << log_num_eqs ) * arbel->limits.eqc_entry_size );
2293  icm_offset = icm_align ( icm_offset, len );
2294  MLX_FILL_2 ( init_hca, 33,
2295  qpc_eec_cqc_eqc_rdb_parameters.eqc_base_addr_l,
2296  ( icm_offset >> 6 ),
2297  qpc_eec_cqc_eqc_rdb_parameters.log_num_eq,
2298  log_num_eqs );
2299  DBGC ( arbel, "Arbel %p ICM EQC is %d x %#zx at [%zx,%zx)\n",
2300  arbel, ( 1 << log_num_eqs ), arbel->limits.eqc_entry_size,
2301  icm_offset, ( icm_offset + len ) );
2302  icm_offset += len;
2303 
2304  /* End-to-end contexts */
2305  len = ( ( 1 << log_num_ees ) * arbel->limits.eec_entry_size );
2306  icm_offset = icm_align ( icm_offset, len );
2307  MLX_FILL_2 ( init_hca, 17,
2308  qpc_eec_cqc_eqc_rdb_parameters.eec_base_addr_l,
2309  ( icm_offset >> 7 ),
2310  qpc_eec_cqc_eqc_rdb_parameters.log_num_of_ee,
2311  log_num_ees );
2312  DBGC ( arbel, "Arbel %p ICM EEC is %d x %#zx at [%zx,%zx)\n",
2313  arbel, ( 1 << log_num_ees ), arbel->limits.eec_entry_size,
2314  icm_offset, ( icm_offset + len ) );
2315  icm_offset += len;
2316 
2317  /* Shared receive queue contexts */
2318  len = ( ( 1 << log_num_srqs ) * arbel->limits.srqc_entry_size );
2319  icm_offset = icm_align ( icm_offset, len );
2320  MLX_FILL_2 ( init_hca, 19,
2321  qpc_eec_cqc_eqc_rdb_parameters.srqc_base_addr_l,
2322  ( icm_offset >> 5 ),
2323  qpc_eec_cqc_eqc_rdb_parameters.log_num_of_srq,
2324  log_num_srqs );
2325  DBGC ( arbel, "Arbel %p ICM SRQC is %d x %#zx at [%zx,%zx)\n",
2326  arbel, ( 1 << log_num_srqs ), arbel->limits.srqc_entry_size,
2327  icm_offset, ( icm_offset + len ) );
2328  icm_offset += len;
2329 
2330  /* Memory protection table */
2331  len = ( ( 1 << log_num_mpts ) * arbel->limits.mpt_entry_size );
2332  icm_offset = icm_align ( icm_offset, len );
2333  MLX_FILL_1 ( init_hca, 61,
2334  tpt_parameters.mpt_base_adr_l, icm_offset );
2335  MLX_FILL_1 ( init_hca, 62,
2336  tpt_parameters.log_mpt_sz, log_num_mpts );
2337  DBGC ( arbel, "Arbel %p ICM MPT is %d x %#zx at [%zx,%zx)\n",
2338  arbel, ( 1 << log_num_mpts ), arbel->limits.mpt_entry_size,
2339  icm_offset, ( icm_offset + len ) );
2340  icm_offset += len;
2341 
2342  /* Remote read data base table */
2343  len = ( ( 1 << log_num_rdbs ) * ARBEL_RDB_ENTRY_SIZE );
2344  icm_offset = icm_align ( icm_offset, len );
2345  MLX_FILL_1 ( init_hca, 37,
2346  qpc_eec_cqc_eqc_rdb_parameters.rdb_base_addr_l,
2347  icm_offset );
2348  DBGC ( arbel, "Arbel %p ICM RDB is %d x %#zx at [%zx,%zx)\n",
2349  arbel, ( 1 << log_num_rdbs ), ARBEL_RDB_ENTRY_SIZE,
2350  icm_offset, ( icm_offset + len ) );
2351  icm_offset += len;
2352 
2353  /* Extended end-to-end contexts */
2354  len = ( ( 1 << log_num_ees ) * arbel->limits.eeec_entry_size );
2355  icm_offset = icm_align ( icm_offset, len );
2356  MLX_FILL_1 ( init_hca, 29,
2357  qpc_eec_cqc_eqc_rdb_parameters.eeec_base_addr_l,
2358  icm_offset );
2359  DBGC ( arbel, "Arbel %p ICM EEEC is %d x %#zx at [%zx,%zx)\n",
2360  arbel, ( 1 << log_num_ees ), arbel->limits.eeec_entry_size,
2361  icm_offset, ( icm_offset + len ) );
2362  icm_offset += len;
2363 
2364  /* Multicast table */
2365  len = ( ( 1 << log_num_mcs ) * sizeof ( struct arbelprm_mgm_entry ) );
2366  icm_offset = icm_align ( icm_offset, len );
2367  MLX_FILL_1 ( init_hca, 49,
2368  multicast_parameters.mc_base_addr_l, icm_offset );
2369  MLX_FILL_1 ( init_hca, 52,
2370  multicast_parameters.log_mc_table_entry_sz,
2371  fls ( sizeof ( struct arbelprm_mgm_entry ) - 1 ) );
2372  MLX_FILL_1 ( init_hca, 53,
2373  multicast_parameters.mc_table_hash_sz,
2374  ( 1 << log_num_mcs ) );
2375  MLX_FILL_1 ( init_hca, 54,
2376  multicast_parameters.log_mc_table_sz,
2377  log_num_mcs /* Only one entry per hash */ );
2378  DBGC ( arbel, "Arbel %p ICM MC is %d x %#zx at [%zx,%zx)\n", arbel,
2379  ( 1 << log_num_mcs ), sizeof ( struct arbelprm_mgm_entry ),
2380  icm_offset, ( icm_offset + len ) );
2381  icm_offset += len;
2382 
2383  /* Memory translation table */
2384  len = ( ( 1 << log_num_mtts ) * arbel->limits.mtt_entry_size );
2385  icm_offset = icm_align ( icm_offset, len );
2386  MLX_FILL_1 ( init_hca, 65,
2387  tpt_parameters.mtt_base_addr_l, icm_offset );
2388  DBGC ( arbel, "Arbel %p ICM MTT is %d x %#zx at [%zx,%zx)\n",
2389  arbel, ( 1 << log_num_mtts ), arbel->limits.mtt_entry_size,
2390  icm_offset, ( icm_offset + len ) );
2391  icm_offset += len;
2392 
2393  /* User access region scratchpads */
2394  len = ( ( 1 << log_num_uars ) * arbel->limits.uar_scratch_entry_size );
2395  icm_offset = icm_align ( icm_offset, len );
2396  MLX_FILL_1 ( init_hca, 77,
2397  uar_parameters.uar_scratch_base_addr_l, icm_offset );
2398  DBGC ( arbel, "Arbel %p UAR scratchpad is %d x %#zx at [%zx,%zx)\n",
2399  arbel, ( 1 << log_num_uars ),
2400  arbel->limits.uar_scratch_entry_size,
2401  icm_offset, ( icm_offset + len ) );
2402  icm_offset += len;
2403 
2404  /* Record amount of ICM to be allocated */
2405  icm_offset = icm_align ( icm_offset, ARBEL_PAGE_SIZE );
2406  icm_len = icm_offset;
2407 
2408  /* User access region contexts
2409  *
2410  * The reserved UAR(s) do not need to be backed by physical
2411  * memory, and our UAR is allocated separately; neither are
2412  * part of the umalloc()ed ICM block, but both contribute to
2413  * the total length of ICM virtual address space.
2414  */
2415  len = ( ( 1 << log_num_uars ) * ARBEL_PAGE_SIZE );
2416  icm_offset = icm_align ( icm_offset, len );
2417  MLX_FILL_1 ( init_hca, 74, uar_parameters.log_max_uars, log_num_uars );
2418  MLX_FILL_1 ( init_hca, 79,
2419  uar_parameters.uar_context_base_addr_l, icm_offset );
2420  arbel->db_rec_offset =
2421  ( icm_offset +
2422  ( ARBEL_PCI_UAR_IDX * ARBEL_PAGE_SIZE ) );
2423  DBGC ( arbel, "Arbel %p UAR is %d x %#zx at [%zx,%zx), doorbells "
2424  "[%zx,%zx)\n", arbel, ( 1 << log_num_uars ), ARBEL_PAGE_SIZE,
2425  icm_offset, ( icm_offset + len ), arbel->db_rec_offset,
2426  ( arbel->db_rec_offset + ARBEL_PAGE_SIZE ) );
2427  icm_offset += len;
2428 
2429  /* Get ICM auxiliary area size */
2430  memset ( &icm_size, 0, sizeof ( icm_size ) );
2431  MLX_FILL_1 ( &icm_size, 1, value, icm_len );
2432  if ( ( rc = arbel_cmd_set_icm_size ( arbel, &icm_size,
2433  &icm_aux_size ) ) != 0 ) {
2434  DBGC ( arbel, "Arbel %p could not set ICM size: %s\n",
2435  arbel, strerror ( rc ) );
2436  goto err_set_icm_size;
2437  }
2438  icm_aux_len = ( MLX_GET ( &icm_aux_size, value ) * ARBEL_PAGE_SIZE );
2439 
2440  /* Allocate ICM data and auxiliary area */
2441  DBGC ( arbel, "Arbel %p requires %zd kB ICM and %zd kB AUX ICM\n",
2442  arbel, ( icm_len / 1024 ), ( icm_aux_len / 1024 ) );
2443  if ( ! arbel->icm ) {
2444  arbel->icm_len = icm_len;
2445  arbel->icm_aux_len = icm_aux_len;
2446  arbel->icm = umalloc ( arbel->icm_len + arbel->icm_aux_len );
2447  if ( ! arbel->icm ) {
2448  rc = -ENOMEM;
2449  goto err_alloc_icm;
2450  }
2451  } else {
2452  assert ( arbel->icm_len == icm_len );
2453  assert ( arbel->icm_aux_len == icm_aux_len );
2454  }
2455  icm_phys = user_to_phys ( arbel->icm, 0 );
2456 
2457  /* Allocate doorbell UAR */
2458  arbel->db_rec = malloc_phys ( ARBEL_PAGE_SIZE, ARBEL_PAGE_SIZE );
2459  if ( ! arbel->db_rec ) {
2460  rc = -ENOMEM;
2461  goto err_alloc_doorbell;
2462  }
2463 
2464  /* Map ICM auxiliary area */
2465  DBGC ( arbel, "Arbel %p ICM AUX at [%08lx,%08lx)\n",
2466  arbel, icm_phys, ( icm_phys + arbel->icm_aux_len ) );
2467  if ( ( rc = arbel_map_vpm ( arbel, arbel_cmd_map_icm_aux,
2468  0, icm_phys, arbel->icm_aux_len ) ) != 0 ){
2469  DBGC ( arbel, "Arbel %p could not map AUX ICM: %s\n",
2470  arbel, strerror ( rc ) );
2471  goto err_map_icm_aux;
2472  }
2473  icm_phys += arbel->icm_aux_len;
2474 
2475  /* Map ICM area */
2476  DBGC ( arbel, "Arbel %p ICM at [%08lx,%08lx)\n",
2477  arbel, icm_phys, ( icm_phys + arbel->icm_len ) );
2478  if ( ( rc = arbel_map_vpm ( arbel, arbel_cmd_map_icm,
2479  0, icm_phys, arbel->icm_len ) ) != 0 ) {
2480  DBGC ( arbel, "Arbel %p could not map ICM: %s\n",
2481  arbel, strerror ( rc ) );
2482  goto err_map_icm;
2483  }
2484  icm_phys += arbel->icm_len;
2485 
2486  /* Map doorbell UAR */
2487  DBGC ( arbel, "Arbel %p UAR at [%08lx,%08lx)\n",
2488  arbel, virt_to_phys ( arbel->db_rec ),
2489  ( virt_to_phys ( arbel->db_rec ) + ARBEL_PAGE_SIZE ) );
2490  if ( ( rc = arbel_map_vpm ( arbel, arbel_cmd_map_icm,
2491  arbel->db_rec_offset,
2492  virt_to_phys ( arbel->db_rec ),
2493  ARBEL_PAGE_SIZE ) ) != 0 ) {
2494  DBGC ( arbel, "Arbel %p could not map doorbell UAR: %s\n",
2495  arbel, strerror ( rc ) );
2496  goto err_map_doorbell;
2497  }
2498 
2499  /* Initialise doorbell records */
2500  db_rec = &arbel->db_rec[ARBEL_GROUP_SEPARATOR_DOORBELL];
2501  MLX_FILL_1 ( &db_rec->qp, 0, counter, 0 );
2502  MLX_FILL_1 ( &db_rec->qp, 1, res, ARBEL_UAR_RES_GROUP_SEP );
2503 
2504  return 0;
2505 
2506  memset ( &unmap_icm, 0, sizeof ( unmap_icm ) );
2507  MLX_FILL_1 ( &unmap_icm, 1, value, arbel->db_rec_offset );
2508  arbel_cmd_unmap_icm ( arbel, 1, &unmap_icm );
2509  err_map_doorbell:
2510  memset ( &unmap_icm, 0, sizeof ( unmap_icm ) );
2511  arbel_cmd_unmap_icm ( arbel, ( arbel->icm_len / ARBEL_PAGE_SIZE ),
2512  &unmap_icm );
2513  err_map_icm:
2514  arbel_cmd_unmap_icm_aux ( arbel );
2515  err_map_icm_aux:
2516  free_phys ( arbel->db_rec, ARBEL_PAGE_SIZE );
2517  arbel->db_rec = NULL;
2518  err_alloc_doorbell:
2519  err_alloc_icm:
2520  err_set_icm_size:
2521  return rc;
2522 }
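
/* Illustrative sketch (editorial addition, not part of the original
 * driver): the layout loop used by arbel_alloc_icm() above, reduced
 * to its essentials.  Each table is aligned to its own power-of-two
 * total size and then appended, and the final offset becomes the
 * total ICM length.  Table names and sizes here are invented for
 * illustration.
 */
#include <stdio.h>
#include <stddef.h>

static size_t demo_align ( size_t offset, size_t len ) {
	return ( ( offset + len - 1 ) & ~( len - 1 ) );
}

int main ( void ) {
	static const struct {
		const char *name;
		size_t len;
	} table[] = {
		{ "QPC", 0x20000 }, { "CQC", 0x4000 }, { "EQC", 0x800 },
	};
	size_t icm_offset = 0;
	unsigned int i;

	for ( i = 0 ; i < ( sizeof ( table ) / sizeof ( table[0] ) ) ; i++ ) {
		icm_offset = demo_align ( icm_offset, table[i].len );
		printf ( "%s at [%#zx,%#zx)\n", table[i].name,
			 icm_offset, ( icm_offset + table[i].len ) );
		icm_offset += table[i].len;
	}
	printf ( "total ICM length %#zx\n", icm_offset );
	return 0;
}
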
2523 
2524 /**
2525  * Free ICM
2526  *
2527  * @v arbel Arbel device
2528  */
2529 static void arbel_free_icm ( struct arbel *arbel ) {
2530  struct arbelprm_scalar_parameter unmap_icm;
2531 
2532  memset ( &unmap_icm, 0, sizeof ( unmap_icm ) );
2533  MLX_FILL_1 ( &unmap_icm, 1, value, arbel->db_rec_offset );
2534  arbel_cmd_unmap_icm ( arbel, 1, &unmap_icm );
2535  memset ( &unmap_icm, 0, sizeof ( unmap_icm ) );
2536  arbel_cmd_unmap_icm ( arbel, ( arbel->icm_len / ARBEL_PAGE_SIZE ),
2537  &unmap_icm );
2538  arbel_cmd_unmap_icm_aux ( arbel );
2539  free_phys ( arbel->db_rec, ARBEL_PAGE_SIZE );
2540  arbel->db_rec = NULL;
2541 }
2542 
2543 /***************************************************************************
2544  *
2545  * Initialisation and teardown
2546  *
2547  ***************************************************************************
2548  */
2549 
2550 /**
2551  * Reset device
2552  *
2553  * @v arbel Arbel device
2554  */
2555 static void arbel_reset ( struct arbel *arbel ) {
2556  struct pci_device *pci = arbel->pci;
2557  struct pci_config_backup backup;
2558  static const uint8_t backup_exclude[] =
2559  PCI_CONFIG_BACKUP_EXCLUDE ( 0x58, 0x5c );
2560  uint16_t vendor;
2561  unsigned int i;
2562 
2563  /* Perform device reset and preserve PCI configuration */
2564  pci_backup ( pci, &backup, PCI_CONFIG_BACKUP_ALL, backup_exclude );
2565  writel ( ARBEL_RESET_MAGIC,
2566  ( arbel->config + ARBEL_RESET_OFFSET ) );
2567  for ( i = 0 ; i < ARBEL_RESET_WAIT_TIME_MS ; i++ ) {
2568  mdelay ( 1 );
2569  pci_read_config_word ( pci, PCI_VENDOR_ID, &vendor );
2570  if ( vendor != 0xffff )
2571  break;
2572  }
2573  pci_restore ( pci, &backup, PCI_CONFIG_BACKUP_ALL, backup_exclude );
2574 }
2575 
2576 /**
2577  * Set up memory protection table
2578  *
2579  * @v arbel Arbel device
2580  * @ret rc Return status code
2581  */
2582 static int arbel_setup_mpt ( struct arbel *arbel ) {
2583  struct arbelprm_mpt mpt;
2584  uint32_t key;
2585  int rc;
2586 
2587  /* Derive key */
2588  key = ( arbel->limits.reserved_mrws | ARBEL_MKEY_PREFIX );
2589  arbel->lkey = ( ( key << 8 ) | ( key >> 24 ) );
2590 
2591  /* Initialise memory protection table */
2592  memset ( &mpt, 0, sizeof ( mpt ) );
2593  MLX_FILL_7 ( &mpt, 0,
2594  a, 1,
2595  rw, 1,
2596  rr, 1,
2597  lw, 1,
2598  lr, 1,
2599  pa, 1,
2600  r_w, 1 );
2601  MLX_FILL_1 ( &mpt, 2, mem_key, key );
2602  MLX_FILL_2 ( &mpt, 3,
2603  pd, ARBEL_GLOBAL_PD,
2604  rae, 1 );
2605  MLX_FILL_1 ( &mpt, 6, reg_wnd_len_h, 0xffffffffUL );
2606  MLX_FILL_1 ( &mpt, 7, reg_wnd_len_l, 0xffffffffUL );
2607  if ( ( rc = arbel_cmd_sw2hw_mpt ( arbel, arbel->limits.reserved_mrws,
2608  &mpt ) ) != 0 ) {
2609  DBGC ( arbel, "Arbel %p could not set up MPT: %s\n",
2610  arbel, strerror ( rc ) );
2611  return rc;
2612  }
2613 
2614  return 0;
2615 }
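
/* Illustrative sketch (editorial addition, not part of the original
 * driver): the lkey derivation above is a 32-bit rotate-left by
 * eight, which moves the top byte of the MPT key to the bottom; the
 * hardware and the verbs-visible lkey appear to keep the 8-bit key
 * tag and the 24-bit index in opposite byte positions.  Standalone
 * check (demo_* names invented for illustration):
 */
#include <assert.h>
#include <stdint.h>

static uint32_t demo_key_to_lkey ( uint32_t key ) {
	return ( ( key << 8 ) | ( key >> 24 ) ); /* rotate left by 8 */
}

static void demo_key_check ( void ) {
	assert ( demo_key_to_lkey ( 0xAB123456UL ) == 0x123456ABUL );
}
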
2616 
2617 /**
2618  * Configure special queue pairs
2619  *
2620  * @v arbel Arbel device
2621  * @ret rc Return status code
2622  */
2623 static int arbel_configure_special_qps ( struct arbel *arbel ) {
2624  unsigned int smi_qpn_base;
2625  unsigned int gsi_qpn_base;
2626  int rc;
2627 
2628  /* Special QP block must be aligned on an even number */
2629  arbel->special_qpn_base = ( ( arbel->limits.reserved_qps + 1 ) & ~1 );
2630  arbel->qpn_base = ( arbel->special_qpn_base +
2631  ARBEL_NUM_SPECIAL_QPS );
2632  DBGC ( arbel, "Arbel %p special QPs at [%lx,%lx]\n", arbel,
2633  arbel->special_qpn_base, ( arbel->qpn_base - 1 ) );
2634  smi_qpn_base = arbel->special_qpn_base;
2635  gsi_qpn_base = ( smi_qpn_base + 2 );
2636 
2637  /* Issue commands to configure special QPs */
2638  if ( ( rc = arbel_cmd_conf_special_qp ( arbel, 0,
2639  smi_qpn_base ) ) != 0 ) {
2640  DBGC ( arbel, "Arbel %p could not configure SMI QPs: %s\n",
2641  arbel, strerror ( rc ) );
2642  return rc;
2643  }
2644  if ( ( rc = arbel_cmd_conf_special_qp ( arbel, 1,
2645  gsi_qpn_base ) ) != 0 ) {
2646  DBGC ( arbel, "Arbel %p could not configure GSI QPs: %s\n",
2647  arbel, strerror ( rc ) );
2648  return rc;
2649  }
2650 
2651  return 0;
2652 }
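
/* Illustrative sketch (editorial addition, not part of the original
 * driver): the special QP block above must start on an even QPN,
 * achieved by rounding reserved_qps up to the next even number with
 * ( ( n + 1 ) & ~1 ).  Standalone check (demo_* names invented for
 * illustration):
 */
#include <assert.h>

static unsigned long demo_round_up_even ( unsigned long n ) {
	return ( ( n + 1 ) & ~1UL );
}

static void demo_round_up_even_check ( void ) {
	assert ( demo_round_up_even ( 8 ) == 8 ); /* already even */
	assert ( demo_round_up_even ( 9 ) == 10 );
}
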
2653 
2654 /**
2655  * Start Arbel device
2656  *
2657  * @v arbel Arbel device
2658  * @v running Firmware is already running
2659  * @ret rc Return status code
2660  */
2661 static int arbel_start ( struct arbel *arbel, int running ) {
2662  struct arbelprm_init_hca init_hca;
2663  unsigned int i;
2664  int rc;
2665 
2666  /* Start firmware if not already running */
2667  if ( ! running ) {
2668  if ( ( rc = arbel_start_firmware ( arbel ) ) != 0 )
2669  goto err_start_firmware;
2670  }
2671 
2672  /* Allocate ICM */
2673  memset ( &init_hca, 0, sizeof ( init_hca ) );
2674  if ( ( rc = arbel_alloc_icm ( arbel, &init_hca ) ) != 0 )
2675  goto err_alloc_icm;
2676 
2677  /* Initialise HCA */
2678  if ( ( rc = arbel_cmd_init_hca ( arbel, &init_hca ) ) != 0 ) {
2679  DBGC ( arbel, "Arbel %p could not initialise HCA: %s\n",
2680  arbel, strerror ( rc ) );
2681  goto err_init_hca;
2682  }
2683 
2684  /* Set up memory protection */
2685  if ( ( rc = arbel_setup_mpt ( arbel ) ) != 0 )
2686  goto err_setup_mpt;
2687  for ( i = 0 ; i < ARBEL_NUM_PORTS ; i++ )
2688  arbel->ibdev[i]->rdma_key = arbel->lkey;
2689 
2690  /* Set up event queue */
2691  if ( ( rc = arbel_create_eq ( arbel ) ) != 0 )
2692  goto err_create_eq;
2693 
2694  /* Configure special QPs */
2695  if ( ( rc = arbel_configure_special_qps ( arbel ) ) != 0 )
2696  goto err_conf_special_qps;
2697 
2698  return 0;
2699 
2700  err_conf_special_qps:
2701  arbel_destroy_eq ( arbel );
2702  err_create_eq:
2703  err_setup_mpt:
2704  arbel_cmd_close_hca ( arbel );
2705  err_init_hca:
2706  arbel_free_icm ( arbel );
2707  err_alloc_icm:
2708  arbel_stop_firmware ( arbel );
2709  err_start_firmware:
2710  return rc;
2711 }
2712 
2713 /**
2714  * Stop Arbel device
2715  *
2716  * @v arbel Arbel device
2717  */
2718 static void arbel_stop ( struct arbel *arbel ) {
2719  arbel_destroy_eq ( arbel );
2720  arbel_cmd_close_hca ( arbel );
2721  arbel_free_icm ( arbel );
2722  arbel_stop_firmware ( arbel );
2723  arbel_reset ( arbel );
2724 }
2725 
2726 /**
2727  * Open Arbel device
2728  *
2729  * @v arbel Arbel device
2730  * @ret rc Return status code
2731  */
2732 static int arbel_open ( struct arbel *arbel ) {
2733  int rc;
2734 
2735  /* Start device if applicable */
2736  if ( arbel->open_count == 0 ) {
2737  if ( ( rc = arbel_start ( arbel, 0 ) ) != 0 )
2738  return rc;
2739  }
2740 
2741  /* Increment open counter */
2742  arbel->open_count++;
2743 
2744  return 0;
2745 }
2746 
2747 /**
2748  * Close Arbel device
2749  *
2750  * @v arbel Arbel device
2751  */
2752 static void arbel_close ( struct arbel *arbel ) {
2753 
2754  /* Decrement open counter */
2755  assert ( arbel->open_count != 0 );
2756  arbel->open_count--;
2757 
2758  /* Stop device if applicable */
2759  if ( arbel->open_count == 0 )
2760  arbel_stop ( arbel );
2761 }
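
/* Illustrative sketch (editorial addition, not part of the original
 * driver): arbel_open() and arbel_close() reference-count the device
 * so that the expensive start/stop sequence runs only on the first
 * open and the last close, even with both ports open concurrently.
 * The same idiom in miniature (demo_* names invented):
 */
#include <assert.h>

struct demo_dev {
	unsigned int open_count;
	int running;
};

static void demo_open ( struct demo_dev *dev ) {
	if ( dev->open_count == 0 )
		dev->running = 1; /* start hardware on first open only */
	dev->open_count++;
}

static void demo_close ( struct demo_dev *dev ) {
	assert ( dev->open_count != 0 );
	if ( --dev->open_count == 0 )
		dev->running = 0; /* stop hardware on last close only */
}
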
2762 
2763 /***************************************************************************
2764  *
2765  * Infiniband link-layer operations
2766  *
2767  ***************************************************************************
2768  */
2769 
2770 /**
2771  * Initialise Infiniband link
2772  *
2773  * @v ibdev Infiniband device
2774  * @ret rc Return status code
2775  */
2776 static int arbel_ib_open ( struct ib_device *ibdev ) {
2777  struct arbel *arbel = ib_get_drvdata ( ibdev );
2778  struct arbelprm_init_ib init_ib;
2779  int rc;
2780 
2781  /* Open hardware */
2782  if ( ( rc = arbel_open ( arbel ) ) != 0 )
2783  goto err_open;
2784 
2785  /* Initialise IB */
2786  memset ( &init_ib, 0, sizeof ( init_ib ) );
2787  MLX_FILL_3 ( &init_ib, 0,
2788  mtu_cap, ARBEL_MTU_2048,
2789  port_width_cap, 3,
2790  vl_cap, 1 );
2791  MLX_FILL_1 ( &init_ib, 1, max_gid, 1 );
2792  MLX_FILL_1 ( &init_ib, 2, max_pkey, 64 );
2793  if ( ( rc = arbel_cmd_init_ib ( arbel, ibdev->port,
2794  &init_ib ) ) != 0 ) {
2795  DBGC ( arbel, "Arbel %p port %d could not initialise IB: %s\n",
2796  arbel, ibdev->port, strerror ( rc ) );
2797  goto err_init_ib;
2798  }
2799 
2800  /* Update MAD parameters */
2801  ib_smc_update ( ibdev, arbel_mad );
2802 
2803  return 0;
2804 
2805  err_init_ib:
2806  arbel_close ( arbel );
2807  err_open:
2808  return rc;
2809 }
2810 
2811 /**
2812  * Close Infiniband link
2813  *
2814  * @v ibdev Infiniband device
2815  */
2816 static void arbel_ib_close ( struct ib_device *ibdev ) {
2817  struct arbel *arbel = ib_get_drvdata ( ibdev );
2818  int rc;
2819 
2820  /* Close IB */
2821  if ( ( rc = arbel_cmd_close_ib ( arbel, ibdev->port ) ) != 0 ) {
2822  DBGC ( arbel, "Arbel %p port %d could not close IB: %s\n",
2823  arbel, ibdev->port, strerror ( rc ) );
2824  /* Nothing we can do about this */
2825  }
2826 
2827  /* Close hardware */
2828  arbel_close ( arbel );
2829 }
2830 
2831 /**
2832  * Inform embedded subnet management agent of a received MAD
2833  *
2834  * @v ibdev Infiniband device
2835  * @v mad MAD
2836  * @ret rc Return status code
2837  */
2838 static int arbel_inform_sma ( struct ib_device *ibdev, union ib_mad *mad ) {
2839  int rc;
2840 
2841  /* Send the MAD to the embedded SMA */
2842  if ( ( rc = arbel_mad ( ibdev, mad ) ) != 0 )
2843  return rc;
2844 
2845  /* Update parameters held in software */
2846  ib_smc_update ( ibdev, arbel_mad );
2847 
2848  return 0;
2849 }
2850 
2851 /***************************************************************************
2852  *
2853  * Multicast group operations
2854  *
2855  ***************************************************************************
2856  */
2857 
2858 /**
2859  * Attach to multicast group
2860  *
2861  * @v ibdev Infiniband device
2862  * @v qp Queue pair
2863  * @v gid Multicast GID
2864  * @ret rc Return status code
2865  */
2866 static int arbel_mcast_attach ( struct ib_device *ibdev,
2867  struct ib_queue_pair *qp,
2868  union ib_gid *gid ) {
2869  struct arbel *arbel = ib_get_drvdata ( ibdev );
2870  struct arbelprm_mgm_hash hash;
2871  struct arbelprm_mgm_entry mgm;
2872  unsigned int index;
2873  int rc;
2874 
2875  /* Generate hash table index */
2876  if ( ( rc = arbel_cmd_mgid_hash ( arbel, gid, &hash ) ) != 0 ) {
2877  DBGC ( arbel, "Arbel %p could not hash GID: %s\n",
2878  arbel, strerror ( rc ) );
2879  return rc;
2880  }
2881  index = MLX_GET ( &hash, hash );
2882 
2883  /* Check for existing hash table entry */
2884  if ( ( rc = arbel_cmd_read_mgm ( arbel, index, &mgm ) ) != 0 ) {
2885  DBGC ( arbel, "Arbel %p could not read MGM %#x: %s\n",
2886  arbel, index, strerror ( rc ) );
2887  return rc;
2888  }
2889  if ( MLX_GET ( &mgm, mgmqp_0.qi ) != 0 ) {
2890  /* FIXME: this implementation allows only a single QP
2891  * per multicast group, and doesn't handle hash
2892  * collisions. Sufficient for IPoIB but may need to
2893  * be extended in future.
2894  */
2895  DBGC ( arbel, "Arbel %p MGID index %#x already in use\n",
2896  arbel, index );
2897  return -EBUSY;
2898  }
2899 
2900  /* Update hash table entry */
2901  MLX_FILL_2 ( &mgm, 8,
2902  mgmqp_0.qpn_i, qp->qpn,
2903  mgmqp_0.qi, 1 );
2904  memcpy ( &mgm.u.dwords[4], gid, sizeof ( *gid ) );
2905  if ( ( rc = arbel_cmd_write_mgm ( arbel, index, &mgm ) ) != 0 ) {
2906  DBGC ( arbel, "Arbel %p could not write MGM %#x: %s\n",
2907  arbel, index, strerror ( rc ) );
2908  return rc;
2909  }
2910 
2911  return 0;
2912 }
2913 
2914 /**
2915  * Detach from multicast group
2916  *
2917  * @v ibdev Infiniband device
2918  * @v qp Queue pair
2919  * @v gid Multicast GID
2920  */
2921 static void arbel_mcast_detach ( struct ib_device *ibdev,
2922  struct ib_queue_pair *qp __unused,
2923  union ib_gid *gid ) {
2924  struct arbel *arbel = ib_get_drvdata ( ibdev );
2925  struct arbelprm_mgm_hash hash;
2926  struct arbelprm_mgm_entry mgm;
2927  unsigned int index;
2928  int rc;
2929 
2930  /* Generate hash table index */
2931  if ( ( rc = arbel_cmd_mgid_hash ( arbel, gid, &hash ) ) != 0 ) {
2932  DBGC ( arbel, "Arbel %p could not hash GID: %s\n",
2933  arbel, strerror ( rc ) );
2934  return;
2935  }
2936  index = MLX_GET ( &hash, hash );
2937 
2938  /* Clear hash table entry */
2939  memset ( &mgm, 0, sizeof ( mgm ) );
2940  if ( ( rc = arbel_cmd_write_mgm ( arbel, index, &mgm ) ) != 0 ) {
2941  DBGC ( arbel, "Arbel %p could not write MGM %#x: %s\n",
2942  arbel, index, strerror ( rc ) );
2943  return;
2944  }
2945 }
2946 
2947 /** Arbel Infiniband operations */
2948 static struct ib_device_operations arbel_ib_operations = {
2949  .create_cq = arbel_create_cq,
2950  .destroy_cq = arbel_destroy_cq,
2951  .create_qp = arbel_create_qp,
2952  .modify_qp = arbel_modify_qp,
2953  .destroy_qp = arbel_destroy_qp,
2954  .post_send = arbel_post_send,
2955  .post_recv = arbel_post_recv,
2956  .poll_cq = arbel_poll_cq,
2957  .poll_eq = arbel_poll_eq,
2958  .open = arbel_ib_open,
2959  .close = arbel_ib_close,
2960  .mcast_attach = arbel_mcast_attach,
2961  .mcast_detach = arbel_mcast_detach,
2962  .set_port_info = arbel_inform_sma,
2963  .set_pkey_table = arbel_inform_sma,
2964 };
2965 
2966 /***************************************************************************
2967  *
2968  * PCI interface
2969  *
2970  ***************************************************************************
2971  */
2972 
2973 /**
2974  * Allocate Arbel device
2975  *
2976  * @ret arbel Arbel device
2977  */
2978 static struct arbel * arbel_alloc ( void ) {
2979  struct arbel *arbel;
2980 
2981  /* Allocate Arbel device */
2982  arbel = zalloc ( sizeof ( *arbel ) );
2983  if ( ! arbel )
2984  goto err_arbel;
2985 
2986  /* Allocate space for mailboxes */
2987  arbel->mailbox_in = malloc_phys ( ARBEL_MBOX_SIZE, ARBEL_MBOX_ALIGN );
2988  if ( ! arbel->mailbox_in )
2989  goto err_mailbox_in;
2990  arbel->mailbox_out = malloc_phys ( ARBEL_MBOX_SIZE, ARBEL_MBOX_ALIGN );
2991  if ( ! arbel->mailbox_out )
2992  goto err_mailbox_out;
2993 
2994  return arbel;
2995 
2996  free_phys ( arbel->mailbox_out, ARBEL_MBOX_SIZE );
2997  err_mailbox_out:
2998  free_phys ( arbel->mailbox_in, ARBEL_MBOX_SIZE );
2999  err_mailbox_in:
3000  free ( arbel );
3001  err_arbel:
3002  return NULL;
3003 }
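
/* Illustrative sketch (editorial addition, not part of the original
 * driver): the error ladder used throughout this file, including
 * arbel_alloc() above.  Each acquisition is undone below its matching
 * label, so a failure at step N falls through the labels and releases
 * steps N-1..1 in reverse order.  Miniature standalone version
 * (demo_* names invented for illustration):
 */
#include <stdlib.h>

static int demo_setup ( void **a, void **b ) {
	if ( ! ( *a = malloc ( 16 ) ) )
		goto err_a;
	if ( ! ( *b = malloc ( 16 ) ) )
		goto err_b;
	return 0;

 err_b:
	free ( *a );
 err_a:
	return -1;
}
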
3004 
3005 /**
3006  * Free Arbel device
3007  *
3008  * @v arbel Arbel device
3009  */
3010 static void arbel_free ( struct arbel *arbel ) {
3011 
3012  ufree ( arbel->icm );
3013  ufree ( arbel->firmware_area );
3014  free_phys ( arbel->mailbox_out, ARBEL_MBOX_SIZE );
3015  free_phys ( arbel->mailbox_in, ARBEL_MBOX_SIZE );
3016  free ( arbel );
3017 }
3018 
3019 /**
3020  * Probe PCI device
3021  *
3022  * @v pci PCI device
3024  * @ret rc Return status code
3025  */
3026 static int arbel_probe ( struct pci_device *pci ) {
3027  struct arbel *arbel;
3028  struct ib_device *ibdev;
3029  unsigned long config;
3030  unsigned long uar;
3031  int i;
3032  int rc;
3033 
3034  /* Allocate Arbel device */
3035  arbel = arbel_alloc();
3036  if ( ! arbel ) {
3037  rc = -ENOMEM;
3038  goto err_alloc;
3039  }
3040  pci_set_drvdata ( pci, arbel );
3041  arbel->pci = pci;
3042 
3043  /* Fix up PCI device */
3044  adjust_pci_device ( pci );
3045 
3046  /* Map PCI BARs */
3047  config = pci_bar_start ( pci, ARBEL_PCI_CONFIG_BAR );
3048  arbel->config = pci_ioremap ( pci, config, ARBEL_PCI_CONFIG_BAR_SIZE );
3049  uar = ( pci_bar_start ( pci, ARBEL_PCI_UAR_BAR ) +
3050  ARBEL_PCI_UAR_IDX * ARBEL_PAGE_SIZE );
3051  arbel->uar = pci_ioremap ( pci, uar, ARBEL_PCI_UAR_SIZE );
3052 
3053  /* Allocate Infiniband devices */
3054  for ( i = 0 ; i < ARBEL_NUM_PORTS ; i++ ) {
3055  ibdev = alloc_ibdev ( 0 );
3056  if ( ! ibdev ) {
3057  rc = -ENOMEM;
3058  goto err_alloc_ibdev;
3059  }
3060  arbel->ibdev[i] = ibdev;
3061  ibdev->op = &arbel_ib_operations;
3062  ibdev->dev = &pci->dev;
3063  ibdev->port = ( ARBEL_PORT_BASE + i );
3064  ibdev->ports = ARBEL_NUM_PORTS;
3065  ib_set_drvdata ( ibdev, arbel );
3066  }
3067 
3068  /* Reset device */
3069  arbel_reset ( arbel );
3070 
3071  /* Start firmware */
3072  if ( ( rc = arbel_start_firmware ( arbel ) ) != 0 )
3073  goto err_start_firmware;
3074 
3075  /* Get device limits */
3076  if ( ( rc = arbel_get_limits ( arbel ) ) != 0 )
3077  goto err_get_limits;
3078 
3079  /* Start device */
3080  if ( ( rc = arbel_start ( arbel, 1 ) ) != 0 )
3081  goto err_start;
3082 
3083  /* Initialise parameters using SMC */
3084  for ( i = 0 ; i < ARBEL_NUM_PORTS ; i++ )
3085  ib_smc_init ( arbel->ibdev[i], arbel_mad );
3086 
3087  /* Register Infiniband devices */
3088  for ( i = 0 ; i < ARBEL_NUM_PORTS ; i++ ) {
3089  if ( ( rc = register_ibdev ( arbel->ibdev[i] ) ) != 0 ) {
3090  DBGC ( arbel, "Arbel %p port %d could not register IB "
3091  "device: %s\n", arbel,
3092  arbel->ibdev[i]->port, strerror ( rc ) );
3093  goto err_register_ibdev;
3094  }
3095  }
3096 
3097  /* Leave device quiescent until opened */
3098  if ( arbel->open_count == 0 )
3099  arbel_stop ( arbel );
3100 
3101  return 0;
3102 
3103  i = ARBEL_NUM_PORTS;
3104  err_register_ibdev:
3105  for ( i-- ; i >= 0 ; i-- )
3106  unregister_ibdev ( arbel->ibdev[i] );
3107  arbel_stop ( arbel );
3108  err_start:
3109  err_get_limits:
3110  arbel_stop_firmware ( arbel );
3111  err_start_firmware:
3112  i = ARBEL_NUM_PORTS;
3113  err_alloc_ibdev:
3114  for ( i-- ; i >= 0 ; i-- )
3115  ibdev_put ( arbel->ibdev[i] );
3116  iounmap ( arbel->uar );
3117  iounmap ( arbel->config );
3118  arbel_free ( arbel );
3119  err_alloc:
3120  return rc;
3121 }
3122 
3123 /**
3124  * Remove PCI device
3125  *
3126  * @v pci PCI device
3127  */
3128 static void arbel_remove ( struct pci_device *pci ) {
3129  struct arbel *arbel = pci_get_drvdata ( pci );
3130  int i;
3131 
3132  for ( i = ( ARBEL_NUM_PORTS - 1 ) ; i >= 0 ; i-- )
3133  unregister_ibdev ( arbel->ibdev[i] );
3134  for ( i = ( ARBEL_NUM_PORTS - 1 ) ; i >= 0 ; i-- )
3135  ibdev_put ( arbel->ibdev[i] );
3136  iounmap ( arbel->uar );
3137  iounmap ( arbel->config );
3138  arbel_free ( arbel );
3139 }
3140 
3141 static struct pci_device_id arbel_nics[] = {
3142  PCI_ROM ( 0x15b3, 0x6274, "mt25204", "MT25204 HCA driver", 0 ),
3143  PCI_ROM ( 0x15b3, 0x6282, "mt25218", "MT25218 HCA driver", 0 ),
3144 };
3145 
3146 struct pci_driver arbel_driver __pci_driver = {
3147  .ids = arbel_nics,
3148  .id_count = ( sizeof ( arbel_nics ) / sizeof ( arbel_nics[0] ) ),
3149  .probe = arbel_probe,
3150  .remove = arbel_remove,
3151 };
void unregister_ibdev(struct ib_device *ibdev)
Unregister Infiniband device.
Definition: infiniband.c:985
static int arbel_cmd_unmap_icm_aux(struct arbel *arbel)
Definition: arbel.c:487
struct arbelprm_wqe_segment_ud ud
Definition: arbel.h:223
static size_t icm_align(size_t icm_offset, size_t len)
Align ICM table.
Definition: arbel.c:2210
uint32_t c
Definition: md4.c:30
struct ib_global_route_header * grh
GRH buffers (if applicable)
Definition: arbel.h:373
#define __attribute__(x)
Definition: compiler.h:10
#define ARBEL_HCR_READ_MGM
Definition: arbel.h:74
static void arbel_free_icm(struct arbel *arbel)
Free ICM.
Definition: arbel.c:2529
#define EINVAL
Invalid argument.
Definition: errno.h:428
static __always_inline void ib_set_drvdata(struct ib_device *ibdev, void *priv)
Set Infiniband device driver-private data.
Definition: infiniband.h:697
static int arbel_mad(struct ib_device *ibdev, union ib_mad *mad)
Issue management datagram.
Definition: arbel.c:543
static void arbel_destroy_eq(struct arbel *arbel)
Destroy event queue.
Definition: arbel.c:1820
static int arbel_cmd_rst2init_qpee(struct arbel *arbel, unsigned long qpn, const struct arbelprm_qp_ee_state_transitions *ctx)
Definition: arbel.c:349
iPXE I/O API
struct arbelprm_rc_send_wqe rc
Definition: arbel.h:14
Infiniband protocol.
#define MLX_FILL_7(_ptr, _index,...)
Definition: mlx_bitops.h:191
void pci_restore(struct pci_device *pci, struct pci_config_backup *backup, unsigned int limit, const uint8_t *exclude)
Restore PCI configuration space.
Definition: pcibackup.c:87
unsigned short uint16_t
Definition: stdint.h:11
#define ARBEL_OPCODE_SEND_ERROR
Definition: arbel.h:50
An Arbel send work queue entry.
Definition: arbel.h:337
arbel_bitmask_t cq_inuse[ARBEL_BITMASK_SIZE(ARBEL_MAX_CQS)]
Completion queue in-use bitmask.
Definition: arbel.h:520
uint32_t low
Low 16 bits of address.
Definition: myson.h:19
static int arbel_cmd_map_eq(struct arbel *arbel, unsigned long index_map, const struct arbelprm_event_mask *mask)
Definition: arbel.c:295
#define MLX_FILL_2(_ptr, _index,...)
Definition: mlx_bitops.h:171
static unsigned int arbel_cq_arm_doorbell_idx(struct arbel *arbel, struct ib_completion_queue *cq)
Get arm completion queue doorbell index.
Definition: arbel.h:612
static unsigned int arbel_send_doorbell_idx(struct arbel *arbel, struct ib_queue_pair *qp)
Get send work request doorbell index.
Definition: arbel.h:625
#define ARBEL_HCR_QUERY_QPEE
Definition: arbel.h:71
static int arbel_cmd_map_fa(struct arbel *arbel, const struct arbelprm_virtual_physical_mapping *map)
Definition: arbel.c:521
#define iob_put(iobuf, len)
Definition: iobuf.h:120
#define ARBEL_GROUP_SEPARATOR_DOORBELL
Definition: arbel.h:601
#define MLX_FILL_4(_ptr, _index,...)
Definition: mlx_bitops.h:179
#define IB_QPN_SMI
Subnet management interface QPN.
Definition: infiniband.h:21
static void arbel_poll_cq(struct ib_device *ibdev, struct ib_completion_queue *cq)
Poll completion queue.
Definition: arbel.c:1694
#define ARBEL_HCR_RTS2RTS_QPEE
Definition: arbel.h:69
unsigned int reserved_srqs
Number of reserved SRQs.
Definition: arbel.h:300
A PCI driver.
Definition: pci.h:247
#define EBUSY
Device or resource busy.
Definition: errno.h:338
static int ib_is_open(struct ib_device *ibdev)
Check whether or not Infiniband device is open.
Definition: infiniband.h:576
#define ARBEL_HCR_OUT_LEN(_command)
Definition: arbel.h:567
unsigned int reserved_mtts
Number of reserved MTTs.
Definition: arbel.h:316
Infiniband device operations.
Definition: infiniband.h:254
union arbel_recv_wqe * wqe
Work queue entries.
Definition: arbel.h:369
__be32 in[4]
Definition: CIB_PRM.h:35
static void arbel_poll_eq(struct ib_device *ibdev)
Poll event queue.
Definition: arbel.c:1884
An Arbel send work queue.
Definition: arbel.h:346
userptr_t icm
ICM area.
Definition: arbel.h:506
#define ARBEL_UAR_RES_SQ
Definition: arbel.h:43
static void arbel_ib_close(struct ib_device *ibdev)
Close Infiniband link.
Definition: arbel.c:2816
static void arbel_destroy_qp(struct ib_device *ibdev, struct ib_queue_pair *qp)
Destroy queue pair.
Definition: arbel.c:1211
#define ARBEL_HCR_INOUT_CMD(_opcode, _in_mbox, _in_len, _out_mbox, _out_len)
Build HCR command from component parts.
Definition: arbel.h:570
static unsigned int unsigned int bit
Definition: bigint.h:208
#define ARBEL_UAR_RES_CQ_CI
Definition: arbel.h:41
#define ARBEL_HCR_CLOSE_HCA
Definition: arbel.h:56
static int arbel_alloc_icm(struct arbel *arbel, struct arbelprm_init_hca *init_hca)
Allocate ICM.
Definition: arbel.c:2224
uint8_t opcode
Opcode.
Definition: ena.h:16
void * eq_ci_doorbells
Event queue consumer index doorbells.
Definition: arbel.h:477
__be32 lkey
Definition: CIB_PRM.h:29
static int arbel_cmd_query_cq(struct arbel *arbel, unsigned long cqn, struct arbelprm_completion_queue_context *cqctx)
Definition: arbel.c:340
#define ARBEL_HCR_MAD_IFC
Definition: arbel.h:73
Error codes.
__SIZE_TYPE__ size_t
Definition: stdint.h:6
#define ARBEL_MBOX_SIZE
Definition: arbel.h:550
static int arbel_cmd_write_mgm(struct arbel *arbel, unsigned int index, const struct arbelprm_mgm_entry *mgm)
Definition: arbel.c:428
uint8_t headers[IB_MAX_HEADER_SIZE]
Definition: arbel.h:231
u8 owner
Definition: CIB_PRM.h:36
size_t eeec_entry_size
Extended EE context entry size.
Definition: arbel.h:308
static unsigned int arbel_cq_ci_doorbell_idx(struct arbel *arbel, struct ib_completion_queue *cq)
Get completion queue consumer counter doorbell index.
Definition: arbel.h:653
struct ib_device * ibdev[ARBEL_NUM_PORTS]
Infiniband devices.
Definition: arbel.h:532
static int arbel_cmd_hw2sw_cq(struct arbel *arbel, unsigned long cqn, struct arbelprm_completion_queue_context *cqctx)
Definition: arbel.c:331
A command-line command.
Definition: command.h:9
static int arbel_create_cq(struct ib_device *ibdev, struct ib_completion_queue *cq)
Create completion queue.
Definition: arbel.c:611
I/O buffers.
size_t eqc_entry_size
EQ context entry size.
Definition: arbel.h:326
#define ARBEL_ST_MLX
Definition: arbel.h:91
#define ARBEL_UAR_RES_NONE
Definition: arbel.h:40
#define DBG_ENABLE(level)
Definition: compiler.h:313
struct pci_device_id * ids
PCI ID table.
Definition: pci.h:249
static unsigned short vendor
Definition: davicom.c:128
struct arbelprm_eq_set_ci ci
Definition: arbel.h:277
static int arbel_create_send_wq(struct arbel_send_work_queue *arbel_send_wq, unsigned int num_wqes)
Create send work queue.
Definition: arbel.c:866
#define ARBEL_HCR_UNMAP_ICM_AUX
Definition: arbel.h:82
static int arbel_alloc_qpn(struct ib_device *ibdev, struct ib_queue_pair *qp)
Assign queue pair number.
Definition: arbel.c:764
uint32_t g
Definition: sha256.c:34
#define ARBEL_HCR_INIT2RTR_QPEE
Definition: arbel.h:67
uint32_t readl(volatile uint32_t *io_addr)
Read 32-bit dword from memory-mapped device.
unsigned long user_to_phys(userptr_t userptr, off_t offset)
Convert user pointer to physical address.
static int arbel_cmd_disable_lam(struct arbel *arbel)
Definition: arbel.c:454
struct arbelprm_wqe_segment_data_ptr data[ARBEL_MAX_SCATTER]
Definition: arbel.h:252
static int arbel_cmd_sw2hw_mpt(struct arbel *arbel, unsigned int index, const struct arbelprm_mpt *mpt)
Definition: arbel.c:286
void * mailbox_out
Command output mailbox.
Definition: arbel.h:482
unsigned int doorbell_idx
Doorbell record number.
Definition: arbel.h:367
#define DBGC(...)
Definition: compiler.h:505
size_t wqe_size
Size of work queue.
Definition: arbel.h:352
static size_t arbel_fill_rc_send_wqe(struct ib_device *ibdev, struct ib_queue_pair *qp __unused, struct ib_address_vector *dest __unused, struct io_buffer *iobuf, union arbel_send_wqe *wqe)
Construct RC send work queue entry.
Definition: arbel.c:1382
#define ARBEL_GLOBAL_PD
Global protection domain.
Definition: arbel.h:536
#define ARBEL_RESET_MAGIC
Definition: arbel.h:36
__be32 byte_count
Definition: CIB_PRM.h:28
struct arbelprm_wqe_segment_data_ptr data[ARBEL_MAX_GATHER]
Definition: arbel.h:230
struct device * dev
Underlying device.
Definition: infiniband.h:410
static void arbel_close(struct arbel *arbel)
Close Arbel device.
Definition: arbel.c:2752
unsigned long long uint64_t
Definition: stdint.h:13
#define DBG_DISABLE(level)
Definition: compiler.h:312
static void *__malloc malloc_phys(size_t size, size_t phys_align)
Allocate memory with specified physical alignment.
Definition: malloc.h:62
static __always_inline void * ib_qp_get_drvdata(struct ib_queue_pair *qp)
Get Infiniband queue pair driver-private data.
Definition: infiniband.h:642
void * config
PCI configuration registers.
Definition: arbel.h:473
#define ntohl(value)
Definition: byteswap.h:134
int pci_read_config_word(struct pci_device *pci, unsigned int where, uint16_t *value)
Read 16-bit word from PCI configuration space.
struct ib_global_route_header grh
Definition: ib_packet.h:16
#define ARBEL_UAR_RES_CQ_ARM
Definition: arbel.h:42
unsigned int reserved_eqs
Number of reserved EQs.
Definition: arbel.h:314
#define ntohs(value)
Definition: byteswap.h:136
unsigned long eqn
Event queue number.
Definition: arbel.h:446
unsigned int doorbell_idx
Doorbell record number.
Definition: arbel.h:348
static int arbel_create_recv_wq(struct arbel_recv_work_queue *arbel_recv_wq, unsigned int num_wqes, enum ib_queue_pair_type type)
Create receive work queue.
Definition: arbel.c:903
static int arbel_cmd_unmap_icm(struct arbel *arbel, unsigned int page_count, const struct arbelprm_scalar_parameter *offset)
Definition: arbel.c:469
unsigned long lkey
Unrestricted LKey.
Definition: arbel.h:517
#define offsetof(type, field)
Get offset of a field within a structure.
Definition: stddef.h:24
static void arbel_mcast_detach(struct ib_device *ibdev, struct ib_queue_pair *qp __unused, union ib_gid *gid)
Detach from multicast group.
Definition: arbel.c:2921
unsigned int gid_present
GID is present.
Definition: infiniband.h:90
#define static_assert(x)
Assert a condition at build time.
Definition: assert.h:65
uint32_t arbel_bitmask_t
An Arbel resource bitmask.
Definition: arbel.h:461
static size_t arbel_fill_ud_send_wqe(struct ib_device *ibdev, struct ib_queue_pair *qp __unused, struct ib_address_vector *dest, struct io_buffer *iobuf, union arbel_send_wqe *wqe)
Construct UD send work queue entry.
Definition: arbel.c:1288
static void iob_populate(struct io_buffer *iobuf, void *data, size_t len, size_t max_len)
Create a temporary I/O buffer.
Definition: iobuf.h:190
union arbel_send_wqe * wqe
Work queue entries.
Definition: arbel.h:350
struct arbelprm_qp_db_record qp
Definition: arbel.h:268
static int arbel_start_firmware(struct arbel *arbel)
Start firmware running.
Definition: arbel.c:2038
size_t cqc_entry_size
CQ context entry size.
Definition: arbel.h:312
Definition: arbel.h:260
union arbelprm_doorbell_record * db_rec
Doorbell records.
Definition: arbel.h:510
#define PCI_CONFIG_BACKUP_ALL
Limit of PCI configuration space.
Definition: pcibackup.h:15
struct arbelprm_recv_wqe recv
Definition: arbel.h:11
static int arbel_cmd_wait(struct arbel *arbel, struct arbelprm_hca_command_register *hcr)
Wait for Arbel command completion.
Definition: arbel.c:114
static int arbel_ib_open(struct ib_device *ibdev)
Initialise Infiniband link.
Definition: arbel.c:2776
void adjust_pci_device(struct pci_device *pci)
Enable PCI device.
Definition: pci.c:154
size_t eqpc_entry_size
Extended QP context entry size.
Definition: arbel.h:298
An Infiniband Global Identifier.
Definition: ib_packet.h:33
static __always_inline unsigned long virt_to_phys(volatile const void *addr)
Convert virtual address to a physical address.
Definition: uaccess.h:287
__be32 qpn
Definition: CIB_PRM.h:29
unsigned int ci_doorbell_idx
Consumer counter doorbell record number.
Definition: arbel.h:424
#define ARBEL_OPCODE_SEND
Definition: arbel.h:48
#define ARBEL_MAX_EQS
Maximum number of allocatable event queues.
Definition: arbel.h:437
struct device dev
Generic device.
Definition: pci.h:208
uint32_t a
Definition: md4.c:28
struct arbel_event_queue eq
Event queue.
Definition: arbel.h:512
static const union ib_gid arbel_no_gid
GID used for GID-less send work queue entries.
Definition: arbel.c:1274
static int arbel_cmd_map_icm_aux(struct arbel *arbel, const struct arbelprm_virtual_physical_mapping *map)
Definition: arbel.c:494
size_t grh_size
Size of GRB buffers.
Definition: arbel.h:375
__be32 out[4]
Definition: CIB_PRM.h:36
static int arbel_dump_cqctx(struct arbel *arbel, struct ib_completion_queue *cq)
Dump completion queue context (for debugging only)
Definition: arbel.c:588
union ib_gid dgid
Destiniation GID.
Definition: ib_packet.h:106
#define ENOTSUP
Operation not supported.
Definition: errno.h:589
#define ARBEL_HCR_RTR2RTS_QPEE
Definition: arbel.h:68
static void arbel_ring_doorbell(struct arbel *arbel, union arbelprm_doorbell_register *db_reg, unsigned int offset)
Ring doorbell register in UAR.
Definition: arbel.c:1259
enum ib_rate rate
Rate.
Definition: infiniband.h:86
Dynamic memory allocation.
#define ARBEL_HCR_SW2HW_MPT
Definition: arbel.h:59
union arbelprm_event_entry * eqe
Event queue entries.
Definition: arbel.h:442
struct arbelprm_send_doorbell send
Definition: arbel.h:272
static unsigned int arbel_rate(struct ib_address_vector *av)
Calculate transmission rate.
Definition: arbel.c:822
struct sockaddr_tcpip st
Definition: syslog.c:56
uint32_t start
Starting offset.
Definition: netvsc.h:12
static int arbel_cmd(struct arbel *arbel, unsigned long command, unsigned int op_mod, const void *in, unsigned int in_mod, void *out)
Issue HCA command.
Definition: arbel.c:139
An Infiniband device.
Definition: infiniband.h:398
uint8_t status
Status.
Definition: ena.h:16
static int arbel_cmd_sw2hw_eq(struct arbel *arbel, unsigned int index, const struct arbelprm_eqc *eqctx)
Definition: arbel.c:304
static int arbel_create_eq(struct arbel *arbel)
Create event queue.
Definition: arbel.c:1744
unsigned int reserved_rdbs
Number of reserved RDBs.
Definition: arbel.h:324
pseudo_bit_t ci[0x00020]
Definition: arbel.h:11
#define ARBEL_LOG_MULTICAST_HASH_SIZE
Definition: arbel.h:114
size_t srqc_entry_size
SRQ context entry size.
Definition: arbel.h:302
#define DBGCP_HDA(...)
Definition: compiler.h:540
#define ARBEL_QPN_RANDOM_MASK
Queue pair number randomisation mask.
Definition: arbel.h:395
#define MLX_FILL_3(_ptr, _index,...)
Definition: mlx_bitops.h:175
static void pci_set_drvdata(struct pci_device *pci, void *priv)
Set PCI driver-private data.
Definition: pci.h:359
static int arbel_cmd_set_icm_size(struct arbel *arbel, const struct arbelprm_scalar_parameter *icm_size, struct arbelprm_scalar_parameter *icm_aux_size)
Definition: arbel.c:503
static int arbel_cmd_map_icm(struct arbel *arbel, const struct arbelprm_virtual_physical_mapping *map)
Definition: arbel.c:478
#define ENOMEM
Not enough space.
Definition: errno.h:534
#define ARBEL_RDB_ENTRY_SIZE
Definition: arbel.h:102
struct arbelprm_mlx_send_wqe mlx
Definition: arbel.h:340
static int arbel_cmd_hw2sw_eq(struct arbel *arbel, unsigned int index, struct arbelprm_eqc *eqctx)
Definition: arbel.c:313
uint8_t bytes[16]
Definition: ib_packet.h:34
static int arbel_post_recv(struct ib_device *ibdev, struct ib_queue_pair *qp, struct io_buffer *iobuf)
Post receive work queue entry.
Definition: arbel.c:1498
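The shape of a posted receive, sketched: one scatter entry covering the free space of the I/O buffer, plus ring bookkeeping so the completion handler can hand the buffer back. The MLX_FILL_1() dword indices and the arbel->lkey memory key are assumptions following the PRM layout, not a verbatim excerpt:

static void example_build_recv ( struct arbel *arbel,
                                 struct ib_work_queue *wq,
                                 struct arbelprm_recv_wqe *wqe,
                                 struct io_buffer *iobuf ) {
        /* Single scatter entry covering the whole empty buffer */
        MLX_FILL_1 ( &wqe->data[0], 0, byte_count, iob_tailroom ( iobuf ) );
        MLX_FILL_1 ( &wqe->data[0], 1, l_key, arbel->lkey );
        MLX_FILL_1 ( &wqe->data[0], 3, local_address_l,
                     virt_to_bus ( iobuf->data ) );

        /* Record buffer for completion handling, then advance the ring */
        wq->iobufs[ wq->next_idx & ( wq->num_wqes - 1 ) ] = iobuf;
        wq->next_idx++;
}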
void * memcpy(void *dest, const void *src, size_t len) __nonnull
#define ARBEL_HCR_OUT_CMD(_opcode, _out_mbox, _out_len)
Definition: arbel.h:581
#define ARBEL_NUM_EQES
Number of event queue entries.
Definition: arbel.h:457
u8 port
Port number.
Definition: CIB_PRM.h:31
static __always_inline void * ib_get_drvdata(struct ib_device *ibdev)
Get Infiniband device driver-private data.
Definition: infiniband.h:708
static int arbel_cmd_mgid_hash(struct arbel *arbel, const union ib_gid *gid, struct arbelprm_mgm_hash *hash)
Definition: arbel.c:437
#define ARBEL_DB_POST_SND_OFFSET
Definition: arbel.h:104
struct arbelprm_event_queue_entry generic
Definition: arbel.h:261
int ib_smc_init(struct ib_device *ibdev, ib_local_mad_t local_mad)
Initialise Infiniband parameters using SMC.
Definition: ib_smc.c:232
static __always_inline unsigned long virt_to_bus(volatile const void *addr)
Convert virtual address to a bus address.
Definition: io.h:183
static int arbel_cmd_init_hca(struct arbel *arbel, const struct arbelprm_init_hca *init_hca)
Definition: arbel.c:254
#define ARBEL_HCR_BASE
Definition: arbel.h:546
#define ARBEL_HCR_IN_CMD(_opcode, _in_mbox, _in_len)
Definition: arbel.h:578
#define ARBEL_HCR_QUERY_CQ
Definition: arbel.h:65
assert ( ( readw ( &hdr->flags ) & ( GTF_reading | GTF_writing ) ) == 0 )
struct ib_device_operations * op
Infiniband operations.
Definition: infiniband.h:416
#define DBGLVL_LOG
Definition: compiler.h:316
static int arbel_create_qp(struct ib_device *ibdev, struct ib_queue_pair *qp)
Create queue pair.
Definition: arbel.c:971
#define ARBEL_QPEE_OPT_PARAM_QKEY
Definition: arbel.h:107
union arbelprm_completion_entry * cqe
Completion queue entries.
Definition: arbel.h:428
An Infiniband Work Queue.
Definition: infiniband.h:100
static int arbel_cmd_mad_ifc(struct arbel *arbel, unsigned int port, union arbelprm_mad *mad)
Definition: arbel.c:409
#define ARBEL_UNMAP_EQ
Definition: arbel.h:110
#define ARBEL_HCR_HW2SW_EQ
Definition: arbel.h:62
#define DBGC_HDA(...)
Definition: compiler.h:506
static void arbel_free_qpn(struct ib_device *ibdev, struct ib_queue_pair *qp)
Free queue pair number.
Definition: arbel.c:806
static uint8_t arbel_qp_st[]
Queue pair transport service type map.
Definition: arbel.c:828
static int arbel_open(struct arbel *arbel)
Open Arbel device.
Definition: arbel.c:2732
void ib_complete_send(struct ib_device *ibdev, struct ib_queue_pair *qp, struct io_buffer *iobuf, int rc)
Complete send work queue entry.
Definition: infiniband.c:515
void writel(uint32_t data, volatile uint32_t *io_addr)
Write 32-bit dword to memory-mapped device.
#define ARBEL_PCI_UAR_SIZE
Definition: arbel.h:32
__be16 wqe_counter
Definition: CIB_PRM.h:36
#define ARBEL_HCR_HW2SW_CQ
Definition: arbel.h:64
size_t db_rec_offset
Offset within ICM of doorbell records.
Definition: arbel.h:508
struct arbelprm_cq_ci_db_record cq_ci
Definition: arbel.h:267
#define ARBEL_HCR_2RST_QPEE
Definition: arbel.h:70
size_t offset
Offset of the first segment within the content.
Definition: deflate.h:259
#define ARBEL_NUM_SPECIAL_QPS
Number of special queue pairs.
Definition: arbel.h:379
#define ARBEL_PM_STATE_MIGRATED
Definition: arbel.h:118
unsigned long pci_bar_start(struct pci_device *pci, unsigned int reg)
Find the start of a PCI BAR.
Definition: pci.c:96
static int arbel_complete(struct ib_device *ibdev, struct ib_completion_queue *cq, union arbelprm_completion_entry *cqe)
Handle completion.
Definition: arbel.c:1556
size_t cqe_size
Size of completion queue.
Definition: arbel.h:430
#define ARBEL_HCR_CLOSE_IB
Definition: arbel.h:58
static void arbel_stop(struct arbel *arbel)
Stop Arbel device.
Definition: arbel.c:2718
#define ARBEL_INVALID_LKEY
Definition: arbel.h:98
An Infiniband Global Route Header.
Definition: ib_packet.h:89
#define ARBEL_HCR_INIT_IB
Definition: arbel.h:57
#define ARBEL_RSVD_SPECIAL_QPS
Number of queue pairs reserved for the "special QP" block.
Definition: arbel.h:386
userptr_t firmware_area
Firmware area in external memory.
Definition: arbel.h:495
static int arbel_cmd_rtr2rts_qpee(struct arbel *arbel, unsigned long qpn, const struct arbelprm_qp_ee_state_transitions *ctx)
Definition: arbel.c:367
struct arbelprm_completion_with_error error
Definition: arbel.h:257
struct ib_work_queue * ib_find_wq(struct ib_completion_queue *cq, unsigned long qpn, int is_send)
Find work queue belonging to completion queue.
Definition: infiniband.c:396
static void arbel_free(struct arbel *arbel)
Free Arbel device.
Definition: arbel.c:3010
#define ARBEL_DB_EQ_OFFSET(_eqn)
Definition: arbel.h:105
unsigned int num_wqes
Number of work queue entries.
Definition: infiniband.h:112
static void * dest
Definition: strings.h:176
unsigned int arm_doorbell_idx
Arm queue doorbell record number.
Definition: arbel.h:426
size_t eqe_size
Size of event queue.
Definition: arbel.h:444
union ib_mad mad
Definition: arbel.h:283
pseudo_bit_t value[0x00020]
Definition: arbel.h:13
uint32_t high
High 32 bits of address.
Definition: myson.h:20
An Arbel receive work queue.
Definition: arbel.h:365
#define ARBEL_MAP_EQ
Definition: arbel.h:109
#define ARBEL_MTU_2048
Definition: arbel.h:94
#define DBGC2_HDA(...)
Definition: compiler.h:523
#define ARBEL_HCR_UNMAP_FA
Definition: arbel.h:85
uint32_t rdma_key
RDMA key.
Definition: infiniband.h:456
#define ARBEL_EV_PORT_STATE_CHANGE
Definition: arbel.h:112
An Arbel completion queue.
Definition: arbel.h:422
pseudo_bit_t hash[0x00010]
Hash algorithm.
Definition: arbel.h:13
unsigned int port
Port number.
Definition: infiniband.h:418
static __always_inline void ibdev_put(struct ib_device *ibdev)
Drop reference to Infiniband device.
Definition: infiniband.h:598
An Arbel queue pair.
Definition: arbel.h:406
static __always_inline void ib_cq_set_drvdata(struct ib_completion_queue *cq, void *priv)
Set Infiniband completion queue driver-private data.
Definition: infiniband.h:675
char * strerror(int errno)
Retrieve string representation of error number.
Definition: strerror.c:78
union ib_gid sgid
Source GID.
Definition: ib_packet.h:104
size_t mtt_entry_size
MTT entry size.
Definition: arbel.h:318
static void (* free)(struct refcnt *refcnt)
Definition: refcnt.h:54
#define ARBEL_MAX_CQS
Maximum number of allocatable completion queues.
Definition: arbel.h:419
static int arbel_mcast_attach(struct ib_device *ibdev, struct ib_queue_pair *qp, union ib_gid *gid)
Attach to multicast group.
Definition: arbel.c:2866
struct arbelprm_completion_queue_entry normal
Definition: arbel.h:256
static void arbel_remove(struct pci_device *pci)
Remove PCI device.
Definition: arbel.c:3128
ib_queue_pair_type
An Infiniband queue pair type.
Definition: infiniband.h:138
static int arbel_post_send(struct ib_device *ibdev, struct ib_queue_pair *qp, struct ib_address_vector *dest, struct io_buffer *iobuf)
Post send work queue entry.
Definition: arbel.c:1423
void * zalloc(size_t size)
Allocate cleared memory.
Definition: malloc.c:624
__be16 rlid
Definition: CIB_PRM.h:38
PCI bus.
#define ARBEL_HCR_RUN_FW
Definition: arbel.h:77
A PCI device.
Definition: pci.h:206
static size_t iob_len(struct io_buffer *iobuf)
Calculate length of data in an I/O buffer.
Definition: iobuf.h:155
#define ARBEL_PCI_UAR_IDX
Definition: arbel.h:31
#define ENFILE
Too many open files in system.
Definition: errno.h:493
struct ib_device * alloc_ibdev(size_t priv_size)
Allocate Infiniband device.
Definition: infiniband.c:917
static int arbel_start(struct arbel *arbel, int running)
Start Arbel device.
Definition: arbel.c:2661
struct golan_eq_context ctx
Definition: CIB_PRM.h:28
A PCI configuration space backup.
Definition: pcibackup.h:21
unsigned int reserved_cqs
Number of reserved CQs.
Definition: arbel.h:310
User memory allocation.
static int arbel_cmd_query_dev_lim(struct arbel *arbel, struct arbelprm_query_dev_lim *dev_lim)
Definition: arbel.c:237
#define MLX_GET(_ptr, _field)
Definition: mlx_bitops.h:222
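MLX_GET() and its MLX_SET()/MLX_FILL_*() siblings read and write fields of big-endian pseudo-bit structures, so no byte-swapping is done by hand. A self-contained sketch with a purely hypothetical record (real records come from the autogenerated PRM headers; MLX_DECLARE_STRUCT() is assumed to wrap the *_st layout as in mlx_bitops.h):

/* Hypothetical layout: a 32-bit counter in dword 0, then a 24-bit
 * number and 8 reserved bits sharing dword 1 */
struct example_record_st {
        pseudo_bit_t counter[0x00020];
        pseudo_bit_t number[0x00018];
        pseudo_bit_t reserved[0x00008];
};
struct MLX_DECLARE_STRUCT ( example_record );

static void example_fill ( struct example_record *rec ) {
        MLX_FILL_1 ( rec, 0, counter, 42 );
        MLX_FILL_1 ( rec, 1, number, 7 );
        assert ( MLX_GET ( rec, number ) == 7 );
}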
#define DBGC_HD(...)
Definition: compiler.h:507
u8 sl
Definition: CIB_PRM.h:42
static void arbel_reset(struct arbel *arbel)
Reset device.
Definition: arbel.c:2555
long int random(void)
Generate a pseudo-random number between 0 and 2147483647.
Definition: random.c:31
size_t eec_entry_size
EE context entry size.
Definition: arbel.h:306
static size_t iob_tailroom(struct io_buffer *iobuf)
Calculate available space at end of an I/O buffer.
Definition: iobuf.h:175
void * uar
PCI User Access Region.
Definition: arbel.h:475
static struct arbel * arbel_alloc(void)
Allocate Arbel device.
Definition: arbel.c:2978
An Infiniband Completion Queue.
Definition: infiniband.h:224
void * doorbell
Doorbell register.
Definition: arbel.h:450
int ib_smc_update(struct ib_device *ibdev, ib_local_mad_t local_mad)
Update Infiniband parameters using SMC.
Definition: ib_smc.c:249
#define PCI_VENDOR_ID
PCI vendor ID.
Definition: pci.h:19
#define MLX_FILL_1(_ptr, _index,...)
Definition: mlx_bitops.h:167
#define MLX_FILL_H(_structure_st, _index, _field, _address)
Definition: mlx_bitops.h:240
unsigned char uint8_t
Definition: stdint.h:10
Mellanox Arbel Infiniband HCA driver.
#define ARBEL_HCR_IN_LEN(_command)
Definition: arbel.h:566
#define ARBEL_HCR_UNMAP_ICM
Definition: arbel.h:80
static int arbel_cmd_init_ib(struct arbel *arbel, unsigned int port, const struct arbelprm_init_ib *init_ib)
Definition: arbel.c:270
size_t wqe_size
Size of work queue.
Definition: arbel.h:371
#define ARBEL_RESET_OFFSET
Definition: arbel.h:35
#define ARBEL_ST_RC
Definition: arbel.h:89
unsigned long qpn
Queue Pair Number.
Definition: infiniband.h:74
#define ARBEL_PCI_UAR_BAR
Definition: arbel.h:30
int register_ibdev(struct ib_device *ibdev)
Register Infiniband device.
Definition: infiniband.c:944
#define ARBEL_HCR_WRITE_MGM
Definition: arbel.h:75
#define IB_PORT_STATE_DOWN
Definition: ib_mad.h:151
struct dma_mapping * map
Definition: dma.h:181
A PCI device ID list entry.
Definition: pci.h:170
struct ib_queue_pair * qp
Containing queue pair.
Definition: infiniband.h:102
uint8_t headers[IB_MAX_HEADER_SIZE]
Definition: arbel.h:14
enum arbel_queue_pair_state state
Queue state.
Definition: arbel.h:412
struct arbelprm_wqe_segment_next next
Definition: arbel.h:338
unsigned int ports
Total ports on device.
Definition: infiniband.h:420
unsigned int uint32_t
Definition: stdint.h:12
__be16 c_eqn
Definition: CIB_PRM.h:38
#define ARBEL_PORT_BASE
Definition: arbel.h:25
unsigned int reserved_mrws
Number of reserved MRWs.
Definition: arbel.h:320
#define ARBEL_HCR_MAX_WAIT_MS
Definition: arbel.h:548
unsigned long next_idx
Next work queue entry index.
Definition: infiniband.h:122
static struct pci_device_id arbel_nics[]
Definition: arbel.c:3141
uint16_t syndrome
ID of event.
Definition: ena.h:14
unsigned int reserved_ees
Number of reserved EEs.
Definition: arbel.h:304
size_t icm_aux_len
ICM AUX size.
Definition: arbel.h:499
static void arbel_bitmask_free(arbel_bitmask_t *bits, int bit)
Free offset within usage bitmask.
Definition: arbel.c:93
#define ARBEL_HCR_IN_MBOX
Definition: arbel.h:563
static struct xen_remove_from_physmap * remove
Definition: xenmem.h:39
#define ARBEL_HCR_SET_ICM_SIZE
Definition: arbel.h:84
#define ARBEL_HCR_MAP_FA
Definition: arbel.h:86
static int arbel_bitmask_alloc(arbel_bitmask_t *bits, unsigned int bits_len)
Allocate offset within usage bitmask.
Definition: arbel.c:69
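Usage sketch for the bitmask pair above, following the queue-number pattern used throughout the driver (cq_inuse is the assumed CQ counterpart of the qp_inuse bitmask listed below; reserved_cqs appears above):

static long example_alloc_cqn ( struct arbel *arbel ) {
        int cqn_offset;

        cqn_offset = arbel_bitmask_alloc ( arbel->cq_inuse, ARBEL_MAX_CQS );
        if ( cqn_offset < 0 )
                return cqn_offset;      /* -ENFILE when every bit is set */
        return ( arbel->limits.reserved_cqs + cqn_offset );
}

static void example_free_cqn ( struct arbel *arbel, unsigned long cqn ) {
        arbel_bitmask_free ( arbel->cq_inuse,
                             ( cqn - arbel->limits.reserved_cqs ) );
}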
static int arbel_modify_qp(struct ib_device *ibdev, struct ib_queue_pair *qp)
Modify queue pair.
Definition: arbel.c:1124
unsigned long next_idx
Next completion queue entry index.
Definition: infiniband.h:240
unsigned long qpn_base
QPN base.
Definition: arbel.h:529
struct arbelprm_ud_send_wqe ud
Definition: arbel.h:339
An Infiniband Queue Pair.
Definition: infiniband.h:157
#define ARBEL_HCR_RST2INIT_QPEE
Definition: arbel.h:66
pseudo_bit_t port_state_change[0x00001]
Definition: arbel.h:22
unsigned int sl
Service level.
Definition: infiniband.h:88
#define ARBEL_OPCODE_RECV_ERROR
Definition: arbel.h:49
#define ARBEL_HCR_OPCODE(_command)
Definition: arbel.h:565
Network device management.
unsigned long physaddr_t
Definition: stdint.h:20
static int arbel_cmd_conf_special_qp(struct arbel *arbel, unsigned int qp_type, unsigned long base_qpn)
Definition: arbel.c:401
#define ARBEL_HCR_MGID_HASH
Definition: arbel.h:76
#define ARBEL_HCR_SW2HW_CQ
Definition: arbel.h:63
struct pci_driver arbel_driver __pci_driver
Definition: arbel.c:3146
#define __unused
Declare a variable or data structure as unused.
Definition: compiler.h:573
#define ARBEL_RETRY_MAX
Definition: arbel.h:120
static void * pci_get_drvdata(struct pci_device *pci)
Get PCI driver-private data.
Definition: pci.h:369
static unsigned int arbel_recv_doorbell_idx(struct arbel *arbel, struct ib_queue_pair *qp)
Get receive work request doorbell index.
Definition: arbel.h:639
struct arbelprm_qp_db_record qp
Definition: arbel.h:13
struct arbelprm_wqe_segment_ctrl_send ctrl
Definition: arbel.h:236
void mdelay(unsigned long msecs)
Delay for a fixed number of milliseconds.
Definition: timer.c:78
#define iob_reserve(iobuf, len)
Definition: iobuf.h:67
static int arbel_setup_mpt(struct arbel *arbel)
Set up memory protection table.
Definition: arbel.c:2582
struct arbelprm_port_state_change_event port_state_change
Definition: arbel.h:262
static void arbel_destroy_cq(struct ib_device *ibdev, struct ib_completion_queue *cq)
Destroy completion queue.
Definition: arbel.c:715
unsigned int reserved_qps
Number of reserved QPs.
Definition: arbel.h:294
static int arbel_cmd_enable_lam(struct arbel *arbel, struct arbelprm_access_lam *lam)
Definition: arbel.c:461
#define ARBEL_NUM_PORTS
Definition: arbel.h:24
size_t qpc_entry_size
QP context entry size.
Definition: arbel.h:296
#define ARBEL_HCR_QUERY_DEV_LIM
Definition: arbel.h:53
struct ib_mad_hdr hdr
Definition: ib_mad.h:611
size_t firmware_len
Firmware size.
Definition: arbel.h:488
#define UNULL
Equivalent of NULL for user pointers.
Definition: uaccess.h:36
#define ARBEL_HCR_VOID_CMD(_opcode)
Definition: arbel.h:584
#define ARBEL_PCI_CONFIG_BAR_SIZE
Definition: arbel.h:29
static volatile void * bits
Definition: bitops.h:27
static __always_inline void ufree(userptr_t userptr)
Free external memory.
Definition: umalloc.h:65
uint32_t len
Length.
Definition: ena.h:14
uint32_t type
Operating system type.
Definition: ena.h:12
static int arbel_inform_sma(struct ib_device *ibdev, union ib_mad *mad)
Inform embedded subnet management agent of a received MAD.
Definition: arbel.c:2838
uint8_t unused[32]
Unused.
Definition: eltorito.h:15
static __always_inline userptr_t umalloc(size_t size)
Allocate external memory.
Definition: umalloc.h:54
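The firmware area is the natural user of umalloc()/ufree(): one large allocation in external memory, with UNULL marking the unallocated state. A sketch pairing the two, using the firmware_area and firmware_len fields listed on this page:

static int example_alloc_firmware_area ( struct arbel *arbel ) {
        arbel->firmware_area = umalloc ( arbel->firmware_len );
        if ( arbel->firmware_area == UNULL )
                return -ENOMEM;
        return 0;
}

static void example_free_firmware_area ( struct arbel *arbel ) {
        ufree ( arbel->firmware_area );
        arbel->firmware_area = UNULL;
}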
#define ENOBUFS
No buffer space available.
Definition: errno.h:498
#define ARBEL_PAGE_SIZE
Definition: arbel.h:100
size_t uar_scratch_entry_size
UAR scratchpad entry size.
Definition: arbel.h:330
#define DBGC2(...)
Definition: compiler.h:522
void pci_backup(struct pci_device *pci, struct pci_config_backup *backup, unsigned int limit, const uint8_t *exclude)
Back up PCI configuration space.
Definition: pcibackup.c:67
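These helpers exist because resetting the device wipes its PCI configuration space. A sketch of the back-up/reset/restore sequence; pci_restore() is assumed as the mirror of pci_backup(), and both the reset magic value and the excluded offsets are illustrative:

static void example_reset ( struct arbel *arbel ) {
        struct pci_config_backup backup;
        static const uint8_t exclude[] =
                PCI_CONFIG_BACKUP_EXCLUDE ( 0x58, 0x5c );

        /* Save config space, hit the reset register, wait, restore */
        pci_backup ( arbel->pci, &backup, PCI_CONFIG_BACKUP_ALL, exclude );
        writel ( 0x01000000UL /* illustrative reset magic */,
                 ( arbel->config + ARBEL_RESET_OFFSET ) );
        mdelay ( ARBEL_RESET_WAIT_TIME_MS );
        pci_restore ( arbel->pci, &backup, PCI_CONFIG_BACKUP_ALL, exclude );
}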
int(* probe)(struct pci_device *pci)
Probe device.
Definition: pci.h:260
static size_t(* arbel_fill_send_wqe[])(struct ib_device *ibdev, struct ib_queue_pair *qp, struct ib_address_vector *dest, struct io_buffer *iobuf, union arbel_send_wqe *wqe)
Work queue entry constructors.
Definition: arbel.c:1403
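The table above lets the send path construct a WQE with one indexed call; the queue pair type is assumed to be the index, as the per-type constructors (UD, MLX, RC) on this page suggest:

static size_t example_fill_send_wqe ( struct ib_device *ibdev,
                                      struct ib_queue_pair *qp,
                                      struct ib_address_vector *dest,
                                      struct io_buffer *iobuf,
                                      union arbel_send_wqe *wqe ) {
        /* The queue pair type selects the WQE constructor */
        assert ( qp->type < ( sizeof ( arbel_fill_send_wqe ) /
                              sizeof ( arbel_fill_send_wqe[0] ) ) );
        assert ( arbel_fill_send_wqe[qp->type] != NULL );
        return arbel_fill_send_wqe[qp->type] ( ibdev, qp, dest, iobuf, wqe );
}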
struct arbelprm_rc_send_wqe rc
Definition: arbel.h:341
#define PCI_CONFIG_BACKUP_EXCLUDE(...)
Define a PCI configuration space backup exclusion list.
Definition: pcibackup.h:29
#define ARBEL_HCR_REG(x)
Definition: arbel.h:547
struct arbelprm_wqe_segment_ctrl_mlx ctrl
Definition: arbel.h:229
#define ARBEL_HCR_INIT_HCA
Definition: arbel.h:55
struct arbelprm_recv_wqe_segment_next next
Definition: arbel.h:250
struct arbel_dev_limits limits
Device limits.
Definition: arbel.h:525
void * data
Start of data.
Definition: iobuf.h:48
#define barrier()
Optimisation barrier.
Definition: compiler.h:655
#define EIO
Input/output error.
Definition: errno.h:433
struct arbelprm_recv_wqe recv
Definition: arbel.h:360
static int arbel_dump_qpctx(struct arbel *arbel, struct ib_queue_pair *qp)
Dump queue pair context (for debugging only)
Definition: arbel.c:843
#define ARBEL_PCI_CONFIG_BAR
Definition: arbel.h:28
union ib_gid gid
GID, if present.
Definition: infiniband.h:92
static int arbel_cmd_unmap_fa(struct arbel *arbel)
Definition: arbel.c:514
static size_t arbel_fill_mlx_send_wqe(struct ib_device *ibdev, struct ib_queue_pair *qp, struct ib_address_vector *dest, struct io_buffer *iobuf, union arbel_send_wqe *wqe)
Construct MLX send work queue entry.
Definition: arbel.c:1332
uint8_t port_state
Port state.
Definition: infiniband.h:425
#define ARBEL_MKEY_PREFIX
Memory key prefix.
Definition: arbel.h:539
static int arbel_cmd_read_mgm(struct arbel *arbel, unsigned int index, struct arbelprm_mgm_entry *mgm)
Definition: arbel.c:419
unsigned long cqn
Completion queue number.
Definition: infiniband.h:230
uint32_t end
Ending offset.
Definition: netvsc.h:18
uint8_t size
Entry size (in 32-bit words)
Definition: ena.h:16
void iounmap(volatile const void *io_addr)
Unmap I/O address.
uint8_t data[48]
Additional event data.
Definition: ena.h:22
unsigned int num_cqes
Number of completion queue entries.
Definition: infiniband.h:232
size_t icm_len
ICM size.
Definition: arbel.h:497
A management datagram.
Definition: ib_mad.h:610
#define ARBEL_RESET_WAIT_TIME_MS
Definition: arbel.h:37
#define ARBEL_HCR_OUT_MBOX
Definition: arbel.h:564
#define DBGCP(...)
Definition: compiler.h:539
static int arbel_get_limits(struct arbel *arbel)
Get device limits.
Definition: arbel.c:2140
static int arbel_cmd_sw2hw_cq(struct arbel *arbel, unsigned long cqn, const struct arbelprm_completion_queue_context *cqctx)
Definition: arbel.c:322
#define ARBEL_MBOX_ALIGN
Definition: arbel.h:549
uint32_t f
Definition: sha256.c:33
unsigned int open_count
Device open request counter.
Definition: arbel.h:485
static void arbel_stop_firmware(struct arbel *arbel)
Stop firmware running.
Definition: arbel.c:2115
unsigned long next_idx
Next event queue entry index.
Definition: arbel.h:448
#define ARBEL_HCR_SW2HW_EQ
Definition: arbel.h:61
void ib_complete_recv(struct ib_device *ibdev, struct ib_queue_pair *qp, struct ib_address_vector *dest, struct ib_address_vector *source, struct io_buffer *iobuf, int rc)
Complete receive work queue entry.
Definition: infiniband.c:536
unsigned long special_qpn_base
Special QPN base.
Definition: arbel.h:527
#define ARBEL_HCR_MAP_ICM
Definition: arbel.h:81
#define ARBEL_HCR_CONF_SPECIAL_QP
Definition: arbel.h:72
static struct ib_device_operations arbel_ib_operations
Arbel Infiniband operations.
Definition: arbel.c:2948
struct arbel_recv_work_queue recv
Receive work queue.
Definition: arbel.h:410
static void free_phys(void *ptr, size_t size)
Free memory allocated with malloc_phys()
Definition: malloc.h:77
An Infiniband Address Vector.
Definition: infiniband.h:72
static int arbel_cmd_rts2rts_qpee(struct arbel *arbel, unsigned long qpn, const struct arbelprm_qp_ee_state_transitions *ctx)
Definition: arbel.c:376
unsigned int reserved_uars
Number of reserved UARs.
Definition: arbel.h:328
__be32 opt_param_mask
Definition: CIB_PRM.h:28
struct pci_device * pci
PCI device.
Definition: arbel.h:471
#define MLX_FILL_5(_ptr, _index,...)
Definition: mlx_bitops.h:183
#define ARBEL_HCR_MAP_ICM_AUX
Definition: arbel.h:83
Infiniband Subnet Management Client.
typeof(acpi_finder=acpi_find)
ACPI table finder.
Definition: acpi.c:45
#define ARBEL_HCR_DISABLE_LAM
Definition: arbel.h:78
unsigned int lid
Local ID.
Definition: infiniband.h:81
struct arbelprm_wqe_segment_data_ptr data[ARBEL_MAX_GATHER]
Definition: arbel.h:237
uint64_t index
Index of the first segment within the content.
Definition: pccrc.h:21
static int arbel_cmd_run_fw(struct arbel *arbel)
Definition: arbel.c:447
static int arbel_cmd_query_qpee(struct arbel *arbel, unsigned long qpn, struct arbelprm_qp_ee_state_transitions *ctx)
Definition: arbel.c:392
#define DBG(...)
Print a debugging message.
Definition: compiler.h:498
static int arbel_probe(struct pci_device *pci)
Probe PCI device.
Definition: arbel.c:3026
static int arbel_map_vpm(struct arbel *arbel, int(*map)(struct arbel *arbel, const struct arbelprm_virtual_physical_mapping *), uint64_t va, physaddr_t pa, size_t len)
Map virtual to physical address for firmware usage.
Definition: arbel.c:1959
__be32 cqn
Definition: CIB_PRM.h:29
void * pci_ioremap(struct pci_device *pci, unsigned long bus_addr, size_t len)
Map PCI bus address as an I/O address.
static void arbel_event_port_state_change(struct arbel *arbel, union arbelprm_event_entry *eqe)
Handle port state event.
Definition: arbel.c:1857
static int arbel_configure_special_qps(struct arbel *arbel)
Configure special queue pairs.
Definition: arbel.c:2623
#define fls(x)
Find last (i.e.
Definition: strings.h:166
struct arbelprm_cq_arm_db_record cq_arm
Definition: arbel.h:266
#define MLX_SET(_ptr, _field, _value)
Definition: mlx_bitops.h:204
#define DBGLVL_EXTRA
Definition: compiler.h:318
u8 gid[16]
Definition: CIB_PRM.h:31
#define ARBEL_HCR_MAP_EQ
Definition: arbel.h:60
struct arbelprm_wqe_segment_ctrl_send ctrl
Definition: arbel.h:222
Definition: arbel.h:255
struct arbelprm_wqe_segment_data_ptr data[ARBEL_MAX_GATHER]
Definition: arbel.h:224
static int arbel_cmd_2rst_qpee(struct arbel *arbel, unsigned long qpn)
Definition: arbel.c:385
struct arbel_send_work_queue send
Send work queue.
Definition: arbel.h:408
static __always_inline void * ib_cq_get_drvdata(struct ib_completion_queue *cq)
Get Infiniband completion queue driver-private data.
Definition: infiniband.h:686
FILE_LICENCE(GPL2_OR_LATER_OR_UBDL)
#define NULL
NULL pointer (VOID *)
Definition: Base.h:321
An Arbel device.
Definition: arbel.h:469
#define ARBEL_ST_UD
Definition: arbel.h:90
int ib_push(struct ib_device *ibdev, struct io_buffer *iobuf, struct ib_queue_pair *qp, size_t payload_len, const struct ib_address_vector *dest)
Add IB headers.
Definition: ib_packet.c:52
String functions.
#define PCI_ROM(_vendor, _device, _name, _description, _data)
Definition: pci.h:303
__be32 byte_cnt
Definition: CIB_PRM.h:37
arbel_bitmask_t qp_inuse[ARBEL_BITMASK_SIZE(ARBEL_MAX_QPS)]
Queue pair in-use bitmask.
Definition: arbel.h:522
#define ARBEL_HCR_QUERY_FW
Definition: arbel.h:54
An Arbel event queue.
Definition: arbel.h:440
union key
Sense key.
Definition: crypto.h:284
static int arbel_cmd_close_ib(struct arbel *arbel, unsigned int port)
Definition: arbel.c:279
size_t mpt_entry_size
MPT entry size.
Definition: arbel.h:322
union ib_mad mad
Definition: arbel.h:12
static int arbel_cmd_close_hca(struct arbel *arbel)
Definition: arbel.c:263
static __always_inline void ib_qp_set_drvdata(struct ib_queue_pair *qp, void *priv)
Set Infiniband queue pair driver-private data.
Definition: infiniband.h:631
int(* create_cq)(struct ib_device *ibdev, struct ib_completion_queue *cq)
Create completion queue.
Definition: infiniband.h:261
uint16_t status
Definition: ib_mad.h:543
void * mailbox_in
Command input mailbox.
Definition: arbel.h:480
#define ARBEL_MAX_QPS
Maximum number of allocatable queue pairs.
Definition: arbel.h:392
String functions.
if ( natsemi->flags & NATSEMI_64BIT ) return 1
static int arbel_cmd_query_fw(struct arbel *arbel, struct arbelprm_query_fw *fw)
Definition: arbel.c:246
#define ARBEL_HCR_ENABLE_LAM
Definition: arbel.h:79
void * memset(void *dest, int character, size_t len) __nonnull
A persistent I/O buffer.
Definition: iobuf.h:33
#define ARBEL_UAR_RES_RQ
Definition: arbel.h:44
struct arbelprm_send_doorbell send
Definition: arbel.h:11
PCI configuration space backup and restoration.
#define ARBEL_UAR_RES_GROUP_SEP
Definition: arbel.h:45
static int arbel_cmd_init2rtr_qpee(struct arbel *arbel, unsigned long qpn, const struct arbelprm_qp_ee_state_transitions *ctx)
Definition: arbel.c:358
struct io_buffer ** iobufs
I/O buffers assigned to work queue.
Definition: infiniband.h:124