/* Allocate offset within usage bitmask */
static int arbel_bitmask_alloc ( arbel_bitmask_t *bits,
                                 unsigned int bits_len ) {
        unsigned int bit = 0;
        arbel_bitmask_t mask = 1;

        while ( bit < bits_len ) {
                if ( ( mask & *bits ) == 0 ) {
                        *bits |= mask;
                        return bit;
                }
                bit++;
                mask = ( mask << 1 ) | ( mask >> ( 8 * sizeof ( mask ) - 1 ) );
                if ( mask == 1 )
                        bits++;
        }
        return -ENFILE;
}

/* Free offset within usage bitmask */
static void arbel_bitmask_free ( arbel_bitmask_t *bits, int bit ) {
        arbel_bitmask_t mask;

        mask = ( 1 << ( bit % ( 8 * sizeof ( mask ) ) ) );
        bits += ( bit / ( 8 * sizeof ( mask ) ) );
        *bits &= ~mask;
}
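/* Illustrative sketch only (not taken verbatim from this file): how such a
 * usage bitmask is typically consumed when claiming a completion queue
 * number.  The cq_inuse bitmask, ARBEL_MAX_CQS and limits.reserved_cqs are
 * assumed from the driver's struct arbel; the helper name is hypothetical.
 */
static inline int arbel_sketch_alloc_cqn ( struct arbel *arbel ) {
        int cqn_offset;

        /* Claim the first free bit; a negative return means exhaustion */
        cqn_offset = arbel_bitmask_alloc ( arbel->cq_inuse, ARBEL_MAX_CQS );
        if ( cqn_offset < 0 )
                return cqn_offset;

        /* Hardware CQNs start after the device's reserved CQs */
        return ( arbel->limits.reserved_cqs + cqn_offset );
}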
/* Wait for Arbel command completion */
static int arbel_cmd_wait ( struct arbel *arbel,
                            struct arbelprm_hca_command_register *hcr ) {
        ...
                if ( MLX_GET ( hcr, go ) == 0 )
        ...
}

/* Issue HCA command */
static int arbel_cmd ( struct arbel *arbel, unsigned long command,
                       unsigned int op_mod, const void *in,
                       unsigned int in_mod, void *out ) {
        struct arbelprm_hca_command_register hcr;
        ...
        DBGC2 ( arbel, "Arbel %p command %02x in %zx%s out %zx%s\n", ... );
        ...
        memset ( &hcr, 0, sizeof ( hcr ) );
        in_buffer = &hcr.u.dwords[0];
        ...
        MLX_FILL_1 ( &hcr, 2, input_modifier, in_mod );
        out_buffer = &hcr.u.dwords[3];
        ...
        ...          opcode_modifier, op_mod,
        ...          &hcr, sizeof ( hcr ) );
        ...          ( ( in_len < 512 ) ? in_len : 512 ) );
        ...
        for ( i = 0 ; i < ( sizeof ( hcr ) / sizeof ( hcr.u.dwords[0] ) ) ; ... )
        ...
                DBGC ( arbel, "Arbel %p timed out waiting for command:\n",
                       ... );
        ...
                DBGC ( arbel, "Arbel %p command failed with status %02x:\n",
                       ... );
        ...
        ...          ( ( out_len < 512 ) ? out_len : 512 ) );
        ...
}
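/* Illustrative sketch only: the HCR handshake that arbel_cmd() implements
 * is, in outline, the sequence below.  The ARBEL_HCR_REG() offset macro is
 * assumed from arbel.h; the helper name and its structure are hypothetical,
 * not the verbatim implementation.
 */
static int arbel_sketch_post_hcr ( struct arbel *arbel,
                                   struct arbelprm_hca_command_register *hcr ) {
        unsigned int i;

        /* Write out the command register dword by dword; the final dword
         * carries the "go" bit, so the command only starts once the whole
         * register is visible to the device. */
        for ( i = 0 ; i < ( sizeof ( *hcr ) / sizeof ( hcr->u.dwords[0] ) ) ; i++ ) {
                writel ( hcr->u.dwords[i], arbel->config + ARBEL_HCR_REG ( i ) );
                barrier();
        }

        /* Poll until the hardware clears the "go" bit */
        return arbel_cmd_wait ( arbel, hcr );
}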
/* Thin per-command wrappers around arbel_cmd() */

static inline int arbel_cmd_query_dev_lim ( struct arbel *arbel,
                        struct arbelprm_query_dev_lim *dev_lim ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_OUT_CMD ( ARBEL_HCR_QUERY_DEV_LIM,
                                               1, sizeof ( *dev_lim ) ),
                           0, NULL, 0, dev_lim );
}

static inline int arbel_cmd_init_hca ( struct arbel *arbel,
                        const struct arbelprm_init_hca *init_hca ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_IN_CMD ( ARBEL_HCR_INIT_HCA,
                                              1, sizeof ( *init_hca ) ),
                           0, init_hca, 0, NULL );
}

static inline int arbel_cmd_init_ib ( struct arbel *arbel, unsigned int port,
                        const struct arbelprm_init_ib *init_ib ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_IN_CMD ( ARBEL_HCR_INIT_IB,
                                              1, sizeof ( *init_ib ) ),
                           0, init_ib, port, NULL );
}

static inline int arbel_cmd_sw2hw_mpt ( struct arbel *arbel, unsigned int index,
                        const struct arbelprm_mpt *mpt ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_IN_CMD ( ARBEL_HCR_SW2HW_MPT,
                                              1, sizeof ( *mpt ) ),
                           0, mpt, index, NULL );
}

static inline int arbel_cmd_map_eq ( struct arbel *arbel, unsigned long index_map,
                        const struct arbelprm_event_mask *mask ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_IN_CMD ( ARBEL_HCR_MAP_EQ,
                                              0, sizeof ( *mask ) ),
                           0, mask, index_map, NULL );
}

static inline int arbel_cmd_sw2hw_eq ( struct arbel *arbel, unsigned int index,
                        const struct arbelprm_eqc *eqctx ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_IN_CMD ( ARBEL_HCR_SW2HW_EQ,
                                              1, sizeof ( *eqctx ) ),
                           0, eqctx, index, NULL );
}

static inline int arbel_cmd_hw2sw_eq ( struct arbel *arbel, unsigned int index,
                        struct arbelprm_eqc *eqctx ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_OUT_CMD ( ARBEL_HCR_HW2SW_EQ,
                                               1, sizeof ( *eqctx ) ),
                           1, NULL, index, eqctx );
}

static inline int arbel_cmd_sw2hw_cq ( struct arbel *arbel, unsigned long cqn,
                        const struct arbelprm_completion_queue_context *cqctx ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_IN_CMD ( ARBEL_HCR_SW2HW_CQ,
                                              1, sizeof ( *cqctx ) ),
                           0, cqctx, cqn, NULL );
}

static inline int arbel_cmd_hw2sw_cq ( struct arbel *arbel, unsigned long cqn,
                        struct arbelprm_completion_queue_context *cqctx ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_OUT_CMD ( ARBEL_HCR_HW2SW_CQ,
                                               1, sizeof ( *cqctx ) ),
                           0, NULL, cqn, cqctx );
}

static inline int arbel_cmd_query_cq ( struct arbel *arbel, unsigned long cqn,
                        struct arbelprm_completion_queue_context *cqctx ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_OUT_CMD ( ARBEL_HCR_QUERY_CQ,
                                               1, sizeof ( *cqctx ) ),
                           0, NULL, cqn, cqctx );
}

static inline int arbel_cmd_rst2init_qpee ( struct arbel *arbel, unsigned long qpn,
                        const struct arbelprm_qp_ee_state_transitions *ctx ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_IN_CMD ( ARBEL_HCR_RST2INIT_QPEE,
                                              1, sizeof ( *ctx ) ),
                           0, ctx, qpn, NULL );
}

static inline int arbel_cmd_init2rtr_qpee ( struct arbel *arbel, unsigned long qpn,
                        const struct arbelprm_qp_ee_state_transitions *ctx ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_IN_CMD ( ARBEL_HCR_INIT2RTR_QPEE,
                                              1, sizeof ( *ctx ) ),
                           0, ctx, qpn, NULL );
}

static inline int arbel_cmd_rtr2rts_qpee ( struct arbel *arbel, unsigned long qpn,
                        const struct arbelprm_qp_ee_state_transitions *ctx ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_IN_CMD ( ARBEL_HCR_RTR2RTS_QPEE,
                                              1, sizeof ( *ctx ) ),
                           0, ctx, qpn, NULL );
}

static inline int arbel_cmd_rts2rts_qpee ( struct arbel *arbel, unsigned long qpn,
                        const struct arbelprm_qp_ee_state_transitions *ctx ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_IN_CMD ( ARBEL_HCR_RTS2RTS_QPEE,
                                              1, sizeof ( *ctx ) ),
                           0, ctx, qpn, NULL );
}

static inline int arbel_cmd_query_qpee ( struct arbel *arbel, unsigned long qpn,
                        struct arbelprm_qp_ee_state_transitions *ctx ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_OUT_CMD ( ARBEL_HCR_QUERY_QPEE,
                                               1, sizeof ( *ctx ) ),
                           0, NULL, qpn, ctx );
}

static inline int arbel_cmd_conf_special_qp ( struct arbel *arbel,
                        unsigned int qp_type, unsigned long base_qpn ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_VOID_CMD ( ARBEL_HCR_CONF_SPECIAL_QP ),
                           qp_type, NULL, base_qpn, NULL );
}

static inline int arbel_cmd_mad_ifc ( struct arbel *arbel, unsigned int port,
                        union arbelprm_mad *mad ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_INOUT_CMD ( ARBEL_HCR_MAD_IFC,
                                                 1, sizeof ( *mad ),
                                                 1, sizeof ( *mad ) ),
                           0x03, mad, port, mad );
}

static inline int arbel_cmd_read_mgm ( struct arbel *arbel, unsigned int index,
                        struct arbelprm_mgm_entry *mgm ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_OUT_CMD ( ARBEL_HCR_READ_MGM,
                                               1, sizeof ( *mgm ) ),
                           0, NULL, index, mgm );
}

static inline int arbel_cmd_write_mgm ( struct arbel *arbel, unsigned int index,
                        const struct arbelprm_mgm_entry *mgm ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_IN_CMD ( ARBEL_HCR_WRITE_MGM,
                                              1, sizeof ( *mgm ) ),
                           0, mgm, index, NULL );
}

static inline int arbel_cmd_mgid_hash ( struct arbel *arbel,
                        const union ib_gid *gid,
                        struct arbelprm_mgm_hash *hash ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_INOUT_CMD ( ARBEL_HCR_MGID_HASH,
                                                 1, sizeof ( *gid ),
                                                 0, sizeof ( *hash ) ),
                           0, gid, 0, hash );
}

static inline int arbel_cmd_enable_lam ( struct arbel *arbel,
                        struct arbelprm_access_lam *lam ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_OUT_CMD ( ARBEL_HCR_ENABLE_LAM,
                                               1, sizeof ( *lam ) ),
                           1, NULL, 0, lam );
}

static inline int arbel_cmd_unmap_icm ( struct arbel *arbel,
                        unsigned int page_count,
                        const struct arbelprm_scalar_parameter *offset ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_IN_CMD ( ARBEL_HCR_UNMAP_ICM,
                                              0, sizeof ( *offset ) ),
                           0, offset, page_count, NULL );
}

static inline int arbel_cmd_map_icm ( struct arbel *arbel,
                        const struct arbelprm_virtual_physical_mapping *map ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_IN_CMD ( ARBEL_HCR_MAP_ICM,
                                              1, sizeof ( *map ) ),
                           0, map, 1, NULL );
}

static inline int arbel_cmd_map_icm_aux ( struct arbel *arbel,
                        const struct arbelprm_virtual_physical_mapping *map ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_IN_CMD ( ARBEL_HCR_MAP_ICM_AUX,
                                              1, sizeof ( *map ) ),
                           0, map, 1, NULL );
}

static inline int arbel_cmd_set_icm_size ( struct arbel *arbel,
                        const struct arbelprm_scalar_parameter *icm_size,
                        struct arbelprm_scalar_parameter *icm_aux_size ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_INOUT_CMD ( ARBEL_HCR_SET_ICM_SIZE,
                                                 0, sizeof ( *icm_size ),
                                                 0, sizeof ( *icm_aux_size ) ),
                           0, icm_size, 0, icm_aux_size );
}

static inline int arbel_cmd_map_fa ( struct arbel *arbel,
                        const struct arbelprm_virtual_physical_mapping *map ) {
        return arbel_cmd ( arbel,
                           ARBEL_HCR_IN_CMD ( ARBEL_HCR_MAP_FA,
                                              1, sizeof ( *map ) ),
                           0, map, 1, NULL );
}
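/* Illustrative sketch only: the typical caller-side pattern for the
 * wrappers above; the surrounding code (e.g. arbel_start_firmware())
 * follows the same shape.  The helper name and its error handling here
 * are assumptions, not the driver's own code.
 */
static int arbel_sketch_query_firmware ( struct arbel *arbel ) {
        struct arbelprm_query_fw fw;
        int rc;

        if ( ( rc = arbel_cmd_query_fw ( arbel, &fw ) ) != 0 ) {
                DBGC ( arbel, "Arbel %p could not query firmware: %s\n",
                       arbel, strerror ( rc ) );
                return rc;
        }
        return 0;
}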
/* Issue management datagram */
        if ( ( rc = arbel_cmd_mad_ifc ( arbel, ...,
                                        &mad_ifc ) ) != 0 ) {
                DBGC ( arbel, "Arbel %p port %d could not issue MAD IFC: %s\n",
                       ... );
        ...
                DBGC ( arbel, "Arbel %p port %d MAD IFC status %04x\n",
                       ... );
/* Dump completion queue context (for debugging only) */
static int arbel_dump_cqctx ( struct arbel *arbel,
                              struct ib_completion_queue *cq ) {
        struct arbelprm_completion_queue_context cqctx;
        ...
        memset ( &cqctx, 0, sizeof ( cqctx ) );
        if ( ( rc = arbel_cmd_query_cq ( arbel, cq->cqn, &cqctx ) ) != 0 ) {
                DBGC ( arbel, "Arbel %p CQN %#lx QUERY_CQ failed: %s\n",
                       ... );
        ...
/* Create completion queue */
static int arbel_create_cq ( struct ib_device *ibdev,
                             struct ib_completion_queue *cq ) {
        struct arbelprm_completion_queue_context cqctx;
        struct arbelprm_cq_ci_db_record *ci_db_rec;
        struct arbelprm_cq_arm_db_record *arm_db_rec;
        ...
        if ( cqn_offset < 0 ) {
        ...
        arbel_cq = zalloc ( sizeof ( *arbel_cq ) );
        ...
        arbel_cq->cqe_size = ( cq->num_cqes * sizeof ( arbel_cq->cqe[0] ) );
        if ( ! arbel_cq->cqe ) {
        ...
        for ( i = 0 ; i < cq->num_cqes ; i++ ) {
        ...
        /* Initialise doorbell records */
        ...          cq_number, cq->cqn );
        ...          cq_number, cq->cqn );
        ...
        /* Hand queue over to hardware */
        memset ( &cqctx, 0, sizeof ( cqctx ) );
        ...
                DBGC ( arbel, "Arbel %p CQN %#lx SW2HW_CQ failed: %s\n",
                       ... );
        ...
        DBGC ( arbel, "Arbel %p CQN %#lx ring [%08lx,%08lx), doorbell %08lx\n",
               ... );
        ...
/* Destroy completion queue */
static void arbel_destroy_cq ( struct ib_device *ibdev,
                               struct ib_completion_queue *cq ) {
        struct arbelprm_completion_queue_context cqctx;
        struct arbelprm_cq_ci_db_record *ci_db_rec;
        struct arbelprm_cq_arm_db_record *arm_db_rec;
        ...
                DBGC ( arbel, "Arbel %p CQN %#lx FATAL HW2SW_CQ failed: "
                       ... );
        ...
}

/* Assign queue pair number */
static int arbel_alloc_qpn ( struct ib_device *ibdev,
                             struct ib_queue_pair *qp ) {
        unsigned int port_offset;
        ...
        switch ( qp->type ) {
        ...
                if ( qpn_offset < 0 ) {
                        DBGC ( arbel, "Arbel %p out of queue pairs\n", ... );
        ...
                DBGC ( arbel, "Arbel %p unsupported QP type %d\n", ... );
        ...
}

/* Free queue pair number */
        ...
        if ( qpn_offset >= 0 )
        ...

/* Calculate transmission rate */
        ...     ? ( av->rate + 5 ) : 0 );
/* Dump queue pair context (for debugging only) */
static int arbel_dump_qpctx ( struct arbel *arbel, struct ib_queue_pair *qp ) {
        struct arbelprm_qp_ee_state_transitions qpctx;
        ...
        memset ( &qpctx, 0, sizeof ( qpctx ) );
        if ( ( rc = arbel_cmd_query_qpee ( arbel, qp->qpn, &qpctx ) ) != 0 ) {
                DBGC ( arbel, "Arbel %p QPN %#lx QUERY_QPEE failed: %s\n",
                       ... );
        ...
        DBGC_HDA ( arbel, 0, &qpctx.u.dwords[2], ( sizeof ( qpctx ) - 8 ) );
/* Create send work queue */
static int arbel_create_send_wq ( struct arbel_send_work_queue *arbel_send_wq,
                                  unsigned int num_wqes ) {
        unsigned int wqe_idx_mask;
        ...
        /* Allocate work queue entries */
        arbel_send_wq->wqe_size = ( num_wqes *
                                    sizeof ( arbel_send_wq->wqe[0] ) );
        arbel_send_wq->wqe = malloc_phys ( arbel_send_wq->wqe_size,
                                           sizeof ( arbel_send_wq->wqe[0] ) );
        if ( ! arbel_send_wq->wqe )
                return -ENOMEM;
        ...
        /* Link work queue entries into a ring */
        wqe_idx_mask = ( num_wqes - 1 );
        for ( i = 0 ; i < num_wqes ; i++ ) {
                wqe = &arbel_send_wq->wqe[i];
                next_wqe = &arbel_send_wq->wqe[ ( i + 1 ) & wqe_idx_mask ];
        ...
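/* Illustrative sketch only: how each ring entry's "next" segment is
 * typically pointed at the following entry.  The nda_31_6 field name is
 * assumed from the arbelprm definitions (next WQE address, bits 31:6);
 * the helper itself is hypothetical.
 */
static void arbel_sketch_link_wqe ( union arbel_send_wqe *wqe,
                                    union arbel_send_wqe *next_wqe ) {
        /* Next-WQE addresses are expressed in units of 64 bytes (>> 6) */
        MLX_FILL_1 ( &wqe->next, 0, nda_31_6,
                     ( virt_to_bus ( next_wqe ) >> 6 ) );
}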
/* Create receive work queue */
static int arbel_create_recv_wq ( struct arbel_recv_work_queue *arbel_recv_wq,
                                  unsigned int num_wqes,
                                  enum ib_queue_pair_type type ) {
        unsigned int wqe_idx_mask;
        ...
        /* Allocate work queue entries */
        arbel_recv_wq->wqe_size = ( num_wqes *
                                    sizeof ( arbel_recv_wq->wqe[0] ) );
        arbel_recv_wq->wqe = malloc_phys ( arbel_recv_wq->wqe_size,
                                           sizeof ( arbel_recv_wq->wqe[0] ) );
        if ( ! arbel_recv_wq->wqe ) {
        ...
        /* Allocate GRH buffers, if applicable */
        arbel_recv_wq->grh_size = ( num_wqes *
                                    sizeof ( arbel_recv_wq->grh[0] ) );
        ...
        if ( ! arbel_recv_wq->grh ) {
        ...
        /* Link work queue entries into a ring */
        wqe_idx_mask = ( num_wqes - 1 );
        ...              sizeof ( wqe->data[0] ) ) >> 4 );
        for ( i = 0 ; i < num_wqes ; i++ ) {
                wqe = &arbel_recv_wq->wqe[i].recv;
                next_wqe = &arbel_recv_wq->wqe[ ( i + 1 ) & wqe_idx_mask ].recv;
        ...
                for ( j = 0 ; ( ( ( void * ) &wqe->data[j] ) <
                                ( ( void * ) ( wqe + 1 ) ) ) ; j++ ) {
        ...
/* Create queue pair */
static int arbel_create_qp ( struct ib_device *ibdev,
                             struct ib_queue_pair *qp ) {
        struct arbelprm_qp_ee_state_transitions qpctx;
        struct arbelprm_qp_db_record *send_db_rec;
        struct arbelprm_qp_db_record *recv_db_rec;
        ...
        DBG ( "*** WARNING: Arbel RC support is non-functional ***\n" );
        ...
        arbel_qp = zalloc ( sizeof ( *arbel_qp ) );
        ...
        if ( ( rc = arbel_create_send_wq ( &arbel_qp->send,
                                           qp->send.num_wqes ) ) != 0 )
                goto err_create_send_wq;
        ...
                goto err_create_recv_wq;
        ...
        /* Send and receive work queue entries must share a 4GB address
         * region, since the queue pair context holds only one base address
         */
        if ( ... ( ( ( ( uint64_t ) send_wqe_base_adr ) >> 32 ) !=
                   ( ( ( uint64_t ) recv_wqe_base_adr ) >> 32 ) ) ) {
                DBGC ( arbel, "Arbel %p QPN %#lx cannot support send %08lx "
                       "recv %08lx\n", arbel, qp->qpn,
                       send_wqe_base_adr, recv_wqe_base_adr );
                goto err_unsupported_address_split;
        }
        wqe_base_adr = send_wqe_base_adr;
        ...
        /* Initialise doorbell records */
        ...          qp_number, qp->qpn );
        ...          qp_number, qp->qpn );

        /* Hand queue over to hardware */
        memset ( &qpctx, 0, sizeof ( qpctx ) );
        ...          qpc_eec_data.log_rq_size, fls ( qp->recv.num_wqes - 1 ),
                     qpc_eec_data.log_rq_stride,
                     ( fls ( sizeof ( arbel_qp->recv.wqe[0] ) - 1 ) - 4 ),
                     qpc_eec_data.log_sq_size, fls ( qp->send.num_wqes - 1 ),
                     qpc_eec_data.log_sq_stride,
                     ( fls ( sizeof ( arbel_qp->send.wqe[0] ) - 1 ) - 4 ) );
        MLX_FILL_1 ( &qpctx, 10, qpc_eec_data.primary_address_path.port_number,
                     ibdev->port );
        MLX_FILL_H ( &qpctx, 28, qpc_eec_data.wqe_base_adr_h, wqe_base_adr );
        MLX_FILL_1 ( &qpctx, 30, qpc_eec_data.ssc, 1 );
        MLX_FILL_1 ( &qpctx, 33, qpc_eec_data.cqn_snd, qp->send.cq->cqn );
        MLX_FILL_1 ( &qpctx, 34, qpc_eec_data.snd_wqe_base_adr_l,
                     ( send_wqe_base_adr >> 6 ) );
        MLX_FILL_1 ( &qpctx, 35, qpc_eec_data.snd_db_record_index,
                     arbel_send_doorbell_idx ( arbel, qp ) );
        ...          qpc_eec_data.rre, 1,
                     qpc_eec_data.rwe, 1,
                     qpc_eec_data.rae, 1,
                     qpc_eec_data.rsc, 1 );
        MLX_FILL_1 ( &qpctx, 41, qpc_eec_data.cqn_rcv, qp->recv.cq->cqn );
        MLX_FILL_1 ( &qpctx, 42, qpc_eec_data.rcv_wqe_base_adr_l,
                     ( recv_wqe_base_adr >> 6 ) );
        MLX_FILL_1 ( &qpctx, 43, qpc_eec_data.rcv_db_record_index,
                     arbel_recv_doorbell_idx ( arbel, qp ) );
        if ( ( rc = arbel_cmd_rst2init_qpee ( arbel, qp->qpn, &qpctx ) ) != 0 ) {
                DBGC ( arbel, "Arbel %p QPN %#lx RST2INIT_QPEE failed: %s\n",
                       ... );
                goto err_rst2init_qpee;
        }
        ...
        DBGC ( arbel, "Arbel %p QPN %#lx send ring [%08lx,%08lx), doorbell "
               ... );
        DBGC ( arbel, "Arbel %p QPN %#lx receive ring [%08lx,%08lx), doorbell "
               ... );
        DBGC ( arbel, "Arbel %p QPN %#lx send CQN %#lx receive CQN %#lx\n",
               arbel, qp->qpn, qp->send.cq->cqn, qp->recv.cq->cqn );
        ...
 err_unsupported_address_split:
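        /* Worked example: the log2 ring sizes programmed above come from
         * fls().  A 64-entry ring gives log_sq_size = fls ( 64 - 1 ) = 6,
         * and 128-byte entries give
         * log_sq_stride = fls ( 128 - 1 ) - 4 = 3 (i.e. 16 << 3 = 128 bytes).
         */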
/* Modify queue pair */
static int arbel_modify_qp ( struct ib_device *ibdev,
                             struct ib_queue_pair *qp ) {
        struct arbelprm_qp_ee_state_transitions qpctx;
        ...
        /* Transition queue to RTR state, if applicable */
        memset ( &qpctx, 0, sizeof ( qpctx ) );
        ...          qpc_eec_data.msg_max, 31 );
        ...          qpc_eec_data.remote_qpn_een, qp->av.qpn );
        ...          qpc_eec_data.primary_address_path.rnr_retry, ...
                     qpc_eec_data.primary_address_path.rlid, ...
                     qpc_eec_data.primary_address_path.ack_timeout, ...
                     qpc_eec_data.primary_address_path.max_stat_rate, ...
        memcpy ( &qpctx.u.dwords[14], &qp->av.gid, sizeof ( qp->av.gid ) );
        ...          qpc_eec_data.next_rcv_psn, qp->recv.psn );
        ...          qpc_eec_data.ra_buff_indx, ...
        ...
                DBGC ( arbel, "Arbel %p QPN %#lx INIT2RTR_QPEE failed: %s\n",
                       ... );
        ...
        /* Transition queue to RTS state, if applicable */
        memset ( &qpctx, 0, sizeof ( qpctx ) );
        ...          qpc_eec_data.primary_address_path.rnr_retry, ...
        ...          qpc_eec_data.primary_address_path.ack_timeout, ...
        ...          qpc_eec_data.sic, 1 );
        ...          qpc_eec_data.next_send_psn, qp->send.psn );
        ...
                DBGC ( arbel, "Arbel %p QPN %#lx RTR2RTS_QPEE failed: %s\n",
                       ... );
        ...
        /* Update queue key, if applicable */
        memset ( &qpctx, 0, sizeof ( qpctx ) );
        MLX_FILL_1 ( &qpctx, 44, qpc_eec_data.q_key, qp->qkey );
        ...
                DBGC ( arbel, "Arbel %p QPN %#lx RTS2RTS_QPEE failed: %s\n",
                       ... );
/* Destroy queue pair */
static void arbel_destroy_qp ( struct ib_device *ibdev,
                               struct ib_queue_pair *qp ) {
        struct arbelprm_qp_db_record *send_db_rec;
        struct arbelprm_qp_db_record *recv_db_rec;
        ...
                DBGC ( arbel, "Arbel %p QPN %#lx FATAL 2RST_QPEE failed: "
                       ... );
        ...
}

/* Ring doorbell register in UAR */
        DBGC2 ( arbel, "Arbel %p ringing doorbell %08x:%08x at %lx\n",
                ... );

/* GID used for GID-less send work queue entries */
static const union ib_gid arbel_no_gid = {
        .bytes = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0 },
};

/* Construct UD send work queue entry (address vector portion) */
        ...          ud_address_vector.port_number, ibdev->port );
        ...          ud_address_vector.rlid, dest->lid,
                     ud_address_vector.g, dest->gid_present );
        ...          ud_address_vector.msg, 3 );
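/* Illustrative sketch only: in outline, ringing a doorbell is two ordered
 * 32-bit writes into the UAR.  The dword[] member of the doorbell register
 * union is an assumption from the arbelprm definitions, and the helper
 * itself is hypothetical rather than the driver's own implementation.
 */
static void arbel_sketch_ring_doorbell ( struct arbel *arbel,
                        union arbelprm_doorbell_register *db_reg,
                        unsigned int offset ) {
        /* Barriers keep the two dwords visible to the device in order */
        barrier();
        writel ( db_reg->dword[0], ( arbel->uar + offset + 0 ) );
        barrier();
        writel ( db_reg->dword[1], ( arbel->uar + offset + 4 ) );
}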
/* Post send work queue entry */
static int arbel_post_send ( struct ib_device *ibdev,
                             struct ib_queue_pair *qp,
                             struct ib_address_vector *dest,
                             struct io_buffer *iobuf ) {
        struct arbelprm_qp_db_record *qp_db_rec;
        unsigned long wqe_idx_mask;
        ...
        /* Allocate work queue entry */
        wqe_idx_mask = ( wq->num_wqes - 1 );
        if ( wq->iobufs[ wq->next_idx & wqe_idx_mask ] ) {
                DBGC ( arbel, "Arbel %p QPN %#lx send queue full", ... );
                return -ENOBUFS;
        }
        prev_wqe = &arbel_send_wq->wqe[ ( wq->next_idx - 1 ) & wqe_idx_mask ];
        wqe = &arbel_send_wq->wqe[ wq->next_idx & wqe_idx_mask ];

        /* Construct work queue entry */
        memset ( ( ( ( void * ) wqe ) + sizeof ( wqe->next ) ), 0,
                 ( sizeof ( *wqe ) - sizeof ( wqe->next ) ) );
        ...
        DBGCP ( arbel, "Arbel %p QPN %#lx posting send WQE %#lx:\n", ... );
        ...
        /* Update doorbell record and ring the doorbell */
        ...          counter, ( ( wq->next_idx + 1 ) & 0xffff ) );
        ...
/* Post receive work queue entry */
static int arbel_post_recv ( struct ib_device *ibdev,
                             struct ib_queue_pair *qp,
                             struct io_buffer *iobuf ) {
        struct arbelprm_wqe_segment_data_ptr *data;
        unsigned int wqe_idx_mask;
        ...
        /* Allocate work queue entry */
        wqe_idx_mask = ( wq->num_wqes - 1 );
        if ( wq->iobufs[ wq->next_idx & wqe_idx_mask ] ) {
                DBGC ( arbel, "Arbel %p QPN %#lx receive queue full\n", ... );
                return -ENOBUFS;
        }
        ...
        if ( arbel_recv_wq->grh ) {
        ...
        /* Update doorbell record */
        ...          counter, ( ( wq->next_idx + 1 ) & 0xffff ) );
        ...
/* Handle completion */
static int arbel_complete ( struct ib_device *ibdev,
                            struct ib_completion_queue *cq,
                            union arbelprm_completion_entry *cqe ) {
        unsigned long wqe_adr;
        unsigned long wqe_idx;
        ...
                DBGC ( arbel, "Arbel %p CQN %#lx %s QPN %#lx syndrome %#x ",
                       arbel, cq->cqn, ( is_send ? "send" : "recv" ), qpn,
                       ... );
        ...
        /* Identify work queue */
        ...
                DBGC ( arbel, "Arbel %p CQN %#lx unknown %s QPN %#lx\n",
                       arbel, cq->cqn, ( is_send ? "send" : "recv" ), qpn );
        ...
        arbel_send_wq = &arbel_qp->send;
        arbel_recv_wq = &arbel_qp->recv;

        /* Identify work queue entry index */
        ...              sizeof ( arbel_send_wq->wqe[0] ) );
        ...              sizeof ( arbel_recv_wq->wqe[0] ) );
        DBGCP ( arbel, "Arbel %p CQN %#lx QPN %#lx %s WQE %#lx completed:\n",
                arbel, cq->cqn, qp->qpn, ( is_send ? "send" : "recv" ),
                ... );

        /* Identify I/O buffer */
        iobuf = wq->iobufs[wqe_idx];
        if ( ! iobuf ) {
                DBGC ( arbel, "Arbel %p CQN %#lx QPN %#lx empty %s WQE %#lx\n",
                       arbel, cq->cqn, qp->qpn, ( is_send ? "send" : "recv" ),
                       ... );
        ...
                recv_wqe = &arbel_recv_wq->wqe[wqe_idx].recv;
        ...
                memset ( &recv_dest, 0, sizeof ( recv_dest ) );
        ...
                switch ( qp->type ) {
                ...
                        grh = &arbel_recv_wq->grh[wqe_idx];
                        len -= sizeof ( *grh );
                ...
                        source = &recv_source;
                        memset ( source, 0, sizeof ( *source ) );
                ...
                        ...      sizeof ( recv_dest.gid ) );
                        ...      sizeof ( source->gid ) );
        ...
/* Poll completion queue */
static void arbel_poll_cq ( struct ib_device *ibdev,
                            struct ib_completion_queue *cq ) {
        struct arbelprm_cq_ci_db_record *ci_db_rec;
        unsigned int cqe_idx_mask;
        ...
        while ( 1 ) {
                /* Look for completion entry */
                cqe_idx_mask = ( cq->num_cqes - 1 );
                cqe = &arbel_cq->cqe[ cq->next_idx & cqe_idx_mask ];
        ...
                        DBGC ( arbel, "Arbel %p CQN %#lx failed to complete: "
                               ... );
        ...
                /* Update doorbell record */
                ...          counter, ( cq->next_idx & 0xffffffffUL ) );
        ...
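/* Illustrative sketch only: inside the polling loop, a completion entry is
 * conventionally tested for ownership before it is consumed.  The owner
 * field name is assumed from the arbelprm CQE layout; the helper is
 * hypothetical.
 */
static int arbel_sketch_cqe_ready ( union arbelprm_completion_entry *cqe ) {
        /* A non-zero owner bit means the entry is still owned by the
         * hardware and must not be consumed yet. */
        return ( MLX_GET ( &cqe->normal, owner ) == 0 );
}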
/* Create event queue */
static int arbel_create_eq ( struct arbel *arbel ) {
        struct arbelprm_eqc eqctx;
        struct arbelprm_event_mask mask;
        ...
        /* Allocate event queue entries */
        arbel_eq->eqe_size = ( ARBEL_NUM_EQES * sizeof ( arbel_eq->eqe[0] ) );
        ...
        if ( ! arbel_eq->eqe ) {
        ...
        /* Hand queue over to hardware */
        memset ( &eqctx, 0, sizeof ( eqctx ) );
        ...
                DBGC ( arbel, "Arbel %p EQN %#lx SW2HW_EQ failed: %s\n",
                       ... );
        ...
        /* Map all events to this event queue */
        memset ( &mask, 0xff, sizeof ( mask ) );
        ...
                DBGC ( arbel, "Arbel %p EQN %#lx MAP_EQ failed: %s\n", ... );
        ...
        DBGC ( arbel, "Arbel %p EQN %#lx ring [%08lx,%08lx), doorbell %08lx\n",
               ... );
        ...
        memset ( arbel_eq, 0, sizeof ( *arbel_eq ) );
/* Destroy event queue */
static void arbel_destroy_eq ( struct arbel *arbel ) {
        struct arbelprm_eqc eqctx;
        struct arbelprm_event_mask mask;
        ...
        /* Unmap events from event queue */
        memset ( &mask, 0, sizeof ( mask ) );
        ...
                DBGC ( arbel, "Arbel %p EQN %#lx FATAL MAP_EQ failed to "
                       ... );
        ...
                DBGC ( arbel, "Arbel %p EQN %#lx FATAL HW2SW_EQ failed: %s\n",
                       ... );
        ...
        memset ( arbel_eq, 0, sizeof ( *arbel_eq ) );
/* Handle port state change event */
        ...      ( link_up ? "up" : "down" ) );
        ...
                DBGC ( arbel, "Arbel %p port %d does not exist!\n", ... );
/* Poll event queue */
static void arbel_poll_eq ( struct ib_device *ibdev ) {
        unsigned int eqe_idx_mask;
        unsigned int event_type;
        ...
        /* Look for event entry */
        eqe = &arbel_eq->eqe[ arbel_eq->next_idx & eqe_idx_mask ];
        ...
        ...          eqe, sizeof ( *eqe ) );
        ...
        /* Handle event */
        switch ( event_type ) {
        ...
                DBGC ( arbel, "Arbel %p EQN %#lx unrecognised event "
                       ..., arbel, arbel_eq->eqn, event_type );
        ...          eqe, sizeof ( *eqe ) );
        ...
/* Map virtual to physical address for firmware usage */
static int arbel_map_vpm ( struct arbel *arbel,
                           int ( * map ) ( struct arbel *arbel,
                                const struct arbelprm_virtual_physical_mapping* ),
                           uint64_t va, physaddr_t pa, size_t len ) {
        struct arbelprm_virtual_physical_mapping mapping;
        ...
                memset ( &mapping, 0, sizeof ( mapping ) );
                MLX_FILL_1 ( &mapping, 0, va_h, ( va >> 32 ) );
                MLX_FILL_1 ( &mapping, 1, va_l, ( va >> 12 ) );
                ...          log2size, ( ( fls ( size ) - 1 ) - 12 ),
                             pa_l, ( pa >> 12 ) );
                if ( ( rc = map ( arbel, &mapping ) ) != 0 ) {
                        DBGC ( arbel, "Arbel %p could not map %08llx+%zx to "
                               ... );
        ...
}

/* Start firmware running */
static int arbel_start_firmware ( struct arbel *arbel ) {
        struct arbelprm_query_fw fw;
        struct arbelprm_access_lam lam;
        unsigned int fw_pages;
        ...
        /* Get firmware parameters */
        if ( ( rc = arbel_cmd_query_fw ( arbel, &fw ) ) != 0 ) {
                DBGC ( arbel, "Arbel %p could not query firmware: %s\n",
                       ... );
        ...      MLX_GET ( &fw, fw_rev_subminor ) );
        fw_pages = MLX_GET ( &fw, fw_pages );
        DBGC ( arbel, "Arbel %p requires %d kB for firmware\n",
               arbel, ( fw_pages * 4 ) );
        eq_set_ci_base_addr = ...
        ...
        /* Allocate and map firmware area */
        DBGC ( arbel, "Arbel %p firmware area at [%08lx,%08lx)\n",
               arbel, fw_base, ( fw_base + fw_len ) );
        if ( ( rc = arbel_map_vpm ( arbel, arbel_cmd_map_fa,
                                    0, fw_base, fw_len ) ) != 0 ) {
                DBGC ( arbel, "Arbel %p could not map firmware: %s\n", ... );
        ...
        /* Start firmware */
        ...
                DBGC ( arbel, "Arbel %p could not run firmware: %s\n", ... );
        ...
}

/* Stop firmware running */
        ...
                DBGC ( arbel, "Arbel %p FATAL could not stop firmware: %s\n",
                       ... );
/* Get device limits */
static int arbel_get_limits ( struct arbel *arbel ) {
        struct arbelprm_query_dev_lim dev_lim;
        ...
                DBGC ( arbel, "Arbel %p could not get device limits: %s\n",
                       ... );
        ...
        arbel->limits.reserved_qps =
                ( 1 << MLX_GET ( &dev_lim, log2_rsvd_qps ) );
        ...
        arbel->limits.reserved_srqs =
                ( 1 << MLX_GET ( &dev_lim, log2_rsvd_srqs ) );
        ...
        arbel->limits.reserved_ees =
                ( 1 << MLX_GET ( &dev_lim, log2_rsvd_ees ) );
        ...
        arbel->limits.reserved_cqs =
                ( 1 << MLX_GET ( &dev_lim, log2_rsvd_cqs ) );
        ...
        arbel->limits.reserved_mtts =
                ( 1 << MLX_GET ( &dev_lim, log2_rsvd_mtts ) );
        ...
        arbel->limits.reserved_mrws =
                ( 1 << MLX_GET ( &dev_lim, log2_rsvd_mrws ) );
        ...
        arbel->limits.reserved_rdbs =
                ( 1 << MLX_GET ( &dev_lim, log2_rsvd_rdbs ) );
        ...
        arbel->limits.uar_scratch_entry_size =
                MLX_GET ( &dev_lim, uar_scratch_entry_sz );

        DBGC ( arbel, "Arbel %p reserves %d x %#zx QPC, %d x %#zx EQPC, "
               "%d x %#zx SRQC\n", arbel, ... );
        DBGC ( arbel, "Arbel %p reserves %d x %#zx EEC, %d x %#zx EEEC, "
               "%d x %#zx CQC\n", arbel, ... );
        DBGC ( arbel, "Arbel %p reserves %d x %#zx EQC, %d x %#zx MTT, "
               "%d x %#zx MPT\n", arbel, ... );
        DBGC ( arbel, "Arbel %p reserves %d x %#zx RDB, %d x %#zx UAR, "
               "%d x %#zx UAR scratchpad\n", arbel, ... );
        ...
}

/* Align ICM table */
static size_t icm_align ( size_t icm_offset, size_t len ) {
        /* Round up to a multiple of the table length (a power of two) */
        return ( ( icm_offset + len - 1 ) & ~( len - 1 ) );
}
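/* Worked example: aligning an ICM offset of 0x9000 for a table of length
 * 0x4000 gives ( 0x9000 + 0x4000 - 1 ) & ~( 0x4000 - 1 ) = 0xc000, i.e.
 * the table is placed at the next 16 kB boundary.
 */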
/* Allocate ICM */
static int arbel_alloc_icm ( struct arbel *arbel,
                             struct arbelprm_init_hca *init_hca ) {
        struct arbelprm_scalar_parameter icm_size;
        struct arbelprm_scalar_parameter icm_aux_size;
        struct arbelprm_scalar_parameter unmap_icm;
        size_t icm_offset = 0;
        unsigned int log_num_uars, log_num_qps, log_num_srqs, log_num_ees;
        unsigned int log_num_cqs, log_num_mtts, log_num_mpts, log_num_rdbs;
        unsigned int log_num_eqs, log_num_mcs;
        size_t icm_len, icm_aux_len;
        ...
        /* Queue pair contexts */
        ...          qpc_eec_cqc_eqc_rdb_parameters.qpc_base_addr_l,
                     ( icm_offset >> 7 ),
                     qpc_eec_cqc_eqc_rdb_parameters.log_num_of_qp, ... );
        DBGC ( arbel, "Arbel %p ICM QPC is %d x %#zx at [%zx,%zx)\n",
               ..., icm_offset, ( icm_offset + len ) );
        ...
        /* Extended queue pair contexts */
        ...          qpc_eec_cqc_eqc_rdb_parameters.eqpc_base_addr_l, ... );
        DBGC ( arbel, "Arbel %p ICM EQPC is %d x %#zx at [%zx,%zx)\n",
               ..., icm_offset, ( icm_offset + len ) );
        ...
        /* Completion queue contexts */
        ...          qpc_eec_cqc_eqc_rdb_parameters.cqc_base_addr_l,
                     ( icm_offset >> 6 ),
                     qpc_eec_cqc_eqc_rdb_parameters.log_num_of_cq, ... );
        DBGC ( arbel, "Arbel %p ICM CQC is %d x %#zx at [%zx,%zx)\n",
               ..., icm_offset, ( icm_offset + len ) );
        ...
        /* Event queue contexts */
        ...          qpc_eec_cqc_eqc_rdb_parameters.eqc_base_addr_l,
                     ( icm_offset >> 6 ),
                     qpc_eec_cqc_eqc_rdb_parameters.log_num_eq, ... );
        DBGC ( arbel, "Arbel %p ICM EQC is %d x %#zx at [%zx,%zx)\n",
               ..., icm_offset, ( icm_offset + len ) );
        ...
        /* End-to-end contexts */
        ...          qpc_eec_cqc_eqc_rdb_parameters.eec_base_addr_l,
                     ( icm_offset >> 7 ),
                     qpc_eec_cqc_eqc_rdb_parameters.log_num_of_ee, ... );
        DBGC ( arbel, "Arbel %p ICM EEC is %d x %#zx at [%zx,%zx)\n",
               ..., icm_offset, ( icm_offset + len ) );
        ...
        /* Shared receive queue contexts */
        ...          qpc_eec_cqc_eqc_rdb_parameters.srqc_base_addr_l,
                     ( icm_offset >> 5 ),
                     qpc_eec_cqc_eqc_rdb_parameters.log_num_of_srq, ... );
        DBGC ( arbel, "Arbel %p ICM SRQC is %d x %#zx at [%zx,%zx)\n",
               ..., icm_offset, ( icm_offset + len ) );
        ...
        /* Memory protection table */
        ...          tpt_parameters.mpt_base_adr_l, icm_offset );
        ...          tpt_parameters.log_mpt_sz, log_num_mpts );
        DBGC ( arbel, "Arbel %p ICM MPT is %d x %#zx at [%zx,%zx)\n",
               ..., icm_offset, ( icm_offset + len ) );
        ...
        /* RDMA database entries */
        ...          qpc_eec_cqc_eqc_rdb_parameters.rdb_base_addr_l, ... );
        DBGC ( arbel, "Arbel %p ICM RDB is %d x %#zx at [%zx,%zx)\n",
               ..., icm_offset, ( icm_offset + len ) );
        ...
        /* Extended end-to-end contexts */
        ...          qpc_eec_cqc_eqc_rdb_parameters.eeec_base_addr_l, ... );
        DBGC ( arbel, "Arbel %p ICM EEEC is %d x %#zx at [%zx,%zx)\n",
               ..., icm_offset, ( icm_offset + len ) );
        ...
        /* Multicast table */
        len = ( ( 1 << log_num_mcs ) * sizeof ( struct arbelprm_mgm_entry ) );
        ...          multicast_parameters.mc_base_addr_l, icm_offset );
        ...          multicast_parameters.log_mc_table_entry_sz,
                     fls ( sizeof ( struct arbelprm_mgm_entry ) - 1 ) );
        ...          multicast_parameters.mc_table_hash_sz,
                     ( 1 << log_num_mcs ) );
        ...          multicast_parameters.log_mc_table_sz, ... );
        DBGC ( arbel, "Arbel %p ICM MC is %d x %#zx at [%zx,%zx)\n", arbel,
               ( 1 << log_num_mcs ), sizeof ( struct arbelprm_mgm_entry ),
               icm_offset, ( icm_offset + len ) );
        ...
        /* Memory translation table */
        ...          tpt_parameters.mtt_base_addr_l, icm_offset );
        DBGC ( arbel, "Arbel %p ICM MTT is %d x %#zx at [%zx,%zx)\n",
               ..., icm_offset, ( icm_offset + len ) );
        ...
        /* UAR scratchpad */
        ...          uar_parameters.uar_scratch_base_addr_l, icm_offset );
        DBGC ( arbel, "Arbel %p UAR scratchpad is %d x %#zx at [%zx,%zx)\n",
               arbel, ( 1 << log_num_uars ), ...,
               icm_offset, ( icm_offset + len ) );
        ...
        icm_len = icm_offset;
        ...
        /* UAR contexts */
        MLX_FILL_1 ( init_hca, 74, uar_parameters.log_max_uars, log_num_uars );
        ...          uar_parameters.uar_context_base_addr_l, icm_offset );
        ...
        DBGC ( arbel, "Arbel %p UAR is %d x %#zx at [%zx,%zx), doorbells "
               ... );

        /* Get ICM auxiliary area size */
        memset ( &icm_size, 0, sizeof ( icm_size ) );
        ...
        if ( ( rc = arbel_cmd_set_icm_size ( arbel, &icm_size,
                                             &icm_aux_size ) ) != 0 ) {
                DBGC ( arbel, "Arbel %p could not set ICM size: %s\n", ... );
                goto err_set_icm_size;
        }
        ...
        DBGC ( arbel, "Arbel %p requires %zd kB ICM and %zd kB AUX ICM\n",
               arbel, ( icm_len / 1024 ), ( icm_aux_len / 1024 ) );
        ...
                goto err_alloc_doorbell;
        ...
        /* Map ICM auxiliary area */
        DBGC ( arbel, "Arbel %p ICM AUX at [%08lx,%08lx)\n", ... );
        ...
                DBGC ( arbel, "Arbel %p could not map AUX ICM: %s\n", ... );
                goto err_map_icm_aux;
        ...
        /* Map ICM area */
        DBGC ( arbel, "Arbel %p ICM at [%08lx,%08lx)\n", ... );
        ...
                DBGC ( arbel, "Arbel %p could not map ICM: %s\n", ... );
        ...
        /* Map doorbell UAR */
        DBGC ( arbel, "Arbel %p UAR at [%08lx,%08lx)\n", ... );
        ...
                DBGC ( arbel, "Arbel %p could not map doorbell UAR: %s\n",
                       ... );
                goto err_map_doorbell;
        ...
        memset ( &unmap_icm, 0, sizeof ( unmap_icm ) );
        ...
        memset ( &unmap_icm, 0, sizeof ( unmap_icm ) );
        ...
}

/* Free ICM */
static void arbel_free_icm ( struct arbel *arbel ) {
        struct arbelprm_scalar_parameter unmap_icm;
        ...
        memset ( &unmap_icm, 0, sizeof ( unmap_icm ) );
        ...
        memset ( &unmap_icm, 0, sizeof ( unmap_icm ) );
        ...
}
/* PCI configuration registers excluded from reset backup/restore */
static const uint8_t backup_exclude[] =
        PCI_CONFIG_BACKUP_EXCLUDE ( ... );
...

/* Set up memory protection table */
static int arbel_setup_mpt ( struct arbel *arbel ) {
        struct arbelprm_mpt mpt;
        ...
        memset ( &mpt, 0, sizeof ( mpt ) );
        ...
        MLX_FILL_1 ( &mpt, 6, reg_wnd_len_h, 0xffffffffUL );
        MLX_FILL_1 ( &mpt, 7, reg_wnd_len_l, 0xffffffffUL );
        ...
                DBGC ( arbel, "Arbel %p could not set up MPT: %s\n", ... );
/* Configure special queue pairs */
static int arbel_configure_special_qps ( struct arbel *arbel ) {
        unsigned int smi_qpn_base;
        unsigned int gsi_qpn_base;
        ...
        gsi_qpn_base = ( smi_qpn_base + 2 );
        ...
        if ( ( rc = arbel_cmd_conf_special_qp ( arbel, ...,
                                                smi_qpn_base ) ) != 0 ) {
                DBGC ( arbel, "Arbel %p could not configure SMI QPs: %s\n",
                       ... );
        ...
        if ( ( rc = arbel_cmd_conf_special_qp ( arbel, ...,
                                                gsi_qpn_base ) ) != 0 ) {
                DBGC ( arbel, "Arbel %p could not configure GSI QPs: %s\n",
                       ... );
        ...
}

/* Start Arbel device */
static int arbel_start ( struct arbel *arbel, int running ) {
        struct arbelprm_init_hca init_hca;
        ...
                goto err_start_firmware;
        ...
        /* Initialise HCA */
        memset ( &init_hca, 0, sizeof ( init_hca ) );
        ...
                DBGC ( arbel, "Arbel %p could not initialise HCA: %s\n", ... );
        ...
                goto err_conf_special_qps;
        ...
 err_conf_special_qps:
/* Initialise Infiniband link */
static int arbel_ib_open ( struct ib_device *ibdev ) {
        struct arbelprm_init_ib init_ib;
        ...
        memset ( &init_ib, 0, sizeof ( init_ib ) );
        ...
        if ( ( rc = arbel_cmd_init_ib ( arbel, ibdev->port,
                                        &init_ib ) ) != 0 ) {
                DBGC ( arbel, "Arbel %p port %d could not initialise IB: %s\n",
                       ... );
        ...
}

/* Close Infiniband link */
        ...
                DBGC ( arbel, "Arbel %p port %d could not close IB: %s\n",
                       ... );
/* Attach to multicast group */
static int arbel_mcast_attach ( struct ib_device *ibdev,
                                struct ib_queue_pair *qp,
                                union ib_gid *gid ) {
        struct arbelprm_mgm_hash hash;
        struct arbelprm_mgm_entry mgm;
        ...
        /* Generate hash table index */
        if ( ( rc = arbel_cmd_mgid_hash ( arbel, gid, &hash ) ) != 0 ) {
                DBGC ( arbel, "Arbel %p could not hash GID: %s\n", ... );
        ...
        /* Check for existing hash table entry */
        if ( ( rc = arbel_cmd_read_mgm ( arbel, index, &mgm ) ) != 0 ) {
                DBGC ( arbel, "Arbel %p could not read MGM %#x: %s\n", ... );
        ...
        if ( MLX_GET ( &mgm, mgmqp_0.qi ) != 0 ) {
                DBGC ( arbel, "Arbel %p MGID index %#x already in use\n",
                       ... );
        ...
        /* Update hash table entry */
        ...          mgmqp_0.qpn_i, qp->qpn, ... );
        ...
                DBGC ( arbel, "Arbel %p could not write MGM %#x: %s\n", ... );
        ...
}

/* Detach from multicast group */
static void arbel_mcast_detach ( struct ib_device *ibdev,
                                 struct ib_queue_pair *qp __unused,
                                 union ib_gid *gid ) {
        struct arbelprm_mgm_hash hash;
        struct arbelprm_mgm_entry mgm;
        ...
        if ( ( rc = arbel_cmd_mgid_hash ( arbel, gid, &hash ) ) != 0 ) {
                DBGC ( arbel, "Arbel %p could not hash GID: %s\n", ... );
        ...
        /* Clear hash table entry */
        memset ( &mgm, 0, sizeof ( mgm ) );
        ...
                DBGC ( arbel, "Arbel %p could not write MGM %#x: %s\n", ... );
        ...
/* Allocate Arbel device */
        ...
                goto err_mailbox_in;
        ...
                goto err_mailbox_out;
        ...

/* Probe PCI device */
static int arbel_probe ( struct pci_device *pci ) {
        unsigned long config;
        ...
                goto err_alloc_ibdev;
        ...
                goto err_start_firmware;
        ...
                goto err_get_limits;
        ...
        /* Register Infiniband devices */
        ...
                DBGC ( arbel, "Arbel %p port %d could not register IB "
                       "device: %s\n", arbel, ... );
                goto err_register_ibdev;
        ...
        for ( i-- ; i >= 0 ; i-- )
        ...

/* Remove PCI device */
        ...
        for ( i-- ; i >= 0 ; i-- )
        ...

static struct pci_device_id arbel_nics[] = {
        PCI_ROM ( 0x15b3, 0x6274, "mt25204", "MT25204 HCA driver", 0 ),
        PCI_ROM ( 0x15b3, 0x6282, "mt25218", "MT25218 HCA driver", 0 ),
};