struct devx_async_data {
	struct mlx5_ib_dev *mdev;
	struct list_head list;
	struct devx_async_cmd_event_file *ev_file;
	struct mlx5_async_work cb_work;
	u16 cmd_out_len;
	/* must be last field in this structure */
	struct mlx5_ib_uapi_devx_async_cmd_hdr hdr;
};
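/*
 * Illustrative note (assumption, not taken from this excerpt): keeping hdr
 * as the last field lets the asynchronous command output land directly
 * behind the fixed part of the structure, so one allocation sized for the
 * caller's cmd_out_len can hold both, e.g.:
 *
 *	async_data = kvzalloc(struct_size(async_data, hdr.out_data,
 *					  cmd_out_len), GFP_KERNEL);
 */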
/* first level XA value data structure */
struct devx_event {
	struct xarray object_ids; /* second XA level, Key = object id */
	struct list_head unaffiliated_list;
};
/* second level XA value data structure */
struct devx_obj_event {
	struct rcu_head rcu;
	struct list_head obj_sub_list;
};
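/*
 * Usage sketch (assumption, not part of the original file): dispatching an
 * affiliated event walks the two xarray levels above - the event number
 * selects a struct devx_event, and the object id then selects the
 * struct devx_obj_event that heads the subscriber list.
 */
static struct devx_obj_event *
devx_lookup_obj_event_sketch(struct xarray *event_xa, u16 event_num,
			     u32 obj_id)
{
	struct devx_event *event;

	/* first level: keyed by event number */
	event = xa_load(event_xa, event_num);
	if (!event)
		return NULL;

	/* second level: keyed by object id */
	return xa_load(&event->object_ids, obj_id);
}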
struct devx_event_subscription {
	struct list_head file_list; /* headed in ev_file->
				     * subscribed_events_list
				     */
	struct list_head xa_list; /* headed in devx_event->unaffiliated_list or
				   * devx_obj_event->obj_sub_list
				   */
	struct list_head obj_list; /* headed in devx_object */
	struct list_head event_list; /* headed in ev_file->event_list or in
				      * temp list via subscription
				      */
static bool is_legacy_obj_event_num(u16 event_num)
{
	switch (event_num) {
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
	case MLX5_EVENT_TYPE_CQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_DCT_DRAINED:
	case MLX5_EVENT_TYPE_COMP:
	case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
	case MLX5_EVENT_TYPE_XRQ_ERROR:
		return true;
	default:
		return false;
	}
}
static u16 get_legacy_obj_type(u16 opcode)
{
	switch (opcode) {
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_CREATE_RMP:
		return MLX5_EVENT_QUEUE_TYPE_RQ;
	case MLX5_CMD_OP_CREATE_QP:
		return MLX5_EVENT_QUEUE_TYPE_QP;
	case MLX5_CMD_OP_CREATE_SQ:
		return MLX5_EVENT_QUEUE_TYPE_SQ;
	case MLX5_CMD_OP_CREATE_DCT:
		return MLX5_EVENT_QUEUE_TYPE_DCT;
	default:
		return 0;
	}
}
	if (is_legacy_obj_event_num(event_num))
		return get_legacy_obj_type(opcode);

	switch (opcode) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
		return (obj->obj_id >> 48);
	case MLX5_CMD_OP_CREATE_RQ:
		return MLX5_OBJ_TYPE_RQ;
	case MLX5_CMD_OP_CREATE_QP:
		return MLX5_OBJ_TYPE_QP;
	case MLX5_CMD_OP_CREATE_SQ:
		return MLX5_OBJ_TYPE_SQ;
	case MLX5_CMD_OP_CREATE_DCT:
		return MLX5_OBJ_TYPE_DCT;
	case MLX5_CMD_OP_CREATE_TIR:
		return MLX5_OBJ_TYPE_TIR;
	case MLX5_CMD_OP_CREATE_TIS:
		return MLX5_OBJ_TYPE_TIS;
	case MLX5_CMD_OP_CREATE_PSV:
		return MLX5_OBJ_TYPE_PSV;
	case MLX5_OBJ_TYPE_MKEY:
		return MLX5_OBJ_TYPE_MKEY;
	case MLX5_CMD_OP_CREATE_RMP:
		return MLX5_OBJ_TYPE_RMP;
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		return MLX5_OBJ_TYPE_XRC_SRQ;
	case MLX5_CMD_OP_CREATE_XRQ:
		return MLX5_OBJ_TYPE_XRQ;
	case MLX5_CMD_OP_CREATE_RQT:
		return MLX5_OBJ_TYPE_RQT;
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
		return MLX5_OBJ_TYPE_FLOW_COUNTER;
	case MLX5_CMD_OP_CREATE_CQ:
		return MLX5_OBJ_TYPE_CQ;
	default:
		return 0;
	}
}
static u16 get_event_obj_type(unsigned long event_type, struct mlx5_eqe *eqe)
{
	switch (event_type) {
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
		return eqe->data.qp_srq.type;
	case MLX5_EVENT_TYPE_CQ_ERROR:
	case MLX5_EVENT_TYPE_XRQ_ERROR:
		return 0;
	case MLX5_EVENT_TYPE_DCT_DRAINED:
	case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
		return MLX5_EVENT_QUEUE_TYPE_DCT;
	default:
		return MLX5_GET(affiliated_event_header, &eqe->data, obj_type);
	}
}
/*
 * As the obj_id in the firmware is not globally unique the object type
 * must be considered upon checking for a valid object id.
 * For that the opcode of the creator command is encoded as part of the obj_id.
 */
static u64 get_enc_obj_id(u32 opcode, u32 obj_id)
{
	return ((u64)opcode << 32) | obj_id;
}
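/*
 * Illustrative sketch (assumption, not part of the original file): the
 * encoded id keeps the creator opcode in the upper 32 bits and the firmware
 * object id in the lower 32 bits, so a CQ and a QP that happen to share the
 * same 32-bit firmware id still map to distinct keys. These hypothetical
 * helpers only show how such a key splits back apart.
 */
static u32 enc_obj_id_opcode(u64 enc_obj_id)
{
	return enc_obj_id >> 32;
}

static u32 enc_obj_id_fw_id(u64 enc_obj_id)
{
	return enc_obj_id & 0xffffffff;
}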
static u32 devx_get_created_obj_id(const void *in, const void *out, u16 opcode)
{
	switch (opcode) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
		return MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	case MLX5_CMD_OP_CREATE_UMEM:
		return MLX5_GET(create_umem_out, out, umem_id);
	case MLX5_CMD_OP_CREATE_MKEY:
		return MLX5_GET(create_mkey_out, out, mkey_index);
	case MLX5_CMD_OP_CREATE_CQ:
		return MLX5_GET(create_cq_out, out, cqn);
	case MLX5_CMD_OP_ALLOC_PD:
		return MLX5_GET(alloc_pd_out, out, pd);
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
		return MLX5_GET(alloc_transport_domain_out, out,
				transport_domain);
	case MLX5_CMD_OP_CREATE_RMP:
		return MLX5_GET(create_rmp_out, out, rmpn);
	case MLX5_CMD_OP_CREATE_SQ:
		return MLX5_GET(create_sq_out, out, sqn);
	case MLX5_CMD_OP_CREATE_RQ:
		return MLX5_GET(create_rq_out, out, rqn);
	case MLX5_CMD_OP_CREATE_RQT:
		return MLX5_GET(create_rqt_out, out, rqtn);
	case MLX5_CMD_OP_CREATE_TIR:
		return MLX5_GET(create_tir_out, out, tirn);
	case MLX5_CMD_OP_CREATE_TIS:
		return MLX5_GET(create_tis_out, out, tisn);
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
		return MLX5_GET(alloc_q_counter_out, out, counter_set_id);
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
		return MLX5_GET(create_flow_table_out, out, table_id);
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
		return MLX5_GET(create_flow_group_out, out, group_id);
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		return MLX5_GET(set_fte_in, in, flow_index);
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
		return MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
		return MLX5_GET(alloc_packet_reformat_context_out, out,
				packet_reformat_id);
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
		return MLX5_GET(alloc_modify_header_context_out, out,
				modify_header_id);
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
		return MLX5_GET(create_scheduling_element_out, out,
				scheduling_element_id);
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		return MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		return MLX5_GET(set_l2_table_entry_in, in, table_index);
	case MLX5_CMD_OP_CREATE_QP:
		return MLX5_GET(create_qp_out, out, qpn);
	case MLX5_CMD_OP_CREATE_SRQ:
		return MLX5_GET(create_srq_out, out, srqn);
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		return MLX5_GET(create_xrc_srq_out, out, xrc_srqn);
	case MLX5_CMD_OP_CREATE_DCT:
		return MLX5_GET(create_dct_out, out, dctn);
	case MLX5_CMD_OP_CREATE_XRQ:
		return MLX5_GET(create_xrq_out, out, xrqn);
	case MLX5_CMD_OP_ATTACH_TO_MCG:
		return MLX5_GET(attach_to_mcg_in, in, qpn);
	case MLX5_CMD_OP_ALLOC_XRCD:
		return MLX5_GET(alloc_xrcd_out, out, xrcd);
	case MLX5_CMD_OP_CREATE_PSV:
		return MLX5_GET(create_psv_out, out, psv0_index);
	default:
		/* The entry must match to one of the devx_is_obj_create_cmd */
		WARN_ON(true);
		return 0;
	}
}
	switch (opcode) {
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_GENERAL_OBJECT |
					MLX5_GET(general_obj_in_cmd_hdr, in,
						 obj_type) << 16,
					MLX5_GET(general_obj_in_cmd_hdr, in,
						 obj_id));
		break;
	case MLX5_CMD_OP_QUERY_MKEY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_MKEY,
					MLX5_GET(query_mkey_in, in,
						 mkey_index));
		break;
	case MLX5_CMD_OP_QUERY_CQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
					MLX5_GET(query_cq_in, in, cqn));
		break;
	case MLX5_CMD_OP_MODIFY_CQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
					MLX5_GET(modify_cq_in, in, cqn));
		break;
	case MLX5_CMD_OP_QUERY_SQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					MLX5_GET(query_sq_in, in, sqn));
		break;
	case MLX5_CMD_OP_MODIFY_SQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					MLX5_GET(modify_sq_in, in, sqn));
		break;
	case MLX5_CMD_OP_QUERY_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(query_rq_in, in, rqn));
		break;
	case MLX5_CMD_OP_MODIFY_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(modify_rq_in, in, rqn));
		break;
	case MLX5_CMD_OP_QUERY_RMP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
					MLX5_GET(query_rmp_in, in, rmpn));
		break;
	case MLX5_CMD_OP_MODIFY_RMP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
					MLX5_GET(modify_rmp_in, in, rmpn));
		break;
	case MLX5_CMD_OP_QUERY_RQT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
					MLX5_GET(query_rqt_in, in, rqtn));
		break;
	case MLX5_CMD_OP_MODIFY_RQT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
					MLX5_GET(modify_rqt_in, in, rqtn));
		break;
	case MLX5_CMD_OP_QUERY_TIR:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					MLX5_GET(query_tir_in, in, tirn));
		break;
	case MLX5_CMD_OP_MODIFY_TIR:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					MLX5_GET(modify_tir_in, in, tirn));
		break;
	case MLX5_CMD_OP_QUERY_TIS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					MLX5_GET(query_tis_in, in, tisn));
		break;
	case MLX5_CMD_OP_MODIFY_TIS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					MLX5_GET(modify_tis_in, in, tisn));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
					MLX5_GET(query_flow_table_in, in,
						 table_id));
		break;
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
					MLX5_GET(modify_flow_table_in, in,
						 table_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_GROUP,
					MLX5_GET(query_flow_group_in, in,
						 group_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
					MLX5_GET(query_fte_in, in,
						 flow_index));
		break;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
					MLX5_GET(set_fte_in, in, flow_index));
		break;
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_Q_COUNTER,
					MLX5_GET(query_q_counter_in, in,
						 counter_set_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_FLOW_COUNTER,
					MLX5_GET(query_flow_counter_in, in,
						 flow_counter_id));
		break;
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT,
					MLX5_GET(query_modify_header_context_in,
						 in, modify_header_id));
		break;
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
					MLX5_GET(query_scheduling_element_in,
						 in, scheduling_element_id));
		break;
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
					MLX5_GET(modify_scheduling_element_in,
						 in, scheduling_element_id));
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT,
					MLX5_GET(add_vxlan_udp_dport_in, in,
						 vxlan_udp_port));
		break;
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
					MLX5_GET(query_l2_table_entry_in, in,
						 table_index));
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
					MLX5_GET(set_l2_table_entry_in, in,
						 table_index));
		break;
	case MLX5_CMD_OP_QUERY_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(query_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RST2INIT_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rst2init_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_INIT2INIT_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(init2init_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(init2rtr_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rtr2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rts2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_SQERR2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(sqerr2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_2ERR_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(qp_2err_in, in, qpn));
		break;
	case MLX5_CMD_OP_2RST_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(qp_2rst_in, in, qpn));
		break;
	case MLX5_CMD_OP_QUERY_DCT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					MLX5_GET(query_dct_in, in, dctn));
		break;
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
					MLX5_GET(query_xrq_in, in, xrqn));
		break;
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
					MLX5_GET(query_xrc_srq_in, in,
						 xrc_srqn));
		break;
	case MLX5_CMD_OP_ARM_XRC_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
					MLX5_GET(arm_xrc_srq_in, in, xrc_srqn));
		break;
	case MLX5_CMD_OP_QUERY_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SRQ,
					MLX5_GET(query_srq_in, in, srqn));
		break;
	case MLX5_CMD_OP_ARM_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(arm_rq_in, in, srq_number));
		break;
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					MLX5_GET(drain_dct_in, in, dctn));
		break;
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
	case MLX5_CMD_OP_MODIFY_XRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
					MLX5_GET(arm_xrq_in, in, xrqn));
		break;
	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
		obj_id = get_enc_obj_id
			(MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT,
			 MLX5_GET(query_packet_reformat_context_in,
				  in, packet_reformat_id));
		break;
	default:
		obj_id = 0;
	}
	switch (*opcode) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_CREATE_XRQ:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_ALLOC_XRCD:
		return true;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	{
		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);

		if (op_mod == 0)
			return true;
		return false;
	}
	case MLX5_CMD_OP_CREATE_PSV:
	{
		u8 num_psv = MLX5_GET(create_psv_in, in, num_psv);
static bool devx_is_obj_modify_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_INIT2INIT_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
	case MLX5_CMD_OP_MODIFY_XRQ:
		return true;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	{
		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);
static bool devx_is_obj_query_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
		return true;
	default:
		return false;
	}
}
static bool devx_is_whitelist_cmd(void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
		return true;
	default:
		return false;
	}
}
	/* Pass all cmds for vhca_tunnel as general, tracking is done in FW */
	if ((MLX5_CAP_GEN_64(dev->mdev, vhca_tunnel_commands) &&
	     MLX5_GET(general_obj_in_cmd_hdr, in, vhca_tunnel_id)) ||
	    (opcode >= MLX5_CMD_OP_GENERAL_START &&
	     opcode < MLX5_CMD_OP_GENERAL_END))
		return true;

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_VNIC_ENV:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
	case MLX5_CMD_OP_QUERY_LAG:
	case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
		return true;
	default:
		return false;
	}
}
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	int user_vector;
	int dev_eqn;
	int err;

	if (uverbs_copy_from(&user_vector, attrs,
			     MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC))
		return -EFAULT;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
			   &dev_eqn, sizeof(dev_eqn)))
		return -EFAULT;

	return 0;
}
/*
 * Security note:
 * The hardware protection mechanism works like this: Each device object that
 * is subject to UAR doorbells (QP/SQ/CQ) gets a UAR ID (called uar_page in
 * the device specification manual) upon its creation. Then upon doorbell,
 * hardware fetches the object context for which the doorbell was rang, and
 * validates that the UAR through which the DB was rang matches the UAR ID
 * of the object.
 * If no match the doorbell is silently ignored by the hardware. Of course,
 * the user cannot ring a doorbell on a UAR that was not mapped to it.
 * Now in devx, as the devx kernel does not manipulate the QP/SQ/CQ command
 * mailboxes (except tagging them with UID), we expose to the user its UAR
 * ID, so it can embed it in these objects in the expected specification
 * format. So the only thing the user can do is hurt itself by creating a
 * QP/SQ/CQ with a UAR ID other than his, and then in this case other users
 * may ring a doorbell on its objects.
 * The consequence of that will be that another user can schedule a QP/SQ
 * of the buggy user for execution (just insert it to the hardware schedule
 * queue or arm its CQ for event generation), no further harm is expected.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_UAR)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	u32 user_idx;
	s32 dev_idx;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	if (uverbs_copy_from(&user_idx, attrs,
			     MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX))
		return -EFAULT;
	dev = mlx5_udata_to_mdev(&attrs->driver_udata);
	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY &&
	    xa_erase(&obj->ib_dev->odp_mkeys,
		     mlx5_base_mkey(obj->mkey.key)))
		/*
		 * The pagefault_single_data_segment() does commands against
		 * the mmkey, we must wait for that to stop before freeing the
		 * mkey, as another allocation could get the same mkey #.
		 */
		mlx5r_deref_wait_odp_mkey(&obj->mkey);

	if (obj->flags & DEVX_OBJ_FLAGS_HW_FREED)
		ret = 0;
	else if (obj->flags & DEVX_OBJ_FLAGS_DCT)
		ret = mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
	else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
		ret = mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
	else
		ret = mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox,
				    obj->dinlen, out, sizeof(out));
	if (ret)
		return ret;
	/*
	 * Note that if the struct devx_async_cmd_event_file uobj begins to be
	 * destroyed it will block at mlx5_cmd_cleanup_async_ctx() until this
	 * routine returns, ensuring that it always remains valid here.
	 */
	spin_lock_irqsave(&ev_queue->lock, flags);
	list_add_tail(&async_data->list, &ev_queue->event_list);
	spin_unlock_irqrestore(&ev_queue->lock, flags);
	obj_event = xa_load(&event->object_ids, key_level2);
	if (!obj_event) {
		obj_event = kzalloc(sizeof(*obj_event), GFP_KERNEL);
		if (!obj_event)
			/* Level1 is valid for future use, no need to free */
			return -ENOMEM;
	for (i = 0; i < num_events; i++) {
		if (obj) {
			if (!is_legacy_obj_event_num(event_type_num_list[i]))
				return false;
		} else if (!is_legacy_unaffiliated_event_num(
				event_type_num_list[i])) {
			return false;
		}
	}

	return true;
}
#define MAX_SUPP_EVENT_NUM 255
static bool is_valid_events(struct mlx5_core_dev *dev,
			    int num_events, u16 *event_type_num_list,
			    struct devx_obj *obj)
{
	__be64 *aff_events;
	__be64 *unaff_events;
	int mask_entry;
	int mask_bit;
	int i;
	if (!is_valid_events(dev->mdev, num_events, event_type_num_list, obj))
		return -EINVAL;

	INIT_LIST_HEAD(&sub_list);
	/* Protect from concurrent subscriptions to same XA entries to allow
	 * both to succeed
	 */
	mutex_lock(&devx_event_table->event_xa_lock);
	for (i = 0; i < num_events; i++) {
		u32 key_level1;
		event_sub->cookie = cookie;
		event_sub->ev_file = ev_file;
		/* May be needed upon cleanup the devx object/subscription */
		event_sub->xa_key_level1 = key_level1;
		event_sub->xa_key_level2 = obj_id;
		INIT_LIST_HEAD(&event_sub->obj_list);
	}
	/* Once all the allocations and the XA data insertions were done we
	 * can go ahead and add all the subscriptions to the relevant lists
	 * without concern of a failure.
	 */
	list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
		struct devx_event *event;
		struct devx_obj_event *obj_event;
	/* Don't bother checking larger page sizes as offset must be zero and
	 * total DEVX umem length must be equal to total umem length.
	 */
	pgsz_bitmap &= GENMASK_ULL(max_t(u64, order_base_2(umem->length),
					 PAGE_SHIFT),
				   MLX5_ADAPTER_PAGE_SHIFT);
	if (!pgsz_bitmap)
		return 0;

	page_size = ib_umem_find_best_pgoff(umem, pgsz_bitmap, U64_MAX);
	if (!page_size)
		return 0;

	/* If the page_size is less than the CPU page size then we can use the
	 * offset and create a umem which is a subset of the page list.
	 * For larger page sizes we can't be sure the DMA list reflects the
	 * VA so we must ensure that the umem extent is exactly equal to the
	 * page list. Reduce the page size until one of these cases is true.
	 */
	while ((ib_umem_dma_offset(umem, page_size) != 0 ||
		(umem->length % page_size) != 0) &&
		page_size > PAGE_SIZE)
		page_size /= 2;
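	/*
	 * Worked example (illustrative assumption, not from this file): for a
	 * umem of length 0x210000 whose DMA layout first yields a 2MB
	 * candidate page size, 0x210000 is not a multiple of 2MB, so the loop
	 * above halves the candidate (1MB, 512KB, ...) until the length
	 * divides evenly - here at 64KB - or PAGE_SIZE is reached.
	 */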