/* Implementation-chosen device identifiers and capability limits. */
#define SIW_VENDOR_ID 0x626d74 /* ascii 'bmt' for now */
#define SIW_VENDORT_PART_ID 0
#define SIW_MAX_QP (1024 * 100)
#define SIW_MAX_QP_WR (1024 * 32)
#define SIW_MAX_ORD_QP 128
#define SIW_MAX_IRD_QP 128
#define SIW_MAX_SGE_PBL 256 /* max num sge's for PBL */
#define SIW_MAX_SGE_RD 1 /* iwarp limitation. we could relax */
#define SIW_MAX_CQ (1024 * 100)
#define SIW_MAX_CQE (SIW_MAX_QP_WR * 100)
#define SIW_MAX_MR (SIW_MAX_QP * 10)
#define SIW_MAX_PD SIW_MAX_QP
#define SIW_MAX_MW 0 /* to be set if MW's are supported */
#define SIW_MAX_SRQ SIW_MAX_QP
#define SIW_MAX_SRQ_WR (SIW_MAX_QP_WR * 10)
#define SIW_MAX_CONTEXT SIW_MAX_PD

/* Min number of bytes for using zero copy transmit */
#define SENDPAGE_THRESH PAGE_SIZE

/* Maximum number of frames which can be sent in one SQ processing */
#define SQ_USER_MAXBURST 100

/* Maximum number of consecutive IRQ elements which get served
 * if SQ has pending work. Prevents starving local SQ processing
 * by serving peer Read Requests.
 */
#define SIW_IRQ_MAXBURST_SQ_ACTIVE 4

/* There is always only a port 1 per siw device */
#define SIW_PORT 1
/* Device capability limits as reported to the RDMA core / user space. */
struct siw_dev_cap {
	int max_qp;
	int max_qp_wr;
	int max_ord; /* max. outbound read queue depth */
	int max_ird; /* max. inbound read queue depth */
	int max_sge;
	int max_sge_rd;
	int max_cq;
	int max_cqe;
	int max_mr;
	int max_pd;
	int max_mw;
	int max_srq;
	int max_srq_wr;
	int max_srq_sge;
};
/*
 * The RDMA core does not define LOCAL_READ access, which is always
 * enabled implicitly.
 */
#define IWARP_ACCESS_MASK					\
	(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |	\
	 IB_ACCESS_REMOTE_READ)
/*
 * siw presentation of user memory registered as source
 * or target of RDMA operations.
 */
struct siw_page_chunk {
	struct page **plist; /* array of page pointers for this chunk */
};
/* siw bookkeeping for a registered user memory region. */
struct siw_umem {
	struct ib_umem *base_mem; /* core umem this region is built from */
	struct siw_page_chunk *page_chunk;
	int num_pages; /* total number of pages covered */
	u64 fp_addr; /* First page base address */
};
/*
 * One element of a physical buffer list (PBL).
 * Fix: the extracted source had the type tokens fused
 * ("unsignedint", "unsignedlong"), which does not compile.
 */
struct siw_pble {
	dma_addr_t addr; /* Address of assigned buffer */
	unsigned int size; /* Size of this entry */
	unsigned long pbl_off; /* Total offset from start of PBL */
};
/*
 * Error codes for local or remote
 * access to registered memory
 */
enum siw_access_state {
	E_ACCESS_OK,
	E_STAG_INVALID,
	E_BASE_BOUNDS,
	E_ACCESS_PERM,
	E_PD_MISMATCH
};
/* Processing state of a work request (WR). */
enum siw_wr_state {
	SIW_WR_IDLE,
	SIW_WR_QUEUED, /* processing has not started yet */
	SIW_WR_INPROGRESS /* initiated processing of the WR */
};
/* The WQE currently being processed (RX or TX) */
struct siw_wqe {
	/* Copy of applications SQE or RQE */
	union {
		struct siw_sqe sqe;
		struct siw_rqe rqe;
	};
	struct siw_mem *mem[SIW_MAX_SGE]; /* per sge's resolved mem */
	enum siw_wr_state wr_status;
	enum siw_wc_status wc_status;
	u32 bytes; /* total bytes to process */
	u32 processed; /* bytes processed */
};
/* Completion queue: core CQ state plus the mmap-able CQE ring. */
struct siw_cq {
	struct ib_cq base_cq;
	spinlock_t lock;
	struct siw_cq_ctrl *notify;
	struct siw_cqe *queue;
	u32 cq_put; /* producer index into CQE array */
	u32 cq_get; /* consumer index into CQE array */
	u32 num_cqe;
	struct rdma_user_mmap_entry *cq_entry; /* mmap info for CQE array */
	u32 id; /* For debugging only */
};
/* Transmit-side FPDU processing stage. */
enum siw_tx_ctx {
	SIW_SEND_HDR, /* start or continue sending HDR */
	SIW_SEND_DATA, /* start or continue sending DDP payload */
	SIW_SEND_TRAILER, /* start or continue sending TRAILER */
	SIW_SEND_SHORT_FPDU /* send whole FPDU hdr|data|trailer at once */
};
/* Receive-side FPDU processing stage. */
enum siw_rx_state {
	SIW_GET_HDR, /* await new hdr or within hdr */
	SIW_GET_DATA_START, /* start of inbound DDP payload */
	SIW_GET_DATA_MORE, /* continuation of (misaligned) DDP payload */
	SIW_GET_TRAILER /* await new trailer or within trailer */
};
/* Receive-side state of one iWARP byte stream. */
struct siw_rx_stream {
	struct sk_buff *skb;
	int skb_new; /* pending unread bytes in skb */
	int skb_offset; /* offset in skb */
	int skb_copied; /* processed bytes in skb */

	enum siw_rx_state state;

	union iwarp_hdr hdr;
	struct mpa_trailer trailer;

	u32 mpa_crc;
	bool mpa_crc_enabled;

	/*
	 * For each FPDU, main RX loop runs through 3 stages:
	 * Receiving protocol headers, placing DDP payload and receiving
	 * trailer information (CRC + possibly padding).
	 * Next two variables keep state on receive status of the
	 * current FPDU part (hdr, data, trailer).
	 */
	int fpdu_part_rcvd; /* bytes in pkt part copied */
	int fpdu_part_rem; /* bytes in pkt part not seen */

	/*
	 * Next expected DDP MSN for each QN +
	 * expected steering tag +
	 * expected DDP tagged offset (all HBO)
	 */
	u32 ddp_msn[RDMAP_UNTAGGED_QN_COUNT];
	u32 ddp_stag;
	u64 ddp_to;
	u32 inval_stag; /* Stag to be invalidated */

	u8 rx_suspend : 1;
	u8 pad : 2; /* # of pad bytes expected */
	u8 rdmap_op : 4; /* opcode of current frame */
};
/*
 * Per-stream FPDU receive state (used for tagged and untagged DDP).
 * Fix: the extracted source had "unsignedint" fused in three member
 * declarations, which does not compile.
 */
struct siw_rx_fpdu {
	/*
	 * Local destination memory of inbound RDMA operation.
	 * Valid, according to wqe->wr_status
	 */
	struct siw_wqe wqe_active;

	unsigned int pbl_idx; /* Index into current PBL */
	unsigned int sge_idx; /* current sge in rx */
	unsigned int sge_off; /* already rcvd in curr. sge */

	char first_ddp_seg; /* this is the first DDP seg */
	char more_ddp_segs; /* more DDP segs expected */
	u8 prev_rdmap_op : 4; /* opcode of prev frame */
};
/*
 * Shorthands for short packets w/o payload
 * to be transmitted more efficiently.
 */
struct siw_send_pkt {
	struct iwarp_send send;
	__be32 crc;
};
/*
 * NOTE(review): the lines below look like the member list of a queue
 * pair (QP) structure — the enclosing 'struct ... {' header is not
 * visible in this chunk. Confirm against the full file before editing.
 */
/* Transmit path: context, SQ ring with producer/consumer indices */
struct siw_iwarp_tx tx_ctx; /* Transmit context */
spinlock_t sq_lock; struct siw_sqe *sendq; /* send queue element array */
uint32_t sq_get; /* consumer index into sq array */
uint32_t sq_put; /* kernel prod. index into sq array */ struct llist_node tx_list;
/* Outbound read queue (ORQ) for outstanding RDMA Read Requests */
struct siw_sqe *orq; /* outbound read queue element array */
spinlock_t orq_lock;
uint32_t orq_get; /* consumer index into orq array */
uint32_t orq_put; /* shared producer index for ORQ */
/* Receive path: common stream state plus tagged/untagged FPDU state */
struct siw_rx_stream rx_stream; struct siw_rx_fpdu *rx_fpdu; struct siw_rx_fpdu rx_tagged; struct siw_rx_fpdu rx_untagged;
spinlock_t rq_lock; struct siw_rqe *recvq; /* recv queue element array */
uint32_t rq_get; /* consumer index into rq array */
uint32_t rq_put; /* kernel prod. index into rq array */
/* Inbound read queue (IRQ) serving peer RDMA Read Requests */
struct siw_sqe *irq; /* inbound read queue element array */
uint32_t irq_get; /* consumer index into irq array */
uint32_t irq_put; /* producer index into irq array */ int irq_burst;
struct { /* information to be carried in TERMINATE pkt, if valid */
u8 valid;
u8 in_tx;
u8 layer : 4, etype : 4;
u8 ecode;
} term_info; struct rdma_user_mmap_entry *sq_entry; /* mmap info for SQE array */ struct rdma_user_mmap_entry *rq_entry; /* mmap info for RQE array */
};
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.