/* A structure for defining FM port resources */
struct fman_port_rsrc {
	u32 num;	/* Committed required resource */
	u32 extra;	/* Extra (not committed) required resource */
};
/* Byte-swap options applied by DMA to transferred frame data */
enum fman_port_dma_swap {
	FMAN_PORT_DMA_NO_SWAP,	/* No swap, transfer data as is */
	FMAN_PORT_DMA_SWAP_LE,	/* The transferred data should be swapped in PPC Little Endian mode */
	FMAN_PORT_DMA_SWAP_BE	/* The transferred data should be swapped in Big Endian mode */
};
/* Default port color */
enum fman_port_color {
	FMAN_PORT_COLOR_GREEN,		/* Default port color is green */
	FMAN_PORT_COLOR_YELLOW,		/* Default port color is yellow */
	FMAN_PORT_COLOR_RED,		/* Default port color is red */
	FMAN_PORT_COLOR_OVERRIDE	/* Ignore color */
};
/* QMI dequeue from the SP channel - types */
enum fman_port_deq_type {
	FMAN_PORT_DEQ_BY_PRI,		/* Priority precedence and Intra-Class scheduling */
	FMAN_PORT_DEQ_ACTIVE_FQ,	/* Active FQ precedence and Intra-Class scheduling */
	FMAN_PORT_DEQ_ACTIVE_FQ_NO_ICS	/* Active FQ precedence and override Intra-Class scheduling */
};
/* External buffer pools configuration */
struct fman_port_bpools {
	u8 count;			/* Num of pools to set up */
	bool counters_enable;		/* Enable allocate counters */
	u8 grp_bp_depleted_num;
	/* Number of depleted pools - if reached the BMI indicates
	 * the MAC to send a pause frame
	 */
	struct {
		u8 bpid;		/* BM pool ID */
		u16 size;
		/* Pool's size - must be in ascending order */
		bool is_backup;
		/* If this is a backup pool */
		bool grp_bp_depleted;
		/* Consider this buffer in multiple pools depletion criteria */
		bool single_bp_depleted;
		/* Consider this buffer in single pool depletion criteria */
	} bpool[FMAN_PORT_MAX_EXT_POOLS_NUM];
};
/* NOTE(review): this is the tail of a buffer-pool programming helper (looks
 * like set_bpools()); its signature and the declarations of rx_port, bp,
 * bp_reg, bp_depl_reg, max_bp_num, tmp, i and grp_depl_used live in the
 * missing enclosing scope — confirm against the original source.
 */
	if (rx_port) {
		/* Check buffers are provided in ascending order */
		for (i = 0; (i < (bp->count - 1) &&
			     (i < FMAN_PORT_MAX_EXT_POOLS_NUM - 1)); i++) {
			if (bp->bpool[i].size > bp->bpool[i + 1].size)
				return -EINVAL;
		}
	}

	/* Set up external buffers pools */
	for (i = 0; i < bp->count; i++) {
		tmp = BMI_EXT_BUF_POOL_VALID;
		tmp |= ((u32)bp->bpool[i].bpid <<
			BMI_EXT_BUF_POOL_ID_SHIFT) & BMI_EXT_BUF_POOL_ID_MASK;

		/* size/backup/counter bits are meaningful on Rx ports only */
		if (rx_port) {
			if (bp->counters_enable)
				tmp |= BMI_EXT_BUF_POOL_EN_COUNTER;

			if (bp->bpool[i].is_backup)
				tmp |= BMI_EXT_BUF_POOL_BACKUP;

			tmp |= (u32)bp->bpool[i].size;
		}

		iowrite32be(tmp, &bp_reg[i]);
	}

	/* Clear unused pools */
	for (i = bp->count; i < max_bp_num; i++)
		iowrite32be(0, &bp_reg[i]);

	/* Pools depletion */
	tmp = 0;
	for (i = 0; i < FMAN_PORT_MAX_EXT_POOLS_NUM; i++) {
		if (bp->bpool[i].grp_bp_depleted) {
			grp_depl_used = true;
			/* one bit per pool, MSB-first */
			tmp |= 0x80000000 >> i;
		}

		if (bp->bpool[i].single_bp_depleted)
			tmp |= 0x80 >> i;
	}
	if (grp_depl_used)
		tmp |= ((u32)bp->grp_bp_depleted_num - 1) <<
			BMI_POOL_DEP_NUM_OF_POOLS_SHIFT;
	iowrite32be(tmp, bp_depl_reg);
	return 0;
}
/* Checks if FMan port driver parameters were initialized.
 * port->cfg is freed and set to NULL once initialization completes, so a
 * NULL cfg means init is done.
 *
 * NOTE(review): the original chunk had two token-join compile errors here
 * ("staticbool", "returntrue") and was spliced with statements adjusting
 * opt_fifo_size_for_b2b that belong to a FIFO-size helper, not to this
 * function (they reference identifiers not in scope here); those spliced
 * lines were removed — confirm against the original source.
 */
static bool is_init_done(struct fman_port_cfg *cfg)
{
	if (!cfg)
		return true;

	return false;
}
/* NOTE(review): orphaned fragment of a FIFO-size computation; 'port' and
 * 'opt_fifo_size_for_b2b' are declared in the missing enclosing scope.
 */
	/* Add some margin for back-to-back capability to improve
	 * performance, allows the hardware to pipeline new frame dma
	 * while the previous frame not yet transmitted.
	 */
	if (port->port_speed == 10000)
		opt_fifo_size_for_b2b += 8 * FMAN_BMI_FIFO_UNITS;
	else
		opt_fifo_size_for_b2b += 3 * FMAN_BMI_FIFO_UNITS;
}
/* Verify the size */ if (port->fifo_bufs.num < min_fifo_size_required)
dev_dbg(port->dev, "%s: FIFO size should be enlarged to %d bytes\n",
__func__, min_fifo_size_required); elseif (port->fifo_bufs.num < opt_fifo_size_for_b2b)
dev_dbg(port->dev, "%s: For b2b processing,FIFO may be enlarged to %d bytes\n",
__func__, opt_fifo_size_for_b2b);
/* NOTE(review): fragment of set_ext_buffer_pools(); bpools, ext_buf_pools,
 * ordered_array, sizes_array, i and port are declared in the missing
 * enclosing scope. ordered_array presumably holds pool IDs sorted by
 * ascending buffer size — TODO confirm.
 */
	memset(&bpools, 0, sizeof(struct fman_port_bpools));
	bpools.count = ext_buf_pools->num_of_pools_used;
	bpools.counters_enable = true;
	for (i = 0; i < ext_buf_pools->num_of_pools_used; i++) {
		bpools.bpool[i].bpid = ordered_array[i];
		bpools.bpool[i].size = sizes_array[ordered_array[i]];
	}

	/* save pools parameters for later use */
	port->rx_pools_params.num_of_pools = ext_buf_pools->num_of_pools_used;
	/* last entry of the ordered array carries the largest buffer size */
	port->rx_pools_params.largest_buf_size =
		sizes_array[ordered_array[ext_buf_pools->num_of_pools_used - 1]];
	/* FMBM_RMPD reg. - pool depletion */
	if (buf_pool_depletion->pools_grp_mode_enable) {
		bpools.grp_bp_depleted_num = buf_pool_depletion->num_of_pools;
		/* for each pool selected for the group criteria, find its
		 * position in the ordered array and mark the matching bpool
		 * entry
		 */
		for (i = 0; i < port->bm_max_num_of_pools; i++) {
			if (buf_pool_depletion->pools_to_consider[i]) {
				for (j = 0; j < ext_buf_pools->num_of_pools_used; j++) {
					if (i == ordered_array[j]) {
						bpools.bpool[j].grp_bp_depleted = true;
						break;
					}
				}
			}
		}
	}
	/* same lookup as the group criteria above, but for the
	 * single-pool depletion mode
	 */
	if (buf_pool_depletion->single_pool_mode_enable) {
		for (i = 0; i < port->bm_max_num_of_pools; i++) {
			if (buf_pool_depletion->pools_to_consider_for_single_mode[i]) {
				for (j = 0; j < ext_buf_pools->num_of_pools_used; j++) {
					if (i == ordered_array[j]) {
						bpools.bpool[j].single_bp_depleted = true;
						break;
					}
				}
			}
		}
	}
/* NOTE(review): fragment of a port-init path; 'port', 'cfg' and the init()
 * helper are declared in the missing enclosing scope.
 */
	if (init(port) != 0) {
		dev_err(port->dev, "%s: fman port initialization failed\n",
			__func__);
		return -ENODEV;
	}

	/* The code below is a trick so the FM will not release the buffer
	 * to BM nor will try to enqueue the frame to QM
	 */
	if (port->port_type == FMAN_PORT_TYPE_TX) {
		if (!cfg->dflt_fqid && cfg->dont_release_buf) {
			/* override fmbm_tcfqid 0 with a false non-0 value.
			 * This will force FM to act according to tfene.
			 * Otherwise, if fmbm_tcfqid is 0 the FM will release
			 * buffers to BM regardless of fmbm_tfene
			 */
			iowrite32be(0xFFFFFF, &port->bmi_regs->tx.fmbm_tcfqid);
			iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_TX_RELEASE,
				    &port->bmi_regs->tx.fmbm_tfene);
		}
	}
/* NOTE(review): fragment of a default-value helper selecting a per-port
 * constant by FMan major revision, port type and speed (possibly dequeue
 * pipeline depth — TODO confirm); the enclosing signature, 'val', 'major',
 * 'type', 'speed' and the trailing 'return val;' are not visible here.
 */
	if (major >= 6) {
		switch (type) {
		case FMAN_PORT_TYPE_TX:
			if (speed == 10000)
				val = 12;
			else
				val = 3;
			break;
		case FMAN_PORT_TYPE_RX:
			if (speed == 10000)
				val = 8;
			else
				val = 2;
			break;
		default:
			return 0;
		}
	} else {
		switch (type) {
		case FMAN_PORT_TYPE_TX:
		case FMAN_PORT_TYPE_RX:
			if (speed == 10000)
				val = 8;
			else
				val = 1;
			break;
		default:
			val = 0;
		}
	}
/* NOTE(review): fragment of a second default-value helper keyed on FMan
 * major revision, port type and speed (larger values — possibly default
 * number of tasks or FIFO units; TODO confirm); the enclosing signature
 * and the trailing 'return val;' are not visible here.
 */
	if (major >= 6) {
		switch (type) {
		case FMAN_PORT_TYPE_TX:
			if (speed == 10000)
				val = 64;
			else
				val = 50;
			break;
		case FMAN_PORT_TYPE_RX:
			if (speed == 10000)
				val = 96;
			else
				val = 50;
			break;
		default:
			val = 0;
		}
	} else {
		switch (type) {
		case FMAN_PORT_TYPE_TX:
			if (speed == 10000)
				val = 48;
			else
				val = 44;
			break;
		case FMAN_PORT_TYPE_RX:
			if (speed == 10000)
				val = 48;
			else
				val = 45;
			break;
		default:
			val = 0;
		}
	}
/**
 * fman_port_config
 * @port:	Pointer to the port structure
 * @params:	Pointer to data structure of parameters
 *
 * Creates a descriptor for the FM PORT module.
 * The routine returns a pointer to the FM PORT object.
 * This descriptor must be passed as first parameter to all other FM PORT
 * function calls.
 * No actual initialization or configuration of FM hardware is done by this
 * routine.
 *
 * Return: 0 on success; Error code otherwise.
 */
int fman_port_config(struct fman_port *port, struct fman_port_params *params)
{
	void __iomem *base_addr = port->dts_params.base_addr;
	int err;

	/* Allocate the FM driver's parameters structure */
	port->cfg = kzalloc(sizeof(*port->cfg), GFP_KERNEL);
	if (!port->cfg)
		return -EINVAL;

	/* Initialize FM port parameters which will be kept by the driver */
	port->port_type = port->dts_params.type;
	port->port_speed = port->dts_params.speed;
	port->port_id = port->dts_params.id;
	port->fm = port->dts_params.fman;
	/* hard-coded number of external buffer pools — presumably a SoC
	 * constant; TODO confirm
	 */
	port->ext_pools_num = (u8)8;

	/* get FM revision */
	fman_get_revision(port->fm, &port->rev_info);

	err = fill_soc_specific_params(port);
	if (err)
		goto err_port_cfg;

	/* Rx falls through to Tx defaults, which fall through to the
	 * common defaults — each case adds to the previous one
	 */
	switch (port->port_type) {
	case FMAN_PORT_TYPE_RX:
		set_rx_dflt_cfg(port, params);
		fallthrough;
	case FMAN_PORT_TYPE_TX:
		set_tx_dflt_cfg(port, params, &port->dts_params);
		fallthrough;
	default:
		set_dflt_cfg(port, params);
	}

	/* Continue with other parameters */
	/* set memory map pointers */
	port->bmi_regs = base_addr + BMI_PORT_REGS_OFFSET;
	port->qmi_regs = base_addr + QMI_PORT_REGS_OFFSET;
	port->hwp_regs = base_addr + HWP_PORT_REGS_OFFSET;
/* NOTE(review): the function tail (remaining setup, 'return 0;' and the
 * 'err_port_cfg' error label) is not visible in this chunk.
 */
/**
 * fman_port_use_kg_hash
 * @port:	A pointer to a FM Port module.
 * @enable:	enable or disable
 *
 * Sets the HW KeyGen or the BMI as HW Parser next engine, enabling
 * or bypassing the KeyGen hashing of Rx traffic
 */
void fman_port_use_kg_hash(struct fman_port *port, bool enable)
{
	u32 next_engine;

	if (enable)
		next_engine = NIA_ENG_HWK;	/* after the Parser, frames go to KeyGen */
	else
		next_engine = NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME;	/* after the Parser, frames go to BMI */

	iowrite32be(next_engine, &port->bmi_regs->rx.fmbm_rfpne);
}
EXPORT_SYMBOL(fman_port_use_kg_hash);
/**
 * fman_port_init
 * @port:	A pointer to a FM Port module.
 *
 * Initializes the FM PORT module by defining the software structure and
 * configuring the hardware registers.
 *
 * Return: 0 on success; Error code otherwise.
 */
int fman_port_init(struct fman_port *port)
{
	struct fman_port_init_params params;
	struct fman_keygen *keygen;
	struct fman_port_cfg *cfg;
	int err;

	/* NOTE(review): 'cfg' is read below but never assigned in this
	 * chunk — presumably 'cfg = port->cfg;' (and further setup) was
	 * lost from this view; confirm against the original source.
	 */
	if (port->port_type == FMAN_PORT_TYPE_RX) {
		/* Call the external Buffer routine which also checks fifo
		 * size and updates it if necessary
		 */
		/* define external buffer pools and pool depletion */
		err = set_ext_buffer_pools(port);
		if (err)
			return err;
		/* check if the largest external buffer pool is large enough */
		if (cfg->buf_margins.start_margins + MIN_EXT_BUF_SIZE +
		    cfg->buf_margins.end_margins >
		    port->rx_pools_params.largest_buf_size) {
			dev_err(port->dev, "%s: buf_margins.start_margins (%d) + minimum buf size (64) + buf_margins.end_margins (%d) is larger than maximum external buffer size (%d)\n",
				__func__, cfg->buf_margins.start_margins,
				cfg->buf_margins.end_margins,
				port->rx_pools_params.largest_buf_size);
			return -EINVAL;
		}
	}

	/* NOTE(review): 'params' is passed uninitialized here — the code
	 * that populates it appears to be missing from this chunk. The
	 * original text also had the mojibake '¶ms', fixed to '&params'.
	 */
	err = fman_set_port_params(port->fm, &params);
	if (err)
		return err;

	err = init_low_level_driver(port);
	if (err)
		return err;

	if (port->cfg->pcd_fqs_count) {
		keygen = port->dts_params.fman->keygen;
		err = keygen_port_hashing_init(keygen, port->port_id,
					       port->cfg->pcd_base_fqid,
					       port->cfg->pcd_fqs_count);
		if (err)
			return err;

		/* route Rx traffic through KeyGen hashing from now on */
		fman_port_use_kg_hash(port, true);
	}

	/* config is no longer needed once init completes; a NULL cfg is
	 * what is_init_done() checks for
	 */
	kfree(port->cfg);
	port->cfg = NULL;

	return 0;
}
EXPORT_SYMBOL(fman_port_init);
/**
 * fman_port_cfg_buf_prefix_content
 * @port:			A pointer to a FM Port module.
 * @buffer_prefix_content:	A structure of parameters describing
 *				the structure of the buffer.
 *				Out parameter:
 *				Start margin - offset of data from
 *				start of external buffer.
 *
 * Defines the structure, size and content of the application buffer.
 * The prefix, in Tx ports, if 'pass_prs_result', the application should set
 * a value to their offsets in the prefix of the FM will save the first
 * 'priv_data_size', than, depending on 'pass_prs_result' and
 * 'pass_time_stamp', copy parse result and timeStamp, and the packet itself
 * (in this order), to the application buffer, and to offset.
 * Calling this routine changes the buffer margins definitions in the internal
 * driver data base from its default configuration:
 * Data size:  [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PRIV_DATA_SIZE]
 * Pass Parser result: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PASS_PRS_RESULT].
 * Pass timestamp: [DEFAULT_PORT_BUFFER_PREFIX_CONTENT_PASS_TIME_STAMP].
 * May be used for all ports
 *
 * Allowed only following fman_port_config() and before fman_port_init().
 *
 * Return: 0 on success; Error code otherwise.
 */
int fman_port_cfg_buf_prefix_content(struct fman_port *port,
				     struct fman_buffer_prefix_content *
				     buffer_prefix_content)
{
	/* must be called between config and init, while port->cfg exists */
	if (is_init_done(port->cfg))
		return -EINVAL;

	memcpy(&port->cfg->buffer_prefix_content,
	       buffer_prefix_content, sizeof(struct fman_buffer_prefix_content));
	/* if data_align was not initialized by user,
	 * we return to driver's default
	 */
	if (!port->cfg->buffer_prefix_content.data_align)
		port->cfg->buffer_prefix_content.data_align =
		DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN;
/* NOTE(review): the function tail (presumably 'return 0;' and the closing
 * brace) is not visible in this chunk.
 */
/**
 * fman_port_disable
 * @port:	A pointer to a FM Port module.
 *
 * Gracefully disable an FM port. The port will not start new tasks after all
 * tasks associated with the port are terminated.
 *
 * This is a blocking routine, it returns after port is gracefully stopped,
 * i.e. the port will not except new frames, but it will finish all frames
 * or tasks which were already began.
 * Allowed only following fman_port_init().
 *
 * Return: 0 on success; Error code otherwise.
 */
int fman_port_disable(struct fman_port *port)
{
	u32 __iomem *bmi_cfg_reg, *bmi_status_reg;
	u32 tmp;
	bool rx_port, failure = false;
	int count;

	/* NOTE(review): the code that selects bmi_cfg_reg/bmi_status_reg by
	 * port type and initiates the graceful stop is missing from this
	 * chunk; as shown, bmi_status_reg is read uninitialized and
	 * bmi_cfg_reg/rx_port are unused. Confirm against the original
	 * source.
	 */
	/* Wait for graceful stop end */
	count = 500;
	do {
		udelay(10);
		tmp = ioread32be(bmi_status_reg);
	} while ((tmp & BMI_PORT_STATUS_BSY) && --count);

	if (count == 0) {
		/* Timeout */
		failure = true;
	}

	if (failure)
		dev_dbg(port->dev, "%s: FMan Port[%d]: BMI or QMI is Busy. Port forced down\n",
			__func__, port->port_id);

	return 0;
}
EXPORT_SYMBOL(fman_port_disable);
/**
 * fman_port_enable
 * @port:	A pointer to a FM Port module.
 *
 * A runtime routine provided to allow disable/enable of port.
 *
 * Allowed only following fman_port_init().
 *
 * Return: 0 on success; Error code otherwise.
 */
int fman_port_enable(struct fman_port *port)
{
	u32 __iomem *bmi_cfg_reg;
	u32 tmp;
	bool rx_port;
/* NOTE(review): the body of this function is not visible in this chunk —
 * only the local declarations survive.
 */
/**
 * fman_port_bind
 * @dev:	FMan Port OF device pointer
 *
 * Bind to a specific FMan Port.
 *
 * Allowed only after the port was created.
 *
 * Return: A pointer to the FMan port device.
 */
struct fman_port *fman_port_bind(struct device *dev)
{
	/* take a reference on the device, then read its driver data;
	 * dev_get_drvdata() returns void *, so no cast is needed
	 * (the original cast was redundant in C)
	 */
	return dev_get_drvdata(get_device(dev));
}
EXPORT_SYMBOL(fman_port_bind);
/**
 * fman_port_get_qman_channel_id
 * @port:	Pointer to the FMan port device
 *
 * Get the QMan channel ID for the specific port
 *
 * Return: QMan channel ID
 */
u32 fman_port_get_qman_channel_id(struct fman_port *port)
{
	u32 channel_id = port->dts_params.qman_channel_id;

	return channel_id;
}
EXPORT_SYMBOL(fman_port_get_qman_channel_id);
/**
 * fman_port_get_device
 * @port:	Pointer to the FMan port device
 *
 * Get the 'struct device' associated to the specified FMan port device
 *
 * Return: pointer to associated 'struct device'
 */
struct device *fman_port_get_device(struct fman_port *port)
{
	struct device *dev = port->dev;

	return dev;
}
EXPORT_SYMBOL(fman_port_get_device);
/* Fails with -EINVAL when the hash-result offset was never configured for
 * this port (still ILLEGAL_BASE). NOTE(review): the function tail —
 * presumably the assignment to *offset and 'return 0;' — is not visible in
 * this chunk; confirm against the original source.
 */
int fman_port_get_hash_result_offset(struct fman_port *port, u32 *offset)
{
	if (port->buffer_offsets.hash_result_offset == ILLEGAL_BASE)
		return -EINVAL;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.