/* Hardware is told about receive buffers once a "batch" has been queued */
#define IPA_REPLENISH_BATCH	16		/* Must be non-zero */

/* The amount of RX buffer space consumed by standard skb overhead */
#define IPA_RX_BUFFER_OVERHEAD	(PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))

/* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
#define IPA_ENDPOINT_QMAP_METADATA_MASK		0x000000ff /* host byte order */

/* Size in bytes of an IPA packet status structure */
#define IPA_STATUS_SIZE			sizeof(__le32[8])
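
/* A minimal sketch (not part of the driver): under the assumption that the
 * modem-supplied metadata value is already in host byte order, applying the
 * mask above keeps only its low-order byte, which is where the QMAP mux_id
 * is found.
 */
static u8 __maybe_unused ipa_qmap_mux_id_sketch(u32 modem_metadata)
{
	return modem_metadata & IPA_ENDPOINT_QMAP_METADATA_MASK;
}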
/* IPA status structure decoder; looks up field values for a structure */
static u32 ipa_status_extract(struct ipa *ipa, const void *data,
			      enum ipa_status_field_id field)
{
	enum ipa_version version = ipa->version;
	const __le32 *word = data;

	switch (field) {
	case STATUS_OPCODE:
		return le32_get_bits(word[0], GENMASK(7, 0));
	case STATUS_EXCEPTION:
		return le32_get_bits(word[0], GENMASK(15, 8));
	case STATUS_MASK:
		return le32_get_bits(word[0], GENMASK(31, 16));
	case STATUS_LENGTH:
		return le32_get_bits(word[1], GENMASK(15, 0));
	case STATUS_SRC_ENDPOINT:
		if (version < IPA_VERSION_5_0)
			return le32_get_bits(word[1], GENMASK(20, 16));
		return le32_get_bits(word[1], GENMASK(23, 16));
	/* Status word 1, bits 21-23 are reserved (not IPA v5.0+) */
	/* Status word 1, bits 24-26 are reserved (IPA v5.0+) */
	case STATUS_DST_ENDPOINT:
		if (version < IPA_VERSION_5_0)
			return le32_get_bits(word[1], GENMASK(28, 24));
		return le32_get_bits(word[7], GENMASK(23, 16));
	/* Status word 1, bits 29-31 are reserved */
	case STATUS_METADATA:
		return le32_to_cpu(word[2]);
	case STATUS_FILTER_LOCAL:
		return le32_get_bits(word[3], GENMASK(0, 0));
	case STATUS_FILTER_HASH:
		return le32_get_bits(word[3], GENMASK(1, 1));
	case STATUS_FILTER_GLOBAL:
		return le32_get_bits(word[3], GENMASK(2, 2));
	case STATUS_FILTER_RETAIN:
		return le32_get_bits(word[3], GENMASK(3, 3));
	case STATUS_FILTER_RULE_INDEX:
		return le32_get_bits(word[3], GENMASK(13, 4));
	/* ROUTER_TABLE is in word 3, bits 14-21 (IPA v5.0+) */
	case STATUS_ROUTER_LOCAL:
		if (version < IPA_VERSION_5_0)
			return le32_get_bits(word[3], GENMASK(14, 14));
		return le32_get_bits(word[1], GENMASK(27, 27));
	case STATUS_ROUTER_HASH:
		if (version < IPA_VERSION_5_0)
			return le32_get_bits(word[3], GENMASK(15, 15));
		return le32_get_bits(word[1], GENMASK(28, 28));
	case STATUS_UCP:
		if (version < IPA_VERSION_5_0)
			return le32_get_bits(word[3], GENMASK(16, 16));
		return le32_get_bits(word[7], GENMASK(31, 31));
	case STATUS_ROUTER_TABLE:
		if (version < IPA_VERSION_5_0)
			return le32_get_bits(word[3], GENMASK(21, 17));
		return le32_get_bits(word[3], GENMASK(21, 14));
	case STATUS_ROUTER_RULE_INDEX:
		return le32_get_bits(word[3], GENMASK(31, 22));
	case STATUS_NAT_HIT:
		return le32_get_bits(word[4], GENMASK(0, 0));
	case STATUS_NAT_INDEX:
		return le32_get_bits(word[4], GENMASK(13, 1));
	case STATUS_NAT_TYPE:
		return le32_get_bits(word[4], GENMASK(15, 14));
	case STATUS_TAG_LOW32:
		return le32_get_bits(word[4], GENMASK(31, 16)) |
			(le32_get_bits(word[5], GENMASK(15, 0)) << 16);
	case STATUS_TAG_HIGH16:
		return le32_get_bits(word[5], GENMASK(31, 16));
	case STATUS_SEQUENCE:
		return le32_get_bits(word[6], GENMASK(7, 0));
	case STATUS_TIME_OF_DAY:
		return le32_get_bits(word[6], GENMASK(31, 8));
	case STATUS_HEADER_LOCAL:
		return le32_get_bits(word[7], GENMASK(0, 0));
	case STATUS_HEADER_OFFSET:
		return le32_get_bits(word[7], GENMASK(10, 1));
	case STATUS_FRAG_HIT:
		return le32_get_bits(word[7], GENMASK(11, 11));
	case STATUS_FRAG_RULE_INDEX:
		return le32_get_bits(word[7], GENMASK(15, 12));
	/* Status word 7, bits 16-30 are reserved */
	/* Status word 7, bit 31 is reserved (not IPA v5.0+) */
	default:
		WARN(true, "%s: bad field_id %u\n", __func__, field);
		return 0;
	}
}
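
/* Usage sketch (illustrative only, not a driver function): pulling a couple
 * of fields out of a raw status record with ipa_status_extract(), the way
 * the RX status parsing code later in this file does.
 */
static void __maybe_unused ipa_status_extract_sketch(struct ipa *ipa,
						     const void *data)
{
	u32 opcode = ipa_status_extract(ipa, data, STATUS_OPCODE);
	u32 length = ipa_status_extract(ipa, data, STATUS_LENGTH);

	dev_dbg(ipa->dev, "status opcode %u, packet length %u\n",
		opcode, length);
}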
/* Compute the aggregation size value to use for a given buffer size */
static u32 ipa_aggr_size_kb(u32 rx_buffer_size, bool aggr_hard_limit)
{
	/* A hard aggregation limit will not be crossed; aggregation closes
	 * if saving incoming data would cross the hard byte limit boundary.
	 *
	 * With a soft limit, aggregation closes *after* the size boundary
	 * has been crossed.  In that case the limit must leave enough space
	 * after that limit to receive a full MTU of data plus overhead.
	 */
	if (!aggr_hard_limit)
		rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;

	/* The byte limit is encoded as a number of kilobytes */

	return rx_buffer_size / SZ_1K;
}
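
/* Illustrative sketch (not part of the driver): for a receive buffer large
 * enough to hold an MTU plus overhead, the soft-limit encoding is never
 * larger than the hard-limit encoding, since a soft limit must leave room
 * for one more full-MTU packet after the limit is crossed.
 */
static void __maybe_unused ipa_aggr_size_kb_sketch(u32 rx_buffer_size)
{
	u32 hard_kb = ipa_aggr_size_kb(rx_buffer_size, true);
	u32 soft_kb = ipa_aggr_size_kb(rx_buffer_size, false);

	WARN_ON(soft_kb > hard_kb);
}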
	if (data->endpoint.filter_support) {
		dev_err(dev, "filtering not supported for RX endpoint %u\n",
			data->endpoint_id);
		return false;
	}

	/* Nothing more to check for non-AP RX */
	if (data->ee_id != GSI_EE_AP)
		return true;

	rx_config = &data->endpoint.config.rx;

	/* The buffer size must hold an MTU plus overhead */
	buffer_size = rx_config->buffer_size;
	limit = IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
	if (buffer_size < limit) {
		dev_err(dev, "RX buffer size too small for RX endpoint %u (%u < %u)\n",
			data->endpoint_id, buffer_size, limit);
		return false;
	}
	if (!data->endpoint.config.aggregation) {
		bool result = true;

		/* No aggregation; check for bogus aggregation data */
		if (rx_config->aggr_time_limit) {
dev_err(dev, "time limit with no aggregation for RX endpoint %u\n",
data->endpoint_id);
result = false;
}
if (rx_config->aggr_hard_limit) {
dev_err(dev, "hard limit with no aggregation for RX endpoint %u\n",
data->endpoint_id);
result = false;
}
if (rx_config->aggr_close_eof) {
dev_err(dev, "close EOF with no aggregation for RX endpoint %u\n",
data->endpoint_id);
result = false;
}
return result; /* Nothing more to check */
}
	/* For an endpoint supporting receive aggregation, the byte
	 * limit defines the point at which aggregation closes.  This
	 * check ensures the receive buffer size doesn't result in a
	 * limit that exceeds what's representable in the aggregation
	 * byte limit field.
*/
aggr_size = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
rx_config->aggr_hard_limit);
reg = ipa_reg(ipa, ENDP_INIT_AGGR);
	limit = reg_field_max(reg, BYTE_LIMIT);
	if (aggr_size > limit) {
		dev_err(dev, "aggregated size too large for RX endpoint %u (%u KB > %u KB)\n",
			data->endpoint_id, aggr_size, limit);
		return false;
	}

	return true;	/* Nothing more to check for RX */
}
	/* Starting with IPA v4.5 sequencer replication is obsolete */
	if (ipa->version >= IPA_VERSION_4_5) {
		if (data->endpoint.config.tx.seq_rep_type) {
			dev_err(dev, "non-zero seq_rep_type TX endpoint %u\n",
				data->endpoint_id);
			return false;
}
}
if (data->endpoint.config.status_enable) {
		other_name = data->endpoint.config.tx.status_endpoint;
		if (other_name >= count) {
			dev_err(dev, "status endpoint name %u out of range for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
}
/* Status endpoint must be defined... */
		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "DMA endpoint name %u undefined for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
}
		/* ...and has to be an RX endpoint... */
		if (other_data->toward_ipa) {
			dev_err(dev, "status endpoint for endpoint %u not RX\n",
				data->endpoint_id);
			return false;
}
		/* ...and if it's to be an AP endpoint... */
		if (other_data->ee_id == GSI_EE_AP) {
			/* ...make sure it has status enabled. */
			if (!other_data->endpoint.config.status_enable) {
				dev_err(dev, "status not enabled for endpoint %u\n",
					other_data->endpoint_id);
				return false;
}
}
}
if (data->endpoint.config.dma_mode) {
		other_name = data->endpoint.config.dma_endpoint;
		if (other_name >= count) {
			dev_err(dev, "DMA endpoint name %u out of range for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
}
if (count > IPA_ENDPOINT_COUNT) {
dev_err(dev, "too many endpoints specified (%u > %u)\n",
count, IPA_ENDPOINT_COUNT); return 0;
}
	/* Make sure needed endpoints have defined data */
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
		dev_err(dev, "command TX endpoint not defined\n");
		return 0;
	}

	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
		dev_err(dev, "LAN RX endpoint not defined\n");
		return 0;
	}

	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
		dev_err(dev, "AP->modem TX endpoint not defined\n");
		return 0;
	}

	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
		dev_err(dev, "AP<-modem RX endpoint not defined\n");
		return 0;
	}

	max = 0;
	for (name = 0; name < count; name++, dp++) {
		if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
			return 0;
max = max_t(u32, max, dp->endpoint_id);
}
return max;
}
/* Allocate a transaction to use on a non-command endpoint */
static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
						  u32 tre_count)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;
	enum dma_data_direction direction;

	direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
}
/* suspend_delay represents suspend for RX, delay for TX endpoints.
 * Note that suspend is not supported starting with IPA v4.0, and
 * delay mode should not be used starting with IPA v4.2.
 */
static bool
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{
	struct ipa *ipa = endpoint->ipa;
	const struct reg *reg;
u32 field_id;
u32 offset; bool state;
u32 mask;
u32 val;
if (endpoint->toward_ipa)
		WARN_ON(ipa->version >= IPA_VERSION_4_2);
	else
WARN_ON(ipa->version >= IPA_VERSION_4_0);
	/* Don't bother if it's already in the requested state */
	if (suspend_delay != state) {
val ^= mask;
iowrite32(val, ipa->reg_virt + offset);
}
return state;
}
/* We don't care what the previous state was for delay mode */
static void
ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
{
	/* Delay mode should not be used for IPA v4.2+ */
	WARN_ON(endpoint->ipa->version >= IPA_VERSION_4_2);
	WARN_ON(!endpoint->toward_ipa);

	(void)ipa_endpoint_init_ctrl(endpoint, enable);
}
/**
 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
 * @endpoint:	Endpoint on which to emulate a suspend
 *
 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
 * with an open aggregation frame.  This is to work around a hardware
 * issue in IPA version 3.5.1 where the suspend interrupt will not be
 * generated when it should be.
 */
static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;

	if (!endpoint->config.aggregation)
		return;

	/* Nothing to do if the endpoint doesn't have aggregation open */
	if (!ipa_endpoint_aggr_active(endpoint))
		return;
/* Force close aggregation */
ipa_endpoint_force_close(endpoint);
ipa_interrupt_simulate_suspend(ipa->interrupt);
}
/* Returns previous suspend state (true means suspend was enabled) */
static bool
ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
{
	bool suspended;

	if (endpoint->ipa->version >= IPA_VERSION_4_0)
		return enable;	/* For IPA v4.0+, no change made */

	suspended = ipa_endpoint_init_ctrl(endpoint, enable);

	/* A client suspended with an open aggregation frame will not
	 * generate a SUSPEND IPA interrupt.  If enabling suspend, have
	 * ipa_endpoint_suspend_aggr() handle this.
	 */
	if (enable && !suspended)
ipa_endpoint_suspend_aggr(endpoint);
return suspended;
}
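
/* Illustrative sketch (not part of the driver): because the function above
 * returns the previous suspend state, a caller that must temporarily take an
 * endpoint out of suspend can restore whatever state it found, much as
 * ipa_endpoint_reset_rx_aggr() does later in this file.
 */
static void __maybe_unused
ipa_endpoint_unsuspend_sketch(struct ipa_endpoint *endpoint)
{
	bool suspended;

	suspended = ipa_endpoint_program_suspend(endpoint, false);

	/* ...work that requires the endpoint not be suspended... */

	if (suspended)
		(void)ipa_endpoint_program_suspend(endpoint, true);
}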
/* Put all modem RX endpoints into suspend mode, and stop transmission
 * on all modem TX endpoints.  Prior to IPA v4.2, endpoint DELAY mode is
 * used for TX endpoints; starting with IPA v4.2 we use GSI channel flow
 * control instead.
 */
void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
{
	u32 endpoint_id = 0;

	while (endpoint_id < ipa->endpoint_count) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++];
/* Reset all modem endpoints to use the default exception endpoint */
int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
{
	struct gsi_trans *trans;
	u32 endpoint_id;
	u32 count;

	/* We need one command per modem TX endpoint, plus the commands
	 * that clear the pipeline.
*/
count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count();
	trans = ipa_cmd_trans_alloc(ipa, count);
	if (!trans) {
		dev_err(ipa->dev,
			"no transaction to reset modem exception endpoints\n");
		return -EBUSY;
}
	/* Value written is 0, and all bits are updated.  That
	 * means status is disabled on the endpoint, and as a
	 * result all other fields in the register are ignored.
*/
ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
}
	reg = ipa_reg(ipa, ENDP_INIT_CFG);
	/* FRAG_OFFLOAD_EN is 0 */
	if (endpoint->config.checksum) {
		enum ipa_version version = ipa->version;
if (endpoint->toward_ipa) {
u32 off;
/* Checksum header offset is in 4-byte units */
off = sizeof(struct rmnet_map_header) / sizeof(u32);
val |= reg_encode(reg, CS_METADATA_HDR_OFFSET, off);
enabled = version < IPA_VERSION_4_5
? IPA_CS_OFFLOAD_UL
: IPA_CS_OFFLOAD_INLINE;
} else {
enabled = version < IPA_VERSION_4_5
? IPA_CS_OFFLOAD_DL
: IPA_CS_OFFLOAD_INLINE;
}
} else {
enabled = IPA_CS_OFFLOAD_NONE;
}
	val |= reg_encode(reg, CS_OFFLOAD_EN, enabled);
	/* CS_GEN_QMB_MASTER_SEL is 0 */
	/* Without checksum offload, we just have the MAP header */
	if (!endpoint->config.checksum)
		return header_size;

	if (version < IPA_VERSION_4_5) {
		/* Checksum header inserted for AP TX endpoints only */
		if (endpoint->toward_ipa)
			header_size += sizeof(struct rmnet_map_ul_csum_header);
	} else {
		/* Checksum header is used in both directions */
header_size += sizeof(struct rmnet_map_v5_csum_header);
}
/* We know field_max can be used as a mask (2^n - 1) */
	val = reg_encode(reg, HDR_LEN, header_size & field_max);
	if (version < IPA_VERSION_4_5) {
		WARN_ON(header_size > field_max);
		return val;
}
/* IPA v4.5 adds a few more most-significant bits */
header_size >>= hweight32(field_max);
WARN_ON(header_size > reg_field_max(reg, HDR_LEN_MSB));
val |= reg_encode(reg, HDR_LEN_MSB, header_size);
/* We know field_max can be used as a mask (2^n - 1) */
	val = reg_encode(reg, HDR_OFST_METADATA, offset);
	if (version < IPA_VERSION_4_5) {
		WARN_ON(offset > field_max);
		return val;
}
/* IPA v4.5 adds a few more most-significant bits */
offset >>= hweight32(field_max);
WARN_ON(offset > reg_field_max(reg, HDR_OFST_METADATA_MSB));
val |= reg_encode(reg, HDR_OFST_METADATA_MSB, offset);
return val;
}
/**
 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
 * @endpoint:	Endpoint pointer
 *
 * We program QMAP endpoints so each packet received is preceded by a QMAP
 * header structure.  The QMAP header contains a 1-byte mux_id and 2-byte
 * packet size field, and we have the IPA hardware populate both for each
 * received packet.  The header is configured (in the HDR_EXT register)
 * to use big endian format.
 *
 * The packet size is written into the QMAP header's pkt_len field.  That
 * location is defined here using the HDR_OFST_PKT_SIZE field.
 *
 * The mux_id comes from a 4-byte metadata value supplied with each packet
 * by the modem.  It is *not* a QMAP header, but it does contain the mux_id
 * value that we want, in its low-order byte.  A bitmask defined in the
 * endpoint's METADATA_MASK register defines which byte within the modem
 * metadata contains the mux_id.  And the OFST_METADATA field programmed
 * here indicates where the extracted byte should be placed within the QMAP
 * header.
 */
static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	const struct reg *reg;
u32 val = 0;
	reg = ipa_reg(ipa, ENDP_INIT_HDR);
	if (endpoint->config.qmap) {
		enum ipa_version version = ipa->version;
size_t header_size;
header_size = ipa_qmap_header_size(version, endpoint);
val = ipa_header_size_encode(version, reg, header_size);
		/* Define how to fill fields in a received QMAP header */
		if (!endpoint->toward_ipa) {
u32 off; /* Field offset within header */
/* Where IPA will write the metadata value */
off = offsetof(struct rmnet_map_header, mux_id);
val |= ipa_metadata_offset_encode(version, reg, off);
/* Where IPA will write the length */
			off = offsetof(struct rmnet_map_header, pkt_len);
			/* Upper bits are stored in HDR_EXT with IPA v4.5 */
			if (version >= IPA_VERSION_4_5)
off &= reg_field_max(reg, HDR_OFST_PKT_SIZE);
val |= reg_bit(reg, HDR_OFST_PKT_SIZE_VALID);
val |= reg_encode(reg, HDR_OFST_PKT_SIZE, off);
		}

		/* For QMAP TX, metadata offset is 0 (modem assumes this) */
val |= reg_bit(reg, HDR_OFST_METADATA_VALID);
		/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
		/* HDR_A5_MUX is 0 */
		/* HDR_LEN_INC_DEAGG_HDR is 0 */
		/* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
}
	reg = ipa_reg(ipa, ENDP_INIT_HDR_EXT);
	if (endpoint->config.qmap) {
		/* We have a header, so we must specify its endianness */
		val |= reg_bit(reg, HDR_ENDIANNESS);	/* big endian */

		/* A QMAP header contains a 6 bit pad field at offset 0.
		 * The RMNet driver assumes this field is meaningful in
		 * packets it receives, and assumes the header's payload
		 * length includes that padding.  The RMNet driver does
		 * *not* pad packets it sends, however, so the pad field
		 * (although 0) should be ignored.
		 */
		if (!endpoint->toward_ipa) {
			val |= reg_bit(reg, HDR_TOTAL_LEN_OR_PAD_VALID);
			/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
			val |= reg_bit(reg, HDR_PAYLOAD_LEN_INC_PADDING);
			/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
}
}
	/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
	if (!endpoint->toward_ipa)
val |= reg_encode(reg, HDR_PAD_TO_ALIGNMENT, pad_align);
	/* IPA v4.5 adds some most-significant bits to a few fields,
	 * two of which are defined in the HDR (not HDR_EXT) register.
	 */
	if (ipa->version >= IPA_VERSION_4_5) {
		/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
		if (endpoint->config.qmap && !endpoint->toward_ipa) {
u32 mask = reg_field_max(reg, HDR_OFST_PKT_SIZE);
u32 off; /* Field offset within header */
			off = offsetof(struct rmnet_map_header, pkt_len);
			/* Low bits are in the ENDP_INIT_HDR register */
off >>= hweight32(mask);
val |= reg_encode(reg, HDR_OFST_PKT_SIZE_MSB, off); /* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
}
}
	/* Note that HDR_ENDIANNESS indicates big endian header fields */
	if (endpoint->config.qmap)
val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
/* For IPA v4.5+, times are expressed using Qtime.  A time is represented
 * at one of several available granularities, which are configured in
 * ipa_qtime_config().  Three (or, starting with IPA v5.0, four) pulse
 * generators are set up with different "tick" periods.  A Qtime value
 * encodes a tick count along with an indication of a pulse generator
 * (which has a fixed tick period).  Two pulse generators are always
 * available to the AP; a third is available starting with IPA v5.0.
 * This function determines which pulse generator most accurately
 * represents the time period provided, and returns the tick count to
 * use to represent that time.
 */
static u32
ipa_qtime_val(struct ipa *ipa, u32 microseconds, u32 max, u32 *select)
{
u32 which = 0;
u32 ticks;
/* Pulse generator 0 has 100 microsecond granularity */
	ticks = DIV_ROUND_CLOSEST(microseconds, 100);
	if (ticks <= max)
		goto out;
/* Pulse generator 1 has millisecond granularity */
which = 1;
	ticks = DIV_ROUND_CLOSEST(microseconds, 1000);
	if (ticks <= max)
		goto out;

	if (ipa->version >= IPA_VERSION_5_0) {
		/* Pulse generator 2 has 10 millisecond granularity */
which = 2;
		ticks = DIV_ROUND_CLOSEST(microseconds, 10000);
}
WARN_ON(ticks > max);
out:
*select = which;
return ticks;
}
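
/* Worked example (illustrative only, assuming a hypothetical 5-bit limit
 * field, so max is 31): 25 milliseconds is 250 ticks of pulse generator 0
 * (too large for the field) but 25 ticks of pulse generator 1, so
 * ipa_qtime_val() returns 25 and sets the selector to 1.
 */
static u32 __maybe_unused ipa_qtime_val_sketch(struct ipa *ipa)
{
	u32 select;
	u32 ticks;

	ticks = ipa_qtime_val(ipa, 25 * USEC_PER_MSEC, 31, &select);

	return ticks;	/* 25, with select == 1 */
}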
/* Encode the aggregation timer limit (microseconds) based on IPA version */
static u32 aggr_time_limit_encode(struct ipa *ipa, const struct reg *reg,
u32 microseconds)
{
u32 ticks;
u32 max;
	if (!microseconds)
		return 0;	/* Nothing to compute if time limit is 0 */

	max = reg_field_max(reg, TIME_LIMIT);
	if (ipa->version >= IPA_VERSION_4_5) {
u32 select;
/* We program aggregation granularity in ipa_hardware_config() */
ticks = DIV_ROUND_CLOSEST(microseconds, IPA_AGGR_GRANULARITY);
WARN(ticks > max, "aggr_time_limit too large (%u > %u usec)\n",
microseconds, max * IPA_AGGR_GRANULARITY);
/* The head-of-line blocking timer is defined as a tick count.  For
 * IPA version 4.5 the tick count is based on the Qtimer, which is
 * derived from the 19.2 MHz SoC XO clock.  For older IPA versions
 * each tick represents 128 cycles of the IPA core clock.
 *
 * Return the encoded value representing the timeout period provided
 * that should be written to the ENDP_INIT_HOL_BLOCK_TIMER register.
 */
static u32 hol_block_timer_encode(struct ipa *ipa, const struct reg *reg,
u32 microseconds)
{
u32 width;
u32 scale;
u64 ticks;
u64 rate;
u32 high;
u32 val;
	if (!microseconds)
		return 0;	/* Nothing to compute if timer period is 0 */
if (ipa->version >= IPA_VERSION_4_5) {
u32 max = reg_field_max(reg, TIMER_LIMIT);
u32 select;
u32 ticks;
/* Use 64 bit arithmetic to avoid overflow */
rate = ipa_core_clock_rate(ipa);
ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
/* We still need the result to fit into the field */
WARN_ON(ticks > reg_field_max(reg, TIMER_BASE_VALUE));
	/* IPA v3.5.1 through v4.1 just record the tick count */
	if (ipa->version < IPA_VERSION_4_2)
		return reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks);
	/* For IPA v4.2, the tick count is represented by base and
	 * scale fields within the 32-bit timer register, where:
	 *     ticks = base << scale;
	 * The best precision is achieved when the base value is as
	 * large as possible.  Find the highest set bit in the tick
	 * count, and extract the number of bits in the base field
	 * such that high bit is included.
*/
high = fls(ticks); /* 1..32 (or warning above) */
width = hweight32(reg_fmask(reg, TIMER_BASE_VALUE));
	scale = high > width ? high - width : 0;
	if (scale) {
		/* If we're scaling, round up to get a closer result */
		ticks += 1 << (scale - 1);
		/* High bit was set, so rounding might have affected it */
		if (fls(ticks) != high)
scale++;
}
val = reg_encode(reg, TIMER_SCALE, scale);
val |= reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks >> scale);
return val;
}
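
/* Illustrative sketch (not part of the driver): the IPA v4.2 base/scale
 * split above, shown standalone for a hypothetical base field width.  With
 * ticks == 1000 and a 5-bit base field, fls() is 10, so scale is 5; rounding
 * gives base 31, and 31 << 5 == 992 is the value the hardware would count.
 */
static u32 __maybe_unused hol_block_base_scale_sketch(u32 ticks, u32 width)
{
	u32 high = fls(ticks);
	u32 scale = high > width ? high - width : 0;

	if (scale) {
		ticks += 1 << (scale - 1);	/* Round to nearest */
		if (fls(ticks) != high)
			scale++;
	}

	return (ticks >> scale) << scale;	/* Effective tick count */
}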
/* If microseconds is 0, timeout is immediate */
static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
u32 microseconds)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	const struct reg *reg;
u32 val;
/* This should only be changed when HOL_BLOCK_EN is disabled */
reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_TIMER);
val = hol_block_timer_encode(ipa, reg, microseconds);
	/* When enabling, the register must be written twice for IPA v4.5+ */
	if (enable && ipa->version >= IPA_VERSION_4_5)
iowrite32(val, ipa->reg_virt + offset);
}
/* Assumes HOL_BLOCK is in disabled state */
static void ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint,
u32 microseconds)
{
ipa_endpoint_init_hol_block_timer(endpoint, microseconds);
ipa_endpoint_init_hol_block_en(endpoint, true);
}
/**
 * ipa_endpoint_skb_tx() - Transmit a socket buffer
 * @endpoint:	Endpoint pointer
 * @skb:	Socket buffer to send
 *
 * Returns:	0 if successful, or a negative error code
 */
int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
{
	struct gsi_trans *trans;
	u32 nr_frags;
	int ret;

	/* Make sure source endpoint's TLV FIFO has enough entries to
	 * hold the linear portion of the skb and all its fragments.
	 * If not, see if we can linearize it before giving up.
*/
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags > endpoint->skb_frag_max) {
		if (skb_linearize(skb))
			return -E2BIG;
nr_frags = 0;
}
	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
	if (!trans)
		return -EBUSY;

	ret = gsi_trans_skb_add(trans, skb);
	if (ret)
		goto err_trans_free;
trans->data = skb; /* transaction owns skb now */
	reg = ipa_reg(ipa, ENDP_STATUS);
	if (endpoint->config.status_enable) {
		val |= reg_bit(reg, STATUS_EN);
		if (endpoint->toward_ipa) {
			enum ipa_endpoint_name name;
u32 status_endpoint_id;
name = endpoint->config.tx.status_endpoint;
status_endpoint_id = ipa->name_map[name]->endpoint_id;
val |= reg_encode(reg, STATUS_ENDP, status_endpoint_id);
		}

		/* STATUS_LOCATION is 0, meaning IPA packet status
		 * precedes the packet (not present for IPA v4.5+)
		 */
		/* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v4.0+) */
}
buffer_size = endpoint->config.rx.buffer_size;
	page = dev_alloc_pages(get_order(buffer_size));
	if (!page)
		return -ENOMEM;
/* Offset the buffer to make space for skb headroom */
offset = NET_SKB_PAD;
len = buffer_size - offset;
	ret = gsi_trans_page_add(trans, page, len, offset);
	if (ret)
		put_page(page);
	else
trans->data = page; /* transaction owns page now */
return ret;
}
/**
 * ipa_endpoint_replenish() - Replenish endpoint receive buffers
 * @endpoint:	Endpoint to be replenished
 *
 * The IPA hardware can hold a fixed number of receive buffers for an RX
 * endpoint, based on the number of entries in the underlying channel ring
 * buffer.  If an endpoint's "backlog" is non-zero, it indicates how many
 * more receive buffers can be supplied to the hardware.  Replenishing for
 * an endpoint can be disabled, in which case buffers are not queued to
 * the hardware.
 */
static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
{
	struct gsi_trans *trans;

	if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags))
		return;

	/* Skip it if it's already active */
	if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags))
		return;

	while ((trans = ipa_endpoint_trans_alloc(endpoint, 1))) {
		bool doorbell;

		if (ipa_endpoint_replenish_one(endpoint, trans))
			goto try_again_later;
/* Ring the doorbell if we've got a full batch */
doorbell = !(++endpoint->replenish_count % IPA_REPLENISH_BATCH);
gsi_trans_commit(trans, doorbell);
}
	/* Whenever a receive buffer transaction completes we'll try to
	 * replenish again.  It's unlikely, but if we fail to supply even
	 * one buffer, nothing will trigger another replenish attempt.
	 * If the hardware has no receive buffers queued, schedule work to
	 * try replenishing again.
	 */
	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
schedule_delayed_work(&endpoint->replenish_work,
msecs_to_jiffies(1));
}
	/* Start replenishing if hardware currently has no buffers */
	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
ipa_endpoint_replenish(endpoint);
}
	skb = __dev_alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		/* Copy the data into the socket buffer and receive it */
skb_put(skb, len);
memcpy(skb->data, data, len);
skb->truesize += extra;
}
	skb = build_skb(page_address(page), buffer_size);
	if (skb) {
		/* Reserve the headroom and account for the data */
skb_reserve(skb, NET_SKB_PAD);
skb_put(skb, len);
}
/* Receive the buffer (or record drop if unable to build it) */
ipa_modem_skb_rx(endpoint->netdev, skb);
return skb != NULL;
}
/* The format of an IPA packet status structure is the same for several
 * status types (opcodes).  Other types aren't currently supported.
 */
static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
{
	switch (opcode) {
	case IPA_STATUS_OPCODE_PACKET:
	case IPA_STATUS_OPCODE_DROPPED_PACKET:
	case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
	case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
		return true;
	default:
		return false;
}
}
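
/* Usage sketch (illustrative only, not a driver function): a caller might
 * combine ipa_status_format_packet() with ipa_status_extract() to decide
 * whether a status record uses the packet layout at all.
 */
static bool __maybe_unused
ipa_status_is_packet_sketch(struct ipa *ipa, const void *data)
{
	enum ipa_status_opcode opcode;

	opcode = ipa_status_extract(ipa, data, STATUS_OPCODE);

	return ipa_status_format_packet(opcode);
}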
	status_mask = ipa_status_extract(ipa, data, STATUS_MASK);
	if (!status_mask)
		return false;	/* No valid tag */

	/* The status contains a valid tag.  We know the packet was sent to
	 * this endpoint (already verified by ipa_endpoint_status_skip()).
	 * If the packet came from the AP->command TX endpoint we know
	 * this packet was sent as part of the pipeline clear process.
*/
endpoint_id = ipa_status_extract(ipa, data, STATUS_SRC_ENDPOINT);
	command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
	if (endpoint_id == command_endpoint->endpoint_id) {
complete(&ipa->completion);
} else {
dev_err(ipa->dev, "unexpected tagged packet from endpoint %u\n",
endpoint_id);
}
	return true;
}
/* Return whether the status indicates the packet should be dropped */
static bool
ipa_endpoint_status_drop(struct ipa_endpoint *endpoint, const void *data)
{
	enum ipa_status_exception exception;
	struct ipa *ipa = endpoint->ipa;
u32 rule;
	/* If the status indicates a tagged transfer, we'll drop the packet */
	if (ipa_endpoint_status_tag_valid(endpoint, data))
		return true;
/* Deaggregation exceptions we drop; all other types we consume */
	exception = ipa_status_extract(ipa, data, STATUS_EXCEPTION);
	if (exception)
		return exception == IPA_STATUS_EXCEPTION_DEAGGR;
/* Drop the packet if it fails to match a routing rule; otherwise no */
rule = ipa_status_extract(ipa, data, STATUS_ROUTER_RULE_INDEX);
/* Skip over status packets that lack packet data */
		length = ipa_status_extract(ipa, data, STATUS_LENGTH);
		if (!length || ipa_endpoint_status_skip(endpoint, data)) {
data += IPA_STATUS_SIZE;
resid -= IPA_STATUS_SIZE; continue;
}
		/* Compute the amount of buffer space consumed by the packet,
		 * including the status.  If the hardware is configured to
		 * pad packet data to an aligned boundary, account for that.
		 * And if checksum offload is enabled a trailer containing
		 * computed checksum information will be appended.
*/
align = endpoint->config.rx.pad_align ? : 1;
		len = IPA_STATUS_SIZE + ALIGN(length, align);
		if (endpoint->config.checksum)
len += sizeof(struct rmnet_map_dl_csum_trailer);
		if (!ipa_endpoint_status_drop(endpoint, data)) {
			void *data2;
u32 extra;
/* Client receives only packet data (no status) */
data2 = data + IPA_STATUS_SIZE;
			/* Have the true size reflect the extra unused space in
			 * the original receive buffer.  Distribute the "cost"
			 * proportionately across all aggregated packets in the
			 * buffer.
*/
extra = DIV_ROUND_CLOSEST(unused * len, total_len);
ipa_endpoint_skb_copy(endpoint, data2, length, extra);
}
/* Consume status and the full packet it describes */
data += len;
resid -= len;
}
}
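
/* Illustrative sketch (not part of the driver): the per-packet stride the
 * parsing loop above consumes, for a hypothetical packet length and pad
 * alignment, when checksum offload appends a download checksum trailer.
 */
static u32 __maybe_unused ipa_status_stride_sketch(void)
{
	u32 length = 1400;	/* Hypothetical length from the status */
	u32 align = 4;		/* Hypothetical rx.pad_align value */

	return IPA_STATUS_SIZE + ALIGN(length, align) +
		sizeof(struct rmnet_map_dl_csum_trailer);
}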
/* Parse or build a socket buffer using the actual received length */
	page = trans->data;
	if (endpoint->config.status_enable)
		ipa_endpoint_status_parse(endpoint, page, trans->len);
	else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
trans->data = NULL; /* Pages have been consumed */
done:
ipa_endpoint_replenish(endpoint);
}
/**
 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
 * @endpoint:	Endpoint to be reset
 *
 * If aggregation is active on an RX endpoint when a reset is performed
 * on its underlying GSI channel, a special sequence of actions must be
 * taken to ensure the IPA pipeline is properly cleared.
 *
 * Return:	0 if successful, or a negative error code
 */
static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;
	struct device *dev = ipa->dev;
	struct gsi *gsi = &ipa->gsi;
	bool suspended = false;
dma_addr_t addr;
u32 retries;
	u32 len = 1;
	void *virt;
	int ret;

	virt = kzalloc(len, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, addr)) {
		ret = -ENOMEM;
		goto out_kfree;
}
/* Force close aggregation before issuing the reset */
ipa_endpoint_force_close(endpoint);
	/* Reset and reconfigure the channel with the doorbell engine
	 * disabled.  Then poll until we know aggregation is no longer
	 * active.  We'll re-enable the doorbell (if appropriate) when
	 * we reset again below.
*/
gsi_channel_reset(gsi, endpoint->channel_id, false);
/* Make sure the channel isn't suspended */
suspended = ipa_endpoint_program_suspend(endpoint, false);
/* Start channel and do a 1 byte read */
	ret = gsi_channel_start(gsi, endpoint->channel_id);
	if (ret)
		goto out_suspend_again;

	ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
	if (ret)
		goto err_endpoint_stop;
/* Wait for aggregation to be closed on the channel */
	retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
	do {
		if (!ipa_endpoint_aggr_active(endpoint))
			break;
usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
} while (retries--);
	/* Check one last time */
	if (ipa_endpoint_aggr_active(endpoint))
dev_err(dev, "endpoint %u still active during reset\n",
endpoint->endpoint_id);
	ret = gsi_channel_stop(gsi, endpoint->channel_id);
	if (ret)
		goto out_suspend_again;
	/* Finally, reset and reconfigure the channel again (re-enabling
	 * the doorbell engine if appropriate).  Sleep for 1 millisecond to
	 * complete the channel reset sequence.  Finish by suspending the
	 * channel again (if necessary).
*/
gsi_channel_reset(gsi, endpoint->channel_id, true);
static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
{
	u32 channel_id = endpoint->channel_id;
	struct ipa *ipa = endpoint->ipa;
	bool special;
	int ret = 0;

	/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
	 * is active, we need to handle things specially to recover.
	 * All other cases just need to reset the underlying GSI channel.
*/
special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa &&
		  endpoint->config.aggregation;
	if (special && ipa_endpoint_aggr_active(endpoint))
		ret = ipa_endpoint_reset_rx_aggr(endpoint);
	else
gsi_channel_reset(&ipa->gsi, channel_id, true);
if (ret)
dev_err(ipa->dev, "error %d resetting channel %u for endpoint %u\n",
ret, endpoint->channel_id, endpoint->endpoint_id);
}
static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
{
	if (endpoint->toward_ipa) {
		/* Newer versions of IPA use GSI channel flow control
		 * instead of endpoint DELAY mode to prevent sending data.
		 * Flow control is disabled for newly-allocated channels,
		 * and we can assume flow control is not (ever) enabled
		 * for AP TX channels.
		 */
		if (endpoint->ipa->version < IPA_VERSION_4_2)
ipa_endpoint_program_delay(endpoint, false);
	} else {
		/* Ensure suspend mode is off on all AP RX endpoints */
(void)ipa_endpoint_program_suspend(endpoint, false);
}
ipa_endpoint_init_cfg(endpoint);
ipa_endpoint_init_nat(endpoint);
ipa_endpoint_init_hdr(endpoint);
ipa_endpoint_init_hdr_ext(endpoint);
ipa_endpoint_init_hdr_metadata_mask(endpoint);
ipa_endpoint_init_mode(endpoint);
	ipa_endpoint_init_aggr(endpoint);
	if (!endpoint->toward_ipa) {
		if (endpoint->config.rx.holb_drop)
			ipa_endpoint_init_hol_block_enable(endpoint, 0);
		else
ipa_endpoint_init_hol_block_disable(endpoint);
}
ipa_endpoint_init_deaggr(endpoint);
ipa_endpoint_init_rsrc_grp(endpoint);
ipa_endpoint_init_seq(endpoint);
ipa_endpoint_status(endpoint);
}
if (!endpoint->toward_ipa) {
ipa_endpoint_replenish_disable(endpoint);
ipa_interrupt_suspend_disable(ipa->interrupt, endpoint_id);
}
/* Note that if stop fails, the channel's state is not well-defined */
	ret = gsi_channel_stop(gsi, endpoint->channel_id);
	if (ret)
dev_err(ipa->dev, "error %d attempting to stop endpoint %u\n",
ret, endpoint_id);
}
	/* Only AP endpoints get set up */
	if (endpoint->ee_id != GSI_EE_AP)
		return;

	endpoint->skb_frag_max = gsi->channel[channel_id].trans_tre_max - 1;
	if (!endpoint->toward_ipa) {
		/* RX transactions require a single TRE, so the maximum
		 * backlog is the same as the maximum outstanding TREs.
*/
clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
INIT_DELAYED_WORK(&endpoint->replenish_work,
ipa_endpoint_replenish_work);
}
	/* Prior to IPA v3.5, the FLAVOR_0 register was not supported.
	 * Furthermore, the endpoints were not grouped such that TX
	 * endpoint numbers started with 0 and RX endpoints had numbers
	 * higher than all TX endpoints, so we can't do the simple
	 * direction check used for newer hardware below.
	 *
	 * For hardware that doesn't support the FLAVOR_0 register,
	 * just set the available mask to support any endpoint, and
	 * assume the configuration is valid.
	 */
	if (ipa->version < IPA_VERSION_3_5) {
		ipa->available = bitmap_zalloc(IPA_ENDPOINT_MAX, GFP_KERNEL);
		if (!ipa->available)
			return -ENOMEM;
ipa->available_count = IPA_ENDPOINT_MAX;
bitmap_set(ipa->available, 0, IPA_ENDPOINT_MAX);
return 0;
}
	/* Find out about the endpoints supplied by the hardware, and ensure
	 * the highest one doesn't exceed the number supported by software.
*/
reg = ipa_reg(ipa, FLAVOR_0);
val = ioread32(ipa->reg_virt + reg_offset(reg));
/* Our RX is an IPA producer; our TX is an IPA consumer. */
tx_count = reg_decode(reg, MAX_CONS_PIPES, val);
rx_count = reg_decode(reg, MAX_PROD_PIPES, val);
rx_base = reg_decode(reg, PROD_LOWEST, val);
/* Until IPA v5.0, the max endpoint ID was 32 */
	hw_limit = ipa->version < IPA_VERSION_5_0 ? 32 : U8_MAX + 1;
	if (limit > hw_limit) {
dev_err(dev, "unexpected endpoint count, %u > %u\n",
			limit, hw_limit);
		return -EINVAL;
}
/* Allocate and initialize the available endpoint bitmap */
	ipa->available = bitmap_zalloc(limit, GFP_KERNEL);
	if (!ipa->available)
		return -ENOMEM;
ipa->available_count = limit;
/* Mark all supported RX and TX endpoints as available */
bitmap_set(ipa->available, 0, tx_count);
bitmap_set(ipa->available, rx_base, rx_count);
if (!test_bit(endpoint_id, ipa->available)) {
dev_err(dev, "unavailable endpoint id %u\n",
endpoint_id); goto err_free_bitmap;
}
/* Make sure it's pointing in the right direction */
		endpoint = &ipa->endpoint[endpoint_id];
		if (endpoint->toward_ipa) {
			if (endpoint_id < tx_count)
				continue;
		} else if (endpoint_id >= rx_base) {
			continue;
		}

		dev_err(dev, "endpoint id %u wrong direction\n", endpoint_id);
		goto err_free_bitmap;
}
/* Returns a bitmask of endpoints that support filtering, or 0 on error */
int ipa_endpoint_init(struct ipa *ipa, u32 count,
		      const struct ipa_gsi_endpoint_data *data)
{
	enum ipa_endpoint_name name;
u32 filtered;
BUILD_BUG_ON(!IPA_REPLENISH_BATCH);
/* Number of endpoints is one more than the maximum ID */
	ipa->endpoint_count = ipa_endpoint_max(ipa, count, data) + 1;
	if (!ipa->endpoint_count)
		return -EINVAL;
/* Initialize endpoint state bitmaps */
	ipa->defined = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
	if (!ipa->defined)
		return -ENOMEM;

	ipa->set_up = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
	if (!ipa->set_up)
		goto err_free_defined;

	ipa->enabled = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
	if (!ipa->enabled)
		goto err_free_set_up;

	filtered = 0;
	for (name = 0; name < count; name++, data++) {
		if (ipa_gsi_endpoint_data_empty(data))
			continue;	/* Skip over empty slots */
ipa_endpoint_init_one(ipa, name, data);
if (data->endpoint.filter_support)
			filtered |= BIT(data->endpoint_id);

		if (data->ee_id == GSI_EE_MODEM && data->toward_ipa)
ipa->modem_tx_count++;
}
	/* Make sure the set of filtered endpoints is valid */
	if (!ipa_filtered_valid(ipa, filtered)) {
ipa_endpoint_exit(ipa);