/* NOTE(review): garbled extraction — two functions are fused in this span.
 * ipc_mux_tq_cmd_send() is cut off after its first declarations, and
 * ipc_mux_acb_send() begins mid-block without the previous body ever being
 * closed.  Code is left byte-for-byte as found; restore both functions from
 * the original driver source before building.
 */
/* Test the link power state and send a MUX command in blocking mode. */ staticint ipc_mux_tq_cmd_send(struct iosm_imem *ipc_imem, int arg, void *msg,
size_t size)
{ struct iosm_mux *ipc_mux = ipc_imem->mux; conststruct mux_acb *acb = msg;
/* NOTE(review): the remainder of ipc_mux_tq_cmd_send() is missing here. */
staticint ipc_mux_acb_send(struct iosm_mux *ipc_mux, bool blocking)
{ struct completion *completion = &ipc_mux->channel->ul_sem; int ret = ipc_task_queue_send_task(ipc_mux->imem, ipc_mux_tq_cmd_send,
0, &ipc_mux->acb, sizeof(ipc_mux->acb), false); if (ret) {
dev_err(ipc_mux->dev, "unable to send mux command"); return ret;
}
/* if blocking, suspend the app and wait for irq in the flash or * crash phase. return false on timeout to indicate failure.
*/ if (blocking) {
u32 wait_time_milliseconds = IPC_MUX_CMD_RUN_DEFAULT_TIMEOUT;
/* NOTE(review): ipc_mux_acb_send() is truncated here — the blocking wait on
 * 'completion' and the function's return path are missing.
 */
/* NOTE(review): garbled extraction — this span has no visible enclosing
 * function.  The switch below appears to belong to a command-response
 * decoder, while the flow-control mask handling further down uses
 * 'session', 'param', 'cmd_len' and 'if_id' that are declared nowhere in
 * view and looks like part of a separate command decoder.  Left
 * byte-for-byte as found.
 */
switch (le32_to_cpu(command_type)) { case MUX_CMD_OPEN_SESSION_RESP: case MUX_CMD_CLOSE_SESSION_RESP: /* Resume the control application. */
acb->got_param = param; break;
case MUX_LITE_CMD_FLOW_CTL_ACK: /* This command type is not expected as response for * Aggregation version of the protocol. So return non-zero.
*/ if (ipc_mux->protocol != MUX_LITE) return -EINVAL;
/* NOTE(review): no 'break' before the next case label, unlike the sibling
 * case below — a 'break' was probably lost in extraction; TODO confirm
 * against the original source.
 */
case IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK: /* This command type is not expected as response for * Lite version of the protocol. So return non-zero.
*/ if (ipc_mux->protocol == MUX_LITE) return -EINVAL; break;
if (param->flow_ctl.mask == cpu_to_le32(0xFFFFFFFF)) { /* Backward Compatibility */ if (cmd_len == cpu_to_le16(size))
session->flow_ctl_mask =
le32_to_cpu(param->flow_ctl.mask); else
session->flow_ctl_mask = ~0; /* if CP asks for FLOW CTRL Enable * then set our internal flow control Tx flag * to limit uplink session queueing
*/
session->net_tx_stop = true;
/* We have to call Finish ADB here. * Otherwise any already queued data * will be sent to CP when ADB is full * for some other sessions.
*/ if (ipc_mux->protocol == MUX_AGGREGATION) {
ipc_mux_ul_adb_finish(ipc_mux);
ipc_imem_hrtimer_stop(adb_timer);
} /* Update the stats */
session->flow_ctl_en_cnt++;
} elseif (param->flow_ctl.mask == 0) { /* Just reset the Flow control mask and let * mux_flow_ctrl_low_thre_b take control on * our internal Tx flag and enabling kernel * flow control
*/
dev_dbg(ipc_mux->dev, "if_id[%u] flow_ctl mask 0x%08X",
if_id, le32_to_cpu(param->flow_ctl.mask)); /* Backward Compatibility */ if (cmd_len == cpu_to_le16(size))
session->flow_ctl_mask =
le32_to_cpu(param->flow_ctl.mask); else
session->flow_ctl_mask = 0; /* Update the stats */
session->flow_ctl_dis_cnt++;
} else { break;
}
/* NOTE(review): garbled extraction — ipc_mux_dl_cmd_decode() below is cut
 * off mid-body, and the lines that follow splice in fragments of at least
 * three other downlink decode paths (flow-credit update, ADGH decode, ADBH
 * table walk) whose enclosing functions and local declarations ('wwan',
 * 'ul_credits', 'adgh', 'adbh', 'pad_len', 'packet_offset', 'adth_index',
 * 'adth', 'block') are missing.  Left byte-for-byte as found.
 */
/* Decode and Send appropriate response to a command block. */ staticvoid ipc_mux_dl_cmd_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
{ struct mux_lite_cmdh *cmdh = (struct mux_lite_cmdh *)skb->data;
__le32 trans_id = cmdh->transaction_id; int size;
if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh->param,
cmdh->command_type, cmdh->if_id,
cmdh->transaction_id)) { /* Unable to decode command response indicates the cmd_type * may be a command instead of response. So try to decoding it.
*/
size = offsetof(struct mux_lite_cmdh, param) + sizeof(cmdh->param.flow_ctl); if (!ipc_mux_dl_cmds_decode_process(ipc_mux, &cmdh->param,
cmdh->command_type,
cmdh->if_id,
cmdh->cmd_len, size)) { /* Decoded command may need a response. Give the * response according to the command type.
*/ union mux_cmd_param *mux_cmd = NULL;
size_t size = 0;
u32 cmd = MUX_LITE_CMD_LINK_STATUS_REPORT_RESP;
/* NOTE(review): ipc_mux_dl_cmd_decode() is truncated here; the lines below
 * belong to a different (flow-credit) decode function.
 */
/* Is the session active ? */
if_id = array_index_nospec(if_id, IPC_MEM_MUX_IP_SESSION_ENTRIES);
wwan = ipc_mux->session[if_id].wwan; if (!wwan) {
dev_err(ipc_mux->dev, "session Net ID is NULL"); return;
}
/* Update the Flow Credit information from ADB */
ipc_mux->session[if_id].ul_flow_credits += ul_credits;
/* Check whether the TX can be started */ if (ipc_mux->session[if_id].ul_flow_credits > 0) {
ipc_mux->session[if_id].net_tx_stop = false;
ipc_mux_netif_tx_flowctrl(&ipc_mux->session[if_id],
ipc_mux->session[if_id].if_id, false);
}
}
/* NOTE(review): from here on, fragments of an ADGH decode path. */
if_id = adgh->if_id; if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
dev_err(ipc_mux->dev, "invalid if_id while decoding %d", if_id); return;
}
/* Is the session active ? */
if_id = array_index_nospec(if_id, IPC_MEM_MUX_IP_SESSION_ENTRIES);
wwan = ipc_mux->session[if_id].wwan; if (!wwan) {
dev_err(ipc_mux->dev, "session Net ID is NULL"); return;
}
/* Store the pad len for the corresponding session * Pad bytes as negotiated in the open session less the header size * (see session management chapter for details). * If resulting padding is zero or less, the additional head padding is * omitted. For e.g., if HEAD_PAD_LEN = 16 or less, this field is * omitted if HEAD_PAD_LEN = 20, then this field will have 4 bytes * set to zero
*/
pad_len =
ipc_mux->session[if_id].dl_head_pad_len - IPC_MEM_DL_ETH_OFFSET;
packet_offset = sizeof(*adgh) + pad_len;
/* NOTE(review): from here on, fragments of an ADBH table-walk decode path,
 * truncated before the table loop completes.
 */
/* Process the aggregated datagram tables. */
adth_index = le32_to_cpu(adbh->first_table_index);
/* Has CP sent an empty ADB ? */ if (adth_index < 1) {
dev_err(ipc_mux->dev, "unexpected empty ADB"); goto adb_decode_err;
}
/* Loop through mixed session tables. */ while (adth_index) { /* Get the reference to the table header. */
adth = (struct mux_adth *)(block + adth_index);
/* Get the interface id and map it to the netif id. */
if_id = adth->if_id; if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) goto adb_decode_err;
/* NOTE(review): ipc_mux_ul_skb_alloc() — takes an ADB skb from the UL free
 * list and initializes it for the given signature 'type'.  The function is
 * truncated in this extraction: only the IOSM_AGGR_MUX_SIG_ADBH case of the
 * switch is present; the remaining cases ('next_tb_id' and 'qlt_size' are
 * declared but never used here), the switch closing, and the return are
 * missing.  Left byte-for-byte as found.
 */
staticint ipc_mux_ul_skb_alloc(struct iosm_mux *ipc_mux, struct mux_adb *ul_adb, u32 type)
{ /* Take the first element of the free list. */ struct sk_buff *skb = skb_dequeue(&ul_adb->free_list);
u32 no_if = IPC_MEM_MUX_IP_SESSION_ENTRIES;
u32 *next_tb_id; int qlt_size;
u32 if_id;
if (!skb) return -EBUSY; /* Wait for a free ADB skb. */
/* Mark it as UL ADB to select the right free operation. */
IPC_CB(skb)->op_type = (u8)UL_MUX_OP_ADB;
switch (type) { case IOSM_AGGR_MUX_SIG_ADBH: /* Save the ADB memory settings. */
ul_adb->dest_skb = skb;
ul_adb->buf = skb->data;
ul_adb->size = IPC_MEM_MAX_ADB_BUF_SIZE;
/* Clear the local copy of DGs for new ADB */
memset(ul_adb->dg, 0, sizeof(ul_adb->dg));
/* Clear the DG count and QLT updated status for new ADB */ for (if_id = 0; if_id < no_if; if_id++) {
ul_adb->dg_count[if_id] = 0;
ul_adb->qlt_updated[if_id] = 0;
} break;
/* Allocates an ADB from the free list and initializes it with ADBH */ staticbool ipc_mux_ul_adb_allocate(struct iosm_mux *ipc_mux, struct mux_adb *adb, int *size_needed,
u32 type)
{ bool ret_val = false; int status;
if (!adb->dest_skb) { /* Allocate memory for the ADB including of the * datagram table header.
*/
status = ipc_mux_ul_skb_alloc(ipc_mux, adb, type); if (status) /* Is a pending ADB available ? */
ret_val = true; /* None. */
/* Update size need to zero only for new ADB memory */
*size_needed = 0;
}
return ret_val;
}
/* NOTE(review): garbled extraction — the header and declarations of
 * ipc_mux_stop_tx_for_all_sessions() are fused with the tail of a
 * different function: 'qlt_updated' is undefined here, the function is
 * declared void yet returns a value, and 'session'/'idx' are unused.
 * The tail appears to belong to a QLT-send helper.  Left byte-for-byte
 * as found.
 */
/* Informs the network stack to stop sending further packets for all opened * sessions
*/ staticvoid ipc_mux_stop_tx_for_all_sessions(struct iosm_mux *ipc_mux)
{ struct mux_session *session; int idx;
if (qlt_updated) /* Updates the TDs with ul_list */
(void)ipc_imem_ul_write_td(ipc_mux->imem);
return qlt_updated;
}
/* NOTE(review): as extracted, 'credits' is initialized to 0 and never
 * raised before the queue walk, so the walk condition fails on the first
 * iteration and the function always returns 0 packets.  The lines that
 * load 'credits' from the session's available credits (and the
 * non-credit-mode byte budget) were almost certainly lost in extraction —
 * TODO restore from the original driver source.  Code left byte-for-byte
 * as found.
 */
/* Checks the available credits for the specified session and returns * number of packets for which credits are available.
*/ staticint ipc_mux_ul_bytes_credits_check(struct iosm_mux *ipc_mux, struct mux_session *session, struct sk_buff_head *ul_list, int max_nr_of_pkts)
{ int pkts_to_send = 0; struct sk_buff *skb; int credits = 0;
/* Check if there are enough credits/bytes available to send the * requested max_nr_of_pkts. Otherwise restrict the nr_of_pkts * depending on available credits.
*/
skb_queue_walk(ul_list, skb)
{ if (!(credits >= skb->len && pkts_to_send < max_nr_of_pkts)) break;
credits -= skb->len;
pkts_to_send++;
}
return pkts_to_send;
}
/* NOTE(review): braces balance but several encode steps appear to have been
 * lost in extraction: 'nr_of_pkts' is never decremented inside the while
 * loop (so only the break/return paths can exit it), 'offset' is never
 * advanced past the first datagram, 'aligned_size' is computed into
 * nothing, and no ADGH header is ever written into the buffer.  TODO
 * restore from the original driver source.  Code left byte-for-byte as
 * found.
 */
/* Encode the UL IP packet according to Lite spec. */ staticint ipc_mux_ul_adgh_encode(struct iosm_mux *ipc_mux, int session_id, struct mux_session *session, struct sk_buff_head *ul_list, struct mux_adb *adb, int nr_of_pkts)
{ int offset = sizeof(struct mux_adgh); int adb_updated = -EINVAL; struct sk_buff *src_skb; int aligned_size = 0; int nr_of_skb = 0;
u32 pad_len = 0;
/* Re-calculate the number of packets depending on number of bytes to be * processed/available credits.
*/
nr_of_pkts = ipc_mux_ul_bytes_credits_check(ipc_mux, session, ul_list,
nr_of_pkts);
/* If calculated nr_of_pkts from available credits is <= 0 * then nothing to do.
*/ if (nr_of_pkts <= 0) return 0;
/* Read configured UL head_pad_length for session.*/ if (session->ul_head_pad_len > IPC_MEM_DL_ETH_OFFSET)
pad_len = session->ul_head_pad_len - IPC_MEM_DL_ETH_OFFSET;
/* Process all pending UL packets for this session * depending on the allocated datagram table size.
*/ while (nr_of_pkts > 0) { /* get destination skb allocated */ if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
IOSM_AGGR_MUX_SIG_ADGH)) {
dev_err(ipc_mux->dev, "no reserved memory for ADGH"); return -ENOMEM;
}
/* Peek at the head of the list. */
src_skb = skb_peek(ul_list); if (!src_skb) {
dev_err(ipc_mux->dev, "skb peek return NULL with count : %d",
nr_of_pkts); break;
}
if (ipc_mux->size_needed > adb->size) {
dev_dbg(ipc_mux->dev, "size needed %d, adgh size %d",
ipc_mux->size_needed, adb->size); /* Return 1 if any IP packet is added to the transfer * list.
*/ return nr_of_skb ? 1 : 0;
}
/* Add buffer (without head padding to next pending transfer) */
memcpy(adb->buf + offset + pad_len, src_skb->data,
src_skb->len);
if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS) /* Decrement the credit value as we are processing the * datagram from the UL list.
*/
session->ul_flow_credits -= src_skb->len;
/* Remove the processed elements and free it. */
src_skb = skb_dequeue(ul_list);
dev_kfree_skb(src_skb);
nr_of_skb++;
ipc_mux_ul_adgh_finish(ipc_mux);
}
if (nr_of_skb) { /* Send QLT info to modem if pending bytes > high watermark * in case of mux lite
*/ if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS ||
ipc_mux->ul_data_pend_bytes >=
IPC_MEM_MUX_UL_FLOWCTRL_LOW_B)
adb_updated = ipc_mux_lite_send_qlt(ipc_mux); else
adb_updated = 1;
/* Updates the TDs with ul_list */
(void)ipc_imem_ul_write_td(ipc_mux->imem);
}
return adb_updated;
}
/* NOTE(review): two truncated function openings follow.
 * ipc_mux_ul_adb_update_ql() is cut off right after its local
 * declarations, and mux_ul_adb_encode() is cut off after its initial
 * flow-control guard.  Restore both from the original driver source.
 * Code left byte-for-byte as found.
 */
/** * ipc_mux_ul_adb_update_ql - Adds Queue Level Table and Queue Level to ADB * @ipc_mux: pointer to MUX instance data * @p_adb: pointer to UL aggegated data block * @session_id: session id * @qlth_n_ql_size: Length (in bytes) of the datagram table * @ul_list: pointer to skb buffer head
*/ void ipc_mux_ul_adb_update_ql(struct iosm_mux *ipc_mux, struct mux_adb *p_adb, int session_id, int qlth_n_ql_size, struct sk_buff_head *ul_list)
{ int qlevel = ul_list->qlen; struct mux_qlth *p_qlt;
/* Process encode session UL data to ADB. */ staticint mux_ul_adb_encode(struct iosm_mux *ipc_mux, int session_id, struct mux_session *session, struct sk_buff_head *ul_list, struct mux_adb *adb, int pkt_to_send)
{ int adb_updated = -EINVAL; int head_pad_len, offset; struct sk_buff *src_skb = NULL; struct mux_adth_dg *dg;
u32 qlth_n_ql_size;
/* If any of the opened session has set Flow Control ON then limit the * UL data to mux_flow_ctrl_high_thresh_b bytes
*/ if (ipc_mux->ul_data_pend_bytes >=
IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B) {
ipc_mux_stop_tx_for_all_sessions(ipc_mux); return adb_updated;
}
/* NOTE(review): garbled extraction — 'returnfalse' is a fused token, the
 * function's tail (clearing adb_prep_ongoing and returning the 'updated'
 * result) is missing, and the skb_trim/skb_queue_tail statements near the
 * end reference an undefined 'skb' and appear to belong to a separate
 * ADB-recycling function.  Restore from the original driver source.  Code
 * left byte-for-byte as found.
 */
bool ipc_mux_ul_data_encode(struct iosm_mux *ipc_mux)
{ struct sk_buff_head *ul_list; struct mux_session *session; int updated = 0; int session_id; int dg_n; int i;
if (!ipc_mux || ipc_mux->state != MUX_S_ACTIVE ||
ipc_mux->adb_prep_ongoing) returnfalse;
ipc_mux->adb_prep_ongoing = true;
for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) {
session_id = ipc_mux->rr_next_session;
session = &ipc_mux->session[session_id];
/* Go to next handle rr_next_session overflow */
ipc_mux->rr_next_session++; if (ipc_mux->rr_next_session >= IPC_MEM_MUX_IP_SESSION_ENTRIES)
ipc_mux->rr_next_session = 0;
if (!session->wwan || session->flow_ctl_mask ||
session->net_tx_stop) continue;
ul_list = &session->ul_list;
/* Is something pending in UL and flow ctrl off */
dg_n = skb_queue_len(ul_list); if (dg_n > MUX_MAX_UL_DG_ENTRIES)
dg_n = MUX_MAX_UL_DG_ENTRIES;
if (dg_n == 0) /* Nothing to do for ipc_mux session * -> try next session id.
*/ continue; if (ipc_mux->protocol == MUX_LITE)
updated = ipc_mux_ul_adgh_encode(ipc_mux, session_id,
session, ul_list,
&ipc_mux->ul_adb,
dg_n); else
updated = mux_ul_adb_encode(ipc_mux, session_id,
session, ul_list,
&ipc_mux->ul_adb,
dg_n);
}
if (ipc_mux->ul_flow == MUX_UL)
dev_dbg(ipc_mux->dev, "ul_data_pend_bytes: %lld",
ipc_mux->ul_data_pend_bytes);
/* NOTE(review): the statements below reference an undefined 'skb' and do
 * not belong to this function.
 */
/* Reset the skb settings. */
skb_trim(skb, 0);
/* Add the consumed ADB to the free list. */
skb_queue_tail((&ipc_mux->ul_adb.free_list), skb);
}
/* Start the NETIF uplink send transfer in MUX mode.
 *
 * Task-queue callback: encodes pending session UL data towards CP and,
 * when data was queued, arms the doorbell delay timers.  @arg, @msg and
 * @size are unused here; always returns 0 per the task-queue contract.
 */
static int ipc_mux_tq_ul_trigger_encode(struct iosm_imem *ipc_imem, int arg,
					void *msg, size_t size)
{
	struct iosm_mux *ipc_mux = ipc_imem->mux;
	bool ul_data_pend = false;

	/* Add session UL data to an ADB and ADGH */
	ul_data_pend = ipc_mux_ul_data_encode(ipc_mux);
	if (ul_data_pend) {
		if (ipc_mux->protocol == MUX_AGGREGATION)
			ipc_imem_adb_timer_start(ipc_mux->imem);

		/* Delay the doorbell irq */
		ipc_imem_td_update_timer_start(ipc_mux->imem);
	}

	/* Reset the debounce flag so the next netif xmit can reschedule us */
	ipc_mux->ev_mux_net_transmit_pending = false;

	return 0;
}
int ipc_mux_ul_trigger_encode(struct iosm_mux *ipc_mux, int if_id, struct sk_buff *skb)
{ struct mux_session *session = &ipc_mux->session[if_id]; int ret = -EINVAL;
if (ipc_mux->channel &&
ipc_mux->channel->state != IMEM_CHANNEL_ACTIVE) {
dev_err(ipc_mux->dev, "channel state is not IMEM_CHANNEL_ACTIVE"); goto out;
}
if (!session->wwan) {
dev_err(ipc_mux->dev, "session net ID is NULL");
ret = -EFAULT; goto out;
}
/* Session is under flow control. * Check if packet can be queued in session list, if not * suspend net tx
*/ if (skb_queue_len(&session->ul_list) >=
(session->net_tx_stop ?
IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD :
(IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD *
IPC_MEM_MUX_UL_SESS_FCOFF_THRESHOLD_FACTOR))) {
ipc_mux_netif_tx_flowctrl(session, session->if_id, true);
ret = -EBUSY; goto out;
}
/* Add skb to the uplink skb accumulator. */
skb_queue_tail(&session->ul_list, skb);
/* Inform the IPC kthread to pass uplink IP packets to CP. */ if (!ipc_mux->ev_mux_net_transmit_pending) {
ipc_mux->ev_mux_net_transmit_pending = true;
ret = ipc_task_queue_send_task(ipc_mux->imem,
ipc_mux_tq_ul_trigger_encode, 0,
NULL, 0, false); if (ret) goto out;
}
dev_dbg(ipc_mux->dev, "mux ul if[%d] qlen=%d/%u, len=%d/%d, prio=%d",
if_id, skb_queue_len(&session->ul_list), session->ul_list.qlen,
skb->len, skb->truesize, skb->priority);
ret = 0;
out: return ret;
}
/*
 * Messung V0.5
 * - Dauer der Verarbeitung: 0.45 Sekunden (vorverarbeitet)
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
 * noch Richtigkeit, noch Qualität der bereitgestellten Informationen
 * zugesichert.
 *
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */