/* * Copyright (c) 2007-2011 Atheros Communications Inc. * Copyright (c) 2011-2012 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
ath6kl_credit_deposit(cred_info, cur_ep_dist,
cur_ep_dist->cred_min); /* * Control service is always marked active, it * never goes inactive EVER.
*/
cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
}
/* * Streams have to be created (explicit | implicit) for all * kinds of traffic. BE endpoints are also inactive in the * beginning. When BE traffic starts it creates implicit * streams that redistributes credits. * * Note: all other endpoints have minimums set but are * initially given NO credits. credits will be distributed * as traffic activity demands
*/
}
/* * For ath6kl_credit_seek function, * it use list_for_each_entry_reverse to walk around the whole ep list. * Therefore assign this lowestpri_ep_dist after walk around the ep_list
*/
cred_info->lowestpri_ep_dist = cur_ep_dist->list;
WARN_ON(cred_info->cur_free_credits <= 0);
list_for_each_entry(cur_ep_dist, ep_list, list) { if (cur_ep_dist->endpoint == ENDPOINT_0) continue;
if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
} else { /* * For the remaining data endpoints, we assume that * each cred_per_msg are the same. We use a simple * calculation here, we take the remaining credits * and determine how many max messages this can * cover and then set each endpoint's normal value * equal to 3/4 this amount.
*/
count = (cred_info->cur_free_credits /
cur_ep_dist->cred_per_msg)
* cur_ep_dist->cred_per_msg;
count = (count * 3) >> 2;
count = max(count, cur_ep_dist->cred_per_msg);
cur_ep_dist->cred_norm = count;
}
/* set priority list */
ath6kl_htc_set_credit_dist(htc_target, cred_info, servicepriority, 5);
return 0;
}
/*
 * NOTE(review): this region appears to be corrupted by extraction.
 * The header declares ath6kl_credit_reduce(cred_info, ep_dist, limit),
 * but the body references an undeclared 'cur_list' and recursively
 * calls ath6kl_credit_reduce -- it looks like the body of the caller
 * (the per-endpoint credit-update walk) was fused onto this function's
 * header, and the real reduce body is missing.  Restore from the
 * upstream driver before relying on this code.
 */
/* reduce an ep's credits back to a set limit */ staticvoid ath6kl_credit_reduce(struct ath6kl_htc_credit_info *cred_info, struct htc_endpoint_credit_dist *ep_dist, int limit)
{ int credits;
ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit reduce ep %d limit %d\n",
ep_dist->endpoint, limit);
/* NOTE(review): 'cur_list' is not declared anywhere in this function */
if (cur_list->credits > cur_list->cred_assngd)
ath6kl_credit_reduce(cred_info,
cur_list,
cur_list->cred_assngd);
/* clamp back down to the computed "normal" share */
if (cur_list->credits > cur_list->cred_norm)
ath6kl_credit_reduce(cred_info, cur_list,
cur_list->cred_norm);
/* inactive endpoint with an empty tx queue gives everything back */
if (!(cur_list->dist_flags & HTC_EP_ACTIVE)) { if (cur_list->txq_depth == 0)
ath6kl_credit_reduce(cred_info,
cur_list, 0);
}
}
/* NOTE(review): unbalanced closing braces below -- fused fragment */
}
}
/* * HTC has an endpoint that needs credits, ep_dist is the endpoint in * question.
*/ staticvoid ath6kl_credit_seek(struct ath6kl_htc_credit_info *cred_info, struct htc_endpoint_credit_dist *ep_dist)
{ struct htc_endpoint_credit_dist *curdist_list; int credits = 0; int need;
if (ep_dist->svc_id == WMI_CONTROL_SVC) goto out;
if ((ep_dist->svc_id == WMI_DATA_VI_SVC) ||
(ep_dist->svc_id == WMI_DATA_VO_SVC)) if ((ep_dist->cred_assngd >= ep_dist->cred_norm)) goto out;
/* * For all other services, we follow a simple algorithm of: * * 1. checking the free pool for credits * 2. checking lower priority endpoints for credits to take
*/
/* * We don't have enough in the free pool, try taking away from * lower priority services The rule for taking away credits: * * 1. Only take from lower priority endpoints * 2. Only take what is allocated above the minimum (never * starve an endpoint completely) * 3. Only take what you need.
*/
list_for_each_entry_reverse(curdist_list,
&cred_info->lowestpri_ep_dist,
list) { if (curdist_list == ep_dist) break;
need = ep_dist->seek_cred - cred_info->cur_free_credits;
if ((curdist_list->cred_assngd - need) >=
curdist_list->cred_min) { /* * The current one has been allocated more than * it's minimum and it has enough credits assigned * above it's minimum to fulfill our need try to * take away just enough to fulfill our need.
*/
ath6kl_credit_reduce(cred_info, curdist_list,
curdist_list->cred_assngd - need);
if (cred_info->cur_free_credits >=
ep_dist->seek_cred) break;
}
if (curdist_list->endpoint == ENDPOINT_0) break;
}
/* * * This function is invoked whenever endpoints require credit * distributions. A lock is held while this function is invoked, this * function shall NOT block. The ep_dist_list is a list of distribution * structures in prioritized order as defined by the call to the * htc_set_credit_dist() api.
*/ staticvoid ath6kl_credit_distribute(struct ath6kl_htc_credit_info *cred_info, struct list_head *ep_dist_list, enum htc_credit_dist_reason reason)
{ switch (reason) { case HTC_CREDIT_DIST_SEND_COMPLETE:
ath6kl_credit_update(cred_info, ep_dist_list); break; case HTC_CREDIT_DIST_ACTIVITY_CHANGE:
ath6kl_credit_redistribute(cred_info, ep_dist_list); break; default: break;
}
/* on failure to submit, reclaim credits for this packet */
spin_lock_bh(&target->tx_lock);
endpoint->cred_dist.cred_to_dist +=
packet->info.tx.cred_used;
endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq);
/* walk through the scatter list and process */ for (i = 0; i < scat_req->scat_entries; i++) {
packet = scat_req->scat_list[i].packet; if (!packet) {
WARN_ON(1); return;
}
/* When we are getting low on credits, ask for more */ if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
ep->cred_dist.seek_cred =
ep->cred_dist.cred_per_msg - ep->cred_dist.credits;
/* see if we were successful in getting more */ if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) { /* tell the target we need credits ASAP! */
*flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
ep->ep_st.cred_low_indicate += 1;
ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit we need credits asap\n");
}
}
/* See if the padded tx length falls on a credit boundary */ staticint htc_get_credit_padding(unsignedint cred_sz, int *len, struct htc_endpoint *ep)
{ int rem_cred, cred_pad;
rem_cred = *len % cred_sz;
/* No padding needed */ if (!rem_cred) return 0;
if (!(ep->conn_flags & HTC_FLGS_TX_BNDL_PAD_EN)) return -1;
/* * The transfer consumes a "partial" credit, this * packet cannot be bundled unless we add * additional "dummy" padding (max 255 bytes) to * consume the entire credit.
*/
cred_pad = *len < cred_sz ? (cred_sz - *len) : rem_cred;
if ((cred_pad > 0) && (cred_pad <= 255))
*len += cred_pad; else /* The amount of padding is too large, send as non-bundled */ return -1;
return cred_pad;
}
/*
 * Build a HIF scatter list out of up to n_scat packets taken from
 * 'queue'.  Returns 0 on success, -ENOSPC when a packet does not fit
 * the remaining bundle space, or -EAGAIN after rolling back a bundle
 * that ended up below the minimum message count.
 *
 * NOTE(review): this body looks truncated by extraction -- 'packet'
 * and 'len' are used without ever being assigned (the code that pops
 * the head packet from 'queue' and computes its padded length is
 * missing), the for-loop's closing brace and the scat_req->len /
 * scat_entries updates also appear to be missing.  Restore from the
 * upstream driver before relying on this code.
 */
staticint ath6kl_htc_tx_setup_scat_list(struct htc_target *target, struct htc_endpoint *endpoint, struct hif_scatter_req *scat_req, int n_scat, struct list_head *queue)
{ struct htc_packet *packet; int i, len, rem_scat, cred_pad; int status = 0;
u8 flags;
rem_scat = target->max_tx_bndl_sz;
for (i = 0; i < n_scat; i++) {
scat_req->scat_list[i].packet = NULL;
/* NOTE(review): 'len' is read here but never initialized in this view */
cred_pad = htc_get_credit_padding(target->tgt_cred_sz,
&len, endpoint); if (cred_pad < 0 || rem_scat < len) {
status = -ENOSPC; break;
}
rem_scat -= len; /* now remove it from the queue */
/* NOTE(review): 'packet' is used here but never assigned in this view */
list_del(&packet->list);
scat_req->scat_list[i].packet = packet; /* prepare packet and flag message as part of a send bundle */
flags = packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE;
ath6kl_htc_tx_prep_pkt(packet, flags,
cred_pad, packet->info.tx.seqno); /* Make sure the buffer is 4-byte aligned */
ath6kl_htc_tx_buf_align(&packet->buf,
packet->act_len + HTC_HDR_LENGTH);
scat_req->scat_list[i].buf = packet->buf;
scat_req->scat_list[i].len = len;
/* Roll back scatter setup in case of any failure */ if (scat_req->scat_entries < HTC_MIN_HTC_MSGS_TO_BUNDLE) { for (i = scat_req->scat_entries - 1; i >= 0; i--) {
packet = scat_req->scat_list[i].packet; if (packet) {
/* undo the HTC header adjustment and return packet to caller */
packet->buf += HTC_HDR_LENGTH;
list_add(&packet->list, queue);
}
} return -EAGAIN;
}
return status;
}
/* * Drain a queue and send as bundles this function may return without fully * draining the queue when * * 1. scatter resources are exhausted * 2. a message that will consume a partial credit will stop the * bundling process early * 3. we drop below the minimum number of messages for a bundle
*/ staticvoid ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint, struct list_head *queue, int *sent_bundle, int *n_bundle_pkts)
{ struct htc_target *target = endpoint->target; struct hif_scatter_req *scat_req = NULL; int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0, i; struct htc_packet *packet; int status;
u32 txb_mask;
u8 ac = WMM_NUM_AC;
if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) &&
(WMI_CONTROL_SVC != endpoint->svc_id))
ac = target->dev->ar->ep2ac_map[endpoint->eid];
while (true) {
status = 0;
n_scat = get_queue_depth(queue);
n_scat = min(n_scat, target->msg_per_bndl_max);
if (n_scat < HTC_MIN_HTC_MSGS_TO_BUNDLE) /* not enough to bundle */ break;
scat_req = hif_scatter_req_get(target->dev->ar);
if (!scat_req) { /* no scatter resources */
ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx no more scatter resources\n"); break;
}
if ((ac < WMM_NUM_AC) && (ac != WMM_AC_BK)) { if (WMM_AC_BE == ac) /* * BE, BK have priorities and bit * positions reversed
*/
txb_mask = (1 << WMM_AC_BK); else /* * any AC with priority lower than * itself
*/
txb_mask = ((1 << ac) - 1);
/* * when the scatter request resources drop below a * certain threshold, disable Tx bundling for all * AC's with priority lower than the current requesting * AC. Otherwise re-enable Tx bundling for them
*/ if (scat_req->scat_q_depth < ATH6KL_SCATTER_REQS)
target->tx_bndl_mask &= ~txb_mask; else
target->tx_bndl_mask |= txb_mask;
}
ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx pkts to scatter: %d\n",
n_scat);
scat_req->len = 0;
scat_req->scat_entries = 0;
status = ath6kl_htc_tx_setup_scat_list(target, endpoint,
scat_req, n_scat,
queue); if (status == -EAGAIN) {
hif_scatter_req_add(target->dev->ar, scat_req); break;
}
/* * drain the endpoint TX queue for transmission as long * as we have enough credits.
*/
INIT_LIST_HEAD(&txq);
if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) &&
(WMI_CONTROL_SVC != endpoint->svc_id))
ac = target->dev->ar->ep2ac_map[endpoint->eid];
while (true) { if (list_empty(&endpoint->txq)) break;
ath6kl_htc_tx_pkts_get(target, endpoint, &txq);
if (list_empty(&txq)) break;
spin_unlock_bh(&target->tx_lock);
bundle_sent = 0;
n_pkts_bundle = 0;
while (true) { /* try to send a bundle on each pass */ if ((target->tx_bndl_mask) &&
(get_queue_depth(&txq) >=
HTC_MIN_HTC_MSGS_TO_BUNDLE)) { int temp1 = 0, temp2 = 0;
/* check if bundling is enabled for an AC */ if (target->tx_bndl_mask & (1 << ac)) {
ath6kl_htc_tx_bundle(endpoint, &txq,
&temp1, &temp2);
bundle_sent += temp1;
n_pkts_bundle += temp2;
}
}
/* * if an AC has bundling disabled and no tx bundling * has occurred continuously for a certain number of TX, * enable tx bundling for this AC
*/ if (!bundle_sent) { if (!(target->tx_bndl_mask & (1 << ac)) &&
(ac < WMM_NUM_AC)) { if (++target->ac_tx_count[ac] >=
TX_RESUME_BUNDLE_THRESHOLD) {
target->ac_tx_count[ac] = 0;
target->tx_bndl_mask |= (1 << ac);
}
}
} else { /* tx bundling will reset the counter */ if (ac < WMM_NUM_AC)
target->ac_tx_count[ac] = 0;
}
}
/* * Run through the credit distribution list to see if there are * packets queued. NOTE: no locks need to be taken since the * distribution list is not dynamic (cannot be re-ordered) and we * are not modifying any state.
*/
list_for_each_entry(cred_dist, &target->cred_dist_list, list) {
endpoint = cred_dist->htc_ep;
spin_lock_bh(&target->tx_lock); if (!list_empty(&endpoint->txq)) {
ath6kl_dbg(ATH6KL_DBG_HTC, "htc creds ep %d credits %d pkts %d\n",
cred_dist->endpoint,
endpoint->cred_dist.credits,
get_queue_depth(&endpoint->txq));
spin_unlock_bh(&target->tx_lock); /* * Try to start the stalled queue, this list is * ordered by priority. If there are credits * available the highest priority queue will get a * chance to reclaim credits from lower priority * ones.
*/
ath6kl_htc_tx_from_queue(target, endpoint);
spin_lock_bh(&target->tx_lock);
}
spin_unlock_bh(&target->tx_lock);
}
}
/*
 * Flush pending tx packets on every connected endpoint.
 * Dumps credit-distribution stats first, then flushes each endpoint
 * that has a service bound to it with the catch-all packet tag.
 */
static void ath6kl_htc_flush_txep_all(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	int i;

	dump_cred_dist_stats(target);

	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
		endpoint = &target->endpoint[i];

		/* not in use.. */
		if (endpoint->svc_id == 0)
			continue;

		ath6kl_htc_mbox_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL);
	}
}
status = hif_read_write_sync(dev->ar,
dev->ar->mbox_info.htc_addr,
packet->buf, padded_len,
HIF_RD_SYNC_BLOCK_FIX);
packet->status = status;
return status;
}
/*
 * optimization for recv packets, we can indicate a
 * "hint" that there are more single-packets to fetch
 * on this endpoint.
 */
static void ath6kl_htc_rx_set_indicate(u32 lk_ahd,
				       struct htc_endpoint *endpoint,
				       struct htc_packet *packet)
{
	/*
	 * NOTE(review): the u32 look-ahead word is reinterpreted as an
	 * HTC frame header; presumably struct htc_frame_hdr is packed
	 * and <= 4 bytes up to 'eid' -- confirm against its definition.
	 */
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)&lk_ahd;

	if (htc_hdr->eid == packet->endpoint) {
		if (!list_empty(&endpoint->rx_bufq))
			packet->info.rx.indicat_flags |=
				HTC_RX_FLAGS_INDICATE_MORE_PKTS;
	}
}
if (ep_cb.rx_refill_thresh > 0) {
spin_lock_bh(&endpoint->target->rx_lock); if (get_queue_depth(&endpoint->rx_bufq)
< ep_cb.rx_refill_thresh) {
spin_unlock_bh(&endpoint->target->rx_lock);
ep_cb.rx_refill(endpoint->target, endpoint->eid); return;
}
spin_unlock_bh(&endpoint->target->rx_lock);
}
}
/* This function is called with rx_lock held */ staticint ath6kl_htc_rx_setup(struct htc_target *target, struct htc_endpoint *ep,
u32 *lk_ahds, struct list_head *queue, int n_msg)
{ struct htc_packet *packet; /* FIXME: type of lk_ahds can't be right */ struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)lk_ahds; struct htc_ep_callbacks ep_cb; int status = 0, j, full_len; bool no_recycle;
if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) {
ath6kl_warn("Rx buffer requested with invalid length htc_hdr:eid %d, flags 0x%x, len %d\n",
htc_hdr->eid, htc_hdr->flags,
le16_to_cpu(htc_hdr->payld_len)); return -EINVAL;
}
ep_cb = ep->ep_cb; for (j = 0; j < n_msg; j++) { /* * Reset flag, any packets allocated using the * rx_alloc() API cannot be recycled on * cleanup,they must be explicitly returned.
*/
no_recycle = false;
if (no_recycle) /* * flag that these packets cannot be * recycled, they have to be returned to * the user
*/
packet->info.rx.rx_flags |= HTC_RX_PKT_NO_RECYCLE;
/* Caller needs to free this upon any failure */
list_add_tail(&packet->list, queue);
if (target->htc_flags & HTC_OP_STATE_STOPPING) {
status = -ECANCELED; break;
}
if (j) {
packet->info.rx.rx_flags |= HTC_RX_PKT_REFRESH_HDR;
packet->info.rx.exp_hdr = 0xFFFFFFFF;
} else /* set expected look ahead */
packet->info.rx.exp_hdr = *lk_ahds;
staticint ath6kl_htc_rx_alloc(struct htc_target *target,
u32 lk_ahds[], int msg, struct htc_endpoint *endpoint, struct list_head *queue)
{ int status = 0; struct htc_packet *packet, *tmp_pkt; struct htc_frame_hdr *htc_hdr; int i, n_msg;
spin_lock_bh(&target->rx_lock);
for (i = 0; i < msg; i++) {
htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];
if (htc_hdr->eid >= ENDPOINT_MAX) {
ath6kl_err("invalid ep in look-ahead: %d\n",
htc_hdr->eid);
status = -ENOMEM; break;
}
if (htc_hdr->eid != endpoint->eid) {
ath6kl_err("invalid ep in look-ahead: %d should be : %d (index:%d)\n",
htc_hdr->eid, endpoint->eid, i);
status = -ENOMEM; break;
}
if (le16_to_cpu(htc_hdr->payld_len) > HTC_MAX_PAYLOAD_LENGTH) {
ath6kl_err("payload len %d exceeds max htc : %d !\n",
htc_hdr->payld_len,
(u32) HTC_MAX_PAYLOAD_LENGTH);
status = -ENOMEM; break;
}
if (endpoint->svc_id == 0) {
ath6kl_err("ep %d is not connected !\n", htc_hdr->eid);
status = -ENOMEM; break;
}
if (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) { /* * HTC header indicates that every packet to follow * has the same padded length so that it can be * optimally fetched as a full bundle.
*/
n_msg = (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) >>
HTC_FLG_RX_BNDL_CNT_S;
/* the count doesn't include the starter frame */
n_msg++; if (n_msg > target->msg_per_bndl_max) {
status = -ENOMEM; break;
}
/* Setup packet buffers for each message */
status = ath6kl_htc_rx_setup(target, endpoint, &lk_ahds[i],
queue, n_msg);
/* * This is due to unavailability of buffers to rx entire data. * Return no error so that free buffers from queue can be used * to receive partial data.
*/ if (status == -ENOSPC) {
spin_unlock_bh(&target->rx_lock); return 0;
}
if (from_ep == rpt->eid) { /* * This credit report arrived on the same endpoint * indicating it arrived in an RX packet.
*/
endpoint->ep_st.cred_from_rx += rpt->credits;
endpoint->ep_st.cred_rpt_from_rx += 1;
} elseif (from_ep == ENDPOINT_0) { /* credit arrived on endpoint 0 as a NULL message */
endpoint->ep_st.cred_from_ep0 += rpt->credits;
endpoint->ep_st.cred_rpt_ep0 += 1;
} else {
endpoint->ep_st.cred_from_other += rpt->credits;
endpoint->ep_st.cred_rpt_from_other += 1;
}
if (rpt->eid == ENDPOINT_0) /* always give endpoint 0 credits back */
endpoint->cred_dist.credits += rpt->credits; else {
endpoint->cred_dist.cred_to_dist += rpt->credits;
dist = true;
}
/* * Refresh tx depth for distribution function that will * recover these credits NOTE: this is only valid when * there are credits to recover!
*/
endpoint->cred_dist.txq_depth =
get_queue_depth(&endpoint->txq);
tot_credits += rpt->credits;
}
if (dist) { /* * This was a credit return based on a completed send * operations note, this is done with the lock held
*/
ath6kl_credit_distribute(target->credit_info,
&target->cred_dist_list,
HTC_CREDIT_DIST_SEND_COMPLETE);
}
while (len > 0) { if (len < sizeof(struct htc_record_hdr)) {
status = -ENOMEM; break;
} /* these are byte aligned structs */
record = (struct htc_record_hdr *) buf;
len -= sizeof(struct htc_record_hdr);
buf += sizeof(struct htc_record_hdr);
if (record->len > len) {
ath6kl_err("invalid record len: %d (id:%d) buf has: %d bytes left\n",
record->len, record->rec_id, len);
status = -ENOMEM; break;
}
record_buf = buf;
status = htc_parse_trailer(target, record, record_buf,
next_lk_ahds, endpoint, n_lk_ahds);
if (status) break;
/* advance buffer past this record for next time around */
buf += record->len;
len -= record->len;
}
if (status)
ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad trailer", "", orig_buf, orig_len);
return status;
}
staticint ath6kl_htc_rx_process_hdr(struct htc_target *target, struct htc_packet *packet,
u32 *next_lkahds, int *n_lkahds)
{ int status = 0;
u16 payload_len;
u32 lk_ahd; struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)packet->buf;
if (n_lkahds != NULL)
*n_lkahds = 0;
/* * NOTE: we cannot assume the alignment of buf, so we use the safe * macros to retrieve 16 bit fields.
*/
payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));
if (packet->info.rx.rx_flags & HTC_RX_PKT_REFRESH_HDR) { /* * Refresh the expected header and the actual length as it * was unknown when this packet was grabbed as part of the * bundle.
*/
packet->info.rx.exp_hdr = lk_ahd;
packet->act_len = payload_len + HTC_HDR_LENGTH;
/* validate the actual header that was refreshed */ if (packet->act_len > packet->buf_len) {
ath6kl_err("refreshed hdr payload len (%d) in bundled recv is invalid (hdr: 0x%X)\n",
payload_len, lk_ahd); /* * Limit this to max buffer just to print out some * of the buffer.
*/
packet->act_len = min(packet->act_len, packet->buf_len);
status = -ENOMEM; goto fail_rx;
}
if (packet->endpoint != htc_hdr->eid) {
ath6kl_err("refreshed hdr ep (%d) does not match expected ep (%d)\n",
htc_hdr->eid, packet->endpoint);
status = -ENOMEM; goto fail_rx;
}
}
if ((get_queue_depth(rxq) - n_scat_pkt) > 0) { /* * We were forced to split this bundle receive operation * all packets in this partial bundle must have their * lookaheads ignored.
*/
part_bundle = true;
/* * This would only happen if the target ignored our max * bundle limit.
*/
ath6kl_warn("%s(): partial bundle detected num:%d , %d\n",
__func__, get_queue_depth(rxq), n_scat_pkt);
}
if (part_bundle || (i < (n_scat_pkt - 1))) /* * Packet 0..n-1 cannot be checked for look-aheads * since we are fetching a bundle the last packet * however can have it's lookahead used
*/
packet->info.rx.rx_flags |=
HTC_RX_PKT_IGNORE_LOOKAHEAD;
/* process header for each of the recv packet */
status = ath6kl_htc_rx_process_hdr(target, packet, lk_ahds,
n_lk_ahd); if (status) return status;
list_del(&packet->list);
if (list_empty(comp_pktq)) { /* * Last packet's more packet flag is set * based on the lookahead.
*/ if (*n_lk_ahd > 0)
ath6kl_htc_rx_set_indicate(lk_ahds[0],
ep, packet);
} else /* * Packets in a bundle automatically have * this flag set.
*/
packet->info.rx.indicat_flags |=
HTC_RX_FLAGS_INDICATE_MORE_PKTS;
ath6kl_htc_rx_update_stats(ep, *n_lk_ahd);
if (packet->info.rx.rx_flags & HTC_RX_PKT_PART_OF_BUNDLE)
ep->ep_st.rx_bundl += 1;
ath6kl_htc_rx_complete(ep, packet);
}
return status;
}
staticint ath6kl_htc_rx_fetch(struct htc_target *target, struct list_head *rx_pktq, struct list_head *comp_pktq)
{ int fetched_pkts; bool part_bundle = false; int status = 0; struct list_head tmp_rxq; struct htc_packet *packet, *tmp_pkt;
/* now go fetch the list of HTC packets */ while (!list_empty(rx_pktq)) {
fetched_pkts = 0;
INIT_LIST_HEAD(&tmp_rxq);
if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) { /* * There are enough packets to attempt a * bundle transfer and recv bundling is * allowed.
*/
status = ath6kl_htc_rx_bundle(target, rx_pktq,
&tmp_rxq,
&fetched_pkts,
part_bundle); if (status) goto fail_rx;
if (!list_empty(rx_pktq))
part_bundle = true;
list_splice_tail_init(&tmp_rxq, comp_pktq);
}
if (!fetched_pkts) {
packet = list_first_entry(rx_pktq, struct htc_packet,
list);
if (!list_is_singular(rx_pktq)) /* * look_aheads in all packet * except the last one in the * bundle must be ignored
*/
packet->info.rx.rx_flags |=
HTC_RX_PKT_IGNORE_LOOKAHEAD;
/* go fetch the packet */
status = ath6kl_htc_rx_packet(target, packet,
packet->act_len);
list_move_tail(&packet->list, &tmp_rxq);
if (status) goto fail_rx;
list_splice_tail_init(&tmp_rxq, comp_pktq);
}
}
return 0;
fail_rx:
/* * Cleanup any packets we allocated but didn't use to * actually fetch any packets.
*/
/*
 * Main entry point for handling pending receive messages signalled by
 * a target look-ahead word: allocate rx packets per look-ahead, fetch
 * them (bundled when possible), process their headers/trailers, and
 * loop while new look-aheads keep arriving.  On exit *num_pkts holds
 * the number of packets fetched and the return value is 0 or a
 * negative error.
 *
 * NOTE(review): 'rx_pktq' is declared but never INIT_LIST_HEAD()'d in
 * this view -- TODO confirm against the upstream driver whether the
 * initialization was lost in extraction.
 */
int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
u32 msg_look_ahead, int *num_pkts)
{ struct htc_packet *packets, *tmp_pkt; struct htc_endpoint *endpoint; struct list_head rx_pktq, comp_pktq; int status = 0;
u32 look_aheads[HTC_HOST_MAX_MSG_PER_BUNDLE]; int num_look_ahead = 1; enum htc_endpoint_id id; int n_fetched = 0;
INIT_LIST_HEAD(&comp_pktq);
*num_pkts = 0;
/* * On first entry copy the look_aheads into our temp array for * processing
*/
look_aheads[0] = msg_look_ahead;
while (true) { /* * First lookahead sets the expected endpoint IDs for all * packets in a bundle.
*/
id = ((struct htc_frame_hdr *)&look_aheads[0])->eid;
/*
 * NOTE(review): the endpoint address is taken before the bounds
 * check below; taking the address of an out-of-range element is
 * questionable even though it is not dereferenced until after
 * the check.
 */
endpoint = &target->endpoint[id];
if (id >= ENDPOINT_MAX) {
ath6kl_err("MsgPend, invalid endpoint in look-ahead: %d\n",
id);
status = -ENOMEM; break;
}
/* * Try to allocate as many HTC RX packets indicated by the * look_aheads.
*/
status = ath6kl_htc_rx_alloc(target, look_aheads,
num_look_ahead, endpoint,
&rx_pktq); if (status) break;
if (get_queue_depth(&rx_pktq) >= 2) /* * A recv bundle was detected, force IRQ status * re-check again
*/
target->chk_irq_status_cnt = 1;
n_fetched += get_queue_depth(&rx_pktq);
/* look-aheads are consumed once packets have been allocated */
num_look_ahead = 0;
status = ath6kl_htc_rx_fetch(target, &rx_pktq, &comp_pktq);
if (!status)
ath6kl_htc_rx_chk_water_mark(endpoint);
/* Process fetched packets */
status = ath6kl_htc_rx_process_packets(target, &comp_pktq,
look_aheads,
&num_look_ahead);
/* stop when no further look-ahead arrived or on error */
if (!num_look_ahead || status) break;
/* * For SYNCH processing, if we get here, we are running * through the loop again due to a detected lookahead. Set * flag that we should re-check IRQ status registers again * before leaving IRQ processing, this can net better * performance in high throughput situations.
*/
target->chk_irq_status_cnt = 1;
}
if (status) { if (status != -ECANCELED)
ath6kl_err("failed to get pending recv messages: %d\n",
status);
/* cleanup any packets in sync completion queue */
list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) {
list_del(&packets->list);
htc_reclaim_rxbuf(target, packets,
&target->endpoint[packets->endpoint]);
}
if (target->htc_flags & HTC_OP_STATE_STOPPING) {
ath6kl_warn("host is going to stop blocking receiver for htc_stop\n");
ath6kl_hif_rx_control(target->dev, false);
}
}
/* * Before leaving, check to see if host ran out of buffers and * needs to stop the receiver.
*/ if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
ath6kl_warn("host has no rx buffers blocking receiver to prevent overrun\n");
ath6kl_hif_rx_control(target->dev, false);
}
*num_pkts = n_fetched;
return status;
}
/* * Synchronously wait for a control message from the target, * This function is used at initialization time ONLY. At init messages * on ENDPOINT 0 are expected.
*/ staticstruct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
{ struct htc_packet *packet = NULL; struct htc_frame_look_ahead look_ahead;
if (ath6kl_hif_poll_mboxmsg_rx(target->dev, &look_ahead.word,
HTC_TARGET_RESPONSE_TIMEOUT)) return NULL;
if (target->htc_flags & HTC_OP_STATE_STOPPING) { struct htc_packet *packet, *tmp_pkt;
/* walk through queue and mark each one canceled */
list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
packet->status = -ECANCELED;
list_del(&packet->list);
ath6kl_htc_rx_complete(endpoint, packet);
}
/* check if we are blocked waiting for a new buffer */ if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) { if (target->ep_waiting == first_pkt->endpoint) {
ath6kl_dbg(ATH6KL_DBG_HTC, "htc rx blocked on ep %d, unblocking\n",
target->ep_waiting);
target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS;
target->ep_waiting = ENDPOINT_MAX;
rx_unblock = true;
}
}
spin_unlock_bh(&target->rx_lock);
if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING)) /* TODO : implement a buffer threshold count? */
ath6kl_hif_rx_control(target->dev, true);
for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
endpoint = &target->endpoint[i]; if (!endpoint->svc_id) /* not in use.. */ continue;
spin_lock_bh(&target->rx_lock);
list_for_each_entry_safe(packet, tmp_pkt,
&endpoint->rx_bufq, list) {
list_del(&packet->list);
spin_unlock_bh(&target->rx_lock);
ath6kl_dbg(ATH6KL_DBG_HTC, "htc rx flush pkt 0x%p len %d ep %d\n",
packet, packet->buf_len,
packet->endpoint); /* * packets in rx_bufq of endpoint 0 have originally * been queued from target->free_ctrl_rxbuf where * packet and packet->buf_start are allocated * separately using kmalloc(). For other endpoint * rx_bufq, it is allocated as skb where packet is * skb->head. Take care of this difference while freeing * the memory.
*/ if (packet->endpoint == ENDPOINT_0) {
kfree(packet->buf_start);
kfree(packet);
} else {
dev_kfree_skb(packet->pkt_cntxt);
}
spin_lock_bh(&target->rx_lock);
}
spin_unlock_bh(&target->rx_lock);
}
}
ath6kl_dbg(ATH6KL_DBG_HTC, "htc connect service target 0x%p service id 0x%x\n",
target, conn_req->svc_id);
if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) { /* special case for pseudo control service */
assigned_ep = ENDPOINT_0;
max_msg_sz = HTC_MAX_CTRL_MSG_LEN;
} else { /* allocate a packet to send to the target */
tx_pkt = htc_get_control_buf(target, true);
/*
 * NOTE(review): the following German text is website boilerplate that
 * was accidentally captured along with the source and is not part of
 * the driver.  Translation: "The information on this web page was
 * carefully compiled to the best of our knowledge.  However, neither
 * completeness, nor correctness, nor quality of the provided
 * information is guaranteed.  Remark: the colored syntax highlighting
 * and the measurement are still experimental."
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereit gestellten Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */