if (mmc->ops->request_atomic)
ret = mmc->ops->request_atomic(mmc, hsq->mrq); else
mmc->ops->request(mmc, hsq->mrq);
/* * If returning BUSY from request_atomic(), which means the card * may be busy now, and we should change to non-atomic context to * try again for this unusual case, to avoid time-consuming operations * in the atomic context. * * Note: we just give a warning for other error cases, since the host * driver will handle them.
*/ if (ret == -EBUSY)
schedule_work(&hsq->retry_work); else
WARN_ON_ONCE(ret);
}
/*
 * mmc_hsq_update_next_tag - advance hsq->next_tag to the next pending request.
 * @hsq: the hardware software queue instance
 * @remains: number of requests still pending in the software queue
 *
 * NOTE(review): both callers in this file invoke this with hsq->lock held;
 * the tag_slot[] chain must not be walked unlocked.
 */
static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
{
	int tag;

	/*
	 * If there are no remaining requests in the software queue, then set an
	 * invalid tag.
	 */
	if (!remains) {
		hsq->next_tag = HSQ_INVALID_TAG;
		hsq->tail_tag = HSQ_INVALID_TAG;
		return;
	}

	/*
	 * tag_slot[] forms a singly-linked list of tags: pop the successor of
	 * the current head and clear the consumed link.
	 */
	tag = hsq->tag_slot[hsq->next_tag];
	hsq->tag_slot[hsq->next_tag] = HSQ_INVALID_TAG;
	hsq->next_tag = tag;
}
/*
 * mmc_hsq_post_request - finish bookkeeping after a request completes.
 * @hsq: the hardware software queue instance
 *
 * Clears the in-flight request, advances the next tag, wakes up any waiter
 * that is draining the queue, and pumps the next pending request unless the
 * queue is halted for recovery.
 */
static void mmc_hsq_post_request(struct mmc_hsq *hsq)
{
	unsigned long flags;
	int remains;

	spin_lock_irqsave(&hsq->lock, flags);

	remains = hsq->qcnt;
	hsq->mrq = NULL;

	/* Update the next available tag to be queued. */
	mmc_hsq_update_next_tag(hsq, remains);

	if (hsq->waiting_for_idle && !remains) {
		hsq->waiting_for_idle = false;
		wake_up(&hsq->wait_queue);
	}

	/* Do not pump new request in recovery mode. */
	if (hsq->recovery_halt) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	spin_unlock_irqrestore(&hsq->lock, flags);

	/*
	 * Try to pump new request to host controller as fast as possible,
	 * after completing previous request.
	 */
	if (remains > 0)
		mmc_hsq_pump_requests(hsq);
}
/** * mmc_hsq_finalize_request - finalize one request if the request is done * @mmc: the host controller * @mrq: the request need to be finalized * * Return true if we finalized the corresponding request in software queue, * otherwise return false.
*/ bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq)
{ struct mmc_hsq *hsq = mmc->cqe_private; unsignedlong flags;
/* * Try to pump new request if there are request pending in software * queue after finishing recovery.
*/ if (remains > 0)
mmc_hsq_pump_requests(hsq);
}
/*
 * mmc_hsq_request - queue one request into the software queue.
 * @mmc: the host controller
 * @mrq: the request to queue; mrq->tag selects the slot
 *
 * Returns 0 on success, -ESHUTDOWN if the queue is disabled, or -EBUSY while
 * in recovery mode.
 *
 * NOTE(review): the tail of this function (after the tag-chain update) was
 * truncated in the file and replaced by unrelated junk text; the standard
 * mmc_hsq ending (count the request, drop the lock, pump) has been restored —
 * verify against the project's upstream history.
 */
static int mmc_hsq_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int tag = mrq->tag;

	spin_lock_irq(&hsq->lock);

	if (!hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return -ESHUTDOWN;
	}

	/* Do not queue any new requests in recovery mode. */
	if (hsq->recovery_halt) {
		spin_unlock_irq(&hsq->lock);
		return -EBUSY;
	}

	hsq->slot[tag].mrq = mrq;

	/*
	 * Set the next tag as current request tag if no available
	 * next tag.
	 */
	if (hsq->next_tag == HSQ_INVALID_TAG) {
		hsq->next_tag = tag;
		hsq->tail_tag = tag;
		hsq->tag_slot[hsq->tail_tag] = HSQ_INVALID_TAG;
	} else {
		/* Append to the tail of the tag_slot[] linked list. */
		hsq->tag_slot[hsq->tail_tag] = tag;
		hsq->tail_tag = tag;
	}

	hsq->qcnt++;

	spin_unlock_irq(&hsq->lock);

	mmc_hsq_pump_requests(hsq);

	return 0;
}