/**
 * mei_me_cl_put - decrease me client refcount and free client if necessary
 *
 * @me_cl: me client (may be NULL; a NULL client is ignored)
 *
 * Drops one reference; mei_me_cl_release() runs when the count hits zero.
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_put(struct mei_me_client *me_cl)
{
	if (!me_cl)
		return;

	kref_put(&me_cl->refcnt, mei_me_cl_release);
}
/** * __mei_me_cl_del - delete me client from the list and decrease * reference counter * * @dev: mei device * @me_cl: me client * * Locking: dev->me_clients_rwsem
*/ staticvoid __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{ if (!me_cl) return;
/** * mei_me_cl_by_id - locate me client by client id * increases ref count * * @dev: the device structure * @client_id: me client id * * Return: me client or NULL if not found * * Locking: dev->me_clients_rwsem
*/ struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
{
/** * __mei_me_cl_by_uuid_id - locate me client by client id and uuid * increases ref count * * @dev: the device structure * @uuid: me client uuid * @client_id: me client id * * Return: me client or null if not found * * Locking: dev->me_clients_rwsem
*/ staticstruct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 client_id)
{ struct mei_me_client *me_cl; const uuid_le *pn;
/** * mei_me_cl_by_uuid_id - locate me client by client id and uuid * increases ref count * * @dev: the device structure * @uuid: me client uuid * @client_id: me client id * * Return: me client or null if not found
*/ struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 client_id)
{ struct mei_me_client *me_cl;
/** * mei_tx_cb_enqueue - queue tx callback * * @cb: mei callback struct * @head: an instance of list to queue on * * Locking: called under "dev->device_lock" lock
*/ staticinlinevoid mei_tx_cb_enqueue(struct mei_cl_cb *cb, struct list_head *head)
{
list_add_tail(&cb->list, head);
cb->cl->tx_cb_queued++;
}
/** * mei_tx_cb_dequeue - dequeue tx callback * * @cb: mei callback struct to dequeue and free * * Locking: called under "dev->device_lock" lock
*/ staticinlinevoid mei_tx_cb_dequeue(struct mei_cl_cb *cb)
{ if (!WARN_ON(cb->cl->tx_cb_queued == 0))
cb->cl->tx_cb_queued--;
mei_io_cb_free(cb);
}
/** * mei_cl_set_read_by_fp - set pending_read flag to vtag struct for given fp * * @cl: mei client * @fp: pointer to file structure * * Locking: called under "dev->device_lock" lock
*/ staticvoid mei_cl_set_read_by_fp(conststruct mei_cl *cl, conststruct file *fp)
{ struct mei_cl_vtag *cl_vtag;
/** * mei_cl_enqueue_ctrl_wr_cb - a convenient wrapper for allocating * and enqueuing of the control commands cb * * @cl: host client * @length: size of the buffer * @fop_type: operation type * @fp: associated file pointer (might be NULL) * * Return: cb on success and NULL on failure * Locking: called under "dev->device_lock" lock
*/ struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length, enum mei_cb_file_ops fop_type, conststruct file *fp)
{ struct mei_cl_cb *cb;
/* for RX always allocate at least client's mtu */ if (length)
length = max_t(size_t, length, mei_cl_mtu(cl));
cb = mei_cl_alloc_cb(cl, length, fop_type, fp); if (!cb) return NULL;
/** * mei_cl_read_cb - find this cl's callback in the read list * for a specific file * * @cl: host client * @fp: file pointer (matching cb file object), may be NULL * * Return: cb on success, NULL if cb is not found
*/ struct mei_cl_cb *mei_cl_read_cb(struct mei_cl *cl, conststruct file *fp)
{ struct mei_cl_cb *cb; struct mei_cl_cb *ret_cb = NULL;
/** * mei_cl_flush_queues - flushes queue lists belonging to cl. * * @cl: host client * @fp: file pointer (matching cb file object), may be NULL * * Return: 0 on success, -EINVAL if cl or cl->dev is NULL.
*/ int mei_cl_flush_queues(struct mei_cl *cl, conststruct file *fp)
{ struct mei_device *dev;
if (WARN_ON(!cl || !cl->dev)) return -EINVAL;
dev = cl->dev;
cl_dbg(dev, cl, "remove list entry belonging to cl\n");
mei_io_tx_list_free_cl(&cl->dev->write_list, cl, fp);
mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl, fp); /* free pending and control cb only in final flush */ if (!fp) {
mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl);
mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl);
mei_cl_free_pending(cl);
}
spin_lock(&cl->rd_completed_lock);
mei_io_list_free_fp(&cl->rd_completed, fp);
spin_unlock(&cl->rd_completed_lock);
/** * mei_cl_allocate - allocates cl structure and sets it up. * * @dev: mei device * Return: The allocated file or NULL on failure
*/ struct mei_cl *mei_cl_allocate(struct mei_device *dev)
{ struct mei_cl *cl;
cl = kmalloc(sizeof(*cl), GFP_KERNEL); if (!cl) return NULL;
mei_cl_init(cl, dev);
return cl;
}
/** * mei_cl_link - allocate host id in the host map * * @cl: host client * * Return: 0 on success * -EINVAL on incorrect values * -EMFILE if open count exceeded.
*/ int mei_cl_link(struct mei_cl *cl)
{ struct mei_device *dev; int id;
if (WARN_ON(!cl || !cl->dev)) return -EINVAL;
dev = cl->dev;
id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX); if (id >= MEI_CLIENTS_MAX) {
dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX); return -EMFILE;
}
/** * mei_hbuf_acquire - try to acquire host buffer * * @dev: the device structure * Return: true if host buffer was acquired
*/ bool mei_hbuf_acquire(struct mei_device *dev)
{ if (mei_pg_state(dev) == MEI_PG_ON ||
mei_pg_in_transition(dev)) {
dev_dbg(dev->dev, "device is in pg\n"); returnfalse;
}
if (!dev->hbuf_is_ready) {
dev_dbg(dev->dev, "hbuf is not ready\n"); returnfalse;
}
dev->hbuf_is_ready = false;
returntrue;
}
/** * mei_cl_wake_all - wake up readers, writers and event waiters so * they can be interrupted * * @cl: host client
*/ staticvoid mei_cl_wake_all(struct mei_cl *cl)
{ struct mei_device *dev = cl->dev;
/* synchronized under device mutex */ if (waitqueue_active(&cl->rx_wait)) {
cl_dbg(dev, cl, "Waking up reading client!\n");
wake_up_interruptible(&cl->rx_wait);
} /* synchronized under device mutex */ if (waitqueue_active(&cl->tx_wait)) {
cl_dbg(dev, cl, "Waking up writing client!\n");
wake_up_interruptible(&cl->tx_wait);
} /* synchronized under device mutex */ if (waitqueue_active(&cl->ev_wait)) {
cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
wake_up_interruptible(&cl->ev_wait);
} /* synchronized under device mutex */ if (waitqueue_active(&cl->wait)) {
cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
wake_up(&cl->wait);
}
}
/** * mei_cl_set_disconnected - set disconnected state and clear * associated states and resources * * @cl: host client
*/ staticvoid mei_cl_set_disconnected(struct mei_cl *cl)
{ struct mei_device *dev = cl->dev;
if (cl->state == MEI_FILE_DISCONNECTED ||
cl->state <= MEI_FILE_INITIALIZING) return;
/* only one connection is allowed for fixed address clients */ if (me_cl->props.fixed_address) { if (me_cl->connect_count) {
mei_me_cl_put(me_cl); return -EBUSY;
}
}
ret = mei_cl_send_disconnect(cl, cb); if (ret)
list_move_tail(&cb->list, cmpl_list);
return ret;
}
/** * __mei_cl_disconnect - disconnect host client from the me one * internal function runtime pm has to be already acquired * * @cl: host client * * Return: 0 on success, <0 on failure.
*/ staticint __mei_cl_disconnect(struct mei_cl *cl)
{ struct mei_device *dev; struct mei_cl_cb *cb; int rets;
rets = cl->status; if (cl->state != MEI_FILE_DISCONNECT_REPLY &&
cl->state != MEI_FILE_DISCONNECTED) {
cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
rets = -ETIME;
}
out: /* we disconnect also on error */
mei_cl_set_disconnected(cl); if (!rets)
cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
mei_io_cb_free(cb); return rets;
}
/** * mei_cl_disconnect - disconnect host client from the me one * * @cl: host client * * Locking: called under "dev->device_lock" lock * * Return: 0 on success, <0 on failure.
*/ int mei_cl_disconnect(struct mei_cl *cl)
{ struct mei_device *dev; int rets;
if (WARN_ON(!cl || !cl->dev)) return -ENODEV;
dev = cl->dev;
cl_dbg(dev, cl, "disconnecting");
if (!mei_cl_is_connected(cl)) return 0;
if (mei_cl_is_fixed_address(cl)) {
mei_cl_set_disconnected(cl); return 0;
}
if (dev->dev_state == MEI_DEV_POWERING_DOWN ||
dev->dev_state == MEI_DEV_POWER_DOWN) {
cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n");
mei_cl_set_disconnected(cl); return 0;
}
/** * mei_cl_is_other_connecting - checks if other * client with the same me client id is connecting * * @cl: private data of the file object * * Return: true if other client is connected, false - otherwise.
*/ staticbool mei_cl_is_other_connecting(struct mei_cl *cl)
{ struct mei_device *dev; struct mei_cl_cb *cb;
/* run hbuf acquire last so we don't have to undo */ if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
rets = mei_cl_send_connect(cl, cb); if (rets) goto out;
}
if (!mei_cl_is_connected(cl)) { if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
mei_io_list_flush_cl(&dev->ctrl_wr_list, cl); /* ignore disconnect return valuue; * in case of failure reset will be invoked
*/
__mei_cl_disconnect(cl);
rets = -EFAULT; goto out;
}
/* timeout or something went really wrong */ if (!cl->status)
cl->status = -EFAULT;
}
/** * mei_cl_fp_by_vtag - obtain the file pointer by vtag * * @cl: host client * @vtag: virtual tag * * Return: * * A file pointer - on success * * ERR_PTR(-ENOENT) if vtag is not found in the client vtag list
*/ conststruct file *mei_cl_fp_by_vtag(conststruct mei_cl *cl, u8 vtag)
{ struct mei_cl_vtag *vtag_l;
list_for_each_entry(vtag_l, &cl->vtag_map, list) /* The client on bus has one fixed fp */ if ((cl->cldev && mei_cldev_enabled(cl->cldev)) ||
vtag_l->vtag == vtag) return vtag_l->fp;
return ERR_PTR(-ENOENT);
}
/** * mei_cl_reset_read_by_vtag - reset pending_read flag by given vtag * * @cl: host client * @vtag: vm tag
*/ staticvoid mei_cl_reset_read_by_vtag(conststruct mei_cl *cl, u8 vtag)
{ struct mei_cl_vtag *vtag_l;
list_for_each_entry(vtag_l, &cl->vtag_map, list) { /* The client on bus has one fixed vtag map */ if ((cl->cldev && mei_cldev_enabled(cl->cldev)) ||
vtag_l->vtag == vtag) {
vtag_l->pending_read = false; break;
}
}
}
/** * mei_cl_read_vtag_add_fc - add flow control for next pending reader * in the vtag list * * @cl: host client
*/ staticvoid mei_cl_read_vtag_add_fc(struct mei_cl *cl)
{ struct mei_cl_vtag *cl_vtag;
list_for_each_entry(cl_vtag, &cl->vtag_map, list) { if (cl_vtag->pending_read) { if (mei_cl_enqueue_ctrl_wr_cb(cl,
mei_cl_mtu(cl),
MEI_FOP_READ,
cl_vtag->fp))
cl->rx_flow_ctrl_creds++; break;
}
}
}
/** * mei_cl_vt_support_check - check if client support vtags * * @cl: host client * * Return: * * 0 - supported, or not connected at all * * -EOPNOTSUPP - vtags are not supported by client
*/ int mei_cl_vt_support_check(conststruct mei_cl *cl)
{ struct mei_device *dev = cl->dev;
cl_dbg(dev, cl, "notify event");
cl->notify_ev = true; if (!mei_cl_bus_notify_event(cl))
wake_up_interruptible(&cl->ev_wait);
if (cl->ev_async)
kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);
}
/** * mei_cl_notify_get - get or wait for notification event * * @cl: host client * @block: this request is blocking * @notify_ev: true if notification event was received * * Locking: called under "dev->device_lock" lock * * Return: 0 on such and error otherwise.
*/ int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
{ struct mei_device *dev; int rets;
*notify_ev = false;
if (WARN_ON(!cl || !cl->dev)) return -ENODEV;
dev = cl->dev;
if (!dev->hbm_f_ev_supported) {
cl_dbg(dev, cl, "notifications not supported\n"); return -EOPNOTSUPP;
}
/** * Split the message only if we can write the whole host buffer * otherwise wait for next time the host buffer is empty.
*/ if (hdr_len + buf_len <= hbuf_len) {
data_len = buf_len;
mei_hdr->msg_complete = 1;
} elseif (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
mei_hdr->dma_ring = 1; if (buf_len > dr_len)
buf_len = dr_len; else
mei_hdr->msg_complete = 1;
/** * mei_cl_complete - processes completed operation for a client * * @cl: private data of the file object. * @cb: callback block.
*/ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
{ struct mei_device *dev = cl->dev;
switch (cb->fop_type) { case MEI_FOP_WRITE:
mei_tx_cb_dequeue(cb);
cl->writing_state = MEI_WRITE_COMPLETE; if (waitqueue_active(&cl->tx_wait)) {
wake_up_interruptible(&cl->tx_wait);
} else {
pm_runtime_mark_last_busy(dev->dev);
pm_request_autosuspend(dev->dev);
} break;
case MEI_FOP_READ:
mei_cl_add_rd_completed(cl, cb); if (!mei_cl_is_fixed_address(cl) &&
!WARN_ON(!cl->rx_flow_ctrl_creds))
cl->rx_flow_ctrl_creds--; if (!mei_cl_bus_rx_event(cl))
wake_up_interruptible(&cl->rx_wait); break;
case MEI_FOP_CONNECT: case MEI_FOP_DISCONNECT: case MEI_FOP_NOTIFY_STOP: case MEI_FOP_NOTIFY_START: case MEI_FOP_DMA_MAP: case MEI_FOP_DMA_UNMAP: if (waitqueue_active(&cl->wait))
wake_up(&cl->wait);
break; case MEI_FOP_DISCONNECT_RSP:
mei_io_cb_free(cb);
mei_cl_set_disconnected(cl); break; default:
BUG_ON(0);
}
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.