// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Sony Mobile Communications Inc.
 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
 */
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/qrtr.h>
#include <linux/termios.h>	/* For TIOCINQ/OUTQ */
#include <linux/spinlock.h>
#include <linux/wait.h>
/**
 * struct qrtr_tx_flow - tx flow control bookkeeping for one destination
 * @resume_tx: wait queue for senders awaiting a resume-tx from the remote
 * @pending: number of senders currently waiting
 * @tx_failed: flags that a message carrying confirm_rx was lost
 */
struct qrtr_tx_flow {
	struct wait_queue_head resume_tx;
	int pending;
	int tx_failed;
};
/* Release node resources and free the node.
 *
 * Do not call directly, use qrtr_node_release. To be used with
 * kref_put_mutex. As such, the node mutex is expected to be locked on call.
 */
/* NOTE(review): "staticvoid" and "unsignedlong" below are keyword pairs fused
 * by the text extraction; a space must be restored for this to compile.
 */
staticvoid __qrtr_node_release(struct kref *kref)
{
	/* The kref is embedded in struct qrtr_node; recover the enclosing node */
	struct qrtr_node *node = container_of(kref, struct qrtr_node, ref);
	struct radix_tree_iter iter;
	struct qrtr_tx_flow *flow;
	unsignedlong flags;
	void __rcu **slot;

	/* Remove every qrtr_nodes entry that still points at this node */
	spin_lock_irqsave(&qrtr_nodes_lock, flags);
	/* If the node is a bridge for other nodes, there are possibly
	 * multiple entries pointing to our released node, delete them all.
	 */
	radix_tree_for_each_slot(slot, &qrtr_nodes, &iter, 0) {
		if (*slot == node)
			radix_tree_iter_delete(&qrtr_nodes, &iter, slot);
	}
	spin_unlock_irqrestore(&qrtr_nodes_lock, flags);
	/* NOTE(review): function truncated here — the remaining teardown and
	 * the closing brace are missing from this chunk. 'flow' is declared but
	 * unused in the visible portion, presumably used by the missing part.
	 */
/**
 * qrtr_tx_wait() - flow control for outgoing packets
 * @node: qrtr_node that the packet is to be send to
 * @dest_node: node id of the destination
 * @dest_port: port number of the destination
 * @type: type of message
 *
 * The flow control scheme is based around the low and high "watermarks". When
 * the low watermark is passed the confirm_rx flag is set on the outgoing
 * message, which will trigger the remote to send a control message of the type
 * QRTR_TYPE_RESUME_TX to reset the counter. If the high watermark is hit
 * further transmission should be paused.
 *
 * Return: 1 if confirm_rx should be set, 0 otherwise or errno failure
 */
/* NOTE(review): "staticint" and "unsignedlong" are fused keyword pairs from
 * the extraction; restore the space before compiling.
 */
staticint qrtr_tx_wait(struct qrtr_node *node, int dest_node, int dest_port, int type)
{
	/* Flow state is keyed by the destination (node, port) pair */
	unsignedlong key = (u64)dest_node << 32 | dest_port;
	struct qrtr_tx_flow *flow;
	int confirm_rx = 0;
	int ret;

	/* Never set confirm_rx on non-data packets */
	if (type != QRTR_TYPE_DATA)
		return 0;

	mutex_lock(&node->qrtr_tx_lock);
	flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
	if (!flow) {
		/* First message towards this destination: allocate flow state */
		flow = kzalloc(sizeof(*flow), GFP_KERNEL);
		if (flow) {
			init_waitqueue_head(&flow->resume_tx);
			if (radix_tree_insert(&node->qrtr_tx_flow, key, flow)) {
				kfree(flow);
				flow = NULL;
			}
		}
	}
	mutex_unlock(&node->qrtr_tx_lock);

	/* Set confirm_rx if we were unable to find and allocate a flow */
	if (!flow)
		return 1;
	/* NOTE(review): function truncated here — the watermark wait loop and
	 * the final return are missing from this chunk ('confirm_rx' and 'ret'
	 * are otherwise unused in the visible portion).
	 */
/**
 * qrtr_tx_flow_failed() - flag that tx of confirm_rx flagged messages failed
 * @node: qrtr_node that the packet is to be send to
 * @dest_node: node id of the destination
 * @dest_port: port number of the destination
 *
 * Signal that the transmission of a message with confirm_rx flag failed. The
 * flow's "pending" counter will keep incrementing towards QRTR_TX_FLOW_HIGH,
 * at which point transmission would stall forever waiting for the resume TX
 * message associated with the dropped confirm_rx message.
 * Work around this by marking the flow as having a failed transmission and
 * cause the next transmission attempt to be sent with the confirm_rx.
 */
staticvoid qrtr_tx_flow_failed(struct qrtr_node *node, int dest_node, int dest_port)
{
	unsignedlong key = (u64)dest_node << 32 | dest_port;
	struct qrtr_tx_flow *flow;

	/* NOTE(review): the code below does not belong to this function — it
	 * references 'rc', 'skb', 'confirm_rx' and 'to', none of which are
	 * declared here, and it ends with "return rc" although the function is
	 * declared void. It appears to be the tail of a different sender
	 * function (an endpoint transmit path) spliced in by the extraction,
	 * while the real body of qrtr_tx_flow_failed is missing. 'key' and
	 * 'flow' are unused in the visible portion.
	 */
	if (!rc) {
		/* Hand the skb to the endpoint if it is still registered */
		mutex_lock(&node->ep_lock);
		rc = -ENODEV;
		if (node->ep)
			rc = node->ep->xmit(node->ep, skb);
		else
			kfree_skb(skb);
		mutex_unlock(&node->ep_lock);
	}
	/* Need to ensure that a subsequent message carries the otherwise lost
	 * confirm_rx flag if we dropped this one */
	if (rc && confirm_rx)
		qrtr_tx_flow_failed(node, to->sq_node, to->sq_port);

	return rc;
}
/* Lookup node by id.
 *
 * callers must release with qrtr_node_release()
 */
/* NOTE(review): "staticstruct"/"unsignedint"/"unsignedlong" are fused keyword
 * pairs from the extraction.
 */
staticstruct qrtr_node *qrtr_node_lookup(unsignedint nid)
{
	struct qrtr_node *node;
	unsignedlong flags;
	/* NOTE(review): function truncated — only the declarations are present;
	 * the lookup itself and the closing brace are missing from this chunk.
	 */
/* Assign node id to node.
 *
 * This is mostly useful for automatic node id assignment, based on
 * the source id in the incoming packet.
 */
/* NOTE(review): "staticvoid"/"unsignedint"/"unsignedlong" are fused keyword
 * pairs from the extraction.
 */
staticvoid qrtr_node_assign(struct qrtr_node *node, unsignedint nid)
{
	unsignedlong flags;
	/* NOTE(review): function truncated — the assignment logic and the
	 * closing brace are missing from this chunk.
	 */
	/* NOTE(review): this fragment starts mid-function — judging by the
	 * EXPORT_SYMBOL_GPL() below it is the tail of qrtr_endpoint_post().
	 * The declarations of 'cb', 'data', 'hdrlen', 'node', 'skb' and 'ipc'
	 * are in the missing part. "conststruct" is a fused keyword pair from
	 * the extraction.
	 */
	if (cb->type == QRTR_TYPE_NEW_SERVER) {
		/* Remote node endpoint can bridge other distant nodes */
		conststruct qrtr_ctrl_pkt *pkt;

		pkt = data + hdrlen;
		qrtr_node_assign(node, le32_to_cpu(pkt->server.node));
	}

	if (cb->type == QRTR_TYPE_RESUME_TX) {
		qrtr_tx_resume(node, skb);
	} else {
		/* Deliver to the local socket bound to the destination port */
		ipc = qrtr_port_lookup(cb->dst_port);
		if (!ipc)
			goto err;

		if (sock_queue_rcv_skb(&ipc->sk, skb)) {
			/* Receive queue full or socket dead — drop the packet */
			qrtr_port_put(ipc);
			goto err;
		}

		qrtr_port_put(ipc);
	}

	return 0;

err:
	kfree_skb(skb);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(qrtr_endpoint_post);
/**
 * qrtr_alloc_ctrl_packet() - allocate control packet skb
 * @pkt: reference to qrtr_ctrl_pkt pointer
 * @flags: the type of memory to allocate
 *
 * Returns newly allocated sk_buff, or NULL on failure
 *
 * This function allocates a sk_buff large enough to carry a qrtr_ctrl_pkt and
 * on success returns a reference to the control packet in @pkt.
 */
/* NOTE(review): "staticstruct" and "constint" are fused keyword pairs from
 * the extraction.
 */
staticstruct sk_buff *qrtr_alloc_ctrl_packet(struct qrtr_ctrl_pkt **pkt,
					     gfp_t flags)
{
	constint pkt_len = sizeof(struct qrtr_ctrl_pkt);
	struct sk_buff *skb;

	/* Room for the qrtr header in front of the control payload */
	skb = alloc_skb(QRTR_HDR_MAX_SIZE + pkt_len, flags);
	if (!skb)
		return NULL;
	/* NOTE(review): function truncated — the remainder (which should set
	 * up the payload and store it through @pkt per the kernel-doc above)
	 * is missing from this chunk.
	 */
/**
 * qrtr_endpoint_register() - register a new endpoint
 * @ep: endpoint to register
 * @nid: desired node id; may be QRTR_EP_NID_AUTO for auto-assignment
 * Return: 0 on success; negative error code on failure
 *
 * The specified endpoint must have the xmit function pointer set on call.
 */
/* NOTE(review): "unsignedint" is a fused keyword pair from the extraction. */
int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsignedint nid)
{
	struct qrtr_node *node;

	/* An endpoint without an xmit callback can never send anything */
	if (!ep || !ep->xmit)
		return -EINVAL;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;
	/* NOTE(review): function truncated — node initialisation, registration
	 * and the closing brace are missing from this chunk.
	 */
	/* NOTE(review): this fragment starts mid-function; it reads like the
	 * teardown path of an endpoint-unregister/port-remove routine. 'flags',
	 * 'slot', 'iter', 'skb', 'pkt', 'src', 'dst', 'flow' and 'node' are
	 * declared in the missing part.
	 */
	/* Notify the local controller about the event */
	spin_lock_irqsave(&qrtr_nodes_lock, flags);
	radix_tree_for_each_slot(slot, &qrtr_nodes, &iter, 0) {
		if (*slot != node)
			continue;
		/* Send a BYE on behalf of every node id mapped to this node */
		src.sq_node = iter.index;
		skb = qrtr_alloc_ctrl_packet(&pkt, GFP_ATOMIC);
		if (skb) {
			pkt->cmd = cpu_to_le32(QRTR_TYPE_BYE);
			qrtr_local_enqueue(NULL, skb, QRTR_TYPE_BYE, &src, &dst);
		}
	}
	spin_unlock_irqrestore(&qrtr_nodes_lock, flags);

	/* Wake up any transmitters waiting for resume-tx from the node */
	mutex_lock(&node->qrtr_tx_lock);
	radix_tree_for_each_slot(slot, &node->qrtr_tx_flow, &iter, 0) {
		flow = *slot;
		wake_up_interruptible_all(&flow->resume_tx);
	}
	mutex_unlock(&node->qrtr_tx_lock);

	/* Ensure that if qrtr_port_lookup() did enter the RCU read section we
	 * wait for it to up increment the refcount */
	synchronize_rcu();
}
/* Assign port number to socket.
 *
 * Specify port in the integer pointed to by port, and it will be adjusted
 * on return as necessary.
 *
 * Port may be:
 * 0: Assign ephemeral port in [QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET]
 * <QRTR_MIN_EPH_SOCKET: Specified; requires CAP_NET_ADMIN
 * >QRTR_MIN_EPH_SOCKET: Specified; available to all
 */
/* NOTE(review): "staticint" is a fused keyword pair from the extraction. */
staticint qrtr_port_assign(struct qrtr_sock *ipc, int *port)
{
	int rc;
	/* NOTE(review): function truncated — the assignment logic and the
	 * closing brace are missing from this chunk.
	 */
/* Queue packet to local peer socket. */
/* NOTE(review): "staticint" is a fused keyword pair from the extraction. */
staticint qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb, int type, struct sockaddr_qrtr *from, struct sockaddr_qrtr *to)
{
	struct qrtr_sock *ipc;
	struct qrtr_cb *cb;

	ipc = qrtr_port_lookup(to->sq_port);
	if (!ipc || &ipc->sk == skb->sk) { /* do not send to self */
		/* Drop the lookup reference before failing, if we got one */
		if (ipc)
			qrtr_port_put(ipc);
		kfree_skb(skb);
		return -ENODEV;
	}
	/* NOTE(review): function truncated — the remainder of the enqueue path
	 * is missing from this chunk; 'cb' is unused in the visible portion.
	 */
	/* NOTE(review): fragment starts mid-function — an ioctl dispatch,
	 * presumably a socket ioctl handler. 'cmd', 'sk', 'len', 'rc', 'argp',
	 * 'skb', 'ifr', 'sq' and 'ipc' are declared in the missing part.
	 */
	switch (cmd) {
	case TIOCOUTQ:
		/* Unused send-buffer space, clamped at zero */
		len = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
		if (len < 0)
			len = 0;
		rc = put_user(len, (int __user *)argp);
		break;
	case TIOCINQ:
		/* Length of the next queued skb ('len' initialised in the
		 * missing part — presumably to 0; verify against full source)
		 */
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			len = skb->len;
		rc = put_user(len, (int __user *)argp);
		break;
	case SIOCGIFADDR:
		if (get_user_ifreq(&ifr, NULL, argp)) {
			rc = -EFAULT;
			break;
		}

		/* Report our own qrtr address through the ifreq */
		sq = (struct sockaddr_qrtr *)&ifr.ifr_addr;
		*sq = ipc->us;
		if (put_user_ifreq(&ifr, argp)) {
			rc = -EFAULT;
			break;
		}
		break;
	case SIOCADDRT:
	case SIOCDELRT:
	case SIOCSIFADDR:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
		/* Recognised but unsupported on qrtr sockets */
		rc = -EINVAL;
		break;
	default:
		rc = -ENOIOCTLCMD;
		break;
	}
	/* NOTE(review): the enclosing function continues past this chunk. */
/*
 * NOTE(review): extraction artifact — German website boilerplate, not source
 * code, and it breaks compilation as-is. Original text: "Die Informationen
 * auf dieser Webseite wurden nach bestem Wissen sorgfältig zusammengestellt.
 * Es wird jedoch weder Vollständigkeit, noch Richtigkeit, noch Qualität der
 * bereit gestellten Informationen zugesichert. Bemerkung: Die farbliche
 * Syntaxdarstellung und die Messung sind noch experimentell."
 * (Translation: "The information on this web page was carefully compiled to
 * the best of our knowledge. However, neither completeness, correctness, nor
 * quality of the provided information is guaranteed. Note: the colored syntax
 * rendering and the measurement are still experimental.")
 * This block should be removed from the file entirely.
 */