/*
 * Panic-event reporting modes.  Numeric values index the
 * ipmi_panic_event_str[] table, so the two must stay in sync.
 */
enum ipmi_panic_event_op {
	IPMI_SEND_PANIC_EVENT_NONE = 0,		/* "none": do nothing on panic */
	IPMI_SEND_PANIC_EVENT = 1,		/* "event": log a single event */
	IPMI_SEND_PANIC_EVENT_STRING = 2,	/* "string": event plus panic string */
	IPMI_SEND_PANIC_EVENT_MAX = 3		/* count of valid modes */
};
/* Indices in this array should be mapped to enum ipmi_panic_event_op */
static const char * const ipmi_panic_event_str[] = {
	"none", "event", "string", NULL
};
staticconststruct kernel_param_ops panic_op_ops = {
.set = panic_op_write_handler,
.get = panic_op_read_handler
};
module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic. Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");
/* Limit on queued events waiting for a reader — TODO confirm against users. */
#define MAX_EVENTS_IN_QUEUE	25

/* Remain in auto-maintenance mode for this amount of time (in ms). */
static unsigned long maintenance_mode_timeout_ms = 30000;
module_param(maintenance_mode_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(maintenance_mode_timeout_ms,
		 "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode.");
/*
 * Don't let a message sit in a queue forever, always time it with at
 * least the max message timer.  This is in milliseconds.
 */
#define MAX_MSG_TIMEOUT		60000

/*
 * Timeout times below are in milliseconds, and are done off a 1
 * second timer.  So setting the value to 1000 would mean anything
 * between 0 and 1000ms.  So really the only reasonable minimum
 * setting is 2000ms, which is between 1 and 2 seconds.
 */

/* The default timeout for message retries. */
static unsigned long default_retry_ms = 2000;
module_param(default_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_retry_ms,
		 "The time (milliseconds) between retry sends");

/* The default timeout for maintenance mode message retries. */
static unsigned long default_maintenance_retry_ms = 3000;
module_param(default_maintenance_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_maintenance_retry_ms,
		 "The time (milliseconds) between retry sends in maintenance mode");
/* The default maximum number of retries */
static unsigned int default_max_retries = 4;
module_param(default_max_retries, uint, 0644);
/*
 * The original description here was copy-pasted from
 * default_maintenance_retry_ms and wrongly described a time in
 * milliseconds; this parameter is a retry count.
 */
MODULE_PARM_DESC(default_max_retries,
		 "The number of retries before a message send fails");
/* The default maximum number of users that may register. */
static unsigned int max_users = 30;
module_param(max_users, uint, 0644);
MODULE_PARM_DESC(max_users,
		 "The most users that may use the IPMI stack at one time.");
/* The default maximum number of messages a user may have outstanding. */
static unsigned int max_msgs_per_user = 100;
module_param(max_msgs_per_user, uint, 0644);
/* Fix user-visible grammar: "most message" -> "most messages". */
MODULE_PARM_DESC(max_msgs_per_user,
		 "The most messages a user may have outstanding.");
/* Call every ~1000 ms. */
#define IPMI_TIMEOUT_TIME	1000

/* How many jiffies does it take to get to the timeout time. */
#define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)

/*
 * Request events from the queue every second (this is the number of
 * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
 * future, IPMI will add a way to know immediately if an event is in
 * the queue and this silliness can go away.
 */
#define IPMI_REQUEST_EV_TIME	(1000 / (IPMI_TIMEOUT_TIME))

/* How long should we cache dynamic device IDs? */
#define IPMI_DYN_DEV_ID_EXPIRY	(10 * HZ)
/* * The main "user" data structure.
*/ struct ipmi_user { struct list_head link;
struct kref refcount;
refcount_t destroyed;
/* The upper layer that handles receive messages. */ conststruct ipmi_user_hndl *handler; void *handler_data;
/* The interface this user is bound to. */ struct ipmi_smi *intf;
/* Does this interface receive IPMI events? */ bool gets_events;
/* * This is used to form a linked lised during mass deletion. * Since this is in an RCU list, we cannot use the link above * or change any data until the RCU period completes. So we * use this next variable during mass deletion so we can have * a list and don't have to wait and restart the search on * every individual deletion of a command.
*/ struct cmd_rcvr *next;
};
/* * To verify on an incoming send message response that this is * the message that the response is for, we keep a sequence id * and increment it every time we send a message.
*/ long seqid;
/* * This is held so we can properly respond to the message on a * timeout, and it is used to hold the temporary data for * retransmission, too.
*/ struct ipmi_recv_msg *recv_msg;
};
/* * Store the information in a msgid (long) to allow us to find a * sequence table entry from the msgid.
/*
 * Store the information in a msgid (long) to allow us to find a
 * sequence table entry from the msgid.  The sequence number occupies
 * bits 26..31 and the sequence id bits 0..25.
 *
 * The shift is performed on an unsigned value: with a plain int, a
 * 6-bit seq of 32 or more would set bit 31 and trigger signed
 * left-shift overflow, which is undefined behavior in ISO C.
 */
#define STORE_SEQ_IN_MSGID(seq, seqid) \
	((long)((((unsigned int)(seq) & 0x3f) << 26) | \
		((unsigned int)(seqid) & 0x3ffffff)))
/* Per-channel addressing information for our side of the link. */
struct ipmi_my_addrinfo {
	/*
	 * My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
	 * but may be changed by the user.
	 */
	unsigned char address;

	/*
	 * My LUN.  This should generally stay the SMS LUN, but just in
	 * case...
	 */
	unsigned char lun;
};
/* * Note that the product id, manufacturer id, guid, and device id are * immutable in this structure, so dyn_mutex is not required for * accessing those. If those change on a BMC, a new BMC is allocated.
*/ struct bmc_device { struct platform_device pdev; struct list_head intfs; /* Interfaces on this BMC. */ struct ipmi_device_id id; struct ipmi_device_id fetch_id; int dyn_id_set; unsignedlong dyn_id_expiry; struct mutex dyn_mutex; /* Protects id, intfs, & dyn* */
guid_t guid;
guid_t fetch_guid; int dyn_guid_set; struct kref usecount; struct work_struct remove_work; unsignedchar cc; /* completion code */
}; #define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
/* * Various statistics for IPMI, these index stats[] in the ipmi_smi * structure.
*/ enum ipmi_stat_indexes { /* Commands we got from the user that were invalid. */
IPMI_STAT_sent_invalid_commands = 0,
/* Commands we sent to the MC. */
IPMI_STAT_sent_local_commands,
/* Responses from the MC that were delivered to a user. */
IPMI_STAT_handled_local_responses,
/* Responses from the MC that were not delivered to a user. */
IPMI_STAT_unhandled_local_responses,
/* Commands we sent out to the IPMB bus. */
IPMI_STAT_sent_ipmb_commands,
/* Commands sent on the IPMB that had errors on the SEND CMD */
IPMI_STAT_sent_ipmb_command_errs,
/* Each retransmit increments this count. */
IPMI_STAT_retransmitted_ipmb_commands,
/* * When a message times out (runs out of retransmits) this is * incremented.
*/
IPMI_STAT_timed_out_ipmb_commands,
/* * This is like above, but for broadcasts. Broadcasts are * *not* included in the above count (they are expected to * time out).
*/
IPMI_STAT_timed_out_ipmb_broadcasts,
/* Responses I have sent to the IPMB bus. */
IPMI_STAT_sent_ipmb_responses,
/* The response was delivered to the user. */
IPMI_STAT_handled_ipmb_responses,
/* The response had invalid data in it. */
IPMI_STAT_invalid_ipmb_responses,
/* The response didn't have anyone waiting for it. */
IPMI_STAT_unhandled_ipmb_responses,
/* Commands we sent out to the IPMB bus. */
IPMI_STAT_sent_lan_commands,
/* Commands sent on the IPMB that had errors on the SEND CMD */
IPMI_STAT_sent_lan_command_errs,
/* Each retransmit increments this count. */
IPMI_STAT_retransmitted_lan_commands,
/* * When a message times out (runs out of retransmits) this is * incremented.
*/
IPMI_STAT_timed_out_lan_commands,
/* Responses I have sent to the IPMB bus. */
IPMI_STAT_sent_lan_responses,
/* The response was delivered to the user. */
IPMI_STAT_handled_lan_responses,
/* The response had invalid data in it. */
IPMI_STAT_invalid_lan_responses,
/* The response didn't have anyone waiting for it. */
IPMI_STAT_unhandled_lan_responses,
/* The command was delivered to the user. */
IPMI_STAT_handled_commands,
/* The command had invalid data in it. */
IPMI_STAT_invalid_commands,
/* The command didn't have anyone waiting for it. */
IPMI_STAT_unhandled_commands,
/* Invalid data in an event. */
IPMI_STAT_invalid_events,
/* Events that were received with the proper format. */
IPMI_STAT_events,
/* Retransmissions on IPMB that failed. */
IPMI_STAT_dropped_rexmit_ipmb_commands,
/* Retransmissions on LAN that failed. */
IPMI_STAT_dropped_rexmit_lan_commands,
/* This *must* remain last, add new values above this. */
IPMI_NUM_STATS
};
/* Set when the interface is being unregistered. */ bool in_shutdown;
/* Used for a list of interfaces. */ struct list_head link;
/* * The list of upper layers that are using me.
*/ struct list_head users; struct mutex users_mutex;
atomic_t nr_users; struct device_attribute nr_users_devattr; struct device_attribute nr_msgs_devattr;
/* Used for wake ups at startup. */
wait_queue_head_t waitq;
/* * Prevents the interface from being unregistered when the * interface is used by being looked up through the BMC * structure.
*/ struct mutex bmc_reg_mutex;
/* Driver-model device for the system interface. */ struct device *si_dev;
/* * A table of sequence numbers for this interface. We use the * sequence numbers for IPMB messages that go out of the * interface to match them up with their responses. A routine * is called periodically to time the items in this list.
*/ struct mutex seq_lock; struct seq_table seq_table[IPMI_IPMB_NUM_SEQ]; int curr_seq;
/* * Messages queued for deliver to the user.
*/ struct mutex user_msgs_mutex; struct list_head user_msgs;
/* * Messages queued for processing. If processing fails (out * of memory for instance), They will stay in here to be * processed later in a periodic timer interrupt. The * workqueue is for handling received messages directly from * the handler.
*/
spinlock_t waiting_rcv_msgs_lock; struct list_head waiting_rcv_msgs;
atomic_t watchdog_pretimeouts_to_deliver; struct work_struct smi_work;
/* * The list of command receivers that are registered for commands * on this interface.
*/ struct mutex cmd_rcvrs_mutex; struct list_head cmd_rcvrs;
/* * Events that were queues because no one was there to receive * them.
*/ struct mutex events_mutex; /* For dealing with event stuff. */ struct list_head waiting_events; unsignedint waiting_events_count; /* How many events in queue? */ char event_msg_printed;
/* How many users are waiting for events? */
atomic_t event_waiters; unsignedint ticks_to_req_ev;
spinlock_t watch_lock; /* For dealing with watch stuff below. */
/* How many users are waiting for commands? */ unsignedint command_waiters;
/* How many users are waiting for watchdogs? */ unsignedint watchdog_waiters;
/* How many users are waiting for message responses? */ unsignedint response_waiters;
/* * Tells what the lower layer has last been asked to watch for, * messages and/or watchdogs. Protected by watch_lock.
*/ unsignedint last_watch_mask;
/* * The event receiver for my BMC, only really used at panic * shutdown as a place to store this.
*/ unsignedchar event_receiver; unsignedchar event_receiver_lun; unsignedchar local_sel_device; unsignedchar local_event_generator;
/* For handling of maintenance mode. */ int maintenance_mode; bool maintenance_mode_enable; int auto_maintenance_timeout;
spinlock_t maintenance_mode_lock; /* Used in a timer... */
/* * If we are doing maintenance on something on IPMB, extend * the timeout time to avoid timeouts writing firmware and * such.
*/ int ipmb_maintenance_mode_timeout;
/* * A cheap hack, if this is non-null and a message to an * interface comes in with a NULL user, call this routine with * it. Note that the message will still be freed by the * caller. This only works on the system interface. * * Protected by bmc_reg_mutex.
*/ void (*null_user_handler)(struct ipmi_smi *intf, struct ipmi_recv_msg *msg);
/* * When we are scanning the channels for an SMI, this will * tell which channel we are scanning.
*/ int curr_channel;
/* Channel information */ struct ipmi_channel_set *channel_list; unsignedint curr_working_cset; /* First index into the following. */ struct ipmi_channel_set wchannels[2]; struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS]; bool channels_ready;
atomic_t stats[IPMI_NUM_STATS];
/* * run_to_completion duplicate of smb_info, smi_info * and ipmi_serial_info structures. Used to decrease numbers of * parameters passed by "low" level IPMI code.
*/ int run_to_completion;
}; #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
/* * The driver model view of the IPMI messaging driver.
*/ staticstruct platform_driver ipmidriver = {
.driver = {
.name = "ipmi",
.bus = &platform_bus_type
}
}; /* * This mutex keeps us from adding the same BMC twice.
*/ static DEFINE_MUTEX(ipmidriver_mutex);
/* * Wholesale remove all the entries from the list in the * interface. No need for locks, this is single-threaded.
*/
list_for_each_entry_safe(rcvr, rcvr2, &intf->cmd_rcvrs, link)
kfree(rcvr);
for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { if ((intf->seq_table[i].inuse)
&& (intf->seq_table[i].recv_msg))
ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
}
kfree(intf);
}
int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{ struct ipmi_smi *intf; unsignedint count = 0, i; int *interfaces = NULL; struct device **devices = NULL; int rv = 0;
/* * Make sure the driver is actually initialized, this handles * problems with initialization order.
*/
rv = ipmi_init_msghandler(); if (rv) return rv;
mutex_lock(&smi_watchers_mutex);
list_add(&watcher->link, &smi_watchers);
/* * Build an array of ipmi interfaces and fill it in, and * another array of the devices. We can't call the callback * with ipmi_interfaces_mutex held. smi_watchers_mutex will * keep things in order for the user.
*/
mutex_lock(&ipmi_interfaces_mutex);
list_for_each_entry(intf, &ipmi_interfaces, link)
count++; if (count > 0) {
interfaces = kmalloc_array(count, sizeof(*interfaces),
GFP_KERNEL); if (!interfaces) {
rv = -ENOMEM;
} else {
devices = kmalloc_array(count, sizeof(*devices),
GFP_KERNEL); if (!devices) {
kfree(interfaces);
interfaces = NULL;
rv = -ENOMEM;
}
}
count = 0;
} if (interfaces) {
list_for_each_entry(intf, &ipmi_interfaces, link) { int intf_num = READ_ONCE(intf->intf_num);
if (!msg->user) { /* Special handling for NULL users. */ if (intf->null_user_handler) {
intf->null_user_handler(intf, msg);
} else { /* No handler, so give up. */
rv = -EINVAL;
}
ipmi_free_recv_msg(msg);
} elseif (oops_in_progress) { /* * If we are running in the panic context, calling the * receive handler doesn't much meaning and has a deadlock * risk. At this moment, simply skip it in that case.
*/
ipmi_free_recv_msg(msg);
} else { /* * Deliver it in smi_work. The message will hold a * refcount to the user.
*/
mutex_lock(&intf->user_msgs_mutex);
list_add_tail(&msg->link, &intf->user_msgs);
mutex_unlock(&intf->user_msgs_mutex);
queue_work(system_wq, &intf->smi_work);
}
/* * Find the next sequence number not being used and add the given * message with the given timeout to the sequence table. This must be * called with the interface's seq_lock held.
*/ staticint intf_next_seq(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg, unsignedlong timeout, int retries, int broadcast, unsignedchar *seq, long *seqid)
{ int rv = 0; unsignedint i;
if (timeout == 0)
timeout = default_retry_ms; if (retries < 0)
retries = default_max_retries;
for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
i = (i+1)%IPMI_IPMB_NUM_SEQ) { if (!intf->seq_table[i].inuse) break;
}
if (!intf->seq_table[i].inuse) {
intf->seq_table[i].recv_msg = recv_msg;
/* * Start with the maximum timeout, when the send response * comes in we will start the real timer.
*/
intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
intf->seq_table[i].orig_timeout = timeout;
intf->seq_table[i].retries_left = retries;
intf->seq_table[i].broadcast = broadcast;
intf->seq_table[i].inuse = 1;
intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
*seq = i;
*seqid = intf->seq_table[i].seqid;
intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
need_waiter(intf);
} else {
rv = -EAGAIN;
}
return rv;
}
/* * Return the receive message for the given sequence number and * release the sequence number so it can be reused. Some other data * is passed in to be sure the message matches up correctly (to help * guard against message coming in after their timeout and the * sequence number being reused).
*/ staticint intf_find_seq(struct ipmi_smi *intf, unsignedchar seq, short channel, unsignedchar cmd, unsignedchar netfn, struct ipmi_addr *addr, struct ipmi_recv_msg **recv_msg)
{ int rv = -ENODEV;
if (seq >= IPMI_IPMB_NUM_SEQ) return -EINVAL;
mutex_lock(&intf->seq_lock); if (intf->seq_table[seq].inuse) { struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
/*
 * Start the timer for a specific sequence table entry by restoring
 * the entry's original (per-retry) timeout.
 *
 * Returns 0 on success, or -ENODEV if the entry is no longer in use
 * or its generation id does not match (the user can be deleted while
 * a message is outstanding).
 */
static int intf_start_seq_timer(struct ipmi_smi *intf,
				long             msgid)
{
	int rv = -ENODEV;
	unsigned char seq;
	unsigned long seqid;

	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	mutex_lock(&intf->seq_lock);
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
	if ((intf->seq_table[seq].inuse)
	    && (intf->seq_table[seq].seqid == seqid)) {
		struct seq_table *ent = &intf->seq_table[seq];

		ent->timeout = ent->orig_timeout;
		rv = 0;
	}
	mutex_unlock(&intf->seq_lock);

	return rv;
}
/* Got an error for the send message for a specific sequence number. */ staticint intf_err_seq(struct ipmi_smi *intf, long msgid, unsignedint err)
{ int rv = -ENODEV; unsignedchar seq; unsignedlong seqid; struct ipmi_recv_msg *msg = NULL;
GET_SEQ_FROM_MSGID(msgid, seq, seqid);
mutex_lock(&intf->seq_lock); /* * We do this verification because the user can be deleted * while a message is outstanding.
*/ if ((intf->seq_table[seq].inuse)
&& (intf->seq_table[seq].seqid == seqid)) { struct seq_table *ent = &intf->seq_table[seq];
/* * There is no module usecount here, because it's not * required. Since this can only be used by and called from * other modules, they will implicitly use this module, and * thus this can't be removed unless the other modules are * removed.
*/
if (handler == NULL) return -EINVAL;
/* * Make sure the driver is actually initialized, this handles * problems with initialization order.
*/
rv = ipmi_init_msghandler(); if (rv) return rv;
mutex_lock(&ipmi_interfaces_mutex);
list_for_each_entry(intf, &ipmi_interfaces, link) { if (intf->intf_num == if_num) goto found;
} /* Not found, return an error */
rv = -EINVAL; goto out_unlock;
found: if (intf->in_shutdown) {
rv = -ENODEV; goto out_unlock;
}
if (handler->ipmi_watchdog_pretimeout) /* User wants pretimeouts, so make sure to watch for them. */
smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
/* Must be called with intf->users_mutex held. */ staticvoid _ipmi_destroy_user(struct ipmi_user *user)
{ struct ipmi_smi *intf = user->intf; int i; struct cmd_rcvr *rcvr; struct cmd_rcvr *rcvrs = NULL; struct ipmi_recv_msg *msg, *msg2;
if (!refcount_dec_if_one(&user->destroyed)) return;
if (user->handler->shutdown)
user->handler->shutdown(user->handler_data);
if (user->handler->ipmi_watchdog_pretimeout)
smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
if (user->gets_events)
atomic_dec(&intf->event_waiters);
/* Remove the user from the interface's list and sequence table. */
list_del(&user->link);
atomic_dec(&intf->nr_users);
mutex_lock(&intf->seq_lock); for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { if (intf->seq_table[i].inuse
&& (intf->seq_table[i].recv_msg->user == user)) {
intf->seq_table[i].inuse = 0;
smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
}
}
mutex_unlock(&intf->seq_lock);
/* * Remove the user from the command receiver's table. First * we build a list of everything (not using the standard link, * since other things may be using it till we do * synchronize_rcu()) then free everything in that list.
*/
mutex_lock(&intf->cmd_rcvrs_mutex);
list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
lockdep_is_held(&intf->cmd_rcvrs_mutex)) { if (rcvr->user == user) {
list_del_rcu(&rcvr->link);
rcvr->next = rcvrs;
rcvrs = rcvr;
}
}
mutex_unlock(&intf->cmd_rcvrs_mutex); while (rcvrs) {
rcvr = rcvrs;
rcvrs = rcvr->next;
kfree(rcvr);
}
mutex_lock(&intf->cmd_rcvrs_mutex); /* Make sure the command/netfn is not already registered. */ if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
rv = -EBUSY; goto out_unlock;
}
/* Now tack on the data to the message. */ if (msg->data_len > 0)
memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len);
smi_msg->data_size = msg->data_len + 9;
/* Now calculate the checksum and tack it on. */
smi_msg->data[i+smi_msg->data_size]
= ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6);
/* * Add on the checksum size and the offset from the * broadcast.
*/
smi_msg->data_size += 1 + i;
/* Now tack on the data to the message. */ if (msg->data_len > 0)
memcpy(&smi_msg->data[10], msg->data, msg->data_len);
smi_msg->data_size = msg->data_len + 10;
/* Now calculate the checksum and tack it on. */
smi_msg->data[smi_msg->data_size]
= ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7);
/* * Add on the checksum size and the offset from the * broadcast.
*/
smi_msg->data_size += 1;
if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
&& ((msg->cmd == IPMI_SEND_MSG_CMD)
|| (msg->cmd == IPMI_GET_MSG_CMD)
|| (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) { /* * We don't let the user do these, since we manage * the sequence numbers.
*/
ipmi_inc_stat(intf, sent_invalid_commands); return -EINVAL;
}
if (is_maintenance_mode_cmd(msg)) { unsignedlong flags;
staticint i_ipmi_req_ipmb(struct ipmi_smi *intf, struct ipmi_addr *addr, long msgid, struct kernel_ipmi_msg *msg, struct ipmi_smi_msg *smi_msg, struct ipmi_recv_msg *recv_msg, unsignedchar source_address, unsignedchar source_lun, int retries, unsignedint retry_time_ms)
{ struct ipmi_ipmb_addr *ipmb_addr; unsignedchar ipmb_seq; long seqid; int broadcast = 0; struct ipmi_channel *chans; int rv = 0;
if (addr->channel >= IPMI_MAX_CHANNELS) {
ipmi_inc_stat(intf, sent_invalid_commands); return -EINVAL;
}
chans = READ_ONCE(intf->channel_list)->c;
if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) {
ipmi_inc_stat(intf, sent_invalid_commands); return -EINVAL;
}
if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) { /* * Broadcasts add a zero at the beginning of the * message, but otherwise is the same as an IPMB * address.
*/
addr->addr_type = IPMI_IPMB_ADDR_TYPE;
broadcast = 1;
retries = 0; /* Don't retry broadcasts. */
}
/* * 9 for the header and 1 for the checksum, plus * possibly one for the broadcast.
*/ if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
ipmi_inc_stat(intf, sent_invalid_commands); return -EMSGSIZE;
}
if (recv_msg->msg.netfn & 0x1) { /* * It's a response, so use the user's sequence * from msgid.
*/
ipmi_inc_stat(intf, sent_ipmb_responses);
format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
msgid, broadcast,
source_address, source_lun);
/* * Save the receive message so we can use it * to deliver the response.
*/
smi_msg->user_data = recv_msg;
} else {
mutex_lock(&intf->seq_lock);
if (is_maintenance_mode_cmd(msg))
intf->ipmb_maintenance_mode_timeout =
maintenance_mode_timeout_ms;
if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0) /* Different default in maintenance mode */
retry_time_ms = default_maintenance_retry_ms;
/* * Create a sequence number with a 1 second * timeout and 4 retries.
*/
rv = intf_next_seq(intf,
recv_msg,
retry_time_ms,
retries,
broadcast,
&ipmb_seq,
&seqid); if (rv) /* * We have used up all the sequence numbers, * probably, so abort.
*/ goto out_err;
ipmi_inc_stat(intf, sent_ipmb_commands);
/* * Store the sequence number in the message, * so that when the send message response * comes back we can start the timer.
*/
format_ipmb_msg(smi_msg, msg, ipmb_addr,
STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
ipmb_seq, broadcast,
source_address, source_lun);
/* * Copy the message into the recv message data, so we * can retransmit it later if necessary.
*/
memcpy(recv_msg->msg_data, smi_msg->data,
smi_msg->data_size);
recv_msg->msg.data = recv_msg->msg_data;
recv_msg->msg.data_len = smi_msg->data_size;
/* * We don't unlock until here, because we need * to copy the completed message into the * recv_msg before we release the lock. * Otherwise, race conditions may bite us. I * know that's pretty paranoid, but I prefer * to be correct.
*/
out_err:
mutex_unlock(&intf->seq_lock);
}
/* 11 for the header and 1 for the checksum. */ if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
ipmi_inc_stat(intf, sent_invalid_commands); return -EMSGSIZE;
}
if (recv_msg->msg.netfn & 0x1) { /* * It's a response, so use the user's sequence * from msgid.
*/
ipmi_inc_stat(intf, sent_lan_responses);
format_lan_msg(smi_msg, msg, lan_addr, msgid,
msgid, source_lun);
/* * Save the receive message so we can use it * to deliver the response.
*/
smi_msg->user_data = recv_msg;
} else {
mutex_lock(&intf->seq_lock);
/* * Create a sequence number with a 1 second * timeout and 4 retries.
*/
rv = intf_next_seq(intf,
recv_msg,
retry_time_ms,
retries,
0,
&ipmb_seq,
&seqid); if (rv) /* * We have used up all the sequence numbers, * probably, so abort.
*/ goto out_err;
ipmi_inc_stat(intf, sent_lan_commands);
/* * Store the sequence number in the message, * so that when the send message response * comes back we can start the timer.
*/
format_lan_msg(smi_msg, msg, lan_addr,
STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
ipmb_seq, source_lun);
/* * Copy the message into the recv message data, so we * can retransmit it later if necessary.
*/
memcpy(recv_msg->msg_data, smi_msg->data,
smi_msg->data_size);
recv_msg->msg.data = recv_msg->msg_data;
recv_msg->msg.data_len = smi_msg->data_size;
/* * We don't unlock until here, because we need * to copy the completed message into the * recv_msg before we release the lock. * Otherwise, race conditions may bite us. I * know that's pretty paranoid, but I prefer * to be correct.
*/
out_err:
mutex_unlock(&intf->seq_lock);
}
return rv;
}
/* * Separate from ipmi_request so that the user does not have to be * supplied in certain circumstances (mainly at panic time). If * messages are supplied, they will be freed, even if an error * occurs.
*/ staticint i_ipmi_request(struct ipmi_user *user, struct ipmi_smi *intf, struct ipmi_addr *addr, long msgid, struct kernel_ipmi_msg *msg, void *user_msg_data, void *supplied_smi, struct ipmi_recv_msg *supplied_recv, int priority, unsignedchar source_address, unsignedchar source_lun, int retries, unsignedint retry_time_ms)
{ struct ipmi_smi_msg *smi_msg; struct ipmi_recv_msg *recv_msg; int run_to_completion = READ_ONCE(intf->run_to_completion); int rv = 0;
if (supplied_recv) {
recv_msg = supplied_recv;
recv_msg->user = user; if (user) {
atomic_inc(&user->nr_msgs); /* The put happens when the message is freed. */
kref_get(&user->refcount);
}
} else {
recv_msg = ipmi_alloc_recv_msg(user); if (IS_ERR(recv_msg)) return PTR_ERR(recv_msg);
}
recv_msg->user_msg_data = user_msg_data;
if (supplied_smi)
smi_msg = supplied_smi; else {
smi_msg = ipmi_alloc_smi_msg(); if (smi_msg == NULL) { if (!supplied_recv)
ipmi_free_recv_msg(recv_msg); return -ENOMEM;
}
}
if (!run_to_completion)
mutex_lock(&intf->users_mutex); if (intf->in_shutdown) {
rv = -ENODEV; goto out_err;
}
recv_msg->msgid = msgid; /* * Store the message to send in the receive message so timeout * responses can get the proper response data.
*/
recv_msg->msg = *msg;
if (msg->msg.data[0]) {
dev_warn(intf->si_dev, "device id fetch failed: 0x%2.2x\n",
msg->msg.data[0]);
intf->bmc->dyn_id_set = 0; goto out;
}
rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd,
msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id); if (rv) {
dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv); /* record completion code when error */
intf->bmc->cc = msg->msg.data[0];
intf->bmc->dyn_id_set = 0;
} else { /* * Make sure the id data is available before setting * dyn_id_set.
*/
smp_wmb();
intf->bmc->dyn_id_set = 1;
}
out:
wake_up(&intf->waitq);
}
/* * Fetch the device id for the bmc/interface. You must pass in either * bmc or intf, this code will get the other one. If the data has * been recently fetched, this will just use the cached data. Otherwise * it will run a new fetch. *
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.22 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.