/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2000-2001 Qualcomm Incorporated Copyright (C) 2011 ProFUSION Embedded Systems
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation;
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED.
*/
/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
/* Get HCI device by index.
* Device is held on return. */ staticstruct hci_dev *__hci_dev_get(int index, int *srcu_index)
{ struct hci_dev *hdev = NULL, *d;
switch (discov->state) { case DISCOVERY_FINDING: case DISCOVERY_RESOLVING: returntrue;
default: returnfalse;
}
}
/* Record a new discovery state on @hdev and emit the matching mgmt
 * "discovering" events. Does nothing when the state is unchanged.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int prev = hdev->discovery.state;

	if (prev == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_passive_scan(hdev);

		/* Only report "stopped" when discovery had actually been
		 * reported as started (i.e. not aborted while STARTING).
		 */
		if (prev != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	bt_dev_dbg(hdev, "state %u -> %u", prev, state);
}
if (do_inquiry) {
hci_req_sync_lock(hdev);
err = hci_inquiry_sync(hdev, ir.length, ir.num_rsp);
hci_req_sync_unlock(hdev);
if (err < 0) goto done;
/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is * cleared). If it is interrupted by a signal, return -EINTR.
*/ if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
TASK_INTERRUPTIBLE)) {
err = -EINTR; goto done;
}
}
/* for unlimited number of responses we will use buffer with * 255 entries
*/
max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
/* cache_dump can't sleep. Therefore we allocate temp buffer and then * copy it to the user space.
*/
buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL); if (!buf) {
err = -ENOMEM; goto done;
}
/* Bring up @hdev while holding the request sync lock.
 *
 * Returns 0 on success or the negative error from hci_dev_open_sync().
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);
	ret = hci_dev_open_sync(hdev);
	hci_req_sync_unlock(hdev);

	return ret;
}
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
/* Shut down @hdev while holding the request sync lock. */
int hci_dev_do_close(struct hci_dev *hdev)
{
	int rc;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);
	rc = hci_dev_close_sync(hdev);
	hci_req_sync_unlock(hdev);

	return rc;
}
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* User channel owns the device; refuse a legacy ioctl close */
	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	/* Make sure no power on/off work races with the close */
	cancel_work_sync(&hdev->power_on);
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
staticint hci_dev_do_reset(struct hci_dev *hdev)
{ int ret;
BT_DBG("%s %p", hdev->name, hdev);
hci_req_sync_lock(hdev);
/* Drop queues */
skb_queue_purge(&hdev->rx_q);
skb_queue_purge(&hdev->cmd_q);
/* Cancel these to avoid queueing non-chained pending work */
hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE); /* Wait for * * if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE)) * queue_delayed_work(&hdev->{cmd,ncmd}_timer) * * inside RCU section to see the flag or complete scheduling.
*/
synchronize_rcu(); /* Explicitly cancel works in case scheduled after setting the flag. */
cancel_delayed_work(&hdev->cmd_timer);
cancel_delayed_work(&hdev->ncmd_timer);
/* Avoid potential lockdep warnings from the *_flush() calls by * ensuring the workqueue is empty up front.
*/
drain_workqueue(hdev->workqueue);
case HCISETENCRYPT: if (!lmp_encrypt_capable(hdev)) {
err = -EOPNOTSUPP; break;
}
if (!test_bit(HCI_AUTH, &hdev->flags)) { /* Auth must be enabled first */
err = hci_cmd_sync_status(hdev,
HCI_OP_WRITE_AUTH_ENABLE,
1, &dr.dev_opt,
HCI_CMD_TIMEOUT); if (err) break;
}
case HCISETSCAN:
err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
1, &dr.dev_opt, HCI_CMD_TIMEOUT);
/* Ensure that the connectable and discoverable states * get correctly modified as this was a non-mgmt change.
*/ if (!err)
hci_update_passive_scan_state(hdev, dr.dev_opt); break;
case HCISETLINKPOL:
policy = cpu_to_le16(dr.dev_opt);
/* When the auto-off is configured it means the transport * is running, but in that case still indicate that the * device is actually down.
*/ if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
flags &= ~BIT(HCI_UP);
dr[n].dev_id = hdev->id;
dr[n].dev_opt = flags;
if (++n >= dev_num) break;
}
read_unlock(&hci_dev_list_lock);
int hci_get_dev_info(void __user *arg)
{ struct hci_dev *hdev; struct hci_dev_info di; unsignedlong flags; int err = 0;
if (copy_from_user(&di, arg, sizeof(di))) return -EFAULT;
hdev = hci_dev_get(di.dev_id); if (!hdev) return -ENODEV;
/* When the auto-off is configured it means the transport * is running, but in that case still indicate that the * device is actually down.
*/ if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
flags = hdev->flags & ~BIT(HCI_UP); else
flags = hdev->flags;
BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) return -EBUSY;
if (blocked == hci_dev_test_flag(hdev, HCI_RFKILLED)) return 0;
if (blocked) {
hci_dev_set_flag(hdev, HCI_RFKILLED);
if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
!hci_dev_test_flag(hdev, HCI_CONFIG)) {
err = hci_dev_do_poweroff(hdev); if (err) {
bt_dev_err(hdev, "Error when powering off device on rfkill (%d)",
err);
/* Make sure the device is still closed even if * anything during power off sequence (eg. * disconnecting devices) failed.
*/
hci_dev_do_close(hdev);
}
}
} else {
hci_dev_clear_flag(hdev, HCI_RFKILLED);
}
/* During the HCI setup phase, a few error conditions are * ignored and they need to be checked now. If they are still * valid, it is important to turn the device back off.
*/ if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
(!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
!bacmp(&hdev->static_addr, BDADDR_ANY))) {
hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
hci_dev_do_close(hdev);
} elseif (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
HCI_AUTO_OFF_TIMEOUT);
}
if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) { /* For unconfigured devices, set the HCI_RAW flag * so that userspace can easily identify them.
*/ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
set_bit(HCI_RAW, &hdev->flags);
/* For fully configured devices, this will send * the Index Added event. For unconfigured devices, * it will send Unconfigued Index Added event. * * Devices with HCI_QUIRK_RAW_DEVICE are ignored * and no event will be send.
*/
mgmt_index_added(hdev);
} elseif (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) { /* When the controller is now configured, then it * is important to clear the HCI_RAW flag.
*/ if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
clear_bit(HCI_RAW, &hdev->flags);
/* Powering on the controller with HCI_CONFIG set only * happens with the transition from unconfigured to * configured. This will send the Index Added event.
*/
mgmt_index_added(hdev);
}
}
bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
/* During HCI_INIT phase no events can be injected if the ncmd timer * triggers since the procedure has its own timeout handling.
*/ if (test_bit(HCI_INIT, &hdev->flags)) return;
/* This is an irrecoverable state, inject hardware error event */
hci_reset_dev(hdev);
}
/* This function requires the caller holds hdev->lock */ int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
{ struct adv_info *adv_instance;
adv_instance = hci_find_adv_instance(hdev, instance); if (!adv_instance) return -ENOENT;
BT_DBG("%s removing %dMR", hdev->name, instance);
if (hdev->cur_adv_instance == instance) { if (hdev->adv_instance_timeout) {
cancel_delayed_work(&hdev->adv_instance_expire);
hdev->adv_instance_timeout = 0;
}
hdev->cur_adv_instance = 0x00;
}
adv = kzalloc(sizeof(*adv), GFP_KERNEL); if (!adv) return ERR_PTR(-ENOMEM);
adv->pending = true;
adv->instance = instance;
/* If controller support only one set and the instance is set to * 1 then there is no option other than using handle 0x00.
*/ if (hdev->le_num_of_adv_sets == 1 && instance == 1)
adv->handle = 0x00; else
adv->handle = instance;
adv->flags = flags;
adv->min_interval = min_interval;
adv->max_interval = max_interval;
adv->tx_power = tx_power; /* Defining a mesh_handle changes the timing units to ms, * rather than seconds, and ties the instance to the requested * mesh_tx queue.
*/
adv->mesh = mesh_handle;
/* Mark as changed if there are flags which would affect it */ if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
adv->scan_rsp_changed = true;
return 0;
}
/* This function requires the caller holds hdev->lock */
u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
u32 flags; struct adv_info *adv;
if (instance == 0x00) { /* Instance 0 always manages the "Tx Power" and "Flags" * fields
*/
flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting * corresponds to the "connectable" instance flag.
*/ if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
flags |= MGMT_ADV_FLAG_CONNECTABLE;
/* Frees the monitor structure and do some bookkeepings. * This function requires the caller holds hdev->lock.
*/ void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
{ struct adv_pattern *pattern; struct adv_pattern *tmp;
if (monitor->handle)
idr_remove(&hdev->adv_monitors_idr, monitor->handle);
if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED)
hdev->adv_monitors_cnt--;
kfree(monitor);
}
/* Assigns handle to a monitor, and if offloading is supported and power is on,
 * also attempts to forward the request to the controller.
 * This function requires the caller holds hci_req_sync_lock.
 */
int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
{
	int status = 0;
	int handle;

	if (!monitor)
		return -EINVAL;

	/* Reserve a handle for the monitor under hdev->lock */
	hci_dev_lock(hdev);
	handle = idr_alloc(&hdev->adv_monitors_idr, monitor,
			   HCI_MIN_ADV_MONITOR_HANDLE,
			   HCI_MIN_ADV_MONITOR_HANDLE +
			   HCI_MAX_ADV_MONITOR_NUM_HANDLES,
			   GFP_KERNEL);
	hci_dev_unlock(hdev);

	if (handle < 0)
		return handle;

	monitor->handle = handle;

	/* Forwarding to the controller only makes sense when powered */
	if (!hdev_is_powered(hdev))
		return status;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_NONE:
		bt_dev_dbg(hdev, "add monitor %d status %d",
			   monitor->handle, status);
		/* Message was not forwarded to controller - not an error */
		break;
	case HCI_ADV_MONITOR_EXT_MSFT:
		status = msft_add_monitor_pattern(hdev, monitor);
		bt_dev_dbg(hdev, "add monitor %d msft status %d",
			   handle, status);
		break;
	}

	return status;
}
/* Attempts to tell the controller and free the monitor. If somehow the
 * controller doesn't have a corresponding handle, remove anyway.
 * This function requires the caller holds hci_req_sync_lock.
 */
static int hci_remove_adv_monitor(struct hci_dev *hdev,
				  struct adv_monitor *monitor)
{
	int status = 0;
	int handle;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
		bt_dev_dbg(hdev, "remove monitor %d status %d",
			   monitor->handle, status);
		goto free_monitor;

	case HCI_ADV_MONITOR_EXT_MSFT:
		handle = monitor->handle;
		status = msft_remove_monitor(hdev, monitor);
		bt_dev_dbg(hdev, "remove monitor %d msft status %d",
			   handle, status);
		break;
	}

	/* In case no matching handle registered, just free the monitor */
	if (status == -ENOENT)
		goto free_monitor;

	return status;

free_monitor:
	if (status == -ENOENT)
		bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
			    monitor->handle);
	hci_free_adv_monitor(hdev, monitor);

	return status;
}
/* This function requires the caller holds hci_req_sync_lock */
int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
{
	struct adv_monitor *monitor;

	monitor = idr_find(&hdev->adv_monitors_idr, handle);
	if (!monitor)
		return -EINVAL;

	return hci_remove_adv_monitor(hdev, monitor);
}
/* This function requires the caller holds hci_req_sync_lock */
int hci_remove_all_adv_monitor(struct hci_dev *hdev)
{
	struct adv_monitor *monitor;
	int id = 0;
	int status = 0;

	/* Walk the IDR in ascending handle order, removing each entry */
	while ((monitor = idr_get_next(&hdev->adv_monitors_idr, &id))) {
		status = hci_remove_adv_monitor(hdev, monitor);
		if (status)
			return status;

		id++;
	}

	return status;
}
/* Returns true when at least one advertisement monitor is registered.
 * This function requires the caller holds hdev->lock.
 */
bool hci_is_adv_monitoring(struct hci_dev *hdev)
{
	return !idr_is_empty(&hdev->adv_monitors_idr);
}
int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
{ if (msft_monitor_supported(hdev)) return HCI_ADV_MONITOR_EXT_MSFT;
/* This function requires the caller holds hdev->lock */ void hci_pend_le_list_del_init(struct hci_conn_params *param)
{ if (list_empty(¶m->action)) return;
if (params->conn) {
hci_conn_drop(params->conn);
hci_conn_put(params->conn);
}
list_del(¶ms->list);
kfree(params);
}
/* Remove the stored connection parameters for (@addr, @addr_type), if any.
 * This function requires the caller holds hdev->lock.
 */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params = hci_conn_params_lookup(hdev, addr,
								addr_type);

	if (!params)
		return;

	hci_conn_params_free(params);

	/* Parameter removal may change what passive scanning should do */
	hci_update_passive_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}
/* Drop all LE connection parameters whose auto-connect policy is DISABLED.
 * This function requires the caller holds hdev->lock.
 */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *entry, *next;

	list_for_each_entry_safe(entry, next, &hdev->le_conn_params, list) {
		if (entry->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;

		/* If trying to establish one time connection to disabled
		 * device, leave the params, but mark them as just once.
		 */
		if (entry->explicit_connect) {
			entry->auto_connect = HCI_AUTO_CONN_EXPLICIT;
			continue;
		}

		hci_conn_params_free(entry);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}
/* This function requires the caller holds hdev->lock */ staticvoid hci_conn_params_clear_all(struct hci_dev *hdev)
{ struct hci_conn_params *params, *tmp;
BT_DBG("All LE connection parameters were removed");
}
/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	bool use_static;

	/* Static random address applies when forced for debugging, when
	 * there is no public address, or when BR/EDR is disabled and a
	 * static address has been configured.
	 */
	use_static = hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		     !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		     (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		      bacmp(&hdev->static_addr, BDADDR_ANY));

	if (use_static) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}
/* Userspace has full control of this device. Do nothing. */ if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) return NOTIFY_DONE;
/* To avoid a potential race with hci_unregister_dev. */
hci_dev_hold(hdev);
switch (action) { case PM_HIBERNATION_PREPARE: case PM_SUSPEND_PREPARE:
ret = hci_suspend_dev(hdev); break; case PM_POST_HIBERNATION: case PM_POST_SUSPEND:
ret = hci_resume_dev(hdev); break;
}
/* Assume BR/EDR support until proven otherwise (such as * through reading supported features during init.
*/
hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
/* Devices that are marked for raw-only usage are unconfigured * and should not be included in normal operation.
*/ if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE))
hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
/* Mark Remote Wakeup connection flag as supported if driver has wakeup * callback.
*/ if (hdev->wakeup)
hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
/*
 * NOTE(review): trailing non-code residue from the source extraction —
 * a German website disclaimer. Wrapped in a comment so the file stays
 * compilable; English translation:
 *
 * "The information on this website has been compiled carefully to the
 * best of our knowledge. However, neither completeness, correctness,
 * nor quality of the provided information is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */