// SPDX-License-Identifier: GPL-2.0 OR MIT /* * Copyright 2016 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. *
*/
if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) return -EINVAL;
return 0;
}
/** * vmw_port_hb_out - Send the message payload either through the * high-bandwidth port if available, or through the backdoor otherwise. * @channel: The rpc channel. * @msg: NULL-terminated message. * @hb: Whether the high-bandwidth port is available. * * Return: The port status.
*/ staticunsignedlong vmw_port_hb_out(struct rpc_channel *channel, constchar *msg, bool hb)
{
u32 ebx, ecx; unsignedlong msg_len = strlen(msg);
/** * vmw_port_hb_in - Receive the message payload either through the * high-bandwidth port if available, or through the backdoor otherwise. * @channel: The rpc channel. * @reply: Pointer to buffer holding reply. * @reply_len: Length of the reply. * @hb: Whether the high-bandwidth port is available. * * Return: The port status.
*/ staticunsignedlong vmw_port_hb_in(struct rpc_channel *channel, char *reply, unsignedlong reply_len, bool hb)
{
u32 ebx, ecx, edx;
/* HB port not available. Retrieve the message 4 bytes at a time. */
ecx = MESSAGE_STATUS_SUCCESS << 16; while (reply_len) { unsignedint bytes = min_t(unsignedlong, reply_len, 4);
/** * vmw_recv_msg: Receives a message from the host * * Note: It is the caller's responsibility to call kfree() on msg. * * @channel: channel opened by vmw_open_channel * @msg: [OUT] message received from the host * @msg_len: message length
*/ staticint vmw_recv_msg(struct rpc_channel *channel, void **msg,
size_t *msg_len)
{
u32 ebx, ecx, edx; char *reply;
size_t reply_len; int retries = 0;
/** * vmw_host_get_guestinfo: Gets a GuestInfo parameter * * Gets the value of a GuestInfo.* parameter. The value returned will be in * a string, and it is up to the caller to post-process. * * @guest_info_param: Parameter to get, e.g. GuestInfo.svga.gl3 * @buffer: if NULL, *reply_len will contain reply size. * @length: size of the reply_buf. Set to size of reply upon return * * Returns: 0 on success
*/ int vmw_host_get_guestinfo(constchar *guest_info_param, char *buffer, size_t *length)
{ struct rpc_channel channel; char *msg, *reply = NULL;
size_t reply_len = 0;
if (!vmw_msg_enabled) return -ENODEV;
if (!guest_info_param || !length) return -EINVAL;
msg = kasprintf(GFP_KERNEL, "info-get %s", guest_info_param); if (!msg) {
DRM_ERROR("Cannot allocate memory to get guest info \"%s\".",
guest_info_param); return -ENOMEM;
}
if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM)) goto out_open;
vmw_close_channel(&channel); if (buffer && reply && reply_len > 0) { /* Remove reply code, which are the first 2 characters of * the reply
*/
reply_len = max(reply_len - 2, (size_t) 0);
reply_len = min(reply_len, *length);
if (reply_len > 0)
memcpy(buffer, reply + 2, reply_len);
}
*length = reply_len;
kfree(reply);
kfree(msg);
return 0;
out_msg:
vmw_close_channel(&channel);
kfree(reply);
out_open:
*length = 0;
kfree(msg);
DRM_ERROR("Failed to get guest info \"%s\".", guest_info_param);
return -EINVAL;
}
/** * vmw_host_printf: Sends a log message to the host * * @fmt: Regular printf format string and arguments * * Returns: 0 on success
*/
__printf(1, 2) int vmw_host_printf(constchar *fmt, ...)
{
va_list ap; struct rpc_channel channel; char *msg; char *log; int ret = 0;
if (!vmw_msg_enabled) return -ENODEV;
if (!fmt) return ret;
va_start(ap, fmt);
log = kvasprintf(GFP_KERNEL, fmt, ap);
va_end(ap); if (!log) {
DRM_ERROR("Cannot allocate memory for the log message.\n"); return -ENOMEM;
}
msg = kasprintf(GFP_KERNEL, "log %s", log); if (!msg) {
DRM_ERROR("Cannot allocate memory for host log message.\n");
kfree(log); return -ENOMEM;
}
if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM)) goto out_open;
/** * vmw_msg_ioctl: Sends and receveives a message to/from host from/to user-space * * Sends a message from user-space to host. * Can also receive a result from host and return that to user-space. * * @dev: Identifies the drm device. * @data: Pointer to the ioctl argument. * @file_priv: Identifies the caller. * Return: Zero on success, negative error code on error.
*/
/**
 * reset_ppn_array: Resets a PPN64 array to INVALID_PPN64 content
 *
 * @arr: Array to reset; must be non-NULL.
 * @size: Array length; must be non-zero.
 */
static inline void reset_ppn_array(PPN64 *arr, size_t size)
{
	size_t idx;

	BUG_ON(!arr || size == 0);

	for (idx = 0; idx < size; ++idx)
		arr[idx] = INVALID_PPN64;
}
/**
 * hypervisor_ppn_reset_all: Removes all mksGuestStat instance descriptors from
 * the hypervisor. All related pages should be subsequently unpinned or freed.
 *
 */
static inline void hypervisor_ppn_reset_all(void)
{
	vmware_hypercall1(VMW_PORT_CMD_MKSGS_RESET, 0);
}
/**
 * hypervisor_ppn_add: Adds a single mksGuestStat instance descriptor to the
 * hypervisor. Any related userspace pages should be pinned in advance.
 *
 * @pfn: Physical page number of the instance descriptor
 */
static inline void hypervisor_ppn_add(PPN64 pfn)
{
	vmware_hypercall1(VMW_PORT_CMD_MKSGS_ADD_PPN, (unsigned long)pfn);
}
/**
 * hypervisor_ppn_remove: Removes a single mksGuestStat instance descriptor from
 * the hypervisor. All related pages should be subsequently unpinned or freed.
 *
 * @pfn: Physical page number of the instance descriptor
 */
static inline void hypervisor_ppn_remove(PPN64 pfn)
{
	vmware_hypercall1(VMW_PORT_CMD_MKSGS_REMOVE_PPN, (unsigned long)pfn);
}
#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS)
/* Order of the total number of pages used for kernel-internal mksGuestStat; at least 2 */ #define MKSSTAT_KERNEL_PAGES_ORDER 2 /* Header to the text description of mksGuestStat instance descriptor */ #define MKSSTAT_KERNEL_DESCRIPTION "vmwgfx"
/** * mksstat_init_record_time: Initializes an MKSGuestStatCounterTime-based record * for the respective mksGuestStat index. * * @stat_idx: Index of the MKSGuestStatCounterTime-based mksGuestStat record. * @pstat: Pointer to array of MKSGuestStatCounterTime. * @pinfo: Pointer to array of MKSGuestStatInfoEntry. * @pstrs: Pointer to current end of the name/description sequence. * Return: Pointer to the new end of the names/description sequence.
*/
/** * mksstat_init_kern_id: Creates a single mksGuestStat instance descriptor and * kernel-internal counters. Adds PFN mapping to the hypervisor. * * Create a single mksGuestStat instance descriptor and corresponding structures * for all kernel-internal counters. The corresponding PFNs are mapped with the * hypervisor. * * @ppage: Output pointer to page containing the instance descriptor. * Return: Zero on success, negative error code on error.
*/
/** * vmw_mksstat_get_kern_slot: Acquires a slot for a single kernel-internal * mksGuestStat instance descriptor. * * Find a slot for a single kernel-internal mksGuestStat instance descriptor. * In case no such was already present, allocate a new one and set up a kernel- * internal mksGuestStat instance descriptor for the former. * * @pid: Process for which a slot is sought. * @dev_priv: Identifies the drm private device. * Return: Non-negative slot on success, negative error code on error.
*/
int vmw_mksstat_get_kern_slot(pid_t pid, struct vmw_private *dev_priv)
{ const size_t base = (u32)hash_32(pid, MKSSTAT_CAPACITY_LOG2);
size_t i;
for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_kern_pids); ++i) { const size_t slot = (i + base) % ARRAY_SIZE(dev_priv->mksstat_kern_pids);
/* Check if an instance descriptor for this pid is already present */ if (pid == (pid_t)atomic_read(&dev_priv->mksstat_kern_pids[slot])) return (int)slot;
/* Set up a new instance descriptor for this pid */ if (!atomic_cmpxchg(&dev_priv->mksstat_kern_pids[slot], 0, MKSSTAT_PID_RESERVED)) { constint ret = mksstat_init_kern_id(&dev_priv->mksstat_kern_pages[slot]);
if (!ret) { /* Reset top-timer tracking for this slot */
dev_priv->mksstat_kern_top_timer[slot] = MKSSTAT_KERN_COUNT;
/** * vmw_mksstat_cleanup_descriptor: Frees a single userspace-originating * mksGuestStat instance-descriptor page and unpins all related user pages. * * Unpin all user pages realated to this instance descriptor and free * the instance-descriptor page itself. * * @page: Page of the instance descriptor.
*/
for (i = 0; i < ARRAY_SIZE(pdesc->statPPNs) && pdesc->statPPNs[i] != INVALID_PPN64; ++i)
unpin_user_page(pfn_to_page(pdesc->statPPNs[i]));
for (i = 0; i < ARRAY_SIZE(pdesc->infoPPNs) && pdesc->infoPPNs[i] != INVALID_PPN64; ++i)
unpin_user_page(pfn_to_page(pdesc->infoPPNs[i]));
for (i = 0; i < ARRAY_SIZE(pdesc->strsPPNs) && pdesc->strsPPNs[i] != INVALID_PPN64; ++i)
unpin_user_page(pfn_to_page(pdesc->strsPPNs[i]));
__free_page(page);
}
/** * vmw_mksstat_remove_all: Resets all mksGuestStat instance descriptors * from the hypervisor. * * Discard all hypervisor PFN mappings, containing active mksGuestState instance * descriptors, unpin the related userspace pages and free the related kernel pages. * * @dev_priv: Identifies the drm private device. * Return: Zero on success, negative error code on error.
*/
int vmw_mksstat_remove_all(struct vmw_private *dev_priv)
{ int ret = 0;
size_t i;
/* Discard all PFN mappings with the hypervisor */
hypervisor_ppn_reset_all();
/* Discard all userspace-originating instance descriptors and unpin all related pages */ for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_user_pids); ++i) { const pid_t pid0 = (pid_t)atomic_read(&dev_priv->mksstat_user_pids[i]);
#if IS_ENABLED(CONFIG_DRM_VMWGFX_MKSSTATS) /* Discard all kernel-internal instance descriptors and free all related pages */ for (i = 0; i < ARRAY_SIZE(dev_priv->mksstat_kern_pids); ++i) { const pid_t pid0 = (pid_t)atomic_read(&dev_priv->mksstat_kern_pids[i]);
/** * vmw_mksstat_reset_ioctl: Resets all mksGuestStat instance descriptors * from the hypervisor. * * Discard all hypervisor PFN mappings, containing active mksGuestStat instance * descriptors, unpin the related userspace pages and free the related kernel pages. * * @dev: Identifies the drm device. * @data: Pointer to the ioctl argument. * @file_priv: Identifies the caller; unused. * Return: Zero on success, negative error code on error.
*/
/** * vmw_mksstat_add_ioctl: Creates a single userspace-originating mksGuestStat * instance descriptor and registers that with the hypervisor. * * Create a hypervisor PFN mapping, containing a single mksGuestStat instance * descriptor and pin the corresponding userspace pages. * * @dev: Identifies the drm device. * @data: Pointer to the ioctl argument. * @file_priv: Identifies the caller; unused. * Return: Zero on success, negative error code on error.
*/
/* Find an available slot in the mksGuestStats user array and reserve it */ for (slot = 0; slot < ARRAY_SIZE(dev_priv->mksstat_user_pids); ++slot) if (!atomic_cmpxchg(&dev_priv->mksstat_user_pids[slot], 0, MKSSTAT_PID_RESERVED)) break;
if (slot == ARRAY_SIZE(dev_priv->mksstat_user_pids)) return -ENOSPC;
BUG_ON(dev_priv->mksstat_user_pages[slot]);
/* Allocate statically-sized temp arrays for pages -- too big to keep in frame */
pages_stat = (struct page **)kmalloc_array(
ARRAY_SIZE(pdesc->statPPNs) +
ARRAY_SIZE(pdesc->infoPPNs) +
ARRAY_SIZE(pdesc->strsPPNs), sizeof(*pages_stat), GFP_KERNEL);
/* Pin mksGuestStat user pages and store those in the instance descriptor */
nr_pinned_stat = pin_user_pages_fast(arg->stat, num_pages_stat, FOLL_LONGTERM, pages_stat); if (num_pages_stat != nr_pinned_stat) goto err_pin_stat;
for (i = 0; i < num_pages_stat; ++i)
pdesc->statPPNs[i] = page_to_pfn(pages_stat[i]);
for (i = 0; i < num_pages_strs; ++i)
pdesc->strsPPNs[i] = page_to_pfn(pages_strs[i]);
/* Send the descriptor to the host via a hypervisor call. The mksGuestStat pages will remain in use until the user requests a matching remove stats
or a stats reset occurs. */
hypervisor_ppn_add((PPN64)page_to_pfn(page));
err_pin_strs: if (nr_pinned_strs > 0)
unpin_user_pages(pages_strs, nr_pinned_strs);
err_pin_info: if (nr_pinned_info > 0)
unpin_user_pages(pages_info, nr_pinned_info);
err_pin_stat: if (nr_pinned_stat > 0)
unpin_user_pages(pages_stat, nr_pinned_stat);
err_nomem:
atomic_set(&dev_priv->mksstat_user_pids[slot], 0); if (page)
__free_page(page);
kfree(pages_stat);
return ret_err;
}
/** * vmw_mksstat_remove_ioctl: Removes a single userspace-originating mksGuestStat * instance descriptor from the hypervisor. * * Discard a hypervisor PFN mapping, containing a single mksGuestStat instance * descriptor and unpin the corresponding userspace pages. * * @dev: Identifies the drm device. * @data: Pointer to the ioctl argument. * @file_priv: Identifies the caller; unused. * Return: Zero on success, negative error code on error.
*/
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.