/*
 * Copyright 2023 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <rm/rpc.h>
void
nvkm_gsp_mem_dtor(struct nvkm_gsp_mem *mem)
{ if (mem->data) { /* * Poison the buffer to catch any unexpected access from * GSP-RM if the buffer was prematurely freed.
*/
memset(mem->data, 0xFF, mem->size);
/** * nvkm_gsp_mem_ctor - constructor for nvkm_gsp_mem objects * @gsp: gsp pointer * @size: number of bytes to allocate * @mem: nvkm_gsp_mem object to initialize * * Allocates a block of memory for use with GSP. * * This memory block can potentially out-live the driver's remove() callback, * so we take a device reference to ensure its lifetime. The reference is * dropped in the destructor.
*/ int
nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, size_t size, struct nvkm_gsp_mem *mem)
{
mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL); if (WARN_ON(!mem->data)) return -ENOMEM;
/* An arbitrary limit to the length of a registry key */
#define REGISTRY_MAX_KEY_LENGTH 64

/**
 * struct registry_list_entry - linked list member for a registry key/value
 * @head: list_head struct
 * @type: dword, binary, or string
 * @klen: the length of name of the key
 * @key: the key name
 * @vlen: the length of the value
 * @dword: the data, if REGISTRY_TABLE_ENTRY_TYPE_DWORD
 * @binary: the data, if TYPE_BINARY or TYPE_STRING
 *
 * Every registry key/value is represented internally by this struct.
 *
 * Type DWORD is a simple 32-bit unsigned integer, and its value is stored in
 * @dword.
 *
 * Types BINARY and STRING are variable-length binary blobs. The only real
 * difference between BINARY and STRING is that STRING is null-terminated and
 * is expected to contain only printable characters.
 *
 * Note: it is technically possible to have multiple keys with the same name
 * but different types, but this is not useful since GSP-RM expects keys to
 * have only one specific type.
 */
struct registry_list_entry {
	struct list_head head;
	enum registry_type type;
	size_t klen;
	char key[REGISTRY_MAX_KEY_LENGTH];
	size_t vlen;
	u32 dword;			/* TYPE_DWORD */
	u8 binary[] __counted_by(vlen);	/* TYPE_BINARY or TYPE_STRING */
};
/** * add_registry -- adds a registry entry * @gsp: gsp pointer * @key: name of the registry key * @type: type of data * @data: pointer to value * @length: size of data, in bytes * * Adds a registry key/value pair to the registry database. * * This function collects the registry information in a linked list. After * all registry keys have been added, build_registry() is used to create the * RPC data structure. * * registry_rpc_size is a running total of the size of all registry keys. * It's used to avoid an O(n) calculation of the size when the RPC is built. * * Returns 0 on success, or negative error code on error.
*/ staticint add_registry(struct nvkm_gsp *gsp, constchar *key, enum registry_type type, constvoid *data, size_t length)
{ struct registry_list_entry *reg; const size_t nlen = strnlen(key, REGISTRY_MAX_KEY_LENGTH) + 1;
size_t alloc_size; /* extra bytes to alloc for binary or string value */
if (nlen > REGISTRY_MAX_KEY_LENGTH) return -EINVAL;
/** * build_registry -- create the registry RPC data * @gsp: gsp pointer * @registry: pointer to the RPC payload to fill * * After all registry key/value pairs have been added, call this function to * build the RPC. * * The registry RPC looks like this: * * +-----------------+ * |NvU32 size; | * |NvU32 numEntries;| * +-----------------+ * +----------------------------------------+ * |PACKED_REGISTRY_ENTRY | * +----------------------------------------+ * |Null-terminated key (string) for entry 0| * +----------------------------------------+ * |Binary/string data value for entry 0 | (only if necessary) * +----------------------------------------+ * * +----------------------------------------+ * |PACKED_REGISTRY_ENTRY | * +----------------------------------------+ * |Null-terminated key (string) for entry 1| * +----------------------------------------+ * |Binary/string data value for entry 1 | (only if necessary) * +----------------------------------------+ * ... (and so on, one copy for each entry) * * * The 'data' field of an entry is either a 32-bit integer (for type DWORD) * or an offset into the PACKED_REGISTRY_TABLE (for types BINARY and STRING). * * All memory allocated by add_registry() is released.
*/ staticvoid build_registry(struct nvkm_gsp *gsp, PACKED_REGISTRY_TABLE *registry)
{ struct registry_list_entry *reg, *n;
size_t str_offset; unsignedint i = 0;
/* Append the key name to the table */
registry->entries[i].nameOffset = str_offset;
memcpy((void *)registry + str_offset, reg->key, reg->klen);
str_offset += reg->klen;
switch (reg->type) { case REGISTRY_TABLE_ENTRY_TYPE_DWORD:
registry->entries[i].data = reg->dword; break; case REGISTRY_TABLE_ENTRY_TYPE_BINARY: case REGISTRY_TABLE_ENTRY_TYPE_STRING: /* If the type is binary or string, also append the value */
memcpy((void *)registry + str_offset, reg->binary, reg->vlen);
registry->entries[i].data = str_offset;
str_offset += reg->vlen; break; default: break;
}
i++;
list_del(®->head);
kfree(reg);
}
/* Double-check that we calculated the sizes correctly */
WARN_ON(gsp->registry_rpc_size != str_offset);
registry->size = gsp->registry_rpc_size;
}
/**
 * clean_registry -- clean up registry memory in case of error
 * @gsp: gsp pointer
 *
 * Call this function to clean up all memory allocated by add_registry()
 * in case of error and build_registry() is not called.
 *
 * NOTE(review): gsp->registry_rpc_size (the running total maintained by
 * add_registry()) is not reset here; confirm that callers reinitialize it
 * before reusing the registry list.
 */
static void
clean_registry(struct nvkm_gsp *gsp)
{
	struct registry_list_entry *reg, *n;

	/* Safe iteration: each entry is unlinked and freed as we walk */
	list_for_each_entry_safe(reg, n, &gsp->registry_list, head) {
		list_del(&reg->head);
		kfree(reg);
	}
}
/*
 * Optional module parameter: extra GSP-RM registry keys supplied on the
 * kernel command line, parsed by r535_gsp_rpc_set_registry().
 */
static char *NVreg_RegistryDwords;
module_param(NVreg_RegistryDwords, charp, 0400);
MODULE_PARM_DESC(NVreg_RegistryDwords,
		 "A semicolon-separated list of key=integer pairs of GSP-RM registry keys");
/* * r535_registry_entries - required registry entries for GSP-RM * * This array lists registry entries that are required for GSP-RM to * function correctly. * * RMSecBusResetEnable - enables PCI secondary bus reset * RMForcePcieConfigSave - forces GSP-RM to preserve PCI configuration * registers on any PCI reset. * RMDevidCheckIgnore - allows GSP-RM to boot even if the PCI dev ID * is not found in the internal product name database.
*/ staticconststruct nv_gsp_registry_entries r535_registry_entries[] = {
{ "RMSecBusResetEnable", 1 },
{ "RMForcePcieConfigSave", 1 },
{ "RMDevidCheckIgnore", 1 },
}; #define NV_GSP_REG_NUM_ENTRIES ARRAY_SIZE(r535_registry_entries)
/**
 * strip - strips all characters in 'reject' from 's'
 * @s: string to strip
 * @reject: string of characters to remove
 *
 * 's' is modified in place.
 *
 * Returns the number of characters written to 's', which includes the
 * NUL terminator (i.e. strlen() of the result, plus one).
 */
static size_t strip(char *s, const char *reject)
{
	char *dst = s, *src = s;
	size_t length = 0;
	char c;

	do {
		/* Skip over any run of rejected characters */
		while ((c = *src) && strchr(reject, c))
			src++;

		/* Copy the surviving character; the final copy is the NUL */
		*dst++ = c = *src++;
		length++;
	} while (c);

	return length;
}
/** * r535_gsp_rpc_set_registry - build registry RPC and call GSP-RM * @gsp: gsp pointer * * The GSP-RM registry is a set of key/value pairs that configure some aspects * of GSP-RM. The keys are strings, and the values are 32-bit integers. * * The registry is built from a combination of a static hard-coded list (see * above) and entries passed on the driver's command line.
*/ staticint
r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp)
{
PACKED_REGISTRY_TABLE *rpc; unsignedint i; int ret;
for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++) {
ret = add_registry_num(gsp, r535_registry_entries[i].name,
r535_registry_entries[i].value); if (ret) goto fail;
}
/* * The NVreg_RegistryDwords parameter is a string of key=value * pairs separated by semicolons. We need to extract and trim each * substring, and then parse the substring to extract the key and * value.
*/ if (NVreg_RegistryDwords) { char *p = kstrdup(NVreg_RegistryDwords, GFP_KERNEL); char *start, *next = p, *equal;
if (!p) {
ret = -ENOMEM; goto fail;
}
/* Remove any whitespace from the parameter string */
strip(p, " \t\n");
while ((start = strsep(&next, ";"))) { long value;
/* Truncate the key=value string to just key */
*equal = 0;
ret = kstrtol(equal + 1, 0, &value); if (!ret) {
ret = add_registry_num(gsp, start, value);
} else { /* Not a number, so treat it as a string */
ret = add_registry_string(gsp, start, equal + 1);
}
do {
status = acpi_get_next_object(ACPI_TYPE_DEVICE, handle, iter, &iter); if (ACPI_FAILURE(status) || !iter) return;
status = acpi_evaluate_integer(iter, "_ADR", NULL, &value); if (ACPI_FAILURE(status) || value != id) continue;
handle_mux = iter;
} while (!handle_mux);
if (!handle_mux) return;
/* I -think- 0 means "acquire" according to nvidia's driver source */
input.pointer->integer.type = ACPI_TYPE_INTEGER;
input.pointer->integer.value = 0;
status = acpi_evaluate_integer(handle_mux, "MXDM", &input, &value); if (ACPI_SUCCESS(status)) {
mode->acpiId = id;
mode->mode = value;
mode->status = 0;
}
status = acpi_evaluate_integer(handle_mux, "MXDS", &input, &value); if (ACPI_SUCCESS(status)) {
part->acpiId = id;
part->mode = value;
part->status = 0;
}
}
if (!handled) {
nvkm_error(subdev, "event: cid 0x%08x event 0x%08x not found!\n",
msg->hClient, msg->hEvent);
}
} else {
nvkm_error(subdev, "event: cid 0x%08x not found!\n", msg->hClient);
}
mutex_unlock(&gsp->client_id.mutex); return 0;
}
/** * r535_gsp_msg_run_cpu_sequencer() -- process I/O commands from the GSP * @priv: gsp pointer * @fn: function number (ignored) * @repv: pointer to libos print RPC * @repc: message size * * The GSP sequencer is a list of I/O commands that the GSP can send to * the driver to perform for various purposes. The most common usage is to * perform a special mid-initialization reset.
*/ staticint
r535_gsp_msg_run_cpu_sequencer(void *priv, u32 fn, void *repv, u32 repc)
{ struct nvkm_gsp *gsp = priv; struct nvkm_subdev *subdev = &gsp->subdev; struct nvkm_device *device = subdev->device;
rpc_run_cpu_sequencer_v17_00 *seq = repv; int ptr = 0, ret;
/* * If GSP-RM load fails, then the GSP nvkm object will be deleted, the logging * debugfs entries will be deleted, and it will not be possible to debug the * load failure. The keep_gsp_logging parameter tells Nouveau to copy the * logging buffers to new debugfs entries, and these entries are retained * until the driver unloads.
*/ staticbool keep_gsp_logging;
module_param(keep_gsp_logging, bool, 0444);
MODULE_PARM_DESC(keep_gsp_logging, "Migrate the GSP-RM logging debugfs entries upon exit");
/*
 * GSP-RM uses a pseudo-class mechanism to define a variety of per-"engine"
 * data structures, and each engine has a "class ID" generated by a
 * pre-processor. This is the class ID for the PMU.
 */
#define NV_GSP_MSG_EVENT_UCODE_LIBOS_CLASS_PMU 0xf3d722
/**
 * struct rpc_ucode_libos_print_v1e_08 - RPC payload for libos print buffers
 * @ucode_eng_desc: the engine descriptor
 * @libos_print_buf_size: the size of the libos_print_buf[]
 * @libos_print_buf: the actual buffer
 *
 * The engine descriptor is divided into 31:8 "class ID" and 7:0 "instance
 * ID". We only care about messages from PMU.
 */
struct rpc_ucode_libos_print_v1e_08 {
	u32 ucode_eng_desc;
	u32 libos_print_buf_size;
	u8 libos_print_buf[];	/* flexible array; length is libos_print_buf_size */
};
/** * r535_gsp_msg_libos_print - capture log message from the PMU * @priv: gsp pointer * @fn: function number (ignored) * @repv: pointer to libos print RPC * @repc: message size * * Called when we receive a UCODE_LIBOS_PRINT event RPC from GSP-RM. This RPC * contains the contents of the libos print buffer from PMU. It is typically * only written to when PMU encounters an error. * * Technically this RPC can be used to pass print buffers from any number of * GSP-RM engines, but we only expect to receive them for the PMU. * * For the PMU, the buffer is 4K in size and the RPC always contains the full * contents.
*/ staticint
r535_gsp_msg_libos_print(void *priv, u32 fn, void *repv, u32 repc)
{ struct nvkm_gsp *gsp = priv; struct nvkm_subdev *subdev = &gsp->subdev; struct rpc_ucode_libos_print_v1e_08 *rpc = repv; unsignedintclass = rpc->ucode_eng_desc >> 8;
nvkm_debug(subdev, "received libos print from class 0x%x for %u bytes\n", class, rpc->libos_print_buf_size);
if (class != NV_GSP_MSG_EVENT_UCODE_LIBOS_CLASS_PMU) {
nvkm_warn(subdev, "received libos print from unknown class 0x%x\n", class); return -ENOMSG;
}
if (rpc->libos_print_buf_size > GSP_PAGE_SIZE) {
nvkm_error(subdev, "libos print is too large (%u bytes)\n",
rpc->libos_print_buf_size); return -E2BIG;
}
/** * r535_gsp_libos_debugfs_init - create logging debugfs entries * @gsp: gsp pointer * * Create the debugfs entries. This exposes the log buffers to userspace so * that an external tool can parse it. * * The 'logpmu' contains exception dumps from the PMU. It is written via an * RPC sent from GSP-RM and must be only 4KB. We create it here because it's * only useful if there is a debugfs entry to expose it. If we get the PMU * logging RPC and there is no debugfs entry, the RPC is just ignored. * * The blob_init, blob_rm, and blob_pmu objects can't be transient * because debugfs_create_blob doesn't copy them. * * NOTE: OpenRM loads the logging elf image and prints the log messages * in real-time. We may add that capability in the future, but that * requires loading ELF images that are not distributed with the driver and * adding the parsing code to Nouveau. * * Ideally, this should be part of nouveau_debugfs_init(), but that function * is called too late. We really want to create these debugfs entries before * r535_gsp_booter_load() is called, so that if GSP-RM fails to initialize, * there could still be a log to capture.
*/ staticvoid
r535_gsp_libos_debugfs_init(struct nvkm_gsp *gsp)
{ struct device *dev = gsp->subdev.device->dev;
/* Create a new debugfs directory with a name unique to this GPU. */
gsp->debugfs.parent = debugfs_create_dir(dev_name(dev), nouveau_debugfs_root); if (IS_ERR(gsp->debugfs.parent)) {
nvkm_error(&gsp->subdev, "failed to create %s debugfs root\n", dev_name(dev)); return;
}
gsp->debugfs.init = create_debugfs(gsp, "loginit", &gsp->blob_init); if (!gsp->debugfs.init) goto error;
gsp->debugfs.intr = create_debugfs(gsp, "logintr", &gsp->blob_intr); if (!gsp->debugfs.intr) goto error;
gsp->debugfs.rm = create_debugfs(gsp, "logrm", &gsp->blob_rm); if (!gsp->debugfs.rm) goto error;
/* * Since the PMU buffer is copied from an RPC, it doesn't need to be * a DMA buffer.
*/
gsp->blob_pmu.size = GSP_PAGE_SIZE;
gsp->blob_pmu.data = kzalloc(gsp->blob_pmu.size, GFP_KERNEL); if (!gsp->blob_pmu.data) goto error;
staticinline u64
r535_gsp_libos_id8(constchar *name)
{
u64 id = 0;
for (int i = 0; i < sizeof(id) && *name; i++, name++)
id = (id << 8) | *name;
return id;
}
/**
 * create_pte_array() - creates a PTE array of a physically contiguous buffer
 * @ptes: pointer to the array
 * @addr: base address of physically contiguous buffer (GSP_PAGE_SIZE aligned)
 * @size: size of the buffer
 *
 * GSP-RM sometimes expects physically-contiguous buffers to have an array of
 * "PTEs" for each page in that buffer. Although in theory that allows for
 * the buffer to be physically discontiguous, GSP-RM does not currently
 * support that.
 *
 * In this case, the PTEs are DMA addresses of each page of the buffer. Since
 * the buffer is physically contiguous, calculating all the PTEs is simple
 * math.
 *
 * See memdescGetPhysAddrsForGpu()
 */
static void
create_pte_array(u64 *ptes, dma_addr_t addr, size_t size)
{
	unsigned int num_pages = DIV_ROUND_UP_ULL(size, GSP_PAGE_SIZE);
	unsigned int i;

	/*
	 * Widen the index before shifting so the page offset cannot overflow
	 * 32 bits for buffers of 4 GiB or larger.
	 */
	for (i = 0; i < num_pages; i++)
		ptes[i] = (u64)addr + ((u64)i << GSP_PAGE_SHIFT);
}
/** * r535_gsp_libos_init() -- create the libos arguments structure * @gsp: gsp pointer * * The logging buffers are byte queues that contain encoded printf-like * messages from GSP-RM. They need to be decoded by a special application * that can parse the buffers. * * The 'loginit' buffer contains logs from early GSP-RM init and * exception dumps. The 'logrm' buffer contains the subsequent logs. Both are * written to directly by GSP-RM and can be any multiple of GSP_PAGE_SIZE. * * The physical address map for the log buffer is stored in the buffer * itself, starting with offset 1. Offset 0 contains the "put" pointer (pp). * Initially, pp is equal to 0. If the buffer has valid logging data in it, * then pp points to index into the buffer where the next logging entry will * be written. Therefore, the logging data is valid if: * 1 <= pp < sizeof(buffer)/sizeof(u64) * * The GSP only understands 4K pages (GSP_PAGE_SIZE), so even if the kernel is * configured for a larger page size (e.g. 64K pages), we need to give * the GSP an array of 4K pages. Fortunately, since the buffer is * physically contiguous, it's simple math to calculate the addresses. * * The buffers must be a multiple of GSP_PAGE_SIZE. GSP-RM also currently * ignores the @kind field for LOGINIT, LOGINTR, and LOGRM, but expects the * buffers to be physically contiguous anyway. * * The memory allocated for the arguments must remain until the GSP sends the * init_done RPC. * * See _kgspInitLibosLoggingStructures (allocates memory for buffers) * See kgspSetupLibosInitArgs_IMPL (creates pLibosInitArgs[] array)
*/ staticint
r535_gsp_libos_init(struct nvkm_gsp *gsp)
{
LibosMemoryRegionInitArgument *args; int ret;
ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->libos); if (ret) return ret;
args = gsp->libos.data;
ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->loginit); if (ret) return ret;
/** * nvkm_gsp_radix3_sg - build a radix3 table from a S/G list * @gsp: gsp pointer * @sgt: S/G list to traverse * @size: size of the image, in bytes * @rx3: radix3 array to update * * The GSP uses a three-level page table, called radix3, to map the firmware. * Each 64-bit "pointer" in the table is either the bus address of an entry in * the next table (for levels 0 and 1) or the bus address of the next page in * the GSP firmware image itself. * * Level 0 contains a single entry in one page that points to the first page * of level 1. * * Level 1, since it's also only one page in size, contains up to 512 entries, * one for each page in Level 2. * * Level 2 can be up to 512 pages in size, and each of those entries points to * the next page of the firmware image. Since there can be up to 512*512 * pages, that limits the size of the firmware to 512*512*GSP_PAGE_SIZE = 1GB. * * Internally, the GSP has its window into system memory, but the base * physical address of the aperture is not 0. In fact, it varies depending on * the GPU architecture. Since the GPU is a PCI device, this window is * accessed via DMA and is therefore bound by IOMMU translation. The end * result is that GSP-RM must translate the bus addresses in the table to GSP * physical addresses. All this should happen transparently. * * Returns 0 on success, or negative error code * * See kgspCreateRadix3_IMPL
*/ staticint
nvkm_gsp_radix3_sg(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size, struct nvkm_gsp_radix3 *rx3)
{ struct sg_dma_page_iter sg_dma_iter; struct scatterlist *sg;
size_t bufsize;
u64 *pte; int ret, i, page_idx = 0;
ret = nvkm_gsp_mem_ctor(gsp, GSP_PAGE_SIZE, &rx3->lvl0); if (ret) return ret;
ret = nvkm_gsp_mem_ctor(gsp, GSP_PAGE_SIZE, &rx3->lvl1); if (ret) goto lvl1_fail;
// Write the bus address of level 1 to level 0
pte = rx3->lvl0.data;
*pte = rx3->lvl1.addr;
// Write the bus address of each page in level 2 to level 1
pte = rx3->lvl1.data;
for_each_sgtable_dma_page(&rx3->lvl2, &sg_dma_iter, 0)
*pte++ = sg_page_iter_dma_address(&sg_dma_iter);
// Finally, write the bus address of each page in sgt to level 2
for_each_sgtable_sg(&rx3->lvl2, sg, i) { void *sgl_end;
ret = rm->api->fbsr->suspend(gsp); if (ret) {
nvkm_gsp_mem_dtor(&gsp->sr.meta);
nvkm_gsp_radix3_dtor(gsp, &gsp->sr.radix3);
nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.sgt); return ret;
}
/* * TODO: Debug the GSP firmware / RPC handling to find out why * without this Turing (but none of the other architectures) * ends up resetting all channels after resume.
*/
msleep(50);
}
ret = r535_gsp_rpc_unloading_guest_driver(gsp, suspend); if (WARN_ON(ret)) return ret;
nvkm_msec(gsp->subdev.device, 2000, if (nvkm_falcon_rd32(&gsp->falcon, 0x040) == 0x80000000) break;
);
gsp->running = false; return 0;
}
int
r535_gsp_init(struct nvkm_gsp *gsp)
{ int ret;
for (int i = 0; i < ehdr->e_shnum; i++, shdr++) { if (!strcmp(&names[shdr->sh_name], name)) {
*pdata = &img[shdr->sh_offset];
*psize = shdr->sh_size; return 0;
}
}
nvkm_error(&gsp->subdev, "section '%s' not found\n", name); return -ENOENT;
}
#ifdef CONFIG_DEBUG_FS

/*
 * Retained copy of the GSP-RM logging buffers for one GPU, kept alive after
 * the nvkm_gsp object is destroyed so the logs remain visible in debugfs.
 */
struct r535_gsp_log {
	struct nvif_log log;

	/*
	 * Logging buffers in debugfs. The wrapper objects need to remain
	 * in memory until the dentry is deleted.
	 */
	struct dentry *debugfs_logging_dir;	/* per-GPU debugfs directory */
	struct debugfs_blob_wrapper blob_init;
	struct debugfs_blob_wrapper blob_intr;
	struct debugfs_blob_wrapper blob_rm;
	struct debugfs_blob_wrapper blob_pmu;
};
/** * r535_debugfs_shutdown - delete GSP-RM logging buffers for one GPU * @_log: nvif_log struct for this GPU * * Called when the driver is shutting down, to clean up the retained GSP-RM * logging buffers.
*/ staticvoid r535_debugfs_shutdown(struct nvif_log *_log)
{ struct r535_gsp_log *log = container_of(_log, struct r535_gsp_log, log);
/* We also need to delete the list object */
kfree(log);
}
/** * is_empty - return true if the logging buffer was never written to * @b: blob wrapper with ->data field pointing to logging buffer * * The first 64-bit field of loginit, and logintr, and logrm is the 'put' * pointer, and it is initialized to 0. It's a dword-based index into the * circular buffer, indicating where the next printf write will be made. * * If the pointer is still 0 when GSP-RM is shut down, that means that the * buffer was never written to, so it can be ignored. * * This test also works for logpmu, even though it doesn't have a put pointer.
*/ staticbool is_empty(conststruct debugfs_blob_wrapper *b)
{
u64 *put = b->data;
return put ? (*put == 0) : true;
}
/** * r535_gsp_copy_log - preserve the logging buffers in a blob * @parent: the top-level dentry for this GPU * @name: name of debugfs entry to create * @s: original wrapper object to copy from * @t: new wrapper object to copy to * * When GSP shuts down, the nvkm_gsp object and all its memory is deleted. * To preserve the logging buffers, the buffers need to be copied, but only * if they actually have data.
*/ staticint r535_gsp_copy_log(struct dentry *parent, constchar *name, conststruct debugfs_blob_wrapper *s, struct debugfs_blob_wrapper *t)
{ struct dentry *dent; void *p;
if (is_empty(s)) return 0;
/* The original buffers will be deleted */
p = kmemdup(s->data, s->size, GFP_KERNEL); if (!p) return -ENOMEM;
/**
 * r535_gsp_retain_logging - copy logging buffers to new debugfs root
 * @gsp: gsp pointer
 *
 * If keep_gsp_logging is enabled, then we want to preserve the GSP-RM logging
 * buffers and their debugfs entries, but all those objects would normally be
 * deleted if GSP-RM fails to load.
 *
 * To preserve the logging buffers, we need to:
 *
 * 1) Allocate new buffers and copy the logs into them, so that the original
 * DMA buffers can be released.
 *
 * 2) Preserve the directory. We don't need to save the individual dentries
 * because deleting the parent removes its children.
 *
 * If anything fails in this process, then all the dentries need to be
 * deleted. We don't need to deallocate the original logging buffers because
 * the caller will do that regardless.
 */
static void
r535_gsp_retain_logging(struct nvkm_gsp *gsp)
{
	struct device *dev = gsp->subdev.device->dev;
	struct r535_gsp_log *log = NULL;
	int ret;

	if (!keep_gsp_logging || !gsp->debugfs.parent) {
		/* Nothing to do */
		goto exit;
	}

	/* Check to make sure at least one buffer has data. */
	if (is_empty(&gsp->blob_init) && is_empty(&gsp->blob_intr) &&
	    is_empty(&gsp->blob_rm) && is_empty(&gsp->blob_pmu)) {
		nvkm_warn(&gsp->subdev, "all logging buffers are empty\n");
		goto exit;
	}

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto error;

	/*
	 * Since the nvkm_gsp object is going away, the debugfs_blob_wrapper
	 * objects are also being deleted, which means the dentries will no
	 * longer be valid. Delete the existing entries so that we can create
	 * new ones with the same name.
	 */
	debugfs_remove(gsp->debugfs.init);
	debugfs_remove(gsp->debugfs.intr);
	debugfs_remove(gsp->debugfs.rm);
	debugfs_remove(gsp->debugfs.pmu);

	ret = r535_gsp_copy_log(gsp->debugfs.parent, "loginit", &gsp->blob_init, &log->blob_init);
	if (ret)
		goto error;

	ret = r535_gsp_copy_log(gsp->debugfs.parent, "logintr", &gsp->blob_intr, &log->blob_intr);
	if (ret)
		goto error;

	ret = r535_gsp_copy_log(gsp->debugfs.parent, "logrm", &gsp->blob_rm, &log->blob_rm);
	if (ret)
		goto error;

	ret = r535_gsp_copy_log(gsp->debugfs.parent, "logpmu", &gsp->blob_pmu, &log->blob_pmu);
	if (ret)
		goto error;

	/* The nvkm_gsp object is going away, so save the dentry */
	log->debugfs_logging_dir = gsp->debugfs.parent;

	/*
	 * NOTE(review): 'log' is intentionally retained here, but nothing
	 * visible in this file registers it (e.g. hooking up
	 * r535_debugfs_shutdown or adding log->log to a shutdown list), so
	 * it appears it would never be freed on module unload — confirm the
	 * registration step was not lost.
	 */

	nvkm_warn(&gsp->subdev, "logging buffers migrated to /sys/kernel/debug/nouveau/%s\n",
		  dev_name(dev));

	return;

error:
	nvkm_warn(&gsp->subdev, "failed to migrate logging buffers\n");

exit:
	debugfs_remove(gsp->debugfs.parent);

	if (log) {
		kfree(log->blob_init.data);
		kfree(log->blob_intr.data);
		kfree(log->blob_rm.data);
		kfree(log->blob_pmu.data);
		kfree(log);
	}
}
#endif
/**
 * r535_gsp_libos_debugfs_fini - cleanup/retain log buffers on shutdown
 * @gsp: gsp pointer
 *
 * If the log buffers are exposed via debugfs, the data for those entries
 * needs to be cleaned up when the GSP device shuts down.
 */
static void
r535_gsp_libos_debugfs_fini(struct nvkm_gsp __maybe_unused *gsp)
{
#ifdef CONFIG_DEBUG_FS
	/* Optionally migrate the logs before their buffers are torn down */
	r535_gsp_retain_logging(gsp);

	/*
	 * Unlike the other buffers, the PMU blob is a kmalloc'd buffer that
	 * exists only if the debugfs entries were created.
	 */
	kfree(gsp->blob_pmu.data);
	gsp->blob_pmu.data = NULL;
#endif
}
/*
 * (Appended website boilerplate, preserved as a comment so the file remains
 * well-formed C:)
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig
 * zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereitgestellten Informationen zugesichert.
 *
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */