/* Map a v3d_queue enum value to a human-readable name (used for debug
 * output and tracing). Returns "UNKNOWN" for any value not covered by
 * the switch, so callers never get a NULL string.
 */
static inline char *v3d_queue_to_string(enum v3d_queue queue)
{
	switch (queue) {
	case V3D_BIN:
		return "bin";
	case V3D_RENDER:
		return "render";
	case V3D_TFU:
		return "tfu";
	case V3D_CSD:
		return "csd";
	case V3D_CACHE_CLEAN:
		return "cache_clean";
	case V3D_CPU:
		return "cpu";
	}
	return "UNKNOWN";
}
/*
 * This seqcount protects access to the GPU stats variables of this
 * structure: while the stats are being read, IRQs can fire and update
 * them, so readers must retry under the seqcount.
 * NOTE(review): the opening of this struct is outside this chunk.
 */
seqcount_t lock;
};
/* Stores the GPU stats for this queue in the global context.
 * NOTE(review): the opening of this struct is outside this chunk.
 */
struct v3d_stats stats;
};
/* Performance monitor object. The perfmon lifetime is controlled by userspace
 * using perfmon related ioctls. A perfmon can be attached to a submit_cl
 * request, and when this is the case, HW perf counters will be activated just
 * before the submit_cl is submitted to the GPU and disabled when the job is
 * done. This way, only events related to a specific job will be counted.
 */
struct v3d_perfmon {
	/* Tracks the number of users of the perfmon, when this counter reaches
	 * zero the perfmon is destroyed.
	 */
	refcount_t refcnt;

	/* Protects perfmon stop, as it can be invoked from multiple places. */
	struct mutex lock;

	/* Number of counters activated in this perfmon instance
	 * (should be less than DRM_V3D_MAX_PERF_COUNTERS).
	 */
	u8 ncounters;

	/* Events counted by the HW perf counters. */
	u8 counters[DRM_V3D_MAX_PERF_COUNTERS];

	/* Storage for counter values. Counters are incremented by the
	 * HW perf counter values every time the perfmon is attached
	 * to a GPU job. This way, perfmon users don't have to
	 * retrieve the results after each job if they want to track
	 * events covering several submissions. Note that counter
	 * values can't be reset, but you can fake a reset by
	 * destroying the perfmon and creating a new one.
	 */
	u64 values[] __counted_by(ncounters);
};
/* NOTE(review): the fields below belong to a device-level struct whose
 * opening brace is outside this chunk — confirm against the full file.
 */

/* Virtual and DMA addresses of the single shared page table. */
volatile u32 *pt;
dma_addr_t pt_paddr;

/* Virtual and DMA addresses of the MMU's scratch page. When
 * a read or write is invalid in the MMU, it will be
 * redirected here.
 */
void *mmu_scratch;
dma_addr_t mmu_scratch_paddr;

/* Number of virtual address bits from V3D to the MMU. */
int va_width;

/* Number of V3D cores. */
u32 cores;

/* Allocator managing the address space. All units are in
 * number of pages.
 */
struct drm_mm mm;
spinlock_t mm_lock;

/* tmpfs instance used for shmem backed objects. */
struct vfsmount *gemfs;

/* Spinlock used to synchronize the overflow memory
 * management against bin job submission.
 */
spinlock_t job_lock;

/* Used to track the active perfmon if any. */
struct v3d_perfmon *active_perfmon;

/* Protects bo_stats. */
struct mutex bo_lock;

/* Lock taken when resetting the GPU, to keep multiple
 * processes from trying to park the scheduler threads and
 * reset at once.
 */
struct mutex reset_lock;

/* Lock taken when creating and pushing the GPU scheduler
 * jobs, to keep the sched-fence seqnos in order.
 */
struct mutex sched_lock;

/* Lock taken during a cache clean and when initiating an L2
 * flush, to keep L2 flushes from interfering with the
 * synchronous L2 cleans.
 */
struct mutex cache_clean_lock;

/* To support a performance analysis tool in user space, we require
 * a single, globally configured performance monitor (perfmon) for
 * all jobs.
 */
struct v3d_perfmon *global_perfmon;
};
/**
 * __wait_for - magic wait macro
 *
 * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
 * important that we check the condition again after having timed out, since the
 * timeout could be due to preemption or similar and we've never had a chance to
 * check the condition before the timeout.
 *
 * OP runs on every iteration before the condition check; COND is the exit
 * condition; US is the timeout in microseconds; Wmin/Wmax bound the
 * exponentially growing usleep_range() interval. Evaluates to 0 on success
 * or -ETIMEDOUT if COND never became true within the timeout.
 */
#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
	const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
	long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
	int ret__; \
	might_sleep(); \
	for (;;) { \
		const bool expired__ = ktime_after(ktime_get_raw(), end__); \
		OP; \
		/* Guarantee COND check prior to timeout */ \
		barrier(); \
		if (COND) { \
			ret__ = 0; \
			break; \
		} \
		if (expired__) { \
			ret__ = -ETIMEDOUT; \
			break; \
		} \
		usleep_range(wait__, wait__ * 2); \
		if (wait__ < (Wmax)) \
			wait__ <<= 1; \
	} \
	ret__; \
})
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.