/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. * Copyright (C) 2013 Red Hat * Author: Rob Clark <robdclark@gmail.com>
*/
/**
 * @objects: List of all GEM objects (mainly for debugfs).
 *
 * Protected by @obj_lock, which must be acquired before any
 * per-GEM-object lock.
 */
struct list_head objects;
struct mutex obj_lock;
/**
 * @lru: The various LRUs that a GEM object is in at various stages of
 * its lifetime.
 *
 * Objects start out in the unbacked LRU.  When pinned (for scanout or
 * permanently mapped GPU buffers, like ringbuffer, memptr, fw, etc)
 * they move to the pinned LRU.  When unpinned, they move into the
 * willneed or dontneed LRU depending on madvise state.  When backing
 * pages are evicted (willneed) or purged (dontneed) they move back
 * into the unbacked LRU.
 *
 * The dontneed LRU is considered by the shrinker for objects that are
 * candidates for purging, and the willneed LRU is considered for
 * objects that could be evicted.
 */
struct {
	/**
	 * @unbacked: The LRU for GEM objects without backing pages
	 * allocated.  This mostly exists so that objects are always
	 * in one LRU.
	 */
	struct drm_gem_lru unbacked;
	/**
	 * @pinned: The LRU for pinned GEM objects.
	 */
	struct drm_gem_lru pinned;
	/**
	 * @willneed: The LRU for unpinned GEM objects which are in
	 * madvise WILLNEED state (ie. can be evicted).
	 */
	struct drm_gem_lru willneed;
	/**
	 * @dontneed: The LRU for unpinned GEM objects which are in
	 * madvise DONTNEED state (ie. can be purged).
	 */
	struct drm_gem_lru dontneed;
	/**
	 * @lock: Protects manipulation of all of the LRUs.
	 */
	struct mutex lock;
} lru;
/**
 * @hangcheck_period: For hang detection, in ms.
 *
 * Note that in practice, a submit/job will get at least two hangcheck
 * periods, due to checking for progress being implemented as simply
 * "have the CP position registers changed since last time?"
 */
unsigned int hangcheck_period;
/** gpu_devfreq_config: Devfreq tuning config for the GPU. */ struct devfreq_simple_ondemand_data gpu_devfreq_config;
/**
 * @gpu_clamp_to_idle: Enable clamping to idle freq when inactive.
 */
bool gpu_clamp_to_idle;
/**
 * @disable_err_irq:
 *
 * Disable handling of GPU hw error interrupts, to force fallback to
 * the sw hangcheck timer.  Written (via debugfs) by igt tests to
 * exercise the sw hangcheck mechanism.
 */
bool disable_err_irq;
/**
 * @stall_reenable_time:
 *
 * If stall_enabled is false, when to reenable stall-on-fault.
 * Protected by @fault_stall_lock (NOTE(review): that lock is not
 * visible in this chunk — presumably declared earlier in this struct).
 */
ktime_t stall_reenable_time;
/**
 * @stall_enabled:
 *
 * Whether stall-on-fault is currently enabled.  Protected by
 * @fault_stall_lock.
 */
bool stall_enabled;
};
conststruct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format, uint64_t modifier);
struct msm_pending_timer;

/* Atomic commit path helpers (pending-timer is per-CRTC, see crtc_idx) */
int msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
		struct msm_kms *kms, int crtc_idx);
void msm_atomic_destroy_pending_timer(struct msm_pending_timer *timer);
void msm_atomic_commit_tail(struct drm_atomic_state *state);
int msm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state);
struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);

/* Enable/disable vblank interrupt delivery for a CRTC */
int msm_crtc_enable_vblank(struct drm_crtc *crtc);
void msm_crtc_disable_vblank(struct drm_crtc *crtc);
/**
 * struct msm_hrtimer_work - a helper to combine an hrtimer with kthread_work
 *
 * @timer: hrtimer to control when the kthread work is triggered
 * @work: the kthread work
 * @worker: the kthread worker the work will be scheduled on
 */
struct msm_hrtimer_work {
	struct hrtimer timer;
	struct kthread_work work;
	struct kthread_worker *worker;
};
/*
 * Helper for returning a UABI error with optional logging which can make
 * it easier for userspace to understand what it is doing wrong.
 *
 * The statement expression evaluates to the negated error code, so it
 * can be used directly in a return statement, e.g.
 * return UERR(EINVAL, drm, "bad flags");
 */
#define UERR(err, drm, fmt, ...) \
	({ DRM_DEV_DEBUG_DRIVER((drm)->dev, fmt, ##__VA_ARGS__); -(err); })
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.