/* Generic bits for GMAP notification on DAT table entry changes. */
#define GMAP_NOTIFY_SHADOW	0x2
#define GMAP_NOTIFY_MPROT	0x1
/* Status bits only for huge segment entries */
#define _SEGMENT_ENTRY_GMAP_IN	0x0800	/* invalidation notify bit */
#define _SEGMENT_ENTRY_GMAP_UC	0x0002	/* dirty (migration) */
/**
 * struct gmap_struct - guest address space
 * @list: list head for the mm->context gmap list
 * @mm: pointer to the parent mm_struct
 * @guest_to_host: radix tree with guest to host address translation
 * @host_to_guest: radix tree with pointer to segment table entries
 * @guest_table_lock: spinlock to protect all entries in the guest page table
 * @ref_count: reference counter for the gmap structure
 * @table: pointer to the page directory
 * @asce: address space control element for gmap page table
 * @asce_end: end of the address range described by @asce (assumed from the
 *            name - TODO confirm against the gmap allocation code)
 * @private: opaque pointer for the gmap user (purpose not visible here -
 *           verify against callers)
 * @pfault_enabled: defines if pfaults are applicable for the guest
 * @guest_handle: protected virtual machine handle for the ultravisor
 * @host_to_rmap: radix tree with gmap_rmap lists
 * @children: list of shadow gmap structures
 * @shadow_lock: spinlock to protect the shadow gmap list
 * @parent: pointer to the parent gmap for shadow guest address spaces
 * @orig_asce: ASCE for which the shadow page table has been created
 * @edat_level: edat level to be used for the shadow translation
 * @removed: flag to indicate if a shadow guest address space has been removed
 * @initialized: flag to indicate if a shadow guest address space can be used
 */
struct gmap {
	struct list_head list;
	struct mm_struct *mm;
	struct radix_tree_root guest_to_host;
	struct radix_tree_root host_to_guest;
	spinlock_t guest_table_lock;
	refcount_t ref_count;
	unsigned long *table;
	unsigned long asce;
	unsigned long asce_end;
	void *private;
	bool pfault_enabled;
	/* only set for protected virtual machines */
	unsigned long guest_handle;
	/* Additional data for shadow guest address spaces */
	struct radix_tree_root host_to_rmap;
	struct list_head children;
	spinlock_t shadow_lock;
	struct gmap *parent;
	unsigned long orig_asce;
	int edat_level;
	bool removed;
	bool initialized;
};
/**
 * struct gmap_rmap - reverse mapping for shadow page table entries
 * @next: pointer to next rmap in the list
 * @raddr: virtual rmap address in the shadow guest address space
 */
struct gmap_rmap {
	struct gmap_rmap *next;
	unsigned long raddr;
};
/** * s390_uv_destroy_range - Destroy a range of pages in the given mm. * @mm: the mm on which to operate on * @start: the start of the range * @end: the end of the range * * This function will call cond_sched, so it should not generate stalls, but * it will otherwise only return when it completed.
*/ staticinlinevoid s390_uv_destroy_range(struct mm_struct *mm, unsignedlong start, unsignedlong end)
{
(void)__s390_uv_destroy_range(mm, start, end, false);
}
/** * s390_uv_destroy_range_interruptible - Destroy a range of pages in the * given mm, but stop when a fatal signal is received. * @mm: the mm on which to operate on * @start: the start of the range * @end: the end of the range * * This function will call cond_sched, so it should not generate stalls. If * a fatal signal is received, it will return with -EINTR immediately, * without finishing destroying the whole range. Upon successful * completion, 0 is returned.
*/ staticinlineint s390_uv_destroy_range_interruptible(struct mm_struct *mm, unsignedlong start, unsignedlong end)
{ return __s390_uv_destroy_range(mm, start, end, true);
} #endif/* _ASM_S390_GMAP_H */
Messung V0.5
• Dauer der Verarbeitung: 0.19 Sekunden
(vorverarbeitet)
•
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.