/* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE.
*/
/* Module parameters declared in the backend; defined in blkback.c. */
extern unsigned int xen_blkif_max_ring_order;
extern unsigned int xenblk_max_queues;

/*
 * This is the maximum number of segments that would be allowed in indirect
 * requests. This value will also be passed to the frontend.
 */
#define MAX_INDIRECT_SEGMENTS 256

/*
 * Xen uses 4K pages. The guest may use a different page size (4K or 64K).
 * Number of Xen pages per segment.
 */
#define XEN_PAGES_PER_SEGMENT (PAGE_SIZE / XEN_PAGE_SIZE)
/*
 * Not a real protocol. Used to generate ring structs which contain only
 * the elements common to all protocols. This gives a compiler-checkable
 * way to use common struct elements, so switch(protocol) can be avoided
 * in a number of places.
 */
struct blkif_common_request {
	char dummy;
};
/* i386 protocol version */
struct blkif_x86_32_request_rw {
	uint8_t        nr_segments;   /* number of segments                  */
	blkif_vdev_t   handle;        /* only for read/write requests        */
	uint64_t       id;            /* private guest value, echoed in resp */
	blkif_sector_t sector_number; /* start sector idx on disk (r/w only) */
	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));
struct blkif_x86_32_request_discard {
	uint8_t        flag;          /* BLKIF_DISCARD_SECURE or zero        */
	blkif_vdev_t   _pad1;         /* was "handle" for read/write requests */
	uint64_t       id;            /* private guest value, echoed in resp */
	blkif_sector_t sector_number; /* start sector idx on disk            */
	uint64_t       nr_sectors;    /* number of sectors to discard        */
} __attribute__((__packed__));
struct blkif_x86_32_request_indirect {
	uint8_t        indirect_op;
	uint16_t       nr_segments;
	uint64_t       id;
	blkif_sector_t sector_number;
	blkif_vdev_t   handle;
	uint16_t       _pad1;
	/*
	 * The maximum number of indirect segments (and pages) that will
	 * be used is determined by MAX_INDIRECT_SEGMENTS; this value is
	 * also exported to the guest (via the xenstore
	 * feature-max-indirect-segments entry), so the frontend knows how
	 * many indirect segments the backend supports.
	 */
	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
	uint64_t       _pad2;         /* make it 64 byte aligned */
} __attribute__((__packed__));
struct blkif_x86_64_request_indirect {
	uint8_t        indirect_op;
	uint16_t       nr_segments;
	uint32_t       _pad1;         /* offsetof(blkif_..,u.indirect.id)==8 */
	uint64_t       id;
	blkif_sector_t sector_number;
	blkif_vdev_t   handle;
	uint16_t       _pad2;
	/*
	 * The maximum number of indirect segments (and pages) that will
	 * be used is determined by MAX_INDIRECT_SEGMENTS; this value is
	 * also exported to the guest (via the xenstore
	 * feature-max-indirect-segments entry), so the frontend knows how
	 * many indirect segments the backend supports.
	 */
	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
	uint32_t       _pad3;         /* make it 64 byte aligned */
} __attribute__((__packed__));
/*
 * NOTE(review): the closing brace of this struct is NOT visible in this
 * chunk — the definition appears truncated by whatever extraction also
 * collapsed whitespace here (e.g. "unsignedint", "unsignedlong" are fused
 * tokens and will not compile as-is). Code is left byte-identical;
 * recover the complete original definition before making code changes.
 */
/* Per-ring information. */ struct xen_blkif_ring { /* Physical parameters of the comms window. */ unsignedint irq; union blkif_back_rings blk_rings; void *blk_ring; /* Private fields. */
spinlock_t blk_ring_lock;
wait_queue_head_t wq;
/* inflight counts requests in flight; active marks the ring as usable. */
atomic_t inflight; bool active; /* One thread per blkif ring. */ struct task_struct *xenblkd; unsignedint waiting_reqs;
/* List of all 'pending_req' available */ struct list_head pending_free; /* And its spinlock. */
spinlock_t pending_free_lock;
wait_queue_head_t pending_free_wq;
/* Tree to store persistent grants. */ struct rb_root persistent_gnts; unsignedint persistent_gnt_c;
atomic_t persistent_gnt_in_use; unsignedlong next_lru;
/* Used by the kworker that offload work from the persistent purge. */ struct list_head persistent_purge_list; struct work_struct persistent_purge_work;
/* Buffer of free pages to map grant refs. */ struct gnttab_page_cache free_pages;
/*
 * Each outstanding request that we've passed to the lower device layers
 * has a 'pending_req' allocated to it. Each buffer_head that completes
 * decrements the pendcnt towards zero. When it hits zero, the specified
 * domain has a response queued for it, with the saved 'id' passed back.
 */
struct pending_req {
	struct xen_blkif_ring *ring;    /* ring this request was received on */
	u64                    id;      /* private guest value, echoed in resp */
	int                    nr_segs; /* number of segments in use */
	atomic_t               pendcnt; /* completions outstanding; resp at 0 */
	unsigned short         operation; /* request operation code */
	int                    status;
	struct list_head       free_list; /* link on ring->pending_free */
	struct grant_page     *segments[MAX_INDIRECT_SEGMENTS];
	/* Indirect descriptors */
	struct grant_page     *indirect_pages[MAX_INDIRECT_PAGES];
	struct seg_buf         seg[MAX_INDIRECT_SEGMENTS];
	struct bio            *biolist[MAX_INDIRECT_SEGMENTS];
	struct gnttab_unmap_grant_ref unmap[MAX_INDIRECT_SEGMENTS];
	struct page           *unmap_pages[MAX_INDIRECT_SEGMENTS];
	struct gntab_unmap_queue_data gnttab_unmap_data;
};
/* Xenstore negotiation helpers and xenbus accessors (defined in xenbus.c). */
int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
			      struct backend_info *be, int state);
int xen_blkbk_barrier(struct xenbus_transaction xbt,
		      struct backend_info *be, int state);
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
void xen_blkbk_unmap_purged_grants(struct work_struct *work);
#endif/* __XEN_BLKIF__BACKEND__COMMON_H__ */
Messung V0.5
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Messung: 0.14
Bemerkung:
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.