/* we keep old device allocation scheme; IOW, minors are still in 0..255 */
#define DASD_PER_MAJOR	(1U << (MINORBITS - DASD_PARTN_BITS))
#define DASD_PARTN_MASK	((1 << DASD_PARTN_BITS) - 1)
/*
 * States a dasd device can have:
 *   new: the dasd_device structure is allocated.
 *   known: the discipline for the device is identified.
 *   basic: the device can do basic i/o.
 *   unfmt: the device could not be analyzed (format is unknown).
 *   ready: partition detection is done and the device can do block io.
 *   online: the device accepts requests from the block device queue.
 *
 * Things to do for startup state transitions:
 *   new -> known: find discipline for the device and create devfs entries.
 *   known -> basic: request irq line for the device.
 *   basic -> ready: do the initial analysis, e.g. format detection,
 *                   do block device setup and detect partitions.
 *   ready -> online: schedule the device tasklet.
 * Things to do for shutdown state transitions:
 *   online -> ready: just set the new device state.
 *   ready -> basic: flush requests from the block device layer, clear
 *                   partition information and reset format information.
 *   basic -> known: terminate all requests and free irq.
 *   known -> new: remove devfs entries and forget discipline.
 */
/*
 * SECTION: Type definitions
 */
struct dasd_device;
struct dasd_block;
/* BIT DEFINITIONS FOR SENSE DATA */
#define DASD_SENSE_BIT_0	0x80
#define DASD_SENSE_BIT_1	0x40
#define DASD_SENSE_BIT_2	0x20
#define DASD_SENSE_BIT_3	0x10

/* BIT DEFINITIONS FOR SIM SENSE */
#define DASD_SIM_SENSE		0x0F
#define DASD_SIM_MSG_TO_OP	0x03
#define DASD_SIM_LOG		0x0C

/* lock class for nested cdev lock */
#define CDEV_NESTED_FIRST	1
#define CDEV_NESTED_SECOND	2

/* definition of dbf debug levels */
#define DBF_EMERG	0	/* system is unusable */
#define DBF_ALERT	1	/* action must be taken immediately */
#define DBF_CRIT	2	/* critical conditions */
#define DBF_ERR		3	/* error conditions */
#define DBF_WARNING	4	/* warning conditions */
#define DBF_NOTICE	5	/* normal but significant condition */
#define DBF_INFO	6	/* informational */
#define DBF_DEBUG	6	/* debug-level messages */

/*
 * Macro to calculate number of blocks per page.
 * Argument parenthesized so callers may pass an expression safely.
 */
#define BLOCKS_PER_PAGE(blksize) (PAGE_SIZE / (blksize))
struct dasd_ccw_req { unsignedint magic; /* Eye catcher */ int intrc; /* internal error, e.g. from start_IO */ struct list_head devlist; /* for dasd_device request queue */ struct list_head blocklist; /* for dasd_block request queue */ struct dasd_block *block; /* the originating block device */ struct dasd_device *memdev; /* the device used to allocate this */ struct dasd_device *startdev; /* device the request is started on */ struct dasd_device *basedev; /* base device if no block->base */ void *cpaddr; /* address of ccw or tcw */ short retries; /* A retry counter */ unsignedchar cpmode; /* 0 = cmd mode, 1 = itcw */ char status; /* status of this request */ char lpm; /* logical path mask */ unsignedlong flags; /* flags of this request */ struct dasd_queue *dq; unsignedlong starttime; /* jiffies time of request start */ unsignedlong expires; /* expiration period in jiffies */ void *data; /* pointer to data area */ struct irb irb; /* device status in case of an error */ struct dasd_ccw_req *refers; /* ERP-chain queueing. */ void *function; /* originating ERP action */ void *mem_chunk;
unsignedlong buildclk; /* TOD-clock of request generation */ unsignedlong startclk; /* TOD-clock of request start */ unsignedlong stopclk; /* TOD-clock of request interrupt */ unsignedlong endclk; /* TOD-clock of request termination */
/* per dasd_ccw_req flags */
#define DASD_CQR_FLAGS_USE_ERP	0	/* use ERP for this request */
#define DASD_CQR_FLAGS_FAILFAST	1	/* FAILFAST */
#define DASD_CQR_VERIFY_PATH	2	/* path verification request */
#define DASD_CQR_ALLOW_SLOCK	3	/* Try this request even when lock was
					 * stolen. Should not be combined with
					 * DASD_CQR_FLAGS_USE_ERP
					 */

/*
 * The following flags are used to suppress output of certain errors.
 */
#define DASD_CQR_SUPPRESS_NRF	4	/* Suppress 'No Record Found' error */
#define DASD_CQR_SUPPRESS_IT	5	/* Suppress 'Invalid Track' error */
#define DASD_CQR_SUPPRESS_IL	6	/* Suppress 'Incorrect Length' error */
#define DASD_CQR_SUPPRESS_CR	7	/* Suppress 'Command Reject' error */

/*
 * A single CQR can only contain a maximum of 255 CCWs. It is limited by
 * the locate record and locate record extended count value which can only
 * hold 1 Byte max.
 */
#define DASD_CQR_MAX_CCW 255
int dasd_devmap_set_device_copy_relation(struct ccw_device *, bool pprc_enabled);
/* * the struct dasd_discipline is * sth like a table of virtual functions, if you think of dasd_eckd * inheriting dasd... * no, currently we are not planning to reimplement the driver in C++
*/ struct dasd_discipline { struct module *owner; char ebcname[8]; /* a name used for tagging and printks */ char name[8]; /* a name used for tagging and printks */ bool has_discard;
struct list_head list; /* used for list of disciplines */
/* * Device recognition functions. check_device is used to verify * the sense data and the information returned by read device * characteristics. It returns 0 if the discipline can be used * for the device in question. uncheck_device is called during * device shutdown to deregister a device from its discipline.
*/ int (*check_device) (struct dasd_device *); void (*uncheck_device) (struct dasd_device *);
/* * do_analysis is used in the step from device state "basic" to * state "accept". It returns 0 if the device can be made ready, * it returns -EMEDIUMTYPE if the device can't be made ready or * -EAGAIN if do_analysis started a ccw that needs to complete * before the analysis may be repeated.
*/ int (*do_analysis) (struct dasd_block *);
/* * This function is called, when new paths become available. * Disciplins may use this callback to do necessary setup work, * e.g. verify that new path is compatible with the current * configuration.
*/ int (*pe_handler)(struct dasd_device *, __u8, __u8);
/* * Last things to do when a device is set online, and first things * when it is set offline.
*/ int (*basic_to_ready) (struct dasd_device *); int (*online_to_ready) (struct dasd_device *); int (*basic_to_known)(struct dasd_device *);
unsignedint (*max_sectors)(struct dasd_block *); /* (struct dasd_device *); * Device operation functions. build_cp creates a ccw chain for * a block device request, start_io starts the request and * term_IO cancels it (e.g. in case of a timeout). format_device * formats the device and check_device_format compares the format of * a device with the expected format_data. * handle_terminated_request allows to examine a cqr and prepare * it for retry.
*/ struct dasd_ccw_req *(*build_cp) (struct dasd_device *, struct dasd_block *, struct request *); int (*start_IO) (struct dasd_ccw_req *); int (*term_IO) (struct dasd_ccw_req *); void (*handle_terminated_request) (struct dasd_ccw_req *); int (*format_device) (struct dasd_device *, struct format_data_t *, int); int (*check_device_format)(struct dasd_device *, struct format_check_t *, int); int (*free_cp) (struct dasd_ccw_req *, struct request *);
/* * Error recovery functions. examine_error() returns a value that * indicates what to do for an error condition. If examine_error() * returns 'dasd_era_recover' erp_action() is called to create a * special error recovery ccw. erp_postaction() is called after * an error recovery ccw has finished its execution. dump_sense * is called for every error condition to print the sense data * to the console.
*/
dasd_erp_fn_t(*erp_action) (struct dasd_ccw_req *);
dasd_erp_fn_t(*erp_postaction) (struct dasd_ccw_req *); void (*dump_sense) (struct dasd_device *, struct dasd_ccw_req *, struct irb *); void (*dump_sense_dbf) (struct dasd_device *, struct irb *, char *); void (*check_for_device_change) (struct dasd_device *, struct dasd_ccw_req *, struct irb *);
/* i/o control functions. */ int (*fill_geometry) (struct dasd_block *, struct hd_geometry *); int (*fill_info) (struct dasd_device *, struct dasd_information2_t *); int (*ioctl) (struct dasd_block *, unsignedint, void __user *);
/* reload device after state change */ int (*reload) (struct dasd_device *);
int (*get_uid) (struct dasd_device *, struct dasd_uid *); void (*kick_validate) (struct dasd_device *); int (*check_attention)(struct dasd_device *, __u8); int (*host_access_count)(struct dasd_device *); int (*hosts_print)(struct dasd_device *, struct seq_file *); void (*handle_hpf_error)(struct dasd_device *, struct irb *); void (*disable_hpf)(struct dasd_device *); int (*hpf_enabled)(struct dasd_device *); void (*reset_path)(struct dasd_device *, __u8);
/* * Extent Space Efficient (ESE) relevant functions
*/ int (*is_ese)(struct dasd_device *); /* Capacity */ int (*space_allocated)(struct dasd_device *); int (*space_configured)(struct dasd_device *); int (*logical_capacity)(struct dasd_device *); int (*release_space)(struct dasd_device *, struct format_data_t *); /* Extent Pool */ int (*ext_pool_id)(struct dasd_device *); int (*ext_size)(struct dasd_device *); int (*ext_pool_cap_at_warnlevel)(struct dasd_device *); int (*ext_pool_warn_thrshld)(struct dasd_device *); int (*ext_pool_oos)(struct dasd_device *); int (*ext_pool_exhaust)(struct dasd_device *, struct dasd_ccw_req *); struct dasd_ccw_req *(*ese_format)(struct dasd_device *, struct dasd_ccw_req *, struct irb *); int (*ese_read)(struct dasd_ccw_req *, struct irb *); int (*pprc_status)(struct dasd_device *, struct dasd_pprc_data_sc4 *); bool (*pprc_enabled)(struct dasd_device *); int (*copy_pair_swap)(struct dasd_device *, char *, char *); int (*device_ping)(struct dasd_device *);
};
/*
 * kobject release callback for the per-path kobject.
 * Intentionally empty: the memory backing the dasd_path kobject is
 * freed when dasd_free_device() is called.
 */
static inline void dasd_path_release(struct kobject *kobj)
{
}
struct dasd_profile_info { /* legacy part of profile data, as in dasd_profile_info_t */ unsignedint dasd_io_reqs; /* number of requests processed */ unsignedint dasd_io_sects; /* number of sectors processed */ unsignedint dasd_io_secs[32]; /* histogram of request's sizes */ unsignedint dasd_io_times[32]; /* histogram of requests's times */ unsignedint dasd_io_timps[32]; /* h. of requests's times per sector */ unsignedint dasd_io_time1[32]; /* hist. of time from build to start */ unsignedint dasd_io_time2[32]; /* hist. of time from start to irq */ unsignedint dasd_io_time2ps[32]; /* hist. of time from start to irq */ unsignedint dasd_io_time3[32]; /* hist. of time from irq to end */ unsignedint dasd_io_nr_req[32]; /* hist. of # of requests in chanq */
/* new data */ struct timespec64 starttod; /* time of start or last reset */ unsignedint dasd_io_alias; /* requests using an alias */ unsignedint dasd_io_tpm; /* requests using transport mode */ unsignedint dasd_read_reqs; /* total number of read requests */ unsignedint dasd_read_sects; /* total number read sectors */ unsignedint dasd_read_alias; /* read request using an alias */ unsignedint dasd_read_tpm; /* read requests in transport mode */ unsignedint dasd_read_secs[32]; /* histogram of request's sizes */ unsignedint dasd_read_times[32]; /* histogram of requests's times */ unsignedint dasd_read_time1[32]; /* hist. time from build to start */ unsignedint dasd_read_time2[32]; /* hist. of time from start to irq */ unsignedint dasd_read_time3[32]; /* hist. of time from irq to end */ unsignedint dasd_read_nr_req[32]; /* hist. of # of requests in chanq */ unsignedlong dasd_sum_times; /* sum of request times */ unsignedlong dasd_sum_time_str; /* sum of time from build to start */ unsignedlong dasd_sum_time_irq; /* sum of time from start to irq */ unsignedlong dasd_sum_time_end; /* sum of time from irq to end */
};
/*
 * The static memory in ccw_mem and erp_mem is managed by a sorted
 * list of free memory chunks.
 */
struct dasd_mchunk
{
	struct list_head list;	/* link in the sorted free-chunk list */
	unsigned long size;	/* usable bytes following this header */
} __attribute__ ((aligned(8)));
/*
 * NOTE(review): this appears to be the tail of a chunk-free routine
 * (presumably dasd_free_chunk) whose function header is missing from
 * this view — confirm against the full file. It returns a memory
 * chunk to the sorted free list, coalescing with adjacent free
 * neighbours where possible. Code left byte-identical.
 */
/* Recover the chunk header that precedes the user memory. */
chunk = (struct dasd_mchunk *)
((char *) mem - sizeof(struct dasd_mchunk)); /* Find out the left neighbour in chunk_list. */
left = chunk_list;
/* Walk the (address-sorted) list to find the last entry below chunk. */
list_for_each(p, chunk_list) { if (list_entry(p, struct dasd_mchunk, list) > chunk) break;
left = p;
} /* Try to merge with right neighbour = next element from left. */ if (left->next != chunk_list) {
tmp = list_entry(left->next, struct dasd_mchunk, list); if ((char *) (chunk + 1) + chunk->size == (char *) tmp) {
/* Right neighbour is contiguous: absorb it (header included). */
list_del(&tmp->list);
chunk->size += tmp->size + sizeof(struct dasd_mchunk);
}
} /* Try to merge with left neighbour. */ if (left != chunk_list) {
tmp = list_entry(left, struct dasd_mchunk, list); if ((char *) (tmp + 1) + tmp->size == (char *) chunk) {
/* Left neighbour is contiguous: grow it and we are done. */
tmp->size += chunk->size + sizeof(struct dasd_mchunk); return;
}
}
/* No left merge: insert the chunk after 'left' to keep the list sorted. */
__list_add(&chunk->list, left, left->next);
}
/*
 * Check if bsize is in { 512, 1024, 2048, 4096 }.
 * Returns 0 for a valid block size, -EMEDIUMTYPE otherwise.
 */
static inline int
dasd_check_blocksize(int bsize)
{
	if (bsize < 512 || bsize > 4096 || !is_power_of_2(bsize))
		return -EMEDIUMTYPE;
	return 0;
}
/*
 * return the callback data of the original request in case there are
 * ERP requests built on top of it
 */
static inline void *dasd_get_callback_data(struct dasd_ccw_req *cqr)
{
	/* Walk the ERP chain down to the original request. */
	while (cqr->refers)
		cqr = cqr->refers;
	/*
	 * NOTE(review): the original text fell off the end of this
	 * non-void function (undefined behavior). Restored the return of
	 * the walked request's callback data — confirm the field name
	 * against the full struct dasd_ccw_req definition.
	 */
	return cqr->callback_data;
}
staticinlineint dasd_eer_enabled(struct dasd_device *device)
{ return device->eer_cqr != NULL;
} #else #define dasd_eer_init() (0) #define dasd_eer_exit() do { } while (0) #define dasd_eer_enable(d) (0) #define dasd_eer_disable(d) do { } while (0) #define dasd_eer_write(d,c,i) do { } while (0) #define dasd_eer_snss(d) do { } while (0) #define dasd_eer_enabled(d) (0) #endif/* CONFIG_DASD_ERR */
/* DASD path handling functions */

/*
 * helper functions to modify bit masks for a given channel path for a device
 */
static inline int dasd_path_is_operational(struct dasd_device *device, int chp)
{
	return test_bit(DASD_PATH_OPERATIONAL, &device->path[chp].flags);
}
/*
 * NOTE(review): this appears to be the tail of a routine (header missing
 * from this view, presumably a device-wide fc_security query) that finds
 * the first operational path's fc_security value and verifies all other
 * operational paths agree, returning -EINVAL on mismatch. Confirm against
 * the full file. Code left byte-identical.
 */
/* Find the first operational channel path and record its fc_security. */
for (chp = 0; chp < 8; chp++) { if (device->opm & (0x80 >> chp)) {
fc_sec = device->path[chp].fc_security; break;
}
} /* All remaining operational paths must report the same fc_security. */ for (; chp < 8; chp++) { if (device->opm & (0x80 >> chp)) if (device->path[chp].fc_security != fc_sec) return -EINVAL;
}
return fc_sec;
}
/*
 * add functions for path masks
 * the existing path mask will be extended by the given path mask
 */
static inline void dasd_path_add_tbvpm(struct dasd_device *device, __u8 pm)
{
	int chp;

	/* bit 0x80 >> chp selects channel path chp in the 8-bit mask */
	for (chp = 0; chp < 8; chp++)
		if (pm & (0x80 >> chp))
			dasd_path_verify(device, chp);
}
/*
 * Add the paths in pm to the operational path mask; a path made
 * operational is removed from all negative path lists.
 */
static inline void dasd_path_add_opm(struct dasd_device *device, __u8 pm)
{
	int chp;

	for (chp = 0; chp < 8; chp++)
		if (pm & (0x80 >> chp)) {
			dasd_path_operational(device, chp);
			/*
			 * if the path is used
			 * it should not be in one of the negative lists
			 */
			dasd_path_clear_nohpf(device, chp);
			dasd_path_clear_cuir(device, chp);
			dasd_path_clear_cable(device, chp);
			dasd_path_clear_ifcc(device, chp);
		}
}
/* Mark the paths in pm as miscabled (added to the existing mask). */
static inline void dasd_path_add_cablepm(struct dasd_device *device, __u8 pm)
{
	int chp;

	for (chp = 0; chp < 8; chp++)
		if (pm & (0x80 >> chp))
			dasd_path_miscabled(device, chp);
}
/* Mark the paths in pm as CUIR-quiesced (added to the existing mask). */
static inline void dasd_path_add_cuirpm(struct dasd_device *device, __u8 pm)
{
	int chp;

	for (chp = 0; chp < 8; chp++)
		if (pm & (0x80 >> chp))
			dasd_path_cuir(device, chp);
}
/* Mark the paths in pm as hit by an IFCC (added to the existing mask). */
static inline void dasd_path_add_ifccpm(struct dasd_device *device, __u8 pm)
{
	int chp;

	for (chp = 0; chp < 8; chp++)
		if (pm & (0x80 >> chp))
			dasd_path_ifcc(device, chp);
}
/* Mark the paths in pm as non-preferred (added to the existing mask). */
static inline void dasd_path_add_nppm(struct dasd_device *device, __u8 pm)
{
	int chp;

	for (chp = 0; chp < 8; chp++)
		if (pm & (0x80 >> chp))
			dasd_path_nonpreferred(device, chp);
}
/* Mark the paths in pm as no-HPF (added to the existing mask). */
static inline void dasd_path_add_nohpfpm(struct dasd_device *device, __u8 pm)
{
	int chp;

	for (chp = 0; chp < 8; chp++)
		if (pm & (0x80 >> chp))
			dasd_path_nohpf(device, chp);
}
/* Mark the paths in pm as preferred (added to the existing mask). */
static inline void dasd_path_add_ppm(struct dasd_device *device, __u8 pm)
{
	int chp;

	for (chp = 0; chp < 8; chp++)
		if (pm & (0x80 >> chp))
			dasd_path_preferred(device, chp);
}
/* Mark the paths in pm for fc_security handling (added to the mask). */
static inline void dasd_path_add_fcsecpm(struct dasd_device *device, __u8 pm)
{
	int chp;

	for (chp = 0; chp < 8; chp++)
		if (pm & (0x80 >> chp))
			dasd_path_fcsec(device, chp);
}
/*
 * set functions for path masks
 * the existing path mask will be replaced by the given path mask
 */
static inline void dasd_path_set_tbvpm(struct dasd_device *device, __u8 pm)
{
	int chp;

	for (chp = 0; chp < 8; chp++)
		if (pm & (0x80 >> chp))
			dasd_path_verify(device, chp);
		else
			dasd_path_clear_verify(device, chp);
}
/*
 * Replace the operational path mask: every path is first cleared, then
 * exactly the paths in pm are marked operational; a path made operational
 * is removed from all negative path lists.
 */
static inline void dasd_path_set_opm(struct dasd_device *device, __u8 pm)
{
	int chp;

	for (chp = 0; chp < 8; chp++) {
		dasd_path_clear_oper(device, chp);
		if (pm & (0x80 >> chp)) {
			dasd_path_operational(device, chp);
			/*
			 * if the path is used
			 * it should not be in one of the negative lists
			 */
			dasd_path_clear_nohpf(device, chp);
			dasd_path_clear_cuir(device, chp);
			dasd_path_clear_cable(device, chp);
			dasd_path_clear_ifcc(device, chp);
		}
	}
}
/*
 * remove functions for path masks
 * the existing path mask will be cleared with the given path mask
 */
static inline void dasd_path_remove_opm(struct dasd_device *device, __u8 pm)
{
	int chp;

	for (chp = 0; chp < 8; chp++) {
		if (pm & (0x80 >> chp))
			dasd_path_clear_oper(device, chp);
	}
}
/*
 * add the newly available path to the to be verified pm and remove it from
 * normal operation until it is verified
 */
static inline void dasd_path_available(struct dasd_device *device, int chp)
{
	dasd_path_clear_oper(device, chp);
	dasd_path_verify(device, chp);
}
/*
 * NOTE(review): the trailing text here was a German website content
 * disclaimer ("information compiled to the best of our knowledge; no
 * guarantee of completeness, correctness or quality") accidentally
 * appended to the source. It is not C and broke compilation; replaced
 * with this comment.
 */