// SPDX-License-Identifier: GPL-2.0-only
/*
 * ms_block.c - Sony MemoryStick (legacy) storage support
 *
 * Copyright (C) 2013 Maxim Levitsky <maximlevitsky@gmail.com>
 *
 * Minor portions of the driver were copied from mspro_block.c which is
 * Copyright (C) 2007 Alex Dubov <oakad@yahoo.com>
 */
#define DRIVER_NAME "ms_block"
#define pr_fmt(fmt) DRIVER_NAME ": " fmt
/* * Compares section of 'sg' starting from offset 'offset' and with length 'len' * to linear buffer of length 'len' at address 'buffer' * Returns 0 if equal and -1 otherwice
 */ staticint msb_sg_compare_to_buffer(struct scatterlist *sg,
size_t offset, u8 *buffer, size_t len)
/*
 * NOTE(review): the body of this function appears truncated by extraction
 * (only the local declarations survive; the sg_mapping_iter walk that does
 * the comparison is missing).  "staticint" is a fused token.  Restore this
 * function from the upstream driver rather than editing it in place.
 */
{ int retval = 0, cmplen; struct sg_mapping_iter miter;
/*
 * Get zone at which block with logical address 'lba' lives.
 *
 * Flash is broken into zones.
 * Each zone consists of 512 eraseblocks, out of which in the first
 * zone 494 are used and 496 are used in all following zones.
 * Therefore zone #0 hosts blocks 0-493, zone #1 blocks 494-989, etc...
 */
static int msb_get_zone_from_lba(int lba)
{
	/* The first zone maps only 494 logical blocks */
	if (lba < 494)
		return 0;
	return ((lba - 494) / 496) + 1;
}
/* Get zone of physical block. Trivial */
static int msb_get_zone_from_pba(int pba)
{
	return pba / MS_BLOCKS_IN_ZONE;
}
/*
 * Debug test to validate free block counts.
 *
 * Walks every zone, sums the per-zone free block counters and checks the
 * result against the population count of the used-block bitmap.  On a
 * mismatch the card is switched to read-only mode to avoid further
 * corruption.  Returns 0 on success (or when debugging is disabled),
 * -EINVAL on mismatch.
 */
static int msb_validate_used_block_bitmap(struct msb_data *msb)
{
	int total_free_blocks = 0;
	int i;

	/* Pure consistency check: skip it unless debugging is enabled */
	if (!debug)
		return 0;

	for (i = 0; i < msb->zone_count; i++)
		total_free_blocks += msb->free_block_count[i];

	if (msb->block_count - bitmap_weight(msb->used_blocks_bitmap,
					msb->block_count) == total_free_blocks)
		return 0;

	pr_err("BUG: free block counts don't match the bitmap");
	msb->read_only = true;
	return -EINVAL;
}
/*
 * Mark physical block as used.
 *
 * Sets the block's bit in the used-block bitmap and decrements the free
 * count of the zone the block belongs to.  Marking an already-used block
 * is a driver bug and switches the card to read-only mode.
 */
static void msb_mark_block_used(struct msb_data *msb, int pba)
{
	int zone = msb_get_zone_from_pba(pba);

	if (test_bit(pba, msb->used_blocks_bitmap)) {
		pr_err("BUG: attempt to mark already used pba %d as used", pba);
		msb->read_only = true;
		return;
	}

	if (msb_validate_used_block_bitmap(msb))
		return;

	/* No races because all IO is single threaded */
	__set_bit(pba, msb->used_blocks_bitmap);
	msb->free_block_count[zone]--;
}
/*
 * Mark physical block as free.
 *
 * Clears the block's bit in the used-block bitmap and increments the free
 * count of the zone the block belongs to.  Marking an already-free block
 * is a driver bug and switches the card to read-only mode.
 */
static void msb_mark_block_unused(struct msb_data *msb, int pba)
{
	int zone = msb_get_zone_from_pba(pba);

	if (!test_bit(pba, msb->used_blocks_bitmap)) {
		pr_err("BUG: attempt to mark already unused pba %d as unused", pba);
		msb->read_only = true;
		return;
	}

	if (msb_validate_used_block_bitmap(msb))
		return;

	/* No races because all IO is single threaded */
	__clear_bit(pba, msb->used_blocks_bitmap);
	msb->free_block_count[zone]++;
}
/*
 * Handler for absence of IO: unconditionally fails the request.
 *
 * Installed as the request handler when no state machine is active, so
 * any unexpected request completion is rejected with -ENXIO.
 */
static int h_msb_default_bad(struct memstick_dev *card,
					struct memstick_request **mrq)
{
	return -ENXIO;
}
/* * This function is a handler for reads of one page from device. * Writes output to msb->current_sg, takes sector address from msb->reg.param * Can also be used to read extra data only. Set params accordintly.
 */ staticint h_msb_read_page(struct memstick_dev *card, struct memstick_request **out_mrq)
/*
 * NOTE(review): this state machine appears garbled by extraction:
 * "staticint"/"unsignedchar" are fused tokens, whitespace is collapsed,
 * and several MSB_RP_* cases are missing the statements that separate
 * them in the upstream driver (e.g. the command send, the INT_ERR check
 * and the data-receive states).  Do not edit in place; restore this
 * function from the upstream ms_block.c.
 */
{ struct msb_data *msb = memstick_get_drvdata(card); struct memstick_request *mrq = *out_mrq = &card->current_mrq; struct scatterlist sg[2];
u8 command, intreg;
if (mrq->error) {
dbg("read_page, unknown error"); return msb_exit_state_machine(msb, mrq->error);
}
again: switch (msb->state) { case MSB_RP_SEND_BLOCK_ADDRESS: /* msb_write_regs sometimes "fails" because it needs to update * the reg window, and thus it returns request for that. * Then we stay in this state and retry
*/ if (!msb_write_regs(msb,
offsetof(struct ms_register, param), sizeof(struct ms_param_register),
(unsignedchar *)&msb->regs.param)) return 0;
case MSB_RP_SEND_INT_REQ:
msb->state = MSB_RP_RECEIVE_INT_REQ_RESULT; /* If dont actually need to send the int read request (only in * serial mode), then just fall through
*/ if (msb_read_int_reg(msb, -1)) return 0;
fallthrough;
case MSB_RP_RECEIVE_INT_REQ_RESULT:
intreg = mrq->data[0];
msb->regs.status.interrupt = intreg;
if (intreg & MEMSTICK_INT_CMDNAK) return msb_exit_state_machine(msb, -EIO);
case MSB_RP_SEND_READ_STATUS_REG: /* read the status register to understand source of the INT_ERR */ if (!msb_read_regs(msb,
offsetof(struct ms_register, status), sizeof(struct ms_status_register))) return 0;
case MSB_RP_SEND_READ_DATA: /* Skip that state if we only read the oob */ if (msb->regs.param.cp == MEMSTICK_CP_EXTRA) {
msb->state = MSB_RP_RECEIVE_READ_DATA; goto again;
}
if (msb->regs.status.status1 & MEMSTICK_CORR_ERROR) {
dbg("read_page: correctable error");
/* presumably the page data was still delivered; advance sg offset */
msb->current_sg_offset += msb->page_size; return msb_exit_state_machine(msb, -EUCLEAN);
} else {
dbg("read_page: INT error, but no status error bits"); return msb_exit_state_machine(msb, -EIO);
}
}
BUG();
}
/* * Handler of writes of exactly one block. * Takes address from msb->regs.param. * Writes same extra data to blocks, also taken * from msb->regs.extra * Returns -EBADMSG if write fails due to uncorrectable error, or -EIO if * device refuses to take the command or something else
*/ staticint h_msb_write_block(struct memstick_dev *card, struct memstick_request **out_mrq)
/*
 * NOTE(review): this block appears to be several upstream functions fused
 * by extraction: the h_msb_write_block state machine (MSB_WB_* cases, with
 * some states missing), then the MSB_PS_* cases of what is upstream
 * h_msb_parallel_switch (which references an undeclared 'host' here), then
 * an orphan tail referencing undeclared 'error'/'pba'/'page'.  Do not edit
 * in place; restore from the upstream ms_block.c.
 */
{ struct msb_data *msb = memstick_get_drvdata(card); struct memstick_request *mrq = *out_mrq = &card->current_mrq; struct scatterlist sg[2];
u8 intreg, command;
if (mrq->error) return msb_exit_state_machine(msb, mrq->error);
again: switch (msb->state) {
/* HACK: Jmicon handling of TPCs between 8 and * sizeof(memstick_request.data) is broken due to hardware * bug in PIO mode that is used for these TPCs * Therefore split the write
*/
case MSB_WB_SEND_WRITE_PARAMS: if (!msb_write_regs(msb,
offsetof(struct ms_register, param), sizeof(struct ms_param_register),
&msb->regs.param)) return 0;
msb->state = MSB_WB_SEND_WRITE_OOB; return 0;
case MSB_WB_SEND_WRITE_OOB: if (!msb_write_regs(msb,
offsetof(struct ms_register, extra_data), sizeof(struct ms_extra_data_register),
&msb->regs.extra_data)) return 0;
msb->state = MSB_WB_SEND_WRITE_COMMAND; return 0;
case MSB_WB_SEND_INT_REQ:
msb->state = MSB_WB_RECEIVE_INT_REQ; if (msb_read_int_reg(msb, -1)) return 0;
fallthrough;
case MSB_WB_RECEIVE_INT_REQ:
intreg = mrq->data[0];
msb->regs.status.interrupt = intreg;
/* errors mean out of here, and fast... */ if (intreg & (MEMSTICK_INT_CMDNAK)) return msb_exit_state_machine(msb, -EIO);
if (intreg & MEMSTICK_INT_ERR) return msb_exit_state_machine(msb, -EBADMSG);
/* for last page we need to poll CED */ if (msb->current_page == msb->pages_in_block) { if (intreg & MEMSTICK_INT_CED) return msb_exit_state_machine(msb, 0);
msb->state = MSB_WB_SEND_INT_REQ; goto again;
}
/* for non-last page we need BREQ before writing next chunk */ if (!(intreg & MEMSTICK_INT_BREQ)) {
msb->state = MSB_WB_SEND_INT_REQ; goto again;
}
/* NOTE(review): from here on this is a different upstream function */
switch (msb->state) { case MSB_PS_SEND_SWITCH_COMMAND: /* Set the parallel interface on memstick side */
msb->regs.param.system |= MEMSTICK_SYS_PAM;
if (!msb_write_regs(msb,
offsetof(struct ms_register, param),
1,
(unsignedchar *)&msb->regs.param)) return 0;
msb->state = MSB_PS_SWICH_HOST; return 0;
case MSB_PS_SWICH_HOST: /* Set parallel interface on our side + send a dummy request * to see if card responds
*/
host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4);
memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
msb->state = MSB_PS_CONFIRM; return 0;
case MSB_PS_CONFIRM: return msb_exit_state_machine(msb, 0);
}
if (error == -EUCLEAN) {
pr_notice("correctable error on pba %d, page %d",
pba, page); return 0;
}
return error;
}
/* Reads a block and compares it with data contained in scatterlist orig_sg */ staticint msb_verify_block(struct msb_data *msb, u16 pba, struct scatterlist *orig_sg, int offset)
/*
 * NOTE(review): only the signature of msb_verify_block survives here; the
 * body below is the write/verify retry-loop tail of a different upstream
 * function (it references undeclared 'error', 'verify_writes', 'sg' and
 * 'current_try', and even calls msb_verify_block itself).  Restore both
 * functions from the upstream ms_block.c.
 */
{ struct scatterlist sg; int page = 0, error;
/* Sector we just wrote to is assumed erased since its pba * was erased. If it wasn't erased, write will succeed * and will just clear the bits that were set in the block * thus test that what we have written, * matches what we expect. * We do trust the blocks that we erased
*/ if (!error && (verify_writes ||
!test_bit(pba, msb->erased_blocks_bitmap)))
error = msb_verify_block(msb, pba, sg, offset);
if (!error) break;
if (current_try > 1 || msb_reset(msb, true)) break;
pr_err("write failed, trying to erase the pba %d", pba);
error = msb_erase_block(msb, pba); if (error) break;
current_try++;
} return error;
}
/*
 * Finds a free block for write replacement.
 *
 * Picks a random free physical block within 'zone' (random choice spreads
 * wear across the zone), marks it used and returns its pba.  Returns
 * MS_BLOCK_INVALID and flips the card to read-only mode when the zone has
 * no free blocks left or the bitmap scan runs out of the zone.
 */
static u16 msb_get_free_block(struct msb_data *msb, int zone)
{
	u16 choice;
	int pba = zone * MS_BLOCKS_IN_ZONE;

	get_random_bytes(&choice, sizeof(choice));

	if (!msb->free_block_count[zone]) {
		pr_err("NO free blocks in the zone %d, to use for a write, (media is WORN out) switching to RO mode", zone);
		msb->read_only = true;
		return MS_BLOCK_INVALID;
	}

	choice %= msb->free_block_count[zone];

	dbg_verbose("have %d choices for a free block, selected randomly: %d",
		msb->free_block_count[zone], choice);

	/* Walk to the choice-th clear bit at or after the zone start */
	pba = find_next_zero_bit(msb->used_blocks_bitmap,
				msb->block_count, pba);
	while (choice--)
		pba = find_next_zero_bit(msb->used_blocks_bitmap,
					msb->block_count, pba + 1);

	dbg_verbose("result of the free blocks scan: pba %d", pba);

	if (pba == msb->block_count || (msb_get_zone_from_pba(pba)) != zone) {
		pr_err("BUG: can't get a free block");
		msb->read_only = true;
		return MS_BLOCK_INVALID;
	}

	msb_mark_block_used(msb, pba);
	return pba;
}
/*
 * NOTE(review): this span fuses fragments of several upstream functions:
 * the opening of msb_update_block, then loop bodies from the FTL media
 * scan (references to undeclared 'error', 'overwrite_flag', 'other_block',
 * 'page', 'extra'), part of the cache-flush path, and the truncated
 * openings of msb_cache_write and msb_cache_read.  None of it is complete;
 * restore these functions from the upstream ms_block.c instead of editing
 * in place.
 */
staticint msb_update_block(struct msb_data *msb, u16 lba, struct scatterlist *sg, int offset)
{
u16 pba, new_pba; int error, try;
pba = msb->lba_to_pba_table[lba];
dbg_verbose("start of a block update at lba %d, pba %d", lba, pba);
if (pba != MS_BLOCK_INVALID) {
dbg_verbose("setting the update flag on the block");
msb_set_overwrite_flag(msb, pba, 0,
0xFF & ~MEMSTICK_OVERWRITE_UDST);
}
/* can't trust the page if we can't read the oob */ if (error == -EBADMSG) {
pr_notice( "oob of pba %d damaged, will try to erase it", pba);
msb_mark_block_used(msb, pba);
msb_erase_block(msb, pba); continue;
} elseif (error) {
pr_err("unknown error %d on read of oob of pba %d - aborting",
error, pba);
/* Block has LBA not according to zoning*/ if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
pr_notice("pba %05d -> [bad lba %05d] - will erase",
pba, lba);
msb_erase_block(msb, pba); continue;
}
/* No collisions - great */ if (msb->lba_to_pba_table[lba] == MS_BLOCK_INVALID) {
dbg_verbose("pba %05d -> [lba %05d]", pba, lba);
msb->lba_to_pba_table[lba] = pba; continue;
}
pr_notice("Collision between pba %d and pba %d",
pba, other_block);
if (!(overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
pr_notice("pba %d is marked as stable, use it", pba);
msb_erase_block(msb, other_block);
msb->lba_to_pba_table[lba] = pba; continue;
}
if (!(other_overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
pr_notice("pba %d is marked as stable, use it",
other_block);
msb_erase_block(msb, pba); continue;
}
pr_notice("collision between blocks %d and %d, without stable flag set on both, erasing pba %d",
pba, other_block, other_block);
/* Bad pages are copied with 00 page status */ if (error == -EBADMSG) {
pr_err("read error on sector %d, contents probably damaged", page); continue;
}
if (error) return error;
if ((extra.overwrite_flag & MEMSTICK_OV_PG_NORMAL) !=
MEMSTICK_OV_PG_NORMAL) {
dbg("page %d is marked as bad", page); continue;
}
set_bit(page, &msb->valid_cache_bitmap);
}
/* Write the cache now */
error = msb_update_block(msb, msb->cache_block_lba, &sg, 0);
pba = msb->lba_to_pba_table[msb->cache_block_lba];
/* Mark invalid pages */ if (!error) { for (page = 0; page < msb->pages_in_block; page++) {
if (test_bit(page, &msb->valid_cache_bitmap)) continue;
staticint msb_cache_write(struct msb_data *msb, int lba, int page, bool add_to_cache_only, struct scatterlist *sg, int offset)
{ int error; struct scatterlist sg_tmp[10];
if (msb->read_only) return -EROFS;
if (msb->cache_block_lba == MS_BLOCK_INVALID ||
lba != msb->cache_block_lba) if (add_to_cache_only) return 0;
/* If we need to write different block */ if (msb->cache_block_lba != MS_BLOCK_INVALID &&
lba != msb->cache_block_lba) {
dbg_verbose("first flush the cache");
error = msb_cache_flush(msb); if (error) return error;
}
staticint msb_cache_read(struct msb_data *msb, int lba, int page, struct scatterlist *sg, int offset)
{ int pba = msb->lba_to_pba_table[lba]; struct scatterlist sg_tmp[10]; int error = 0;
if (lba == msb->cache_block_lba &&
test_bit(page, &msb->valid_cache_bitmap)) {
dbg_verbose("Read of LBA %d (pba %d) sector %d from cache",
lba, pba, page);
/*
 * NOTE(review): the code below has no enclosing function definition in
 * this chunk — it looks like the interior of the upstream card
 * initialization path (boot page read, attribute capture, cache/FTL init,
 * bad block table read, media scan), followed by the truncated opening of
 * msb_do_write_request.  Undeclared names ('host', 'error', 'boot_block')
 * confirm the enclosing definitions were lost in extraction.
 */
/* Emulated geometry table * This table content isn't that importaint, * One could put here different values, providing that they still * cover whole disk. * 64 MB entry is what windows reports for my 64M memstick
*/
/* Due to a bug in Jmicron driver written by Alex Dubov, * its serial mode barely works, * so we switch to parallel mode right away
*/ if (host->caps & MEMSTICK_CAP_PAR4)
msb_switch_to_parallel(msb);
msb->page_size = sizeof(struct ms_boot_page);
/* Read the boot page */
error = msb_read_boot_blocks(msb); if (error) return -EIO;
boot_block = &msb->boot_page[0];
/* Save interesting attributes from boot page */
msb->block_count = boot_block->attr.number_of_blocks;
msb->page_size = boot_block->attr.page_size;
if ((size_t)msb->page_size > PAGE_SIZE) { /* this isn't supported by linux at all, anyway*/
dbg("device page %d size isn't supported", msb->page_size); return -EINVAL;
}
msb->block_buffer = kzalloc(msb->block_size, GFP_KERNEL); if (!msb->block_buffer) return -ENOMEM;
#if 0 /* Now we can switch the interface */ if (host->caps & msb->caps & MEMSTICK_CAP_PAR4)
msb_switch_to_parallel(msb); #endif
error = msb_cache_init(msb); if (error) return error;
error = msb_ftl_initialize(msb); if (error) return error;
/* Read the bad block table */
error = msb_read_bad_block_table(msb, 0);
if (error && error != -ENOMEM) {
dbg("failed to read bad block table from primary boot block, trying from backup");
error = msb_read_bad_block_table(msb, 1);
}
if (error) return error;
/* *drum roll* Scan the media */
error = msb_ftl_scan(msb); if (error) {
pr_err("Scan of media failed"); return error;
}
return 0;
}
staticint msb_do_write_request(struct msb_data *msb, int lba, int page, struct scatterlist *sg, size_t len, int *sucessfuly_written)
{ int error = 0;
off_t offset = 0;
*sucessfuly_written = 0;
while (offset < len) { if (page == 0 && len - offset >= msb->block_size) {
if (msb->cache_block_lba == lba)
msb_cache_discard(msb);
/*
 * NOTE(review): this span fuses the truncated opening of
 * msb_do_read_request with IO-completion fragments (references an
 * undeclared 'req'), the file-scope idr/mutex definitions, and orphan
 * statements from the card removal path (references undeclared 'flags').
 * Restore the complete functions from the upstream ms_block.c.
 */
staticint msb_do_read_request(struct msb_data *msb, int lba, int page, struct scatterlist *sg, int len, int *sucessfuly_read)
{ int error = 0; int offset = 0;
*sucessfuly_read = 0;
if (error && msb->req) {
blk_status_t ret = errno_to_blk_status(error);
dbg_verbose("IO: ending one sector of the request with error");
blk_mq_end_request(req, ret);
spin_lock_irq(&msb->q_lock);
msb->req = NULL;
spin_unlock_irq(&msb->q_lock);
}
if (msb->req)
dbg_verbose("IO: request still pending");
}
}
static DEFINE_IDR(msb_disk_idr); /*set of used disk numbers */ static DEFINE_MUTEX(msb_disk_lock); /* protects against races in open/release */
/* Take care of unhandled + new requests from now on */
spin_lock_irqsave(&msb->q_lock, flags);
msb->card_dead = true;
spin_unlock_irqrestore(&msb->q_lock, flags);
blk_mq_start_hw_queues(msb->queue);
/* Remove the disk */
del_gendisk(msb->disk);
blk_mq_free_tag_set(&msb->tag_set);
msb->queue = NULL;
/*
 * NOTE(review): the German disclaimer below is extraneous website
 * boilerplate appended to this file by the extraction tool; it is not part
 * of the driver and should be removed.  Wrapped in a comment so the file
 * remains valid C.  (English: "The information on this website was
 * carefully compiled to the best of our knowledge.  However, neither
 * completeness, correctness, nor quality of the provided information is
 * guaranteed.  Note: the colored syntax display and the measurement are
 * still experimental.")
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereitgestellten Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */