/*
 * Per-card state for an SDDR-55 SmartMedia reader: geometry, the
 * logical<->physical block maps, error flags, and saved sense data.
 */
struct sddr55_card_info {
	unsigned long	capacity;	/* Size of card in bytes */
	int		max_log_blks;	/* maximum number of logical blocks */
	int		pageshift;	/* log2 of pagesize */
	int		smallpageshift;	/* 1 if pagesize == 256 */
	int		blocksize;	/* Size of block in pages */
	int		blockshift;	/* log2 of blocksize */
	int		blockmask;	/* 2^blockshift - 1 */
	int		read_only;	/* non zero if card is write protected */
	int		force_read_only;	/* non zero if we find a map error */
	int		*lba_to_pba;	/* logical to physical map */
	int		*pba_to_lba;	/* physical to logical map */
	int		fatal_error;	/* set if we detect something nasty */
	unsigned long	last_access;	/* number of jiffies since we last talked to device */
	unsigned char	sense_data[18];	/* saved sense bytes for REQUEST SENSE */
};
result = sddr55_bulk_transport(us,
DMA_FROM_DEVICE, status, 4);
/* expect to get short transfer if no card fitted */ if (result == USB_STOR_XFER_SHORT || result == USB_STOR_XFER_STALLED) { /* had a short transfer, no card inserted, free map memory */
kfree(info->lba_to_pba);
kfree(info->pba_to_lba);
info->lba_to_pba = NULL;
info->pba_to_lba = NULL;
info->fatal_error = 0;
info->force_read_only = 0;
set_sense_info (2, 0x3a, 0); /* not ready, medium not present */ return USB_STOR_TRANSPORT_FAILED;
}
// Since we only read in one block at a time, we have to create // a bounce buffer and move the data a piece at a time between the // bounce buffer and the actual transfer buffer.
len = min_t(unsignedint, sectors, info->blocksize >>
info->smallpageshift) * PAGESIZE;
buffer = kmalloc(len, GFP_NOIO); if (buffer == NULL) return USB_STOR_TRANSPORT_ERROR; /* out of memory */
offset = 0;
sg = NULL;
while (sectors>0) {
/* have we got to end? */ if (lba >= info->max_log_blks) break;
unsignedshort pages; int i; unsignedint len, offset; struct scatterlist *sg;
/* check if we are allowed to write */ if (info->read_only || info->force_read_only) {
set_sense_info (7, 0x27, 0); /* read only */ return USB_STOR_TRANSPORT_FAILED;
}
// Since we only write one block at a time, we have to create // a bounce buffer and move the data a piece at a time between the // bounce buffer and the actual transfer buffer.
if (pba == NOT_ALLOCATED) { /* no pba allocated for this lba, find a free pba to use */
int max_pba = (info->max_log_blks / 250 ) * 256; int found_count = 0; int found_pba = -1;
/* set pba to first block in zone lba is in */
pba = (lba / 1000) * 1024;
usb_stor_dbg(us, "No PBA for LBA %04X\n", lba);
if (max_pba > 1024)
max_pba = 1024;
/* * Scan through the map looking for an unused block * leave 16 unused blocks at start (or as many as * possible) since the sddr55 seems to reuse a used * block when it shouldn't if we don't leave space.
*/ for (i = 0; i < max_pba; i++, pba++) { if (info->pba_to_lba[pba] == UNUSED_BLOCK) {
found_pba = pba; if (found_count++ > 16) break;
}
}
/* check status for error */ if (status[0] == 0xff && status[1] == 0x4) {
info->pba_to_lba[new_pba] = BAD_BLOCK;
set_sense_info (3, 0x0c, 0);
result = USB_STOR_TRANSPORT_FAILED; goto leave;
}
usb_stor_dbg(us, "Updating maps for LBA %04X: old PBA %04X, new PBA %04X\n",
lba, pba, new_pba);
/* update the lba<->pba maps, note new_pba might be the same as pba */
info->lba_to_pba[lba] = new_pba;
info->pba_to_lba[pba] = UNUSED_BLOCK;
/* check that new_pba wasn't already being used */ if (info->pba_to_lba[new_pba] != UNUSED_BLOCK) {
printk(KERN_ERR "sddr55 error: new PBA %04X already in use for LBA %04X\n",
new_pba, info->pba_to_lba[new_pba]);
info->fatal_error = 1;
set_sense_info (3, 0x31, 0);
result = USB_STOR_TRANSPORT_FAILED; goto leave;
}
/* update the pba<->lba maps for new_pba */
info->pba_to_lba[new_pba] = lba % 1000;
case 0x6e: // 1MB case 0xe8: case 0xec:
info->pageshift = 8;
info->smallpageshift = 1; return 0x00100000;
case 0xea: // 2MB case 0x64:
info->pageshift = 8;
info->smallpageshift = 1;
fallthrough; case 0x5d: // 5d is a ROM card with pagesize 512. return 0x00200000;
case 0xe3: // 4MB case 0xe5: case 0x6b: case 0xd5: return 0x00400000;
/* * Every 1024 physical blocks ("zone"), the LBA numbers * go back to zero, but are within a higher * block of LBA's. Also, there is a maximum of * 1000 LBA's per zone. In other words, in PBA * 1024-2047 you will find LBA 0-999 which are * really LBA 1000-1999. Yes, this wastes 24 * physical blocks per zone. Go figure. * These devices can have blocks go bad, so there * are 24 spare blocks to use when blocks do go bad.
*/
/* * SDDR55 returns 0xffff for a bad block, and 0x400 for the * CIS block. (Is this true for cards 8MB or less??) * Record these in the physical to logical map
*/
info->pba_to_lba[i] = lba;
if (lba >= max_lba) { continue;
}
if (info->lba_to_pba[lba + zone * 1000] != NOT_ALLOCATED &&
!info->force_read_only) {
printk(KERN_WARNING "sddr55: map inconsistency at LBA %04X\n",
lba + zone * 1000);
info->force_read_only = 1;
}
/* * only check card status if the map isn't allocated, ie no card seen yet * or if it's been over half a second since we last accessed it
*/ if (info->lba_to_pba == NULL || time_after(jiffies, info->last_access + HZ/2)) {
/* check to see if a card is fitted */
result = sddr55_status (us); if (result) {
result = sddr55_status (us); if (!result) {
set_sense_info (6, 0x28, 0); /* new media, set unit attention, not ready to ready */
} return USB_STOR_TRANSPORT_FAILED;
}
}
/* * if we detected a problem with the map when writing, * don't allow any more access
*/ if (info->fatal_error) {
if (!capacity) {
set_sense_info (3, 0x30, 0); /* incompatible medium */ return USB_STOR_TRANSPORT_FAILED;
}
info->capacity = capacity;
/* * figure out the maximum logical block number, allowing for * the fact that only 250 out of every 256 are used
*/
info->max_log_blks = ((info->capacity >> (info->pageshift + info->blockshift)) / 256) * 250;
/* * Last page in the card, adjust as we only use 250 out of * every 256 pages
*/
capacity = (capacity / 256) * 250;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.