/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */
/* * Verify the log-relative block number and length in basic blocks are valid for * an operation involving the given XFS log buffer. Returns true if the fields * are valid, false otherwise.
*/ staticinlinebool
xlog_verify_bno( struct xlog *log,
xfs_daddr_t blk_no, int bbcount)
{ if (blk_no < 0 || blk_no >= log->l_logBBsize) returnfalse; if (bbcount <= 0 || (blk_no + bbcount) > log->l_logBBsize) returnfalse; returntrue;
}
/*
 * Allocate a buffer to hold log data. The buffer needs to be able to map to
 * a range of nbblks basic blocks at any valid offset within the log.
 *
 * Returns a zeroed buffer of the (sector-rounded, possibly padded) size, or
 * NULL on allocation failure or an invalid block count.
 */
static char *
xlog_alloc_buffer(
	struct xlog	*log,
	int		nbblks)
{
	/*
	 * Pass log block 0 since we don't have an addr yet, buffer will be
	 * verified on read.
	 */
	if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, 0, nbblks))) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		return NULL;
	}

	/*
	 * We do log I/O in units of log sectors (a power-of-2 multiple of the
	 * basic block size), so we round up the requested size to accommodate
	 * the basic blocks required for complete log sectors.
	 *
	 * In addition, the buffer may be used for a non-sector-aligned block
	 * offset, in which case an I/O of the requested size could extend
	 * beyond the end of the buffer. If the requested size is only 1 basic
	 * block it will never straddle a sector boundary, so this won't be an
	 * issue. Nor will this be a problem if the log I/O is done in basic
	 * blocks (sector size 1). But otherwise we extend the buffer by one
	 * extra log sector to ensure there's space to accommodate this
	 * possibility.
	 */
	if (nbblks > 1 && log->l_sectBBsize > 1)
		nbblks += log->l_sectBBsize;
	nbblks = round_up(nbblks, log->l_sectBBsize);
	return kvzalloc(BBTOB(nbblks), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
}
/*
 * Return the byte offset of the start of the given block number's data
 * within a log buffer. The buffer covers a log sector-aligned region, so
 * this is the block's position modulo the sector size, in bytes.
 */
static inline unsigned int
xlog_align(
	struct xlog	*log,
	xfs_daddr_t	blk_no)
{
	/* l_sectBBsize is a power of two, so masking computes the modulus. */
	return BBTOB(blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1));
}
/* * check log record header for recovery
*/ STATICint
xlog_header_check_recover(
xfs_mount_t *mp,
xlog_rec_header_t *head)
{
ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
/* * IRIX doesn't write the h_fmt field and leaves it zeroed * (XLOG_FMT_UNKNOWN). This stops us from trying to recover * a dirty log created in IRIX.
*/ if (XFS_IS_CORRUPT(mp, head->h_fmt != cpu_to_be32(XLOG_FMT))) {
xfs_warn(mp, "dirty log written in incompatible format - can't recover");
xlog_header_check_dump(mp, head); return -EFSCORRUPTED;
} if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
&head->h_fs_uuid))) {
xfs_warn(mp, "dirty log entry has mismatched uuid - can't recover");
xlog_header_check_dump(mp, head); return -EFSCORRUPTED;
} return 0;
}
/* * read the head block of the log and check the header
*/ STATICint
xlog_header_check_mount(
xfs_mount_t *mp,
xlog_rec_header_t *head)
{
ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
if (uuid_is_null(&head->h_fs_uuid)) { /* * IRIX doesn't write the h_fs_uuid or h_fmt fields. If * h_fs_uuid is null, we assume this log was last mounted * by IRIX and continue.
*/
xfs_warn(mp, "null uuid in log - IRIX style log");
} elseif (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
&head->h_fs_uuid))) {
xfs_warn(mp, "log has mismatched uuid - can't recover");
xlog_header_check_dump(mp, head); return -EFSCORRUPTED;
} return 0;
}
/* * This routine finds (to an approximation) the first block in the physical * log which contains the given cycle. It uses a binary search algorithm. * Note that the algorithm can not be perfect because the disk will not * necessarily be perfect.
*/ STATICint
xlog_find_cycle_start( struct xlog *log, char *buffer,
xfs_daddr_t first_blk,
xfs_daddr_t *last_blk,
uint cycle)
{ char *offset;
xfs_daddr_t mid_blk;
xfs_daddr_t end_blk;
uint mid_cycle; int error;
/* * Check that a range of blocks does not contain stop_on_cycle_no. * Fill in *new_blk with the block offset where such a block is * found, or with -1 (an invalid block number) if there is no such * block in the range. The scan needs to occur from front to back * and the pointer into the region must be updated since a later * routine will need to perform another test.
*/ STATICint
xlog_find_verify_cycle( struct xlog *log,
xfs_daddr_t start_blk, int nbblks,
uint stop_on_cycle_no,
xfs_daddr_t *new_blk)
{
xfs_daddr_t i, j;
uint cycle; char *buffer;
xfs_daddr_t bufblks; char *buf = NULL; int error = 0;
/* * Greedily allocate a buffer big enough to handle the full * range of basic blocks we'll be examining. If that fails, * try a smaller size. We need to be able to read at least * a log sector, or we're out of luck.
*/
bufblks = roundup_pow_of_two(nbblks); while (bufblks > log->l_logBBsize)
bufblks >>= 1; while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
bufblks >>= 1; if (bufblks < log->l_sectBBsize) return -ENOMEM;
}
for (i = start_blk; i < start_blk + nbblks; i += bufblks) { int bcount;
bcount = min(bufblks, (start_blk + nbblks - i));
error = xlog_bread(log, i, bcount, buffer, &buf); if (error) goto out;
/* * Potentially backup over partial log record write. * * In the typical case, last_blk is the number of the block directly after * a good log record. Therefore, we subtract one to get the block number * of the last block in the given buffer. extra_bblks contains the number * of blocks we would have read on a previous read. This happens when the * last log record is split over the end of the physical log. * * extra_bblks is the number of blocks potentially verified on a previous * call to this routine.
*/ STATICint
xlog_find_verify_log_record( struct xlog *log,
xfs_daddr_t start_blk,
xfs_daddr_t *last_blk, int extra_bblks)
{
xfs_daddr_t i; char *buffer; char *offset = NULL;
xlog_rec_header_t *head = NULL; int error = 0; int smallmem = 0; int num_blks = *last_blk - start_blk; int xhdrs;
for (i = (*last_blk) - 1; i >= 0; i--) { if (i < start_blk) { /* valid log record not found */
xfs_warn(log->l_mp, "Log inconsistent (didn't find previous header)");
ASSERT(0);
error = -EFSCORRUPTED; goto out;
}
if (smallmem) {
error = xlog_bread(log, i, 1, buffer, &offset); if (error) goto out;
}
head = (xlog_rec_header_t *)offset;
if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) break;
if (!smallmem)
offset -= BBSIZE;
}
/* * We hit the beginning of the physical log & still no header. Return * to caller. If caller can handle a return of -1, then this routine * will be called again for the end of the physical log.
*/ if (i == -1) {
error = 1; goto out;
}
/* * We have the final block of the good log (the first block * of the log record _before_ the head. So we check the uuid.
*/ if ((error = xlog_header_check_mount(log->l_mp, head))) goto out;
/* * We may have found a log record header before we expected one. * last_blk will be the 1st block # with a given cycle #. We may end * up reading an entire log record. In this case, we don't want to * reset last_blk. Only when last_blk points in the middle of a log * record do we update last_blk.
*/
xhdrs = xlog_logrec_hblks(log, head);
if (*last_blk - i + extra_bblks !=
BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
*last_blk = i;
out:
kvfree(buffer); return error;
}
/* * Head is defined to be the point of the log where the next log write * could go. This means that incomplete LR writes at the end are * eliminated when calculating the head. We aren't guaranteed that previous * LR have complete transactions. We only know that a cycle number of * current cycle number -1 won't be present in the log if we start writing * from our current block number. * * last_blk contains the block number of the first block with a given * cycle number. * * Return: zero if normal, non-zero if error.
*/ STATICint
xlog_find_head( struct xlog *log,
xfs_daddr_t *return_head_blk)
{ char *buffer; char *offset;
xfs_daddr_t new_blk, first_blk, start_blk, last_blk, head_blk; int num_scan_bblks;
uint first_half_cycle, last_half_cycle;
uint stop_on_cycle; int error, log_bbnum = log->l_logBBsize;
/* Is the end of the log device zeroed? */
error = xlog_find_zeroed(log, &first_blk); if (error < 0) {
xfs_warn(log->l_mp, "empty log check failed"); return error;
} if (error == 1) {
*return_head_blk = first_blk;
/* Is the whole lot zeroed? */ if (!first_blk) { /* Linux XFS shouldn't generate totally zeroed logs - * mkfs etc write a dummy unmount record to a fresh * log so we can store the uuid in there
*/
xfs_warn(log->l_mp, "totally zeroed log");
}
return 0;
}
first_blk = 0; /* get cycle # of 1st block */
buffer = xlog_alloc_buffer(log, 1); if (!buffer) return -ENOMEM;
/* * If the 1st half cycle number is equal to the last half cycle number, * then the entire log is stamped with the same cycle number. In this * case, head_blk can't be set to zero (which makes sense). The below * math doesn't work out properly with head_blk equal to zero. Instead, * we set it to log_bbnum which is an invalid block number, but this * value makes the math correct. If head_blk doesn't changed through * all the tests below, *head_blk is set to zero at the very end rather * than log_bbnum. In a sense, log_bbnum and zero are the same block * in a circular file.
*/ if (first_half_cycle == last_half_cycle) { /* * In this case we believe that the entire log should have * cycle number last_half_cycle. We need to scan backwards * from the end verifying that there are no holes still * containing last_half_cycle - 1. If we find such a hole, * then the start of that hole will be the new head. The * simple case looks like * x | x ... | x - 1 | x * Another case that fits this picture would be * x | x + 1 | x ... | x * In this case the head really is somewhere at the end of the * log, as one of the latest writes at the beginning was * incomplete. * One more case is * x | x + 1 | x ... | x - 1 | x * This is really the combination of the above two cases, and * the head has to end up at the start of the x-1 hole at the * end of the log. * * In the 256k log case, we will read from the beginning to the * end of the log and search for cycle numbers equal to x-1. * We don't worry about the x+1 blocks that we encounter, * because we know that they cannot be the head since the log * started with x.
*/
head_blk = log_bbnum;
stop_on_cycle = last_half_cycle - 1;
} else { /* * In this case we want to find the first block with cycle * number matching last_half_cycle. We expect the log to be * some variation on * x + 1 ... | x ... | x * The first block with cycle number x (last_half_cycle) will * be where the new head belongs. First we do a binary search * for the first occurrence of last_half_cycle. The binary * search may not be totally accurate, so then we scan back * from there looking for occurrences of last_half_cycle before * us. If that backwards scan wraps around the beginning of * the log, then we look for occurrences of last_half_cycle - 1 * at the end of the log. The cases we're looking for look * like * v binary search stopped here * x + 1 ... | x | x + 1 | x ... | x * ^ but we want to locate this spot * or * <---------> less than scan distance * x + 1 ... | x ... | x - 1 | x * ^ we want to locate this spot
*/
stop_on_cycle = last_half_cycle;
error = xlog_find_cycle_start(log, buffer, first_blk, &head_blk,
last_half_cycle); if (error) goto out_free_buffer;
}
/* * Now validate the answer. Scan back some number of maximum possible * blocks and make sure each one has the expected cycle number. The * maximum is determined by the total possible amount of buffering * in the in-core log. The following number can be made tighter if * we actually look at the block size of the filesystem.
*/
num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log)); if (head_blk >= num_scan_bblks) { /* * We are guaranteed that the entire check can be performed * in one buffer.
*/
start_blk = head_blk - num_scan_bblks; if ((error = xlog_find_verify_cycle(log,
start_blk, num_scan_bblks,
stop_on_cycle, &new_blk))) goto out_free_buffer; if (new_blk != -1)
head_blk = new_blk;
} else { /* need to read 2 parts of log */ /* * We are going to scan backwards in the log in two parts. * First we scan the physical end of the log. In this part * of the log, we are looking for blocks with cycle number * last_half_cycle - 1. * If we find one, then we know that the log starts there, as * we've found a hole that didn't get written in going around * the end of the physical log. The simple case for this is * x + 1 ... | x ... | x - 1 | x * <---------> less than scan distance * If all of the blocks at the end of the log have cycle number * last_half_cycle, then we check the blocks at the start of * the log looking for occurrences of last_half_cycle. If we * find one, then our current estimate for the location of the * first occurrence of last_half_cycle is wrong and we move * back to the hole we've found. This case looks like * x + 1 ... | x | x + 1 | x ... * ^ binary search stopped here * Another case we need to handle that only occurs in 256k * logs is * x + 1 ... | x ... | x+1 | x ... * ^ binary search stops here * In a 256k log, the scan at the end of the log will see the * x + 1 blocks. We need to skip past those since that is * certainly not the head of the log. By searching for * last_half_cycle-1 we accomplish that.
*/
ASSERT(head_blk <= INT_MAX &&
(xfs_daddr_t) num_scan_bblks >= head_blk);
start_blk = log_bbnum - (num_scan_bblks - head_blk); if ((error = xlog_find_verify_cycle(log, start_blk,
num_scan_bblks - (int)head_blk,
(stop_on_cycle - 1), &new_blk))) goto out_free_buffer; if (new_blk != -1) {
head_blk = new_blk; goto validate_head;
}
/* * Scan beginning of log now. The last part of the physical * log is good. This scan needs to verify that it doesn't find * the last_half_cycle.
*/
start_blk = 0;
ASSERT(head_blk <= INT_MAX); if ((error = xlog_find_verify_cycle(log,
start_blk, (int)head_blk,
stop_on_cycle, &new_blk))) goto out_free_buffer; if (new_blk != -1)
head_blk = new_blk;
}
validate_head: /* * Now we need to make sure head_blk is not pointing to a block in * the middle of a log record.
*/
num_scan_bblks = XLOG_REC_SHIFT(log); if (head_blk >= num_scan_bblks) {
start_blk = head_blk - num_scan_bblks; /* don't read head_blk */
/* start ptr at last block ptr before head_blk */
error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0); if (error == 1)
error = -EIO; if (error) goto out_free_buffer;
} else {
start_blk = 0;
ASSERT(head_blk <= INT_MAX);
error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0); if (error < 0) goto out_free_buffer; if (error == 1) { /* We hit the beginning of the log during our search */
start_blk = log_bbnum - (num_scan_bblks - head_blk);
new_blk = log_bbnum;
ASSERT(start_blk <= INT_MAX &&
(xfs_daddr_t) log_bbnum-start_blk >= 0);
ASSERT(head_blk <= INT_MAX);
error = xlog_find_verify_log_record(log, start_blk,
&new_blk, (int)head_blk); if (error == 1)
error = -EIO; if (error) goto out_free_buffer; if (new_blk != log_bbnum)
head_blk = new_blk;
} elseif (error) goto out_free_buffer;
}
kvfree(buffer); if (head_blk == log_bbnum)
*return_head_blk = 0; else
*return_head_blk = head_blk; /* * When returning here, we have a good block number. Bad block * means that during a previous crash, we didn't have a clean break * from cycle number N to cycle number N-1. In this case, we need * to find the first block with cycle number N-1.
*/ return 0;
out_free_buffer:
kvfree(buffer); if (error)
xfs_warn(log->l_mp, "failed to find log head"); return error;
}
/* * Seek backwards in the log for log record headers. * * Given a starting log block, walk backwards until we find the provided number * of records or hit the provided tail block. The return value is the number of * records encountered or a negative error code. The log block and buffer * pointer of the last record seen are returned in rblk and rhead respectively.
*/ STATICint
xlog_rseek_logrec_hdr( struct xlog *log,
xfs_daddr_t head_blk,
xfs_daddr_t tail_blk, int count, char *buffer,
xfs_daddr_t *rblk, struct xlog_rec_header **rhead, bool *wrapped)
{ int i; int error; int found = 0; char *offset = NULL;
xfs_daddr_t end_blk;
*wrapped = false;
/* * Walk backwards from the head block until we hit the tail or the first * block in the log.
*/
end_blk = head_blk > tail_blk ? tail_blk : 0; for (i = (int) head_blk - 1; i >= end_blk; i--) {
error = xlog_bread(log, i, 1, buffer, &offset); if (error) goto out_error;
/* * If we haven't hit the tail block or the log record header count, * start looking again from the end of the physical log. Note that * callers can pass head == tail if the tail is not yet known.
*/ if (tail_blk >= head_blk && found != count) { for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) {
error = xlog_bread(log, i, 1, buffer, &offset); if (error) goto out_error;
/* * Seek forward in the log for log record headers. * * Given head and tail blocks, walk forward from the tail block until we find * the provided number of records or hit the head block. The return value is the * number of records encountered or a negative error code. The log block and * buffer pointer of the last record seen are returned in rblk and rhead * respectively.
*/ STATICint
xlog_seek_logrec_hdr( struct xlog *log,
xfs_daddr_t head_blk,
xfs_daddr_t tail_blk, int count, char *buffer,
xfs_daddr_t *rblk, struct xlog_rec_header **rhead, bool *wrapped)
{ int i; int error; int found = 0; char *offset = NULL;
xfs_daddr_t end_blk;
*wrapped = false;
/* * Walk forward from the tail block until we hit the head or the last * block in the log.
*/
end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1; for (i = (int) tail_blk; i <= end_blk; i++) {
error = xlog_bread(log, i, 1, buffer, &offset); if (error) goto out_error;
/* * If we haven't hit the head block or the log record header count, * start looking again from the start of the physical log.
*/ if (tail_blk > head_blk && found != count) { for (i = 0; i < (int) head_blk; i++) {
error = xlog_bread(log, i, 1, buffer, &offset); if (error) goto out_error;
/* * Calculate distance from head to tail (i.e., unused space in the log).
*/ staticinlineint
xlog_tail_distance( struct xlog *log,
xfs_daddr_t head_blk,
xfs_daddr_t tail_blk)
{ if (head_blk < tail_blk) return tail_blk - head_blk;
/* * Verify the log tail. This is particularly important when torn or incomplete * writes have been detected near the front of the log and the head has been * walked back accordingly. * * We also have to handle the case where the tail was pinned and the head * blocked behind the tail right before a crash. If the tail had been pushed * immediately prior to the crash and the subsequent checkpoint was only * partially written, it's possible it overwrote the last referenced tail in the * log with garbage. This is not a coherency problem because the tail must have * been pushed before it can be overwritten, but appears as log corruption to * recovery because we have no way to know the tail was updated if the * subsequent checkpoint didn't write successfully. * * Therefore, CRC check the log from tail to head. If a failure occurs and the * offending record is within max iclog bufs from the head, walk the tail * forward and retry until a valid tail is found or corruption is detected out * of the range of a possible overwrite.
*/ STATICint
xlog_verify_tail( struct xlog *log,
xfs_daddr_t head_blk,
xfs_daddr_t *tail_blk, int hsize)
{ struct xlog_rec_header *thead; char *buffer;
xfs_daddr_t first_bad; int error = 0; bool wrapped;
xfs_daddr_t tmp_tail;
xfs_daddr_t orig_tail = *tail_blk;
buffer = xlog_alloc_buffer(log, 1); if (!buffer) return -ENOMEM;
/* * Make sure the tail points to a record (returns positive count on * success).
*/
error = xlog_seek_logrec_hdr(log, head_blk, *tail_blk, 1, buffer,
&tmp_tail, &thead, &wrapped); if (error < 0) goto out; if (*tail_blk != tmp_tail)
*tail_blk = tmp_tail;
/* * Run a CRC check from the tail to the head. We can't just check * MAX_ICLOGS records past the tail because the tail may point to stale * blocks cleared during the search for the head/tail. These blocks are * overwritten with zero-length records and thus record count is not a * reliable indicator of the iclog state before a crash.
*/
first_bad = 0;
error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
XLOG_RECOVER_CRCPASS, &first_bad); while ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) { int tail_distance;
/* * Is corruption within range of the head? If so, retry from * the next record. Otherwise return an error.
*/
tail_distance = xlog_tail_distance(log, head_blk, first_bad); if (tail_distance > BTOBB(XLOG_MAX_ICLOGS * hsize)) break;
/* skip to the next record; returns positive count on success */
error = xlog_seek_logrec_hdr(log, head_blk, first_bad, 2,
buffer, &tmp_tail, &thead, &wrapped); if (error < 0) goto out;
/* * Detect and trim torn writes from the head of the log. * * Storage without sector atomicity guarantees can result in torn writes in the * log in the event of a crash. Our only means to detect this scenario is via * CRC verification. While we can't always be certain that CRC verification * failure is due to a torn write vs. an unrelated corruption, we do know that * only a certain number (XLOG_MAX_ICLOGS) of log records can be written out at * one time. Therefore, CRC verify up to XLOG_MAX_ICLOGS records at the head of * the log and treat failures in this range as torn writes as a matter of * policy. In the event of CRC failure, the head is walked back to the last good * record in the log and the tail is updated from that record and verified.
*/ STATICint
xlog_verify_head( struct xlog *log,
xfs_daddr_t *head_blk, /* in/out: unverified head */
xfs_daddr_t *tail_blk, /* out: tail block */ char *buffer,
xfs_daddr_t *rhead_blk, /* start blk of last record */ struct xlog_rec_header **rhead, /* ptr to last record */ bool *wrapped) /* last rec. wraps phys. log */
{ struct xlog_rec_header *tmp_rhead; char *tmp_buffer;
xfs_daddr_t first_bad;
xfs_daddr_t tmp_rhead_blk; int found; int error; bool tmp_wrapped;
/* * Check the head of the log for torn writes. Search backwards from the * head until we hit the tail or the maximum number of log record I/Os * that could have been in flight at one time. Use a temporary buffer so * we don't trash the rhead/buffer pointers from the caller.
*/
tmp_buffer = xlog_alloc_buffer(log, 1); if (!tmp_buffer) return -ENOMEM;
error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk,
XLOG_MAX_ICLOGS, tmp_buffer,
&tmp_rhead_blk, &tmp_rhead, &tmp_wrapped);
kvfree(tmp_buffer); if (error < 0) return error;
/* * Now run a CRC verification pass over the records starting at the * block found above to the current head. If a CRC failure occurs, the * log block of the first bad record is saved in first_bad.
*/
error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk,
XLOG_RECOVER_CRCPASS, &first_bad); if ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) { /* * We've hit a potential torn write. Reset the error and warn * about it.
*/
error = 0;
xfs_warn(log->l_mp, "Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.",
first_bad, *head_blk);
/* * Get the header block and buffer pointer for the last good * record before the bad record. * * Note that xlog_find_tail() clears the blocks at the new head * (i.e., the records with invalid CRC) if the cycle number * matches the current cycle.
*/
found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1,
buffer, rhead_blk, rhead, wrapped); if (found < 0) return found; if (found == 0) /* XXX: right thing to do here? */ return -EIO;
/* * Reset the head block to the starting block of the first bad * log record and set the tail block based on the last good * record. * * Bail out if the updated head/tail match as this indicates * possible corruption outside of the acceptable * (XLOG_MAX_ICLOGS) range. This is a job for xfs_repair...
*/
*head_blk = first_bad;
*tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn)); if (*head_blk == *tail_blk) {
ASSERT(0); return 0;
}
} if (error) return error;
/* * We need to make sure we handle log wrapping properly, so we can't use the * calculated logbno directly. Make sure it wraps to the correct bno inside the * log. * * The log is limited to 32 bit sizes, so we use the appropriate modulus * operation here and cast it back to a 64 bit daddr on return.
*/ staticinline xfs_daddr_t
xlog_wrap_logbno( struct xlog *log,
xfs_daddr_t bno)
{ int mod;
/* * Check whether the head of the log points to an unmount record. In other * words, determine whether the log is clean. If so, update the in-core state * appropriately.
*/ staticint
xlog_check_unmount_rec( struct xlog *log,
xfs_daddr_t *head_blk,
xfs_daddr_t *tail_blk, struct xlog_rec_header *rhead,
xfs_daddr_t rhead_blk, char *buffer, bool *clean)
{ struct xlog_op_header *op_head;
xfs_daddr_t umount_data_blk;
xfs_daddr_t after_umount_blk; int hblks; int error; char *offset;
*clean = false;
/* * Look for unmount record. If we find it, then we know there was a * clean unmount. Since 'i' could be the last block in the physical * log, we convert to a log block before comparing to the head_blk. * * Save the current tail lsn to use to pass to xlog_clear_stale_blocks() * below. We won't want to clear the unmount record if there is one, so * we pass the lsn of the unmount record rather than the block after it.
*/
hblks = xlog_logrec_hblks(log, rhead);
after_umount_blk = xlog_wrap_logbno(log,
rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len)));
op_head = (struct xlog_op_header *)offset; if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) { /* * Set tail and last sync so that newly written log * records will point recovery to after the current * unmount record.
*/
xlog_assign_atomic_lsn(&log->l_tail_lsn,
log->l_curr_cycle, after_umount_blk);
log->l_ailp->ail_head_lsn =
atomic64_read(&log->l_tail_lsn);
*tail_blk = after_umount_blk;
*clean = true;
}
}
return 0;
}
/*
 * Set the in-core log state from the last good log record found during
 * head/tail discovery.
 *
 * @head_blk:	block number of the discovered log head
 * @rhead:	last good log record header before the head
 * @rhead_blk:	block number of that record header
 * @bump_cycle:	true if the search wrapped the physical log, in which case
 *		the next write starts a new cycle
 */
static void
xlog_set_state(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		rhead_blk,
	bool			bump_cycle)
{
	/*
	 * Reset log values according to the state of the log when we
	 * crashed. In the case where head_blk == 0, we bump curr_cycle
	 * one because the next write starts a new cycle rather than
	 * continuing the cycle of the last good log record. At this
	 * point we have guaranteed that all partial log records have been
	 * accounted for. Therefore, we know that the last good log record
	 * written was complete and ended exactly on the end boundary
	 * of the physical log.
	 */
	log->l_prev_block = rhead_blk;
	log->l_curr_block = (int)head_blk;
	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
	if (bump_cycle)
		log->l_curr_cycle++;
	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
	log->l_ailp->ail_head_lsn = be64_to_cpu(rhead->h_lsn);
}
/* * Find the sync block number or the tail of the log. * * This will be the block number of the last record to have its * associated buffers synced to disk. Every log record header has * a sync lsn embedded in it. LSNs hold block numbers, so it is easy * to get a sync block number. The only concern is to figure out which * log record header to believe. * * The following algorithm uses the log record header with the largest * lsn. The entire log record does not need to be valid. We only care * that the header is valid. * * We could speed up search by using current head_blk buffer, but it is not * available.
*/ STATICint
xlog_find_tail( struct xlog *log,
xfs_daddr_t *head_blk,
xfs_daddr_t *tail_blk)
{
xlog_rec_header_t *rhead; char *offset = NULL; char *buffer; int error;
xfs_daddr_t rhead_blk;
xfs_lsn_t tail_lsn; bool wrapped = false; bool clean = false;
/* * Find previous log record
*/ if ((error = xlog_find_head(log, head_blk))) return error;
ASSERT(*head_blk < INT_MAX);
buffer = xlog_alloc_buffer(log, 1); if (!buffer) return -ENOMEM; if (*head_blk == 0) { /* special case */
error = xlog_bread(log, 0, 1, buffer, &offset); if (error) goto done;
if (xlog_get_cycle(offset) == 0) {
*tail_blk = 0; /* leave all other log inited values alone */ goto done;
}
}
/* * Search backwards through the log looking for the log record header * block. This wraps all the way back around to the head so something is * seriously wrong if we can't find it.
*/
error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, buffer,
&rhead_blk, &rhead, &wrapped); if (error < 0) goto done; if (!error) {
xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
error = -EFSCORRUPTED; goto done;
}
*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
/* * Set the log state based on the current head record.
*/
xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
tail_lsn = atomic64_read(&log->l_tail_lsn);
/* * Look for an unmount record at the head of the log. This sets the log * state to determine whether recovery is necessary.
*/
error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
rhead_blk, buffer, &clean); if (error) goto done;
/* * Verify the log head if the log is not clean (e.g., we have anything * but an unmount record at the head). This uses CRC verification to * detect and trim torn writes. If discovered, CRC failures are * considered torn writes and the log head is trimmed accordingly. * * Note that we can only run CRC verification when the log is dirty * because there's no guarantee that the log data behind an unmount * record is compatible with the current architecture.
*/ if (!clean) {
xfs_daddr_t orig_head = *head_blk;
/* update in-core state again if the head changed */ if (*head_blk != orig_head) {
xlog_set_state(log, *head_blk, rhead, rhead_blk,
wrapped);
tail_lsn = atomic64_read(&log->l_tail_lsn);
error = xlog_check_unmount_rec(log, head_blk, tail_blk,
rhead, rhead_blk, buffer,
&clean); if (error) goto done;
}
}
/* * Note that the unmount was clean. If the unmount was not clean, we * need to know this to rebuild the superblock counters from the perag * headers if we have a filesystem using non-persistent counters.
*/ if (clean)
xfs_set_clean(log->l_mp);
/* * Make sure that there are no blocks in front of the head * with the same cycle number as the head. This can happen * because we allow multiple outstanding log writes concurrently, * and the later writes might make it out before earlier ones. * * We use the lsn from before modifying it so that we'll never * overwrite the unmount record after a clean unmount. * * Do this only if we are going to recover the filesystem * * NOTE: This used to say "if (!readonly)" * However on Linux, we can & do recover a read-only filesystem. * We only skip recovery if NORECOVERY is specified on mount, * in which case we would not be here. * * But... if the -device- itself is readonly, just skip this. * We can't recover this device anyway, so it won't matter.
*/ if (!xfs_readonly_buftarg(log->l_targ))
error = xlog_clear_stale_blocks(log, tail_lsn);
done:
kvfree(buffer);
if (error)
xfs_warn(log->l_mp, "failed to locate log tail"); return error;
}
/* * Is the log zeroed at all? * * The last binary search should be changed to perform an X block read * once X becomes small enough. You can then search linearly through * the X blocks. This will cut down on the number of reads we need to do. * * If the log is partially zeroed, this routine will pass back the blkno * of the first block with cycle number 0. It won't have a complete LR * preceding it. * * Return: * 0 => the log is completely written to * 1 => use *blk_no as the first block of the log * <0 => error has occurred
*/ STATICint
xlog_find_zeroed( struct xlog *log,
xfs_daddr_t *blk_no)
{ char *buffer; char *offset;
uint first_cycle, last_cycle;
xfs_daddr_t new_blk, last_blk, start_blk;
xfs_daddr_t num_scan_bblks; int error, log_bbnum = log->l_logBBsize; int ret = 1;
last_cycle = xlog_get_cycle(offset); if (last_cycle != 0) { /* log completely written to */
ret = 0; goto out_free_buffer;
}
/* we have a partially zeroed log */
last_blk = log_bbnum-1;
error = xlog_find_cycle_start(log, buffer, 0, &last_blk, 0); if (error) goto out_free_buffer;
/* * Validate the answer. Because there is no way to guarantee that * the entire log is made up of log records which are the same size, * we scan over the defined maximum blocks. At this point, the maximum * is not chosen to mean anything special. XXXmiken
*/
num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
ASSERT(num_scan_bblks <= INT_MAX);
/* * We search for any instances of cycle number 0 that occur before * our current estimate of the head. What we're trying to detect is * 1 ... | 0 | 1 | 0... * ^ binary search ends here
*/ if ((error = xlog_find_verify_cycle(log, start_blk,
(int)num_scan_bblks, 0, &new_blk))) goto out_free_buffer; if (new_blk != -1)
last_blk = new_blk;
/* * Potentially backup over partial log record write. We don't need * to search the end of the log because we know it is zero.
*/
error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0); if (error == 1)
error = -EIO; if (error) goto out_free_buffer;
/* * These are simple subroutines used by xlog_clear_stale_blocks() below * to initialize a buffer full of empty log record headers and write * them into the log.
*/ STATICvoid
xlog_add_record( struct xlog *log, char *buf, int cycle, int block, int tail_cycle, int tail_block)
{
xlog_rec_header_t *recp = (xlog_rec_header_t *)buf;
STATICint
xlog_write_log_records( struct xlog *log, int cycle, int start_block, int blocks, int tail_cycle, int tail_block)
{ char *offset; char *buffer; int balign, ealign; int sectbb = log->l_sectBBsize; int end_block = start_block + blocks; int bufblks; int error = 0; int i, j = 0;
/* * Greedily allocate a buffer big enough to handle the full * range of basic blocks to be written. If that fails, try * a smaller size. We need to be able to write at least a * log sector, or we're out of luck.
*/
bufblks = roundup_pow_of_two(blocks); while (bufblks > log->l_logBBsize)
bufblks >>= 1; while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
bufblks >>= 1; if (bufblks < sectbb) return -ENOMEM;
}
/* We may need to do a read at the start to fill in part of * the buffer in the starting sector not covered by the first * write below.
*/
balign = round_down(start_block, sectbb); if (balign != start_block) {
error = xlog_bread_noalign(log, start_block, 1, buffer); if (error) goto out_free_buffer;
j = start_block - balign;
}
for (i = start_block; i < end_block; i += bufblks) { int bcount, endcount;
/* We may need to do a read at the end to fill in part of * the buffer in the final sector not covered by the write. * If this is the same sector as the above read, skip it.
*/
ealign = round_down(end_block, sectbb); if (j == 0 && (start_block + endcount > ealign)) {
error = xlog_bread_noalign(log, ealign, sectbb,
buffer + BBTOB(ealign - start_block)); if (error) break;
/* * This routine is called to blow away any incomplete log writes out * in front of the log head. We do this so that we won't become confused * if we come up, write only a little bit more, and then crash again. * If we leave the partial log records out there, this situation could * cause us to think those partial writes are valid blocks since they * have the current cycle number. We get rid of them by overwriting them * with empty log records with the old cycle number rather than the * current one. * * The tail lsn is passed in rather than taken from * the log so that we will not write over the unmount record after a * clean unmount in a 512 block log. Doing so would leave the log without * any valid log records in it until a new one was written. If we crashed * during that time we would not be able to recover.
*/ STATICint
xlog_clear_stale_blocks( struct xlog *log,
xfs_lsn_t tail_lsn)
{ int tail_cycle, head_cycle; int tail_block, head_block; int tail_distance, max_distance; int distance; int error;
/* * Figure out the distance between the new head of the log * and the tail. We want to write over any blocks beyond the * head that we may have written just before the crash, but * we don't want to overwrite the tail of the log.
*/ if (head_cycle == tail_cycle) { /* * The tail is behind the head in the physical log, * so the distance from the head to the tail is the * distance from the head to the end of the log plus * the distance from the beginning of the log to the * tail.
*/ if (XFS_IS_CORRUPT(log->l_mp,
head_block < tail_block ||
head_block >= log->l_logBBsize)) return -EFSCORRUPTED;
tail_distance = tail_block + (log->l_logBBsize - head_block);
} else { /* * The head is behind the tail in the physical log, * so the distance from the head to the tail is just * the tail block minus the head block.
*/ if (XFS_IS_CORRUPT(log->l_mp,
head_block >= tail_block ||
head_cycle != tail_cycle + 1)) return -EFSCORRUPTED;
tail_distance = tail_block - head_block;
}
/* * If the head is right up against the tail, we can't clear * anything.
*/ if (tail_distance <= 0) {
ASSERT(tail_distance == 0); return 0;
}
max_distance = XLOG_TOTAL_REC_SHIFT(log); /* * Take the smaller of the maximum amount of outstanding I/O * we could have and the distance to the tail to clear out. * We take the smaller so that we don't overwrite the tail and * we don't waste all day writing from the head to the tail * for no reason.
*/
max_distance = min(max_distance, tail_distance);
if ((head_block + max_distance) <= log->l_logBBsize) { /* * We can stomp all the blocks we need to without * wrapping around the end of the log. Just do it * in a single write. Use the cycle number of the * current cycle minus one so that the log will look like: * n ... | n - 1 ...
*/
error = xlog_write_log_records(log, (head_cycle - 1),
head_block, max_distance, tail_cycle,
tail_block); if (error) return error;
} else { /* * We need to wrap around the end of the physical log in * order to clear all the blocks. Do it in two separate * I/Os. The first write should be from the head to the * end of the physical log, and it should use the current * cycle number minus one just like above.
*/
distance = log->l_logBBsize - head_block;
error = xlog_write_log_records(log, (head_cycle - 1),
head_block, distance, tail_cycle,
tail_block);
if (error) return error;
/* * Now write the blocks at the start of the physical log. * This writes the remainder of the blocks we want to clear. * It uses the current cycle number since we're now on the * same cycle as the head so that we get: * n ... n ... | n - 1 ... * ^^^^^ blocks we're writing
*/
distance = max_distance - (log->l_logBBsize - head_block);
error = xlog_write_log_records(log, head_cycle, 0, distance,
tail_cycle, tail_block); if (error) return error;
}
return 0;
}
/*
 * Release the recovered intent item in the AIL that matches the given intent
 * type and intent id.
 */
void
xlog_recover_release_intent(
	struct xlog		*log,
	unsigned short		intent_type,
	uint64_t		intent_id)
{
	struct xfs_defer_pending *dfp, *n;

	/*
	 * Walk the pending deferred-work list; cancel recovery for every
	 * intent whose type and id both match the caller's request.
	 */
	list_for_each_entry_safe(dfp, n, &log->r_dfops, dfp_list) {
		struct xfs_log_item	*lip = dfp->dfp_intent;

		if (lip->li_type != intent_type ||
		    !lip->li_ops->iop_match(lip, intent_id))
			continue;

		ASSERT(xlog_item_is_intent(lip));

		xfs_defer_cancel_recovery(log->l_mp, dfp);
	}
}
/*
 * Get an inode for log recovery: look it up, attach dquots, and mark it
 * as under recovery if it is unlinked.
 */
int
xlog_recover_iget(
	struct xfs_mount	*mp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp)
{
	int			error;

	error = xfs_iget(mp, NULL, ino, 0, 0, ipp);
	if (error)
		return error;

	error = xfs_qm_dqattach(*ipp);
	if (error) {
		xfs_irele(*ipp);
		return error;
	}

	if (VFS_I(*ipp)->i_nlink == 0)
		xfs_iflags_set(*ipp, XFS_IRECOVERY);

	return 0;
}
/*
 * Get an inode so that we can recover a log operation.
 *
 * Log intent items that target inodes effectively contain a file handle.
 * Check that the generation number matches the intent item like we do for
 * other file handles.  Log intent items defined after this validation weakness
 * was identified must use this function.
 */
int
xlog_recover_iget_handle(
	struct xfs_mount	*mp,
	xfs_ino_t		ino,
	uint32_t		gen,
	struct xfs_inode	**ipp)
{
	struct xfs_inode	*ip;
	int			error;

	error = xlog_recover_iget(mp, ino, &ip);
	if (error)
		return error;

	if (VFS_I(ip)->i_generation != gen) {
		xfs_irele(ip);
		return -EFSCORRUPTED;
	}

	*ipp = ip;
	return 0;
}
for (i = 0; i < ARRAY_SIZE(xlog_recover_item_ops); i++) if (ITEM_TYPE(item) == xlog_recover_item_ops[i]->item_type) return xlog_recover_item_ops[i];
return NULL;
}
/* * Sort the log items in the transaction. * * The ordering constraints are defined by the inode allocation and unlink * behaviour. The rules are: * * 1. Every item is only logged once in a given transaction. Hence it * represents the last logged state of the item. Hence ordering is * dependent on the order in which operations need to be performed so * required initial conditions are always met. * * 2. Cancelled buffers are recorded in pass 1 in a separate table and * there's nothing to replay from them so we can simply cull them * from the transaction. However, we can't do that until after we've * replayed all the other items because they may be dependent on the * cancelled buffer and replaying the cancelled buffer can remove it * form the cancelled buffer table. Hence they have to be done last. * * 3. Inode allocation buffers must be replayed before inode items that * read the buffer and replay changes into it. For filesystems using the * ICREATE transactions, this means XFS_LI_ICREATE objects need to get * treated the same as inode allocation buffers as they create and * initialise the buffers directly. * * 4. Inode unlink buffers must be replayed after inode items are replayed. * This ensures that inodes are completely flushed to the inode buffer * in a "free" state before we remove the unlinked inode list pointer. * * Hence the ordering needs to be inode allocation buffers first, inode items * second, inode unlink buffers third and cancelled buffers last. * * But there's a problem with that - we can't tell an inode allocation buffer * apart from a regular buffer, so we can't separate them. We can, however, * tell an inode unlink buffer from the others, and so we can separate them out * from all the other buffers and move them to last. 
* * Hence, 4 lists, in order from head to tail: * - buffer_list for all buffers except cancelled/inode unlink buffers * - item_list for all non-buffer items * - inode_buffer_list for inode unlink buffers * - cancel_list for the cancelled buffers * * Note that we add objects to the tail of the lists so that first-to-last * ordering is preserved within the lists. Adding objects to the head of the * list means when we traverse from the head we walk them in last-to-first * order. For cancelled buffers and inode unlink buffers this doesn't matter, * but for all other items there may be specific ordering that we need to * preserve.
*/ STATICint
xlog_recover_reorder_trans( struct xlog *log, struct xlog_recover *trans, int pass)
{ struct xlog_recover_item *item, *n; int error = 0;
LIST_HEAD(sort_list);
LIST_HEAD(cancel_list);
LIST_HEAD(buffer_list);
LIST_HEAD(inode_buffer_list);
LIST_HEAD(item_list);
item->ri_ops = xlog_find_item_ops(item); if (!item->ri_ops) {
xfs_warn(log->l_mp, "%s: unrecognized type of log operation (%d)",
__func__, ITEM_TYPE(item));
ASSERT(0); /* * return the remaining items back to the transaction * item list so they can be freed in caller.
*/ if (!list_empty(&sort_list))
list_splice_init(&sort_list, &trans->r_itemq);
error = -EFSCORRUPTED; break;
}
if (item->ri_ops->reorder)
fate = item->ri_ops->reorder(item);
switch (fate) { case XLOG_REORDER_BUFFER_LIST:
list_move_tail(&item->ri_list, &buffer_list); break; case XLOG_REORDER_CANCEL_LIST:
trace_xfs_log_recover_item_reorder_head(log,
trans, item, pass);
list_move(&item->ri_list, &cancel_list); break; case XLOG_REORDER_INODE_BUFFER_LIST:
list_move(&item->ri_list, &inode_buffer_list); break; case XLOG_REORDER_ITEM_LIST:
trace_xfs_log_recover_item_reorder_tail(log,
trans, item, pass);
list_move_tail(&item->ri_list, &item_list); break;
}
}
ASSERT(list_empty(&sort_list)); if (!list_empty(&buffer_list))
list_splice(&buffer_list, &trans->r_itemq); if (!list_empty(&item_list))
list_splice_tail(&item_list, &trans->r_itemq); if (!list_empty(&inode_buffer_list))
list_splice_tail(&inode_buffer_list, &trans->r_itemq); if (!list_empty(&cancel_list))
list_splice_tail(&cancel_list, &trans->r_itemq); return error;
}
/*
 * Create a deferred work structure for resuming and tracking the progress of a
 * log intent item that was found during recovery.
 */
void
xlog_recover_intent_item(
	struct xlog		*log,
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn,
	const struct xfs_defer_op_type *ops)
{
	ASSERT(xlog_item_is_intent(lip));

	xfs_defer_start_recovery(lip, &log->r_dfops, ops);

	/*
	 * Insert the intent into the AIL directly and drop one reference so
	 * that finishing or canceling the work will drop the other.
	 */
	xfs_trans_ail_insert(log->l_ailp, lip, lsn);
	lip->li_ops->iop_unpin(lip, 0);
}
if (item->ri_ops->commit_pass2)
error = item->ri_ops->commit_pass2(log, buffer_list,
item, trans->r_lsn); if (error) return error;
}
return error;
}
/* * Perform the transaction. * * If the transaction modifies a buffer or inode, do it now. Otherwise, * EFIs and EFDs get queued up by adding entries into the AIL for them.
*/ STATICint
xlog_recover_commit_trans( struct xlog *log, struct xlog_recover *trans, int pass, struct list_head *buffer_list)
{ int error = 0; int items_queued = 0; struct xlog_recover_item *item; struct xlog_recover_item *next;
LIST_HEAD (ra_list);
LIST_HEAD (done_list);
#define XLOG_RECOVER_COMMIT_QUEUE_MAX 100
hlist_del_init(&trans->r_list);
error = xlog_recover_reorder_trans(log, trans, pass); if (error) return error;
/* * If the transaction is empty, the header was split across this and the * previous record. Copy the rest of the header.
*/ if (list_empty(&trans->r_itemq)) {
ASSERT(len <= sizeof(struct xfs_trans_header)); if (len > sizeof(struct xfs_trans_header)) {
xfs_warn(log->l_mp, "%s: bad header length", __func__); return -EFSCORRUPTED;
}
/* * The next region to add is the start of a new region. It could be * a whole region or it could be the first part of a new region. Because * of this, the assumption here is that the type and size fields of all * format structures fit into the first 32 bits of the structure. * * This works because all regions must be 32 bit aligned. Therefore, we * either have both fields or we have neither field. In the case we have * neither field, the data part of the region is zero length. We only have * a log_op_header and can throw away the header since a new one will appear * later. If we have at least 4 bytes, then we can determine how many regions * will appear in the current log item.
*/ STATICint
xlog_recover_add_to_trans( struct xlog *log, struct xlog_recover *trans, char *dp, int len)
{ struct xfs_inode_log_format *in_f; /* any will do */ struct xlog_recover_item *item; char *ptr;
if (!len) return 0; if (list_empty(&trans->r_itemq)) { /* we need to catch log corruptions here */ if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
xfs_warn(log->l_mp, "%s: bad header magic number",
__func__);
ASSERT(0); return -EFSCORRUPTED;
}
if (len > sizeof(struct xfs_trans_header)) {
xfs_warn(log->l_mp, "%s: bad header length", __func__);
ASSERT(0); return -EFSCORRUPTED;
}
/* * The transaction header can be arbitrarily split across op * records. If we don't have the whole thing here, copy what we * do have and handle the rest in the next record.
*/ if (len == sizeof(struct xfs_trans_header))
xlog_recover_add_item(&trans->r_itemq);
memcpy(&trans->r_theader, dp, len); return 0;
}
/* take the tail entry */
item = list_entry(trans->r_itemq.prev, struct xlog_recover_item,
ri_list); if (item->ri_total != 0 &&
item->ri_total == item->ri_cnt) { /* tail item is in use, get a new one */
xlog_recover_add_item(&trans->r_itemq);
item = list_entry(trans->r_itemq.prev, struct xlog_recover_item, ri_list);
}
if (item->ri_total == 0) { /* first region to be added */ if (in_f->ilf_size == 0 ||
in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
xfs_warn(log->l_mp, "bad number of regions (%d) in inode log format",
in_f->ilf_size);
ASSERT(0);
kvfree(ptr); return -EFSCORRUPTED;
}
/* Description region is ri_buf[0] */
item->ri_buf[item->ri_cnt].iov_base = ptr;
item->ri_buf[item->ri_cnt].iov_len = len;
item->ri_cnt++;
trace_xfs_log_recover_item_add(log, trans, item, 0); return 0;
}
/* * Free up any resources allocated by the transaction * * Remember that EFIs, EFDs, and IUNLINKs are handled later.
*/ STATICvoid
xlog_recover_free_trans( struct xlog_recover *trans)
{ struct xlog_recover_item *item, *n; int i;
hlist_del_init(&trans->r_list);
list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) { /* Free the regions in the item. */
list_del(&item->ri_list); for (i = 0; i < item->ri_cnt; i++)
kvfree(item->ri_buf[i].iov_base); /* Free the item itself */
kfree(item->ri_buf);
kfree(item);
} /* Free the transaction recover structure */
kfree(trans);
}
/* * On error or completion, trans is freed.
*/ STATICint
xlog_recovery_process_trans( struct xlog *log, struct xlog_recover *trans, char *dp, unsignedint len, unsignedint flags, int pass, struct list_head *buffer_list)
{ int error = 0; bool freeit = false;
/* mask off ophdr transaction container flags */
flags &= ~XLOG_END_TRANS; if (flags & XLOG_WAS_CONT_TRANS)
flags &= ~XLOG_CONTINUE_TRANS;
/* * Callees must not free the trans structure. We'll decide if we need to * free it or not based on the operation being done and it's result.
*/ switch (flags) { /* expected flag values */ case 0: case XLOG_CONTINUE_TRANS:
error = xlog_recover_add_to_trans(log, trans, dp, len); break; case XLOG_WAS_CONT_TRANS:
error = xlog_recover_add_to_cont_trans(log, trans, dp, len); break; case XLOG_COMMIT_TRANS:
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.25 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.