0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * This file is part of UBIFS.
0004  *
0005  * Copyright (C) 2006-2008 Nokia Corporation.
0006  * Copyright (C) 2006, 2007 University of Szeged, Hungary
0007  *
0008  * Authors: Artem Bityutskiy (Битюцкий Артём)
0009  *          Adrian Hunter
0010  *          Zoltan Sogor
0011  */
0012 
0013 /*
0014  * This file implements UBIFS I/O subsystem which provides various I/O-related
0015  * helper functions (reading/writing/checking/validating nodes) and implements
0016  * write-buffering support. Write buffers help to save space which otherwise
0017  * would have been wasted for padding to the nearest minimal I/O unit boundary.
0018  * Instead, data first goes to the write-buffer and is flushed when the
0019  * buffer is full or when it is not used for some time (by timer). This is
0020  * similar to the mechanism used by JFFS2.
0021  *
0022  * UBIFS distinguishes between minimum write size (@c->min_io_size) and maximum
0023  * write size (@c->max_write_size). The latter is the maximum amount of bytes
0024  * the underlying flash is able to program at a time, and writing in
0025  * @c->max_write_size units should presumably be faster. Obviously,
0026  * @c->min_io_size <= @c->max_write_size. Write-buffers are of
0027  * @c->max_write_size bytes in size for maximum performance. However, when a
0028  * write-buffer is flushed, only the portion of it (aligned to @c->min_io_size
0029  * boundary) which contains data is written, not the whole write-buffer,
0030  * because this is more space-efficient.
0031  *
0032  * This optimization adds a few complications to the code. Indeed, on the one
0033  * hand, we want to write in optimal @c->max_write_size bytes chunks, which
0034  * also means aligning writes at the @c->max_write_size bytes offsets. On the
0035  * other hand, we do not want to waste space when synchronizing the write
0036  * buffer, so during synchronization we write in smaller chunks. This leaves
0037  * the next write offset unaligned to @c->max_write_size bytes, so we
0038  * have to make sure that the write-buffer offset (@wbuf->offs) becomes aligned
0039  * to @c->max_write_size bytes again. We do this by temporarily shrinking
0040  * write-buffer size (@wbuf->size).
0041  *
0042  * Write-buffers are defined by 'struct ubifs_wbuf' objects and protected by
0043  * mutexes defined inside these objects. Since sometimes upper-level code
0044  * has to lock the write-buffer (e.g. journal space reservation code), many
0045  * functions related to write-buffers have "nolock" suffix which means that the
0046  * caller has to lock the write-buffer before calling this function.
0047  *
0048  * UBIFS stores nodes at 64 bit-aligned addresses. If the node length is not
0049  * aligned, UBIFS starts the next node from the aligned address, and the padded
0050  * bytes may contain any rubbish. In other words, UBIFS does not put padding
0051  * bytes in those small gaps. Common headers of nodes store real node lengths,
0052  * not aligned lengths. Indexing nodes also store real lengths in branches.
0053  *
0054  * UBIFS uses padding when it pads to the next min. I/O unit. In this case it
0055  * uses padding nodes or padding bytes, if the padding node does not fit.
0056  *
0057  * All UBIFS nodes are protected by CRC checksums and UBIFS checks CRC when
0058  * they are read from the flash media.
0059  */
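
/*
 * A worked example of the realignment described above (the numbers are
 * hypothetical, not taken from any particular flash chip): suppose
 * @c->min_io_size is 2048 and @c->max_write_size is 8192. If a write-buffer
 * flush writes only 2048 used bytes at offset 0, the next write offset
 * becomes 2048, which is not 8192-aligned. The write-buffer size is then
 * temporarily shrunk to 8192 - 2048 = 6144 bytes, so that the following
 * flush ends exactly at offset 8192 and later writes are again done in full
 * @c->max_write_size chunks.
 */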
0060 
0061 #include <linux/crc32.h>
0062 #include <linux/slab.h>
0063 #include "ubifs.h"
0064 
0065 /**
0066  * ubifs_ro_mode - switch UBIFS to read-only mode.
0067  * @c: UBIFS file-system description object
0068  * @err: error code which is the reason of switching to R/O mode
0069  */
0070 void ubifs_ro_mode(struct ubifs_info *c, int err)
0071 {
0072     if (!c->ro_error) {
0073         c->ro_error = 1;
0074         c->no_chk_data_crc = 0;
0075         c->vfs_sb->s_flags |= SB_RDONLY;
0076         ubifs_warn(c, "switched to read-only mode, error %d", err);
0077         dump_stack();
0078     }
0079 }
0080 
0081 /*
0082  * Below are simple wrappers over UBI I/O functions which include some
0083  * additional checks and UBIFS debugging stuff. See corresponding UBI function
0084  * for more information.
0085  */
0086 
0087 int ubifs_leb_read(const struct ubifs_info *c, int lnum, void *buf, int offs,
0088            int len, int even_ebadmsg)
0089 {
0090     int err;
0091 
0092     err = ubi_read(c->ubi, lnum, buf, offs, len);
0093     /*
0094      * In case of %-EBADMSG print the error message only if
0095      * @even_ebadmsg is true.
0096      */
0097     if (err && (err != -EBADMSG || even_ebadmsg)) {
0098         ubifs_err(c, "reading %d bytes from LEB %d:%d failed, error %d",
0099               len, lnum, offs, err);
0100         dump_stack();
0101     }
0102     return err;
0103 }
0104 
0105 int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs,
0106             int len)
0107 {
0108     int err;
0109 
0110     ubifs_assert(c, !c->ro_media && !c->ro_mount);
0111     if (c->ro_error)
0112         return -EROFS;
0113     if (!dbg_is_tst_rcvry(c))
0114         err = ubi_leb_write(c->ubi, lnum, buf, offs, len);
0115     else
0116         err = dbg_leb_write(c, lnum, buf, offs, len);
0117     if (err) {
0118         ubifs_err(c, "writing %d bytes to LEB %d:%d failed, error %d",
0119               len, lnum, offs, err);
0120         ubifs_ro_mode(c, err);
0121         dump_stack();
0122     }
0123     return err;
0124 }
0125 
0126 int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
0127 {
0128     int err;
0129 
0130     ubifs_assert(c, !c->ro_media && !c->ro_mount);
0131     if (c->ro_error)
0132         return -EROFS;
0133     if (!dbg_is_tst_rcvry(c))
0134         err = ubi_leb_change(c->ubi, lnum, buf, len);
0135     else
0136         err = dbg_leb_change(c, lnum, buf, len);
0137     if (err) {
0138         ubifs_err(c, "changing %d bytes in LEB %d failed, error %d",
0139               len, lnum, err);
0140         ubifs_ro_mode(c, err);
0141         dump_stack();
0142     }
0143     return err;
0144 }
0145 
0146 int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
0147 {
0148     int err;
0149 
0150     ubifs_assert(c, !c->ro_media && !c->ro_mount);
0151     if (c->ro_error)
0152         return -EROFS;
0153     if (!dbg_is_tst_rcvry(c))
0154         err = ubi_leb_unmap(c->ubi, lnum);
0155     else
0156         err = dbg_leb_unmap(c, lnum);
0157     if (err) {
0158         ubifs_err(c, "unmap LEB %d failed, error %d", lnum, err);
0159         ubifs_ro_mode(c, err);
0160         dump_stack();
0161     }
0162     return err;
0163 }
0164 
0165 int ubifs_leb_map(struct ubifs_info *c, int lnum)
0166 {
0167     int err;
0168 
0169     ubifs_assert(c, !c->ro_media && !c->ro_mount);
0170     if (c->ro_error)
0171         return -EROFS;
0172     if (!dbg_is_tst_rcvry(c))
0173         err = ubi_leb_map(c->ubi, lnum);
0174     else
0175         err = dbg_leb_map(c, lnum);
0176     if (err) {
0177         ubifs_err(c, "mapping LEB %d failed, error %d", lnum, err);
0178         ubifs_ro_mode(c, err);
0179         dump_stack();
0180     }
0181     return err;
0182 }
0183 
0184 int ubifs_is_mapped(const struct ubifs_info *c, int lnum)
0185 {
0186     int err;
0187 
0188     err = ubi_is_mapped(c->ubi, lnum);
0189     if (err < 0) {
0190         ubifs_err(c, "ubi_is_mapped failed for LEB %d, error %d",
0191               lnum, err);
0192         dump_stack();
0193     }
0194     return err;
0195 }
0196 
0197 static void record_magic_error(struct ubifs_stats_info *stats)
0198 {
0199     if (stats)
0200         stats->magic_errors++;
0201 }
0202 
0203 static void record_node_error(struct ubifs_stats_info *stats)
0204 {
0205     if (stats)
0206         stats->node_errors++;
0207 }
0208 
0209 static void record_crc_error(struct ubifs_stats_info *stats)
0210 {
0211     if (stats)
0212         stats->crc_errors++;
0213 }
0214 
0215 /**
0216  * ubifs_check_node - check node.
0217  * @c: UBIFS file-system description object
0218  * @buf: node to check
0219  * @len: node length
0220  * @lnum: logical eraseblock number
0221  * @offs: offset within the logical eraseblock
0222  * @quiet: print no messages
0223  * @must_chk_crc: indicates whether to always check the CRC
0224  *
0225  * This function checks node magic number and CRC checksum. This function also
0226  * validates node length to prevent UBIFS from becoming crazy when an attacker
0227  * feeds it a file-system image with incorrect nodes. For example, too large
0228  * node length in the common header could cause UBIFS to read memory outside of
0229  * allocated buffer when checking the CRC checksum.
0230  *
0231  * This function may skip data nodes CRC checking if @c->no_chk_data_crc is
0232  * true, which is controlled by corresponding UBIFS mount option. However, if
0233  * @must_chk_crc is true, then @c->no_chk_data_crc is ignored and CRC is
0234  * checked. Similarly, if @c->mounting or @c->remounting_rw is true (we are
0235  * mounting or re-mounting to R/W mode), @c->no_chk_data_crc is ignored and CRC
0236  * is checked. This is because during mounting or re-mounting from R/O mode to
0237  * R/W mode we may read journal nodes (when replaying the journal or doing the
0238  * recovery) and the journal nodes may potentially be corrupted, so checking is
0239  * required.
0240  *
0241  * This function returns zero in case of success, %-EUCLEAN in case of bad
0242  * CRC or magic, and %-EINVAL in case of a bad node type or length.
0243  */
0244 int ubifs_check_node(const struct ubifs_info *c, const void *buf, int len,
0245              int lnum, int offs, int quiet, int must_chk_crc)
0246 {
0247     int err = -EINVAL, type, node_len;
0248     uint32_t crc, node_crc, magic;
0249     const struct ubifs_ch *ch = buf;
0250 
0251     ubifs_assert(c, lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
0252     ubifs_assert(c, !(offs & 7) && offs < c->leb_size);
0253 
0254     magic = le32_to_cpu(ch->magic);
0255     if (magic != UBIFS_NODE_MAGIC) {
0256         if (!quiet)
0257             ubifs_err(c, "bad magic %#08x, expected %#08x",
0258                   magic, UBIFS_NODE_MAGIC);
0259         record_magic_error(c->stats);
0260         err = -EUCLEAN;
0261         goto out;
0262     }
0263 
0264     type = ch->node_type;
0265     if (type < 0 || type >= UBIFS_NODE_TYPES_CNT) {
0266         if (!quiet)
0267             ubifs_err(c, "bad node type %d", type);
0268         record_node_error(c->stats);
0269         goto out;
0270     }
0271 
0272     node_len = le32_to_cpu(ch->len);
0273     if (node_len + offs > c->leb_size)
0274         goto out_len;
0275 
0276     if (c->ranges[type].max_len == 0) {
0277         if (node_len != c->ranges[type].len)
0278             goto out_len;
0279     } else if (node_len < c->ranges[type].min_len ||
0280            node_len > c->ranges[type].max_len)
0281         goto out_len;
0282 
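    /*
     * Optionally skip the expensive CRC check for data nodes: this is only
     * done when the 'no_chk_data_crc' mount option is enabled, the caller
     * did not insist on checking (@must_chk_crc is zero), and we are
     * neither mounting nor re-mounting R/W, because journal replay relies
     * on the CRC to detect corruption.
     */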
0283     if (!must_chk_crc && type == UBIFS_DATA_NODE && !c->mounting &&
0284         !c->remounting_rw && c->no_chk_data_crc)
0285         return 0;
0286 
0287     crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8);
0288     node_crc = le32_to_cpu(ch->crc);
0289     if (crc != node_crc) {
0290         if (!quiet)
0291             ubifs_err(c, "bad CRC: calculated %#08x, read %#08x",
0292                   crc, node_crc);
0293         record_crc_error(c->stats);
0294         err = -EUCLEAN;
0295         goto out;
0296     }
0297 
0298     return 0;
0299 
0300 out_len:
0301     if (!quiet)
0302         ubifs_err(c, "bad node length %d", node_len);
0303 out:
0304     if (!quiet) {
0305         ubifs_err(c, "bad node at LEB %d:%d", lnum, offs);
0306         ubifs_dump_node(c, buf, len);
0307         dump_stack();
0308     }
0309     return err;
0310 }
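
/*
 * Typical usage within this file: ubifs_read_node() and ubifs_read_node_wbuf()
 * read the raw node first and then call
 * ubifs_check_node(c, buf, len, lnum, offs, 0, 0), i.e. with error messages
 * enabled and without forcing the data CRC check.
 */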
0311 
0312 /**
0313  * ubifs_pad - pad flash space.
0314  * @c: UBIFS file-system description object
0315  * @buf: buffer to put padding to
0316  * @pad: how many bytes to pad
0317  *
0318  * The flash media obliges us to write only in chunks of @c->min_io_size and
0319  * when we have to write less data we add a padding node to the write-buffer
0320  * and pad it to the next minimal I/O unit's boundary. Padding nodes help when
0321  * the media is being scanned. If the amount of wasted space is not enough to
0322  * fit a padding node which takes %UBIFS_PAD_NODE_SZ bytes, we write the
0323  * padding byte pattern (%UBIFS_PADDING_BYTE) instead.
0324  *
0325  * Padding nodes are also used to fill gaps when the "commit-in-gaps" method is
0326  * used.
0327  */
0328 void ubifs_pad(const struct ubifs_info *c, void *buf, int pad)
0329 {
0330     uint32_t crc;
0331 
0332     ubifs_assert(c, pad >= 0);
0333 
0334     if (pad >= UBIFS_PAD_NODE_SZ) {
0335         struct ubifs_ch *ch = buf;
0336         struct ubifs_pad_node *pad_node = buf;
0337 
0338         ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
0339         ch->node_type = UBIFS_PAD_NODE;
0340         ch->group_type = UBIFS_NO_NODE_GROUP;
0341         ch->padding[0] = ch->padding[1] = 0;
0342         ch->sqnum = 0;
0343         ch->len = cpu_to_le32(UBIFS_PAD_NODE_SZ);
0344         pad -= UBIFS_PAD_NODE_SZ;
0345         pad_node->pad_len = cpu_to_le32(pad);
0346         crc = crc32(UBIFS_CRC32_INIT, buf + 8, UBIFS_PAD_NODE_SZ - 8);
0347         ch->crc = cpu_to_le32(crc);
0348         memset(buf + UBIFS_PAD_NODE_SZ, 0, pad);
0349     } else if (pad > 0)
0350         /* Too little space, padding node won't fit */
0351         memset(buf, UBIFS_PADDING_BYTE, pad);
0352 }
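
/*
 * Illustration of the two cases above (sizes are symbolic): for
 * pad >= UBIFS_PAD_NODE_SZ the first UBIFS_PAD_NODE_SZ bytes of @buf become a
 * padding node whose pad_len field records the remaining
 * pad - UBIFS_PAD_NODE_SZ bytes, and those remaining bytes are zeroed. For
 * 0 < pad < UBIFS_PAD_NODE_SZ the whole gap is simply filled with the
 * %UBIFS_PADDING_BYTE pattern.
 */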
0353 
0354 /**
0355  * next_sqnum - get next sequence number.
0356  * @c: UBIFS file-system description object
0357  */
0358 static unsigned long long next_sqnum(struct ubifs_info *c)
0359 {
0360     unsigned long long sqnum;
0361 
0362     spin_lock(&c->cnt_lock);
0363     sqnum = ++c->max_sqnum;
0364     spin_unlock(&c->cnt_lock);
0365 
0366     if (unlikely(sqnum >= SQNUM_WARN_WATERMARK)) {
0367         if (sqnum >= SQNUM_WATERMARK) {
0368             ubifs_err(c, "sequence number overflow %llu, end of life",
0369                   sqnum);
0370             ubifs_ro_mode(c, -EINVAL);
0371         }
0372         ubifs_warn(c, "running out of sequence numbers, end of life soon");
0373     }
0374 
0375     return sqnum;
0376 }
0377 
0378 void ubifs_init_node(struct ubifs_info *c, void *node, int len, int pad)
0379 {
0380     struct ubifs_ch *ch = node;
0381     unsigned long long sqnum = next_sqnum(c);
0382 
0383     ubifs_assert(c, len >= UBIFS_CH_SZ);
0384 
0385     ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
0386     ch->len = cpu_to_le32(len);
0387     ch->group_type = UBIFS_NO_NODE_GROUP;
0388     ch->sqnum = cpu_to_le64(sqnum);
0389     ch->padding[0] = ch->padding[1] = 0;
0390 
0391     if (pad) {
0392         len = ALIGN(len, 8);
0393         pad = ALIGN(len, c->min_io_size) - len;
0394         ubifs_pad(c, node + len, pad);
0395     }
0396 }
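
/*
 * Example of the padding calculation in ubifs_init_node() (hypothetical
 * sizes): with a node of len = 161 and @c->min_io_size = 2048, len is first
 * rounded up to 168 (8-byte alignment) and ubifs_pad() then fills the
 * remaining 2048 - 168 = 1880 bytes up to the min. I/O unit boundary.
 */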
0397 
0398 void ubifs_crc_node(struct ubifs_info *c, void *node, int len)
0399 {
0400     struct ubifs_ch *ch = node;
0401     uint32_t crc;
0402 
0403     crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8);
0404     ch->crc = cpu_to_le32(crc);
0405 }
0406 
0407 /**
0408  * ubifs_prepare_node_hmac - prepare node to be written to flash.
0409  * @c: UBIFS file-system description object
0410  * @node: the node to pad
0411  * @len: node length
0412  * @hmac_offs: offset of the HMAC in the node
0413  * @pad: if the buffer has to be padded
0414  *
0415  * This function prepares node at @node to be written to the media - it
0416  * calculates node CRC, fills the common header, and adds proper padding up to
0417  * the next minimum I/O unit if @pad is not zero. If @hmac_offs is positive then
0418  * an HMAC is inserted into the node at the given offset.
0419  *
0420  * This function returns 0 for success or a negative error code otherwise.
0421  */
0422 int ubifs_prepare_node_hmac(struct ubifs_info *c, void *node, int len,
0423                 int hmac_offs, int pad)
0424 {
0425     int err;
0426 
0427     ubifs_init_node(c, node, len, pad);
0428 
0429     if (hmac_offs > 0) {
0430         err = ubifs_node_insert_hmac(c, node, len, hmac_offs);
0431         if (err)
0432             return err;
0433     }
0434 
0435     ubifs_crc_node(c, node, len);
0436 
0437     return 0;
0438 }
0439 
0440 /**
0441  * ubifs_prepare_node - prepare node to be written to flash.
0442  * @c: UBIFS file-system description object
0443  * @node: the node to pad
0444  * @len: node length
0445  * @pad: if the buffer has to be padded
0446  *
0447  * This function prepares node at @node to be written to the media - it
0448  * calculates node CRC, fills the common header, and adds proper padding up to
0449  * the next minimum I/O unit if @pad is not zero.
0450  */
0451 void ubifs_prepare_node(struct ubifs_info *c, void *node, int len, int pad)
0452 {
0453     /*
0454      * Deliberately ignore return value since this function can only fail
0455      * when an HMAC offset is given.
0456      */
0457     ubifs_prepare_node_hmac(c, node, len, 0, pad);
0458 }
0459 
0460 /**
0461  * ubifs_prep_grp_node - prepare node of a group to be written to flash.
0462  * @c: UBIFS file-system description object
0463  * @node: the node to pad
0464  * @len: node length
0465  * @last: indicates the last node of the group
0466  *
0467  * This function prepares node at @node to be written to the media - it
0468  * calculates node CRC and fills the common header.
0469  */
0470 void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last)
0471 {
0472     uint32_t crc;
0473     struct ubifs_ch *ch = node;
0474     unsigned long long sqnum = next_sqnum(c);
0475 
0476     ubifs_assert(c, len >= UBIFS_CH_SZ);
0477 
0478     ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
0479     ch->len = cpu_to_le32(len);
0480     if (last)
0481         ch->group_type = UBIFS_LAST_OF_NODE_GROUP;
0482     else
0483         ch->group_type = UBIFS_IN_NODE_GROUP;
0484     ch->sqnum = cpu_to_le64(sqnum);
0485     ch->padding[0] = ch->padding[1] = 0;
0486     crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8);
0487     ch->crc = cpu_to_le32(crc);
0488 }
0489 
0490 /**
0491  * wbuf_timer_callback_nolock - write-buffer timer callback function.
0492  * @timer: timer data (write-buffer descriptor)
0493  *
0494  * This function is called when the write-buffer timer expires.
0495  */
0496 static enum hrtimer_restart wbuf_timer_callback_nolock(struct hrtimer *timer)
0497 {
0498     struct ubifs_wbuf *wbuf = container_of(timer, struct ubifs_wbuf, timer);
0499 
0500     dbg_io("jhead %s", dbg_jhead(wbuf->jhead));
0501     wbuf->need_sync = 1;
0502     wbuf->c->need_wbuf_sync = 1;
0503     ubifs_wake_up_bgt(wbuf->c);
0504     return HRTIMER_NORESTART;
0505 }
0506 
0507 /**
0508  * new_wbuf_timer_nolock - start a new write-buffer timer.
0509  * @c: UBIFS file-system description object
0510  * @wbuf: write-buffer descriptor
0511  */
0512 static void new_wbuf_timer_nolock(struct ubifs_info *c, struct ubifs_wbuf *wbuf)
0513 {
0514     ktime_t softlimit = ms_to_ktime(dirty_writeback_interval * 10);
0515     unsigned long long delta = dirty_writeback_interval;
0516 
0517     /* centi to milli, milli to nano, then 10% */
0518     delta *= 10ULL * NSEC_PER_MSEC / 10ULL;
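    /*
     * For example, with the default dirty_writeback_interval of 500
     * centiseconds this gives softlimit = 5000 ms and
     * delta = 500 * 1000000 = 500000000 ns, i.e. the timer fires between
     * 5.0 and 5.5 seconds after being armed.
     */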
0519 
0520     ubifs_assert(c, !hrtimer_active(&wbuf->timer));
0521     ubifs_assert(c, delta <= ULONG_MAX);
0522 
0523     if (wbuf->no_timer)
0524         return;
0525     dbg_io("set timer for jhead %s, %llu-%llu millisecs",
0526            dbg_jhead(wbuf->jhead),
0527            div_u64(ktime_to_ns(softlimit), USEC_PER_SEC),
0528            div_u64(ktime_to_ns(softlimit) + delta, USEC_PER_SEC));
0529     hrtimer_start_range_ns(&wbuf->timer, softlimit, delta,
0530                    HRTIMER_MODE_REL);
0531 }
0532 
0533 /**
0534  * cancel_wbuf_timer_nolock - cancel the write-buffer timer.
0535  * @wbuf: write-buffer descriptor
0536  */
0537 static void cancel_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
0538 {
0539     if (wbuf->no_timer)
0540         return;
0541     wbuf->need_sync = 0;
0542     hrtimer_cancel(&wbuf->timer);
0543 }
0544 
0545 /**
0546  * ubifs_wbuf_sync_nolock - synchronize write-buffer.
0547  * @wbuf: write-buffer to synchronize
0548  *
0549  * This function synchronizes write-buffer @wbuf and returns zero in case of
0550  * success or a negative error code in case of failure.
0551  *
0552  * Note, although write-buffers are of @c->max_write_size, this function does
0553  * not necessarily write all @c->max_write_size bytes to the flash. Instead,
0554  * if the write-buffer is only partially filled with data, only the used part
0555  * of the write-buffer (aligned on @c->min_io_size boundary) is synchronized.
0556  * This way we waste less space.
0557  */
0558 int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
0559 {
0560     struct ubifs_info *c = wbuf->c;
0561     int err, dirt, sync_len;
0562 
0563     cancel_wbuf_timer_nolock(wbuf);
0564     if (!wbuf->used || wbuf->lnum == -1)
0565         /* Write-buffer is empty or not seeked */
0566         return 0;
0567 
0568     dbg_io("LEB %d:%d, %d bytes, jhead %s",
0569            wbuf->lnum, wbuf->offs, wbuf->used, dbg_jhead(wbuf->jhead));
0570     ubifs_assert(c, !(wbuf->avail & 7));
0571     ubifs_assert(c, wbuf->offs + wbuf->size <= c->leb_size);
0572     ubifs_assert(c, wbuf->size >= c->min_io_size);
0573     ubifs_assert(c, wbuf->size <= c->max_write_size);
0574     ubifs_assert(c, wbuf->size % c->min_io_size == 0);
0575     ubifs_assert(c, !c->ro_media && !c->ro_mount);
0576     if (c->leb_size - wbuf->offs >= c->max_write_size)
0577         ubifs_assert(c, !((wbuf->offs + wbuf->size) % c->max_write_size));
0578 
0579     if (c->ro_error)
0580         return -EROFS;
0581 
0582     /*
0583      * Do not write the whole write-buffer but write only the minimum
0584      * necessary number of min. I/O units.
0585      */
0586     sync_len = ALIGN(wbuf->used, c->min_io_size);
0587     dirt = sync_len - wbuf->used;
0588     if (dirt)
0589         ubifs_pad(c, wbuf->buf + wbuf->used, dirt);
0590     err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, sync_len);
0591     if (err)
0592         return err;
0593 
0594     spin_lock(&wbuf->lock);
0595     wbuf->offs += sync_len;
0596     /*
0597      * Now @wbuf->offs is not necessarily aligned to @c->max_write_size.
0598      * But our goal is to optimize writes and make sure we write in
0599      * @c->max_write_size chunks and to @c->max_write_size-aligned offset.
0600      * Thus, if @wbuf->offs is not aligned to @c->max_write_size now, make
0601      * sure that @wbuf->offs + @wbuf->size is aligned to
0602      * @c->max_write_size. This way we make sure that after next
0603      * write-buffer flush we are again at the optimal offset (aligned to
0604      * @c->max_write_size).
0605      */
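    /*
     * For example (hypothetical geometry): with @c->max_write_size = 8192,
     * if @wbuf->offs has just advanced to 10240, @wbuf->size is set to
     * ALIGN(10240, 8192) - 10240 = 6144 bytes, so the next flush ends
     * exactly at the 16384 boundary.
     */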
0606     if (c->leb_size - wbuf->offs < c->max_write_size)
0607         wbuf->size = c->leb_size - wbuf->offs;
0608     else if (wbuf->offs & (c->max_write_size - 1))
0609         wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs;
0610     else
0611         wbuf->size = c->max_write_size;
0612     wbuf->avail = wbuf->size;
0613     wbuf->used = 0;
0614     wbuf->next_ino = 0;
0615     spin_unlock(&wbuf->lock);
0616 
0617     if (wbuf->sync_callback)
0618         err = wbuf->sync_callback(c, wbuf->lnum,
0619                       c->leb_size - wbuf->offs, dirt);
0620     return err;
0621 }
0622 
0623 /**
0624  * ubifs_wbuf_seek_nolock - seek write-buffer.
0625  * @wbuf: write-buffer
0626  * @lnum: logical eraseblock number to seek to
0627  * @offs: logical eraseblock offset to seek to
0628  *
0629  * This function targets the write-buffer to logical eraseblock @lnum:@offs.
0630  * The write-buffer has to be empty. Returns zero in case of success and a
0631  * negative error code in case of failure.
0632  */
0633 int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs)
0634 {
0635     const struct ubifs_info *c = wbuf->c;
0636 
0637     dbg_io("LEB %d:%d, jhead %s", lnum, offs, dbg_jhead(wbuf->jhead));
0638     ubifs_assert(c, lnum >= 0 && lnum < c->leb_cnt);
0639     ubifs_assert(c, offs >= 0 && offs <= c->leb_size);
0640     ubifs_assert(c, offs % c->min_io_size == 0 && !(offs & 7));
0641     ubifs_assert(c, lnum != wbuf->lnum);
0642     ubifs_assert(c, wbuf->used == 0);
0643 
0644     spin_lock(&wbuf->lock);
0645     wbuf->lnum = lnum;
0646     wbuf->offs = offs;
0647     if (c->leb_size - wbuf->offs < c->max_write_size)
0648         wbuf->size = c->leb_size - wbuf->offs;
0649     else if (wbuf->offs & (c->max_write_size - 1))
0650         wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs;
0651     else
0652         wbuf->size = c->max_write_size;
0653     wbuf->avail = wbuf->size;
0654     wbuf->used = 0;
0655     spin_unlock(&wbuf->lock);
0656 
0657     return 0;
0658 }
0659 
0660 /**
0661  * ubifs_bg_wbufs_sync - synchronize write-buffers.
0662  * @c: UBIFS file-system description object
0663  *
0664  * This function is called by background thread to synchronize write-buffers.
0665  * Returns zero in case of success and a negative error code in case of
0666  * failure.
0667  */
0668 int ubifs_bg_wbufs_sync(struct ubifs_info *c)
0669 {
0670     int err, i;
0671 
0672     ubifs_assert(c, !c->ro_media && !c->ro_mount);
0673     if (!c->need_wbuf_sync)
0674         return 0;
0675     c->need_wbuf_sync = 0;
0676 
0677     if (c->ro_error) {
0678         err = -EROFS;
0679         goto out_timers;
0680     }
0681 
0682     dbg_io("synchronize");
0683     for (i = 0; i < c->jhead_cnt; i++) {
0684         struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;
0685 
0686         cond_resched();
0687 
0688         /*
0689          * If the mutex is locked then wbuf is being changed, so
0690          * synchronization is not necessary.
0691          */
0692         if (mutex_is_locked(&wbuf->io_mutex))
0693             continue;
0694 
0695         mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
0696         if (!wbuf->need_sync) {
0697             mutex_unlock(&wbuf->io_mutex);
0698             continue;
0699         }
0700 
0701         err = ubifs_wbuf_sync_nolock(wbuf);
0702         mutex_unlock(&wbuf->io_mutex);
0703         if (err) {
0704             ubifs_err(c, "cannot sync write-buffer, error %d", err);
0705             ubifs_ro_mode(c, err);
0706             goto out_timers;
0707         }
0708     }
0709 
0710     return 0;
0711 
0712 out_timers:
0713     /* Cancel all timers to prevent repeated errors */
0714     for (i = 0; i < c->jhead_cnt; i++) {
0715         struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;
0716 
0717         mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
0718         cancel_wbuf_timer_nolock(wbuf);
0719         mutex_unlock(&wbuf->io_mutex);
0720     }
0721     return err;
0722 }
0723 
0724 /**
0725  * ubifs_wbuf_write_nolock - write data to flash via write-buffer.
0726  * @wbuf: write-buffer
0727  * @buf: node to write
0728  * @len: node length
0729  *
0730  * This function writes data to flash via write-buffer @wbuf. This means that
0731  * the last piece of the node won't reach the flash media immediately if it
0732  * does not take a whole max. write unit (@c->max_write_size). Instead, the node
0733  * will sit in RAM until the write-buffer is synchronized (e.g., by timer, or
0734  * because more data are appended to the write-buffer).
0735  *
0736  * This function returns zero in case of success and a negative error code in
0737  * case of failure. If the node cannot be written because there is no more
0738  * space in this logical eraseblock, %-ENOSPC is returned.
0739  */
0740 int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
0741 {
0742     struct ubifs_info *c = wbuf->c;
0743     int err, n, written = 0, aligned_len = ALIGN(len, 8);
0744 
0745     dbg_io("%d bytes (%s) to jhead %s wbuf at LEB %d:%d", len,
0746            dbg_ntype(((struct ubifs_ch *)buf)->node_type),
0747            dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs + wbuf->used);
0748     ubifs_assert(c, len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt);
0749     ubifs_assert(c, wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0);
0750     ubifs_assert(c, !(wbuf->offs & 7) && wbuf->offs <= c->leb_size);
0751     ubifs_assert(c, wbuf->avail > 0 && wbuf->avail <= wbuf->size);
0752     ubifs_assert(c, wbuf->size >= c->min_io_size);
0753     ubifs_assert(c, wbuf->size <= c->max_write_size);
0754     ubifs_assert(c, wbuf->size % c->min_io_size == 0);
0755     ubifs_assert(c, mutex_is_locked(&wbuf->io_mutex));
0756     ubifs_assert(c, !c->ro_media && !c->ro_mount);
0757     ubifs_assert(c, !c->space_fixup);
0758     if (c->leb_size - wbuf->offs >= c->max_write_size)
0759         ubifs_assert(c, !((wbuf->offs + wbuf->size) % c->max_write_size));
0760 
0761     if (c->leb_size - wbuf->offs - wbuf->used < aligned_len) {
0762         err = -ENOSPC;
0763         goto out;
0764     }
0765 
0766     cancel_wbuf_timer_nolock(wbuf);
0767 
0768     if (c->ro_error)
0769         return -EROFS;
0770 
0771     if (aligned_len <= wbuf->avail) {
0772         /*
0773          * The node is not very large and fits entirely within the
0774          * write-buffer.
0775          */
0776         memcpy(wbuf->buf + wbuf->used, buf, len);
0777         if (aligned_len > len) {
0778             ubifs_assert(c, aligned_len - len < 8);
0779             ubifs_pad(c, wbuf->buf + wbuf->used + len, aligned_len - len);
0780         }
0781 
0782         if (aligned_len == wbuf->avail) {
0783             dbg_io("flush jhead %s wbuf to LEB %d:%d",
0784                    dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
0785             err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf,
0786                           wbuf->offs, wbuf->size);
0787             if (err)
0788                 goto out;
0789 
0790             spin_lock(&wbuf->lock);
0791             wbuf->offs += wbuf->size;
0792             if (c->leb_size - wbuf->offs >= c->max_write_size)
0793                 wbuf->size = c->max_write_size;
0794             else
0795                 wbuf->size = c->leb_size - wbuf->offs;
0796             wbuf->avail = wbuf->size;
0797             wbuf->used = 0;
0798             wbuf->next_ino = 0;
0799             spin_unlock(&wbuf->lock);
0800         } else {
0801             spin_lock(&wbuf->lock);
0802             wbuf->avail -= aligned_len;
0803             wbuf->used += aligned_len;
0804             spin_unlock(&wbuf->lock);
0805         }
0806 
0807         goto exit;
0808     }
0809 
0810     if (wbuf->used) {
0811         /*
0812          * The node is large enough that it does not fit entirely within
0813          * the current available space. We have to fill and flush the
0814          * write-buffer and switch to the next max. write unit.
0815          */
0816         dbg_io("flush jhead %s wbuf to LEB %d:%d",
0817                dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
0818         memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail);
0819         err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs,
0820                       wbuf->size);
0821         if (err)
0822             goto out;
0823 
0824         wbuf->offs += wbuf->size;
0825         len -= wbuf->avail;
0826         aligned_len -= wbuf->avail;
0827         written += wbuf->avail;
0828     } else if (wbuf->offs & (c->max_write_size - 1)) {
0829         /*
0830          * The write-buffer offset is not aligned to
0831          * @c->max_write_size and @wbuf->size is less than
0832          * @c->max_write_size. Write @wbuf->size bytes to make sure the
0833          * following writes are done in optimal @c->max_write_size
0834          * chunks.
0835          */
0836         dbg_io("write %d bytes to LEB %d:%d",
0837                wbuf->size, wbuf->lnum, wbuf->offs);
0838         err = ubifs_leb_write(c, wbuf->lnum, buf, wbuf->offs,
0839                       wbuf->size);
0840         if (err)
0841             goto out;
0842 
0843         wbuf->offs += wbuf->size;
0844         len -= wbuf->size;
0845         aligned_len -= wbuf->size;
0846         written += wbuf->size;
0847     }
0848 
0849     /*
0850      * The remaining data may take up one or more whole max. write units,
0851      * so write that multiple of the max. write unit size directly to the
0852      * flash media. We align the node length to an 8-byte boundary because
0853      * we flush the wbuf anyway if the remaining space is less than 8 bytes.
0854      */
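    /*
     * Illustration (hypothetical sizes): with @c->max_write_size = 8192
     * (so @c->max_write_shift = 13) and aligned_len = 20480, n = 2. The
     * first (n - 1) * 8192 = 8192 bytes go straight from @buf to the flash,
     * the next 8192 bytes are staged through @wbuf->buf so that any 8-byte
     * alignment padding can be appended, and the remaining 4096 bytes are
     * left in the write-buffer by the code further below.
     */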
0855     n = aligned_len >> c->max_write_shift;
0856     if (n) {
0857         int m = n - 1;
0858 
0859         dbg_io("write %d bytes to LEB %d:%d",
0860                n << c->max_write_shift, wbuf->lnum, wbuf->offs);
0861 
0862         if (m) {
0863             /* '(n-1)<<c->max_write_shift < len' is always true. */
0864             m <<= c->max_write_shift;
0865             err = ubifs_leb_write(c, wbuf->lnum, buf + written,
0866                           wbuf->offs, m);
0867             if (err)
0868                 goto out;
0869             wbuf->offs += m;
0870             aligned_len -= m;
0871             len -= m;
0872             written += m;
0873         }
0874 
0875         /*
0876          * The unwritten part of @buf may be shorter than 'n' bytes
0877          * because parameter 'len' is not 8-byte aligned, so copy only
0878          * min(len, n) bytes from @buf.
0879          */
0880         n = 1 << c->max_write_shift;
0881         memcpy(wbuf->buf, buf + written, min(len, n));
0882         if (n > len) {
0883             ubifs_assert(c, n - len < 8);
0884             ubifs_pad(c, wbuf->buf + len, n - len);
0885         }
0886 
0887         err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, n);
0888         if (err)
0889             goto out;
0890         wbuf->offs += n;
0891         aligned_len -= n;
0892         len -= min(len, n);
0893         written += n;
0894     }
0895 
0896     spin_lock(&wbuf->lock);
0897     if (aligned_len) {
0898         /*
0899          * And now we have what's left and what does not take a whole
0900          * max. write unit, so write it to the write-buffer and we are
0901          * done.
0902          */
0903         memcpy(wbuf->buf, buf + written, len);
0904         if (aligned_len > len) {
0905             ubifs_assert(c, aligned_len - len < 8);
0906             ubifs_pad(c, wbuf->buf + len, aligned_len - len);
0907         }
0908     }
0909 
0910     if (c->leb_size - wbuf->offs >= c->max_write_size)
0911         wbuf->size = c->max_write_size;
0912     else
0913         wbuf->size = c->leb_size - wbuf->offs;
0914     wbuf->avail = wbuf->size - aligned_len;
0915     wbuf->used = aligned_len;
0916     wbuf->next_ino = 0;
0917     spin_unlock(&wbuf->lock);
0918 
0919 exit:
0920     if (wbuf->sync_callback) {
0921         int free = c->leb_size - wbuf->offs - wbuf->used;
0922 
0923         err = wbuf->sync_callback(c, wbuf->lnum, free, 0);
0924         if (err)
0925             goto out;
0926     }
0927 
0928     if (wbuf->used)
0929         new_wbuf_timer_nolock(c, wbuf);
0930 
0931     return 0;
0932 
0933 out:
0934     ubifs_err(c, "cannot write %d bytes to LEB %d:%d, error %d",
0935           len, wbuf->lnum, wbuf->offs, err);
0936     ubifs_dump_node(c, buf, written + len);
0937     dump_stack();
0938     ubifs_dump_leb(c, wbuf->lnum);
0939     return err;
0940 }
0941 
0942 /**
0943  * ubifs_write_node_hmac - write node to the media.
0944  * @c: UBIFS file-system description object
0945  * @buf: the node to write
0946  * @len: node length
0947  * @lnum: logical eraseblock number
0948  * @offs: offset within the logical eraseblock
0949  * @hmac_offs: offset of the HMAC within the node
0950  *
0951  * This function automatically fills node magic number, assigns sequence
0952  * number, and calculates node CRC checksum. The length of the @buf buffer has
0953  * to be aligned to the minimal I/O unit size. This function automatically
0954  * appends padding node and padding bytes if needed. Returns zero in case of
0955  * success and a negative error code in case of failure.
0956  */
0957 int ubifs_write_node_hmac(struct ubifs_info *c, void *buf, int len, int lnum,
0958               int offs, int hmac_offs)
0959 {
0960     int err, buf_len = ALIGN(len, c->min_io_size);
0961 
0962     dbg_io("LEB %d:%d, %s, length %d (aligned %d)",
0963            lnum, offs, dbg_ntype(((struct ubifs_ch *)buf)->node_type), len,
0964            buf_len);
0965     ubifs_assert(c, lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
0966     ubifs_assert(c, offs % c->min_io_size == 0 && offs < c->leb_size);
0967     ubifs_assert(c, !c->ro_media && !c->ro_mount);
0968     ubifs_assert(c, !c->space_fixup);
0969 
0970     if (c->ro_error)
0971         return -EROFS;
0972 
0973     err = ubifs_prepare_node_hmac(c, buf, len, hmac_offs, 1);
0974     if (err)
0975         return err;
0976 
0977     err = ubifs_leb_write(c, lnum, buf, offs, buf_len);
0978     if (err)
0979         ubifs_dump_node(c, buf, len);
0980 
0981     return err;
0982 }
0983 
0984 /**
0985  * ubifs_write_node - write node to the media.
0986  * @c: UBIFS file-system description object
0987  * @buf: the node to write
0988  * @len: node length
0989  * @lnum: logical eraseblock number
0990  * @offs: offset within the logical eraseblock
0991  *
0992  * This function automatically fills node magic number, assigns sequence
0993  * number, and calculates node CRC checksum. The length of the @buf buffer has
0994  * to be aligned to the minimal I/O unit size. This function automatically
0995  * appends padding node and padding bytes if needed. Returns zero in case of
0996  * success and a negative error code in case of failure.
0997  */
0998 int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum,
0999              int offs)
1000 {
1001     return ubifs_write_node_hmac(c, buf, len, lnum, offs, -1);
1002 }
1003 
1004 /**
1005  * ubifs_read_node_wbuf - read node from the media or write-buffer.
1006  * @wbuf: wbuf to check for un-written data
1007  * @buf: buffer to read to
1008  * @type: node type
1009  * @len: node length
1010  * @lnum: logical eraseblock number
1011  * @offs: offset within the logical eraseblock
1012  *
1013  * This function reads a node of known type and length, checks it and stores
1014  * it in @buf. If the node partially or fully sits in the write-buffer, this
1015  * function takes data from the buffer, otherwise it reads the flash media.
1016  * Returns zero in case of success, %-EUCLEAN if CRC mismatched and a negative
1017  * error code in case of failure.
1018  */
1019 int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len,
1020              int lnum, int offs)
1021 {
1022     const struct ubifs_info *c = wbuf->c;
1023     int err, rlen, overlap;
1024     struct ubifs_ch *ch = buf;
1025 
1026     dbg_io("LEB %d:%d, %s, length %d, jhead %s", lnum, offs,
1027            dbg_ntype(type), len, dbg_jhead(wbuf->jhead));
1028     ubifs_assert(c, wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
1029     ubifs_assert(c, !(offs & 7) && offs < c->leb_size);
1030     ubifs_assert(c, type >= 0 && type < UBIFS_NODE_TYPES_CNT);
1031 
1032     spin_lock(&wbuf->lock);
1033     overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs);
1034     if (!overlap) {
1035         /* We may safely unlock the write-buffer and read the data */
1036         spin_unlock(&wbuf->lock);
1037         return ubifs_read_node(c, buf, type, len, lnum, offs);
1038     }
1039 
1040     /* Don't read under wbuf */
1041     rlen = wbuf->offs - offs;
1042     if (rlen < 0)
1043         rlen = 0;
1044 
1045     /* Copy the rest from the write-buffer */
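    /*
     * Source offset arithmetic: flash offset (offs + rlen) corresponds to
     * position (offs + rlen - wbuf->offs) inside @wbuf->buf, which is 0 when
     * the requested range starts before the write-buffer (rlen > 0) and
     * offs - wbuf->offs when it starts inside it (rlen == 0).
     */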
1046     memcpy(buf + rlen, wbuf->buf + offs + rlen - wbuf->offs, len - rlen);
1047     spin_unlock(&wbuf->lock);
1048 
1049     if (rlen > 0) {
1050         /* Read everything that goes before write-buffer */
1051         err = ubifs_leb_read(c, lnum, buf, offs, rlen, 0);
1052         if (err && err != -EBADMSG)
1053             return err;
1054     }
1055 
1056     if (type != ch->node_type) {
1057         ubifs_err(c, "bad node type (%d but expected %d)",
1058               ch->node_type, type);
1059         goto out;
1060     }
1061 
1062     err = ubifs_check_node(c, buf, len, lnum, offs, 0, 0);
1063     if (err) {
1064         ubifs_err(c, "expected node type %d", type);
1065         return err;
1066     }
1067 
1068     rlen = le32_to_cpu(ch->len);
1069     if (rlen != len) {
1070         ubifs_err(c, "bad node length %d, expected %d", rlen, len);
1071         goto out;
1072     }
1073 
1074     return 0;
1075 
1076 out:
1077     ubifs_err(c, "bad node at LEB %d:%d", lnum, offs);
1078     ubifs_dump_node(c, buf, len);
1079     dump_stack();
1080     return -EINVAL;
1081 }
1082 
1083 /**
1084  * ubifs_read_node - read node.
1085  * @c: UBIFS file-system description object
1086  * @buf: buffer to read to
1087  * @type: node type
1088  * @len: node length (not aligned)
1089  * @lnum: logical eraseblock number
1090  * @offs: offset within the logical eraseblock
1091  *
1092  * This function reads a node of known type and length, checks it and
1093  * stores it in @buf. Returns zero in case of success, %-EUCLEAN if CRC mismatched
1094  * and a negative error code in case of failure.
1095  */
1096 int ubifs_read_node(const struct ubifs_info *c, void *buf, int type, int len,
1097             int lnum, int offs)
1098 {
1099     int err, l;
1100     struct ubifs_ch *ch = buf;
1101 
1102     dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len);
1103     ubifs_assert(c, lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
1104     ubifs_assert(c, len >= UBIFS_CH_SZ && offs + len <= c->leb_size);
1105     ubifs_assert(c, !(offs & 7) && offs < c->leb_size);
1106     ubifs_assert(c, type >= 0 && type < UBIFS_NODE_TYPES_CNT);
1107 
1108     err = ubifs_leb_read(c, lnum, buf, offs, len, 0);
1109     if (err && err != -EBADMSG)
1110         return err;
1111 
1112     if (type != ch->node_type) {
1113         ubifs_errc(c, "bad node type (%d but expected %d)",
1114                ch->node_type, type);
1115         goto out;
1116     }
1117 
1118     err = ubifs_check_node(c, buf, len, lnum, offs, 0, 0);
1119     if (err) {
1120         ubifs_errc(c, "expected node type %d", type);
1121         return err;
1122     }
1123 
1124     l = le32_to_cpu(ch->len);
1125     if (l != len) {
1126         ubifs_errc(c, "bad node length %d, expected %d", l, len);
1127         goto out;
1128     }
1129 
1130     return 0;
1131 
1132 out:
1133     ubifs_errc(c, "bad node at LEB %d:%d, LEB mapping status %d", lnum,
1134            offs, ubi_is_mapped(c->ubi, lnum));
1135     if (!c->probing) {
1136         ubifs_dump_node(c, buf, len);
1137         dump_stack();
1138     }
1139     return -EINVAL;
1140 }
1141 
1142 /**
1143  * ubifs_wbuf_init - initialize write-buffer.
1144  * @c: UBIFS file-system description object
1145  * @wbuf: write-buffer to initialize
1146  *
1147  * This function initializes the write-buffer. Returns zero in case of success
1148  * and %-ENOMEM in case of failure.
1149  */
1150 int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf)
1151 {
1152     size_t size;
1153 
1154     wbuf->buf = kmalloc(c->max_write_size, GFP_KERNEL);
1155     if (!wbuf->buf)
1156         return -ENOMEM;
1157 
1158     size = (c->max_write_size / UBIFS_CH_SZ + 1) * sizeof(ino_t);
1159     wbuf->inodes = kmalloc(size, GFP_KERNEL);
1160     if (!wbuf->inodes) {
1161         kfree(wbuf->buf);
1162         wbuf->buf = NULL;
1163         return -ENOMEM;
1164     }
1165 
1166     wbuf->used = 0;
1167     wbuf->lnum = wbuf->offs = -1;
1168     /*
1169      * If the LEB starts at the max. write size aligned address, then
1170      * write-buffer size has to be set to @c->max_write_size. Otherwise,
1171      * set it to something smaller so that it ends at the closest max.
1172      * write size boundary.
1173      */
1174     size = c->max_write_size - (c->leb_start % c->max_write_size);
1175     wbuf->avail = wbuf->size = size;
1176     wbuf->sync_callback = NULL;
1177     mutex_init(&wbuf->io_mutex);
1178     spin_lock_init(&wbuf->lock);
1179     wbuf->c = c;
1180     wbuf->next_ino = 0;
1181 
1182     hrtimer_init(&wbuf->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1183     wbuf->timer.function = wbuf_timer_callback_nolock;
1184     return 0;
1185 }
1186 
1187 /**
1188  * ubifs_wbuf_add_ino_nolock - add an inode number into the wbuf inode array.
1189  * @wbuf: the write-buffer where to add
1190  * @inum: the inode number
1191  *
1192  * This function adds an inode number to the inode array of the write-buffer.
1193  */
1194 void ubifs_wbuf_add_ino_nolock(struct ubifs_wbuf *wbuf, ino_t inum)
1195 {
1196     if (!wbuf->buf)
1197         /* NOR flash or something similar */
1198         return;
1199 
1200     spin_lock(&wbuf->lock);
1201     if (wbuf->used)
1202         wbuf->inodes[wbuf->next_ino++] = inum;
1203     spin_unlock(&wbuf->lock);
1204 }
1205 
1206 /**
1207  * wbuf_has_ino - return whether the wbuf contains data from a given inode.
1208  * @wbuf: the write-buffer
1209  * @inum: the inode number
1210  *
1211  * This function returns %1 if the write-buffer contains some data from the
1212  * given inode, otherwise it returns %0.
1213  */
1214 static int wbuf_has_ino(struct ubifs_wbuf *wbuf, ino_t inum)
1215 {
1216     int i, ret = 0;
1217 
1218     spin_lock(&wbuf->lock);
1219     for (i = 0; i < wbuf->next_ino; i++)
1220         if (inum == wbuf->inodes[i]) {
1221             ret = 1;
1222             break;
1223         }
1224     spin_unlock(&wbuf->lock);
1225 
1226     return ret;
1227 }
1228 
1229 /**
1230  * ubifs_sync_wbufs_by_inode - synchronize write-buffers for an inode.
1231  * @c: UBIFS file-system description object
1232  * @inode: inode to synchronize
1233  *
1234  * This function synchronizes write-buffers which contain nodes belonging to
1235  * @inode. Returns zero in case of success and a negative error code in case of
1236  * failure.
1237  */
1238 int ubifs_sync_wbufs_by_inode(struct ubifs_info *c, struct inode *inode)
1239 {
1240     int i, err = 0;
1241 
1242     for (i = 0; i < c->jhead_cnt; i++) {
1243         struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;
1244 
1245         if (i == GCHD)
1246             /*
1247              * GC head is special, do not look at it. Even if the
1248              * head contains something related to this inode, it is
1249              * a _copy_ of corresponding on-flash node which sits
1250              * somewhere else.
1251              */
1252             continue;
1253 
1254         if (!wbuf_has_ino(wbuf, inode->i_ino))
1255             continue;
1256 
1257         mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
1258         if (wbuf_has_ino(wbuf, inode->i_ino))
1259             err = ubifs_wbuf_sync_nolock(wbuf);
1260         mutex_unlock(&wbuf->io_mutex);
1261 
1262         if (err) {
1263             ubifs_ro_mode(c, err);
1264             return err;
1265         }
1266     }
1267     return 0;
1268 }