Fix typos
Signed-off-by: Andrea Gelmini <[email protected]>
Gelma committed Sep 2, 2024
1 parent 4e7795e commit 5d31af7
Showing 45 changed files with 67 additions and 69 deletions.
2 changes: 1 addition & 1 deletion fs/bcachefs/Kconfig
@@ -38,7 +38,7 @@ config BCACHEFS_ERASURE_CODING
depends on BCACHEFS_FS
select QUOTACTL
help
This enables the "erasure_code" filesysystem and inode option, which
This enables the "erasure_code" filesystem and inode option, which
organizes data into reed-solomon stripes instead of ordinary
replication.

2 changes: 1 addition & 1 deletion fs/bcachefs/alloc_background.c
@@ -1380,7 +1380,7 @@ static noinline_for_stack int bch2_check_discard_freespace_key(struct btree_tran

if (fsck_err_on(!bch2_dev_bucket_exists(c, pos),
trans, need_discard_freespace_key_to_invalid_dev_bucket,
"entry in %s btree for nonexistant dev:bucket %llu:%llu",
"entry in %s btree for nonexistent dev:bucket %llu:%llu",
bch2_btree_id_str(iter->btree_id), pos.inode, pos.offset))
goto delete;

4 changes: 2 additions & 2 deletions fs/bcachefs/alloc_foreground.c
@@ -353,7 +353,7 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc
if (!bkey_eq(bp_pos, POS_MAX)) {
/*
* Bucket may have data in it - we don't call
* bc2h_trans_inconnsistent() because fsck hasn't
* bc2h_trans_inconsistent() because fsck hasn't
* finished yet
*/
ob = NULL;
@@ -1552,7 +1552,7 @@ void bch2_fs_allocator_foreground_init(struct bch_fs *c)
mutex_init(&c->write_points_hash_lock);
c->write_points_nr = ARRAY_SIZE(c->write_points);

/* open bucket 0 is a sentinal NULL: */
/* open bucket 0 is a sentinel NULL: */
spin_lock_init(&c->open_buckets[0].lock);

for (ob = c->open_buckets + 1;
2 changes: 1 addition & 1 deletion fs/bcachefs/bcachefs.h
@@ -733,7 +733,7 @@ struct bch_fs {
struct percpu_ref writes;
#endif
/*
* Analagous to c->writes, for asynchronous ops that don't necessarily
* Analogous to c->writes, for asynchronous ops that don't necessarily
* need fs to be read-write
*/
refcount_t ro_ref;
8 changes: 3 additions & 5 deletions fs/bcachefs/bcachefs_format.h
@@ -239,7 +239,7 @@ struct bkey {
*
* Specifically, when i was designing bkey, I wanted the header to be no
* bigger than necessary so that bkey_packed could use the rest. That means that
* decently offten extent keys will fit into only 8 bytes, instead of spilling over
* decently often extent keys will fit into only 8 bytes, instead of spilling over
* to 16.
*
* But packed_bkey treats the part after the header - the packed section -
@@ -251,7 +251,7 @@ struct bkey {
* So that constrains the key part of a bkig endian bkey to start right
* after the header.
*
* If we ever do a bkey_v2 and need to expand the hedaer by another byte for
* If we ever do a bkey_v2 and need to expand the header by another byte for
* some reason - that will clean up this wart.
*/
__aligned(8)
@@ -499,8 +499,6 @@ struct bch_sb_field {
#include "disk_groups_format.h"
#include "extents_format.h"
#include "ec_format.h"
#include "dirent_format.h"
#include "disk_groups_format.h"
#include "inode_format.h"
#include "journal_seq_blacklist_format.h"
#include "logged_ops_format.h"
@@ -758,7 +756,7 @@ struct bch_sb {

/*
* Flags:
* BCH_SB_INITALIZED - set on first mount
* BCH_SB_INITIALIZED - set on first mount
* BCH_SB_CLEAN - did we shut down cleanly? Just a hint, doesn't affect
* behaviour of mount/recovery path:
* BCH_SB_INODE_32BIT - limit inode numbers to 32 bits
4 changes: 2 additions & 2 deletions fs/bcachefs/bcachefs_ioctl.h
@@ -131,7 +131,7 @@ struct bch_ioctl_start {
* may be either offline or offline.
*
* Will fail removing @dev would leave us with insufficient read write devices
* or degraded/unavailable data, unless the approprate BCH_FORCE_IF_* flags are
* or degraded/unavailable data, unless the appropriate BCH_FORCE_IF_* flags are
* set.
*/

@@ -154,7 +154,7 @@ struct bch_ioctl_start {
*
* Will fail (similarly to BCH_IOCTL_DISK_SET_STATE) if offlining @dev would
* leave us with insufficient read write devices or degraded/unavailable data,
* unless the approprate BCH_FORCE_IF_* flags are set.
* unless the appropriate BCH_FORCE_IF_* flags are set.
*/

struct bch_ioctl_disk {
4 changes: 2 additions & 2 deletions fs/bcachefs/bset.h
@@ -45,7 +45,7 @@
* 4 in memory - we lazily resort as needed.
*
* We implement code here for creating and maintaining auxiliary search trees
* (described below) for searching an individial bset, and on top of that we
* (described below) for searching an individual bset, and on top of that we
* implement a btree iterator.
*
* BTREE ITERATOR:
@@ -178,7 +178,7 @@ static inline enum bset_aux_tree_type bset_aux_tree_type(const struct bset_tree
* it used to be 64, but I realized the lookup code would touch slightly less
* memory if it was 128.
*
* It definites the number of bytes (in struct bset) per struct bkey_float in
* It defines the number of bytes (in struct bset) per struct bkey_float in
* the auxiliar search tree - when we're done searching the bset_float tree we
* have this many bytes left that we do a linear search over.
*
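For scale (our arithmetic, not a figure from the source): at 128 bytes of bset per bkey_float, with packed keys running 8 to 16 bytes, the terminal linear search touches at most 8 to 16 keys, roughly two cachelines, which is the memory-touched tradeoff this comment describes.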
2 changes: 1 addition & 1 deletion fs/bcachefs/btree_cache.c
@@ -1068,7 +1068,7 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *
/*
* Check b->hash_val _before_ calling btree_node_lock() - this might not
* be the node we want anymore, and trying to lock the wrong node could
* cause an unneccessary transaction restart:
* cause an unnecessary transaction restart:
*/
if (unlikely(!c->opts.btree_node_mem_ptr_optimization ||
!b ||
2 changes: 1 addition & 1 deletion fs/bcachefs/btree_gc.c
@@ -625,7 +625,7 @@ static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,

/*
* We require a commit before key_trigger() because
* key_trigger(BTREE_TRIGGER_GC) is not idempotant; we'll calculate the
* key_trigger(BTREE_TRIGGER_GC) is not idempotent; we'll calculate the
* wrong result if we run it multiple times.
*/
unsigned flags = !iter ? BTREE_TRIGGER_is_root : 0;
4 changes: 2 additions & 2 deletions fs/bcachefs/btree_iter.c
@@ -2372,7 +2372,7 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
}

/*
* iter->pos should be mononotically increasing, and always be
* iter->pos should be monotonically increasing, and always be
* equal to the key we just returned - except extents can
* straddle iter->pos:
*/
@@ -3070,7 +3070,7 @@ u32 bch2_trans_begin(struct btree_trans *trans)

/*
* If the transaction wasn't restarted, we're presuming to be
* doing something new: dont keep iterators excpt the ones that
* doing something new: don't keep iterators except the ones that
* are in use - except for the subvolumes btree:
*/
if (!trans->restarted && path->btree_id != BTREE_ID_subvolumes)
2 changes: 1 addition & 1 deletion fs/bcachefs/btree_types.h
@@ -441,7 +441,7 @@ struct btree_insert_entry {
/* Number of btree paths we preallocate, usually enough */
#define BTREE_ITER_INITIAL 64
/*
* Lmiit for btree_trans_too_many_iters(); this is enough that almost all code
* Limit for btree_trans_too_many_iters(); this is enough that almost all code
* paths should run inside this limit, and if they don't it usually indicates a
* bug (leaking/duplicated btree paths).
*
2 changes: 1 addition & 1 deletion fs/bcachefs/btree_update.h
@@ -82,7 +82,7 @@ int __bch2_insert_snapshot_whiteouts(struct btree_trans *, enum btree_id,
* For use when splitting extents in existing snapshots:
*
* If @old_pos is an interior snapshot node, iterate over descendent snapshot
* nodes: for every descendent snapshot in whiche @old_pos is overwritten and
* nodes: for every descendent snapshot in which @old_pos is overwritten and
* not visible, emit a whiteout at @new_pos.
*/
static inline int bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
2 changes: 1 addition & 1 deletion fs/bcachefs/btree_update_interior.h
@@ -116,7 +116,7 @@ struct btree_update {
struct keylist parent_keys;
/*
* Enough room for btree_split's keys without realloc - btree node
* pointers never have crc/compression info, so we only need to acount
* pointers never have crc/compression info, so we only need to account
* for the pointers for three keys
*/
u64 inline_keys[BKEY_BTREE_PTR_U64s_MAX * 3];
2 changes: 1 addition & 1 deletion fs/bcachefs/btree_write_buffer.c
@@ -451,7 +451,7 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
* journal replay has to split/rewrite nodes to make room for
* its updates.
*
* And for those new acounting updates, updates to the same
* And for those new accounting updates, updates to the same
* counters get accumulated as they're flushed from the journal
* to the write buffer - see the patch for eytzingcer tree
* accumulated. So we could only overflow if the number of
2 changes: 1 addition & 1 deletion fs/bcachefs/checksum.c
@@ -22,7 +22,7 @@
/*
* bch2_checksum state is an abstraction of the checksum state calculated over different pages.
* it features page merging without having the checksum algorithm lose its state.
* for native checksum aglorithms (like crc), a default seed value will do.
* for native checksum algorithms (like crc), a default seed value will do.
* for hash-like algorithms, a state needs to be stored
*/

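The comment above is the whole trick, so a tiny standalone sketch may help: for seed-chained checksums such as CRC, feeding pages one at a time and passing each call's result as the next call's seed matches checksumming the whole range in one go. This is generic C for illustration; crc32_update is our stand-in, not a bcachefs function.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* bitwise CRC-32 (reflected, poly 0xEDB88320), seed-chainable */
static uint32_t crc32_update(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;

	crc = ~crc;
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0xedb88320 & -(crc & 1));
	}
	return ~crc;
}

int main(void)
{
	const char a[] = "hello ", b[] = "world", whole[] = "hello world";

	/* page-at-a-time, chaining the state through the seed... */
	uint32_t merged = crc32_update(crc32_update(0, a, 6), b, 5);
	/* ...equals the one-shot checksum of the full range */
	uint32_t oneshot = crc32_update(0, whole, 11);

	printf("%08x %08x\n", merged, oneshot);
	return merged != oneshot;
}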
4 changes: 2 additions & 2 deletions fs/bcachefs/data_update.c
@@ -228,7 +228,7 @@ static int __bch2_data_update_index_update(struct btree_trans *trans,
* other updates
* @new: extent with new pointers that we'll be adding to @insert
*
* Fist, drop rewrite_ptrs from @new:
* First, drop rewrite_ptrs from @new:
*/
i = 0;
bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry_c) {
@@ -701,7 +701,7 @@ int bch2_data_update_init(struct btree_trans *trans,

/*
* If device(s) were set to durability=0 after data was written to them
* we can end up with a duribilty=0 extent, and the normal algorithm
* we can end up with a durability=0 extent, and the normal algorithm
* that tries not to increase durability doesn't work:
*/
if (!(durability_have + durability_removing))
4 changes: 2 additions & 2 deletions fs/bcachefs/disk_accounting.c
@@ -25,7 +25,7 @@
* expensive, so we also have
*
* - In memory accounting, where accounting is stored as an array of percpu
* counters, indexed by an eytzinger array of disk acounting keys/bpos (which
* counters, indexed by an eytzinger array of disk accounting keys/bpos (which
* are the same thing, excepting byte swabbing on big endian).
*
* Cheap to read, but non persistent.
@@ -371,7 +371,7 @@ void bch2_accounting_mem_gc(struct bch_fs *c)
* Read out accounting keys for replicas entries, as an array of
* bch_replicas_usage entries.
*
* Note: this may be deprecated/removed at smoe point in the future and replaced
* Note: this may be deprecated/removed at some point in the future and replaced
* with something more general, it exists to support the ioctl used by the
* 'bcachefs fs usage' command.
*/
2 changes: 1 addition & 1 deletion fs/bcachefs/disk_accounting_format.h
@@ -10,7 +10,7 @@
* Here, the key has considerably more structure than a typical key (bpos); an
* accounting key is 'struct disk_accounting_pos', which is a union of bpos.
*
* More specifically: a key is just a muliword integer (where word endianness
* More specifically: a key is just a multiword integer (where word endianness
* matches native byte order), so we're treating bpos as an opaque 20 byte
* integer and mapping bch_accounting_key to that.
*
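A rough illustration of that mapping, with invented names (the real structures live in this header): the structured position is bit-copied into an opaque fixed-width key, so generic comparison and ordering code only ever sees a flat 20-byte integer.

#include <stdint.h>
#include <string.h>

struct opaque_key {			/* stand-in for bpos: a flat 20-byte key */
	uint8_t bytes[20];
};

struct accounting_pos {			/* hypothetical structured view */
	uint8_t  type;
	uint32_t dev;
	uint64_t counter;
} __attribute__((packed));

static struct opaque_key accounting_pos_to_key(const struct accounting_pos *p)
{
	struct opaque_key k;

	_Static_assert(sizeof(*p) <= sizeof(k.bytes), "must fit in the key");
	memset(&k, 0, sizeof(k));
	memcpy(k.bytes, p, sizeof(*p));	/* big endian would byte-swab here, per the comment */
	return k;
}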
2 changes: 1 addition & 1 deletion fs/bcachefs/errcode.h
@@ -293,4 +293,4 @@ static inline long bch2_err_class(long err)

const char *bch2_blk_status_to_str(blk_status_t);

#endif /* _BCACHFES_ERRCODE_H */
#endif /* _BCACHEFS_ERRCODE_H */
2 changes: 1 addition & 1 deletion fs/bcachefs/error.c
@@ -269,7 +269,7 @@ int __bch2_fsck_err(struct bch_fs *c,
if (s) {
/*
* We may be called multiple times for the same error on
* transaction restart - this memorizes instead of asking the user
* transaction restart - this memoizes instead of asking the user
* multiple times for the same error:
*/
if (s->last_msg && !strcmp(buf.buf, s->last_msg)) {
2 changes: 1 addition & 1 deletion fs/bcachefs/eytzinger.h
@@ -12,7 +12,7 @@
#endif

/*
* Traversal for trees in eytzinger layout - a full binary tree layed out in an
* Traversal for trees in eytzinger layout - a full binary tree laid out in an
* array.
*
* Consider using an eytzinger tree any time you would otherwise be doing binary
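As a rough standalone sketch of what the array layout buys (not the kernel implementation, which lives in this header): with 1-based indexing, node i's children sit at 2*i and 2*i + 1, so a search is plain index arithmetic over one contiguous, prefetch-friendly array.

#include <stddef.h>
#include <stdio.h>

/* index of the first element >= search, or 0 if none (index 0 unused) */
static size_t eytzinger_search(const int *tree, size_t nr, int search)
{
	size_t i = 1, found = 0;

	while (i <= nr) {
		if (tree[i] >= search) {
			found = i;
			i = 2 * i;		/* descend left */
		} else {
			i = 2 * i + 1;		/* descend right */
		}
	}
	return found;
}

int main(void)
{
	/* the sorted set {1..7} rearranged into eytzinger order */
	const int tree[] = { 0, 4, 2, 6, 1, 3, 5, 7 };

	printf("%zu\n", eytzinger_search(tree, 7, 5));	/* prints 6; tree[6] == 5 */
	return 0;
}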
2 changes: 1 addition & 1 deletion fs/bcachefs/fs-io.c
@@ -877,7 +877,7 @@ loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
goto err;

/*
* due to alignment, we might have remapped slightly more than requsted
* due to alignment, we might have remapped slightly more than requested
*/
ret = min((u64) ret << 9, (u64) len);

2 changes: 1 addition & 1 deletion fs/bcachefs/fs.h
@@ -33,7 +33,7 @@ struct bch_inode_info {
*
* XXX: a device may have had a flush issued by some other codepath. It
* would be better to keep for each device a sequence number that's
* incremented when we isusue a cache flush, and track here the sequence
* incremented when we issue a cache flush, and track here the sequence
* number that needs flushing.
*/
struct bch_devs_mask ei_devs_need_flush;
4 changes: 2 additions & 2 deletions fs/bcachefs/fsck.c
@@ -569,7 +569,7 @@ static bool key_visible_in_snapshot(struct bch_fs *c, struct snapshots_seen *see
/*
* We know that @id is a descendant of @ancestor, we're checking if
* we've seen a key that overwrote @ancestor - i.e. also a descendent of
* @ascestor and with @id as a descendent.
* @ancestor and with @id as a descendent.
*
* But we already know that we're scanning IDs between @id and @ancestor
* numerically, since snapshot ID lists are kept sorted, so if we find
@@ -1984,7 +1984,7 @@ static int check_dirent_to_subvol(struct btree_trans *trans, struct btree_iter *

if (fsck_err_on(le32_to_cpu(s.v->fs_path_parent) != parent_subvol,
trans, subvol_fs_path_parent_wrong,
"subvol with wrong fs_path_parent, should be be %u\n%s",
"subvol with wrong fs_path_parent, should be %u\n%s",
parent_subvol,
(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
struct bkey_i_subvolume *n =
2 changes: 1 addition & 1 deletion fs/bcachefs/inode.c
@@ -1161,7 +1161,7 @@ int bch2_delete_dead_inodes(struct bch_fs *c)
/*
* if we ran check_inodes() unlinked inodes will have already been
* cleaned up but the write buffer will be out of sync; therefore we
* alway need a write buffer flush
* always need a write buffer flush
*/
ret = bch2_btree_write_buffer_flush_sync(trans);
if (ret)
2 changes: 1 addition & 1 deletion fs/bcachefs/io_misc.c
@@ -133,7 +133,7 @@ int bch2_extent_fallocate(struct btree_trans *trans,
}

/*
* Returns -BCH_ERR_transacton_restart if we had to drop locks:
* Returns -BCH_ERR_transaction_restart if we had to drop locks:
*/
int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter,
subvol_inum inum, u64 end,
2 changes: 1 addition & 1 deletion fs/bcachefs/io_write.c
@@ -1494,7 +1494,7 @@ static void __bch2_write(struct bch_write_op *op)
/*
* Sync or no?
*
* If we're running asynchronously, wne may still want to block
* If we're running asynchronously, we may still want to block
* synchronously here if we weren't able to submit all of the IO at
* once, as that signals backpressure to the caller.
*/
4 changes: 2 additions & 2 deletions fs/bcachefs/journal.c
@@ -167,7 +167,7 @@ journal_error_check_stuck(struct journal *j, int error, unsigned flags)
j->err_seq = journal_cur_seq(j);
spin_unlock(&j->lock);

bch_err(c, "Journal stuck! Hava a pre-reservation but journal full (error %s)",
bch_err(c, "Journal stuck! Have a pre-reservation but journal full (error %s)",
bch2_journal_errors[error]);
bch2_journal_debug_to_text(&buf, j);
bch_err(c, "%s", buf.buf);
@@ -803,7 +803,7 @@ bool bch2_journal_noflush_seq(struct journal *j, u64 seq)
unwritten_seq++) {
struct journal_buf *buf = journal_seq_to_buf(j, unwritten_seq);

/* journal flush already in flight, or flush requseted */
/* journal flush already in flight, or flush requested */
if (buf->must_flush)
goto out;

2 changes: 1 addition & 1 deletion fs/bcachefs/journal_io.c
@@ -157,7 +157,7 @@ static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
/*
* genradixes are indexed by a ulong, not a u64, so we can't index them
* by sequence number directly: Assume instead that they will all fall
* within the range of +-2billion of the filrst one we find.
* within the range of +-2billion of the first one we find.
*/
if (!c->journal_entries_base_seq)
c->journal_entries_base_seq = max_t(s64, 1, le64_to_cpu(j->seq) - S32_MAX);
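A minimal sketch of that rebasing, assuming (as the comment does) that all sequence numbers land within +-2^31 of the first one seen; the names are invented for illustration, and the real code does this with signed 64-bit math.

#include <stdint.h>

static uint64_t base_seq;	/* 0 until the first entry is seen */

static unsigned long seq_to_radix_idx(uint64_t seq)
{
	if (!base_seq)
		base_seq = seq > INT32_MAX ? seq - INT32_MAX : 1;
	return seq - base_seq;	/* small offset, safe to use as a ulong index */
}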
4 changes: 2 additions & 2 deletions fs/bcachefs/journal_types.h
@@ -107,7 +107,7 @@ union journal_res_state {
#define JOURNAL_ENTRY_SIZE_MAX (4U << 20) /* 4M */

/*
* We stash some journal state as sentinal values in cur_entry_offset:
* We stash some journal state as sentinel values in cur_entry_offset:
* note - cur_entry_offset is in units of u64s
*/
#define JOURNAL_ENTRY_OFFSET_MAX ((1U << 20) - 1)
@@ -202,7 +202,7 @@ struct journal {
darray_u64 early_journal_entries;

/*
* Protects journal_buf->data, when accessing without a jorunal
* Protects journal_buf->data, when accessing without a journal
* reservation: for synchronization between the btree write buffer code
* and the journal write path:
*/
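A simplified sketch of the sentinel scheme; the exact values are assumptions for illustration, though they mirror the defines visible above. Offsets up to the maximum are real entry offsets (in u64s), and the top of the range encodes journal state, so one atomically updated field carries both.

#include <stdbool.h>

#define ENTRY_OFFSET_MAX	((1U << 20) - 1)	/* largest real offset */
#define ENTRY_CLOSED_VAL	(ENTRY_OFFSET_MAX - 1)	/* sentinel: entry closed */
#define ENTRY_ERROR_VAL		ENTRY_OFFSET_MAX	/* sentinel: journal error */

static bool offset_is_sentinel(unsigned int offset)
{
	return offset >= ENTRY_CLOSED_VAL;
}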
4 changes: 2 additions & 2 deletions fs/bcachefs/mean_and_variance.h
@@ -152,7 +152,7 @@ struct mean_and_variance {
u128_u sum_squares;
};

/* expontentially weighted variant */
/* exponentially weighted variant */
struct mean_and_variance_weighted {
s64 mean;
u64 variance;
@@ -200,4 +200,4 @@ u64 mean_and_variance_weighted_get_variance(struct mean_and_variance_weighted s,
u32 mean_and_variance_weighted_get_stddev(struct mean_and_variance_weighted s,
u8 weight);

#endif // MEAN_AND_VAIRANCE_H_
#endif // MEAN_AND_VARIANCE_H_
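For readers unfamiliar with the exponentially weighted variant named above, here is a back-of-the-envelope floating-point sketch of the textbook recurrences; the kernel version is fixed-point and takes an integer weight (a shift) rather than an alpha.

#include <math.h>
#include <stdio.h>

struct ew_stats {
	double mean, variance;
};

static void ew_update(struct ew_stats *s, double x, double alpha)
{
	double diff = x - s->mean;
	double incr = alpha * diff;

	s->mean += incr;
	/* standard recurrence: var <- (1 - alpha) * (var + diff * incr) */
	s->variance = (1 - alpha) * (s->variance + diff * incr);
}

int main(void)
{
	struct ew_stats s = { 0, 0 };

	for (int i = 1; i <= 10; i++)
		ew_update(&s, i, 0.125);	/* weight ~ 1/8, like a shift of 3 */

	printf("mean %.3f stddev %.3f\n", s.mean, sqrt(s.variance));
	return 0;
}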
2 changes: 1 addition & 1 deletion fs/bcachefs/mean_and_variance_test.c
@@ -25,7 +25,7 @@ static void mean_and_variance_basic_test(struct kunit *test)
}

/*
* Test values computed using a spreadsheet from the psuedocode at the bottom:
* Test values computed using a spreadsheet from the pseudocode at the bottom:
* https://fanf2.user.srcf.net/hermes/doc/antiforgery/stats.pdf
*/


