Skip to content

Commit

Permalink
bcachefs: Ignore cached data when calculating fragmentation
Browse files Browse the repository at this point in the history
Signed-off-by: Kent Overstreet <[email protected]>
  • Loading branch information
koverstreet committed Jan 11, 2022
1 parent 2897392 commit 215ae52
Show file tree
Hide file tree
Showing 4 changed files with 15 additions and 17 deletions.
2 changes: 1 addition & 1 deletion fs/bcachefs/alloc_background.c
Original file line number Diff line number Diff line change
Expand Up @@ -543,7 +543,7 @@ static bool bch2_can_invalidate_bucket(struct bch_dev *ca, size_t b,
static unsigned bucket_sort_key(struct bucket *g, struct bucket_mark m,
u64 now, u64 last_seq_ondisk)
{
unsigned used = bucket_sectors_used(m);
unsigned used = m.cached_sectors;

if (used) {
/*
Expand Down
4 changes: 2 additions & 2 deletions fs/bcachefs/buckets.c
Original file line number Diff line number Diff line change
Expand Up @@ -287,8 +287,8 @@ static inline int is_unavailable_bucket(struct bucket_mark m)
/*
 * Number of wasted ("fragmented") sectors in a partially filled bucket:
 * the gap between the bucket size and the bucket's dirty sectors.
 *
 * Only dirty sectors count toward fragmentation; cached data is
 * reclaimable and is deliberately ignored here (NOTE(review): this is
 * the commit's stated intent — "Ignore cached data when calculating
 * fragmentation").
 *
 * Returns 0 for an empty bucket (nothing worth compacting) and clamps
 * to 0 if dirty sectors already fill or exceed the bucket size.
 *
 * The scraped diff had interleaved the removed bucket_sectors_used()
 * form with the added m.dirty_sectors form, leaving two consecutive
 * return expressions; this keeps the post-commit (added) version.
 */
static inline int bucket_sectors_fragmented(struct bch_dev *ca,
					    struct bucket_mark m)
{
	return m.dirty_sectors
		? max(0, (int) ca->mi.bucket_size - (int) m.dirty_sectors)
		: 0;
}

Expand Down
5 changes: 0 additions & 5 deletions fs/bcachefs/buckets.h
Original file line number Diff line number Diff line change
Expand Up @@ -149,11 +149,6 @@ static inline u8 ptr_stale(struct bch_dev *ca,

/* bucket gc marks */

/*
 * Total sectors in use in a bucket: dirty data plus cached (clean,
 * reclaimable) data.
 */
static inline unsigned bucket_sectors_used(struct bucket_mark mark)
{
	unsigned sectors = mark.cached_sectors;

	sectors += mark.dirty_sectors;
	return sectors;
}

static inline bool is_available_bucket(struct bucket_mark mark)
{
return !mark.dirty_sectors && !mark.stripe;
Expand Down
21 changes: 12 additions & 9 deletions fs/bcachefs/movinggc.c
Original file line number Diff line number Diff line change
Expand Up @@ -69,10 +69,14 @@ static enum data_cmd copygc_pred(struct bch_fs *c, void *arg,
.dev = p.ptr.dev,
.offset = p.ptr.offset,
};
ssize_t i;

ssize_t i = eytzinger0_find_le(h->data, h->used,
sizeof(h->data[0]),
bucket_offset_cmp, &search);
if (p.ptr.cached)
continue;

i = eytzinger0_find_le(h->data, h->used,
sizeof(h->data[0]),
bucket_offset_cmp, &search);
#if 0
/* eytzinger search verify code: */
ssize_t j = -1, k;
Expand Down Expand Up @@ -185,8 +189,7 @@ static int bch2_copygc(struct bch_fs *c)

if (m.owned_by_allocator ||
m.data_type != BCH_DATA_user ||
!bucket_sectors_used(m) ||
bucket_sectors_used(m) >= ca->mi.bucket_size)
m.dirty_sectors >= ca->mi.bucket_size)
continue;

WARN_ON(m.stripe && !g->stripe_redundancy);
Expand All @@ -195,9 +198,9 @@ static int bch2_copygc(struct bch_fs *c)
.dev = dev_idx,
.gen = m.gen,
.replicas = 1 + g->stripe_redundancy,
.fragmentation = bucket_sectors_used(m) * (1U << 15)
.fragmentation = m.dirty_sectors * (1U << 15)
/ ca->mi.bucket_size,
.sectors = bucket_sectors_used(m),
.sectors = m.dirty_sectors,
.offset = bucket_to_sector(ca, b),
};
heap_add_or_replace(h, e, -fragmentation_cmp, NULL);
Expand Down Expand Up @@ -263,8 +266,8 @@ static int bch2_copygc(struct bch_fs *c)
m = READ_ONCE(buckets->b[b].mark);

if (i->gen == m.gen &&
bucket_sectors_used(m)) {
sectors_not_moved += bucket_sectors_used(m);
m.dirty_sectors) {
sectors_not_moved += m.dirty_sectors;
buckets_not_moved++;
}
}
Expand Down

0 comments on commit 215ae52

Please sign in to comment.