btrfs: factor out prepare_allocation() for extent allocation
This commit finally factors prepare_allocation() out of
find_free_extent(). The new function is called before the allocation
loop, and an allocator-specific helper like prepare_allocation_clustered()
should initialize its private information there and can set a proper
hint_byte to indicate where the allocation should start.

Reviewed-by: Josef Bacik <[email protected]>
Signed-off-by: Naohiro Aota <[email protected]>
Reviewed-by: David Sterba <[email protected]>
Signed-off-by: David Sterba <[email protected]>
naota authored and kdave committed Mar 23, 2020
1 parent 45d8e03 commit 7e89540
Showing 1 changed file with 68 additions and 42 deletions.
110 changes: 68 additions & 42 deletions fs/btrfs/extent-tree.c
@@ -3853,6 +3853,71 @@ static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
	return -ENOSPC;
}

static int prepare_allocation_clustered(struct btrfs_fs_info *fs_info,
					struct find_free_extent_ctl *ffe_ctl,
					struct btrfs_space_info *space_info,
					struct btrfs_key *ins)
{
	/*
	 * If our free space is heavily fragmented we may not be able to make
	 * big contiguous allocations, so instead of doing the expensive search
	 * for free space, simply return ENOSPC with our max_extent_size so we
	 * can go ahead and search for a more manageable chunk.
	 *
	 * If our max_extent_size is large enough for our allocation simply
	 * disable clustering since we will likely not be able to find enough
	 * space to create a cluster and induce latency trying.
	 */
	if (space_info->max_extent_size) {
		spin_lock(&space_info->lock);
		if (space_info->max_extent_size &&
		    ffe_ctl->num_bytes > space_info->max_extent_size) {
			ins->offset = space_info->max_extent_size;
			spin_unlock(&space_info->lock);
			return -ENOSPC;
		} else if (space_info->max_extent_size) {
			ffe_ctl->use_cluster = false;
		}
		spin_unlock(&space_info->lock);
	}

	ffe_ctl->last_ptr = fetch_cluster_info(fs_info, space_info,
					       &ffe_ctl->empty_cluster);
	if (ffe_ctl->last_ptr) {
		struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr;

		spin_lock(&last_ptr->lock);
		if (last_ptr->block_group)
			ffe_ctl->hint_byte = last_ptr->window_start;
		if (last_ptr->fragmented) {
			/*
			 * We still set window_start so we can keep track of the
			 * last place we found an allocation to try and save
			 * some time.
			 */
			ffe_ctl->hint_byte = last_ptr->window_start;
			ffe_ctl->use_cluster = false;
		}
		spin_unlock(&last_ptr->lock);
	}

	return 0;
}

static int prepare_allocation(struct btrfs_fs_info *fs_info,
			      struct find_free_extent_ctl *ffe_ctl,
			      struct btrfs_space_info *space_info,
			      struct btrfs_key *ins)
{
	switch (ffe_ctl->policy) {
	case BTRFS_EXTENT_ALLOC_CLUSTERED:
		return prepare_allocation_clustered(fs_info, ffe_ctl,
						    space_info, ins);
	default:
		BUG();
	}
}

/*
 * walks the btree of allocated extents and find a hole of a given size.
 * The key ins is changed to record the hole:
@@ -3922,48 +3987,9 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
		return -ENOSPC;
	}

	/*
	 * If our free space is heavily fragmented we may not be able to make
	 * big contiguous allocations, so instead of doing the expensive search
	 * for free space, simply return ENOSPC with our max_extent_size so we
	 * can go ahead and search for a more manageable chunk.
	 *
	 * If our max_extent_size is large enough for our allocation simply
	 * disable clustering since we will likely not be able to find enough
	 * space to create a cluster and induce latency trying.
	 */
	if (unlikely(space_info->max_extent_size)) {
		spin_lock(&space_info->lock);
		if (space_info->max_extent_size &&
		    num_bytes > space_info->max_extent_size) {
			ins->offset = space_info->max_extent_size;
			spin_unlock(&space_info->lock);
			return -ENOSPC;
		} else if (space_info->max_extent_size) {
			ffe_ctl.use_cluster = false;
		}
		spin_unlock(&space_info->lock);
	}

	ffe_ctl.last_ptr = fetch_cluster_info(fs_info, space_info,
					      &ffe_ctl.empty_cluster);
	if (ffe_ctl.last_ptr) {
		struct btrfs_free_cluster *last_ptr = ffe_ctl.last_ptr;

		spin_lock(&last_ptr->lock);
		if (last_ptr->block_group)
			ffe_ctl.hint_byte = last_ptr->window_start;
		if (last_ptr->fragmented) {
			/*
			 * We still set window_start so we can keep track of the
			 * last place we found an allocation to try and save
			 * some time.
			 */
			ffe_ctl.hint_byte = last_ptr->window_start;
			ffe_ctl.use_cluster = false;
		}
		spin_unlock(&last_ptr->lock);
	}

	ret = prepare_allocation(fs_info, &ffe_ctl, space_info, ins);
	if (ret < 0)
		return ret;

	ffe_ctl.search_start = max(ffe_ctl.search_start,
				   first_logical_byte(fs_info, 0));
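For readers less familiar with this code, the dispatch pattern the commit introduces can be shown outside the kernel. The sketch below is a minimal, self-contained user-space C program, not btrfs code: struct alloc_ctl, prepare_clustered() and the values passed to them are hypothetical stand-ins for find_free_extent_ctl and prepare_allocation_clustered(); only the shape of the policy switch mirrors the patch.

/*
 * Minimal user-space sketch of the pattern introduced by this commit:
 * prepare_allocation() runs once before the allocation loop and
 * dispatches to a policy-specific helper, which may set a hint byte
 * for where the search should start. All types here are hypothetical
 * illustrations, not the btrfs structures.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum alloc_policy {
	ALLOC_CLUSTERED,	/* stands in for BTRFS_EXTENT_ALLOC_CLUSTERED */
};

struct alloc_ctl {			/* stands in for find_free_extent_ctl */
	enum alloc_policy policy;
	uint64_t num_bytes;		/* size of the requested allocation */
	uint64_t hint_byte;		/* where the search loop should begin */
	bool use_cluster;
};

/* Policy-specific setup: reuse the last cluster window as a starting hint. */
static int prepare_clustered(struct alloc_ctl *ctl, uint64_t last_window_start)
{
	if (last_window_start)
		ctl->hint_byte = last_window_start;
	return 0;
}

/* The factored-out entry point: pick the helper based on the policy. */
static int prepare_allocation(struct alloc_ctl *ctl, uint64_t last_window_start)
{
	switch (ctl->policy) {
	case ALLOC_CLUSTERED:
		return prepare_clustered(ctl, last_window_start);
	default:
		return -1;	/* unknown policy */
	}
}

int main(void)
{
	struct alloc_ctl ctl = {
		.policy = ALLOC_CLUSTERED,
		.num_bytes = 1 << 20,
		.use_cluster = true,
	};

	/* A previous allocation ended at byte 4096 in this toy example. */
	if (prepare_allocation(&ctl, 4096) == 0)
		printf("search starts at hint_byte=%llu\n",
		       (unsigned long long)ctl.hint_byte);
	return 0;
}

In the kernel patch itself, the same idea lets a future allocation policy add its own case to the switch in prepare_allocation() without touching find_free_extent().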
