Fixed MTP to work with TWRP

commit f6dfaef42e
Author: awab228
Date:   2018-06-19 23:16:04 +02:00

50820 changed files with 20846062 additions and 0 deletions


@@ -0,0 +1,171 @@
/*
* Copyright (C) 2013 Fusion IO. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include "btrfs-tests.h"
#include "../ctree.h"
#include "../volumes.h"
#include "../disk-io.h"
#include "../qgroup.h"
static struct vfsmount *test_mnt = NULL;
static const struct super_operations btrfs_test_super_ops = {
.alloc_inode = btrfs_alloc_inode,
.destroy_inode = btrfs_test_destroy_inode,
};
static struct dentry *btrfs_test_mount(struct file_system_type *fs_type,
int flags, const char *dev_name,
void *data)
{
return mount_pseudo(fs_type, "btrfs_test:", &btrfs_test_super_ops,
NULL, BTRFS_TEST_MAGIC);
}
static struct file_system_type test_type = {
.name = "btrfs_test_fs",
.mount = btrfs_test_mount,
.kill_sb = kill_anon_super,
};
struct inode *btrfs_new_test_inode(void)
{
return new_inode(test_mnt->mnt_sb);
}
int btrfs_init_test_fs(void)
{
int ret;
ret = register_filesystem(&test_type);
if (ret) {
printk(KERN_ERR "btrfs: cannot register test file system\n");
return ret;
}
test_mnt = kern_mount(&test_type);
if (IS_ERR(test_mnt)) {
printk(KERN_ERR "btrfs: cannot mount test file system\n");
unregister_filesystem(&test_type);
return PTR_ERR(test_mnt);
}
return 0;
}
void btrfs_destroy_test_fs(void)
{
kern_unmount(test_mnt);
unregister_filesystem(&test_type);
}
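/*
 * Allocate a bare fs_info with only the locks, lists and radix trees the
 * sanity tests touch initialized.  Nothing is read from disk and no
 * worker threads are started.
 */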
struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void)
{
struct btrfs_fs_info *fs_info = kzalloc(sizeof(struct btrfs_fs_info),
GFP_NOFS);
if (!fs_info)
return fs_info;
fs_info->fs_devices = kzalloc(sizeof(struct btrfs_fs_devices),
GFP_NOFS);
if (!fs_info->fs_devices) {
kfree(fs_info);
return NULL;
}
fs_info->super_copy = kzalloc(sizeof(struct btrfs_super_block),
GFP_NOFS);
if (!fs_info->super_copy) {
kfree(fs_info->fs_devices);
kfree(fs_info);
return NULL;
}
if (init_srcu_struct(&fs_info->subvol_srcu)) {
kfree(fs_info->fs_devices);
kfree(fs_info->super_copy);
kfree(fs_info);
return NULL;
}
spin_lock_init(&fs_info->buffer_lock);
spin_lock_init(&fs_info->qgroup_lock);
spin_lock_init(&fs_info->qgroup_op_lock);
spin_lock_init(&fs_info->super_lock);
spin_lock_init(&fs_info->fs_roots_radix_lock);
spin_lock_init(&fs_info->tree_mod_seq_lock);
mutex_init(&fs_info->qgroup_ioctl_lock);
mutex_init(&fs_info->qgroup_rescan_lock);
rwlock_init(&fs_info->tree_mod_log_lock);
fs_info->running_transaction = NULL;
fs_info->qgroup_tree = RB_ROOT;
fs_info->qgroup_ulist = NULL;
atomic64_set(&fs_info->tree_mod_seq, 0);
INIT_LIST_HEAD(&fs_info->dirty_qgroups);
INIT_LIST_HEAD(&fs_info->dead_roots);
INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
return fs_info;
}
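/*
 * Tear-down counterpart of btrfs_alloc_dummy_fs_info(): drop any extent
 * buffers left in buffer_radix, then free the qgroup config, fs roots
 * and the fs_info itself.
 */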
static void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info)
{
struct radix_tree_iter iter;
void **slot;
spin_lock(&fs_info->buffer_lock);
restart:
radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter, 0) {
struct extent_buffer *eb;
eb = radix_tree_deref_slot_protected(slot, &fs_info->buffer_lock);
if (!eb)
continue;
/* Shouldn't happen but that kind of thinking creates CVEs */
if (radix_tree_exception(eb)) {
if (radix_tree_deref_retry(eb))
goto restart;
continue;
}
spin_unlock(&fs_info->buffer_lock);
free_extent_buffer_stale(eb);
spin_lock(&fs_info->buffer_lock);
}
spin_unlock(&fs_info->buffer_lock);
btrfs_free_qgroup_config(fs_info);
btrfs_free_fs_roots(fs_info);
cleanup_srcu_struct(&fs_info->subvol_srcu);
kfree(fs_info->super_copy);
kfree(fs_info->fs_devices);
kfree(fs_info);
}
void btrfs_free_dummy_root(struct btrfs_root *root)
{
if (!root)
return;
if (root->node)
free_extent_buffer(root->node);
if (root->fs_info)
btrfs_free_dummy_fs_info(root->fs_info);
kfree(root);
}


@@ -0,0 +1,68 @@
/*
* Copyright (C) 2013 Fusion IO. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
#ifndef __BTRFS_TESTS
#define __BTRFS_TESTS
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
#define test_msg(fmt, ...) pr_info("BTRFS: selftest: " fmt, ##__VA_ARGS__)
struct btrfs_root;
int btrfs_test_free_space_cache(void);
int btrfs_test_extent_buffer_operations(void);
int btrfs_test_extent_io(void);
int btrfs_test_inodes(void);
int btrfs_test_qgroups(void);
int btrfs_init_test_fs(void);
void btrfs_destroy_test_fs(void);
struct inode *btrfs_new_test_inode(void);
struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void);
void btrfs_free_dummy_root(struct btrfs_root *root);
#else
static inline int btrfs_test_free_space_cache(void)
{
return 0;
}
static inline int btrfs_test_extent_buffer_operations(void)
{
return 0;
}
static inline int btrfs_init_test_fs(void)
{
return 0;
}
static inline void btrfs_destroy_test_fs(void)
{
}
static inline int btrfs_test_extent_io(void)
{
return 0;
}
static inline int btrfs_test_inodes(void)
{
return 0;
}
static inline int btrfs_test_qgroups(void)
{
return 0;
}
#endif
#endif


@@ -0,0 +1,229 @@
/*
* Copyright (C) 2013 Fusion IO. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*/
#include <linux/slab.h>
#include "btrfs-tests.h"
#include "../ctree.h"
#include "../extent_io.h"
#include "../disk-io.h"
static int test_btrfs_split_item(void)
{
struct btrfs_path *path;
struct btrfs_root *root;
struct extent_buffer *eb;
struct btrfs_item *item;
char *value = "mary had a little lamb";
char *split1 = "mary had a little";
char *split2 = " lamb";
char *split3 = "mary";
char *split4 = " had a little";
char buf[32];
struct btrfs_key key;
u32 value_len = strlen(value);
int ret = 0;
test_msg("Running btrfs_split_item tests\n");
root = btrfs_alloc_dummy_root();
if (IS_ERR(root)) {
test_msg("Could not allocate root\n");
return PTR_ERR(root);
}
path = btrfs_alloc_path();
if (!path) {
test_msg("Could not allocate path\n");
kfree(root);
return -ENOMEM;
}
path->nodes[0] = eb = alloc_dummy_extent_buffer(0, 4096);
if (!eb) {
test_msg("Could not allocate dummy buffer\n");
ret = -ENOMEM;
goto out;
}
path->slots[0] = 0;
key.objectid = 0;
key.type = BTRFS_EXTENT_CSUM_KEY;
key.offset = 0;
setup_items_for_insert(root, path, &key, &value_len, value_len,
value_len + sizeof(struct btrfs_item), 1);
item = btrfs_item_nr(0);
write_extent_buffer(eb, value, btrfs_item_ptr_offset(eb, 0),
value_len);
key.offset = 3;
/*
* Passing NULL trans here should be safe because we have plenty of
* space in this leaf to split the item without having to split the
* leaf.
*/
ret = btrfs_split_item(NULL, root, path, &key, 17);
if (ret) {
test_msg("Split item failed %d\n", ret);
goto out;
}
/*
* Read the first slot, it should have the original key and contain only
* 'mary had a little'
*/
btrfs_item_key_to_cpu(eb, &key, 0);
if (key.objectid != 0 || key.type != BTRFS_EXTENT_CSUM_KEY ||
key.offset != 0) {
test_msg("Invalid key at slot 0\n");
ret = -EINVAL;
goto out;
}
item = btrfs_item_nr(0);
if (btrfs_item_size(eb, item) != strlen(split1)) {
test_msg("Invalid len in the first split\n");
ret = -EINVAL;
goto out;
}
read_extent_buffer(eb, buf, btrfs_item_ptr_offset(eb, 0),
strlen(split1));
if (memcmp(buf, split1, strlen(split1))) {
test_msg("Data in the buffer doesn't match what it should "
"in the first split have='%.*s' want '%s'\n",
(int)strlen(split1), buf, split1);
ret = -EINVAL;
goto out;
}
btrfs_item_key_to_cpu(eb, &key, 1);
if (key.objectid != 0 || key.type != BTRFS_EXTENT_CSUM_KEY ||
key.offset != 3) {
test_msg("Invalid key at slot 1\n");
ret = -EINVAL;
goto out;
}
item = btrfs_item_nr(1);
if (btrfs_item_size(eb, item) != strlen(split2)) {
test_msg("Invalid len in the second split\n");
ret = -EINVAL;
goto out;
}
read_extent_buffer(eb, buf, btrfs_item_ptr_offset(eb, 1),
strlen(split2));
if (memcmp(buf, split2, strlen(split2))) {
test_msg("Data in the buffer doesn't match what it should "
"in the second split\n");
ret = -EINVAL;
goto out;
}
key.offset = 1;
/* Do it again so we test memmoving the other items in the leaf */
ret = btrfs_split_item(NULL, root, path, &key, 4);
if (ret) {
test_msg("Second split item failed %d\n", ret);
goto out;
}
btrfs_item_key_to_cpu(eb, &key, 0);
if (key.objectid != 0 || key.type != BTRFS_EXTENT_CSUM_KEY ||
key.offset != 0) {
test_msg("Invalid key at slot 0\n");
ret = -EINVAL;
goto out;
}
item = btrfs_item_nr(0);
if (btrfs_item_size(eb, item) != strlen(split3)) {
test_msg("Invalid len in the first split\n");
ret = -EINVAL;
goto out;
}
read_extent_buffer(eb, buf, btrfs_item_ptr_offset(eb, 0),
strlen(split3));
if (memcmp(buf, split3, strlen(split3))) {
test_msg("Data in the buffer doesn't match what it should "
"in the third split");
ret = -EINVAL;
goto out;
}
btrfs_item_key_to_cpu(eb, &key, 1);
if (key.objectid != 0 || key.type != BTRFS_EXTENT_CSUM_KEY ||
key.offset != 1) {
test_msg("Invalid key at slot 1\n");
ret = -EINVAL;
goto out;
}
item = btrfs_item_nr(1);
if (btrfs_item_size(eb, item) != strlen(split4)) {
test_msg("Invalid len in the second split\n");
ret = -EINVAL;
goto out;
}
read_extent_buffer(eb, buf, btrfs_item_ptr_offset(eb, 1),
strlen(split4));
if (memcmp(buf, split4, strlen(split4))) {
test_msg("Data in the buffer doesn't match what it should "
"in the fourth split\n");
ret = -EINVAL;
goto out;
}
btrfs_item_key_to_cpu(eb, &key, 2);
if (key.objectid != 0 || key.type != BTRFS_EXTENT_CSUM_KEY ||
key.offset != 3) {
test_msg("Invalid key at slot 2\n");
ret = -EINVAL;
goto out;
}
item = btrfs_item_nr(2);
if (btrfs_item_size(eb, item) != strlen(split2)) {
test_msg("Invalid len in the second split\n");
ret = -EINVAL;
goto out;
}
read_extent_buffer(eb, buf, btrfs_item_ptr_offset(eb, 2),
strlen(split2));
if (memcmp(buf, split2, strlen(split2))) {
test_msg("Data in the buffer doesn't match what it should "
"in the last chunk\n");
ret = -EINVAL;
goto out;
}
out:
btrfs_free_path(path);
kfree(root);
return ret;
}
int btrfs_test_extent_buffer_operations(void)
{
test_msg("Running extent buffer operation tests");
return test_btrfs_split_item();
}


@@ -0,0 +1,276 @@
/*
* Copyright (C) 2013 Fusion IO. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
#include <linux/pagemap.h>
#include <linux/sched.h>
#include "btrfs-tests.h"
#include "../extent_io.h"
#define PROCESS_UNLOCK (1 << 0)
#define PROCESS_RELEASE (1 << 1)
#define PROCESS_TEST_LOCKED (1 << 2)
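/*
 * Walk the page cache over [start, end] and, depending on @flags, check
 * that pages are locked, unlock them and/or drop the extra reference the
 * test took.  Returns the number of pages that failed the locked check.
 */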
static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
unsigned long flags)
{
int ret;
struct page *pages[16];
unsigned long index = start >> PAGE_CACHE_SHIFT;
unsigned long end_index = end >> PAGE_CACHE_SHIFT;
unsigned long nr_pages = end_index - index + 1;
int i;
int count = 0;
int loops = 0;
while (nr_pages > 0) {
ret = find_get_pages_contig(inode->i_mapping, index,
min_t(unsigned long, nr_pages,
ARRAY_SIZE(pages)), pages);
for (i = 0; i < ret; i++) {
if (flags & PROCESS_TEST_LOCKED &&
!PageLocked(pages[i]))
count++;
if (flags & PROCESS_UNLOCK && PageLocked(pages[i]))
unlock_page(pages[i]);
page_cache_release(pages[i]);
if (flags & PROCESS_RELEASE)
page_cache_release(pages[i]);
}
nr_pages -= ret;
index += ret;
cond_resched();
loops++;
if (loops > 100000) {
printk(KERN_ERR "stuck in a loop, start %Lu, end %Lu, nr_pages %lu, ret %d\n", start, end, nr_pages, ret);
break;
}
}
return count;
}
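/*
 * Exercise find_lock_delalloc_range() against 256MB of pinned dirty pages:
 * a small delalloc region at the start, a search inside a region larger
 * than max_bytes, a search past any delalloc, a search inside a huge
 * region, and finally a range where one page has been cleaned.
 */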
static int test_find_delalloc(void)
{
struct inode *inode;
struct extent_io_tree tmp;
struct page *page;
struct page *locked_page = NULL;
unsigned long index = 0;
u64 total_dirty = 256 * 1024 * 1024;
u64 max_bytes = 128 * 1024 * 1024;
u64 start, end, test_start;
u64 found;
int ret = -EINVAL;
inode = btrfs_new_test_inode();
if (!inode) {
test_msg("Failed to allocate test inode\n");
return -ENOMEM;
}
extent_io_tree_init(&tmp, &inode->i_data);
/*
* First go through and create and mark all of our pages dirty, we pin
* everything to make sure our pages don't get evicted and screw up our
* test.
*/
for (index = 0; index < (total_dirty >> PAGE_CACHE_SHIFT); index++) {
page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
if (!page) {
test_msg("Failed to allocate test page\n");
ret = -ENOMEM;
goto out;
}
SetPageDirty(page);
if (index) {
unlock_page(page);
} else {
page_cache_get(page);
locked_page = page;
}
}
/* Test this scenario
* |--- delalloc ---|
* |--- search ---|
*/
set_extent_delalloc(&tmp, 0, 4095, NULL, GFP_NOFS);
start = 0;
end = 0;
found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
&end, max_bytes);
if (!found) {
test_msg("Should have found at least one delalloc\n");
goto out_bits;
}
if (start != 0 || end != 4095) {
test_msg("Expected start 0 end 4095, got start %Lu end %Lu\n",
start, end);
goto out_bits;
}
unlock_extent(&tmp, start, end);
unlock_page(locked_page);
page_cache_release(locked_page);
/*
* Test this scenario
*
* |--- delalloc ---|
* |--- search ---|
*/
test_start = 64 * 1024 * 1024;
locked_page = find_lock_page(inode->i_mapping,
test_start >> PAGE_CACHE_SHIFT);
if (!locked_page) {
test_msg("Couldn't find the locked page\n");
goto out_bits;
}
set_extent_delalloc(&tmp, 4096, max_bytes - 1, NULL, GFP_NOFS);
start = test_start;
end = 0;
found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
&end, max_bytes);
if (!found) {
test_msg("Couldn't find delalloc in our range\n");
goto out_bits;
}
if (start != test_start || end != max_bytes - 1) {
test_msg("Expected start %Lu end %Lu, got start %Lu, end "
"%Lu\n", test_start, max_bytes - 1, start, end);
goto out_bits;
}
if (process_page_range(inode, start, end,
PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) {
test_msg("There were unlocked pages in the range\n");
goto out_bits;
}
unlock_extent(&tmp, start, end);
/* locked_page was unlocked above */
page_cache_release(locked_page);
/*
* Test this scenario
* |--- delalloc ---|
* |--- search ---|
*/
test_start = max_bytes + 4096;
locked_page = find_lock_page(inode->i_mapping, test_start >>
PAGE_CACHE_SHIFT);
if (!locked_page) {
test_msg("Could'nt find the locked page\n");
goto out_bits;
}
start = test_start;
end = 0;
found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
&end, max_bytes);
if (found) {
test_msg("Found range when we shouldn't have\n");
goto out_bits;
}
if (end != (u64)-1) {
test_msg("Did not return the proper end offset\n");
goto out_bits;
}
/*
* Test this scenario
* [------- delalloc -------|
* [max_bytes]|-- search--|
*
* We are re-using our test_start from above since it works out well.
*/
set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, NULL, GFP_NOFS);
start = test_start;
end = 0;
found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
&end, max_bytes);
if (!found) {
test_msg("Didn't find our range\n");
goto out_bits;
}
if (start != test_start || end != total_dirty - 1) {
test_msg("Expected start %Lu end %Lu, got start %Lu end %Lu\n",
test_start, total_dirty - 1, start, end);
goto out_bits;
}
if (process_page_range(inode, start, end,
PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) {
test_msg("Pages in range were not all locked\n");
goto out_bits;
}
unlock_extent(&tmp, start, end);
/*
* Now to test where we run into a page that is no longer dirty in the
* range we want to find.
*/
page = find_get_page(inode->i_mapping, (max_bytes + (1 * 1024 * 1024))
>> PAGE_CACHE_SHIFT);
if (!page) {
test_msg("Couldn't find our page\n");
goto out_bits;
}
ClearPageDirty(page);
page_cache_release(page);
/* We unlocked it in the previous test */
lock_page(locked_page);
start = test_start;
end = 0;
/*
* Currently if we fail to find dirty pages in the delalloc range we
* will adjust max_bytes down to PAGE_CACHE_SIZE and then re-search. If
* this changes at any point in the future we will need to fix this
* test's expected behavior.
*/
found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
&end, max_bytes);
if (!found) {
test_msg("Didn't find our range\n");
goto out_bits;
}
if (start != test_start && end != test_start + PAGE_CACHE_SIZE - 1) {
test_msg("Expected start %Lu end %Lu, got start %Lu end %Lu\n",
test_start, test_start + PAGE_CACHE_SIZE - 1, start,
end);
goto out_bits;
}
if (process_page_range(inode, start, end, PROCESS_TEST_LOCKED |
PROCESS_UNLOCK)) {
test_msg("Pages in range were not all locked\n");
goto out_bits;
}
ret = 0;
out_bits:
clear_extent_bits(&tmp, 0, total_dirty - 1,
(unsigned long)-1, GFP_NOFS);
out:
if (locked_page)
page_cache_release(locked_page);
process_page_range(inode, 0, total_dirty - 1,
PROCESS_UNLOCK | PROCESS_RELEASE);
iput(inode);
return ret;
}
int btrfs_test_extent_io(void)
{
test_msg("Running find delalloc tests\n");
return test_find_delalloc();
}


@@ -0,0 +1,909 @@
/*
* Copyright (C) 2013 Fusion IO. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
#include <linux/slab.h>
#include "btrfs-tests.h"
#include "../ctree.h"
#include "../free-space-cache.h"
#define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8)
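/*
 * Build a dummy 1GB block group with a 4K sectorsize and its own free
 * space ctl so the free space cache code can be exercised without a
 * mounted filesystem.
 */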
static struct btrfs_block_group_cache *init_test_block_group(void)
{
struct btrfs_block_group_cache *cache;
cache = kzalloc(sizeof(*cache), GFP_NOFS);
if (!cache)
return NULL;
cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
GFP_NOFS);
if (!cache->free_space_ctl) {
kfree(cache);
return NULL;
}
cache->key.objectid = 0;
cache->key.offset = 1024 * 1024 * 1024;
cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
cache->sectorsize = 4096;
cache->full_stripe_len = 4096;
spin_lock_init(&cache->lock);
INIT_LIST_HEAD(&cache->list);
INIT_LIST_HEAD(&cache->cluster_list);
INIT_LIST_HEAD(&cache->bg_list);
btrfs_init_free_space_ctl(cache);
return cache;
}
/*
* This test just does basic sanity checking, making sure we can add an extent
* entry and remove space from either end and the middle, and make sure we can
* remove space that covers adjacent extent entries.
*/
static int test_extents(struct btrfs_block_group_cache *cache)
{
int ret = 0;
test_msg("Running extent only tests\n");
/* First just make sure we can remove an entire entry */
ret = btrfs_add_free_space(cache, 0, 4 * 1024 * 1024);
if (ret) {
test_msg("Error adding initial extents %d\n", ret);
return ret;
}
ret = btrfs_remove_free_space(cache, 0, 4 * 1024 * 1024);
if (ret) {
test_msg("Error removing extent %d\n", ret);
return ret;
}
if (test_check_exists(cache, 0, 4 * 1024 * 1024)) {
test_msg("Full remove left some lingering space\n");
return -1;
}
/* Ok edge and middle cases now */
ret = btrfs_add_free_space(cache, 0, 4 * 1024 * 1024);
if (ret) {
test_msg("Error adding half extent %d\n", ret);
return ret;
}
ret = btrfs_remove_free_space(cache, 3 * 1024 * 1024, 1 * 1024 * 1024);
if (ret) {
test_msg("Error removing tail end %d\n", ret);
return ret;
}
ret = btrfs_remove_free_space(cache, 0, 1 * 1024 * 1024);
if (ret) {
test_msg("Error removing front end %d\n", ret);
return ret;
}
ret = btrfs_remove_free_space(cache, 2 * 1024 * 1024, 4096);
if (ret) {
test_msg("Error removing middle piece %d\n", ret);
return ret;
}
if (test_check_exists(cache, 0, 1 * 1024 * 1024)) {
test_msg("Still have space at the front\n");
return -1;
}
if (test_check_exists(cache, 2 * 1024 * 1024, 4096)) {
test_msg("Still have space in the middle\n");
return -1;
}
if (test_check_exists(cache, 3 * 1024 * 1024, 1 * 1024 * 1024)) {
test_msg("Still have space at the end\n");
return -1;
}
/* Cleanup */
__btrfs_remove_free_space_cache(cache->free_space_ctl);
return 0;
}
static int test_bitmaps(struct btrfs_block_group_cache *cache)
{
u64 next_bitmap_offset;
int ret;
test_msg("Running bitmap only tests\n");
ret = test_add_free_space_entry(cache, 0, 4 * 1024 * 1024, 1);
if (ret) {
test_msg("Couldn't create a bitmap entry %d\n", ret);
return ret;
}
ret = btrfs_remove_free_space(cache, 0, 4 * 1024 * 1024);
if (ret) {
test_msg("Error removing bitmap full range %d\n", ret);
return ret;
}
if (test_check_exists(cache, 0, 4 * 1024 * 1024)) {
test_msg("Left some space in bitmap\n");
return -1;
}
ret = test_add_free_space_entry(cache, 0, 4 * 1024 * 1024, 1);
if (ret) {
test_msg("Couldn't add to our bitmap entry %d\n", ret);
return ret;
}
ret = btrfs_remove_free_space(cache, 1 * 1024 * 1024, 2 * 1024 * 1024);
if (ret) {
test_msg("Couldn't remove middle chunk %d\n", ret);
return ret;
}
/*
* The first bitmap we have starts at offset 0 so the next one is just
* at the end of the first bitmap.
*/
next_bitmap_offset = (u64)(BITS_PER_BITMAP * 4096);
/* Test a bit straddling two bitmaps */
ret = test_add_free_space_entry(cache, next_bitmap_offset -
(2 * 1024 * 1024), 4 * 1024 * 1024, 1);
if (ret) {
test_msg("Couldn't add space that straddles two bitmaps %d\n",
ret);
return ret;
}
ret = btrfs_remove_free_space(cache, next_bitmap_offset -
(1 * 1024 * 1024), 2 * 1024 * 1024);
if (ret) {
test_msg("Couldn't remove overlapping space %d\n", ret);
return ret;
}
if (test_check_exists(cache, next_bitmap_offset - (1 * 1024 * 1024),
2 * 1024 * 1024)) {
test_msg("Left some space when removing overlapping\n");
return -1;
}
__btrfs_remove_free_space_cache(cache->free_space_ctl);
return 0;
}
/* This is the high grade jackassery */
static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache)
{
u64 bitmap_offset = (u64)(BITS_PER_BITMAP * 4096);
int ret;
test_msg("Running bitmap and extent tests\n");
/*
* First let's do something simple, an extent at the same offset as the
* bitmap, but the free space completely in the extent and then
* completely in the bitmap.
*/
ret = test_add_free_space_entry(cache, 4 * 1024 * 1024, 1 * 1024 * 1024, 1);
if (ret) {
test_msg("Couldn't create bitmap entry %d\n", ret);
return ret;
}
ret = test_add_free_space_entry(cache, 0, 1 * 1024 * 1024, 0);
if (ret) {
test_msg("Couldn't add extent entry %d\n", ret);
return ret;
}
ret = btrfs_remove_free_space(cache, 0, 1 * 1024 * 1024);
if (ret) {
test_msg("Couldn't remove extent entry %d\n", ret);
return ret;
}
if (test_check_exists(cache, 0, 1 * 1024 * 1024)) {
test_msg("Left remnants after our remove\n");
return -1;
}
/* Now to add back the extent entry and remove from the bitmap */
ret = test_add_free_space_entry(cache, 0, 1 * 1024 * 1024, 0);
if (ret) {
test_msg("Couldn't re-add extent entry %d\n", ret);
return ret;
}
ret = btrfs_remove_free_space(cache, 4 * 1024 * 1024, 1 * 1024 * 1024);
if (ret) {
test_msg("Couldn't remove from bitmap %d\n", ret);
return ret;
}
if (test_check_exists(cache, 4 * 1024 * 1024, 1 * 1024 * 1024)) {
test_msg("Left remnants in the bitmap\n");
return -1;
}
/*
* Ok so a little more evil, extent entry and bitmap at the same offset,
* removing an overlapping chunk.
*/
ret = test_add_free_space_entry(cache, 1 * 1024 * 1024, 4 * 1024 * 1024, 1);
if (ret) {
test_msg("Couldn't add to a bitmap %d\n", ret);
return ret;
}
ret = btrfs_remove_free_space(cache, 512 * 1024, 3 * 1024 * 1024);
if (ret) {
test_msg("Couldn't remove overlapping space %d\n", ret);
return ret;
}
if (test_check_exists(cache, 512 * 1024, 3 * 1024 * 1024)) {
test_msg("Left over pieces after removing overlapping\n");
return -1;
}
__btrfs_remove_free_space_cache(cache->free_space_ctl);
/* Now with the extent entry offset into the bitmap */
ret = test_add_free_space_entry(cache, 4 * 1024 * 1024, 4 * 1024 * 1024, 1);
if (ret) {
test_msg("Couldn't add space to the bitmap %d\n", ret);
return ret;
}
ret = test_add_free_space_entry(cache, 2 * 1024 * 1024, 2 * 1024 * 1024, 0);
if (ret) {
test_msg("Couldn't add extent to the cache %d\n", ret);
return ret;
}
ret = btrfs_remove_free_space(cache, 3 * 1024 * 1024, 4 * 1024 * 1024);
if (ret) {
test_msg("Problem removing overlapping space %d\n", ret);
return ret;
}
if (test_check_exists(cache, 3 * 1024 * 1024, 4 * 1024 * 1024)) {
test_msg("Left something behind when removing space");
return -1;
}
/*
* This has blown up in the past, the extent entry starts before the
* bitmap entry, but we're trying to remove an offset that falls
* completely within the bitmap range and is in both the extent entry
* and the bitmap entry, looks like this
*
* [ extent ]
* [ bitmap ]
* [ del ]
*/
__btrfs_remove_free_space_cache(cache->free_space_ctl);
ret = test_add_free_space_entry(cache, bitmap_offset + 4 * 1024 * 1024,
4 * 1024 * 1024, 1);
if (ret) {
test_msg("Couldn't add bitmap %d\n", ret);
return ret;
}
ret = test_add_free_space_entry(cache, bitmap_offset - 1 * 1024 * 1024,
5 * 1024 * 1024, 0);
if (ret) {
test_msg("Couldn't add extent entry %d\n", ret);
return ret;
}
ret = btrfs_remove_free_space(cache, bitmap_offset + 1 * 1024 * 1024,
5 * 1024 * 1024);
if (ret) {
test_msg("Failed to free our space %d\n", ret);
return ret;
}
if (test_check_exists(cache, bitmap_offset + 1 * 1024 * 1024,
5 * 1024 * 1024)) {
test_msg("Left stuff over\n");
return -1;
}
__btrfs_remove_free_space_cache(cache->free_space_ctl);
/*
* This blew up before, we have part of the free space in a bitmap and
* then the entirety of the rest of the space in an extent. This used
* to return -EAGAIN back from btrfs_remove_extent, make sure this
* doesn't happen.
*/
ret = test_add_free_space_entry(cache, 1 * 1024 * 1024, 2 * 1024 * 1024, 1);
if (ret) {
test_msg("Couldn't add bitmap entry %d\n", ret);
return ret;
}
ret = test_add_free_space_entry(cache, 3 * 1024 * 1024, 1 * 1024 * 1024, 0);
if (ret) {
test_msg("Couldn't add extent entry %d\n", ret);
return ret;
}
ret = btrfs_remove_free_space(cache, 1 * 1024 * 1024, 3 * 1024 * 1024);
if (ret) {
test_msg("Error removing bitmap and extent overlapping %d\n", ret);
return ret;
}
__btrfs_remove_free_space_cache(cache->free_space_ctl);
return 0;
}
/* Used by test_steal_space_from_bitmap_to_extent(). */
static bool test_use_bitmap(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info)
{
return ctl->free_extents > 0;
}
/* Used by test_steal_space_from_bitmap_to_extent(). */
static int
check_num_extents_and_bitmaps(const struct btrfs_block_group_cache *cache,
const int num_extents,
const int num_bitmaps)
{
if (cache->free_space_ctl->free_extents != num_extents) {
test_msg("Incorrect # of extent entries in the cache: %d, expected %d\n",
cache->free_space_ctl->free_extents, num_extents);
return -EINVAL;
}
if (cache->free_space_ctl->total_bitmaps != num_bitmaps) {
test_msg("Incorrect # of extent entries in the cache: %d, expected %d\n",
cache->free_space_ctl->total_bitmaps, num_bitmaps);
return -EINVAL;
}
return 0;
}
/* Used by test_steal_space_from_bitmap_to_extent(). */
static int check_cache_empty(struct btrfs_block_group_cache *cache)
{
u64 offset;
u64 max_extent_size;
/*
* Now lets confirm that there's absolutely no free space left to
* allocate.
*/
if (cache->free_space_ctl->free_space != 0) {
test_msg("Cache free space is not 0\n");
return -EINVAL;
}
/* And any allocation request, no matter how small, should fail now. */
offset = btrfs_find_space_for_alloc(cache, 0, 4096, 0,
&max_extent_size);
if (offset != 0) {
test_msg("Space allocation did not fail, returned offset: %llu",
offset);
return -EINVAL;
}
/* And no extent nor bitmap entries in the cache anymore. */
return check_num_extents_and_bitmaps(cache, 0, 0);
}
/*
* Before we were able to steal free space from a bitmap entry to an extent
* entry, we could end up with 2 entries representing a contiguous free space.
* One would be an extent entry and the other a bitmap entry. Since in order
* to allocate space to a caller we use only 1 entry, we couldn't return that
* whole range to the caller if it was requested. This forced the caller to
* either assume ENOSPC or perform several smaller space allocations, which
* wasn't optimal as they could be spread all over the block group while under
* concurrency (extra overhead and fragmentation).
*
* This stealing approach is beneficial, since we always prefer to allocate from
* extent entries, both for clustered and non-clustered allocation requests.
*/
static int
test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
{
int ret;
u64 offset;
u64 max_extent_size;
bool (*use_bitmap_op)(struct btrfs_free_space_ctl *,
struct btrfs_free_space *);
test_msg("Running space stealing from bitmap to extent\n");
/*
* For this test, we want to ensure we end up with an extent entry
* immediately adjacent to a bitmap entry, where the bitmap starts
* at an offset where the extent entry ends. We keep adding and
* removing free space to reach into this state, but to get there
* we need to reach a point where marking new free space doesn't
* result in adding new extent entries or merging the new space
* with existing extent entries - the space ends up being marked
* in an existing bitmap that covers the new free space range.
*
* To get there, we need to reach the threshold set at
* cache->free_space_ctl->extents_thresh, which currently is
* 256 extents on an x86_64 system at least, and a few other
* conditions (check free_space_cache.c). Instead of making the
* test much longer and complicated, use a "use_bitmap" operation
* that forces use of bitmaps as soon as we have at least 1
* extent entry.
*/
use_bitmap_op = cache->free_space_ctl->op->use_bitmap;
cache->free_space_ctl->op->use_bitmap = test_use_bitmap;
/*
* Extent entry covering free space range [128Mb - 256Kb, 128Mb - 128Kb[
*/
ret = test_add_free_space_entry(cache, 128 * 1024 * 1024 - 256 * 1024,
128 * 1024, 0);
if (ret) {
test_msg("Couldn't add extent entry %d\n", ret);
return ret;
}
/* Bitmap entry covering free space range [128Mb + 512Kb, 256Mb[ */
ret = test_add_free_space_entry(cache, 128 * 1024 * 1024 + 512 * 1024,
128 * 1024 * 1024 - 512 * 1024, 1);
if (ret) {
test_msg("Couldn't add bitmap entry %d\n", ret);
return ret;
}
ret = check_num_extents_and_bitmaps(cache, 2, 1);
if (ret)
return ret;
/*
* Now make only the first 256Kb of the bitmap marked as free, so that
* we end up with only the following ranges marked as free space:
*
* [128Mb - 256Kb, 128Mb - 128Kb[
* [128Mb + 512Kb, 128Mb + 768Kb[
*/
ret = btrfs_remove_free_space(cache,
128 * 1024 * 1024 + 768 * 1024,
128 * 1024 * 1024 - 768 * 1024);
if (ret) {
test_msg("Failed to free part of bitmap space %d\n", ret);
return ret;
}
/* Confirm that only those 2 ranges are marked as free. */
if (!test_check_exists(cache, 128 * 1024 * 1024 - 256 * 1024,
128 * 1024)) {
test_msg("Free space range missing\n");
return -ENOENT;
}
if (!test_check_exists(cache, 128 * 1024 * 1024 + 512 * 1024,
256 * 1024)) {
test_msg("Free space range missing\n");
return -ENOENT;
}
/*
* Confirm that the bitmap range [128Mb + 768Kb, 256Mb[ isn't marked
* as free anymore.
*/
if (test_check_exists(cache, 128 * 1024 * 1024 + 768 * 1024,
128 * 1024 * 1024 - 768 * 1024)) {
test_msg("Bitmap region not removed from space cache\n");
return -EINVAL;
}
/*
* Confirm that the region [128Mb + 256Kb, 128Mb + 512Kb[, which is
* covered by the bitmap, isn't marked as free.
*/
if (test_check_exists(cache, 128 * 1024 * 1024 + 256 * 1024,
256 * 1024)) {
test_msg("Invalid bitmap region marked as free\n");
return -EINVAL;
}
/*
* Confirm that the region [128Mb, 128Mb + 256Kb[, which is covered
* by the bitmap too, isn't marked as free either.
*/
if (test_check_exists(cache, 128 * 1024 * 1024,
256 * 1024)) {
test_msg("Invalid bitmap region marked as free\n");
return -EINVAL;
}
/*
* Now lets mark the region [128Mb, 128Mb + 512Kb[ as free too. But,
* lets make sure the free space cache marks it as free in the bitmap,
* and doesn't insert a new extent entry to represent this region.
*/
ret = btrfs_add_free_space(cache, 128 * 1024 * 1024, 512 * 1024);
if (ret) {
test_msg("Error adding free space: %d\n", ret);
return ret;
}
/* Confirm the region is marked as free. */
if (!test_check_exists(cache, 128 * 1024 * 1024, 512 * 1024)) {
test_msg("Bitmap region not marked as free\n");
return -ENOENT;
}
/*
* Confirm that no new extent entries or bitmap entries were added to
* the cache after adding that free space region.
*/
ret = check_num_extents_and_bitmaps(cache, 2, 1);
if (ret)
return ret;
/*
* Now lets add a small free space region to the right of the previous
* one, which is not contiguous with it and is part of the bitmap too.
* The goal is to test that the bitmap entry space stealing doesn't
* steal this space region.
*/
ret = btrfs_add_free_space(cache, 128 * 1024 * 1024 + 16 * 1024 * 1024,
4096);
if (ret) {
test_msg("Error adding free space: %d\n", ret);
return ret;
}
/*
* Confirm that no new extent entries or bitmap entries were added to
* the cache after adding that free space region.
*/
ret = check_num_extents_and_bitmaps(cache, 2, 1);
if (ret)
return ret;
/*
* Now mark the region [128Mb - 128Kb, 128Mb[ as free too. This will
* expand the range covered by the existing extent entry that represents
* the free space [128Mb - 256Kb, 128Mb - 128Kb[.
*/
ret = btrfs_add_free_space(cache, 128 * 1024 * 1024 - 128 * 1024,
128 * 1024);
if (ret) {
test_msg("Error adding free space: %d\n", ret);
return ret;
}
/* Confirm the region is marked as free. */
if (!test_check_exists(cache, 128 * 1024 * 1024 - 128 * 1024,
128 * 1024)) {
test_msg("Extent region not marked as free\n");
return -ENOENT;
}
/*
* Confirm that our extent entry didn't steal all free space from the
* bitmap, because of the small 4Kb free space region.
*/
ret = check_num_extents_and_bitmaps(cache, 2, 1);
if (ret)
return ret;
/*
* So now we have the range [128Mb - 256Kb, 128Mb + 768Kb[ as free
* space. Without stealing bitmap free space into extent entry space,
* we would have all this free space represented by 2 entries in the
* cache:
*
* extent entry covering range: [128Mb - 256Kb, 128Mb[
* bitmap entry covering range: [128Mb, 128Mb + 768Kb[
*
* Attempting to allocate the whole free space (1Mb) would fail, because
* we can't allocate from multiple entries.
* With the bitmap free space stealing, we get a single extent entry
* that represents the 1Mb free space, and therefore we're able to
* allocate the whole free space at once.
*/
if (!test_check_exists(cache, 128 * 1024 * 1024 - 256 * 1024,
1 * 1024 * 1024)) {
test_msg("Expected region not marked as free\n");
return -ENOENT;
}
if (cache->free_space_ctl->free_space != (1 * 1024 * 1024 + 4096)) {
test_msg("Cache free space is not 1Mb + 4Kb\n");
return -EINVAL;
}
offset = btrfs_find_space_for_alloc(cache,
0, 1 * 1024 * 1024, 0,
&max_extent_size);
if (offset != (128 * 1024 * 1024 - 256 * 1024)) {
test_msg("Failed to allocate 1Mb from space cache, returned offset is: %llu\n",
offset);
return -EINVAL;
}
/* All that remains is a 4Kb free space region in a bitmap. Confirm. */
ret = check_num_extents_and_bitmaps(cache, 1, 1);
if (ret)
return ret;
if (cache->free_space_ctl->free_space != 4096) {
test_msg("Cache free space is not 4Kb\n");
return -EINVAL;
}
offset = btrfs_find_space_for_alloc(cache,
0, 4096, 0,
&max_extent_size);
if (offset != (128 * 1024 * 1024 + 16 * 1024 * 1024)) {
test_msg("Failed to allocate 4Kb from space cache, returned offset is: %llu\n",
offset);
return -EINVAL;
}
ret = check_cache_empty(cache);
if (ret)
return ret;
__btrfs_remove_free_space_cache(cache->free_space_ctl);
/*
* Now test a similar scenario, but where our extent entry is located
* to the right of the bitmap entry, so that we can check that stealing
* space from a bitmap to the front of an extent entry works.
*/
/*
* Extent entry covering free space range [128Mb + 128Kb, 128Mb + 256Kb[
*/
ret = test_add_free_space_entry(cache, 128 * 1024 * 1024 + 128 * 1024,
128 * 1024, 0);
if (ret) {
test_msg("Couldn't add extent entry %d\n", ret);
return ret;
}
/* Bitmap entry covering free space range [0, 128Mb - 512Kb[ */
ret = test_add_free_space_entry(cache, 0,
128 * 1024 * 1024 - 512 * 1024, 1);
if (ret) {
test_msg("Couldn't add bitmap entry %d\n", ret);
return ret;
}
ret = check_num_extents_and_bitmaps(cache, 2, 1);
if (ret)
return ret;
/*
* Now make only the last 256Kb of the bitmap marked as free, so that
* we end up with only the following ranges marked as free space:
*
* [128Mb + 128Kb, 128Mb + 256Kb[
* [128Mb - 768Kb, 128Mb - 512Kb[
*/
ret = btrfs_remove_free_space(cache,
0,
128 * 1024 * 1024 - 768 * 1024);
if (ret) {
test_msg("Failed to free part of bitmap space %d\n", ret);
return ret;
}
/* Confirm that only those 2 ranges are marked as free. */
if (!test_check_exists(cache, 128 * 1024 * 1024 + 128 * 1024,
128 * 1024)) {
test_msg("Free space range missing\n");
return -ENOENT;
}
if (!test_check_exists(cache, 128 * 1024 * 1024 - 768 * 1024,
256 * 1024)) {
test_msg("Free space range missing\n");
return -ENOENT;
}
/*
* Confirm that the bitmap range [0, 128Mb - 768Kb[ isn't marked
* as free anymore.
*/
if (test_check_exists(cache, 0,
128 * 1024 * 1024 - 768 * 1024)) {
test_msg("Bitmap region not removed from space cache\n");
return -EINVAL;
}
/*
* Confirm that the region [128Mb - 512Kb, 128Mb[, which is
* covered by the bitmap, isn't marked as free.
*/
if (test_check_exists(cache, 128 * 1024 * 1024 - 512 * 1024,
512 * 1024)) {
test_msg("Invalid bitmap region marked as free\n");
return -EINVAL;
}
/*
* Now lets mark the region [128Mb - 512Kb, 128Mb[ as free too. But,
* lets make sure the free space cache marks it as free in the bitmap,
* and doesn't insert a new extent entry to represent this region.
*/
ret = btrfs_add_free_space(cache, 128 * 1024 * 1024 - 512 * 1024,
512 * 1024);
if (ret) {
test_msg("Error adding free space: %d\n", ret);
return ret;
}
/* Confirm the region is marked as free. */
if (!test_check_exists(cache, 128 * 1024 * 1024 - 512 * 1024,
512 * 1024)) {
test_msg("Bitmap region not marked as free\n");
return -ENOENT;
}
/*
* Confirm that no new extent entries or bitmap entries were added to
* the cache after adding that free space region.
*/
ret = check_num_extents_and_bitmaps(cache, 2, 1);
if (ret)
return ret;
/*
* Now lets add a small free space region to the left of the previous
* one, which is not contiguous with it and is part of the bitmap too.
* The goal is to test that the bitmap entry space stealing doesn't
* steal this space region.
*/
ret = btrfs_add_free_space(cache, 32 * 1024 * 1024, 8192);
if (ret) {
test_msg("Error adding free space: %d\n", ret);
return ret;
}
/*
* Now mark the region [128Mb, 128Mb + 128Kb[ as free too. This will
* expand the range covered by the existing extent entry that represents
* the free space [128Mb + 128Kb, 128Mb + 256Kb[.
*/
ret = btrfs_add_free_space(cache, 128 * 1024 * 1024, 128 * 1024);
if (ret) {
test_msg("Error adding free space: %d\n", ret);
return ret;
}
/* Confirm the region is marked as free. */
if (!test_check_exists(cache, 128 * 1024 * 1024, 128 * 1024)) {
test_msg("Extent region not marked as free\n");
return -ENOENT;
}
/*
* Confirm that our extent entry didn't steal all free space from the
* bitmap, because of the small 8Kb free space region.
*/
ret = check_num_extents_and_bitmaps(cache, 2, 1);
if (ret)
return ret;
/*
* So now we have the range [128Mb - 768Kb, 128Mb + 256Kb[ as free
* space. Without stealing bitmap free space into extent entry space,
* we would have all this free space represented by 2 entries in the
* cache:
*
* extent entry covering range: [128Mb, 128Mb + 256Kb[
* bitmap entry covering range: [128Mb - 768Kb, 128Mb[
*
* Attempting to allocate the whole free space (1Mb) would fail, because
* we can't allocate from multiple entries.
* With the bitmap free space stealing, we get a single extent entry
* that represents the 1Mb free space, and therefore we're able to
* allocate the whole free space at once.
*/
if (!test_check_exists(cache, 128 * 1024 * 1024 - 768 * 1024,
1 * 1024 * 1024)) {
test_msg("Expected region not marked as free\n");
return -ENOENT;
}
if (cache->free_space_ctl->free_space != (1 * 1024 * 1024 + 8192)) {
test_msg("Cache free space is not 1Mb + 8Kb\n");
return -EINVAL;
}
offset = btrfs_find_space_for_alloc(cache,
0, 1 * 1024 * 1024, 0,
&max_extent_size);
if (offset != (128 * 1024 * 1024 - 768 * 1024)) {
test_msg("Failed to allocate 1Mb from space cache, returned offset is: %llu\n",
offset);
return -EINVAL;
}
/* All that remains is a 8Kb free space region in a bitmap. Confirm. */
ret = check_num_extents_and_bitmaps(cache, 1, 1);
if (ret)
return ret;
if (cache->free_space_ctl->free_space != 8192) {
test_msg("Cache free space is not 8Kb\n");
return -EINVAL;
}
offset = btrfs_find_space_for_alloc(cache,
0, 8192, 0,
&max_extent_size);
if (offset != (32 * 1024 * 1024)) {
test_msg("Failed to allocate 8Kb from space cache, returned offset is: %llu\n",
offset);
return -EINVAL;
}
ret = check_cache_empty(cache);
if (ret)
return ret;
cache->free_space_ctl->op->use_bitmap = use_bitmap_op;
__btrfs_remove_free_space_cache(cache->free_space_ctl);
return 0;
}
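/*
 * Entry point: build the dummy block group and run the extent-only,
 * bitmap-only, mixed and bitmap-stealing tests against it in sequence.
 */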
int btrfs_test_free_space_cache(void)
{
struct btrfs_block_group_cache *cache;
int ret;
test_msg("Running btrfs free space cache tests\n");
cache = init_test_block_group();
if (!cache) {
test_msg("Couldn't run the tests\n");
return 0;
}
ret = test_extents(cache);
if (ret)
goto out;
ret = test_bitmaps(cache);
if (ret)
goto out;
ret = test_bitmaps_and_extents(cache);
if (ret)
goto out;
ret = test_steal_space_from_bitmap_to_extent(cache);
out:
__btrfs_remove_free_space_cache(cache->free_space_ctl);
kfree(cache->free_space_ctl);
kfree(cache);
test_msg("Free space cache tests finished\n");
return ret;
}


@@ -0,0 +1,928 @@
/*
* Copyright (C) 2013 Fusion IO. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
#include "btrfs-tests.h"
#include "../ctree.h"
#include "../btrfs_inode.h"
#include "../disk-io.h"
#include "../extent_io.h"
#include "../volumes.h"
static void insert_extent(struct btrfs_root *root, u64 start, u64 len,
u64 ram_bytes, u64 offset, u64 disk_bytenr,
u64 disk_len, u32 type, u8 compression, int slot)
{
struct btrfs_path path;
struct btrfs_file_extent_item *fi;
struct extent_buffer *leaf = root->node;
struct btrfs_key key;
u32 value_len = sizeof(struct btrfs_file_extent_item);
if (type == BTRFS_FILE_EXTENT_INLINE)
value_len += len;
memset(&path, 0, sizeof(path));
path.nodes[0] = leaf;
path.slots[0] = slot;
key.objectid = BTRFS_FIRST_FREE_OBJECTID;
key.type = BTRFS_EXTENT_DATA_KEY;
key.offset = start;
setup_items_for_insert(root, &path, &key, &value_len, value_len,
value_len + sizeof(struct btrfs_item), 1);
fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
btrfs_set_file_extent_generation(leaf, fi, 1);
btrfs_set_file_extent_type(leaf, fi, type);
btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_len);
btrfs_set_file_extent_offset(leaf, fi, offset);
btrfs_set_file_extent_num_bytes(leaf, fi, len);
btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
btrfs_set_file_extent_compression(leaf, fi, compression);
btrfs_set_file_extent_encryption(leaf, fi, 0);
btrfs_set_file_extent_other_encoding(leaf, fi, 0);
}
static void insert_inode_item_key(struct btrfs_root *root)
{
struct btrfs_path path;
struct extent_buffer *leaf = root->node;
struct btrfs_key key;
u32 value_len = 0;
memset(&path, 0, sizeof(path));
path.nodes[0] = leaf;
path.slots[0] = 0;
key.objectid = BTRFS_INODE_ITEM_KEY;
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
setup_items_for_insert(root, &path, &key, &value_len, value_len,
value_len + sizeof(struct btrfs_item), 1);
}
/*
* Build the most complicated map of extents the earth has ever seen. We want
* this so we can test all of the corner cases of btrfs_get_extent. Here is a
* diagram of how the extents will look, though this layout may not actually be
* possible; we still want to make sure everything acts normally (the last
* number is not inclusive).
*
* [0 - 5][5 - 6][6 - 10][10 - 4096][ 4096 - 8192 ][8192 - 12288]
* [hole ][inline][ hole ][ regular ][regular1 split][ hole ]
*
* [ 12288 - 20480][20480 - 24576][ 24576 - 28672 ][28672 - 36864][36864 - 45056]
* [regular1 split][ prealloc1 ][prealloc1 written][ prealloc1 ][ compressed ]
*
* [45056 - 49152][49152-53248][53248-61440][61440-65536][ 65536 - 81920 ]
* [ compressed1 ][ regular ][compressed1][ regular ][ hole but no extent]
*
* [81920-86016]
* [ regular ]
*/
static void setup_file_extents(struct btrfs_root *root)
{
int slot = 0;
u64 disk_bytenr = 1 * 1024 * 1024;
u64 offset = 0;
/* First we want a hole */
insert_extent(root, offset, 5, 5, 0, 0, 0, BTRFS_FILE_EXTENT_REG, 0,
slot);
slot++;
offset += 5;
/*
* Now we want an inline extent, I don't think this is possible but hey
* why not? Also keep in mind if we have an inline extent it counts as
* the whole first page. If we were to expand it we would have to cow
* and we wouldn't have an inline extent anymore.
*/
insert_extent(root, offset, 1, 1, 0, 0, 0, BTRFS_FILE_EXTENT_INLINE, 0,
slot);
slot++;
offset = 4096;
/* Now another hole */
insert_extent(root, offset, 4, 4, 0, 0, 0, BTRFS_FILE_EXTENT_REG, 0,
slot);
slot++;
offset += 4;
/* Now for a regular extent */
insert_extent(root, offset, 4095, 4095, 0, disk_bytenr, 4096,
BTRFS_FILE_EXTENT_REG, 0, slot);
slot++;
disk_bytenr += 4096;
offset += 4095;
/*
* Now for 3 extents that were split from a hole punch so we test
* offsets properly.
*/
insert_extent(root, offset, 4096, 16384, 0, disk_bytenr, 16384,
BTRFS_FILE_EXTENT_REG, 0, slot);
slot++;
offset += 4096;
insert_extent(root, offset, 4096, 4096, 0, 0, 0, BTRFS_FILE_EXTENT_REG,
0, slot);
slot++;
offset += 4096;
insert_extent(root, offset, 8192, 16384, 8192, disk_bytenr, 16384,
BTRFS_FILE_EXTENT_REG, 0, slot);
slot++;
offset += 8192;
disk_bytenr += 16384;
/* Now for a unwritten prealloc extent */
insert_extent(root, offset, 4096, 4096, 0, disk_bytenr, 4096,
BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
slot++;
offset += 4096;
/*
* We want to jack up disk_bytenr a little more so the em stuff doesn't
* merge our records.
*/
disk_bytenr += 8192;
/*
* Now for a partially written prealloc extent, basically the same as
* the hole punch example above. Ram_bytes never changes when you mark
* extents written btw.
*/
insert_extent(root, offset, 4096, 16384, 0, disk_bytenr, 16384,
BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
slot++;
offset += 4096;
insert_extent(root, offset, 4096, 16384, 4096, disk_bytenr, 16384,
BTRFS_FILE_EXTENT_REG, 0, slot);
slot++;
offset += 4096;
insert_extent(root, offset, 8192, 16384, 8192, disk_bytenr, 16384,
BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
slot++;
offset += 8192;
disk_bytenr += 16384;
/* Now a normal compressed extent */
insert_extent(root, offset, 8192, 8192, 0, disk_bytenr, 4096,
BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot);
slot++;
offset += 8192;
/* No merges */
disk_bytenr += 8192;
/* Now a split compressed extent */
insert_extent(root, offset, 4096, 16384, 0, disk_bytenr, 4096,
BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot);
slot++;
offset += 4096;
insert_extent(root, offset, 4096, 4096, 0, disk_bytenr + 4096, 4096,
BTRFS_FILE_EXTENT_REG, 0, slot);
slot++;
offset += 4096;
insert_extent(root, offset, 8192, 16384, 8192, disk_bytenr, 4096,
BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot);
slot++;
offset += 8192;
disk_bytenr += 8192;
/* Now extents that have a hole but no hole extent */
insert_extent(root, offset, 4096, 4096, 0, disk_bytenr, 4096,
BTRFS_FILE_EXTENT_REG, 0, slot);
slot++;
offset += 16384;
disk_bytenr += 4096;
insert_extent(root, offset, 4096, 4096, 0, disk_bytenr, 4096,
BTRFS_FILE_EXTENT_REG, 0, slot);
}
static unsigned long prealloc_only = 0;
static unsigned long compressed_only = 0;
static unsigned long vacancy_only = 0;
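/*
 * Expected em->flags masks (vacancy, compressed, prealloc) that the
 * checks below compare against; they are initialized before the tests
 * run.
 */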
static noinline int test_btrfs_get_extent(void)
{
struct inode *inode = NULL;
struct btrfs_root *root = NULL;
struct extent_map *em = NULL;
u64 orig_start;
u64 disk_bytenr;
u64 offset;
int ret = -ENOMEM;
inode = btrfs_new_test_inode();
if (!inode) {
test_msg("Couldn't allocate inode\n");
return ret;
}
BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
BTRFS_I(inode)->location.offset = 0;
root = btrfs_alloc_dummy_root();
if (IS_ERR(root)) {
test_msg("Couldn't allocate root\n");
goto out;
}
/*
* We do this since btrfs_get_extent wants to assign em->bdev to
* root->fs_info->fs_devices->latest_bdev.
*/
root->fs_info = btrfs_alloc_dummy_fs_info();
if (!root->fs_info) {
test_msg("Couldn't allocate dummy fs info\n");
goto out;
}
root->node = alloc_dummy_extent_buffer(0, 4096);
if (!root->node) {
test_msg("Couldn't allocate dummy buffer\n");
goto out;
}
/*
* We will just free a dummy node if its ref count is 2, so we need an
* extra ref so our searches don't accidentally release our page.
*/
extent_buffer_get(root->node);
btrfs_set_header_nritems(root->node, 0);
btrfs_set_header_level(root->node, 0);
ret = -EINVAL;
/* First with no extents */
BTRFS_I(inode)->root = root;
em = btrfs_get_extent(inode, NULL, 0, 0, 4096, 0);
if (IS_ERR(em)) {
em = NULL;
test_msg("Got an error when we shouldn't have\n");
goto out;
}
if (em->block_start != EXTENT_MAP_HOLE) {
test_msg("Expected a hole, got %llu\n", em->block_start);
goto out;
}
if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
test_msg("Vacancy flag wasn't set properly\n");
goto out;
}
free_extent_map(em);
btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
/*
* All of the magic numbers are based on the mapping setup in
* setup_file_extents, so if you change anything there you need to
* update the comment and update the expected values below.
*/
setup_file_extents(root);
em = btrfs_get_extent(inode, NULL, 0, 0, (u64)-1, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
}
if (em->block_start != EXTENT_MAP_HOLE) {
test_msg("Expected a hole, got %llu\n", em->block_start);
goto out;
}
if (em->start != 0 || em->len != 5) {
test_msg("Unexpected extent wanted start 0 len 5, got start "
"%llu len %llu\n", em->start, em->len);
goto out;
}
if (em->flags != 0) {
test_msg("Unexpected flags set, want 0 have %lu\n", em->flags);
goto out;
}
offset = em->start + em->len;
free_extent_map(em);
em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
}
if (em->block_start != EXTENT_MAP_INLINE) {
test_msg("Expected an inline, got %llu\n", em->block_start);
goto out;
}
if (em->start != offset || em->len != 4091) {
test_msg("Unexpected extent wanted start %llu len 1, got start "
"%llu len %llu\n", offset, em->start, em->len);
goto out;
}
if (em->flags != 0) {
test_msg("Unexpected flags set, want 0 have %lu\n", em->flags);
goto out;
}
/*
* We don't test anything else for inline since it doesn't get set
* unless we have a page for it to write into. Maybe we should change
* this?
*/
offset = em->start + em->len;
free_extent_map(em);
em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
}
if (em->block_start != EXTENT_MAP_HOLE) {
test_msg("Expected a hole, got %llu\n", em->block_start);
goto out;
}
if (em->start != offset || em->len != 4) {
test_msg("Unexpected extent wanted start %llu len 4, got start "
"%llu len %llu\n", offset, em->start, em->len);
goto out;
}
if (em->flags != 0) {
test_msg("Unexpected flags set, want 0 have %lu\n", em->flags);
goto out;
}
offset = em->start + em->len;
free_extent_map(em);
/* Regular extent */
em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
}
if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
if (em->start != offset || em->len != 4095) {
test_msg("Unexpected extent wanted start %llu len 4095, got "
"start %llu len %llu\n", offset, em->start, em->len);
goto out;
}
if (em->flags != 0) {
test_msg("Unexpected flags set, want 0 have %lu\n", em->flags);
goto out;
}
if (em->orig_start != em->start) {
test_msg("Wrong orig offset, want %llu, have %llu\n", em->start,
em->orig_start);
goto out;
}
offset = em->start + em->len;
free_extent_map(em);
/* The next 3 are split extents */
em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
}
if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
if (em->start != offset || em->len != 4096) {
test_msg("Unexpected extent wanted start %llu len 4096, got "
"start %llu len %llu\n", offset, em->start, em->len);
goto out;
}
if (em->flags != 0) {
test_msg("Unexpected flags set, want 0 have %lu\n", em->flags);
goto out;
}
if (em->orig_start != em->start) {
test_msg("Wrong orig offset, want %llu, have %llu\n", em->start,
em->orig_start);
goto out;
}
disk_bytenr = em->block_start;
orig_start = em->start;
offset = em->start + em->len;
free_extent_map(em);
em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
}
if (em->block_start != EXTENT_MAP_HOLE) {
test_msg("Expected a hole, got %llu\n", em->block_start);
goto out;
}
if (em->start != offset || em->len != 4096) {
test_msg("Unexpected extent wanted start %llu len 4096, got "
"start %llu len %llu\n", offset, em->start, em->len);
goto out;
}
if (em->flags != 0) {
test_msg("Unexpected flags set, want 0 have %lu\n", em->flags);
goto out;
}
offset = em->start + em->len;
free_extent_map(em);
em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
}
if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
if (em->start != offset || em->len != 8192) {
test_msg("Unexpected extent wanted start %llu len 8192, got "
"start %llu len %llu\n", offset, em->start, em->len);
goto out;
}
if (em->flags != 0) {
test_msg("Unexpected flags set, want 0 have %lu\n", em->flags);
goto out;
}
if (em->orig_start != orig_start) {
test_msg("Wrong orig offset, want %llu, have %llu\n",
orig_start, em->orig_start);
goto out;
}
disk_bytenr += (em->start - orig_start);
if (em->block_start != disk_bytenr) {
test_msg("Wrong block start, want %llu, have %llu\n",
disk_bytenr, em->block_start);
goto out;
}
offset = em->start + em->len;
free_extent_map(em);
/* Prealloc extent */
em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
}
if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
if (em->start != offset || em->len != 4096) {
test_msg("Unexpected extent wanted start %llu len 4096, got "
"start %llu len %llu\n", offset, em->start, em->len);
goto out;
}
if (em->flags != prealloc_only) {
test_msg("Unexpected flags set, want %lu have %lu\n",
prealloc_only, em->flags);
goto out;
}
if (em->orig_start != em->start) {
test_msg("Wrong orig offset, want %llu, have %llu\n", em->start,
em->orig_start);
goto out;
}
offset = em->start + em->len;
free_extent_map(em);
/* The next 3 are a half written prealloc extent */
em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
}
if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
if (em->start != offset || em->len != 4096) {
test_msg("Unexpected extent wanted start %llu len 4096, got "
"start %llu len %llu\n", offset, em->start, em->len);
goto out;
}
if (em->flags != prealloc_only) {
test_msg("Unexpected flags set, want %lu have %lu\n",
prealloc_only, em->flags);
goto out;
}
if (em->orig_start != em->start) {
test_msg("Wrong orig offset, want %llu, have %llu\n", em->start,
em->orig_start);
goto out;
}
disk_bytenr = em->block_start;
orig_start = em->start;
offset = em->start + em->len;
free_extent_map(em);
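/*
* The chunk of the prealloc extent that has actually been written, so the
* prealloc flag must be gone.
*/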
em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
}
if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
if (em->start != offset || em->len != 4096) {
test_msg("Unexpected extent wanted start %llu len 4096, got "
"start %llu len %llu\n", offset, em->start, em->len);
goto out;
}
if (em->flags != 0) {
test_msg("Unexpected flags set, want 0 have %lu\n", em->flags);
goto out;
}
if (em->orig_start != orig_start) {
test_msg("Unexpected orig offset, wanted %llu, have %llu\n",
orig_start, em->orig_start);
goto out;
}
if (em->block_start != (disk_bytenr + (em->start - em->orig_start))) {
test_msg("Unexpected block start, wanted %llu, have %llu\n",
disk_bytenr + (em->start - em->orig_start),
em->block_start);
goto out;
}
offset = em->start + em->len;
free_extent_map(em);
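/* The still unwritten tail of the prealloc extent */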
em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
}
if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
if (em->start != offset || em->len != 8192) {
test_msg("Unexpected extent wanted start %llu len 8192, got "
"start %llu len %llu\n", offset, em->start, em->len);
goto out;
}
if (em->flags != prealloc_only) {
test_msg("Unexpected flags set, want %lu have %lu\n",
prealloc_only, em->flags);
goto out;
}
if (em->orig_start != orig_start) {
test_msg("Wrong orig offset, want %llu, have %llu\n", orig_start,
em->orig_start);
goto out;
}
if (em->block_start != (disk_bytenr + (em->start - em->orig_start))) {
test_msg("Unexpected block start, wanted %llu, have %llu\n",
disk_bytenr + (em->start - em->orig_start),
em->block_start);
goto out;
}
offset = em->start + em->len;
free_extent_map(em);
/* Now for the compressed extent */
em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
}
if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
if (em->start != offset || em->len != 8192) {
test_msg("Unexpected extent wanted start %llu len 8192, got "
"start %llu len %llu\n", offset, em->start, em->len);
goto out;
}
if (em->flags != compressed_only) {
test_msg("Unexpected flags set, want %lu have %lu\n",
compressed_only, em->flags);
goto out;
}
if (em->orig_start != em->start) {
test_msg("Wrong orig offset, want %llu, have %llu\n",
em->start, em->orig_start);
goto out;
}
if (em->compress_type != BTRFS_COMPRESS_ZLIB) {
test_msg("Unexpected compress type, wanted %d, got %d\n",
BTRFS_COMPRESS_ZLIB, em->compress_type);
goto out;
}
offset = em->start + em->len;
free_extent_map(em);
/* Split compressed extent */
em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
}
if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
if (em->start != offset || em->len != 4096) {
test_msg("Unexpected extent wanted start %llu len 4096, got "
"start %llu len %llu\n", offset, em->start, em->len);
goto out;
}
if (em->flags != compressed_only) {
test_msg("Unexpected flags set, want %lu have %lu\n",
compressed_only, em->flags);
goto out;
}
if (em->orig_start != em->start) {
test_msg("Wrong orig offset, want %llu, have %llu\n",
em->start, em->orig_start);
goto out;
}
if (em->compress_type != BTRFS_COMPRESS_ZLIB) {
test_msg("Unexpected compress type, wanted %d, got %d\n",
BTRFS_COMPRESS_ZLIB, em->compress_type);
goto out;
}
disk_bytenr = em->block_start;
orig_start = em->start;
offset = em->start + em->len;
free_extent_map(em);
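/* A plain uncompressed extent written into the middle of the compressed extent */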
em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
}
if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
if (em->start != offset || em->len != 4096) {
test_msg("Unexpected extent wanted start %llu len 4096, got "
"start %llu len %llu\n", offset, em->start, em->len);
goto out;
}
if (em->flags != 0) {
test_msg("Unexpected flags set, want 0 have %lu\n", em->flags);
goto out;
}
if (em->orig_start != em->start) {
test_msg("Wrong orig offset, want %llu, have %llu\n", em->start,
em->orig_start);
goto out;
}
offset = em->start + em->len;
free_extent_map(em);
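/*
* The second piece of the split compressed extent, both pieces point at the
* same compressed extent on disk so block_start must match the first piece.
*/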
em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
}
if (em->block_start != disk_bytenr) {
test_msg("Block start does not match, want %llu got %llu\n",
disk_bytenr, em->block_start);
goto out;
}
if (em->start != offset || em->len != 8192) {
test_msg("Unexpected extent wanted start %llu len 8192, got "
"start %llu len %llu\n", offset, em->start, em->len);
goto out;
}
if (em->flags != compressed_only) {
test_msg("Unexpected flags set, want %lu have %lu\n",
compressed_only, em->flags);
goto out;
}
if (em->orig_start != orig_start) {
test_msg("Wrong orig offset, want %llu, have %llu\n",
em->start, orig_start);
goto out;
}
if (em->compress_type != BTRFS_COMPRESS_ZLIB) {
test_msg("Unexpected compress type, wanted %d, got %d\n",
BTRFS_COMPRESS_ZLIB, em->compress_type);
goto out;
}
offset = em->start + em->len;
free_extent_map(em);
/* A hole between regular extents but no hole extent */
em = btrfs_get_extent(inode, NULL, 0, offset + 6, 4096, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
}
if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
if (em->start != offset || em->len != 4096) {
test_msg("Unexpected extent wanted start %llu len 4096, got "
"start %llu len %llu\n", offset, em->start, em->len);
goto out;
}
if (em->flags != 0) {
test_msg("Unexpected flags set, want 0 have %lu\n", em->flags);
goto out;
}
if (em->orig_start != em->start) {
test_msg("Wrong orig offset, want %llu, have %llu\n", em->start,
em->orig_start);
goto out;
}
offset = em->start + em->len;
free_extent_map(em);
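/*
* Now the implicit hole itself, it has no file extent item backing it so it
* must come back flagged as a vacancy.
*/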
em = btrfs_get_extent(inode, NULL, 0, offset, 4096 * 1024, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
}
if (em->block_start != EXTENT_MAP_HOLE) {
test_msg("Expected a hole extent, got %llu\n", em->block_start);
goto out;
}
/*
* Currently we just return the length that was requested rather than the
* length of the actual hole. If this changes we'll have to update this
* test.
*/
if (em->start != offset || em->len != 12288) {
test_msg("Unexpected extent wanted start %llu len 12288, got "
"start %llu len %llu\n", offset, em->start, em->len);
goto out;
}
if (em->flags != vacancy_only) {
test_msg("Unexpected flags set, want %lu have %lu\n",
vacancy_only, em->flags);
goto out;
}
if (em->orig_start != em->start) {
test_msg("Wrong orig offset, want %llu, have %llu\n", em->start,
em->orig_start);
goto out;
}
offset = em->start + em->len;
free_extent_map(em);
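/* And finally the regular extent that follows the implicit hole */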
em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
}
if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
if (em->start != offset || em->len != 4096) {
test_msg("Unexpected extent wanted start %llu len 4096, got "
"start %llu len %llu\n", offset, em->start, em->len);
goto out;
}
if (em->flags != 0) {
test_msg("Unexpected flags set, want 0 have %lu\n", em->flags);
goto out;
}
if (em->orig_start != em->start) {
test_msg("Wrong orig offset, want %llu, have %llu\n", em->start,
em->orig_start);
goto out;
}
ret = 0;
out:
if (!IS_ERR(em))
free_extent_map(em);
iput(inode);
btrfs_free_dummy_root(root);
return ret;
}
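/*
* Make sure a lookup that starts inside a leading hole returns the hole first
* and then the real extent that follows it.
*/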
static int test_hole_first(void)
{
struct inode *inode = NULL;
struct btrfs_root *root = NULL;
struct extent_map *em = NULL;
int ret = -ENOMEM;
inode = btrfs_new_test_inode();
if (!inode) {
test_msg("Couldn't allocate inode\n");
return ret;
}
BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
BTRFS_I(inode)->location.offset = 0;
root = btrfs_alloc_dummy_root();
if (IS_ERR(root)) {
test_msg("Couldn't allocate root\n");
goto out;
}
root->fs_info = btrfs_alloc_dummy_fs_info();
if (!root->fs_info) {
test_msg("Couldn't allocate dummy fs info\n");
goto out;
}
root->node = alloc_dummy_extent_buffer(0, 4096);
if (!root->node) {
test_msg("Couldn't allocate dummy buffer\n");
goto out;
}
extent_buffer_get(root->node);
btrfs_set_header_nritems(root->node, 0);
btrfs_set_header_level(root->node, 0);
BTRFS_I(inode)->root = root;
ret = -EINVAL;
/*
* Need a blank inode item here just so we don't confuse
* btrfs_get_extent.
*/
insert_inode_item_key(root);
insert_extent(root, 4096, 4096, 4096, 0, 4096, 4096,
BTRFS_FILE_EXTENT_REG, 0, 1);
em = btrfs_get_extent(inode, NULL, 0, 0, 8192, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
}
if (em->block_start != EXTENT_MAP_HOLE) {
test_msg("Expected a hole, got %llu\n", em->block_start);
goto out;
}
if (em->start != 0 || em->len != 4096) {
test_msg("Unexpected extent wanted start 0 len 4096, got start "
"%llu len %llu\n", em->start, em->len);
goto out;
}
if (em->flags != vacancy_only) {
test_msg("Wrong flags, wanted %lu, have %lu\n", vacancy_only,
em->flags);
goto out;
}
free_extent_map(em);
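/* Now the real extent that sits right after the leading hole */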
em = btrfs_get_extent(inode, NULL, 0, 4096, 8192, 0);
if (IS_ERR(em)) {
test_msg("Got an error when we shouldn't have\n");
goto out;
}
if (em->block_start != 4096) {
test_msg("Expected a real extent, got %llu\n", em->block_start);
goto out;
}
if (em->start != 4096 || em->len != 4096) {
test_msg("Unexpected extent wanted start 4096 len 4096, got "
"start %llu len %llu\n", em->start, em->len);
goto out;
}
if (em->flags != 0) {
test_msg("Unexpected flags set, wanted 0 got %lu\n",
em->flags);
goto out;
}
ret = 0;
out:
if (!IS_ERR(em))
free_extent_map(em);
iput(inode);
btrfs_free_dummy_root(root);
return ret;
}
int btrfs_test_inodes(void)
{
int ret;
set_bit(EXTENT_FLAG_COMPRESSED, &compressed_only);
set_bit(EXTENT_FLAG_VACANCY, &vacancy_only);
set_bit(EXTENT_FLAG_PREALLOC, &prealloc_only);
test_msg("Running btrfs_get_extent tests\n");
ret = test_btrfs_get_extent();
if (ret)
return ret;
test_msg("Running hole first btrfs_get_extent test\n");
return test_hole_first();
}

View file

@ -0,0 +1,470 @@
/*
* Copyright (C) 2013 Facebook. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*/
#include "btrfs-tests.h"
#include "../ctree.h"
#include "../transaction.h"
#include "../disk-io.h"
#include "../qgroup.h"
static void init_dummy_trans(struct btrfs_trans_handle *trans)
{
memset(trans, 0, sizeof(*trans));
trans->transid = 1;
INIT_LIST_HEAD(&trans->qgroup_ref_list);
trans->type = __TRANS_DUMMY;
}
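/*
* Insert a fake EXTENT_ITEM for a tree block with a single inline backref:
* a shared block ref if @parent is set, otherwise a tree block ref keyed on
* @root_objectid.
*/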
static int insert_normal_tree_ref(struct btrfs_root *root, u64 bytenr,
u64 num_bytes, u64 parent, u64 root_objectid)
{
struct btrfs_trans_handle trans;
struct btrfs_extent_item *item;
struct btrfs_extent_inline_ref *iref;
struct btrfs_tree_block_info *block_info;
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_key ins;
u32 size = sizeof(*item) + sizeof(*iref) + sizeof(*block_info);
int ret;
init_dummy_trans(&trans);
ins.objectid = bytenr;
ins.type = BTRFS_EXTENT_ITEM_KEY;
ins.offset = num_bytes;
path = btrfs_alloc_path();
if (!path) {
test_msg("Couldn't allocate path\n");
return -ENOMEM;
}
path->leave_spinning = 1;
ret = btrfs_insert_empty_item(&trans, root, path, &ins, size);
if (ret) {
test_msg("Couldn't insert ref %d\n", ret);
btrfs_free_path(path);
return ret;
}
leaf = path->nodes[0];
item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
btrfs_set_extent_refs(leaf, item, 1);
btrfs_set_extent_generation(leaf, item, 1);
btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_TREE_BLOCK);
block_info = (struct btrfs_tree_block_info *)(item + 1);
btrfs_set_tree_block_level(leaf, block_info, 1);
iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
if (parent > 0) {
btrfs_set_extent_inline_ref_type(leaf, iref,
BTRFS_SHARED_BLOCK_REF_KEY);
btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
} else {
btrfs_set_extent_inline_ref_type(leaf, iref, BTRFS_TREE_BLOCK_REF_KEY);
btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
}
btrfs_free_path(path);
return 0;
}
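/*
* Bump the refcount on an existing extent item and insert a standalone backref
* item pointing at either @parent or @root_objectid.
*/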
static int add_tree_ref(struct btrfs_root *root, u64 bytenr, u64 num_bytes,
u64 parent, u64 root_objectid)
{
struct btrfs_trans_handle trans;
struct btrfs_extent_item *item;
struct btrfs_path *path;
struct btrfs_key key;
u64 refs;
int ret;
init_dummy_trans(&trans);
key.objectid = bytenr;
key.type = BTRFS_EXTENT_ITEM_KEY;
key.offset = num_bytes;
path = btrfs_alloc_path();
if (!path) {
test_msg("Couldn't allocate path\n");
return -ENOMEM;
}
path->leave_spinning = 1;
ret = btrfs_search_slot(&trans, root, &key, path, 0, 1);
if (ret) {
test_msg("Couldn't find extent ref\n");
btrfs_free_path(path);
return ret;
}
item = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_extent_item);
refs = btrfs_extent_refs(path->nodes[0], item);
btrfs_set_extent_refs(path->nodes[0], item, refs + 1);
btrfs_release_path(path);
key.objectid = bytenr;
if (parent) {
key.type = BTRFS_SHARED_BLOCK_REF_KEY;
key.offset = parent;
} else {
key.type = BTRFS_TREE_BLOCK_REF_KEY;
key.offset = root_objectid;
}
ret = btrfs_insert_empty_item(&trans, root, path, &key, 0);
if (ret)
test_msg("Failed to insert backref\n");
btrfs_free_path(path);
return ret;
}
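/* Delete the extent item for @bytenr/@num_bytes outright */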
static int remove_extent_item(struct btrfs_root *root, u64 bytenr,
u64 num_bytes)
{
struct btrfs_trans_handle trans;
struct btrfs_key key;
struct btrfs_path *path;
int ret;
init_dummy_trans(&trans);
key.objectid = bytenr;
key.type = BTRFS_EXTENT_ITEM_KEY;
key.offset = num_bytes;
path = btrfs_alloc_path();
if (!path) {
test_msg("Couldn't allocate path\n");
return -ENOMEM;
}
path->leave_spinning = 1;
ret = btrfs_search_slot(&trans, root, &key, path, -1, 1);
if (ret) {
test_msg("Didn't find our key %d\n", ret);
btrfs_free_path(path);
return ret;
}
btrfs_del_item(&trans, root, path);
btrfs_free_path(path);
return 0;
}
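/*
* Drop one reference: decrement the extent item's refcount and delete the
* matching backref item.
*/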
static int remove_extent_ref(struct btrfs_root *root, u64 bytenr,
u64 num_bytes, u64 parent, u64 root_objectid)
{
struct btrfs_trans_handle trans;
struct btrfs_extent_item *item;
struct btrfs_path *path;
struct btrfs_key key;
u64 refs;
int ret;
init_dummy_trans(&trans);
key.objectid = bytenr;
key.type = BTRFS_EXTENT_ITEM_KEY;
key.offset = num_bytes;
path = btrfs_alloc_path();
if (!path) {
test_msg("Couldn't allocate path\n");
return -ENOMEM;
}
path->leave_spinning = 1;
ret = btrfs_search_slot(&trans, root, &key, path, 0, 1);
if (ret) {
test_msg("Couldn't find extent ref\n");
btrfs_free_path(path);
return ret;
}
item = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_extent_item);
refs = btrfs_extent_refs(path->nodes[0], item);
btrfs_set_extent_refs(path->nodes[0], item, refs - 1);
btrfs_release_path(path);
key.objectid = bytenr;
if (parent) {
key.type = BTRFS_SHARED_BLOCK_REF_KEY;
key.offset = parent;
} else {
key.type = BTRFS_TREE_BLOCK_REF_KEY;
key.offset = root_objectid;
}
ret = btrfs_search_slot(&trans, root, &key, path, -1, 1);
if (ret) {
test_msg("Couldn't find backref %d\n", ret);
btrfs_free_path(path);
return ret;
}
btrfs_del_item(&trans, root, path);
btrfs_free_path(path);
return ret;
}
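/*
* Add an extent referenced by a single root and make sure both the referenced
* and exclusive counters see it, then remove it and make sure they drop back
* to zero.
*/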
static int test_no_shared_qgroup(struct btrfs_root *root)
{
struct btrfs_trans_handle trans;
struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
init_dummy_trans(&trans);
test_msg("Qgroup basic add\n");
ret = btrfs_create_qgroup(NULL, fs_info, 5, NULL);
if (ret) {
test_msg("Couldn't create a qgroup %d\n", ret);
return ret;
}
ret = btrfs_qgroup_record_ref(&trans, fs_info, 5, 4096, 4096,
BTRFS_QGROUP_OPER_ADD_EXCL, 0);
if (ret) {
test_msg("Couldn't add space to a qgroup %d\n", ret);
return ret;
}
ret = insert_normal_tree_ref(root, 4096, 4096, 0, 5);
if (ret)
return ret;
ret = btrfs_delayed_qgroup_accounting(&trans, fs_info);
if (ret) {
test_msg("Delayed qgroup accounting failed %d\n", ret);
return ret;
}
if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 4096)) {
test_msg("Qgroup counts didn't match expected values\n");
return -EINVAL;
}
ret = remove_extent_item(root, 4096, 4096);
if (ret)
return -EINVAL;
ret = btrfs_qgroup_record_ref(&trans, fs_info, 5, 4096, 4096,
BTRFS_QGROUP_OPER_SUB_EXCL, 0);
if (ret) {
test_msg("Couldn't remove space from the qgroup %d\n", ret);
return -EINVAL;
}
ret = btrfs_delayed_qgroup_accounting(&trans, fs_info);
if (ret) {
test_msg("Qgroup accounting failed %d\n", ret);
return -EINVAL;
}
if (btrfs_verify_qgroup_counts(fs_info, 5, 0, 0)) {
test_msg("Qgroup counts didn't match expected values\n");
return -EINVAL;
}
return 0;
}
/*
* Add a ref for two different roots to make sure the shared value comes out
* right, then remove one of the roots and make sure the exclusive count is
* adjusted properly.
*/
static int test_multiple_refs(struct btrfs_root *root)
{
struct btrfs_trans_handle trans;
struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
init_dummy_trans(&trans);
test_msg("Qgroup multiple refs test\n");
/* We have 5 created already from the previous test */
ret = btrfs_create_qgroup(NULL, fs_info, 256, NULL);
if (ret) {
test_msg("Couldn't create a qgroup %d\n", ret);
return ret;
}
ret = insert_normal_tree_ref(root, 4096, 4096, 0, 5);
if (ret)
return ret;
ret = btrfs_qgroup_record_ref(&trans, fs_info, 5, 4096, 4096,
BTRFS_QGROUP_OPER_ADD_EXCL, 0);
if (ret) {
test_msg("Couldn't add space to a qgroup %d\n", ret);
return ret;
}
ret = btrfs_delayed_qgroup_accounting(&trans, fs_info);
if (ret) {
test_msg("Delayed qgroup accounting failed %d\n", ret);
return ret;
}
if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 4096)) {
test_msg("Qgroup counts didn't match expected values\n");
return -EINVAL;
}
ret = add_tree_ref(root, 4096, 4096, 0, 256);
if (ret)
return ret;
ret = btrfs_qgroup_record_ref(&trans, fs_info, 256, 4096, 4096,
BTRFS_QGROUP_OPER_ADD_SHARED, 0);
if (ret) {
test_msg("Qgroup record ref failed %d\n", ret);
return ret;
}
ret = btrfs_delayed_qgroup_accounting(&trans, fs_info);
if (ret) {
test_msg("Qgroup accounting failed %d\n", ret);
return ret;
}
if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 0)) {
test_msg("Qgroup counts didn't match expected values\n");
return -EINVAL;
}
if (btrfs_verify_qgroup_counts(fs_info, 256, 4096, 0)) {
test_msg("Qgroup counts didn't match expected values\n");
return -EINVAL;
}
ret = remove_extent_ref(root, 4096, 4096, 0, 256);
if (ret)
return ret;
ret = btrfs_qgroup_record_ref(&trans, fs_info, 256, 4096, 4096,
BTRFS_QGROUP_OPER_SUB_SHARED, 0);
if (ret) {
test_msg("Qgroup record ref failed %d\n", ret);
return ret;
}
ret = btrfs_delayed_qgroup_accounting(&trans, fs_info);
if (ret) {
test_msg("Qgroup accounting failed %d\n", ret);
return ret;
}
if (btrfs_verify_qgroup_counts(fs_info, 256, 0, 0)) {
test_msg("Qgroup counts didn't match expected values\n");
return -EINVAL;
}
if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 4096)) {
test_msg("Qgroup counts didn't match expected values\n");
return -EINVAL;
}
return 0;
}
int btrfs_test_qgroups(void)
{
struct btrfs_root *root;
struct btrfs_root *tmp_root;
int ret = 0;
root = btrfs_alloc_dummy_root();
if (IS_ERR(root)) {
test_msg("Couldn't allocate root\n");
return PTR_ERR(root);
}
root->fs_info = btrfs_alloc_dummy_fs_info();
if (!root->fs_info) {
test_msg("Couldn't allocate dummy fs info\n");
ret = -ENOMEM;
goto out;
}
/*
* Can't use bytenr 0, some things freak out
* *cough*backref walking code*cough*
*/
root->node = alloc_test_extent_buffer(root->fs_info, 4096, 4096);
if (!root->node) {
test_msg("Couldn't allocate dummy buffer\n");
ret = -ENOMEM;
goto out;
}
btrfs_set_header_level(root->node, 0);
btrfs_set_header_nritems(root->node, 0);
root->alloc_bytenr += 8192;
tmp_root = btrfs_alloc_dummy_root();
if (IS_ERR(tmp_root)) {
test_msg("Couldn't allocate a fs root\n");
ret = PTR_ERR(tmp_root);
goto out;
}
tmp_root->root_key.objectid = 5;
root->fs_info->fs_root = tmp_root;
ret = btrfs_insert_fs_root(root->fs_info, tmp_root);
if (ret) {
test_msg("Couldn't insert fs root %d\n", ret);
goto out;
}
tmp_root = btrfs_alloc_dummy_root();
if (IS_ERR(tmp_root)) {
test_msg("Couldn't allocate a fs root\n");
ret = PTR_ERR(tmp_root);
goto out;
}
tmp_root->root_key.objectid = 256;
ret = btrfs_insert_fs_root(root->fs_info, tmp_root);
if (ret) {
test_msg("Couldn't insert fs root %d\n", ret);
goto out;
}
/* We are using this root as our extent root */
root->fs_info->extent_root = root;
/*
* Some of the paths we test assume we have a filled out fs_info, so we
* just need to add the root in there so we don't panic.
*/
root->fs_info->tree_root = root;
root->fs_info->quota_root = root;
root->fs_info->quota_enabled = 1;
test_msg("Running qgroup tests\n");
ret = test_no_shared_qgroup(root);
if (ret)
goto out;
ret = test_multiple_refs(root);
out:
btrfs_free_dummy_root(root);
return ret;
}