linux_media/tools/testing/radix-tree/iteration_check.c
commit 148deab223 ("radix-tree: improve multiorder iterators")
Author: Matthew Wilcox <mawilcox@microsoft.com>
Date:   2016-12-14 16:04:10 -08:00
This fixes several interlinked problems with the iterators in the
presence of multiorder entries.

1. radix_tree_iter_next() would only advance by one slot, which would
   result in the iterators returning the same entry more than once if
   there were sibling entries.

2. radix_tree_next_slot() could return an internal pointer instead of
   a user pointer if a tagged multiorder entry was immediately followed by
   an entry of lower order.

3. radix_tree_next_slot() expanded to a lot more code than it used to
   when multiorder support was compiled in.  And I wasn't comfortable with
   entry_to_node() being in a header file.

Fixing radix_tree_iter_next() for the presence of sibling entries
necessarily involves examining the contents of the radix tree, so we now
need to pass 'slot' to radix_tree_iter_next(), and we need to change the
calling convention so it is called *before* dropping the lock which
protects the tree.  Also rename it to radix_tree_iter_resume(), as some
people thought it was necessary to call radix_tree_iter_next() each time
around the loop.

radix_tree_next_slot() becomes closer to how it looked before multiorder
support was introduced.  It only checks to see if the next entry in the
chunk is a sibling entry or a pointer to a node; this should be rare
enough that handling it out of line does not hurt performance (and any
cost is amortised by the fact that the entry we just processed was a
multiorder entry).  Also, radix_tree_next_slot() used to
force a new chunk lookup for untagged entries, which is more expensive
than the out of line sibling entry skipping.
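
For context, radix_tree_next_slot() is the per-slot step of the iteration
macros, while radix_tree_next_chunk() is the slow path that walks the tree
for the next chunk; roughly (a simplified rendering of
radix_tree_for_each_slot(), not a verbatim quote of the header):

	#define radix_tree_for_each_slot(slot, root, iter, start)	      \
		for (slot = radix_tree_iter_init(iter, start) ;		      \
		     slot || (slot = radix_tree_next_chunk(root, iter, 0)) ; \
		     slot = radix_tree_next_slot(slot, iter, 0))

When radix_tree_next_slot() has nothing left to return it yields NULL and
the loop condition falls back to radix_tree_next_chunk(), which is the
expensive full lookup referred to above.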

Link: http://lkml.kernel.org/r/1480369871-5271-55-git-send-email-mawilcox@linuxonhyperv.com
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Tested-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

/*
 * iteration_check.c: test races having to do with radix tree iteration
 * Copyright (c) 2016 Intel Corporation
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#include <linux/radix-tree.h>
#include <pthread.h>
#include "test.h"

#define NUM_THREADS 4
#define TAG 0

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t threads[NUM_THREADS];
static unsigned int seeds[3];
RADIX_TREE(tree, GFP_KERNEL);
bool test_complete;

/* relentlessly fill the tree with tagged entries */
static void *add_entries_fn(void *arg)
{
	int pgoff;

	rcu_register_thread();

	while (!test_complete) {
		for (pgoff = 0; pgoff < 100; pgoff++) {
			pthread_mutex_lock(&tree_lock);
			if (item_insert(&tree, pgoff) == 0)
				item_tag_set(&tree, pgoff, TAG);
			pthread_mutex_unlock(&tree_lock);
		}
	}

	rcu_unregister_thread();

	return NULL;
}

/*
 * Iterate over the tagged entries, doing a radix_tree_iter_retry() as we find
 * things that have been removed and randomly resetting our iteration to the
 * next chunk with radix_tree_iter_resume(). Both radix_tree_iter_retry() and
 * radix_tree_iter_resume() cause radix_tree_next_slot() to be called with a
 * NULL 'slot' variable.
 */
static void *tagged_iteration_fn(void *arg)
{
	struct radix_tree_iter iter;
	void **slot;

	rcu_register_thread();

	while (!test_complete) {
		rcu_read_lock();
		radix_tree_for_each_tagged(slot, &tree, &iter, 0, TAG) {
			void *entry;
			int i;

			/* busy wait to let removals happen */
			for (i = 0; i < 1000000; i++)
				;

			entry = radix_tree_deref_slot(slot);
			if (unlikely(!entry))
				continue;

			if (radix_tree_deref_retry(entry)) {
				slot = radix_tree_iter_retry(&iter);
				continue;
			}

			if (rand_r(&seeds[0]) % 50 == 0) {
				slot = radix_tree_iter_resume(slot, &iter);
				rcu_read_unlock();
				rcu_barrier();
				rcu_read_lock();
			}
		}
		rcu_read_unlock();
	}

	rcu_unregister_thread();

	return NULL;
}

/*
 * Iterate over the entries, doing a radix_tree_iter_retry() as we find things
 * that have been removed and randomly resetting our iteration to the next
 * chunk with radix_tree_iter_resume(). Both radix_tree_iter_retry() and
 * radix_tree_iter_resume() cause radix_tree_next_slot() to be called with a
 * NULL 'slot' variable.
 */
static void *untagged_iteration_fn(void *arg)
{
	struct radix_tree_iter iter;
	void **slot;

	rcu_register_thread();

	while (!test_complete) {
		rcu_read_lock();
		radix_tree_for_each_slot(slot, &tree, &iter, 0) {
			void *entry;
			int i;

			/* busy wait to let removals happen */
			for (i = 0; i < 1000000; i++)
				;

			entry = radix_tree_deref_slot(slot);
			if (unlikely(!entry))
				continue;

			if (radix_tree_deref_retry(entry)) {
				slot = radix_tree_iter_retry(&iter);
				continue;
			}

			if (rand_r(&seeds[1]) % 50 == 0) {
				slot = radix_tree_iter_resume(slot, &iter);
				rcu_read_unlock();
				rcu_barrier();
				rcu_read_lock();
			}
		}
		rcu_read_unlock();
	}

	rcu_unregister_thread();

	return NULL;
}

/*
 * Randomly remove entries to help induce radix_tree_iter_retry() calls in the
 * two iteration functions.
 */
static void *remove_entries_fn(void *arg)
{
	rcu_register_thread();

	while (!test_complete) {
		int pgoff;

		pgoff = rand_r(&seeds[2]) % 100;

		pthread_mutex_lock(&tree_lock);
		item_delete(&tree, pgoff);
		pthread_mutex_unlock(&tree_lock);
	}

	rcu_unregister_thread();

	return NULL;
}

/* This is a unit test for a bug found by the syzkaller tester */
void iteration_test(void)
{
	int i;

	printf("Running iteration tests for 10 seconds\n");

	test_complete = false;

	for (i = 0; i < 3; i++)
		seeds[i] = rand();

	if (pthread_create(&threads[0], NULL, tagged_iteration_fn, NULL)) {
		perror("pthread_create");
		exit(1);
	}
	if (pthread_create(&threads[1], NULL, untagged_iteration_fn, NULL)) {
		perror("pthread_create");
		exit(1);
	}
	if (pthread_create(&threads[2], NULL, add_entries_fn, NULL)) {
		perror("pthread_create");
		exit(1);
	}
	if (pthread_create(&threads[3], NULL, remove_entries_fn, NULL)) {
		perror("pthread_create");
		exit(1);
	}

	sleep(10);
	test_complete = true;

	for (i = 0; i < NUM_THREADS; i++) {
		if (pthread_join(threads[i], NULL)) {
			perror("pthread_join");
			exit(1);
		}
	}

	item_kill_tree(&tree);
}