/*
 * include/linux/idr.h
 *
 * 2002-10-18 written by Jim Houston jim.houston@ccur.com
 * Copyright (C) 2002 by Concurrent Computer Corporation
 * Distributed under the GNU GPL license version 2.
 *
 * Small id to pointer translation service avoiding fixed sized
 * tables.
 */

#ifndef __IDR_H__
#define __IDR_H__

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/rcupdate.h>

/*
 * We want shallower trees and thus more bits covered at each layer. 8
 * bits gives us a large enough first layer for most use cases and a
 * maximum tree depth of 4. Each idr_layer is slightly larger than 2k
 * on 64bit and 1k on 32bit.
 */
#define IDR_BITS 8
#define IDR_SIZE (1 << IDR_BITS)
#define IDR_MASK ((1 << IDR_BITS)-1)

struct idr_layer {
	int			prefix;	/* the ID prefix of this idr_layer */
	int			layer;	/* distance from leaf */
	struct idr_layer __rcu	*ary[1<<IDR_BITS];
	int			count;	/* When zero, we can release it */
	union {
		/* A zero bit means "space here" */
		DECLARE_BITMAP(bitmap, IDR_SIZE);
		struct rcu_head		rcu_head;
	};
};

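/*
 * Sizing sketch: ary[] alone holds IDR_SIZE (256) pointers, i.e. 2048 bytes
 * on 64bit or 1024 bytes on 32bit; the bookkeeping fields on top of that are
 * what makes each idr_layer "slightly larger than 2k/1k". Four 8-bit layers
 * index 32 bits of id, covering the 31 usable bits of a positive int.
 */
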
struct idr {
	struct idr_layer __rcu	*hint;	/* the last layer allocated from */
	struct idr_layer __rcu	*top;
	int			layers;	/* only valid w/o concurrent changes */
	int			cur;	/* current pos for cyclic allocation */
	spinlock_t		lock;
	int			id_free_cnt;
	struct idr_layer	*id_free;
};

#define IDR_INIT(name)						\
{								\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),	\
}
#define DEFINE_IDR(name)	struct idr name = IDR_INIT(name)

/**
 * DOC: idr sync
 * idr synchronization (stolen from radix-tree.h)
 *
 * idr_find() is able to be called locklessly, using RCU. The caller must
 * ensure calls to this function are made within rcu_read_lock() regions.
 * Other readers (lock-free or otherwise) and modifications may be running
 * concurrently.
 *
 * It is still required that the caller manage the synchronization and
 * lifetimes of the items. So if RCU lock-free lookups are used, typically
 * this would mean that the items have their own locks, or are amenable to
 * lock-free access; and that the items are freed by RCU (or only freed after
 * having been deleted from the idr tree *and* a synchronize_rcu() grace
 * period).
 */

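/*
 * A minimal sketch of the pattern described above; struct foo, foo_idr,
 * foo_lock and use() are illustrative placeholders, not part of this header:
 *
 *	rcu_read_lock();
 *	foo = idr_find(&foo_idr, id);
 *	if (foo)
 *		use(foo);		// must not outlive the read section
 *	rcu_read_unlock();
 *
 *	// removal side: unpublish first, free only after a grace period
 *	spin_lock(&foo_lock);
 *	foo = idr_find(&foo_idr, id);
 *	idr_remove(&foo_idr, id);
 *	spin_unlock(&foo_lock);
 *	if (foo)
 *		kfree_rcu(foo, rcu);	// assumes a struct rcu_head member "rcu"
 */
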
/*
 * This is what we export.
 */

void *idr_find_slowpath(struct idr *idp, int id);
void idr_preload(gfp_t gfp_mask);
int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask);
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask);
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data);
void *idr_get_next(struct idr *idp, int *nextid);
void *idr_replace(struct idr *idp, void *ptr, int id);
void idr_remove(struct idr *idp, int id);
void idr_destroy(struct idr *idp);
void idr_init(struct idr *idp);
bool idr_is_empty(struct idr *idp);

/**
 * idr_preload_end - end preload section started with idr_preload()
 *
 * Each idr_preload() should be matched with an invocation of this
 * function. See idr_preload() for details.
 */
static inline void idr_preload_end(void)
{
	preempt_enable();
}

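/*
 * The intended allocation pattern (my_idr, my_lock and ptr are
 * placeholders): preload outside the atomic section, then allocate
 * with GFP_NOWAIT while holding the lock.
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(&my_lock);
 *	id = idr_alloc(&my_idr, ptr, 0, 0, GFP_NOWAIT);
 *	spin_unlock(&my_lock);
 *	idr_preload_end();
 *	if (id < 0)
 *		return id;	// allocation failed despite preloading
 */
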
/**
 * idr_find - return pointer for given id
 * @idr: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with. A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
static inline void *idr_find(struct idr *idr, int id)
{
	struct idr_layer *hint = rcu_dereference_raw(idr->hint);

	if (hint && (id & ~IDR_MASK) == hint->prefix)
		return rcu_dereference_raw(hint->ary[id & IDR_MASK]);

	return idr_find_slowpath(idr, id);
}

/**
 * idr_for_each_entry - iterate over an idr's elements of a given type
 * @idp: idr handle
 * @entry: the type * to use as cursor
 * @id: id entry's key
 *
 * @entry and @id do not need to be initialized before the loop, and
 * after normal termination @entry is left with the value NULL. This
 * is convenient for a "not found" value.
 */
#define idr_for_each_entry(idp, entry, id)			\
	for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id)

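/*
 * Illustrative loop (struct foo and foo_idr are placeholders):
 *
 *	struct foo *foo;
 *	int id;
 *
 *	idr_for_each_entry(&foo_idr, foo, id)
 *		pr_info("id %d -> %p\n", id, foo);
 */
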
/**
 * idr_for_each_entry_continue - continue iteration over an idr's elements of a given type
 * @idp: idr handle
 * @entry: the type * to use as cursor
 * @id: id entry's key
 *
 * Continue to iterate over list of given type, continuing after
 * the current position.
 */
#define idr_for_each_entry_continue(idp, entry, id)		\
	for ((entry) = idr_get_next((idp), &(id));		\
	     entry;						\
	     ++id, (entry) = idr_get_next((idp), &(id)))

/*
 * IDA - IDR based id allocator, use when translation from id to
 * pointer isn't necessary.
 *
 * IDA_BITMAP_LONGS is calculated to be one less to accommodate
 * ida_bitmap->nr_busy so that the whole struct fits in 128 bytes.
 */
#define IDA_CHUNK_SIZE		128	/* 128 bytes per chunk */
#define IDA_BITMAP_LONGS	(IDA_CHUNK_SIZE / sizeof(long) - 1)
#define IDA_BITMAP_BITS		(IDA_BITMAP_LONGS * sizeof(long) * 8)

struct ida_bitmap {
	long			nr_busy;
	unsigned long		bitmap[IDA_BITMAP_LONGS];
};

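/*
 * Worked sizing: on 64bit, IDA_BITMAP_LONGS = 128 / 8 - 1 = 15, so
 * sizeof(struct ida_bitmap) = 8 + 15 * 8 = 128 bytes; on 32bit,
 * 128 / 4 - 1 = 31 longs and 4 + 31 * 4 = 128 bytes. Either way the
 * struct fills exactly one IDA_CHUNK_SIZE chunk.
 */
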
struct ida {
	struct idr		idr;
	struct ida_bitmap	*free_bitmap;
};

#define IDA_INIT(name)		{ .idr = IDR_INIT((name).idr), .free_bitmap = NULL, }
#define DEFINE_IDA(name)	struct ida name = IDA_INIT(name)

int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id);
void ida_remove(struct ida *ida, int id);
void ida_destroy(struct ida *ida);
void ida_init(struct ida *ida);

int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
		   gfp_t gfp_mask);
void ida_simple_remove(struct ida *ida, unsigned int id);

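/*
 * Typical ida_simple_get() usage (my_ida stands for some DEFINE_IDA()
 * instance); an @end of 0 means "no upper limit":
 *
 *	int id = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);
 *	if (id < 0)
 *		return id;
 *	...
 *	ida_simple_remove(&my_ida, id);
 */
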
/**
 * ida_get_new - allocate new ID
 * @ida: idr handle
 * @p_id: pointer to the allocated handle
 *
 * Simple wrapper around ida_get_new_above() w/ @starting_id of zero.
 */
static inline int ida_get_new(struct ida *ida, int *p_id)
{
	return ida_get_new_above(ida, 0, p_id);
}

void __init idr_init_cache(void);

#endif /* __IDR_H__ */