/*
     * astobj2 - replacement containers for asterisk data structures.
     *
     * Copyright (C) 2006 Marta Carbone, Luigi Rizzo - Univ. di Pisa, Italy
     *
     * See http://www.asterisk.org for more information about
     * the Asterisk project. Please do not directly contact
     * any of the maintainers of this project for assistance;
     * the project provides a web site, mailing lists and IRC
     * channels for your use.
     *
     * This program is free software, distributed under the terms of
     * the GNU General Public License Version 2. See the LICENSE file
     * at the top of the source tree.
     */
    
    
    /*! \file
     *
     * \brief Functions implementing astobj2 objects.
     *
 * \author Richard Mudgett <rmudgett@digium.com>
 */
    
    /*** MODULEINFO
    	<support_level>core</support_level>
     ***/
    
    
    #include "asterisk.h"
    
    ASTERISK_FILE_VERSION(__FILE__, "$Revision$")
    
    
    #include "asterisk/_private.h"
    
    #include "asterisk/astobj2.h"
    
    #include "asterisk/dlinkedlists.h"
    
    #include "asterisk/utils.h"
    #include "asterisk/cli.h"
    
    #if defined(TEST_FRAMEWORK)
    /* We are building with the test framework enabled so enable AO2 debug tests as well. */
    #define AO2_DEBUG 1
    #endif	/* defined(TEST_FRAMEWORK) */
    
    
/*!
 * astobj2 objects are always preceded by this data structure,
 * which contains a reference counter,
 * option flags and a pointer to a destructor.
 * The refcount is used to decide when it is time to
 * invoke the destructor.
 * The magic number is used for consistency check.
     */
    struct __priv_data {
    	int ref_counter;
    	ao2_destructor_fn destructor_fn;
    
    	/*! User data size for stats */
    
    	size_t data_size;
    
    	/*! The ao2 object option flags */
    	uint32_t options;
    
    	/*! magic number.  This is used to verify that a pointer passed in is a
    	 *  valid astobj2 */
    	uint32_t magic;
    };
    
    #define	AO2_MAGIC	0xa570b123
    
    /*!
     * What an astobj2 object looks like: fixed-size private data
     * followed by variable-size user data.
     */
    struct astobj2 {
    	struct __priv_data priv_data;
    	void *user_data[0];
    };
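/*
 * Illustrative sketch (not part of the original source): the typical
 * lifecycle implied by the layout above.  "struct foo" and foo_destructor()
 * are hypothetical names used only for the example.
 *
 * \code
 *	static void foo_destructor(void *obj)
 *	{
 *		struct foo *foo = obj;	// runs when the refcount reaches zero
 *		// release resources owned by foo here
 *	}
 *
 *	struct foo *foo = ao2_alloc(sizeof(*foo), foo_destructor);	// refcount == 1
 *	ao2_ref(foo, +1);	// hand out another reference, refcount == 2
 *	ao2_ref(foo, -1);	// drop one reference, refcount == 1
 *	ao2_ref(foo, -1);	// last reference gone, foo_destructor() is invoked
 * \endcode
 */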
    
    
    struct ao2_lock_priv {
    	ast_mutex_t lock;
    };
    
    /* AstObj2 with recursive lock. */
    struct astobj2_lock {
    	struct ao2_lock_priv mutex;
    	struct __priv_data priv_data;
    	void *user_data[0];
    };
    
    struct ao2_rwlock_priv {
    	ast_rwlock_t lock;
    	/*! Count of the number of threads holding a lock on this object. -1 if it is the write lock. */
    	int num_lockers;
    };
    
    /* AstObj2 with RW lock. */
    struct astobj2_rwlock {
    	struct ao2_rwlock_priv rwlock;
    	struct __priv_data priv_data;
    	void *user_data[0];
    };
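/*
 * Illustrative sketch (not part of the original source): the lock option
 * chosen at allocation time decides which of the wrapper structs above is
 * placed in front of the user data.  "struct foo" is a hypothetical payload
 * type; the functions used are the public ao2 allocation and locking macros.
 *
 * \code
 *	struct foo *a = ao2_alloc(sizeof(*a), NULL);	// default: mutex (struct astobj2_lock)
 *	struct foo *b = ao2_alloc_options(sizeof(*b), NULL, AO2_ALLOC_OPT_LOCK_RWLOCK);
 *	struct foo *c = ao2_alloc_options(sizeof(*c), NULL, AO2_ALLOC_OPT_LOCK_NOLOCK);
 *
 *	ao2_lock(a);	// locks the embedded mutex
 *	ao2_unlock(a);
 *
 *	ao2_rdlock(b);	// read lock on the rwlock variant, num_lockers incremented
 *	ao2_unlock(b);
 *	ao2_wrlock(b);	// write lock, num_lockers goes to -1
 *	ao2_unlock(b);
 * \endcode
 */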
    
    #if defined(AST_DEVMODE)
    #define AO2_DEVMODE_STAT(stat)	stat
    #else
    #define AO2_DEVMODE_STAT(stat)
    #endif	/* defined(AST_DEVMODE) */
    
    
    #ifdef AO2_DEBUG
    
    struct ao2_stats {
    	volatile int total_objects;
    	volatile int total_mem;
    	volatile int total_containers;
    	volatile int total_refs;
    	volatile int total_locked;
    };
    
static struct ao2_stats ao2;
#endif	/* AO2_DEBUG */

#ifdef HAVE_BKTR
#include <execinfo.h>    /* for backtrace */
#endif	/* HAVE_BKTR */

void ao2_bt(void)
{
#ifdef HAVE_BKTR
	int depth;
	int idx;
#define N1	20
	void *addresses[N1];
	char **strings;

	depth = backtrace(addresses, N1);
	strings = ast_bt_get_symbols(addresses, depth);
	ast_verbose("backtrace returned: %d\n", depth);
	for (idx = 0; idx < depth; ++idx) {
		ast_verbose("%d: %p %s\n", idx, addresses[idx], strings[idx]);
	}
	free(strings);
#endif	/* HAVE_BKTR */
}

    #define INTERNAL_OBJ_MUTEX(user_data) \
    	((struct astobj2_lock *) (((char *) (user_data)) - sizeof(struct astobj2_lock)))
    
    #define INTERNAL_OBJ_RWLOCK(user_data) \
    	((struct astobj2_rwlock *) (((char *) (user_data)) - sizeof(struct astobj2_rwlock)))
    
    
    /*!
     * \brief convert from a pointer _p to a user-defined object
     *
     * \return the pointer to the astobj2 structure
     */
    static inline struct astobj2 *INTERNAL_OBJ(void *user_data)
    {
    	struct astobj2 *p;
    
    	if (!user_data) {
    		ast_log(LOG_ERROR, "user_data is NULL\n");
    		return NULL;
    	}
    
    	p = (struct astobj2 *) ((char *) user_data - sizeof(*p));
    
	if (AO2_MAGIC != p->priv_data.magic) {
		if (p->priv_data.magic) {
			ast_log(LOG_ERROR, "bad magic number 0x%x for object %p\n",
				p->priv_data.magic, user_data);
		} else {
			ast_log(LOG_ERROR,
				"bad magic number for object %p. Object is likely destroyed.\n",
				user_data);
		}
		ast_assert(0);
		return NULL;
	}
    
    	return p;
    }
    
    /*!
     * \brief convert from a pointer _p to an astobj2 object
     *
     * \return the pointer to the user-defined portion.
     */
    #define EXTERNAL_OBJ(_p)	((_p) == NULL ? NULL : (_p)->user_data)
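/*
 * Illustrative sketch (not part of the original source): the pointer handed
 * to API users is the user_data area, so INTERNAL_OBJ() steps back over the
 * private header while EXTERNAL_OBJ() goes the other way.  "struct foo" and
 * the pointer names are hypothetical.
 *
 * \code
 *	struct foo *foo = ao2_alloc(sizeof(*foo), NULL);
 *	struct astobj2 *hdr = INTERNAL_OBJ(foo);	// header sits just before foo
 *	ast_assert(EXTERNAL_OBJ(hdr) == (void *) foo);
 *	ao2_ref(foo, -1);
 * \endcode
 */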
    
    
int __ao2_lock(void *user_data, enum ao2_lock_req lock_how, const char *file, const char *func, int line, const char *var)
{
	struct astobj2 *obj = INTERNAL_OBJ(user_data);
    	struct astobj2_lock *obj_mutex;
    	struct astobj2_rwlock *obj_rwlock;
    	int res = 0;
    
	if (obj == NULL) {
		return -1;
	}

    	switch (obj->priv_data.options & AO2_ALLOC_OPT_LOCK_MASK) {
    	case AO2_ALLOC_OPT_LOCK_MUTEX:
    		obj_mutex = INTERNAL_OBJ_MUTEX(user_data);
    		res = __ast_pthread_mutex_lock(file, line, func, var, &obj_mutex->mutex.lock);
    #ifdef AO2_DEBUG
    		if (!res) {
    			ast_atomic_fetchadd_int(&ao2.total_locked, 1);
    		}
    #endif
    		break;
    	case AO2_ALLOC_OPT_LOCK_RWLOCK:
    		obj_rwlock = INTERNAL_OBJ_RWLOCK(user_data);
    		switch (lock_how) {
    		case AO2_LOCK_REQ_MUTEX:
    		case AO2_LOCK_REQ_WRLOCK:
    			res = __ast_rwlock_wrlock(file, line, func, &obj_rwlock->rwlock.lock, var);
    			if (!res) {
    				ast_atomic_fetchadd_int(&obj_rwlock->rwlock.num_lockers, -1);
    #ifdef AO2_DEBUG
    				ast_atomic_fetchadd_int(&ao2.total_locked, 1);
    #endif
    			}
    			break;
    		case AO2_LOCK_REQ_RDLOCK:
    			res = __ast_rwlock_rdlock(file, line, func, &obj_rwlock->rwlock.lock, var);
    			if (!res) {
				ast_atomic_fetchadd_int(&obj_rwlock->rwlock.num_lockers, +1);
#ifdef AO2_DEBUG
				ast_atomic_fetchadd_int(&ao2.total_locked, 1);
#endif
			}
    			break;
    		}
    		break;
    	case AO2_ALLOC_OPT_LOCK_NOLOCK:
    		/* The ao2 object has no lock. */
    		break;
    	default:
    		ast_log(__LOG_ERROR, file, line, func, "Invalid lock option on ao2 object %p\n",
    			user_data);
    		return -1;
    	}
    
	return res;
}

int __ao2_unlock(void *user_data, const char *file, const char *func, int line, const char *var)
{
	struct astobj2 *obj = INTERNAL_OBJ(user_data);
    	struct astobj2_lock *obj_mutex;
    	struct astobj2_rwlock *obj_rwlock;
    	int res = 0;
    	int current_value;
    
	if (obj == NULL) {
		return -1;
	}

    	switch (obj->priv_data.options & AO2_ALLOC_OPT_LOCK_MASK) {
    	case AO2_ALLOC_OPT_LOCK_MUTEX:
    		obj_mutex = INTERNAL_OBJ_MUTEX(user_data);
    		res = __ast_pthread_mutex_unlock(file, line, func, var, &obj_mutex->mutex.lock);
    
#ifdef AO2_DEBUG
		if (!res) {
			ast_atomic_fetchadd_int(&ao2.total_locked, -1);
		}
#endif
		break;
    	case AO2_ALLOC_OPT_LOCK_RWLOCK:
    		obj_rwlock = INTERNAL_OBJ_RWLOCK(user_data);
    
    		current_value = ast_atomic_fetchadd_int(&obj_rwlock->rwlock.num_lockers, -1) - 1;
    		if (current_value < 0) {
    			/* It was a WRLOCK that we are unlocking.  Fix the count. */
    			ast_atomic_fetchadd_int(&obj_rwlock->rwlock.num_lockers, -current_value);
    		}
    		res = __ast_rwlock_unlock(file, line, func, &obj_rwlock->rwlock.lock, var);
    #ifdef AO2_DEBUG
    		if (!res) {
    			ast_atomic_fetchadd_int(&ao2.total_locked, -1);
    		}
    #endif
    		break;
    	case AO2_ALLOC_OPT_LOCK_NOLOCK:
    		/* The ao2 object has no lock. */
    		break;
    	default:
    		ast_log(__LOG_ERROR, file, line, func, "Invalid lock option on ao2 object %p\n",
    			user_data);
    		res = -1;
    		break;
    	}
	return res;
}

int __ao2_trylock(void *user_data, enum ao2_lock_req lock_how, const char *file, const char *func, int line, const char *var)
{
	struct astobj2 *obj = INTERNAL_OBJ(user_data);
    	struct astobj2_lock *obj_mutex;
    	struct astobj2_rwlock *obj_rwlock;
    	int res = 0;
    
	if (obj == NULL) {
		return -1;
	}

    	switch (obj->priv_data.options & AO2_ALLOC_OPT_LOCK_MASK) {
    	case AO2_ALLOC_OPT_LOCK_MUTEX:
    		obj_mutex = INTERNAL_OBJ_MUTEX(user_data);
		res = __ast_pthread_mutex_trylock(file, line, func, var, &obj_mutex->mutex.lock);
#ifdef AO2_DEBUG
		if (!res) {
			ast_atomic_fetchadd_int(&ao2.total_locked, 1);
		}
#endif
		break;
    	case AO2_ALLOC_OPT_LOCK_RWLOCK:
    		obj_rwlock = INTERNAL_OBJ_RWLOCK(user_data);
    		switch (lock_how) {
    		case AO2_LOCK_REQ_MUTEX:
    		case AO2_LOCK_REQ_WRLOCK:
    			res = __ast_rwlock_trywrlock(file, line, func, &obj_rwlock->rwlock.lock, var);
    			if (!res) {
    				ast_atomic_fetchadd_int(&obj_rwlock->rwlock.num_lockers, -1);
    #ifdef AO2_DEBUG
    				ast_atomic_fetchadd_int(&ao2.total_locked, 1);
    #endif
    			}
    			break;
    		case AO2_LOCK_REQ_RDLOCK:
    			res = __ast_rwlock_tryrdlock(file, line, func, &obj_rwlock->rwlock.lock, var);
    			if (!res) {
    				ast_atomic_fetchadd_int(&obj_rwlock->rwlock.num_lockers, +1);
    #ifdef AO2_DEBUG
    				ast_atomic_fetchadd_int(&ao2.total_locked, 1);
    #endif
    			}
    			break;
    		}
    		break;
    	case AO2_ALLOC_OPT_LOCK_NOLOCK:
    		/* The ao2 object has no lock. */
    		return 0;
    	default:
    		ast_log(__LOG_ERROR, file, line, func, "Invalid lock option on ao2 object %p\n",
    			user_data);
    		return -1;
    	}
    
	return res;
}

    /*!
     * \internal
     * \brief Adjust an object's lock to the requested level.
     *
     * \param user_data An ao2 object to adjust lock level.
     * \param lock_how What level to adjust lock.
     * \param keep_stronger TRUE if keep original lock level if it is stronger.
     *
     * \pre The ao2 object is already locked.
     *
     * \details
     * An ao2 object with a RWLOCK will have its lock level adjusted
     * to the specified level if it is not already there.  An ao2
     * object with a different type of lock is not affected.
     *
 * \return Original lock level.
 */
static enum ao2_lock_req adjust_lock(void *user_data, enum ao2_lock_req lock_how, int keep_stronger)
    {
    	struct astobj2 *obj = INTERNAL_OBJ(user_data);
    
    	struct astobj2_rwlock *obj_rwlock;
    	enum ao2_lock_req orig_lock;
    
    	switch (obj->priv_data.options & AO2_ALLOC_OPT_LOCK_MASK) {
    	case AO2_ALLOC_OPT_LOCK_RWLOCK:
    		obj_rwlock = INTERNAL_OBJ_RWLOCK(user_data);
    		if (obj_rwlock->rwlock.num_lockers < 0) {
    			orig_lock = AO2_LOCK_REQ_WRLOCK;
    		} else {
			orig_lock = AO2_LOCK_REQ_RDLOCK;
		}

    		switch (lock_how) {
    		case AO2_LOCK_REQ_MUTEX:
    			lock_how = AO2_LOCK_REQ_WRLOCK;
    			/* Fall through */
    		case AO2_LOCK_REQ_WRLOCK:
    			if (lock_how != orig_lock) {
    				/* Switch from read lock to write lock. */
    				ao2_unlock(user_data);
    				ao2_wrlock(user_data);
    			}
    			break;
    		case AO2_LOCK_REQ_RDLOCK:
    			if (!keep_stronger && lock_how != orig_lock) {
    				/* Switch from write lock to read lock. */
    				ao2_unlock(user_data);
    				ao2_rdlock(user_data);
    			}
			break;
		}
		break;
    	default:
    		ast_log(LOG_ERROR, "Invalid lock option on ao2 object %p\n", user_data);
    		/* Fall through */
    	case AO2_ALLOC_OPT_LOCK_NOLOCK:
    	case AO2_ALLOC_OPT_LOCK_MUTEX:
    		orig_lock = AO2_LOCK_REQ_MUTEX;
		break;
	}

	return orig_lock;
}

void *ao2_object_get_lockaddr(void *user_data)
{
    	struct astobj2 *obj = INTERNAL_OBJ(user_data);
    
    	struct astobj2_lock *obj_mutex;
    
    	if (obj == NULL) {
    
    		return NULL;
    	}
    
    	switch (obj->priv_data.options & AO2_ALLOC_OPT_LOCK_MASK) {
    	case AO2_ALLOC_OPT_LOCK_MUTEX:
    		obj_mutex = INTERNAL_OBJ_MUTEX(user_data);
    		return &obj_mutex->mutex.lock;
    	default:
    		break;
    	}
    
	return NULL;
}

static int internal_ao2_ref(void *user_data, int delta, const char *file, int line, const char *func)
{
    	struct astobj2 *obj = INTERNAL_OBJ(user_data);
    
	struct astobj2_lock *obj_mutex;
	struct astobj2_rwlock *obj_rwlock;
	int current_value;
	int ret;

	if (obj == NULL) {
		return -1;
	}

    	/* if delta is 0, just return the refcount */
    
    	if (delta == 0) {
    		return obj->priv_data.ref_counter;
    	}
    
    
    	/* we modify with an atomic operation the reference counter */
    	ret = ast_atomic_fetchadd_int(&obj->priv_data.ref_counter, delta);
    	current_value = ret + delta;
    
    #ifdef AO2_DEBUG
    
    	ast_atomic_fetchadd_int(&ao2.total_refs, delta);
    #endif
    
    
    	if (0 < current_value) {
    		/* The object still lives. */
    		return ret;
    	}
    
    
    	/* this case must never happen */
    
    	if (current_value < 0) {
    
    		ast_log(__LOG_ERROR, file, line, func,
    
    			"Invalid refcount %d on ao2 object %p\n", current_value, user_data);
    	}
    
    	/* last reference, destroy the object */
    	if (obj->priv_data.destructor_fn != NULL) {
    		obj->priv_data.destructor_fn(user_data);
    	}
    
    #ifdef AO2_DEBUG
    
    	ast_atomic_fetchadd_int(&ao2.total_mem, - obj->priv_data.data_size);
	ast_atomic_fetchadd_int(&ao2.total_objects, -1);
#endif

    	/* In case someone uses an object after it's been freed */
    	obj->priv_data.magic = 0;
    
    
    	switch (obj->priv_data.options & AO2_ALLOC_OPT_LOCK_MASK) {
    	case AO2_ALLOC_OPT_LOCK_MUTEX:
    		obj_mutex = INTERNAL_OBJ_MUTEX(user_data);
    		ast_mutex_destroy(&obj_mutex->mutex.lock);
    
    		ast_free(obj_mutex);
    		break;
    	case AO2_ALLOC_OPT_LOCK_RWLOCK:
    		obj_rwlock = INTERNAL_OBJ_RWLOCK(user_data);
    		ast_rwlock_destroy(&obj_rwlock->rwlock.lock);
    
    		ast_free(obj_rwlock);
    		break;
	case AO2_ALLOC_OPT_LOCK_NOLOCK:
		ast_free(obj);
		break;
	default:
		ast_log(__LOG_ERROR, file, line, func,
			"Invalid lock option on ao2 object %p\n", user_data);
		break;
	}

	return ret;
}

    int __ao2_ref_debug(void *user_data, int delta, const char *tag, const char *file, int line, const char *func)
    
    {
	struct astobj2 *obj = INTERNAL_OBJ(user_data);

	if (obj == NULL) {
		return -1;
	}
    
    	if (ref_log) {
    		if (obj->priv_data.ref_counter + delta == 0) {
    			fprintf(ref_log, "%p,%d,%d,%s,%d,%s,**destructor**,%s\n", user_data, delta, ast_get_tid(), file, line, func, tag);
    			fflush(ref_log);
    		} else if (delta != 0) {
    			fprintf(ref_log, "%p,%s%d,%d,%s,%d,%s,%d,%s\n", user_data, (delta < 0 ? "" : "+"),
    				delta, ast_get_tid(), file, line, func, obj ? obj->priv_data.ref_counter : -1, tag);
			fflush(ref_log);
		}
	}

	return internal_ao2_ref(user_data, delta, file, line, func);
    }
    
    int __ao2_ref(void *user_data, int delta)
    {
    	return internal_ao2_ref(user_data, delta, __FILE__, __LINE__, __FUNCTION__);
    }
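/*
 * Illustrative sketch (not part of the original source): ao2_ref() returns
 * the refcount it found *before* applying delta, and a delta of 0 simply
 * queries the current count.  "obj" stands for any valid ao2 object.
 *
 * \code
 *	int count = ao2_ref(obj, 0);	// query only, refcount unchanged
 *	int before = ao2_ref(obj, +1);	// before == count, refcount is now count + 1
 *	ao2_ref(obj, -1);		// drop the extra reference again
 * \endcode
 */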
    
    
    void __ao2_cleanup_debug(void *obj, const char *file, int line, const char *function)
    {
    	if (obj) {
    		__ao2_ref_debug(obj, -1, "ao2_cleanup", file, line, function);
    	}
    }
    
void __ao2_cleanup(void *obj)
{
	if (obj) {
		ao2_ref(obj, -1);
	}
}

static void *internal_ao2_alloc(size_t data_size, ao2_destructor_fn destructor_fn, unsigned int options, const char *file, int line, const char *func)
    
    {
    	/* allocation */
    	struct astobj2 *obj;
    
    	struct astobj2_lock *obj_mutex;
    	struct astobj2_rwlock *obj_rwlock;
    
    	switch (options & AO2_ALLOC_OPT_LOCK_MASK) {
	case AO2_ALLOC_OPT_LOCK_MUTEX:
#if defined(__AST_DEBUG_MALLOC)
		obj_mutex = __ast_calloc(1, sizeof(*obj_mutex) + data_size, file, line, func);
#else
		obj_mutex = ast_calloc(1, sizeof(*obj_mutex) + data_size);
#endif
    		if (obj_mutex == NULL) {
    			return NULL;
    		}
    
    		ast_mutex_init(&obj_mutex->mutex.lock);
    		obj = (struct astobj2 *) &obj_mutex->priv_data;
    		break;
    	case AO2_ALLOC_OPT_LOCK_RWLOCK:
    #if defined(__AST_DEBUG_MALLOC)
    
    		obj_rwlock = __ast_calloc(1, sizeof(*obj_rwlock) + data_size, file, line, func);
    
    #else
    		obj_rwlock = ast_calloc(1, sizeof(*obj_rwlock) + data_size);
    #endif
    		if (obj_rwlock == NULL) {
    			return NULL;
    		}
    
    		ast_rwlock_init(&obj_rwlock->rwlock.lock);
    		obj = (struct astobj2 *) &obj_rwlock->priv_data;
    		break;
    	case AO2_ALLOC_OPT_LOCK_NOLOCK:
    #if defined(__AST_DEBUG_MALLOC)
    
    		obj = __ast_calloc(1, sizeof(*obj) + data_size, file, line, func);
    
    #else
    		obj = ast_calloc(1, sizeof(*obj) + data_size);
    #endif
    		if (obj == NULL) {
    			return NULL;
    		}
    		break;
    	default:
    		/* Invalid option value. */
    
		ast_log(__LOG_DEBUG, file, line, func, "Invalid lock option requested\n");
		return NULL;
	}

    	/* Initialize common ao2 values. */
    
    	obj->priv_data.ref_counter = 1;
    	obj->priv_data.destructor_fn = destructor_fn;	/* can be NULL */
    
    	obj->priv_data.data_size = data_size;
    	obj->priv_data.options = options;
    	obj->priv_data.magic = AO2_MAGIC;
    
#ifdef AO2_DEBUG
	ast_atomic_fetchadd_int(&ao2.total_objects, 1);
	ast_atomic_fetchadd_int(&ao2.total_mem, data_size);
	ast_atomic_fetchadd_int(&ao2.total_refs, 1);
#endif
    
    
    	/* return a pointer to the user data */
    	return EXTERNAL_OBJ(obj);
    }
    
    
void *__ao2_alloc_debug(size_t data_size, ao2_destructor_fn destructor_fn, unsigned int options, const char *tag,
	const char *file, int line, const char *func, int ref_debug)
{
	/* allocation */
	void *obj;

	if ((obj = internal_ao2_alloc(data_size, destructor_fn, options, file, line, func)) == NULL) {
		return NULL;
	}

	if (ref_log) {
		fprintf(ref_log, "%p,+1,%d,%s,%d,%s,**constructor**,%s\n", obj, ast_get_tid(), file, line, func, tag);
		fflush(ref_log);
	}

	/* return a pointer to the user data */
	return obj;
}

void *__ao2_alloc(size_t data_size, ao2_destructor_fn destructor_fn, unsigned int options)
{
	return internal_ao2_alloc(data_size, destructor_fn, options, __FILE__, __LINE__, __FUNCTION__);
}

void __ao2_global_obj_release(struct ao2_global_obj *holder, const char *tag, const char *file, int line, const char *func, const char *name)
{
	if (!holder) {
		/* For sanity */
		ast_log(LOG_ERROR, "Must be called with a global object!\n");
		return;
	}
	if (__ast_rwlock_wrlock(file, line, func, &holder->lock, name)) {
		/* Could not get the write lock. */
		return;
	}

	/* Release the held ao2 object. */
	if (holder->obj) {
		if (tag) {
			__ao2_ref_debug(holder->obj, -1, tag, file, line, func);
		} else {
			__ao2_ref(holder->obj, -1);
		}
		holder->obj = NULL;
	}

	__ast_rwlock_unlock(file, line, func, &holder->lock, name);
}
    
void *__ao2_global_obj_replace(struct ao2_global_obj *holder, void *obj, const char *tag, const char *file, int line, const char *func, const char *name)
{
	void *obj_old;

	if (!holder) {
		/* For sanity */
		ast_log(LOG_ERROR, "Must be called with a global object!\n");
    
    		return NULL;
    	}
    
    	if (__ast_rwlock_wrlock(file, line, func, &holder->lock, name)) {
    
    		/* Could not get the write lock. */
    
    		return NULL;
    	}
    
    	if (obj) {
    
    		if (tag) {
    			__ao2_ref_debug(obj, +1, tag, file, line, func);
    		} else {
    			__ao2_ref(obj, +1);
		}
	}
	obj_old = holder->obj;
	holder->obj = obj;

	__ast_rwlock_unlock(file, line, func, &holder->lock, name);

	return obj_old;
}

    int __ao2_global_obj_replace_unref(struct ao2_global_obj *holder, void *obj, const char *tag, const char *file, int line, const char *func, const char *name)
    {
    	void *obj_old;
    
    	obj_old = __ao2_global_obj_replace(holder, obj, tag, file, line, func, name);
    	if (obj_old) {
    
    		if (tag) {
    			__ao2_ref_debug(obj_old, -1, tag, file, line, func);
    		} else {
    			__ao2_ref(obj_old, -1);
    		}
    
    		return 1;
    	}
    	return 0;
    }
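/*
 * Illustrative sketch (not part of the original source): typical use of an
 * ao2_global_obj holder via the public macros.  "my_config", "new_cfg" and
 * "struct cfg" are hypothetical names.
 *
 * \code
 *	AO2_GLOBAL_OBJ_STATIC(my_config);
 *
 *	// Publish a new object; the holder takes its own reference and the
 *	// previously held object (if any) is unreffed.
 *	ao2_global_obj_replace_unref(my_config, new_cfg);
 *
 *	// Readers take a temporary reference under the holder's read lock.
 *	struct cfg *cfg = ao2_global_obj_ref(my_config);
 *	if (cfg) {
 *		// use cfg, then drop the temporary reference
 *		ao2_ref(cfg, -1);
 *	}
 *
 *	// On shutdown, drop the held reference.
 *	ao2_global_obj_release(my_config);
 * \endcode
 */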
    
void *__ao2_global_obj_ref(struct ao2_global_obj *holder, const char *tag, const char *file, int line, const char *func, const char *name)
{
	void *obj;

	if (!holder) {
		/* For sanity */
    
    		ast_log(LOG_ERROR, "Must be called with a global object!\n");
    
    		return NULL;
    	}
    
    	if (__ast_rwlock_rdlock(file, line, func, &holder->lock, name)) {
    
		/* Could not get the read lock. */
		return NULL;
	}

	obj = holder->obj;
	if (obj) {
    
    		if (tag) {
    			__ao2_ref_debug(obj, +1, tag, file, line, func);
    		} else {
    			__ao2_ref(obj, +1);
		}
	}

	__ast_rwlock_unlock(file, line, func, &holder->lock, name);

	return obj;
}

    enum ao2_callback_type {
    	AO2_CALLBACK_DEFAULT,
    	AO2_CALLBACK_WITH_DATA,
    };
    
    enum ao2_container_insert {
    	/*! The node was inserted into the container. */
    	AO2_CONTAINER_INSERT_NODE_INSERTED,
    	/*! The node object replaced an existing node object. */
    	AO2_CONTAINER_INSERT_NODE_OBJ_REPLACED,
    	/*! The node was rejected (duplicate). */
    	AO2_CONTAINER_INSERT_NODE_REJECTED,
    };
    
    enum ao2_container_rtti {
    	/*! This is a hash container */
    	AO2_CONTAINER_RTTI_HASH,
    
    	/*! This is a red-black tree container */
	AO2_CONTAINER_RTTI_RBTREE,
};

/*!
 * \brief Generic container node.
     *
     * \details This is the base container node type that contains
 * values common to all container nodes.
 */
struct ao2_container_node {
    	/*! Stored object in node. */
    	void *obj;
    	/*! Container holding the node.  (Does not hold a reference.) */
    	struct ao2_container *my_container;
    	/*! TRUE if the node is linked into the container. */
	unsigned int is_linked:1;
};

    /*!
     * \brief Destroy this container.
     *
     * \param self Container to operate upon.
     *
     * \return Nothing
     */
    typedef void (*ao2_container_destroy_fn)(struct ao2_container *self);
    
/*!
 * \brief Create an empty copy of this container.
 *
 * \param self Container to operate upon.
 *
 * \retval empty-container on success.
 * \retval NULL on error.
 */
    typedef struct ao2_container *(*ao2_container_alloc_empty_clone_fn)(struct ao2_container *self);
    
/*!
 * \brief Create an empty copy of this container. (Debug version)
 *
 * \param self Container to operate upon.
 * \param tag used for debugging.
 * \param file Debug file name invoked from
 * \param line Debug line invoked from
 * \param func Debug function name invoked from
 * \param ref_debug TRUE if to output a debug reference message.
 *
 * \retval empty-container on success.
 * \retval NULL on error.
 */
    typedef struct ao2_container *(*ao2_container_alloc_empty_clone_debug_fn)(struct ao2_container *self, const char *tag, const char *file, int line, const char *func, int ref_debug);
    
    /*!
     * \brief Create a new container node.
     *
     * \param self Container to operate upon.
     * \param obj_new Object to put into the node.
     * \param tag used for debugging.
     * \param file Debug file name invoked from
     * \param line Debug line invoked from
     * \param func Debug function name invoked from
     *
     * \retval initialized-node on success.
 * \retval NULL on error.
 */
    typedef struct ao2_container_node *(*ao2_container_new_node_fn)(struct ao2_container *self, void *obj_new, const char *tag, const char *file, int line, const char *func);
    
    /*!
     * \brief Insert a node into this container.
     *
     * \param self Container to operate upon.
     * \param node Container node to insert into the container.
     *
     * \return enum ao2_container_insert value.
     */
    typedef enum ao2_container_insert (*ao2_container_insert_fn)(struct ao2_container *self, struct ao2_container_node *node);
    
    /*!
     * \brief Find the first container node in a traversal.
     *
     * \param self Container to operate upon.
     * \param flags search_flags to control traversing the container
     * \param arg Comparison callback arg parameter.
     * \param v_state Traversal state to restart container traversal.
     *
     * \retval node-ptr of found node (Reffed).
     * \retval NULL when no node found.
     */
    typedef struct ao2_container_node *(*ao2_container_find_first_fn)(struct ao2_container *self, enum search_flags flags, void *arg, void *v_state);
    
    /*!
     * \brief Find the next container node in a traversal.
     *
     * \param self Container to operate upon.
     * \param v_state Traversal state to restart container traversal.
     * \param prev Previous node returned by the traversal search functions.
     *    The ref ownership is passed back to this function.
     *
     * \retval node-ptr of found node (Reffed).
     * \retval NULL when no node found.
     */
    typedef struct ao2_container_node *(*ao2_container_find_next_fn)(struct ao2_container *self, void *v_state, struct ao2_container_node *prev);
    
    /*!
     * \brief Cleanup the container traversal state.
     *
     * \param v_state Traversal state to cleanup.
     *
     * \return Nothing
     */
    typedef void (*ao2_container_find_cleanup_fn)(void *v_state);
    
    /*!
     * \brief Find the next non-empty iteration node in the container.
     *
     * \param self Container to operate upon.
     * \param prev Previous node returned by the iterator.
     * \param flags search_flags to control iterating the container.
     *   Only AO2_ITERATOR_DESCENDING is useful by the method.
     *
     * \note The container is already locked.
     *
     * \retval node on success.
     * \retval NULL on error or no more nodes in the container.
     */
    typedef struct ao2_container_node *(*ao2_iterator_next_fn)(struct ao2_container *self, struct ao2_container_node *prev, enum ao2_iterator_flags flags);
    
    /*!
     * \brief Display contents of the specified container.
     *
     * \param self Container to dump.
     * \param where User data needed by prnt to determine where to put output.
     * \param prnt Print output callback function to use.
     * \param prnt_obj Callback function to print the given object's key. (NULL if not available)
     *
     * \return Nothing
     */
    typedef void (*ao2_container_display)(struct ao2_container *self, void *where, ao2_prnt_fn *prnt, ao2_prnt_obj_fn *prnt_obj);
    
    
    /*!
     * \brief Display statistics of the specified container.
     *
     * \param self Container to display statistics.
    
     * \param where User data needed by prnt to determine where to put output.
    
     * \param prnt Print output callback function to use.
     *
     * \note The container is already locked for reading.
     *
     * \return Nothing
     */
    
    typedef void (*ao2_container_statistics)(struct ao2_container *self, void *where, ao2_prnt_fn *prnt);
    
    /*!
     * \brief Perform an integrity check on the specified container.
     *
     * \param self Container to check integrity.
     *
     * \note The container is already locked for reading.
     *
     * \retval 0 on success.
     * \retval -1 on error.
     */
    typedef int (*ao2_container_integrity)(struct ao2_container *self);
    
    /*! Container virtual methods template. */
    struct ao2_container_methods {
    	/*! Run Time Type Identification */
    	enum ao2_container_rtti type;
    	/*! Destroy this container. */
    	ao2_container_destroy_fn destroy;
    	/*! \brief Create an empty copy of this container. */
    	ao2_container_alloc_empty_clone_fn alloc_empty_clone;
    	/*! \brief Create an empty copy of this container. (Debug version) */
    	ao2_container_alloc_empty_clone_debug_fn alloc_empty_clone_debug;
    	/*! Create a new container node. */
    	ao2_container_new_node_fn new_node;
    	/*! Insert a node into this container. */
    	ao2_container_insert_fn insert;
    	/*! Traverse the container, find the first node. */
    	ao2_container_find_first_fn traverse_first;
    	/*! Traverse the container, find the next node. */
    	ao2_container_find_next_fn traverse_next;
    	/*! Traverse the container, cleanup state. */
    	ao2_container_find_cleanup_fn traverse_cleanup;
    	/*! Find the next iteration element in the container. */
    	ao2_iterator_next_fn iterator_next;
    #if defined(AST_DEVMODE)
    
    	/*! Display container contents. (Method for debug purposes) */
    	ao2_container_display dump;
    
    	/*! Display container debug statistics. (Method for debug purposes) */
    	ao2_container_statistics stats;
    	/*! Perform an integrity check on the container. (Method for debug purposes) */
    	ao2_container_integrity integrity;
    #endif	/* defined(AST_DEVMODE) */
    };
    
    /*!
     * \brief Generic container type.
     *
     * \details This is the base container type that contains values
     * common to all container types.
     *
     * \todo Linking and unlinking container objects is typically
     * expensive, as it involves a malloc()/free() of a small object
     * which is very inefficient.  To optimize this, we can allocate
     * larger arrays of container nodes when we run out of them, and
     * then manage our own freelist.  This will be more efficient as
     * we can do the freelist management while we hold the lock
     * (that we need anyway).
     */
    struct ao2_container {
    	/*! Container virtual method table. */
    	const struct ao2_container_methods *v_table;
    	/*! Container sort function if the container is sorted. */
    	ao2_sort_fn *sort_fn;
    	/*! Container traversal matching function for ao2_find. */
    	ao2_callback_fn *cmp_fn;
    	/*! The container option flags */
    	uint32_t options;
    	/*! Number of elements in the container. */
    	int elements;
    #if defined(AST_DEVMODE)
    	/*! Number of nodes in the container. */
    	int nodes;
    	/*! Maximum number of empty nodes in the container. (nodes - elements) */
    	int max_empty_nodes;
    #endif	/* defined(AST_DEVMODE) */
    	/*!
    	 * \brief TRUE if the container is being destroyed.
    	 *
    	 * \note The destruction traversal should override any requested
    	 * search order to do the most efficient order for destruction.