/*
 * Mutexes: blocking mutual exclusion locks
 *
 * started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * This file contains the main data structure and API definitions.
 */
#ifndef __LINUX_MUTEX_H
#define __LINUX_MUTEX_H

#include <asm/current.h>
#include <linux/list.h>
#include <linux/spinlock_types.h>
#include <linux/linkage.h>
#include <linux/lockdep.h>
#include <linux/atomic.h>
#include <asm/processor.h>

/*
 * Simple, straightforward mutexes with strict semantics:
 *
 * - only one task can hold the mutex at a time
 * - only the owner can unlock the mutex
 * - multiple unlocks are not permitted
 * - recursive locking is not permitted
 * - a mutex object must be initialized via the API
 * - a mutex object must not be initialized via memset or copying
 * - task may not exit with mutex held
 * - memory areas where held locks reside must not be freed
 * - held mutexes must not be reinitialized
 * - mutexes may not be used in hardware or software interrupt
 *   contexts such as tasklets and timers
 *
 * These semantics are fully enforced when DEBUG_MUTEXES is
 * enabled. Furthermore, besides enforcing the above rules, the mutex
 * debugging code also implements a number of additional features
 * that make lock debugging easier and faster:
 *
 * - uses symbolic names of mutexes, whenever they are printed in debug output
 * - point-of-acquire tracking, symbolic lookup of function names
 * - list of all locks held in the system, printout of them
 * - owner tracking
 * - detects self-recursing locks and prints out all relevant info
 * - detects multi-task circular deadlocks and prints out all affected
 *   locks and tasks (and only those tasks)
 */
struct mutex {
	/* 1: unlocked, 0: locked, negative: locked, possible waiters */
	struct list_head	wait_list;
    atomic_t            count;
};

/*
 * This is the control structure for tasks blocked on mutex,
 * which resides on the blocked task's kernel stack:
 */
struct mutex_waiter {
	struct list_head	list;
    int                *task;
};


#define __MUTEX_INITIALIZER(lockname) \
                { .wait_list = LIST_HEAD_INIT(lockname.wait_list), \
                  .count = ATOMIC_INIT(1) \
                }

#define DEFINE_MUTEX(mutexname) \
        struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)

void __attribute__ ((fastcall)) __attribute__ ((dllimport))
     mutex_init(struct mutex*)__asm__("MutexInit");
void __attribute__ ((fastcall)) __attribute__ ((dllimport))
     mutex_lock(struct mutex*)__asm__("MutexLock");
void __attribute__ ((fastcall)) __attribute__ ((dllimport))
     mutex_unlock(struct mutex*)__asm__("MutexUnlock");

static inline int mutex_lock_interruptible(struct mutex *lock)
{
    mutex_lock(lock);
    return 0;
}

# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)


/**
 * mutex_is_locked - is the mutex locked
 * @lock: the mutex to be queried
 *
 * Returns 1 if the mutex is locked, 0 if unlocked.
 */
static inline int mutex_is_locked(struct mutex *lock)
{
	return atomic_read(&lock->count) != 1;
}

static inline int mutex_trylock(struct mutex *lock)
{
    if (likely(atomic_cmpxchg(&lock->count, 1, 0) == 1))
        return 1;
    return 0;
}

static inline void mutex_destroy(struct mutex *lock)
{

};

#endif