ddk: update

git-svn-id: svn://kolibrios.org@6102 a494cfbc-eb01-0410-851d-a64ba20cac60
This commit is contained in:
Sergey Semyonov (Serge)
2016-01-27 05:30:28 +00:00
parent b096a68434
commit cf125b5b49
49 changed files with 7225 additions and 580 deletions

View File

@@ -107,7 +107,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
*/
static inline __u32 rol32(__u32 word, unsigned int shift)
{
return (word << shift) | (word >> (32 - shift));
return (word << shift) | (word >> ((-shift) & 31));
}
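The new form avoids shifting a 32-bit value by 32, which is undefined in C when shift is 0: ((-shift) & 31) keeps the right-shift count in 0..31. A minimal standalone check (illustrative only, not part of the commit):

#include <stdint.h>
#include <stdio.h>

/* Same expression as rol32 above; shift == 0 no longer produces "word >> 32". */
static uint32_t rol32_demo(uint32_t word, unsigned int shift)
{
        return (word << shift) | (word >> ((-shift) & 31));
}

int main(void)
{
        printf("%08x\n", rol32_demo(0x80000001u, 0));  /* 80000001 */
        printf("%08x\n", rol32_demo(0x80000001u, 1));  /* 00000003 */
        return 0;
}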
/**

View File

@@ -13,10 +13,15 @@
#ifndef _DEVICE_H_
#define _DEVICE_H_
#include <linux/ioport.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/atomic.h>
#include <linux/gfp.h>
struct device;
enum probe_type {
PROBE_DEFAULT_STRATEGY,
@@ -35,6 +40,7 @@ struct device_driver {
struct device {
struct device *parent;
struct kobject kobj;
const char *init_name; /* initial name of the device */
struct device_driver *driver; /* which driver has allocated this
device */
@@ -55,12 +61,34 @@ struct device {
#ifdef CONFIG_NUMA
int numa_node; /* NUMA node this device is close to */
#endif
u64 *dma_mask; /* dma mask (if dma'able device) */
u64 coherent_dma_mask;/* Like dma_mask, but for
alloc_coherent mappings as
not all hardware supports
64 bit addresses for consistent
allocations such as descriptors. */
unsigned long dma_pfn_offset;
#ifdef CONFIG_DMA_CMA
struct cma *cma_area; /* contiguous memory area for dma
allocations */
#endif
};
static inline struct device *kobj_to_dev(struct kobject *kobj)
{
return container_of(kobj, struct device, kobj);
}
static inline const char *dev_name(const struct device *dev)
{
/* Use the init name until the kobject becomes available */
if (dev->init_name)
return dev->init_name;
return kobject_name(&dev->kobj);
}
extern __printf(2, 3)
int dev_set_name(struct device *dev, const char *name, ...);
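A short usage sketch of the name helpers above (illustrative only; demo_name and the name strings are hypothetical, and it assumes a device whose kobject is already initialised):

static const char *demo_name(struct device *dev)
{
        dev->init_name = "demo0";               /* dev_name() now returns "demo0" */

        dev->init_name = NULL;
        dev_set_name(dev, "card%d", 0);         /* kobject name becomes "card0" */
        return dev_name(dev);                   /* "card0" via kobject_name() */
}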

View File

@@ -1,8 +1,44 @@
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
extern void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t flag);
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
#define DMA_MASK_NONE 0x0ULL
static inline int valid_dma_direction(int dma_direction)
{
return ((dma_direction == DMA_BIDIRECTIONAL) ||
(dma_direction == DMA_TO_DEVICE) ||
(dma_direction == DMA_FROM_DEVICE));
}
static inline int is_device_dma_capable(struct device *dev)
{
return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}
#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
return *dev->dma_mask >> PAGE_SHIFT;
}
#endif
static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
void *ret = dma_alloc_coherent(dev, size, dma_handle,
flag | __GFP_ZERO);
return ret;
}
#endif
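A hypothetical driver-side sketch of the helpers above (illustrative only; it writes dev->dma_mask directly for brevity and assumes the bus code already pointed it at valid storage, and that GFP_KERNEL and the error codes are available):

static int demo_setup_dma(struct device *dev, dma_addr_t *ring_dma)
{
        void *ring;

        *dev->dma_mask = DMA_BIT_MASK(32);      /* device handles 32-bit addresses */
        dev->coherent_dma_mask = DMA_BIT_MASK(32);

        if (!is_device_dma_capable(dev))
                return -EIO;

        /* 4 KiB descriptor ring, zeroed by dma_zalloc_coherent() */
        ring = dma_zalloc_coherent(dev, SZ_4K, ring_dma, GFP_KERNEL);
        if (!ring)
                return -ENOMEM;

        return 0;
}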

View File

@@ -11,7 +11,12 @@
#ifndef LINUX_DMAPOOL_H
#define LINUX_DMAPOOL_H
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
#include <linux/scatterlist.h>
#include <asm/io.h>
struct device;
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
size_t size, size_t align, size_t allocation);
void dma_pool_destroy(struct dma_pool *pool);

View File

@@ -517,7 +517,7 @@ struct fb_cursor_user {
* Register/unregister for framebuffer events
*/
/* The resolution of the passed in fb_info about to change */
/* The resolution of the passed in fb_info about to change */
#define FB_EVENT_MODE_CHANGE 0x01
/* The display on this fb_info is being suspended, no access to the
* framebuffer is allowed any more after that call returns
@@ -550,7 +550,7 @@ struct fb_cursor_user {
#define FB_EVENT_GET_REQ 0x0D
/* Unbind from the console if possible */
#define FB_EVENT_FB_UNBIND 0x0E
/* CONSOLE-SPECIFIC: remap all consoles to new fb - for vga switcheroo */
/* CONSOLE-SPECIFIC: remap all consoles to new fb - for vga_switcheroo */
#define FB_EVENT_REMAP_ALL_CONSOLE 0x0F
/* A hardware display blank early change occurred */
#define FB_EARLY_EVENT_BLANK 0x10
@@ -843,8 +843,8 @@ struct fb_info {
int flags;
struct mutex lock; /* Lock for open/release/ioctl funcs */
struct mutex mm_lock; /* Lock for fb_mmap and smem_* fields */
struct fb_var_screeninfo var; /* Current var */
struct fb_fix_screeninfo fix; /* Current fix */
struct fb_var_screeninfo var; /* Current var */
struct fb_fix_screeninfo fix; /* Current fix */
struct fb_monspecs monspecs; /* Current Monitor specs */
// struct work_struct queue; /* Framebuffer event queue */
// struct fb_pixmap pixmap; /* Image hardware mapper */
@@ -855,12 +855,12 @@ struct fb_info {
#ifdef CONFIG_FB_BACKLIGHT
/* assigned backlight device */
/* set before framebuffer registration,
/* set before framebuffer registration,
remove after unregister */
struct backlight_device *bl_dev;
/* Backlight level curve */
struct mutex bl_curve_mutex;
struct mutex bl_curve_mutex;
u8 bl_curve[FB_BACKLIGHT_LEVELS];
#endif
#ifdef CONFIG_FB_DEFERRED_IO
@@ -875,9 +875,12 @@ struct fb_info {
#ifdef CONFIG_FB_TILEBLITTING
struct fb_tile_ops *tileops; /* Tile Blitting */
#endif
char __iomem *screen_base; /* Virtual address */
unsigned long screen_size; /* Amount of ioremapped VRAM or 0 */
void *pseudo_palette; /* Fake palette of 16 colors */
union {
char __iomem *screen_base; /* Virtual address */
char *screen_buffer;
};
unsigned long screen_size; /* Amount of ioremapped VRAM or 0 */
void *pseudo_palette; /* Fake palette of 16 colors */
#define FBINFO_STATE_RUNNING 0
#define FBINFO_STATE_SUSPENDED 1
u32 state; /* Hardware state, i.e. suspend */
@@ -985,11 +988,11 @@ static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
* `Generic' versions of the frame buffer device operations
*/
extern int fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var);
extern int fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var);
extern int fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var);
extern int fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var);
extern int fb_blank(struct fb_info *info, int blank);
extern void cfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
extern void cfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
extern void cfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
extern void cfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
extern void cfb_imageblit(struct fb_info *info, const struct fb_image *image);
/*
* Drawing operations where framebuffer is in system RAM
@@ -1007,7 +1010,7 @@ extern int register_framebuffer(struct fb_info *fb_info);
extern int unregister_framebuffer(struct fb_info *fb_info);
extern int unlink_framebuffer(struct fb_info *fb_info);
extern int remove_conflicting_framebuffers(struct apertures_struct *a,
const char *name, bool primary);
const char *name, bool primary);
extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);
extern int fb_show_logo(struct fb_info *fb_info, int rotate);
extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size);
@@ -1068,11 +1071,11 @@ static inline bool fb_be_math(struct fb_info *info)
}
/* drivers/video/fbsysfs.c */
//extern struct fb_info *framebuffer_alloc(size_t size, struct device *dev);
//extern void framebuffer_release(struct fb_info *info);
//extern int fb_init_device(struct fb_info *fb_info);
//extern void fb_cleanup_device(struct fb_info *head);
//extern void fb_bl_default_curve(struct fb_info *fb_info, u8 off, u8 min, u8 max);
extern struct fb_info *framebuffer_alloc(size_t size, struct device *dev);
extern void framebuffer_release(struct fb_info *info);
extern int fb_init_device(struct fb_info *fb_info);
extern void fb_cleanup_device(struct fb_info *head);
extern void fb_bl_default_curve(struct fb_info *fb_info, u8 off, u8 min, u8 max);
/* drivers/video/fbmon.c */
#define FB_MAXTIMINGS 0
@@ -1173,7 +1176,7 @@ struct dmt_videomode {
extern const char *fb_mode_option;
extern const struct fb_videomode vesa_modes[];
extern const struct fb_videomode cea_modes[64];
extern const struct fb_videomode cea_modes[65];
extern const struct dmt_videomode dmt_modes[];
struct fb_modelist {

View File

@@ -10,18 +10,13 @@
#define FW_ACTION_NOHOTPLUG 0
#define FW_ACTION_HOTPLUG 1
struct device;
struct platform_device
{
struct device dev;
};
struct firmware {
size_t size;
const u8 *data;
};
struct module;
struct device;
struct builtin_fw {
char *name;

View File

@@ -0,0 +1,8 @@
#ifndef _GCD_H
#define _GCD_H
#include <linux/compiler.h>
unsigned long gcd(unsigned long a, unsigned long b) __attribute_const__;
#endif /* _GCD_H */
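The header only declares gcd(); a typical implementation is the classic Euclidean algorithm, sketched below (the ddk's own definition may differ):

unsigned long gcd(unsigned long a, unsigned long b)
{
        /* Euclid: replace (a, b) with (b, a mod b) until b reaches 0. */
        while (b) {
                unsigned long r = a % b;

                a = b;
                b = r;
        }
        return a;
}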

View File

@@ -258,7 +258,7 @@ struct vm_area_struct;
static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
return gfp_flags & __GFP_DIRECT_RECLAIM;
return (bool __force)(gfp_flags & __GFP_DIRECT_RECLAIM);
}
#ifdef CONFIG_HIGHMEM

View File

@@ -15,7 +15,7 @@
#include <syscall.h>
#include <linux/types.h>
#include <linux/bitops.h>
//#include <linux/init.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
/*

View File

@@ -0,0 +1 @@
//stub

View File

@@ -0,0 +1 @@
//stub

View File

@@ -0,0 +1,71 @@
/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/irqreturn.h>
#include <linux/kref.h>
/*
* These correspond to the IORESOURCE_IRQ_* defines in
* linux/ioport.h to select the interrupt line behaviour. When
* requesting an interrupt without specifying an IRQF_TRIGGER, the
* setting should be assumed to be "as already configured", which
* may be as per machine or firmware initialisation.
*/
#define IRQF_TRIGGER_NONE 0x00000000
#define IRQF_TRIGGER_RISING 0x00000001
#define IRQF_TRIGGER_FALLING 0x00000002
#define IRQF_TRIGGER_HIGH 0x00000004
#define IRQF_TRIGGER_LOW 0x00000008
#define IRQF_TRIGGER_MASK (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE 0x00000010
/*
* These flags used only by the kernel as part of the
* irq handling routines.
*
* IRQF_SHARED - allow sharing the irq among several devices
* IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
* IRQF_TIMER - Flag to mark this interrupt as timer interrupt
* IRQF_PERCPU - Interrupt is per cpu
* IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
* IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
* registered first in a shared interrupt is considered for
* performance reasons)
* IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
* Used by threaded interrupts which need to keep the
* irq line disabled until the threaded handler has been run.
* IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee
* that this interrupt will wake the system from a suspended
* state. See Documentation/power/suspend-and-interrupts.txt
* IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
* IRQF_NO_THREAD - Interrupt cannot be threaded
* IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
* resume time.
* IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
* interrupt handler after suspending interrupts. For system
* wakeup devices users need to implement wakeup detection in
* their interrupt handlers.
*/
#define IRQF_SHARED 0x00000080
#define IRQF_PROBE_SHARED 0x00000100
#define __IRQF_TIMER 0x00000200
#define IRQF_PERCPU 0x00000400
#define IRQF_NOBALANCING 0x00000800
#define IRQF_IRQPOLL 0x00001000
#define IRQF_ONESHOT 0x00002000
#define IRQF_NO_SUSPEND 0x00004000
#define IRQF_FORCE_RESUME 0x00008000
#define IRQF_NO_THREAD 0x00010000
#define IRQF_EARLY_RESUME 0x00020000
#define IRQF_COND_SUSPEND 0x00040000
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);
#endif
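As the comment block explains, trigger and behaviour bits share one flags word; a small sketch of picking them apart (illustrative only; demo_is_level_triggered is hypothetical):

/* IRQF_TRIGGER_MASK isolates the trigger bits from behaviour flags
   such as IRQF_SHARED. */
static int demo_is_level_triggered(unsigned long irqflags)
{
        unsigned long trigger = irqflags & IRQF_TRIGGER_MASK;

        return trigger == IRQF_TRIGGER_HIGH || trigger == IRQF_TRIGGER_LOW;
}

For example, demo_is_level_triggered(IRQF_SHARED | IRQF_TRIGGER_LOW) returns 1.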

View File

@@ -0,0 +1 @@
//stub

View File

@@ -23,12 +23,6 @@ struct resource {
struct resource *parent, *sibling, *child;
};
struct resource_list {
struct resource_list *next;
struct resource *res;
struct pci_dev *dev;
};
/*
* IO resources have these defined flags.
*/
@@ -176,6 +170,15 @@ static inline unsigned long resource_type(const struct resource *res)
{
return res->flags & IORESOURCE_TYPE_BITS;
}
/* True iff r1 completely contains r2 */
static inline bool resource_contains(struct resource *r1, struct resource *r2)
{
if (resource_type(r1) != resource_type(r2))
return false;
if (r1->flags & IORESOURCE_UNSET || r2->flags & IORESOURCE_UNSET)
return false;
return r1->start <= r2->start && r1->end >= r2->end;
}
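A quick illustration of the new helper with hypothetical values (IORESOURCE_MEM is defined earlier in this header):

static bool demo_contains(void)
{
        struct resource win = { .start = 0xd0000000, .end = 0xd00fffff,
                                .flags = IORESOURCE_MEM };
        struct resource bar = { .start = 0xd0000000, .end = 0xd0003fff,
                                .flags = IORESOURCE_MEM };

        /* same type, neither unset, bar's range lies inside win's range */
        return resource_contains(&win, &bar);   /* true */
}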
#endif /* __ASSEMBLY__ */

View File

@@ -161,6 +161,10 @@
*/
#define lower_32_bits(n) ((u32)(n))
struct completion;
struct pt_regs;
struct user;
#ifdef CONFIG_PREEMPT_VOLUNTARY
extern int _cond_resched(void);
# define might_resched() _cond_resched()
@@ -660,12 +664,25 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
BUILD_BUG_ON_ZERO((perms) & 2) + \
(perms))
void free (void *ptr);
typedef unsigned long pgprotval_t;
typedef struct
{
u32 code;
u32 data[5];
}kevent_t;
typedef union
{
struct
{
u32 handle;
u32 euid;
};
u64 raw;
}evhandle_t;
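These are the KolibriOS event types used by the wait-queue code later in this commit; a minimal sketch of raising such an event (illustrative only; RaiseEvent comes from <syscall.h> and demo_raise is hypothetical):

static void demo_raise(evhandle_t handle)
{
        kevent_t event;

        event.code = -1;                /* same completion code wait.h uses */
        RaiseEvent(handle, 0, &event);  /* mirrors the wake_up() path in wait.h */
}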
struct file
{
@@ -678,7 +695,6 @@ struct file
struct vm_area_struct {};
struct address_space {};
#define in_dbg_master() (0)
#define HZ 100
@@ -791,11 +807,6 @@ struct pagelist {
#define get_page(a)
#define put_page(a)
#define pci_map_page(dev, page, offset, size, direction) \
(dma_addr_t)( (offset)+page_to_phys(page))
#define pci_unmap_page(dev, dma_address, size, direction)
#define IS_ENABLED(a) 0
@@ -848,7 +859,6 @@ typedef u64 async_cookie_t;
#define iowrite32(v, addr) writel((v), (addr))
#define __init
#define CONFIG_PAGE_OFFSET 0

View File

@@ -18,7 +18,7 @@
#include <linux/types.h>
#include <linux/list.h>
//#include <linux/sysfs.h>
#include <linux/sysfs.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/kref.h>

View File

@@ -19,7 +19,6 @@
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
struct kref {
atomic_t refcount;
@@ -99,38 +98,6 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)
return kref_sub(kref, 1, release);
}
/**
* kref_put_spinlock_irqsave - decrement refcount for object.
* @kref: object.
* @release: pointer to the function that will clean up the object when the
* last reference to the object is released.
* This pointer is required, and it is not acceptable to pass kfree
* in as this function.
* @lock: lock to take in release case
*
* Behaves identical to kref_put with one exception. If the reference count
* drops to zero, the lock will be taken atomically wrt dropping the reference
* count. The release function has to call spin_unlock() without _irqrestore.
*/
static inline int kref_put_spinlock_irqsave(struct kref *kref,
void (*release)(struct kref *kref),
spinlock_t *lock)
{
unsigned long flags;
WARN_ON(release == NULL);
if (atomic_add_unless(&kref->refcount, -1, 1))
return 0;
spin_lock_irqsave(lock, flags);
if (atomic_dec_and_test(&kref->refcount)) {
release(kref);
local_irq_restore(flags);
return 1;
}
spin_unlock_irqrestore(lock, flags);
return 0;
}
static inline int kref_put_mutex(struct kref *kref,
void (*release)(struct kref *kref),
struct mutex *lock)

View File

@@ -0,0 +1,35 @@
/*
* Copyright (c) 2010 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef PCH_DMA_H
#define PCH_DMA_H
enum pch_dma_width {
PCH_DMA_WIDTH_1_BYTE,
PCH_DMA_WIDTH_2_BYTES,
PCH_DMA_WIDTH_4_BYTES,
};
struct pch_dma_slave {
struct device *dma_dev;
unsigned int chan_id;
dma_addr_t tx_reg;
dma_addr_t rx_reg;
enum pch_dma_width width;
};
#endif

View File

@@ -0,0 +1,11 @@
#ifndef _LINUX_PCI_DMA_H
#define _LINUX_PCI_DMA_H
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) DEFINE_DMA_UNMAP_ADDR(ADDR_NAME);
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) DEFINE_DMA_UNMAP_LEN(LEN_NAME);
#define pci_unmap_addr dma_unmap_addr
#define pci_unmap_addr_set dma_unmap_addr_set
#define pci_unmap_len dma_unmap_len
#define pci_unmap_len_set dma_unmap_len_set
#endif

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -2,6 +2,7 @@
#define __KERNEL_PRINTK__
#include <stdarg.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/cache.h>

View File

@@ -31,6 +31,7 @@
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/rcupdate.h>
struct rb_node {
unsigned long __rb_parent_color;

View File

@@ -0,0 +1,77 @@
/*
* Copyright (C) 2015, Intel Corporation
* Author: Jiang Liu <jiang.liu@linux.intel.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#ifndef _LINUX_RESOURCE_EXT_H
#define _LINUX_RESOURCE_EXT_H
#include <linux/types.h>
#include <linux/list.h>
#include <linux/ioport.h>
#include <linux/slab.h>
/* Represent resource window for bridge devices */
struct resource_win {
struct resource res; /* In master (CPU) address space */
resource_size_t offset; /* Translation offset for bridge */
};
/*
* Common resource list management data structure and interfaces to support
* ACPI, PNP and PCI host bridge etc.
*/
struct resource_entry {
struct list_head node;
struct resource *res; /* In master (CPU) address space */
resource_size_t offset; /* Translation offset for bridge */
struct resource __res; /* Default storage for res */
};
extern struct resource_entry *
resource_list_create_entry(struct resource *res, size_t extra_size);
extern void resource_list_free(struct list_head *head);
static inline void resource_list_add(struct resource_entry *entry,
struct list_head *head)
{
list_add(&entry->node, head);
}
static inline void resource_list_add_tail(struct resource_entry *entry,
struct list_head *head)
{
list_add_tail(&entry->node, head);
}
static inline void resource_list_del(struct resource_entry *entry)
{
list_del(&entry->node);
}
static inline void resource_list_free_entry(struct resource_entry *entry)
{
kfree(entry);
}
static inline void
resource_list_destroy_entry(struct resource_entry *entry)
{
resource_list_del(entry);
resource_list_free_entry(entry);
}
#define resource_list_for_each_entry(entry, list) \
list_for_each_entry((entry), (list), node)
#define resource_list_for_each_entry_safe(entry, tmp, list) \
list_for_each_entry_safe((entry), (tmp), (list), node)
#endif /* _LINUX_RESOURCE_EXT_H */
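A hypothetical walk over a list of bridge windows using the helpers above (illustrative only; error handling omitted and demo_walk_windows is not part of the commit):

static void demo_walk_windows(struct resource *windows, int count)
{
        struct resource_entry *entry;
        LIST_HEAD(head);
        int i;

        for (i = 0; i < count; i++) {
                entry = resource_list_create_entry(&windows[i], 0);
                if (entry)
                        resource_list_add_tail(entry, &head);
        }

        resource_list_for_each_entry(entry, &head) {
                /* entry->res and entry->offset describe one window */
        }

        resource_list_free(&head);
}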

View File

@@ -0,0 +1,47 @@
/*
* include/linux/sizes.h
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __LINUX_SIZES_H__
#define __LINUX_SIZES_H__
#define SZ_1 0x00000001
#define SZ_2 0x00000002
#define SZ_4 0x00000004
#define SZ_8 0x00000008
#define SZ_16 0x00000010
#define SZ_32 0x00000020
#define SZ_64 0x00000040
#define SZ_128 0x00000080
#define SZ_256 0x00000100
#define SZ_512 0x00000200
#define SZ_1K 0x00000400
#define SZ_2K 0x00000800
#define SZ_4K 0x00001000
#define SZ_8K 0x00002000
#define SZ_16K 0x00004000
#define SZ_32K 0x00008000
#define SZ_64K 0x00010000
#define SZ_128K 0x00020000
#define SZ_256K 0x00040000
#define SZ_512K 0x00080000
#define SZ_1M 0x00100000
#define SZ_2M 0x00200000
#define SZ_4M 0x00400000
#define SZ_8M 0x00800000
#define SZ_16M 0x01000000
#define SZ_32M 0x02000000
#define SZ_64M 0x04000000
#define SZ_128M 0x08000000
#define SZ_256M 0x10000000
#define SZ_512M 0x20000000
#define SZ_1G 0x40000000
#define SZ_2G 0x80000000
#endif /* __LINUX_SIZES_H__ */

View File

@@ -0,0 +1,261 @@
/*
* sysfs.h - definitions for the device driver filesystem
*
* Copyright (c) 2001,2002 Patrick Mochel
* Copyright (c) 2004 Silicon Graphics, Inc.
* Copyright (c) 2007 SUSE Linux Products GmbH
* Copyright (c) 2007 Tejun Heo <teheo@suse.de>
*
* Please see Documentation/filesystems/sysfs.txt for more information.
*/
#ifndef _SYSFS_H_
#define _SYSFS_H_
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/atomic.h>
struct kobject;
struct module;
struct bin_attribute;
enum kobj_ns_type;
struct attribute {
const char *name;
umode_t mode;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
bool ignore_lockdep:1;
struct lock_class_key *key;
struct lock_class_key skey;
#endif
};
#ifdef CONFIG_SYSFS
int __must_check sysfs_create_dir_ns(struct kobject *kobj, const void *ns);
void sysfs_remove_dir(struct kobject *kobj);
int __must_check sysfs_rename_dir_ns(struct kobject *kobj, const char *new_name,
const void *new_ns);
int __must_check sysfs_move_dir_ns(struct kobject *kobj,
struct kobject *new_parent_kobj,
const void *new_ns);
int __must_check sysfs_create_mount_point(struct kobject *parent_kobj,
const char *name);
void sysfs_remove_mount_point(struct kobject *parent_kobj,
const char *name);
int __must_check sysfs_create_file_ns(struct kobject *kobj,
const struct attribute *attr,
const void *ns);
int __must_check sysfs_create_files(struct kobject *kobj,
const struct attribute **attr);
int __must_check sysfs_chmod_file(struct kobject *kobj,
const struct attribute *attr, umode_t mode);
void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr,
const void *ns);
bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr);
void sysfs_remove_files(struct kobject *kobj, const struct attribute **attr);
int __must_check sysfs_create_bin_file(struct kobject *kobj,
const struct bin_attribute *attr);
void sysfs_remove_bin_file(struct kobject *kobj,
const struct bin_attribute *attr);
int __must_check sysfs_create_link(struct kobject *kobj, struct kobject *target,
const char *name);
int __must_check sysfs_create_link_nowarn(struct kobject *kobj,
struct kobject *target,
const char *name);
void sysfs_remove_link(struct kobject *kobj, const char *name);
int sysfs_rename_link_ns(struct kobject *kobj, struct kobject *target,
const char *old_name, const char *new_name,
const void *new_ns);
void sysfs_delete_link(struct kobject *dir, struct kobject *targ,
const char *name);
int __must_check sysfs_create_group(struct kobject *kobj,
const struct attribute_group *grp);
int __must_check sysfs_create_groups(struct kobject *kobj,
const struct attribute_group **groups);
int sysfs_update_group(struct kobject *kobj,
const struct attribute_group *grp);
void sysfs_remove_group(struct kobject *kobj,
const struct attribute_group *grp);
void sysfs_remove_groups(struct kobject *kobj,
const struct attribute_group **groups);
int sysfs_add_file_to_group(struct kobject *kobj,
const struct attribute *attr, const char *group);
void sysfs_remove_file_from_group(struct kobject *kobj,
const struct attribute *attr, const char *group);
int sysfs_merge_group(struct kobject *kobj,
const struct attribute_group *grp);
void sysfs_unmerge_group(struct kobject *kobj,
const struct attribute_group *grp);
int sysfs_add_link_to_group(struct kobject *kobj, const char *group_name,
struct kobject *target, const char *link_name);
void sysfs_remove_link_from_group(struct kobject *kobj, const char *group_name,
const char *link_name);
int __compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj,
struct kobject *target_kobj,
const char *target_name);
void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr);
int __must_check sysfs_init(void);
static inline void sysfs_enable_ns(struct kernfs_node *kn)
{
return kernfs_enable_ns(kn);
}
#else /* CONFIG_SYSFS */
static inline int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
{
return 0;
}
static inline void sysfs_remove_dir(struct kobject *kobj)
{
}
static inline int sysfs_rename_dir_ns(struct kobject *kobj,
const char *new_name, const void *new_ns)
{
return 0;
}
static inline int sysfs_move_dir_ns(struct kobject *kobj,
struct kobject *new_parent_kobj,
const void *new_ns)
{
return 0;
}
static inline int sysfs_create_mount_point(struct kobject *parent_kobj,
const char *name)
{
return 0;
}
static inline void sysfs_remove_mount_point(struct kobject *parent_kobj,
const char *name)
{
}
static inline int sysfs_create_file_ns(struct kobject *kobj,
const struct attribute *attr,
const void *ns)
{
return 0;
}
static inline int sysfs_create_files(struct kobject *kobj,
const struct attribute **attr)
{
return 0;
}
static inline int sysfs_chmod_file(struct kobject *kobj,
const struct attribute *attr, umode_t mode)
{
return 0;
}
static inline void sysfs_remove_file_ns(struct kobject *kobj,
const struct attribute *attr,
const void *ns)
{
}
static inline bool sysfs_remove_file_self(struct kobject *kobj,
const struct attribute *attr)
{
return false;
}
static inline void sysfs_remove_files(struct kobject *kobj,
const struct attribute **attr)
{
}
static inline int sysfs_create_bin_file(struct kobject *kobj,
const struct bin_attribute *attr)
{
return 0;
}
static inline void sysfs_remove_bin_file(struct kobject *kobj,
const struct bin_attribute *attr)
{
}
#define sysfs_create_link(kobj,target, name) (0)
static inline int sysfs_create_link_nowarn(struct kobject *kobj,
struct kobject *target,
const char *name)
{
return 0;
}
#define sysfs_remove_link(kobj, name)
static inline int sysfs_rename_link_ns(struct kobject *k, struct kobject *t,
const char *old_name,
const char *new_name, const void *ns)
{
return 0;
}
static inline void sysfs_delete_link(struct kobject *k, struct kobject *t,
const char *name)
{
}
static inline int sysfs_add_file_to_group(struct kobject *kobj,
const struct attribute *attr, const char *group)
{
return 0;
}
static inline void sysfs_remove_file_from_group(struct kobject *kobj,
const struct attribute *attr, const char *group)
{
}
static inline int sysfs_add_link_to_group(struct kobject *kobj,
const char *group_name, struct kobject *target,
const char *link_name)
{
return 0;
}
static inline void sysfs_remove_link_from_group(struct kobject *kobj,
const char *group_name, const char *link_name)
{
}
static inline int __compat_only_sysfs_link_entry_to_kobj(
struct kobject *kobj,
struct kobject *target_kobj,
const char *target_name)
{
return 0;
}
static inline void sysfs_notify(struct kobject *kobj, const char *dir,
const char *attr)
{
}
static inline int __must_check sysfs_init(void)
{
return 0;
}
#endif /* CONFIG_SYSFS */
#endif /* _SYSFS_H_ */

View File

@@ -35,7 +35,7 @@ typedef __kernel_gid16_t gid16_t;
typedef unsigned long uintptr_t;
#ifdef CONFIG_UID16
#ifdef CONFIG_HAVE_UID16
/* This is defined by include/asm-{arch}/posix_types.h */
typedef __kernel_old_uid_t old_uid_t;
typedef __kernel_old_gid_t old_gid_t;

View File

@@ -7,29 +7,95 @@
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/current.h>
#include <syscall.h>
typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
/* __wait_queue::flags */
#define WQ_FLAG_EXCLUSIVE 0x01
#define WQ_FLAG_WOKEN 0x02
struct __wait_queue {
unsigned int flags;
void *private;
wait_queue_func_t func;
struct list_head task_list;
evhandle_t evnt;
};
struct wait_bit_key {
void *flags;
int bit_nr;
#define WAIT_ATOMIC_T_BIT_NR -1
unsigned long timeout;
};
struct wait_bit_queue {
struct wait_bit_key key;
wait_queue_t wait;
};
struct __wait_queue_head {
spinlock_t lock;
struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;
struct __wait_queue
{
wait_queue_func_t func;
struct list_head task_list;
evhandle_t evnt;
};
struct task_struct;
struct __wait_queue_head
/*
* Macros for declaration and initialisation of the datatypes
*/
#define __WAITQUEUE_INITIALIZER(name, tsk) { \
.private = tsk, \
.func = default_wake_function, \
.task_list = { NULL, NULL } }
#define DECLARE_WAITQUEUE(name, tsk) \
wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
.lock = __SPIN_LOCK_UNLOCKED(name.lock), \
.task_list = { &(name).task_list, &(name).task_list } }
#define DECLARE_WAIT_QUEUE_HEAD(name) \
wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
{ .flags = word, .bit_nr = bit, }
#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p) \
{ .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }
extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);
#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
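A minimal usage sketch of the declaration macros (illustrative only; the sleeping side of the queue is outside this hunk, and demo_wq/demo_complete are hypothetical):

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);

static void demo_complete(void)
{
        /* this port's wake_up() expects at least one queued waiter,
           so check waitqueue_active() first */
        if (waitqueue_active(&demo_wq))
                wake_up(&demo_wq);
}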
static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
spinlock_t lock;
struct list_head task_list;
};
q->flags = 0;
q->private = p;
q->func = default_wake_function;
}
static inline void
init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
{
q->flags = 0;
q->private = NULL;
q->func = func;
}
static inline int waitqueue_active(wait_queue_head_t *q)
{
return !list_empty(&q->task_list);
@@ -41,10 +107,57 @@ extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
list_add(&new->task_list, &head->task_list);
list_add(&new->task_list, &head->task_list);
}
/*
* Used for wake-one threads:
*/
static inline void
__add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
wait->flags |= WQ_FLAG_EXCLUSIVE;
__add_wait_queue(q, wait);
}
static inline void __add_wait_queue_tail(wait_queue_head_t *head,
wait_queue_t *new)
{
list_add_tail(&new->task_list, &head->task_list);
}
static inline void
__add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
wait->flags |= WQ_FLAG_EXCLUSIVE;
__add_wait_queue_tail(q, wait);
}
static inline void
__remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
{
list_del(&old->task_list);
}
typedef int wait_bit_action_f(struct wait_bit_key *, int mode);
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
void wake_up_bit(void *, int);
void wake_up_atomic_t(atomic_t *);
int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_bit_timeout(void *, int, wait_bit_action_f *, unsigned, unsigned long);
int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);
/*
#define __wait_event(wq, condition) \
do { \
DEFINE_WAIT(__wait); \
@@ -128,6 +241,39 @@ do{ \
__ret; \
})
static inline
void wake_up(wait_queue_head_t *q)
{
wait_queue_t *curr;
unsigned long flags;
spin_lock_irqsave(&q->lock, flags);
curr = list_first_entry(&q->task_list, typeof(*curr), task_list);
{
// printf("raise event \n");
kevent_t event;
event.code = -1;
RaiseEvent(curr->evnt, 0, &event);
}
spin_unlock_irqrestore(&q->lock, flags);
}
static inline
void wake_up_interruptible(wait_queue_head_t *q)
{
wait_queue_t *curr;
unsigned long flags;
spin_lock_irqsave(&q->lock, flags);
curr = list_first_entry(&q->task_list, typeof(*curr), task_list);
{
// printf("raise event \n");
kevent_t event;
event.code = -1;
RaiseEvent(curr->evnt, 0, &event);
}
spin_unlock_irqrestore(&q->lock, flags);
}
static inline
void wake_up_all(wait_queue_head_t *q)
@@ -139,7 +285,6 @@ void wake_up_all(wait_queue_head_t *q)
list_for_each_entry(curr, &q->task_list, task_list)
{
// printf("raise event \n");
kevent_t event;
event.code = -1;
RaiseEvent(curr->evnt, 0, &event);

View File

@@ -7,11 +7,12 @@
#include <linux/list.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <syscall.h>
struct workqueue_struct;
struct work_struct;
@@ -19,44 +20,102 @@ typedef void (*work_func_t)(struct work_struct *work);
void __stdcall delayed_work_timer_fn(unsigned long __data);
/*
* Workqueue flags and constants. For details, please refer to
* Documentation/workqueue.txt.
* The first word is the work queue pointer and the flags rolled into
* one
*/
#define work_data_bits(work) ((unsigned long *)(&(work)->data))
enum {
WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */
WQ_UNBOUND = 1 << 1, /* not bound to any cpu */
WQ_FREEZABLE = 1 << 2, /* freeze during suspend */
WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */
WQ_HIGHPRI = 1 << 4, /* high priority */
WQ_CPU_INTENSIVE = 1 << 5, /* cpu intensive workqueue */
WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */
WORK_STRUCT_DELAYED_BIT = 1, /* work item is delayed */
WORK_STRUCT_PWQ_BIT = 2, /* data points to pwq */
WORK_STRUCT_LINKED_BIT = 3, /* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
WORK_STRUCT_STATIC_BIT = 4, /* static initializer (debugobjects) */
WORK_STRUCT_COLOR_SHIFT = 5, /* color for workqueue flushing */
#else
WORK_STRUCT_COLOR_SHIFT = 4, /* color for workqueue flushing */
#endif
WQ_DRAINING = 1 << 6, /* internal: workqueue is draining */
WQ_RESCUER = 1 << 7, /* internal: workqueue has rescuer */
WORK_STRUCT_COLOR_BITS = 4,
WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2,
};
WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT,
WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT,
WORK_STRUCT_PWQ = 1 << WORK_STRUCT_PWQ_BIT,
WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
WORK_STRUCT_STATIC = 1 << WORK_STRUCT_STATIC_BIT,
#else
WORK_STRUCT_STATIC = 0,
#endif
/*
* The last color is no color used for works which don't
* participate in workqueue flushing.
*/
WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS) - 1,
WORK_NO_COLOR = WORK_NR_COLORS,
struct workqueue_struct {
spinlock_t lock;
struct list_head worklist;
struct list_head delayed_worklist;
/* not bound to any CPU, prefer the local CPU */
WORK_CPU_UNBOUND = NR_CPUS,
/*
* Reserve 7 bits off of pwq pointer w/ debugobjects turned off.
* This makes pwqs aligned to 256 bytes and allows 15 workqueue
* flush colors.
*/
WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT +
WORK_STRUCT_COLOR_BITS,
/* data contains off-queue information when !WORK_STRUCT_PWQ */
WORK_OFFQ_FLAG_BASE = WORK_STRUCT_COLOR_SHIFT,
__WORK_OFFQ_CANCELING = WORK_OFFQ_FLAG_BASE,
WORK_OFFQ_CANCELING = (1 << __WORK_OFFQ_CANCELING),
/*
* When a work item is off queue, its high bits point to the last
* pool it was on. Cap at 31 bits and use the highest number to
* indicate that no pool is associated.
*/
WORK_OFFQ_FLAG_BITS = 1,
WORK_OFFQ_POOL_SHIFT = WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
WORK_OFFQ_LEFT = BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
WORK_OFFQ_POOL_BITS = WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
WORK_OFFQ_POOL_NONE = (1LU << WORK_OFFQ_POOL_BITS) - 1,
/* convenience constants */
WORK_STRUCT_FLAG_MASK = (1UL << WORK_STRUCT_FLAG_BITS) - 1,
WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
WORK_STRUCT_NO_POOL = (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,
/* bit mask for work_busy() return values */
WORK_BUSY_PENDING = 1 << 0,
WORK_BUSY_RUNNING = 1 << 1,
/* maximum string length for set_worker_desc() */
WORKER_DESC_LEN = 24,
};
struct work_struct {
struct list_head entry;
struct workqueue_struct *data;
work_func_t func;
struct list_head entry;
struct workqueue_struct *data;
work_func_t func;
#ifdef CONFIG_LOCKDEP
struct lockdep_map lockdep_map;
#endif
};
#define WORK_DATA_INIT() ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT() \
ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC)
struct delayed_work {
struct work_struct work;
unsigned int delay;
struct work_struct work;
unsigned int delay;
/* target workqueue and CPU ->timer uses to queue ->work */
struct workqueue_struct *wq;
int cpu;
};
static inline struct delayed_work *to_delayed_work(struct work_struct *work)
@@ -64,6 +123,15 @@ static inline struct delayed_work *to_delayed_work(struct work_struct *work)
return container_of(work, struct delayed_work, work);
}
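A small sketch of the usual pattern around to_delayed_work() (illustrative only; demo_work_handler is hypothetical):

static void demo_work_handler(struct work_struct *work)
{
        /* valid only when 'work' is embedded in a struct delayed_work */
        struct delayed_work *dwork = to_delayed_work(work);

        /* dwork->wq and dwork->cpu record where the work was queued */
        (void)dwork;
}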
struct execute_work {
struct work_struct work;
};
struct workqueue_struct {
spinlock_t lock;
struct list_head worklist;
struct list_head delayed_worklist;
};
extern struct workqueue_struct *system_wq;
void run_workqueue(struct workqueue_struct *cwq);