From 4ab605b8d3fe619a5f361e0ec11df8e55fc16339 Mon Sep 17 00:00:00 2001
From: "Sergey Semyonov (Serge)"
Date: Mon, 1 Feb 2016 21:30:55 +0000
Subject: [PATCH] ddk: fixed bug with empty list in wake_up()

git-svn-id: svn://kolibrios.org@6125 a494cfbc-eb01-0410-851d-a64ba20cac60
---
 drivers/include/asm-generic/pci-dma-compat.h |  4 +-
 drivers/include/drm/drm_gem.h                | 14 +--
 drivers/include/linux/pci.h                  | 10 +-
 drivers/include/linux/wait.h                 | 32 ++++---
 drivers/include/linux/workqueue.h            | 98 +++++++++++++++++++-
 5 files changed, 133 insertions(+), 25 deletions(-)

diff --git a/drivers/include/asm-generic/pci-dma-compat.h b/drivers/include/asm-generic/pci-dma-compat.h
index 189ddc21d5..819bd87f58 100644
--- a/drivers/include/asm-generic/pci-dma-compat.h
+++ b/drivers/include/asm-generic/pci-dma-compat.h
@@ -96,12 +96,12 @@ pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr)
 #ifdef CONFIG_PCI
 static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
 {
-    return dma_set_mask(&dev->dev, mask);
+    return 0;
 }
 
 static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
 {
-    return dma_set_coherent_mask(&dev->dev, mask);
+    return 0;
 }
 #endif
diff --git a/drivers/include/drm/drm_gem.h b/drivers/include/drm/drm_gem.h
index 9dcf91323b..15e7f00738 100644
--- a/drivers/include/drm/drm_gem.h
+++ b/drivers/include/drm/drm_gem.h
@@ -152,14 +152,16 @@ drm_gem_object_unreference(struct drm_gem_object *obj)
 static inline void
 drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
 {
-    if (obj && !atomic_add_unless(&obj->refcount.refcount, -1, 1)) {
-        struct drm_device *dev = obj->dev;
+    struct drm_device *dev;
 
-        mutex_lock(&dev->struct_mutex);
-        if (likely(atomic_dec_and_test(&obj->refcount.refcount)))
-            drm_gem_object_free(&obj->refcount);
+    if (!obj)
+        return;
+
+    dev = obj->dev;
+    if (kref_put_mutex(&obj->refcount, drm_gem_object_free, &dev->struct_mutex))
         mutex_unlock(&dev->struct_mutex);
-    }
+    else
+        might_lock(&dev->struct_mutex);
 }
 
 int drm_gem_handle_create(struct drm_file *file_priv,
diff --git a/drivers/include/linux/pci.h b/drivers/include/linux/pci.h
index 292832110d..924cfd73a1 100644
--- a/drivers/include/linux/pci.h
+++ b/drivers/include/linux/pci.h
@@ -1166,11 +1166,19 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
 
 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
 
+static inline void
+_pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
+                         struct resource *res)
+{
+    region->start = res->start;
+    region->end = res->end;
+}
+
 static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
 {
     struct pci_bus_region region;
 
-    pcibios_resource_to_bus(pdev->bus, &region, &pdev->resource[bar]);
+    _pcibios_resource_to_bus(pdev, &region, &pdev->resource[bar]);
     return region.start;
 }
diff --git a/drivers/include/linux/wait.h b/drivers/include/linux/wait.h
index c279fd0808..e7ac83d260 100644
--- a/drivers/include/linux/wait.h
+++ b/drivers/include/linux/wait.h
@@ -248,12 +248,15 @@ void wake_up(wait_queue_head_t *q)
     unsigned long flags;
 
     spin_lock_irqsave(&q->lock, flags);
-    curr = list_first_entry(&q->task_list, typeof(*curr), task_list);
+    curr = list_first_entry_or_null(&q->task_list, typeof(*curr), task_list);
+    if(curr != NULL)
     {
-//        printf("raise event \n");
-        kevent_t event;
-        event.code = -1;
-        RaiseEvent(curr->evnt, 0, &event);
+        if(!WARN_ON(curr->evnt.handle == 0))
+        {
+            kevent_t event = {0};
+            event.code = -1;
+            RaiseEvent(curr->evnt, 0, &event);
+        }
     }
     spin_unlock_irqrestore(&q->lock, flags);
 }
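The wait.h hunk above is the bug named in the subject line: list_first_entry() never returns NULL, so when wake_up() ran against an empty wait queue it computed a "first entry" from the list head itself and handed that garbage pointer to RaiseEvent(). list_first_entry_or_null() makes the empty case explicit. The standalone sketch below reproduces the failure mode in plain C; the list macros mirror their Linux namesakes and struct waiter is a hypothetical stand-in for wait_queue_t, so none of this is KolibriOS code.

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)
#define list_first_entry(head, type, member) \
    list_entry((head)->next, type, member)
/* The fix: yield NULL instead of a garbage pointer when the list is empty. */
#define list_first_entry_or_null(head, type, member) \
    ((head)->next != (head) ? list_first_entry(head, type, member) : NULL)

struct waiter {                     /* hypothetical stand-in for wait_queue_t */
    int evnt_handle;
    struct list_head task_list;
};

int main(void)
{
    struct list_head q = LIST_HEAD_INIT(q);     /* empty wait queue */

    /* Old code path: the "first entry" of an empty list is the head node
     * itself, reinterpreted as a waiter.  Dereferencing it would be
     * undefined behaviour -- this is what crashed wake_up(). */
    struct waiter *bogus = list_first_entry(&q, struct waiter, task_list);
    printf("list_first_entry on empty list:         %p (garbage, never NULL)\n",
           (void *)bogus);

    /* New code path: NULL comes back and the caller can skip the wakeup. */
    struct waiter *safe = list_first_entry_or_null(&q, struct waiter, task_list);
    printf("list_first_entry_or_null on empty list: %p\n", (void *)safe);
    return 0;
}

The added WARN_ON(curr->evnt.handle == 0) guard in the hunk covers the second way to crash: a waiter that is queued but whose kernel event was never actually created.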
@@ -265,12 +268,15 @@ void wake_up_interruptible(wait_queue_head_t *q)
     unsigned long flags;
 
     spin_lock_irqsave(&q->lock, flags);
-    curr = list_first_entry(&q->task_list, typeof(*curr), task_list);
+    curr = list_first_entry_or_null(&q->task_list, typeof(*curr), task_list);
+    if(curr != NULL)
     {
-//        printf("raise event \n");
-        kevent_t event;
-        event.code = -1;
-        RaiseEvent(curr->evnt, 0, &event);
+        if(!WARN_ON(curr->evnt.handle == 0))
+        {
+            kevent_t event = {0};
+            event.code = -1;
+            RaiseEvent(curr->evnt, 0, &event);
+        }
     }
     spin_unlock_irqrestore(&q->lock, flags);
 }
@@ -280,12 +286,12 @@ void wake_up_all(wait_queue_head_t *q)
 {
     wait_queue_t *curr;
     unsigned long flags;
-
     spin_lock_irqsave(&q->lock, flags);
     list_for_each_entry(curr, &q->task_list, task_list)
     {
-//        printf("raise event \n");
-        kevent_t event;
+        if(WARN_ON(curr->evnt.handle == 0))
+            continue;
+        kevent_t event = {0};
         event.code = -1;
         RaiseEvent(curr->evnt, 0, &event);
     }
diff --git a/drivers/include/linux/workqueue.h b/drivers/include/linux/workqueue.h
index 59cc42984c..74c4d6858b 100644
--- a/drivers/include/linux/workqueue.h
+++ b/drivers/include/linux/workqueue.h
@@ -132,6 +132,86 @@ struct workqueue_struct {
     struct list_head worklist;
     struct list_head delayed_worklist;
 };
+
+/*
+ * Workqueue flags and constants.  For details, please refer to
+ * Documentation/workqueue.txt.
+ */
+
+enum {
+    WQ_UNBOUND           = 1 << 1, /* not bound to any cpu */
+    WQ_FREEZABLE         = 1 << 2, /* freeze during suspend */
+    WQ_MEM_RECLAIM       = 1 << 3, /* may be used for memory reclaim */
+    WQ_HIGHPRI           = 1 << 4, /* high priority */
+    WQ_CPU_INTENSIVE     = 1 << 5, /* cpu intensive workqueue */
+    WQ_SYSFS             = 1 << 6, /* visible in sysfs, see wq_sysfs_register() */
+
+    /*
+     * Per-cpu workqueues are generally preferred because they tend to
+     * show better performance thanks to cache locality.  Per-cpu
+     * workqueues exclude the scheduler from choosing the CPU to
+     * execute the worker threads, which has an unfortunate side effect
+     * of increasing power consumption.
+     *
+     * The scheduler considers a CPU idle if it doesn't have any task
+     * to execute and tries to keep idle cores idle to conserve power;
+     * however, for example, a per-cpu work item scheduled from an
+     * interrupt handler on an idle CPU will force the scheduler to
+     * execute the work item on that CPU, breaking the idleness, which
+     * in turn may lead to more scheduling choices which are sub-optimal
+     * in terms of power consumption.
+     *
+     * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
+     * but become unbound if the workqueue.power_efficient kernel param
+     * is specified.  Per-cpu workqueues which are identified as
+     * contributing significantly to power consumption are marked with
+     * this flag, and enabling the power_efficient mode leads to
+     * noticeable power saving at the cost of a small performance
+     * penalty.
+     *
+     * http://thread.gmane.org/gmane.linux.kernel/1480396
+     */
+    WQ_POWER_EFFICIENT   = 1 << 7,
+
+    __WQ_DRAINING        = 1 << 16, /* internal: workqueue is draining */
+    __WQ_ORDERED         = 1 << 17, /* internal: workqueue is ordered */
+
+    WQ_MAX_ACTIVE        = 512,     /* I like 512, better ideas? */
+    WQ_MAX_UNBOUND_PER_CPU = 4,     /* 4 * #cpus for unbound wq */
+    WQ_DFL_ACTIVE        = WQ_MAX_ACTIVE / 2,
+};
+
+/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
+#define WQ_UNBOUND_MAX_ACTIVE \
+    max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)
+
+/*
+ * System-wide workqueues which are always present.
+ *
+ * system_wq is the one used by schedule[_delayed]_work[_on]().
+ * Multi-CPU multi-threaded.  There are users which expect relatively
+ * short queue flush time.  Don't queue works which can run for too
+ * long.
+ *
+ * system_highpri_wq is similar to system_wq but for work items which
+ * require WQ_HIGHPRI.
+ *
+ * system_long_wq is similar to system_wq but may host long running
+ * works.  Queue flushing might take relatively long.
+ *
+ * system_unbound_wq is an unbound workqueue.  Workers are not bound to
+ * any specific CPU, not concurrency managed, and all queued works are
+ * executed immediately as long as the max_active limit is not reached
+ * and resources are available.
+ *
+ * system_freezable_wq is equivalent to system_wq except that it's
+ * freezable.
+ *
+ * *_power_efficient_wq are inclined towards saving power and converted
+ * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
+ * they are the same as their non-power-efficient counterparts - e.g.
+ * system_power_efficient_wq is identical to system_wq if
+ * 'wq_power_efficient' is disabled.  See WQ_POWER_EFFICIENT for more info.
+ */
 extern struct workqueue_struct *system_wq;
 
 void run_workqueue(struct workqueue_struct *cwq);
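One detail of the enum above worth seeing concretely: all caller-settable WQ_* bits stay below bit 8, while the internal __WQ_DRAINING and __WQ_ORDERED bits start at bit 16, so user flags can never collide with internal state. A minimal sketch, assuming only the constants quoted in the hunk above (the describe() helper is invented for the demo):

#include <stdio.h>

enum {
    WQ_UNBOUND     = 1 << 1,
    WQ_FREEZABLE   = 1 << 2,
    WQ_MEM_RECLAIM = 1 << 3,
    WQ_HIGHPRI     = 1 << 4,
    __WQ_ORDERED   = 1 << 17,   /* internal: callers never set this directly */
};

static void describe(unsigned int flags)
{
    /* Test each bit independently; the flags OR together without overlap. */
    printf("flags=0x%05x:%s%s%s%s\n", flags,
           (flags & WQ_UNBOUND)   ? " unbound"   : " per-cpu",
           (flags & WQ_FREEZABLE) ? " freezable" : "",
           (flags & WQ_HIGHPRI)   ? " highpri"   : "",
           (flags & __WQ_ORDERED) ? " ordered"   : "");
}

int main(void)
{
    /* What a freezable alloc_ordered_workqueue() call expands to in the
     * next hunk: WQ_UNBOUND | __WQ_ORDERED | WQ_FREEZABLE. */
    describe(WQ_UNBOUND | __WQ_ORDERED | WQ_FREEZABLE);
    describe(WQ_HIGHPRI | WQ_MEM_RECLAIM);
    return 0;
}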
@@ -139,9 +219,21 @@ void run_workqueue(struct workqueue_struct *cwq);
 
 struct workqueue_struct *alloc_workqueue_key(const char *fmt,
                                              unsigned int flags, int max_active);
-
-#define alloc_ordered_workqueue(fmt, flags, args...) \
-        alloc_workqueue(fmt, WQ_UNBOUND | (flags), 1, ##args)
+/**
+ * alloc_ordered_workqueue - allocate an ordered workqueue
+ * @fmt: printf format for the name of the workqueue
+ * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
+ * @args...: args for @fmt
+ *
+ * Allocate an ordered workqueue.  An ordered workqueue executes at
+ * most one work item at any given time in the queued order.  It is
+ * implemented as an unbound workqueue with @max_active of one.
+ *
+ * RETURNS:
+ * Pointer to the allocated workqueue on success, %NULL on failure.
+ */
+#define alloc_ordered_workqueue(fmt, flags, args...) \
+        alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
 
 bool queue_work(struct workqueue_struct *wq, struct work_struct *work);
 int queue_delayed_work(struct workqueue_struct *wq,
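A usage sketch for the ordered-workqueue interface above. This is hypothetical driver code, not part of the patch: the flip_* names are invented, and it assumes this compat layer provides INIT_WORK() and an alloc_workqueue() wrapper over alloc_workqueue_key(), as upstream Linux does.

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *flip_wq;    /* ordered: max_active == 1 */
static struct work_struct flip_work;

static void flip_handler(struct work_struct *work)
{
    /* Runs strictly in queueing order: an ordered workqueue never runs
     * two items concurrently, so successive flips need no extra lock. */
}

static int flip_init(void)
{
    /* Expands to alloc_workqueue("flip", WQ_UNBOUND | __WQ_ORDERED, 1). */
    flip_wq = alloc_ordered_workqueue("flip", 0);
    if (!flip_wq)
        return -ENOMEM;

    INIT_WORK(&flip_work, flip_handler);
    queue_work(flip_wq, &flip_work);    /* bool: false if already queued */
    return 0;
}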