diff --git a/drivers/ddk/Makefile b/drivers/ddk/Makefile index 5b64b5e26..326368511 100644 --- a/drivers/ddk/Makefile +++ b/drivers/ddk/Makefile @@ -14,7 +14,7 @@ DEFINES = -DKOLIBRI -D__KERNEL__ -DCONFIG_X86_32 -DCONFIG_DMI -DCONFIG_TINY_RCU DEFINES+= -DCONFIG_X86_L1_CACHE_SHIFT=6 -DCONFIG_ARCH_HAS_CACHE_LINE_SIZE CFLAGS = -c -Os $(INCLUDES) $(DEFINES) -march=i686 -fomit-frame-pointer -fno-builtin-printf \ --mno-stack-arg-probe -mpreferred-stack-boundary=2 -mincoming-stack-boundary=2 +-mno-stack-arg-probe -mpreferred-stack-boundary=2 -mincoming-stack-boundary=2 -fno-ident NAME:= libddk @@ -36,7 +36,6 @@ NAME_SRCS:= \ linux/firmware.c \ linux/hdmi.c \ linux/kasprintf.c \ - linux/kref.c \ linux/list_sort.c \ linux/mutex.c \ linux/rbtree.c \ diff --git a/drivers/ddk/core.S b/drivers/ddk/core.S index 5dafcb514..c71a59af0 100644 --- a/drivers/ddk/core.S +++ b/drivers/ddk/core.S @@ -27,6 +27,7 @@ .global _FreeKernelSpace .global _FreePage + .global _GetClockNs .global _GetCpuFreq .global _GetDisplay .global _GetEvent @@ -103,6 +104,8 @@ .def _FreeKernelSpace; .scl 2; .type 32; .endef .def _FreePage; .scl 2; .type 32; .endef + .def _GetClockNs; .scl 2; .type 32; .endef + .def _GetDisplay; .scl 2; .type 32; .endef .def _GetDisplay; .scl 2; .type 32; .endef @@ -181,6 +184,7 @@ _DiskMediaChanged: _FreeKernelSpace: _FreePage: +_GetClockNs: _GetCpuFreq: _GetDisplay: _GetEvent: @@ -259,6 +263,7 @@ _WaitEventTimeout: .ascii " -export:FreeKernelSpace" # stdcall .ascii " -export:FreePage" # + .ascii " -export:GetClockNs" # .ascii " -export:GetCpuFreq" # .ascii " -export:GetDisplay" # stdcall .ascii " -export:GetEvent" # diff --git a/drivers/ddk/linux/bitmap.c b/drivers/ddk/linux/bitmap.c index 3f95a7456..fdbd186ab 100644 --- a/drivers/ddk/linux/bitmap.c +++ b/drivers/ddk/linux/bitmap.c @@ -41,36 +41,6 @@ * for the best explanations of this ordering. */ -int __bitmap_empty(const unsigned long *bitmap, unsigned int bits) -{ - unsigned int k, lim = bits/BITS_PER_LONG; - for (k = 0; k < lim; ++k) - if (bitmap[k]) - return 0; - - if (bits % BITS_PER_LONG) - if (bitmap[k] & BITMAP_LAST_WORD_MASK(bits)) - return 0; - - return 1; -} -EXPORT_SYMBOL(__bitmap_empty); - -int __bitmap_full(const unsigned long *bitmap, unsigned int bits) -{ - unsigned int k, lim = bits/BITS_PER_LONG; - for (k = 0; k < lim; ++k) - if (~bitmap[k]) - return 0; - - if (bits % BITS_PER_LONG) - if (~bitmap[k] & BITMAP_LAST_WORD_MASK(bits)) - return 0; - - return 1; -} -EXPORT_SYMBOL(__bitmap_full); - int __bitmap_equal(const unsigned long *bitmap1, const unsigned long *bitmap2, unsigned int bits) { @@ -103,18 +73,18 @@ EXPORT_SYMBOL(__bitmap_complement); * @dst : destination bitmap * @src : source bitmap * @shift : shift by this many bits - * @bits : bitmap size, in bits + * @nbits : bitmap size, in bits * * Shifting right (dividing) means moving bits in the MS -> LS bit * direction. Zeros are fed into the vacated MS positions and the * LS bits shifted off the bottom are lost. 
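As a concrete illustration of the MS -> LS direction this comment describes, here is a minimal single-word userspace sketch (a hypothetical demo, not part of the patch):

```c
/* Single-word illustration of the right-shift semantics above:
 * bits move MS -> LS and zeros are fed into the vacated top bits.
 * Hypothetical userspace demo, not the kernel implementation. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t src = 0xF0u;    /* bits 4..7 set */
    uint32_t dst = src >> 3; /* now bits 1..4; top bits zero-filled */

    assert(dst == 0x1Eu);
    return 0;
}
```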
*/ -void __bitmap_shift_right(unsigned long *dst, - const unsigned long *src, int shift, int bits) +void __bitmap_shift_right(unsigned long *dst, const unsigned long *src, + unsigned shift, unsigned nbits) { - int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG; - int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG; - unsigned long mask = (1UL << left) - 1; + unsigned k, lim = BITS_TO_LONGS(nbits); + unsigned off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG; + unsigned long mask = BITMAP_LAST_WORD_MASK(nbits); for (k = 0; off + k < lim; ++k) { unsigned long upper, lower; @@ -126,17 +96,15 @@ void __bitmap_shift_right(unsigned long *dst, upper = 0; else { upper = src[off + k + 1]; - if (off + k + 1 == lim - 1 && left) + if (off + k + 1 == lim - 1) upper &= mask; + upper <<= (BITS_PER_LONG - rem); } lower = src[off + k]; - if (left && off + k == lim - 1) + if (off + k == lim - 1) lower &= mask; - dst[k] = lower >> rem; - if (rem) - dst[k] |= upper << (BITS_PER_LONG - rem); - if (left && k == lim - 1) - dst[k] &= mask; + lower >>= rem; + dst[k] = lower | upper; } if (off) memset(&dst[lim - off], 0, off*sizeof(unsigned long)); @@ -149,18 +117,19 @@ EXPORT_SYMBOL(__bitmap_shift_right); * @dst : destination bitmap * @src : source bitmap * @shift : shift by this many bits - * @bits : bitmap size, in bits + * @nbits : bitmap size, in bits * * Shifting left (multiplying) means moving bits in the LS -> MS * direction. Zeros are fed into the vacated LS bit positions * and those MS bits shifted off the top are lost. */ -void __bitmap_shift_left(unsigned long *dst, - const unsigned long *src, int shift, int bits) +void __bitmap_shift_left(unsigned long *dst, const unsigned long *src, + unsigned int shift, unsigned int nbits) { - int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG; - int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG; + int k; + unsigned int lim = BITS_TO_LONGS(nbits); + unsigned int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG; for (k = lim - off - 1; k >= 0; --k) { unsigned long upper, lower; @@ -169,17 +138,11 @@ void __bitmap_shift_left(unsigned long *dst, * word below and make them the bottom rem bits of result. */ if (rem && k > 0) - lower = src[k - 1]; + lower = src[k - 1] >> (BITS_PER_LONG - rem); else lower = 0; - upper = src[k]; - if (left && k == lim - 1) - upper &= (1UL << left) - 1; - dst[k + off] = upper << rem; - if (rem) - dst[k + off] |= lower >> (BITS_PER_LONG - rem); - if (left && k + off == lim - 1) - dst[k + off] &= (1UL << left) - 1; + upper = src[k] << rem; + dst[k + off] = lower | upper; } if (off) memset(dst, 0, off*sizeof(unsigned long)); @@ -382,10 +345,10 @@ EXPORT_SYMBOL(bitmap_find_next_zero_area_off); /** * bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap * @buf: pointer to a bitmap - * @pos: a bit position in @buf (0 <= @pos < @bits) - * @bits: number of valid bit positions in @buf + * @pos: a bit position in @buf (0 <= @pos < @nbits) + * @nbits: number of valid bit positions in @buf * - * Map the bit at position @pos in @buf (of length @bits) to the + * Map the bit at position @pos in @buf (of length @nbits) to the * ordinal of which set bit it is. If it is not set or if @pos * is not a valid bit position, map to -1. * @@ -397,56 +360,40 @@ EXPORT_SYMBOL(bitmap_find_next_zero_area_off); * * The bit positions 0 through @bits are valid positions in @buf. 
*/ -static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits) +static int bitmap_pos_to_ord(const unsigned long *buf, unsigned int pos, unsigned int nbits) { - int i, ord; - - if (pos < 0 || pos >= bits || !test_bit(pos, buf)) + if (pos >= nbits || !test_bit(pos, buf)) return -1; - i = find_first_bit(buf, bits); - ord = 0; - while (i < pos) { - i = find_next_bit(buf, bits, i + 1); - ord++; - } - BUG_ON(i != pos); - - return ord; + return __bitmap_weight(buf, pos); } /** * bitmap_ord_to_pos - find position of n-th set bit in bitmap * @buf: pointer to bitmap * @ord: ordinal bit position (n-th set bit, n >= 0) - * @bits: number of valid bit positions in @buf + * @nbits: number of valid bit positions in @buf * * Map the ordinal offset of bit @ord in @buf to its position in @buf. - * Value of @ord should be in range 0 <= @ord < weight(buf), else - * results are undefined. + * Value of @ord should be in range 0 <= @ord < weight(buf). If @ord + * >= weight(buf), returns @nbits. * * If for example, just bits 4 through 7 are set in @buf, then @ord * values 0 through 3 will get mapped to 4 through 7, respectively, - * and all other @ord values return undefined values. When @ord value 3 + * and all other @ord values return @nbits. When @ord value 3 * gets mapped to (returns) @pos value 7 in this example, that means * that the 3rd set bit (starting with 0th) is at position 7 in @buf. * - * The bit positions 0 through @bits are valid positions in @buf. + * The bit positions 0 through @nbits-1 are valid positions in @buf. */ -int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits) +unsigned int bitmap_ord_to_pos(const unsigned long *buf, unsigned int ord, unsigned int nbits) { - int pos = 0; + unsigned int pos; - if (ord >= 0 && ord < bits) { - int i; - - for (i = find_first_bit(buf, bits); - i < bits && ord > 0; - i = find_next_bit(buf, bits, i + 1)) + for (pos = find_first_bit(buf, nbits); + pos < nbits && ord; + pos = find_next_bit(buf, nbits, pos + 1)) ord--; - if (i < bits && ord == 0) - pos = i; - } return pos; } @@ -457,7 +404,7 @@ int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits) * @src: subset to be remapped * @old: defines domain of map * @new: defines range of map - * @bits: number of bits in each of these bitmaps + * @nbits: number of bits in each of these bitmaps * * Let @old and @new define a mapping of bit positions, such that * whatever position is held by the n-th set bit in @old is mapped * @@ -485,22 +432,22 @@ */ void bitmap_remap(unsigned long *dst, const unsigned long *src, const unsigned long *old, const unsigned long *new, - int bits) + unsigned int nbits) { - int oldbit, w; + unsigned int oldbit, w; if (dst == src) /* following doesn't handle inplace remaps */ return; - bitmap_zero(dst, bits); + bitmap_zero(dst, nbits); - w = bitmap_weight(new, bits); - for_each_set_bit(oldbit, src, bits) { - int n = bitmap_pos_to_ord(old, oldbit, bits); + w = bitmap_weight(new, nbits); + for_each_set_bit(oldbit, src, nbits) { + int n = bitmap_pos_to_ord(old, oldbit, nbits); if (n < 0 || w == 0) set_bit(oldbit, dst); /* identity map */ else - set_bit(bitmap_ord_to_pos(new, n % w, bits), dst); + set_bit(bitmap_ord_to_pos(new, n % w, nbits), dst); } } EXPORT_SYMBOL(bitmap_remap); @@ -557,7 +504,7 @@ EXPORT_SYMBOL(bitmap_bitremap); * read it, you're overqualified for your current job.) 
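A worked instance of the remap rule implemented above (the n-th set bit of @old maps to the n-th set bit of @new); a hypothetical single-word userspace sketch with made-up values:

```c
/* The n-th set bit of the old map corresponds to the n-th set bit of
 * the new map; source bits outside the old map keep their position
 * (identity map). Hypothetical single-word userspace sketch. */
#include <assert.h>
#include <stdint.h>

static unsigned ord_to_pos(uint32_t word, unsigned ord)
{
    unsigned pos;

    for (pos = 0; pos < 32; pos++)
        if (((word >> pos) & 1) && ord-- == 0)
            break;
    return pos;
}

int main(void)
{
    uint32_t old_map = 0x0030u; /* bits 4,5 : domain of the map */
    uint32_t new_map = 0x0300u; /* bits 8,9 : range of the map  */

    /* bit 4 is ordinal 0 in old_map -> ordinal 0 in new_map is bit 8 */
    assert(ord_to_pos(old_map, 0) == 4 && ord_to_pos(new_map, 0) == 8);
    /* bit 5 is ordinal 1 in old_map -> ordinal 1 in new_map is bit 9 */
    assert(ord_to_pos(old_map, 1) == 5 && ord_to_pos(new_map, 1) == 9);
    return 0;
}
```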
* * In other words, @orig is mapped onto (surjectively) @dst, - * using the the map { <n, m> | the n-th bit of @relmap is the + * using the map { <n, m> | the n-th bit of @relmap is the * m-th set bit of @relmap }. * * Any set bits in @orig above bit number W, where W is the * @@ -644,9 +591,9 @@ EXPORT_SYMBOL(bitmap_bitremap); * All bits in @dst not set by the above rule are cleared. */ void bitmap_onto(unsigned long *dst, const unsigned long *orig, - const unsigned long *relmap, int bits) + const unsigned long *relmap, unsigned int bits) { - int n, m; /* same meaning as in above comment */ + unsigned int n, m; /* same meaning as in above comment */ if (dst == orig) /* following doesn't handle inplace mappings */ return; @@ -677,22 +624,22 @@ EXPORT_SYMBOL(bitmap_onto); * @dst: resulting smaller bitmap * @orig: original larger bitmap * @sz: specified size - * @bits: number of bits in each of these bitmaps + * @nbits: number of bits in each of these bitmaps * * For each bit oldbit in @orig, set bit oldbit mod @sz in @dst. * Clear all other bits in @dst. See further the comment and * Example [2] for bitmap_onto() for why and how to use this. */ void bitmap_fold(unsigned long *dst, const unsigned long *orig, - int sz, int bits) + unsigned int sz, unsigned int nbits) { - int oldbit; + unsigned int oldbit; if (dst == orig) /* following doesn't handle inplace mappings */ return; - bitmap_zero(dst, bits); + bitmap_zero(dst, nbits); - for_each_set_bit(oldbit, orig, bits) + for_each_set_bit(oldbit, orig, nbits) set_bit(oldbit % sz, dst); } EXPORT_SYMBOL(bitmap_fold); @@ -845,16 +792,17 @@ EXPORT_SYMBOL(bitmap_allocate_region); * * Require nbits % BITS_PER_LONG == 0. */ -void bitmap_copy_le(void *dst, const unsigned long *src, int nbits) +#ifdef __BIG_ENDIAN +void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits) { - unsigned long *d = dst; - int i; + unsigned int i; for (i = 0; i < nbits/BITS_PER_LONG; i++) { if (BITS_PER_LONG == 64) - d[i] = cpu_to_le64(src[i]); + dst[i] = cpu_to_le64(src[i]); else - d[i] = cpu_to_le32(src[i]); + dst[i] = cpu_to_le32(src[i]); } } EXPORT_SYMBOL(bitmap_copy_le); +#endif diff --git a/drivers/ddk/linux/mutex.c b/drivers/ddk/linux/mutex.c index e8c5c2739..5bbc5c110 100644 --- a/drivers/ddk/linux/mutex.c +++ b/drivers/ddk/linux/mutex.c @@ -24,6 +24,27 @@ #include #include #include + +struct kos_taskdata +{ + u32 event_mask; + u32 pid; + u16 r0; + u8 state; + u8 r1; + u16 r2; + u8 wnd_number; + u8 r3; + u32 mem_start; + u32 counter_sum; + u32 counter_add; + u32 cpu_usage; +}__attribute__((packed)); + +static inline void mutex_set_owner(struct mutex *lock) +{ +} + /* * A negative mutex count indicates that waiters are sleeping waiting for the * mutex. */ @@ -43,43 +64,29 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key) } +static inline int __ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx) +{ + struct ww_mutex *ww = container_of(lock, struct ww_mutex, base); + struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx); + + if (!hold_ctx) + return 0; + + if (unlikely(ctx == hold_ctx)) + return -EALREADY; + + if (ctx->stamp - hold_ctx->stamp <= LONG_MAX && + (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) { + return -EDEADLK; + } + + return 0; +} + + static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx) { -#ifdef CONFIG_DEBUG_MUTEXES - /* - * If this WARN_ON triggers, you used ww_mutex_lock to acquire, - * but released with a normal mutex_unlock in this call. 
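The stamp test in __ww_mutex_lock_check_stamp() above is a wrap-safe ordering on unsigned sequence numbers: `a - b <= LONG_MAX` holds exactly when `a` was stamped at or after `b`, even if the counter has wrapped. A standalone sketch of that trick:

```c
/* Wrap-safe "was a stamped at or after b" test on unsigned stamps,
 * as used by the -EDEADLK check above. Standalone userspace check. */
#include <assert.h>
#include <limits.h>

static int is_younger_or_equal(unsigned long a, unsigned long b)
{
    return a - b <= LONG_MAX; /* unsigned wrap-around compare */
}

int main(void)
{
    assert(is_younger_or_equal(5, 3));          /* 5 stamped after 3 */
    assert(!is_younger_or_equal(3, 5));
    assert(is_younger_or_equal(1, ULONG_MAX));  /* works across wrap */
    return 0;
}
```

For context, the caller-side contract this check enforces: a context that receives -EDEADLK is the younger one and must release every ww_mutex it holds before retrying, so the older context can make progress. A hedged usage sketch against the functions defined in this file (lock_pair() is a hypothetical caller, not part of the patch):

```c
/* Hypothetical caller demonstrating the wait/wound back-off protocol;
 * not part of the patch. */
static int lock_pair(struct ww_mutex *a, struct ww_mutex *b,
                     struct ww_acquire_ctx *ctx)
{
    struct ww_mutex *first = a, *second = b;
    int ret;

retry:
    ret = __ww_mutex_lock(first, ctx);
    if (ret)
        return ret;

    ret = __ww_mutex_lock(second, ctx);
    if (ret == -EDEADLK) {
        /* we are the younger context: drop what we hold and
         * retry with the contended lock taken first */
        ww_mutex_unlock(first);
        second = first;
        first = (first == a) ? b : a;
        goto retry;
    }
    return ret;
}
```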
- * - * This should never happen, always use ww_mutex_unlock. - */ - DEBUG_LOCKS_WARN_ON(ww->ctx); - - /* - * Not quite done after calling ww_acquire_done() ? - */ - DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire); - - if (ww_ctx->contending_lock) { - /* - * After -EDEADLK you tried to - * acquire a different ww_mutex? Bad! - */ - DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww); - - /* - * You called ww_mutex_lock after receiving -EDEADLK, - * but 'forgot' to unlock everything else first? - */ - DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0); - ww_ctx->contending_lock = NULL; - } - - /* - * Naughty, using a different class will lead to undefined behavior! - */ - DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class); -#endif ww_ctx->acquired++; } @@ -90,28 +97,143 @@ void ww_mutex_unlock(struct ww_mutex *lock) * into 'unlocked' state: */ if (lock->ctx) { - if (lock->ctx->acquired > 0) - lock->ctx->acquired--; - lock->ctx = NULL; + if (lock->ctx->acquired > 0) + lock->ctx->acquired--; + lock->ctx = NULL; } MutexUnlock(&lock->base); } -int __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) +static inline int __mutex_fastpath_lock_retval(atomic_t *count) { - MutexLock(&lock->base); + if (unlikely(atomic_dec_return(count) < 0)) + return -1; + else + return 0; +} + +static __always_inline void +ww_mutex_set_context_fastpath(struct ww_mutex *lock, + struct ww_acquire_ctx *ctx) +{ + u32 flags; + struct mutex_waiter *cur; + + ww_mutex_lock_acquired(lock, ctx); + + lock->ctx = ctx; + + /* + * The lock->ctx update should be visible on all cores before + * the atomic read is done, otherwise contended waiters might be + * missed. The contended waiters will either see ww_ctx == NULL + * and keep spinning, or it will acquire wait_lock, add itself + * to waiter list and sleep. + */ + smp_mb(); /* ^^^ */ + + /* + * Check if lock is contended, if not there is nobody to wake up + */ + if (likely(atomic_read(&lock->base.count) == 0)) + return; + + /* + * Uh oh, we raced in fastpath, wake up everyone in this case, + * so they can see the new lock->ctx. + */ + flags = safe_cli(); + list_for_each_entry(cur, &lock->base.wait_list, list) { + ((struct kos_taskdata*)cur->task)->state = 0; + } + safe_sti(flags); +} + +static __always_inline void +ww_mutex_set_context_slowpath(struct ww_mutex *lock, + struct ww_acquire_ctx *ctx) +{ + struct mutex_waiter *cur; + ww_mutex_lock_acquired(lock, ctx); lock->ctx = ctx; - return 0; + /* + * Give any possible sleeping processes the chance to wake up, + * so they can recheck if they have to back off. 
*/ + list_for_each_entry(cur, &lock->base.wait_list, list) { + ((struct kos_taskdata*)cur->task)->state = 0; + } +} + +int __ww_mutex_lock_slowpath(struct ww_mutex *ww, struct ww_acquire_ctx *ctx) +{ + struct mutex *lock; + struct mutex_waiter waiter; + struct kos_taskdata* taskdata; + u32 eflags; + int ret = 0; + + lock = &ww->base; + taskdata = (struct kos_taskdata*)(0x80003010); + waiter.task = (u32*)taskdata; + + eflags = safe_cli(); + + list_add_tail(&waiter.list, &lock->wait_list); + + for(;;) + { + if( atomic_xchg(&lock->count, -1) == 1) + break; + + if (ctx->acquired > 0) { + ret = __ww_mutex_lock_check_stamp(lock, ctx); + if (ret) + goto err; + }; + taskdata->state = 1; + change_task(); + }; + + if (likely(list_empty(&lock->wait_list))) + atomic_set(&lock->count, 0); + + ww_mutex_set_context_slowpath(ww, ctx); + +err: + list_del(&waiter.list); + safe_sti(eflags); + + return ret; +} + + +int __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) +{ + int ret; + + ret = __mutex_fastpath_lock_retval(&lock->base.count); + + if (likely(!ret)) { + ww_mutex_set_context_fastpath(lock, ctx); + mutex_set_owner(&lock->base); + } else + ret = __ww_mutex_lock_slowpath(lock, ctx); + return ret; } int __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) { - MutexLock(&lock->base); - ww_mutex_lock_acquired(lock, ctx); - lock->ctx = ctx; + int ret; - return 0; + ret = __mutex_fastpath_lock_retval(&lock->base.count); + + if (likely(!ret)) { + ww_mutex_set_context_fastpath(lock, ctx); + mutex_set_owner(&lock->base); + } else + ret = __ww_mutex_lock_slowpath(lock, ctx); + return ret; } diff --git a/drivers/ddk/linux/scatterlist.c b/drivers/ddk/linux/scatterlist.c index dada2b1d7..af76daf2a 100644 --- a/drivers/ddk/linux/scatterlist.c +++ b/drivers/ddk/linux/scatterlist.c @@ -25,14 +25,14 @@ struct scatterlist *sg_next(struct scatterlist *sg) #ifdef CONFIG_DEBUG_SG BUG_ON(sg->sg_magic != SG_MAGIC); #endif - if (sg_is_last(sg)) - return NULL; + if (sg_is_last(sg)) + return NULL; - sg++; - if (unlikely(sg_is_chain(sg))) - sg = sg_chain_ptr(sg); + sg++; + if (unlikely(sg_is_chain(sg))) + sg = sg_chain_ptr(sg); - return sg; + return sg; } EXPORT_SYMBOL(sg_next); @@ -54,6 +54,38 @@ int sg_nents(struct scatterlist *sg) } EXPORT_SYMBOL(sg_nents); +/** + * sg_nents_for_len - return total count of entries in scatterlist + * needed to satisfy the supplied length + * @sg: The scatterlist + * @len: The total required length + * + * Description: + * Determines the number of entries in sg that are required to meet + * the supplied length, taking into account chaining as well + * + * Returns: + * the number of sg entries needed, negative error on failure + * + **/ +int sg_nents_for_len(struct scatterlist *sg, u64 len) +{ + int nents; + u64 total; + + if (!len) + return 0; + + for (nents = 0, total = 0; sg; sg = sg_next(sg)) { + nents++; + total += sg->length; + if (total >= len) + return nents; + } + + return -EINVAL; +} +EXPORT_SYMBOL(sg_nents_for_len); /** * sg_last - return the last scatterlist entry in a list * @@ -71,16 +103,12 @@ EXPORT_SYMBOL(sg_nents); **/ struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents) { -#ifndef CONFIG_ARCH_HAS_SG_CHAIN - struct scatterlist *ret = &sgl[nents - 1]; -#else struct scatterlist *sg, *ret = NULL; unsigned int i; for_each_sg(sgl, sg, nents, i) ret = sg; -#endif #ifdef CONFIG_DEBUG_SG BUG_ON(sgl[0].sg_magic != SG_MAGIC); BUG_ON(!sg_is_last(ret)); @@ -101,15 +129,15 @@ EXPORT_SYMBOL(sg_last); **/ void 
sg_init_table(struct scatterlist *sgl, unsigned int nents) { - memset(sgl, 0, sizeof(*sgl) * nents); + memset(sgl, 0, sizeof(*sgl) * nents); #ifdef CONFIG_DEBUG_SG - { - unsigned int i; - for (i = 0; i < nents; i++) - sgl[i].sg_magic = SG_MAGIC; - } + { + unsigned int i; + for (i = 0; i < nents; i++) + sgl[i].sg_magic = SG_MAGIC; + } #endif - sg_mark_end(&sgl[nents - 1]); + sg_mark_end(&sgl[nents - 1]); } EXPORT_SYMBOL(sg_init_table); @@ -157,40 +185,40 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents) void __sg_free_table(struct sg_table *table, unsigned int max_ents, bool skip_first_chunk, sg_free_fn *free_fn) { - struct scatterlist *sgl, *next; + struct scatterlist *sgl, *next; - if (unlikely(!table->sgl)) - return; + if (unlikely(!table->sgl)) + return; - sgl = table->sgl; - while (table->orig_nents) { - unsigned int alloc_size = table->orig_nents; - unsigned int sg_size; + sgl = table->sgl; + while (table->orig_nents) { + unsigned int alloc_size = table->orig_nents; + unsigned int sg_size; - /* - * If we have more than max_ents segments left, - * then assign 'next' to the sg table after the current one. - * sg_size is then one less than alloc size, since the last - * element is the chain pointer. - */ - if (alloc_size > max_ents) { - next = sg_chain_ptr(&sgl[max_ents - 1]); - alloc_size = max_ents; - sg_size = alloc_size - 1; - } else { - sg_size = alloc_size; - next = NULL; - } + /* + * If we have more than max_ents segments left, + * then assign 'next' to the sg table after the current one. + * sg_size is then one less than alloc size, since the last + * element is the chain pointer. + */ + if (alloc_size > max_ents) { + next = sg_chain_ptr(&sgl[max_ents - 1]); + alloc_size = max_ents; + sg_size = alloc_size - 1; + } else { + sg_size = alloc_size; + next = NULL; + } - table->orig_nents -= sg_size; + table->orig_nents -= sg_size; if (skip_first_chunk) skip_first_chunk = false; else free_fn(sgl, alloc_size); - sgl = next; - } + sgl = next; + } - table->sgl = NULL; + table->sgl = NULL; } EXPORT_SYMBOL(__sg_free_table); @@ -228,8 +256,8 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents, unsigned int max_ents, struct scatterlist *first_chunk, gfp_t gfp_mask, sg_alloc_fn *alloc_fn) { - struct scatterlist *sg, *prv; - unsigned int left; + struct scatterlist *sg, *prv; + unsigned int left; memset(table, 0, sizeof(*table)); @@ -240,18 +268,18 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents, return -EINVAL; #endif - left = nents; - prv = NULL; - do { - unsigned int sg_size, alloc_size = left; + left = nents; + prv = NULL; + do { + unsigned int sg_size, alloc_size = left; - if (alloc_size > max_ents) { - alloc_size = max_ents; - sg_size = alloc_size - 1; - } else - sg_size = alloc_size; + if (alloc_size > max_ents) { + alloc_size = max_ents; + sg_size = alloc_size - 1; + } else + sg_size = alloc_size; - left -= sg_size; + left -= sg_size; if (first_chunk) { sg = first_chunk; @@ -259,41 +287,41 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents, } else { sg = alloc_fn(alloc_size, gfp_mask); } - if (unlikely(!sg)) { - /* - * Adjust entry count to reflect that the last - * entry of the previous table won't be used for - * linkage. Without this, sg_kfree() may get - * confused. - */ - if (prv) - table->nents = ++table->orig_nents; + if (unlikely(!sg)) { + /* + * Adjust entry count to reflect that the last + * entry of the previous table won't be used for + * linkage. Without this, sg_kfree() may get + * confused. 
+ */ + if (prv) + table->nents = ++table->orig_nents; return -ENOMEM; - } + } - sg_init_table(sg, alloc_size); - table->nents = table->orig_nents += sg_size; + sg_init_table(sg, alloc_size); + table->nents = table->orig_nents += sg_size; - /* - * If this is the first mapping, assign the sg table header. - * If this is not the first mapping, chain previous part. - */ - if (prv) - sg_chain(prv, max_ents, sg); - else - table->sgl = sg; + /* + * If this is the first mapping, assign the sg table header. + * If this is not the first mapping, chain previous part. + */ + if (prv) + sg_chain(prv, max_ents, sg); + else + table->sgl = sg; - /* - * If no more entries after this one, mark the end - */ - if (!left) - sg_mark_end(&sg[sg_size - 1]); + /* + * If no more entries after this one, mark the end + */ + if (!left) + sg_mark_end(&sg[sg_size - 1]); - prv = sg; - } while (left); + prv = sg; + } while (left); - return 0; + return 0; } EXPORT_SYMBOL(__sg_alloc_table); @@ -325,40 +353,320 @@ EXPORT_SYMBOL(sg_alloc_table); void __sg_page_iter_start(struct sg_page_iter *piter, - struct scatterlist *sglist, unsigned int nents, - unsigned long pgoffset) + struct scatterlist *sglist, unsigned int nents, + unsigned long pgoffset) { - piter->__pg_advance = 0; - piter->__nents = nents; + piter->__pg_advance = 0; + piter->__nents = nents; - piter->sg = sglist; - piter->sg_pgoffset = pgoffset; + piter->sg = sglist; + piter->sg_pgoffset = pgoffset; } EXPORT_SYMBOL(__sg_page_iter_start); static int sg_page_count(struct scatterlist *sg) { - return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT; + return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT; } bool __sg_page_iter_next(struct sg_page_iter *piter) { - if (!piter->__nents || !piter->sg) - return false; + if (!piter->__nents || !piter->sg) + return false; - piter->sg_pgoffset += piter->__pg_advance; - piter->__pg_advance = 1; + piter->sg_pgoffset += piter->__pg_advance; + piter->__pg_advance = 1; - while (piter->sg_pgoffset >= sg_page_count(piter->sg)) { - piter->sg_pgoffset -= sg_page_count(piter->sg); - piter->sg = sg_next(piter->sg); - if (!--piter->__nents || !piter->sg) - return false; - } + while (piter->sg_pgoffset >= sg_page_count(piter->sg)) { + piter->sg_pgoffset -= sg_page_count(piter->sg); + piter->sg = sg_next(piter->sg); + if (!--piter->__nents || !piter->sg) + return false; + } - return true; + return true; } EXPORT_SYMBOL(__sg_page_iter_next); +/** + * sg_miter_start - start mapping iteration over a sg list + * @miter: sg mapping iter to be started + * @sgl: sg list to iterate over + * @nents: number of sg entries + * + * Description: + * Starts mapping iterator @miter. + * + * Context: + * Don't care. + */ +void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl, + unsigned int nents, unsigned int flags) +{ + memset(miter, 0, sizeof(struct sg_mapping_iter)); + __sg_page_iter_start(&miter->piter, sgl, nents, 0); + WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG))); + miter->__flags = flags; +} +EXPORT_SYMBOL(sg_miter_start); +static bool sg_miter_get_next_page(struct sg_mapping_iter *miter) +{ + if (!miter->__remaining) { + struct scatterlist *sg; + unsigned long pgoffset; + + if (!__sg_page_iter_next(&miter->piter)) + return false; + + sg = miter->piter.sg; + pgoffset = miter->piter.sg_pgoffset; + + miter->__offset = pgoffset ? 
0 : sg->offset; + miter->__remaining = sg->offset + sg->length - + (pgoffset << PAGE_SHIFT) - miter->__offset; + miter->__remaining = min_t(unsigned long, miter->__remaining, + PAGE_SIZE - miter->__offset); + } + + return true; +} + +/** + * sg_miter_skip - reposition mapping iterator + * @miter: sg mapping iter to be skipped + * @offset: number of bytes to add to the current location + * + * Description: + * Sets the offset of @miter to its current location plus @offset bytes. + * If mapping iterator @miter has been proceeded by sg_miter_next(), this + * stops @miter. + * + * Context: + * Don't care if @miter is stopped, or not proceeded yet. + * Otherwise, preemption disabled if the SG_MITER_ATOMIC is set. + * + * Returns: + * true if @miter contains the valid mapping. false if end of sg + * list is reached. + */ +bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset) +{ + sg_miter_stop(miter); + + while (offset) { + off_t consumed; + + if (!sg_miter_get_next_page(miter)) + return false; + + consumed = min_t(off_t, offset, miter->__remaining); + miter->__offset += consumed; + miter->__remaining -= consumed; + offset -= consumed; + } + + return true; +} +EXPORT_SYMBOL(sg_miter_skip); + +/** + * sg_miter_next - proceed mapping iterator to the next mapping + * @miter: sg mapping iter to proceed + * + * Description: + * Proceeds @miter to the next mapping. @miter should have been started + * using sg_miter_start(). On successful return, @miter->page, + * @miter->addr and @miter->length point to the current mapping. + * + * Context: + * Preemption disabled if SG_MITER_ATOMIC. Preemption must stay disabled + * till @miter is stopped. May sleep if !SG_MITER_ATOMIC. + * + * Returns: + * true if @miter contains the next mapping. false if end of sg + * list is reached. + */ +bool sg_miter_next(struct sg_mapping_iter *miter) +{ + sg_miter_stop(miter); + + /* + * Get to the next page if necessary. + * __remaining, __offset is adjusted by sg_miter_stop + */ + if (!sg_miter_get_next_page(miter)) + return false; + + miter->page = sg_page_iter_page(&miter->piter); + miter->consumed = miter->length = miter->__remaining; + + if (miter->__flags & SG_MITER_ATOMIC) + miter->addr = kmap_atomic(miter->page) + miter->__offset; + else + miter->addr = kmap(miter->page) + miter->__offset; + + return true; +} +EXPORT_SYMBOL(sg_miter_next); + +/** + * sg_miter_stop - stop mapping iteration + * @miter: sg mapping iter to be stopped + * + * Description: + * Stops mapping iterator @miter. @miter should have been started + * using sg_miter_start(). A stopped iteration can be + * resumed by calling sg_miter_next() on it. This is useful when + * resources (kmap) need to be released during iteration. + * + * Context: + * Preemption disabled if the SG_MITER_ATOMIC is set. Don't care + * otherwise. 
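Typical use of the mapping-iterator API added here, as a hedged kernel-style fragment (sg_zero_all() is a hypothetical helper, not from the patch):

```c
/* Usage sketch for the sg_miter API above: clear an sg list chunk by
 * chunk. Assumes the usual kernel headers; not standalone. */
static void sg_zero_all(struct scatterlist *sgl, unsigned int nents)
{
    struct sg_mapping_iter miter;

    sg_miter_start(&miter, sgl, nents,
                   SG_MITER_TO_SG | SG_MITER_ATOMIC);
    while (sg_miter_next(&miter))
        memset(miter.addr, 0, miter.length); /* one mapped run */
    sg_miter_stop(&miter); /* drops the last kmap_atomic() */
}
```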
+ */ +void sg_miter_stop(struct sg_mapping_iter *miter) +{ + WARN_ON(miter->consumed > miter->length); + + /* drop resources from the last iteration */ + if (miter->addr) { + miter->__offset += miter->consumed; + miter->__remaining -= miter->consumed; + + if (miter->__flags & SG_MITER_ATOMIC) { + WARN_ON_ONCE(preemptible()); + kunmap_atomic(miter->addr); + } else + kunmap(miter->page); + + miter->page = NULL; + miter->addr = NULL; + miter->length = 0; + miter->consumed = 0; + } +} +EXPORT_SYMBOL(sg_miter_stop); + +/** + * sg_copy_buffer - Copy data between a linear buffer and an SG list + * @sgl: The SG list + * @nents: Number of SG entries + * @buf: Where to copy from + * @buflen: The number of bytes to copy + * @skip: Number of bytes to skip before copying + * @to_buffer: transfer direction (true == from an sg list to a + * buffer, false == from a buffer to an sg list + * + * Returns the number of copied bytes. + * + **/ +size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, + size_t buflen, off_t skip, bool to_buffer) +{ + unsigned int offset = 0; + struct sg_mapping_iter miter; + unsigned long flags; + unsigned int sg_flags = SG_MITER_ATOMIC; + + if (to_buffer) + sg_flags |= SG_MITER_FROM_SG; + else + sg_flags |= SG_MITER_TO_SG; + + sg_miter_start(&miter, sgl, nents, sg_flags); + + if (!sg_miter_skip(&miter, skip)) + return false; + + local_irq_save(flags); + + while (sg_miter_next(&miter) && offset < buflen) { + unsigned int len; + + len = min(miter.length, buflen - offset); + + if (to_buffer) + memcpy(buf + offset, miter.addr, len); + else + memcpy(miter.addr, buf + offset, len); + + offset += len; + } + + sg_miter_stop(&miter); + + local_irq_restore(flags); + return offset; +} +EXPORT_SYMBOL(sg_copy_buffer); + +/** + * sg_copy_from_buffer - Copy from a linear buffer to an SG list + * @sgl: The SG list + * @nents: Number of SG entries + * @buf: Where to copy from + * @buflen: The number of bytes to copy + * + * Returns the number of copied bytes. + * + **/ +size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents, + const void *buf, size_t buflen) +{ + return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false); +} +EXPORT_SYMBOL(sg_copy_from_buffer); + +/** + * sg_copy_to_buffer - Copy from an SG list to a linear buffer + * @sgl: The SG list + * @nents: Number of SG entries + * @buf: Where to copy to + * @buflen: The number of bytes to copy + * + * Returns the number of copied bytes. + * + **/ +size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents, + void *buf, size_t buflen) +{ + return sg_copy_buffer(sgl, nents, buf, buflen, 0, true); +} +EXPORT_SYMBOL(sg_copy_to_buffer); + +/** + * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list + * @sgl: The SG list + * @nents: Number of SG entries + * @buf: Where to copy from + * @buflen: The number of bytes to copy + * @skip: Number of bytes to skip before copying + * + * Returns the number of copied bytes. + * + **/ +size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents, + const void *buf, size_t buflen, off_t skip) +{ + return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false); +} +EXPORT_SYMBOL(sg_pcopy_from_buffer); + +/** + * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer + * @sgl: The SG list + * @nents: Number of SG entries + * @buf: Where to copy to + * @buflen: The number of bytes to copy + * @skip: Number of bytes to skip before copying + * + * Returns the number of copied bytes. 
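How the pcopy variants defined above compose (skip, then copy); a hedged usage fragment where read_at_offset() is a hypothetical caller:

```c
/* Hypothetical caller: copy 64 bytes out of an sg list, starting 16
 * bytes in. sg_miter_skip() consumes the offset inside
 * sg_copy_buffer(), which then runs with to_buffer == true. */
static size_t read_at_offset(struct scatterlist *sgl, unsigned int nents,
                             void *out)
{
    return sg_pcopy_to_buffer(sgl, nents, out, 64, 16);
}
```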
+ * + **/ +size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents, + void *buf, size_t buflen, off_t skip) +{ + return sg_copy_buffer(sgl, nents, buf, buflen, skip, true); +} +EXPORT_SYMBOL(sg_pcopy_to_buffer); diff --git a/drivers/ddk/linux/time.c b/drivers/ddk/linux/time.c index d048f3f48..427e77a64 100644 --- a/drivers/ddk/linux/time.c +++ b/drivers/ddk/linux/time.c @@ -1,4 +1,35 @@ +/* + * linux/kernel/time.c + * + * Copyright (C) 1991, 1992 Linus Torvalds + * + * This file contains the interface functions for the various + * time related system calls: time, stime, gettimeofday, settimeofday, + * adjtime + */ +/* + * Modification history kernel/time.c + * + * 1993-09-02 Philip Gladstone + * Created file with time related functions from sched/core.c and adjtimex() + * 1993-10-08 Torsten Duwe + * adjtime interface update and CMOS clock write code + * 1995-08-13 Torsten Duwe + * kernel PLL updated to 1994-12-13 specs (rfc-1589) + * 1999-01-16 Ulrich Windl + * Introduced error checking for many cases in adjtimex(). + * Updated NTP code according to technical memorandum Jan '96 + * "A Kernel Model for Precision Timekeeping" by Dave Mills + * Allow time_constant larger than MAXTC(6) for NTP v4 (MAXTC == 10) + * (Even though the technical memorandum forbids it) + * 2004-07-14 Christoph Lameter + * Added getnstimeofday to allow the posix timer functions to return + * with nanosecond accuracy + */ + #include +#include +#include @@ -45,21 +76,28 @@ #define NSEC_PER_SEC 1000000000L #define FSEC_PER_SEC 1000000000000000LL - +# define USER_HZ 100 +/* + * Convert jiffies to milliseconds and back. + * + * Avoid unnecessary multiplications/divisions in the + * two most common HZ cases: + */ unsigned int jiffies_to_msecs(const unsigned long j) { #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) - return (MSEC_PER_SEC / HZ) * j; + return (MSEC_PER_SEC / HZ) * j; #elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) - return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC); + return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC); #else # if BITS_PER_LONG == 32 - return (HZ_TO_MSEC_MUL32 * j) >> HZ_TO_MSEC_SHR32; + return (HZ_TO_MSEC_MUL32 * j) >> HZ_TO_MSEC_SHR32; # else - return (j * HZ_TO_MSEC_NUM) / HZ_TO_MSEC_DEN; + return (j * HZ_TO_MSEC_NUM) / HZ_TO_MSEC_DEN; # endif #endif } +EXPORT_SYMBOL(jiffies_to_msecs); unsigned int jiffies_to_usecs(const unsigned long j) { @@ -69,17 +107,218 @@ unsigned int jiffies_to_usecs(const unsigned long j) return (j + (HZ / USEC_PER_SEC) - 1)/(HZ / USEC_PER_SEC); #else # if BITS_PER_LONG == 32 - return (HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32; + return (HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32; # else - return (j * HZ_TO_USEC_NUM) / HZ_TO_USEC_DEN; + return (j * HZ_TO_USEC_NUM) / HZ_TO_USEC_DEN; # endif #endif } +EXPORT_SYMBOL(jiffies_to_usecs); +/** + * timespec_trunc - Truncate timespec to a granularity + * @t: Timespec + * @gran: Granularity in ns. + * + * Truncate a timespec to a granularity. Always rounds down. gran must + * not be 0 nor greater than a second (NSEC_PER_SEC, or 10^9 ns). + */ +struct timespec timespec_trunc(struct timespec t, unsigned gran) +{ + /* Avoid division in the common cases 1 ns and 1 s. 
*/ + if (gran == 1) { + /* nothing */ + } else if (gran == NSEC_PER_SEC) { + t.tv_nsec = 0; + } else if (gran > 1 && gran < NSEC_PER_SEC) { + t.tv_nsec -= t.tv_nsec % gran; + } else { + WARN(1, "illegal file time granularity: %u", gran); + } + return t; +} +EXPORT_SYMBOL(timespec_trunc); /* - * When we convert to jiffies then we interpret incoming values - * the following way: + * mktime64 - Converts date to seconds. + * Converts Gregorian date to seconds since 1970-01-01 00:00:00. + * Assumes input in normal date format, i.e. 1980-12-31 23:59:59 + * => year=1980, mon=12, day=31, hour=23, min=59, sec=59. + * + * [For the Julian calendar (which was used in Russia before 1917, + * Britain & colonies before 1752, anywhere else before 1582, + * and is still in use by some communities) leave out the + * -year/100+year/400 terms, and add 10.] + * + * This algorithm was first published by Gauss (I think). + */ +time64_t mktime64(const unsigned int year0, const unsigned int mon0, + const unsigned int day, const unsigned int hour, + const unsigned int min, const unsigned int sec) +{ + unsigned int mon = mon0, year = year0; + + /* 1..12 -> 11,12,1..10 */ + if (0 >= (int) (mon -= 2)) { + mon += 12; /* Puts Feb last since it has leap day */ + year -= 1; + } + + return ((((time64_t) + (year/4 - year/100 + year/400 + 367*mon/12 + day) + + year*365 - 719499 + )*24 + hour /* now have hours */ + )*60 + min /* now have minutes */ + )*60 + sec; /* finally seconds */ +} +EXPORT_SYMBOL(mktime64); + +/** + * set_normalized_timespec - set timespec sec and nsec parts and normalize + * + * @ts: pointer to timespec variable to be set + * @sec: seconds to set + * @nsec: nanoseconds to set + * + * Set seconds and nanoseconds field of a timespec variable and + * normalize to the timespec storage format + * + * Note: The tv_nsec part is always in the range of + * 0 <= tv_nsec < NSEC_PER_SEC + * For negative values only the tv_sec field is negative ! + */ +void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec) +{ + while (nsec >= NSEC_PER_SEC) { + /* + * The following asm() prevents the compiler from + * optimising this loop into a modulo operation. See + * also __iter_div_u64_rem() in include/linux/time.h + */ + asm("" : "+rm"(nsec)); + nsec -= NSEC_PER_SEC; + ++sec; + } + while (nsec < 0) { + asm("" : "+rm"(nsec)); + nsec += NSEC_PER_SEC; + --sec; + } + ts->tv_sec = sec; + ts->tv_nsec = nsec; +} +EXPORT_SYMBOL(set_normalized_timespec); + +/** + * ns_to_timespec - Convert nanoseconds to timespec + * @nsec: the nanoseconds value to be converted + * + * Returns the timespec representation of the nsec parameter. + */ +struct timespec ns_to_timespec(const s64 nsec) +{ + struct timespec ts; + s32 rem; + + if (!nsec) + return (struct timespec) {0, 0}; + + ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem); + if (unlikely(rem < 0)) { + ts.tv_sec--; + rem += NSEC_PER_SEC; + } + ts.tv_nsec = rem; + + return ts; +} +EXPORT_SYMBOL(ns_to_timespec); + +/** + * ns_to_timeval - Convert nanoseconds to timeval + * @nsec: the nanoseconds value to be converted + * + * Returns the timeval representation of the nsec parameter. 
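The Gauss-style day count in mktime64() above can be checked against known fixed points; a standalone userspace copy of the formula for verification only (mktime64_demo() is a local name):

```c
/* The day-count formula above, checked against the Unix epoch and
 * 2000-03-01 (just past a leap day). Verification sketch only. */
#include <assert.h>
#include <stdint.h>

static int64_t mktime64_demo(unsigned year, unsigned mon, unsigned day,
                             unsigned hour, unsigned min, unsigned sec)
{
    if (0 >= (int)(mon -= 2)) { /* 1..12 -> 11,12,1..10, Feb last */
        mon += 12;
        year -= 1;
    }
    return ((((int64_t)(year/4 - year/100 + year/400 + 367*mon/12 + day)
              + year*365 - 719499) * 24 + hour) * 60 + min) * 60 + sec;
}

int main(void)
{
    assert(mktime64_demo(1970, 1, 1, 0, 0, 0) == 0);          /* epoch */
    assert(mktime64_demo(2000, 3, 1, 0, 0, 0) == 951868800);
    return 0;
}
```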
+ */ +struct timeval ns_to_timeval(const s64 nsec) +{ + struct timespec ts = ns_to_timespec(nsec); + struct timeval tv; + + tv.tv_sec = ts.tv_sec; + tv.tv_usec = (suseconds_t) ts.tv_nsec / 1000; + + return tv; +} +EXPORT_SYMBOL(ns_to_timeval); + +#if BITS_PER_LONG == 32 +/** + * set_normalized_timespec64 - set timespec64 sec and nsec parts and normalize + * + * @ts: pointer to timespec64 variable to be set + * @sec: seconds to set + * @nsec: nanoseconds to set + * + * Set seconds and nanoseconds field of a timespec64 variable and + * normalize to the timespec64 storage format + * + * Note: The tv_nsec part is always in the range of + * 0 <= tv_nsec < NSEC_PER_SEC + * For negative values only the tv_sec field is negative ! + */ +void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec) +{ + while (nsec >= NSEC_PER_SEC) { + /* + * The following asm() prevents the compiler from + * optimising this loop into a modulo operation. See + * also __iter_div_u64_rem() in include/linux/time.h + */ + asm("" : "+rm"(nsec)); + nsec -= NSEC_PER_SEC; + ++sec; + } + while (nsec < 0) { + asm("" : "+rm"(nsec)); + nsec += NSEC_PER_SEC; + --sec; + } + ts->tv_sec = sec; + ts->tv_nsec = nsec; +} +EXPORT_SYMBOL(set_normalized_timespec64); + +/** + * ns_to_timespec64 - Convert nanoseconds to timespec64 + * @nsec: the nanoseconds value to be converted + * + * Returns the timespec64 representation of the nsec parameter. + */ +struct timespec64 ns_to_timespec64(const s64 nsec) +{ + struct timespec64 ts; + s32 rem; + + if (!nsec) + return (struct timespec64) {0, 0}; + + ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem); + if (unlikely(rem < 0)) { + ts.tv_sec--; + rem += NSEC_PER_SEC; + } + ts.tv_nsec = rem; + + return ts; +} +EXPORT_SYMBOL(ns_to_timespec64); +#endif +/** + * msecs_to_jiffies: - convert milliseconds to jiffies + * @m: time in milliseconds + * + * conversion is done as follows: * * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET) * * - 'too large' values [that would result in larger than * MAX_JIFFY_OFFSET values] mean 'infinite timeout' too. * * - all other values are converted to jiffies by either multiplying - * the input value by a factor or dividing it with a factor + * the input value by a factor or dividing it with a factor and + * handling any 32-bit overflows. + * for the details see __msecs_to_jiffies() * - * We must also be careful about 32-bit overflows. + * msecs_to_jiffies() checks for the passed in value being a constant + * via __builtin_constant_p() allowing gcc to eliminate most of the + * code, __msecs_to_jiffies() is called if the value passed does not + * allow constant folding and the actual conversion must be done at + * runtime. + * the _msecs_to_jiffies helpers are the HZ dependent conversion + * routines found in include/linux/jiffies.h */ -unsigned long msecs_to_jiffies(const unsigned int m) +unsigned long __msecs_to_jiffies(const unsigned int m) { - /* - * Negative value, means infinite timeout: - */ - if ((int)m < 0) - return MAX_JIFFY_OFFSET; -#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) - /* - * HZ is equal to or smaller than 1000, and 1000 is a nice - * round multiple of HZ, divide with the factor between them, - * but round upwards: - */ - return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ); -#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) - /* - * HZ is larger than 1000, and HZ is a nice round multiple of - * 1000 - simply multiply with the factor between them. 
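For the common configuration here (HZ == 100 is an assumption), the HZ <= MSEC_PER_SEC branch reduces to rounding up to the next 10 ms tick; a standalone sketch of that rounding:

```c
/* Round-up behaviour of the HZ <= 1000 conversion branch, assuming
 * HZ == 100 (10 ms per tick). Standalone verification sketch. */
#include <assert.h>

#define HZ 100
#define MSEC_PER_SEC 1000

static unsigned long msecs_to_jiffies_demo(unsigned int m)
{
    return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
}

int main(void)
{
    assert(msecs_to_jiffies_demo(0)  == 0);
    assert(msecs_to_jiffies_demo(1)  == 1); /* 1 ms still costs a tick */
    assert(msecs_to_jiffies_demo(10) == 1);
    assert(msecs_to_jiffies_demo(11) == 2); /* rounds up, never down   */
    return 0;
}
```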
- * - * But first make sure the multiplication result cannot - * overflow: - */ - if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET)) - return MAX_JIFFY_OFFSET; - - return m * (HZ / MSEC_PER_SEC); -#else - /* - * Generic case - multiply, round and divide. But first - * check that if we are doing a net multiplication, that - * we wouldn't overflow: - */ - if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET)) - return MAX_JIFFY_OFFSET; - - return (MSEC_TO_HZ_MUL32 * m + MSEC_TO_HZ_ADJ32) - >> MSEC_TO_HZ_SHR32; -#endif + /* + * Negative value, means infinite timeout: + */ + if ((int)m < 0) + return MAX_JIFFY_OFFSET; + return _msecs_to_jiffies(m); } -EXPORT_SYMBOL(msecs_to_jiffies); +EXPORT_SYMBOL(__msecs_to_jiffies); -unsigned long usecs_to_jiffies(const unsigned int u) +unsigned long __usecs_to_jiffies(const unsigned int u) { - if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET)) - return MAX_JIFFY_OFFSET; -#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ) - return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ); -#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC) - return u * (HZ / USEC_PER_SEC); -#else - return (USEC_TO_HZ_MUL32 * u + USEC_TO_HZ_ADJ32) - >> USEC_TO_HZ_SHR32; -#endif + if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET)) + return MAX_JIFFY_OFFSET; + return _usecs_to_jiffies(u); } -EXPORT_SYMBOL(usecs_to_jiffies); +EXPORT_SYMBOL(__usecs_to_jiffies); /* * The TICK_NSEC - 1 rounds up the value to the next resolution. Note @@ -164,30 +373,35 @@ EXPORT_SYMBOL(usecs_to_jiffies); * value to a scaled second value. */ static unsigned long -__timespec_to_jiffies(unsigned long sec, long nsec) +__timespec64_to_jiffies(u64 sec, long nsec) { nsec = nsec + TICK_NSEC - 1; - if (sec >= MAX_SEC_IN_JIFFIES){ - sec = MAX_SEC_IN_JIFFIES; - nsec = 0; - } - return (((u64)sec * SEC_CONVERSION) + - (((u64)nsec * NSEC_CONVERSION) >> - (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC; + if (sec >= MAX_SEC_IN_JIFFIES){ + sec = MAX_SEC_IN_JIFFIES; + nsec = 0; + } + return ((sec * SEC_CONVERSION) + + (((u64)nsec * NSEC_CONVERSION) >> + (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC; } +static unsigned long +__timespec_to_jiffies(unsigned long sec, long nsec) +{ + return __timespec64_to_jiffies((u64)sec, nsec); +} + unsigned long -timespec_to_jiffies(const struct timespec *value) +timespec64_to_jiffies(const struct timespec64 *value) { - return __timespec_to_jiffies(value->tv_sec, value->tv_nsec); + return __timespec64_to_jiffies(value->tv_sec, value->tv_nsec); } - -EXPORT_SYMBOL(timespec_to_jiffies); +EXPORT_SYMBOL(timespec64_to_jiffies); void -jiffies_to_timespec(const unsigned long jiffies, struct timespec *value) +jiffies_to_timespec64(const unsigned long jiffies, struct timespec64 *value) { /* * Convert jiffies to nanoseconds and separate with @@ -198,7 +412,185 @@ jiffies_to_timespec(const unsigned long jiffies, struct timespec *value) NSEC_PER_SEC, &rem); value->tv_nsec = rem; } -EXPORT_SYMBOL(jiffies_to_timespec); +EXPORT_SYMBOL(jiffies_to_timespec64); + +/* + * We could use a similar algorithm to timespec_to_jiffies (with a + * different multiplier for usec instead of nsec). But this has a + * problem with rounding: we can't exactly add TICK_NSEC - 1 to the + * usec value, since it's not necessarily integral. + * + * We could instead round in the intermediate scaled representation + * (i.e. in units of 1/2^(large scale) jiffies) but that's also + * perilous: the scaling introduces a small positive error, which + * combined with a division-rounding-upward (i.e. 
adding 2^(scale) - 1 + * units to the intermediate before shifting) leads to accidental + * overflow and overestimates. + * + * At the cost of one additional multiplication by a constant, just + * use the timespec implementation. + */ +unsigned long +timeval_to_jiffies(const struct timeval *value) +{ + return __timespec_to_jiffies(value->tv_sec, + value->tv_usec * NSEC_PER_USEC); +} +EXPORT_SYMBOL(timeval_to_jiffies); + +void jiffies_to_timeval(const unsigned long jiffies, struct timeval *value) +{ + /* + * Convert jiffies to nanoseconds and separate with + * one divide. + */ + u32 rem; + + value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC, + NSEC_PER_SEC, &rem); + value->tv_usec = rem / NSEC_PER_USEC; +} +EXPORT_SYMBOL(jiffies_to_timeval); + +/* + * Convert jiffies/jiffies_64 to clock_t and back. + */ +clock_t jiffies_to_clock_t(unsigned long x) +{ +#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0 +# if HZ < USER_HZ + return x * (USER_HZ / HZ); +# else + return x / (HZ / USER_HZ); +# endif +#else + return div_u64((u64)x * TICK_NSEC, NSEC_PER_SEC / USER_HZ); +#endif +} +EXPORT_SYMBOL(jiffies_to_clock_t); + +unsigned long clock_t_to_jiffies(unsigned long x) +{ +#if (HZ % USER_HZ)==0 + if (x >= ~0UL / (HZ / USER_HZ)) + return ~0UL; + return x * (HZ / USER_HZ); +#else + /* Don't worry about loss of precision here .. */ + if (x >= ~0UL / HZ * USER_HZ) + return ~0UL; + + /* .. but do try to contain it here */ + return div_u64((u64)x * HZ, USER_HZ); +#endif +} +EXPORT_SYMBOL(clock_t_to_jiffies); + +u64 jiffies_64_to_clock_t(u64 x) +{ +#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0 +# if HZ < USER_HZ + x = div_u64(x * USER_HZ, HZ); +# elif HZ > USER_HZ + x = div_u64(x, HZ / USER_HZ); +# else + /* Nothing to do */ +# endif +#else + /* + * There are better ways that don't overflow early, + * but even this doesn't overflow in hundreds of years + * in 64 bits, so.. + */ + x = div_u64(x * TICK_NSEC, (NSEC_PER_SEC / USER_HZ)); +#endif + return x; +} +EXPORT_SYMBOL(jiffies_64_to_clock_t); + +u64 nsec_to_clock_t(u64 x) +{ +#if (NSEC_PER_SEC % USER_HZ) == 0 + return div_u64(x, NSEC_PER_SEC / USER_HZ); +#elif (USER_HZ % 512) == 0 + return div_u64(x * USER_HZ / 512, NSEC_PER_SEC / 512); +#else + /* + * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024, + * overflow after 64.99 years. + * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ... + */ + return div_u64(x * 9, (9ull * NSEC_PER_SEC + (USER_HZ / 2)) / USER_HZ); +#endif +} + +/** + * nsecs_to_jiffies64 - Convert nsecs in u64 to jiffies64 + * + * @n: nsecs in u64 + * + * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64. + * And this doesn't return MAX_JIFFY_OFFSET since this function is designed + * for scheduler, not for use in device drivers to calculate timeout value. + * + * note: + * NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512) + * ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years + */ +u64 nsecs_to_jiffies64(u64 n) +{ +#if (NSEC_PER_SEC % HZ) == 0 + /* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */ + return div_u64(n, NSEC_PER_SEC / HZ); +#elif (HZ % 512) == 0 + /* overflow after 292 years if HZ = 1024 */ + return div_u64(n * HZ / 512, NSEC_PER_SEC / 512); +#else + /* + * Generic case - optimized for cases where HZ is a multiple of 3. + * overflow after 64.99 years, exact for HZ = 60, 72, 90, 120 etc. 
+ */ + return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ); +#endif +} +EXPORT_SYMBOL(nsecs_to_jiffies64); + +/** + * nsecs_to_jiffies - Convert nsecs in u64 to jiffies + * + * @n: nsecs in u64 + * + * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64. + * And this doesn't return MAX_JIFFY_OFFSET since this function is designed + * for scheduler, not for use in device drivers to calculate timeout value. + * + * note: + * NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512) + * ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years + */ +unsigned long nsecs_to_jiffies(u64 n) +{ + return (unsigned long)nsecs_to_jiffies64(n); +} +EXPORT_SYMBOL_GPL(nsecs_to_jiffies); + +/* + * Add two timespec values and do a safety check for overflow. + * It's assumed that both values are valid (>= 0) + */ +struct timespec timespec_add_safe(const struct timespec lhs, + const struct timespec rhs) +{ + struct timespec res; + + set_normalized_timespec(&res, lhs.tv_sec + rhs.tv_sec, + lhs.tv_nsec + rhs.tv_nsec); + + if (res.tv_sec < lhs.tv_sec || res.tv_sec < rhs.tv_sec) + res.tv_sec = TIME_T_MAX; + + return res; +} s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder) { @@ -217,21 +609,8 @@ s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder) return quotient; } -struct timespec ns_to_timespec(const s64 nsec) -{ - struct timespec ts; - s32 rem; - if (!nsec) - return (struct timespec) {0, 0}; - ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem); - if (unlikely(rem < 0)) { - ts.tv_sec--; - rem += NSEC_PER_SEC; - } - ts.tv_nsec = rem; - return ts; -} + diff --git a/drivers/ddk/stdio/vsprintf.c b/drivers/ddk/stdio/vsprintf.c index 92827a9ef..2ecc279f5 100644 --- a/drivers/ddk/stdio/vsprintf.c +++ b/drivers/ddk/stdio/vsprintf.c @@ -23,26 +23,13 @@ #include #include +#include #include #include #include #include /* for PAGE_SIZE */ - -static inline u64 div_u64(u64 dividend, u32 divisor) -{ - u32 remainder; - return div_u64_rem(dividend, divisor, &remainder); -} - -static inline s64 div_s64(s64 dividend, s32 divisor) -{ - s32 remainder; - return div_s64_rem(dividend, divisor, &remainder); -} - - #define ZERO_SIZE_PTR ((void *)16) #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \ @@ -55,6 +42,7 @@ static inline s64 div_s64(s64 dividend, s32 divisor) #define KSTRTOX_OVERFLOW (1U << 31) const char hex_asc[] = "0123456789abcdef"; +const char hex_asc_upper[] = "0123456789ABCDEF"; /* Works only for digits and letters, but small and fast */ #define TOLOWER(x) ((x) | 0x20) @@ -210,148 +198,152 @@ int skip_atoi(const char **s) { int i = 0; - while (isdigit(**s)) + do { i = i*10 + *((*s)++) - '0'; + } while (isdigit(**s)); return i; } -/* Decimal conversion is by far the most typical, and is used - * for /proc and /sys data. This directly impacts e.g. top performance - * with many processes running. We optimize it for speed - * using ideas described at - * (with permission from the author, Douglas W. Jones). +/* + * Decimal conversion is by far the most typical, and is used for + * /proc and /sys data. This directly impacts e.g. top performance + * with many processes running. We optimize it for speed by emitting + * two characters at a time, using a 200 byte lookup table. This + * roughly halves the number of multiplications compared to computing + * the digits one at a time. Implementation strongly inspired by the + * previous version, which in turn used ideas described at + * (with permission + * from the author, Douglas W. Jones). 
+ * + * It turns out there is precisely one 26 bit fixed-point + * approximation a of 64/100 for which x/100 == (x * (u64)a) >> 32 + * holds for all x in [0, 10^8-1], namely a = 0x28f5c29. The actual + * range happens to be somewhat larger (x <= 1073741898), but that's + * irrelevant for our purpose. + * + * For dividing a number in the range [10^4, 10^6-1] by 100, we still + * need a 32x32->64 bit multiply, so we simply use the same constant. + * + * For dividing a number in the range [100, 10^4-1] by 100, there are + * several options. The simplest is (x * 0x147b) >> 19, which is valid + * for all x <= 43698. */ -#if BITS_PER_LONG != 32 || BITS_PER_LONG_LONG != 64 -/* Formats correctly any integer in [0, 999999999] */ -static noinline_for_stack -char *put_dec_full9(char *buf, unsigned q) -{ - unsigned r; +static const u16 decpair[100] = { +#define _(x) (__force u16) cpu_to_le16(((x % 10) | ((x / 10) << 8)) + 0x3030) + _( 0), _( 1), _( 2), _( 3), _( 4), _( 5), _( 6), _( 7), _( 8), _( 9), + _(10), _(11), _(12), _(13), _(14), _(15), _(16), _(17), _(18), _(19), + _(20), _(21), _(22), _(23), _(24), _(25), _(26), _(27), _(28), _(29), + _(30), _(31), _(32), _(33), _(34), _(35), _(36), _(37), _(38), _(39), + _(40), _(41), _(42), _(43), _(44), _(45), _(46), _(47), _(48), _(49), + _(50), _(51), _(52), _(53), _(54), _(55), _(56), _(57), _(58), _(59), + _(60), _(61), _(62), _(63), _(64), _(65), _(66), _(67), _(68), _(69), + _(70), _(71), _(72), _(73), _(74), _(75), _(76), _(77), _(78), _(79), + _(80), _(81), _(82), _(83), _(84), _(85), _(86), _(87), _(88), _(89), + _(90), _(91), _(92), _(93), _(94), _(95), _(96), _(97), _(98), _(99), +#undef _ +}; - /* - * Possible ways to approx. divide by 10 - * (x * 0x1999999a) >> 32 x < 1073741829 (multiply must be 64-bit) - * (x * 0xcccd) >> 19 x < 81920 (x < 262149 when 64-bit mul) - * (x * 0x6667) >> 18 x < 43699 - * (x * 0x3334) >> 17 x < 16389 - * (x * 0x199a) >> 16 x < 16389 - * (x * 0x0ccd) >> 15 x < 16389 - * (x * 0x0667) >> 14 x < 2739 - * (x * 0x0334) >> 13 x < 1029 - * (x * 0x019a) >> 12 x < 1029 - * (x * 0x00cd) >> 11 x < 1029 shorter code than * 0x67 (on i386) - * (x * 0x0067) >> 10 x < 179 - * (x * 0x0034) >> 9 x < 69 same - * (x * 0x001a) >> 8 x < 69 same - * (x * 0x000d) >> 7 x < 69 same, shortest code (on i386) - * (x * 0x0007) >> 6 x < 19 - * See - */ - r = (q * (uint64_t)0x1999999a) >> 32; - *buf++ = (q - 10 * r) + '0'; /* 1 */ - q = (r * (uint64_t)0x1999999a) >> 32; - *buf++ = (r - 10 * q) + '0'; /* 2 */ - r = (q * (uint64_t)0x1999999a) >> 32; - *buf++ = (q - 10 * r) + '0'; /* 3 */ - q = (r * (uint64_t)0x1999999a) >> 32; - *buf++ = (r - 10 * q) + '0'; /* 4 */ - r = (q * (uint64_t)0x1999999a) >> 32; - *buf++ = (q - 10 * r) + '0'; /* 5 */ - /* Now value is under 10000, can avoid 64-bit multiply */ - q = (r * 0x199a) >> 16; - *buf++ = (r - 10 * q) + '0'; /* 6 */ - r = (q * 0xcd) >> 11; - *buf++ = (q - 10 * r) + '0'; /* 7 */ - q = (r * 0xcd) >> 11; - *buf++ = (r - 10 * q) + '0'; /* 8 */ - *buf++ = q + '0'; /* 9 */ - return buf; -} -#endif - -/* Similar to above but do not pad with zeros. - * Code can be easily arranged to print 9 digits too, but our callers - * always call put_dec_full9() instead when the number has 9 decimal digits. - */ +/* + * This will print a single '0' even if r == 0, since we would + * immediately jump to out_r where two 0s would be written but only + * one of them accounted for in buf. This is needed by ip4_string + * below. All other callers pass a non-zero value of r. 
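Both constants quoted in this comment can be verified by brute force over exactly the claimed ranges; a standalone check:

```c
/* Brute-force check of the two fixed-point divide-by-100 constants
 * described above, over the ranges the comment claims. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t x;

    /* x/100 == (x * 0x28f5c29) >> 32 for all x in [0, 10^8 - 1] */
    for (x = 0; x < 100000000u; x++)
        assert(x / 100 == (uint32_t)(((uint64_t)x * 0x28f5c29) >> 32));

    /* x/100 == (x * 0x147b) >> 19 for all x <= 43698 */
    for (x = 0; x <= 43698; x++)
        assert(x / 100 == ((x * 0x147b) >> 19));

    return 0;
}
```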
+*/ static noinline_for_stack char *put_dec_trunc8(char *buf, unsigned r) { unsigned q; - /* Copy of previous function's body with added early returns */ - while (r >= 10000) { - q = r + '0'; - r = (r * (uint64_t)0x1999999a) >> 32; - *buf++ = q - 10*r; - } + /* 1 <= r < 10^8 */ + if (r < 100) + goto out_r; - q = (r * 0x199a) >> 16; /* r <= 9999 */ - *buf++ = (r - 10 * q) + '0'; - if (q == 0) - return buf; - r = (q * 0xcd) >> 11; /* q <= 999 */ - *buf++ = (q - 10 * r) + '0'; - if (r == 0) - return buf; - q = (r * 0xcd) >> 11; /* r <= 99 */ - *buf++ = (r - 10 * q) + '0'; - if (q == 0) - return buf; - *buf++ = q + '0'; /* q <= 9 */ + /* 100 <= r < 10^8 */ + q = (r * (u64)0x28f5c29) >> 32; + *((u16 *)buf) = decpair[r - 100*q]; + buf += 2; + + /* 1 <= q < 10^6 */ + if (q < 100) + goto out_q; + + /* 100 <= q < 10^6 */ + r = (q * (u64)0x28f5c29) >> 32; + *((u16 *)buf) = decpair[q - 100*r]; + buf += 2; + + /* 1 <= r < 10^4 */ + if (r < 100) + goto out_r; + + /* 100 <= r < 10^4 */ + q = (r * 0x147b) >> 19; + *((u16 *)buf) = decpair[r - 100*q]; + buf += 2; +out_q: + /* 1 <= q < 100 */ + r = q; +out_r: + /* 1 <= r < 100 */ + *((u16 *)buf) = decpair[r]; + buf += r < 10 ? 1 : 2; return buf; } -/* There are two algorithms to print larger numbers. - * One is generic: divide by 1000000000 and repeatedly print - * groups of (up to) 9 digits. It's conceptually simple, - * but requires a (unsigned long long) / 1000000000 division. - * - * Second algorithm splits 64-bit unsigned long long into 16-bit chunks, - * manipulates them cleverly and generates groups of 4 decimal digits. - * It so happens that it does NOT require long long division. - * - * If long is > 32 bits, division of 64-bit values is relatively easy, - * and we will use the first algorithm. - * If long long is > 64 bits (strange architecture with VERY large long long), - * second algorithm can't be used, and we again use the first one. - * - * Else (if long is 32 bits and long long is 64 bits) we use second one. 
- */ +#if BITS_PER_LONG == 64 && BITS_PER_LONG_LONG == 64 +static noinline_for_stack +char *put_dec_full8(char *buf, unsigned r) +{ + unsigned q; -#if BITS_PER_LONG != 32 || BITS_PER_LONG_LONG != 64 + /* 0 <= r < 10^8 */ + q = (r * (u64)0x28f5c29) >> 32; + *((u16 *)buf) = decpair[r - 100*q]; + buf += 2; -/* First algorithm: generic */ + /* 0 <= q < 10^6 */ + r = (q * (u64)0x28f5c29) >> 32; + *((u16 *)buf) = decpair[q - 100*r]; + buf += 2; -static + /* 0 <= r < 10^4 */ + q = (r * 0x147b) >> 19; + *((u16 *)buf) = decpair[r - 100*q]; + buf += 2; + + /* 0 <= q < 100 */ + *((u16 *)buf) = decpair[q]; + buf += 2; + return buf; +} + +static noinline_for_stack char *put_dec(char *buf, unsigned long long n) { - if (n >= 100*1000*1000) { - while (n >= 1000*1000*1000) - buf = put_dec_full9(buf, do_div(n, 1000*1000*1000)); if (n >= 100*1000*1000) - return put_dec_full9(buf, n); - } + buf = put_dec_full8(buf, do_div(n, 100*1000*1000)); + /* 1 <= n <= 1.6e11 */ + if (n >= 100*1000*1000) + buf = put_dec_full8(buf, do_div(n, 100*1000*1000)); + /* 1 <= n < 1e8 */ return put_dec_trunc8(buf, n); } -#else +#elif BITS_PER_LONG == 32 && BITS_PER_LONG_LONG == 64 -/* Second algorithm: valid only for 64-bit long longs */ - -/* See comment in put_dec_full9 for choice of constants */ -static noinline_for_stack -void put_dec_full4(char *buf, unsigned q) +static void +put_dec_full4(char *buf, unsigned r) { - unsigned r; - r = (q * 0xccd) >> 15; - buf[0] = (q - 10 * r) + '0'; - q = (r * 0xcd) >> 11; - buf[1] = (r - 10 * q) + '0'; - r = (q * 0xcd) >> 11; - buf[2] = (q - 10 * r) + '0'; - buf[3] = r + '0'; + unsigned q; + + /* 0 <= r < 10^4 */ + q = (r * 0x147b) >> 19; + *((u16 *)buf) = decpair[r - 100*q]; + buf += 2; + /* 0 <= q < 100 */ + *((u16 *)buf) = decpair[q]; } /* @@ -359,9 +351,9 @@ void put_dec_full4(char *buf, unsigned q) * The approximation x/10000 == (x * 0x346DC5D7) >> 43 * holds for all x < 1,128,869,999. The largest value this * helper will ever be asked to convert is 1,125,520,955. - * (d1 in the put_dec code, assuming n is all-ones). + * (second call in the put_dec code, assuming n is all-ones). */ -static +static noinline_for_stack unsigned put_dec_helper4(char *buf, unsigned x) { uint32_t q = (x * (uint64_t)0x346DC5D7) >> 43; @@ -388,6 +380,8 @@ char *put_dec(char *buf, unsigned long long n) d2 = (h ) & 0xffff; d3 = (h >> 16); /* implicit "& 0xffff" */ + /* n = 2^48 d3 + 2^32 d2 + 2^16 d1 + d0 + = 281_4749_7671_0656 d3 + 42_9496_7296 d2 + 6_5536 d1 + d0 */ q = 656 * d3 + 7296 * d2 + 5536 * d1 + ((uint32_t)n & 0xffff); q = put_dec_helper4(buf, q); @@ -417,7 +411,8 @@ char *put_dec(char *buf, unsigned long long n) */ int num_to_str(char *buf, int size, unsigned long long num) { - char tmp[sizeof(num) * 3]; + /* put_dec requires 2-byte alignment of the buffer. 
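+	 * (Editorial illustration: put_dec() emits the decimal digits
+	 * least-significant first through u16 decpair stores, so for
+	 * num = 90210 it fills tmp[] with "01209" and the reversing
+	 * copy below produces "90210"; those u16 stores are why tmp
+	 * needs the 2-byte alignment.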
*/ + char tmp[sizeof(num) * 3] __aligned(2); int idx, len; /* put_dec() may work incorrectly for num = 0 (generate "", not "0") */ @@ -435,11 +430,11 @@ int num_to_str(char *buf, int size, unsigned long long num) return len; } -#define ZEROPAD 1 /* pad with zero */ -#define SIGN 2 /* unsigned/signed long */ +#define SIGN 1 /* unsigned/signed, must be 1 */ +#define LEFT 2 /* left justified */ #define PLUS 4 /* show plus */ #define SPACE 8 /* space if plus */ -#define LEFT 16 /* left justified */ +#define ZEROPAD 16 /* pad with zero, must be 16 == '0' - ' ' */ #define SMALL 32 /* use lowercase in hex (must be 32 == 0x20) */ #define SPECIAL 64 /* prefix hex with "0x", octal with "0" */ @@ -478,10 +473,8 @@ static noinline_for_stack char *number(char *buf, char *end, unsigned long long num, struct printf_spec spec) { - /* we are called with base 8, 10 or 16, only, thus don't need "G..." */ - static const char digits[16] = "0123456789ABCDEF"; /* "GHIJKLMNOPQRSTUVWXYZ"; */ - - char tmp[66]; + /* put_dec requires 2-byte alignment of the buffer. */ + char tmp[3 * sizeof(num)] __aligned(2); char sign; char locase; int need_pfx = ((spec.flags & SPECIAL) && spec.base != 10); @@ -517,12 +510,7 @@ char *number(char *buf, char *end, unsigned long long num, /* generate full string in tmp[], in reverse order */ i = 0; if (num < spec.base) - tmp[i++] = digits[num] | locase; - /* Generic code, for any base: - else do { - tmp[i++] = (digits[do_div(num,base)] | locase); - } while (num != 0); - */ + tmp[i++] = hex_asc_upper[num] | locase; else if (spec.base != 10) { /* 8 or 16 */ int mask = spec.base - 1; int shift = 3; @@ -530,7 +518,7 @@ char *number(char *buf, char *end, unsigned long long num, if (spec.base == 16) shift = 4; do { - tmp[i++] = (digits[((unsigned char)num) & mask] | locase); + tmp[i++] = (hex_asc_upper[((unsigned char)num) & mask] | locase); num >>= shift; } while (num); } else { /* base 10 */ @@ -542,7 +530,7 @@ char *number(char *buf, char *end, unsigned long long num, spec.precision = i; /* leading space padding */ spec.field_width -= spec.precision; - if (!(spec.flags & (ZEROPAD+LEFT))) { + if (!(spec.flags & (ZEROPAD | LEFT))) { while (--spec.field_width >= 0) { if (buf < end) *buf = ' '; @@ -570,7 +558,8 @@ char *number(char *buf, char *end, unsigned long long num, } /* zero or space padding */ if (!(spec.flags & LEFT)) { - char c = (spec.flags & ZEROPAD) ? '0' : ' '; + char c = ' ' + (spec.flags & ZEROPAD); + BUILD_BUG_ON(' ' + ZEROPAD != '0'); while (--spec.field_width >= 0) { if (buf < end) *buf = c; @@ -712,8 +701,6 @@ char *resource_string(char *buf, char *end, struct resource *res, #define FLAG_BUF_SIZE (2 * sizeof(res->flags)) #define DECODED_BUF_SIZE sizeof("[mem - 64bit pref window disabled]") #define RAW_BUF_SIZE sizeof("[mem - flags 0x]") -#undef max -#define max(a,b) ((a) > (b) ? 
(a) : (b)) char sym[max(2*RSRC_BUF_SIZE + DECODED_BUF_SIZE, 2*RSRC_BUF_SIZE + FLAG_BUF_SIZE + RAW_BUF_SIZE)]; @@ -742,10 +729,15 @@ char *resource_string(char *buf, char *end, struct resource *res, specp = &mem_spec; decode = 0; } + if (decode && res->flags & IORESOURCE_UNSET) { + p = string(p, pend, "size ", str_spec); + p = number(p, pend, resource_size(res), *specp); + } else { p = number(p, pend, res->start, *specp); if (res->start != res->end) { *p++ = '-'; p = number(p, pend, res->end, *specp); + } } if (decode) { if (res->flags & IORESOURCE_MEM_64) @@ -800,16 +792,105 @@ char *hex_string(char *buf, char *end, u8 *addr, struct printf_spec spec, if (spec.field_width > 0) len = min_t(int, spec.field_width, 64); - for (i = 0; i < len && buf < end - 1; i++) { - buf = hex_byte_pack(buf, addr[i]); + for (i = 0; i < len; ++i) { + if (buf < end) + *buf = hex_asc_hi(addr[i]); + ++buf; + if (buf < end) + *buf = hex_asc_lo(addr[i]); + ++buf; - if (buf < end && separator && i != len - 1) - *buf++ = separator; + if (separator && i != len - 1) { + if (buf < end) + *buf = separator; + ++buf; + } } return buf; } +static noinline_for_stack +char *bitmap_string(char *buf, char *end, unsigned long *bitmap, + struct printf_spec spec, const char *fmt) +{ + const int CHUNKSZ = 32; + int nr_bits = max_t(int, spec.field_width, 0); + int i, chunksz; + bool first = true; + + /* reused to print numbers */ + spec = (struct printf_spec){ .flags = SMALL | ZEROPAD, .base = 16 }; + + chunksz = nr_bits & (CHUNKSZ - 1); + if (chunksz == 0) + chunksz = CHUNKSZ; + + i = ALIGN(nr_bits, CHUNKSZ) - CHUNKSZ; + for (; i >= 0; i -= CHUNKSZ) { + u32 chunkmask, val; + int word, bit; + + chunkmask = ((1ULL << chunksz) - 1); + word = i / BITS_PER_LONG; + bit = i % BITS_PER_LONG; + val = (bitmap[word] >> bit) & chunkmask; + + if (!first) { + if (buf < end) + *buf = ','; + buf++; + } + first = false; + + spec.field_width = DIV_ROUND_UP(chunksz, 4); + buf = number(buf, end, val, spec); + + chunksz = CHUNKSZ; + } + return buf; +} + +static noinline_for_stack +char *bitmap_list_string(char *buf, char *end, unsigned long *bitmap, + struct printf_spec spec, const char *fmt) +{ + int nr_bits = max_t(int, spec.field_width, 0); + /* current bit is 'cur', most recently seen range is [rbot, rtop] */ + int cur, rbot, rtop; + bool first = true; + + /* reused to print numbers */ + spec = (struct printf_spec){ .base = 10 }; + + rbot = cur = find_first_bit(bitmap, nr_bits); + while (cur < nr_bits) { + rtop = cur; + cur = find_next_bit(bitmap, nr_bits, cur + 1); + if (cur < nr_bits && cur <= rtop + 1) + continue; + + if (!first) { + if (buf < end) + *buf = ','; + buf++; + } + first = false; + + buf = number(buf, end, rbot, spec); + if (rbot < rtop) { + if (buf < end) + *buf = '-'; + buf++; + + buf = number(buf, end, rtop, spec); + } + + rbot = cur; + } + return buf; +} + static noinline_for_stack char *mac_address_string(char *buf, char *end, u8 *addr, struct printf_spec spec, const char *fmt) @@ -878,7 +959,7 @@ char *ip4_string(char *p, const u8 *addr, const char *fmt) break; } for (i = 0; i < 4; i++) { - char temp[3]; /* hold each IP quad in reverse order */ + char temp[4] __aligned(2); /* hold each IP quad in reverse order */ int digits = put_dec_trunc8(temp, addr[index]) - temp; if (leading_zeros) { if (digits < 3) @@ -928,6 +1009,10 @@ int kptr_restrict = 1; * - 'B' For backtraced symbolic direct pointers with offset * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref] * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f 
flags 0x201] + * - 'b[l]' For a bitmap, the number of bits is determined by the field + * width which must be explicitly specified either as part of the + * format string '%32b[l]' or through '%*b[l]', [l] selects + * range-list format instead of hex format * - 'M' For a 6-byte MAC address, it prints the address in the * usual colon-separated hex notation * - 'm' For a 6-byte MAC address, it prints the hex address without colons @@ -949,6 +1034,17 @@ int kptr_restrict = 1; * - '[Ii][4S][hnbl]' IPv4 addresses in host, network, big or little endian order * - 'I[6S]c' for IPv6 addresses printed as specified by * http://tools.ietf.org/html/rfc5952 + * - 'E[achnops]' For an escaped buffer, where rules are defined by combination + * of the following flags (see string_escape_mem() for the + * details): + * a - ESCAPE_ANY + * c - ESCAPE_SPECIAL + * h - ESCAPE_HEX + * n - ESCAPE_NULL + * o - ESCAPE_OCTAL + * p - ESCAPE_NP + * s - ESCAPE_SPACE + * By default ESCAPE_ANY_NP is used. * - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form * "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" * Options for %pU are: @@ -978,6 +1074,11 @@ int kptr_restrict = 1; * (default assumed to be phys_addr_t, passed by reference) * - 'd[234]' For a dentry name (optionally 2-4 last components) * - 'D[234]' Same as 'd' but for a struct file + * - 'C' For a clock, it prints the name (Common Clock Framework) or address + * (legacy clock framework) of the clock + * - 'Cn' For a clock, it prints the name (Common Clock Framework) or address + * (legacy clock framework) of the clock + * - 'Cr' For a clock, it prints the current rate of the clock * * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 * function pointers are really function descriptors, which contain a @@ -1188,8 +1289,7 @@ qualifier: case 'p': spec->type = FORMAT_TYPE_PTR; - return fmt - start; - /* skip alnum */ + return ++fmt - start; case '%': spec->type = FORMAT_TYPE_PERCENT_CHAR; @@ -1230,29 +1330,21 @@ qualifier: if (spec->qualifier == 'L') spec->type = FORMAT_TYPE_LONG_LONG; else if (spec->qualifier == 'l') { - if (spec->flags & SIGN) - spec->type = FORMAT_TYPE_LONG; - else - spec->type = FORMAT_TYPE_ULONG; + BUILD_BUG_ON(FORMAT_TYPE_ULONG + SIGN != FORMAT_TYPE_LONG); + spec->type = FORMAT_TYPE_ULONG + (spec->flags & SIGN); } else if (_tolower(spec->qualifier) == 'z') { spec->type = FORMAT_TYPE_SIZE_T; } else if (spec->qualifier == 't') { spec->type = FORMAT_TYPE_PTRDIFF; } else if (spec->qualifier == 'H') { - if (spec->flags & SIGN) - spec->type = FORMAT_TYPE_BYTE; - else - spec->type = FORMAT_TYPE_UBYTE; + BUILD_BUG_ON(FORMAT_TYPE_UBYTE + SIGN != FORMAT_TYPE_BYTE); + spec->type = FORMAT_TYPE_UBYTE + (spec->flags & SIGN); } else if (spec->qualifier == 'h') { - if (spec->flags & SIGN) - spec->type = FORMAT_TYPE_SHORT; - else - spec->type = FORMAT_TYPE_USHORT; + BUILD_BUG_ON(FORMAT_TYPE_USHORT + SIGN != FORMAT_TYPE_SHORT); + spec->type = FORMAT_TYPE_USHORT + (spec->flags & SIGN); } else { - if (spec->flags & SIGN) - spec->type = FORMAT_TYPE_INT; - else - spec->type = FORMAT_TYPE_UINT; + BUILD_BUG_ON(FORMAT_TYPE_UINT + SIGN != FORMAT_TYPE_INT); + spec->type = FORMAT_TYPE_UINT + (spec->flags & SIGN); } return ++fmt - start; @@ -1273,6 +1365,8 @@ qualifier: * %pB output the name of a backtrace symbol with its offset * %pR output the address range in a struct resource with decoded flags * %pr output the address range in a struct resource with raw flags + * %pb output the bitmap with field width as the number of bits + * %pbl output the 
bitmap as range list with field width as the number of bits * %pM output a 6-byte MAC address with colons * %pMR output a 6-byte MAC address with colons in reversed order * %pMF output a 6-byte MAC address with dashes @@ -1290,6 +1384,11 @@ qualifier: * %*pE[achnops] print an escaped buffer * %*ph[CDN] a variable-length hex string with a separator (supports up to 64 * bytes of the input) + * %pC output the name (Common Clock Framework) or address (legacy clock + * framework) of a clock + * %pCn output the name (Common Clock Framework) or address (legacy clock + * framework) of a clock + * %pCr output the current rate of a clock * %n is ignored * * ** Please update Documentation/printk-formats.txt when making changes ** @@ -1312,7 +1411,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) /* Reject out-of-range values early. Large positive sizes are used for unknown buffer sizes. */ - if ((int) size < 0) + if (WARN_ON_ONCE(size > INT_MAX)) return 0; str = buf; @@ -1378,7 +1477,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) break; case FORMAT_TYPE_PTR: - str = pointer(fmt+1, str, end, va_arg(args, void *), + str = pointer(fmt, str, end, va_arg(args, void *), spec); while (isalnum(*fmt)) fmt++; diff --git a/drivers/include/asm-generic/atomic-long.h b/drivers/include/asm-generic/atomic-long.h index d151845fe..398a72a7a 100644 --- a/drivers/include/asm-generic/atomic-long.h +++ b/drivers/include/asm-generic/atomic-long.h @@ -23,236 +23,168 @@ typedef atomic64_t atomic_long_t; #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) +#define ATOMIC_LONG_PFX(x) atomic64 ## x -static inline long atomic_long_read(atomic_long_t *l) -{ - atomic64_t *v = (atomic64_t *)l; - - return (long)atomic64_read(v); -} - -static inline void atomic_long_set(atomic_long_t *l, long i) -{ - atomic64_t *v = (atomic64_t *)l; - - atomic64_set(v, i); -} - -static inline void atomic_long_inc(atomic_long_t *l) -{ - atomic64_t *v = (atomic64_t *)l; - - atomic64_inc(v); -} - -static inline void atomic_long_dec(atomic_long_t *l) -{ - atomic64_t *v = (atomic64_t *)l; - - atomic64_dec(v); -} - -static inline void atomic_long_add(long i, atomic_long_t *l) -{ - atomic64_t *v = (atomic64_t *)l; - - atomic64_add(i, v); -} - -static inline void atomic_long_sub(long i, atomic_long_t *l) -{ - atomic64_t *v = (atomic64_t *)l; - - atomic64_sub(i, v); -} - -static inline int atomic_long_sub_and_test(long i, atomic_long_t *l) -{ - atomic64_t *v = (atomic64_t *)l; - - return atomic64_sub_and_test(i, v); -} - -static inline int atomic_long_dec_and_test(atomic_long_t *l) -{ - atomic64_t *v = (atomic64_t *)l; - - return atomic64_dec_and_test(v); -} - -static inline int atomic_long_inc_and_test(atomic_long_t *l) -{ - atomic64_t *v = (atomic64_t *)l; - - return atomic64_inc_and_test(v); -} - -static inline int atomic_long_add_negative(long i, atomic_long_t *l) -{ - atomic64_t *v = (atomic64_t *)l; - - return atomic64_add_negative(i, v); -} - -static inline long atomic_long_add_return(long i, atomic_long_t *l) -{ - atomic64_t *v = (atomic64_t *)l; - - return (long)atomic64_add_return(i, v); -} - -static inline long atomic_long_sub_return(long i, atomic_long_t *l) -{ - atomic64_t *v = (atomic64_t *)l; - - return (long)atomic64_sub_return(i, v); -} - -static inline long atomic_long_inc_return(atomic_long_t *l) -{ - atomic64_t *v = (atomic64_t *)l; - - return (long)atomic64_inc_return(v); -} - -static inline long atomic_long_dec_return(atomic_long_t *l) -{ - atomic64_t *v = (atomic64_t *)l; - - return 
(long)atomic64_dec_return(v); -} - -static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u) -{ - atomic64_t *v = (atomic64_t *)l; - - return (long)atomic64_add_unless(v, a, u); -} - -#define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l)) - -#define atomic_long_cmpxchg(l, old, new) \ - (atomic64_cmpxchg((atomic64_t *)(l), (old), (new))) -#define atomic_long_xchg(v, new) \ - (atomic64_xchg((atomic64_t *)(v), (new))) - -#else /* BITS_PER_LONG == 64 */ +#else typedef atomic_t atomic_long_t; #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) -static inline long atomic_long_read(atomic_long_t *l) -{ - atomic_t *v = (atomic_t *)l; +#define ATOMIC_LONG_PFX(x) atomic ## x - return (long)atomic_read(v); +#endif + +#define ATOMIC_LONG_READ_OP(mo) \ +static inline long atomic_long_read##mo(const atomic_long_t *l) \ +{ \ + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ + \ + return (long)ATOMIC_LONG_PFX(_read##mo)(v); \ } +ATOMIC_LONG_READ_OP() +ATOMIC_LONG_READ_OP(_acquire) -static inline void atomic_long_set(atomic_long_t *l, long i) -{ - atomic_t *v = (atomic_t *)l; +#undef ATOMIC_LONG_READ_OP - atomic_set(v, i); +#define ATOMIC_LONG_SET_OP(mo) \ +static inline void atomic_long_set##mo(atomic_long_t *l, long i) \ +{ \ + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ + \ + ATOMIC_LONG_PFX(_set##mo)(v, i); \ } +ATOMIC_LONG_SET_OP() +ATOMIC_LONG_SET_OP(_release) + +#undef ATOMIC_LONG_SET_OP + +#define ATOMIC_LONG_ADD_SUB_OP(op, mo) \ +static inline long \ +atomic_long_##op##_return##mo(long i, atomic_long_t *l) \ +{ \ + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ + \ + return (long)ATOMIC_LONG_PFX(_##op##_return##mo)(i, v); \ +} +ATOMIC_LONG_ADD_SUB_OP(add,) +ATOMIC_LONG_ADD_SUB_OP(add, _relaxed) +ATOMIC_LONG_ADD_SUB_OP(add, _acquire) +ATOMIC_LONG_ADD_SUB_OP(add, _release) +ATOMIC_LONG_ADD_SUB_OP(sub,) +ATOMIC_LONG_ADD_SUB_OP(sub, _relaxed) +ATOMIC_LONG_ADD_SUB_OP(sub, _acquire) +ATOMIC_LONG_ADD_SUB_OP(sub, _release) + +#undef ATOMIC_LONG_ADD_SUB_OP + +#define atomic_long_cmpxchg_relaxed(l, old, new) \ + (ATOMIC_LONG_PFX(_cmpxchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(l), \ + (old), (new))) +#define atomic_long_cmpxchg_acquire(l, old, new) \ + (ATOMIC_LONG_PFX(_cmpxchg_acquire)((ATOMIC_LONG_PFX(_t) *)(l), \ + (old), (new))) +#define atomic_long_cmpxchg_release(l, old, new) \ + (ATOMIC_LONG_PFX(_cmpxchg_release)((ATOMIC_LONG_PFX(_t) *)(l), \ + (old), (new))) +#define atomic_long_cmpxchg(l, old, new) \ + (ATOMIC_LONG_PFX(_cmpxchg)((ATOMIC_LONG_PFX(_t) *)(l), (old), (new))) + +#define atomic_long_xchg_relaxed(v, new) \ + (ATOMIC_LONG_PFX(_xchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(v), (new))) +#define atomic_long_xchg_acquire(v, new) \ + (ATOMIC_LONG_PFX(_xchg_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (new))) +#define atomic_long_xchg_release(v, new) \ + (ATOMIC_LONG_PFX(_xchg_release)((ATOMIC_LONG_PFX(_t) *)(v), (new))) +#define atomic_long_xchg(v, new) \ + (ATOMIC_LONG_PFX(_xchg)((ATOMIC_LONG_PFX(_t) *)(v), (new))) static inline void atomic_long_inc(atomic_long_t *l) { - atomic_t *v = (atomic_t *)l; + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; - atomic_inc(v); + ATOMIC_LONG_PFX(_inc)(v); } static inline void atomic_long_dec(atomic_long_t *l) { - atomic_t *v = (atomic_t *)l; + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; - atomic_dec(v); + ATOMIC_LONG_PFX(_dec)(v); } -static inline void atomic_long_add(long i, atomic_long_t *l) -{ - atomic_t *v = (atomic_t *)l; - - atomic_add(i, v); +#define ATOMIC_LONG_OP(op) \ +static inline void \ 
+atomic_long_##op(long i, atomic_long_t *l) \ +{ \ + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ + \ + ATOMIC_LONG_PFX(_##op)(i, v); \ } -static inline void atomic_long_sub(long i, atomic_long_t *l) -{ - atomic_t *v = (atomic_t *)l; +ATOMIC_LONG_OP(add) +ATOMIC_LONG_OP(sub) +ATOMIC_LONG_OP(and) +ATOMIC_LONG_OP(or) +ATOMIC_LONG_OP(xor) +ATOMIC_LONG_OP(andnot) - atomic_sub(i, v); -} +#undef ATOMIC_LONG_OP static inline int atomic_long_sub_and_test(long i, atomic_long_t *l) { - atomic_t *v = (atomic_t *)l; + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; - return atomic_sub_and_test(i, v); + return ATOMIC_LONG_PFX(_sub_and_test)(i, v); } static inline int atomic_long_dec_and_test(atomic_long_t *l) { - atomic_t *v = (atomic_t *)l; + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; - return atomic_dec_and_test(v); + return ATOMIC_LONG_PFX(_dec_and_test)(v); } static inline int atomic_long_inc_and_test(atomic_long_t *l) { - atomic_t *v = (atomic_t *)l; + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; - return atomic_inc_and_test(v); + return ATOMIC_LONG_PFX(_inc_and_test)(v); } static inline int atomic_long_add_negative(long i, atomic_long_t *l) { - atomic_t *v = (atomic_t *)l; + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; - return atomic_add_negative(i, v); + return ATOMIC_LONG_PFX(_add_negative)(i, v); } -static inline long atomic_long_add_return(long i, atomic_long_t *l) -{ - atomic_t *v = (atomic_t *)l; - - return (long)atomic_add_return(i, v); +#define ATOMIC_LONG_INC_DEC_OP(op, mo) \ +static inline long \ +atomic_long_##op##_return##mo(atomic_long_t *l) \ +{ \ + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \ + \ + return (long)ATOMIC_LONG_PFX(_##op##_return##mo)(v); \ } +ATOMIC_LONG_INC_DEC_OP(inc,) +ATOMIC_LONG_INC_DEC_OP(inc, _relaxed) +ATOMIC_LONG_INC_DEC_OP(inc, _acquire) +ATOMIC_LONG_INC_DEC_OP(inc, _release) +ATOMIC_LONG_INC_DEC_OP(dec,) +ATOMIC_LONG_INC_DEC_OP(dec, _relaxed) +ATOMIC_LONG_INC_DEC_OP(dec, _acquire) +ATOMIC_LONG_INC_DEC_OP(dec, _release) -static inline long atomic_long_sub_return(long i, atomic_long_t *l) -{ - atomic_t *v = (atomic_t *)l; - - return (long)atomic_sub_return(i, v); -} - -static inline long atomic_long_inc_return(atomic_long_t *l) -{ - atomic_t *v = (atomic_t *)l; - - return (long)atomic_inc_return(v); -} - -static inline long atomic_long_dec_return(atomic_long_t *l) -{ - atomic_t *v = (atomic_t *)l; - - return (long)atomic_dec_return(v); -} +#undef ATOMIC_LONG_INC_DEC_OP static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u) { - atomic_t *v = (atomic_t *)l; + ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; - return (long)atomic_add_unless(v, a, u); + return (long)ATOMIC_LONG_PFX(_add_unless)(v, a, u); } -#define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l)) - -#define atomic_long_cmpxchg(l, old, new) \ - (atomic_cmpxchg((atomic_t *)(l), (old), (new))) -#define atomic_long_xchg(v, new) \ - (atomic_xchg((atomic_t *)(v), (new))) - -#endif /* BITS_PER_LONG == 64 */ +#define atomic_long_inc_not_zero(l) \ + ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l)) #endif /* _ASM_GENERIC_ATOMIC_LONG_H */ diff --git a/drivers/include/asm-generic/memory_model.h b/drivers/include/asm-generic/memory_model.h index 14909b0b9..4b4b056a6 100644 --- a/drivers/include/asm-generic/memory_model.h +++ b/drivers/include/asm-generic/memory_model.h @@ -69,6 +69,12 @@ }) #endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */ +/* + * Convert a physical address to a Page Frame Number and back + */ +#define 
__phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT)) +#define __pfn_to_phys(pfn) PFN_PHYS(pfn) + #define page_to_pfn __page_to_pfn #define pfn_to_page __pfn_to_page diff --git a/drivers/include/asm/alternative.h b/drivers/include/asm/alternative.h index 37d16ed65..cff9eede0 100644 --- a/drivers/include/asm/alternative.h +++ b/drivers/include/asm/alternative.h @@ -5,6 +5,7 @@ #include #include #include +#include /* * Alternative inline assembly for SMP. @@ -47,8 +48,15 @@ struct alt_instr { s32 repl_offset; /* offset to replacement instruction */ u16 cpuid; /* cpuid bit set for replacement */ u8 instrlen; /* length of original instruction */ - u8 replacementlen; /* length of new instruction, <= instrlen */ -}; + u8 replacementlen; /* length of new instruction */ + u8 padlen; /* length of build-time padding */ +} __packed; + +/* + * Debug flag that can be tested to see whether alternative + * instructions were patched in already: + */ +extern int alternatives_patched; extern void alternative_instructions(void); extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end); @@ -75,50 +83,69 @@ static inline int alternatives_text_reserved(void *start, void *end) } #endif /* CONFIG_SMP */ -#define OLDINSTR(oldinstr) "661:\n\t" oldinstr "\n662:\n" +#define b_replacement(num) "664"#num +#define e_replacement(num) "665"#num -#define b_replacement(number) "663"#number -#define e_replacement(number) "664"#number +#define alt_end_marker "663" +#define alt_slen "662b-661b" +#define alt_pad_len alt_end_marker"b-662b" +#define alt_total_slen alt_end_marker"b-661b" +#define alt_rlen(num) e_replacement(num)"f-"b_replacement(num)"f" -#define alt_slen "662b-661b" -#define alt_rlen(number) e_replacement(number)"f-"b_replacement(number)"f" +#define __OLDINSTR(oldinstr, num) \ + "661:\n\t" oldinstr "\n662:\n" \ + ".skip -(((" alt_rlen(num) ")-(" alt_slen ")) > 0) * " \ + "((" alt_rlen(num) ")-(" alt_slen ")),0x90\n" -#define ALTINSTR_ENTRY(feature, number) \ +#define OLDINSTR(oldinstr, num) \ + __OLDINSTR(oldinstr, num) \ + alt_end_marker ":\n" + +/* + * max without conditionals. Idea adapted from: + * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax + * + * The additional "-" is needed because gas works with s32s. + */ +#define alt_max_short(a, b) "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") - (" b ")))))" + +/* + * Pad the second replacement alternative with additional NOPs if it is + * additionally longer than the first replacement alternative. 
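+ *
+ * (Editorial example of the .skip idiom used here: gas evaluates a
+ * true comparison as -1, so with slen == 2 and max(rlen1, rlen2) == 5
+ * the expression -((5 - 2) > 0) * (5 - 2) folds to -(-1) * 3 = 3 and
+ * ".skip 3, 0x90" emits three NOP bytes of padding; a non-positive
+ * difference folds to 0 and no padding is emitted.)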
+ */ +#define OLDINSTR_2(oldinstr, num1, num2) \ + "661:\n\t" oldinstr "\n662:\n" \ + ".skip -((" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")) > 0) * " \ + "(" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")), 0x90\n" \ + alt_end_marker ":\n" + +#define ALTINSTR_ENTRY(feature, num) \ " .long 661b - .\n" /* label */ \ - " .long " b_replacement(number)"f - .\n" /* new instruction */ \ + " .long " b_replacement(num)"f - .\n" /* new instruction */ \ " .word " __stringify(feature) "\n" /* feature bit */ \ - " .byte " alt_slen "\n" /* source len */ \ - " .byte " alt_rlen(number) "\n" /* replacement len */ + " .byte " alt_total_slen "\n" /* source len */ \ + " .byte " alt_rlen(num) "\n" /* replacement len */ \ + " .byte " alt_pad_len "\n" /* pad len */ -#define DISCARD_ENTRY(number) /* rlen <= slen */ \ - " .byte 0xff + (" alt_rlen(number) ") - (" alt_slen ")\n" - -#define ALTINSTR_REPLACEMENT(newinstr, feature, number) /* replacement */ \ - b_replacement(number)":\n\t" newinstr "\n" e_replacement(number) ":\n\t" +#define ALTINSTR_REPLACEMENT(newinstr, feature, num) /* replacement */ \ + b_replacement(num)":\n\t" newinstr "\n" e_replacement(num) ":\n\t" /* alternative assembly primitive: */ #define ALTERNATIVE(oldinstr, newinstr, feature) \ - OLDINSTR(oldinstr) \ + OLDINSTR(oldinstr, 1) \ ".pushsection .altinstructions,\"a\"\n" \ ALTINSTR_ENTRY(feature, 1) \ ".popsection\n" \ - ".pushsection .discard,\"aw\",@progbits\n" \ - DISCARD_ENTRY(1) \ - ".popsection\n" \ ".pushsection .altinstr_replacement, \"ax\"\n" \ ALTINSTR_REPLACEMENT(newinstr, feature, 1) \ ".popsection" #define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\ - OLDINSTR(oldinstr) \ + OLDINSTR_2(oldinstr, 1, 2) \ ".pushsection .altinstructions,\"a\"\n" \ ALTINSTR_ENTRY(feature1, 1) \ ALTINSTR_ENTRY(feature2, 2) \ ".popsection\n" \ - ".pushsection .discard,\"aw\",@progbits\n" \ - DISCARD_ENTRY(1) \ - DISCARD_ENTRY(2) \ - ".popsection\n" \ ".pushsection .altinstr_replacement, \"ax\"\n" \ ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \ ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \ @@ -145,6 +172,9 @@ static inline int alternatives_text_reserved(void *start, void *end) #define alternative(oldinstr, newinstr, feature) \ asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) : : : "memory") +#define alternative_2(oldinstr, newinstr1, feature1, newinstr2, feature2) \ + asm volatile(ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2) ::: "memory") + /* * Alternative inline assembly with input. * diff --git a/drivers/include/asm/arch_hweight.h b/drivers/include/asm/arch_hweight.h index 061b03a29..bd7134d15 100644 --- a/drivers/include/asm/arch_hweight.h +++ b/drivers/include/asm/arch_hweight.h @@ -21,15 +21,13 @@ * ARCH_HWEIGHT_CFLAGS in for the respective * compiler switches. 
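 *
 * (Editorial note: the open-coded __arch_hweight32() below is the
 * classic SWAR popcount.  Tracing w = 0xff:
 *	w - ((w >> 1) & 0x55555555)          -> 0xaa  (2-bit sums)
 *	pairs masked/added with 0x33333333   -> 0x44  (4-bit sums)
 *	(x + (x >> 4)) & 0x0f0f0f0f          -> 0x08  (8-bit sums)
 * and folding the bytes together yields 8, the number of set bits.)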
*/ -static inline unsigned int __arch_hweight32(unsigned int w) +static __always_inline unsigned int __arch_hweight32(unsigned int w) { - unsigned int res = 0; - - asm ("call __sw_hweight32" - : "="REG_OUT (res) - : REG_IN (w)); - - return res; + unsigned int res = w - ((w >> 1) & 0x55555555); + res = (res & 0x33333333) + ((res >> 2) & 0x33333333); + res = (res + (res >> 4)) & 0x0F0F0F0F; + res = res + (res >> 8); + return (res + (res >> 16)) & 0x000000FF; } static inline unsigned int __arch_hweight16(unsigned int w) @@ -42,20 +40,23 @@ static inline unsigned int __arch_hweight8(unsigned int w) return __arch_hweight32(w & 0xff); } +#ifdef CONFIG_X86_32 static inline unsigned long __arch_hweight64(__u64 w) +{ + return __arch_hweight32((u32)w) + + __arch_hweight32((u32)(w >> 32)); +} +#else +static __always_inline unsigned long __arch_hweight64(__u64 w) { unsigned long res = 0; -#ifdef CONFIG_X86_32 - return __arch_hweight32((u32)w) + - __arch_hweight32((u32)(w >> 32)); -#else asm (ALTERNATIVE("call __sw_hweight64", POPCNT64, X86_FEATURE_POPCNT) : "="REG_OUT (res) : REG_IN (w)); -#endif /* CONFIG_X86_32 */ return res; } +#endif /* CONFIG_X86_32 */ #endif diff --git a/drivers/include/asm/asm.h b/drivers/include/asm/asm.h index d08ccd305..5c829a9ed 100644 --- a/drivers/include/asm/asm.h +++ b/drivers/include/asm/asm.h @@ -44,7 +44,7 @@ /* Exception table entry */ #ifdef __ASSEMBLY__ -# define _ASM_EXTABLE(from,to) \ +# define _ASM_EXTABLE(from,to) \ .pushsection "__ex_table","a" ; \ .balign 8 ; \ .long (from) - . ; \ @@ -60,11 +60,36 @@ # define _ASM_NOKPROBE(entry) \ .pushsection "_kprobe_blacklist","aw" ; \ - _ASM_ALIGN ; \ + _ASM_ALIGN ; \ _ASM_PTR (entry); \ .popsection + +.macro ALIGN_DESTINATION + /* check for bad alignment of destination */ + movl %edi,%ecx + andl $7,%ecx + jz 102f /* already aligned */ + subl $8,%ecx + negl %ecx + subl %ecx,%edx +100: movb (%rsi),%al +101: movb %al,(%rdi) + incq %rsi + incq %rdi + decl %ecx + jnz 100b +102: + .section .fixup,"ax" +103: addl %ecx,%edx /* ecx is zerorest also */ + jmp copy_user_handle_tail + .previous + + _ASM_EXTABLE(100b,103b) + _ASM_EXTABLE(101b,103b) + .endm + #else -# define _ASM_EXTABLE(from,to) \ +# define _ASM_EXTABLE(from,to) \ " .pushsection \"__ex_table\",\"a\"\n" \ " .balign 8\n" \ " .long (" #from ") - .\n" \ diff --git a/drivers/include/asm/atomic.h b/drivers/include/asm/atomic.h index 38def971b..e733c9999 100644 --- a/drivers/include/asm/atomic.h +++ b/drivers/include/asm/atomic.h @@ -22,9 +22,9 @@ * * Atomically reads the value of @v. */ -static inline int atomic_read(const atomic_t *v) +static __always_inline int atomic_read(const atomic_t *v) { - return ACCESS_ONCE((v)->counter); + return READ_ONCE((v)->counter); } /** @@ -34,9 +34,9 @@ static inline int atomic_read(const atomic_t *v) * * Atomically sets the value of @v to @i. */ -static inline void atomic_set(atomic_t *v, int i) +static __always_inline void atomic_set(atomic_t *v, int i) { - v->counter = i; + WRITE_ONCE(v->counter, i); } /** @@ -46,7 +46,7 @@ static inline void atomic_set(atomic_t *v, int i) * * Atomically adds @i to @v. */ -static inline void atomic_add(int i, atomic_t *v) +static __always_inline void atomic_add(int i, atomic_t *v) { asm volatile(LOCK_PREFIX "addl %1,%0" : "+m" (v->counter) @@ -60,7 +60,7 @@ static inline void atomic_add(int i, atomic_t *v) * * Atomically subtracts @i from @v. 
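 *
 * (Editorial usage sketch, with free_the_object() as a hypothetical
 * cleanup callback:
 *	atomic_t refs = ATOMIC_INIT(2);
 *	atomic_sub(1, &refs);			// refs == 1
 *	if (atomic_sub_and_test(1, &refs))	// refs == 0: true
 *		free_the_object();
 * atomic_sub() itself returns nothing; use atomic_sub_return() or
 * atomic_sub_and_test(), defined below, to observe the result.)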
*/ -static inline void atomic_sub(int i, atomic_t *v) +static __always_inline void atomic_sub(int i, atomic_t *v) { asm volatile(LOCK_PREFIX "subl %1,%0" : "+m" (v->counter) @@ -76,7 +76,7 @@ static inline void atomic_sub(int i, atomic_t *v) * true if the result is zero, or false for all * other cases. */ -static inline int atomic_sub_and_test(int i, atomic_t *v) +static __always_inline int atomic_sub_and_test(int i, atomic_t *v) { GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e"); } @@ -87,7 +87,7 @@ static inline int atomic_sub_and_test(int i, atomic_t *v) * * Atomically increments @v by 1. */ -static inline void atomic_inc(atomic_t *v) +static __always_inline void atomic_inc(atomic_t *v) { asm volatile(LOCK_PREFIX "incl %0" : "+m" (v->counter)); @@ -99,7 +99,7 @@ static inline void atomic_inc(atomic_t *v) * * Atomically decrements @v by 1. */ -static inline void atomic_dec(atomic_t *v) +static __always_inline void atomic_dec(atomic_t *v) { asm volatile(LOCK_PREFIX "decl %0" : "+m" (v->counter)); @@ -113,7 +113,7 @@ static inline void atomic_dec(atomic_t *v) * returns true if the result is 0, or false for all other * cases. */ -static inline int atomic_dec_and_test(atomic_t *v) +static __always_inline int atomic_dec_and_test(atomic_t *v) { GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e"); } @@ -126,7 +126,7 @@ static inline int atomic_dec_and_test(atomic_t *v) * and returns true if the result is zero, or false for all * other cases. */ -static inline int atomic_inc_and_test(atomic_t *v) +static __always_inline int atomic_inc_and_test(atomic_t *v) { GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e"); } @@ -140,7 +140,7 @@ static inline int atomic_inc_and_test(atomic_t *v) * if the result is negative, or false when * result is greater than or equal to zero. 
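 *
 * (Editorial example: starting from atomic_set(&v, 3),
 * atomic_add_negative(-5, &v) leaves v == -2 and returns true, while
 * a subsequent atomic_add_negative(2, &v) leaves v == 0 and returns
 * false.)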
*/ -static inline int atomic_add_negative(int i, atomic_t *v) +static __always_inline int atomic_add_negative(int i, atomic_t *v) { GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s"); } @@ -152,7 +152,7 @@ static inline int atomic_add_negative(int i, atomic_t *v) * * Atomically adds @i to @v and returns @i + @v */ -static inline int atomic_add_return(int i, atomic_t *v) +static __always_inline int atomic_add_return(int i, atomic_t *v) { return i + xadd(&v->counter, i); } @@ -164,7 +164,7 @@ static inline int atomic_add_return(int i, atomic_t *v) * * Atomically subtracts @i from @v and returns @v - @i */ -static inline int atomic_sub_return(int i, atomic_t *v) +static __always_inline int atomic_sub_return(int i, atomic_t *v) { return atomic_add_return(-i, v); } @@ -172,7 +172,7 @@ static inline int atomic_sub_return(int i, atomic_t *v) #define atomic_inc_return(v) (atomic_add_return(1, v)) #define atomic_dec_return(v) (atomic_sub_return(1, v)) -static inline int atomic_cmpxchg(atomic_t *v, int old, int new) +static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new) { return cmpxchg(&v->counter, old, new); } @@ -182,6 +182,21 @@ static inline int atomic_xchg(atomic_t *v, int new) return xchg(&v->counter, new); } +#define ATOMIC_OP(op) \ +static inline void atomic_##op(int i, atomic_t *v) \ +{ \ + asm volatile(LOCK_PREFIX #op"l %1,%0" \ + : "+m" (v->counter) \ + : "ir" (i) \ + : "memory"); \ +} + +ATOMIC_OP(and) +ATOMIC_OP(or) +ATOMIC_OP(xor) + +#undef ATOMIC_OP + /** * __atomic_add_unless - add unless the number is already a given value * @v: pointer of type atomic_t @@ -191,7 +206,7 @@ static inline int atomic_xchg(atomic_t *v, int new) * Atomically adds @a to @v, so long as @v was not already @u. * Returns the old value of @v. 
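 *
 * (Editorial note: __atomic_add_unless(&v, 1, 0) is the classic
 * "take a reference unless the object is already dead" idiom; the
 * caller got the reference only if the returned old value was
 * non-zero.  The implementation below is the usual optimistic
 * cmpxchg() retry loop.)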
*/ -static inline int __atomic_add_unless(atomic_t *v, int a, int u) +static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u) { int c, old; c = atomic_read(v); @@ -213,22 +228,12 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) * Atomically adds 1 to @v * Returns the new value of @u */ -static inline short int atomic_inc_short(short int *v) +static __always_inline short int atomic_inc_short(short int *v) { asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v)); return *v; } -/* These are x86-specific, used by some header files */ -#define atomic_clear_mask(mask, addr) \ - asm volatile(LOCK_PREFIX "andl %0,%1" \ - : : "r" (~(mask)), "m" (*(addr)) : "memory") - -#define atomic_set_mask(mask, addr) \ - asm volatile(LOCK_PREFIX "orl %0,%1" \ - : : "r" ((unsigned)(mask)), "m" (*(addr)) \ - : "memory") - #ifdef CONFIG_X86_32 # include #else diff --git a/drivers/include/asm/atomic64_32.h b/drivers/include/asm/atomic64_32.h index 81893b2f6..b60ba8185 100644 --- a/drivers/include/asm/atomic64_32.h +++ b/drivers/include/asm/atomic64_32.h @@ -4,7 +4,7 @@ #include #include #include -//#include +#include /* An 64bit atomic type */ diff --git a/drivers/include/asm/barrier.h b/drivers/include/asm/barrier.h index dba028792..ae973ec5c 100644 --- a/drivers/include/asm/barrier.h +++ b/drivers/include/asm/barrier.h @@ -35,12 +35,12 @@ #define smp_mb() mb() #define smp_rmb() dma_rmb() #define smp_wmb() barrier() -#define set_mb(var, value) do { (void)xchg(&var, value); } while (0) +#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0) #else /* !SMP */ #define smp_mb() barrier() #define smp_rmb() barrier() #define smp_wmb() barrier() -#define set_mb(var, value) do { var = value; barrier(); } while (0) +#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0) #endif /* SMP */ #define read_barrier_depends() do { } while (0) @@ -57,12 +57,12 @@ do { \ compiletime_assert_atomic_type(*p); \ smp_mb(); \ - ACCESS_ONCE(*p) = (v); \ + WRITE_ONCE(*p, v); \ } while (0) #define smp_load_acquire(p) \ ({ \ - typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ smp_mb(); \ ___p1; \ @@ -74,12 +74,12 @@ do { \ do { \ compiletime_assert_atomic_type(*p); \ barrier(); \ - ACCESS_ONCE(*p) = (v); \ + WRITE_ONCE(*p, v); \ } while (0) #define smp_load_acquire(p) \ ({ \ - typeof(*p) ___p1 = ACCESS_ONCE(*p); \ + typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ barrier(); \ ___p1; \ @@ -91,17 +91,4 @@ do { \ #define smp_mb__before_atomic() barrier() #define smp_mb__after_atomic() barrier() -/* - * Stop RDTSC speculation. This is needed when you need to use RDTSC - * (or get_cycles or vread that possibly accesses the TSC) in a defined - * code region. - * - * (Could use an alternative three way for this if there was one.) - */ -static __always_inline void rdtsc_barrier(void) -{ - alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC); - alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC); -} - #endif /* _ASM_X86_BARRIER_H */ diff --git a/drivers/include/asm/cacheflush.h b/drivers/include/asm/cacheflush.h index e56dacecc..3082ff423 100644 --- a/drivers/include/asm/cacheflush.h +++ b/drivers/include/asm/cacheflush.h @@ -8,7 +8,7 @@ /* * The set_memory_* API can be used to change various attributes of a virtual * address range. 
The attributes include: - * Cachability : UnCached, WriteCombining, WriteBack + * Cachability : UnCached, WriteCombining, WriteThrough, WriteBack * Executability : eXeutable, NoteXecutable * Read/Write : ReadOnly, ReadWrite * Presence : NotPresent @@ -35,9 +35,11 @@ int _set_memory_uc(unsigned long addr, int numpages); int _set_memory_wc(unsigned long addr, int numpages); +int _set_memory_wt(unsigned long addr, int numpages); int _set_memory_wb(unsigned long addr, int numpages); int set_memory_uc(unsigned long addr, int numpages); int set_memory_wc(unsigned long addr, int numpages); +int set_memory_wt(unsigned long addr, int numpages); int set_memory_wb(unsigned long addr, int numpages); int set_memory_x(unsigned long addr, int numpages); int set_memory_nx(unsigned long addr, int numpages); @@ -48,10 +50,12 @@ int set_memory_4k(unsigned long addr, int numpages); int set_memory_array_uc(unsigned long *addr, int addrinarray); int set_memory_array_wc(unsigned long *addr, int addrinarray); +int set_memory_array_wt(unsigned long *addr, int addrinarray); int set_memory_array_wb(unsigned long *addr, int addrinarray); int set_pages_array_uc(struct page **pages, int addrinarray); int set_pages_array_wc(struct page **pages, int addrinarray); +int set_pages_array_wt(struct page **pages, int addrinarray); int set_pages_array_wb(struct page **pages, int addrinarray); /* @@ -105,9 +109,10 @@ static int set_pages_rw(struct page *page, int numpages) }; - void clflush_cache_range(void *addr, unsigned int size); +#define mmio_flush_range(addr, size) clflush_cache_range(addr, size) + #ifdef CONFIG_DEBUG_RODATA void mark_rodata_ro(void); extern const int rodata_test_data; diff --git a/drivers/include/asm/cpufeature.h b/drivers/include/asm/cpufeature.h index c7f9402d4..3ec7286c8 100644 --- a/drivers/include/asm/cpufeature.h +++ b/drivers/include/asm/cpufeature.h @@ -12,7 +12,7 @@ #include #endif -#define NCAPINTS 11 /* N 32-bit words worth of info */ +#define NCAPINTS 14 /* N 32-bit words worth of info */ #define NBUGINTS 1 /* N 32-bit bug flags */ /* @@ -119,6 +119,7 @@ #define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */ #define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */ #define X86_FEATURE_CID ( 4*32+10) /* Context ID */ +#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */ #define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */ #define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */ #define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */ @@ -174,7 +175,9 @@ #define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */ #define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */ #define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */ +#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */ #define X86_FEATURE_PERFCTR_L2 ( 6*32+28) /* L2 performance counter extensions */ +#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */ /* * Auxiliary flags: Linux defined - For features scattered in various @@ -190,10 +193,11 @@ #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ #define X86_FEATURE_HWP ( 7*32+ 10) /* "hwp" Intel HWP */ -#define X86_FEATURE_HWP_NOITFY ( 7*32+ 11) /* Intel HWP_NOTIFY */ +#define X86_FEATURE_HWP_NOTIFY ( 7*32+ 11) /* Intel HWP_NOTIFY */ #define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */ #define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */ 
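 /*
  * (Editorial note: each value above encodes word*32+bit into the
  * per-cpu x86_capability[NCAPINTS] word array, so e.g. HWP_NOTIFY
  * lives at bit 11 of word 7.  A feature test is, in effect,
  *	(caps[bit / 32] >> (bit % 32)) & 1
  * which is what the cpu_has()/boot_cpu_has() helpers boil down to.)
  */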
#define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */ +#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */ /* Virtualization flags: Linux defined, word 8 */ #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ @@ -212,6 +216,7 @@ #define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */ #define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */ #define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */ +#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */ /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ @@ -225,15 +230,19 @@ #define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */ #define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */ #define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */ +#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */ #define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */ #define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */ #define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */ #define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */ #define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */ +#define X86_FEATURE_PCOMMIT ( 9*32+22) /* PCOMMIT instruction */ #define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */ +#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */ #define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */ #define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */ #define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */ +#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */ /* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */ #define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */ @@ -241,6 +250,15 @@ #define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */ #define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */ +/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */ +#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */ + +/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */ +#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */ + +/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */ +#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */ + /* * BUG word(s) */ @@ -254,6 +272,7 @@ #define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */ #define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */ #define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */ +#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */ #if defined(__KERNEL__) && !defined(__ASSEMBLY__) @@ -388,6 +407,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; #define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16) #define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU) #define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT) +#define cpu_has_bpext boot_cpu_has(X86_FEATURE_BPEXT) #if __GNUC__ >= 4 extern void warn_pre_alternatives(void); @@ -416,6 +436,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit) " .word %P0\n" /* 1: do replace */ " .byte 2b - 1b\n" /* source len */ " .byte 0\n" /* replacement len */ + " .byte 0\n" /* pad len */ ".previous\n" /* skipping size check since replacement size = 0 */ : : "i" (X86_FEATURE_ALWAYS) 
: : t_warn); @@ -430,6 +451,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit) " .word %P0\n" /* feature bit */ " .byte 2b - 1b\n" /* source len */ " .byte 0\n" /* replacement len */ + " .byte 0\n" /* pad len */ ".previous\n" /* skipping size check since replacement size = 0 */ : : "i" (bit) : : t_no); @@ -455,6 +477,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit) " .word %P1\n" /* feature bit */ " .byte 2b - 1b\n" /* source len */ " .byte 4f - 3f\n" /* replacement len */ + " .byte 0\n" /* pad len */ ".previous\n" ".section .discard,\"aw\",@progbits\n" " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */ @@ -481,31 +504,30 @@ static __always_inline __pure bool __static_cpu_has(u16 bit) static __always_inline __pure bool _static_cpu_has_safe(u16 bit) { #ifdef CC_HAVE_ASM_GOTO -/* - * We need to spell the jumps to the compiler because, depending on the offset, - * the replacement jump can be bigger than the original jump, and this we cannot - * have. Thus, we force the jump to the widest, 4-byte, signed relative - * offset even though the last would often fit in less bytes. - */ - asm_volatile_goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n" + asm_volatile_goto("1: jmp %l[t_dynamic]\n" "2:\n" + ".skip -(((5f-4f) - (2b-1b)) > 0) * " + "((5f-4f) - (2b-1b)),0x90\n" + "3:\n" ".section .altinstructions,\"a\"\n" " .long 1b - .\n" /* src offset */ - " .long 3f - .\n" /* repl offset */ + " .long 4f - .\n" /* repl offset */ " .word %P1\n" /* always replace */ - " .byte 2b - 1b\n" /* src len */ - " .byte 4f - 3f\n" /* repl len */ + " .byte 3b - 1b\n" /* src len */ + " .byte 5f - 4f\n" /* repl len */ + " .byte 3b - 2b\n" /* pad len */ ".previous\n" ".section .altinstr_replacement,\"ax\"\n" - "3: .byte 0xe9\n .long %l[t_no] - 2b\n" - "4:\n" + "4: jmp %l[t_no]\n" + "5:\n" ".previous\n" ".section .altinstructions,\"a\"\n" " .long 1b - .\n" /* src offset */ " .long 0\n" /* no replacement */ " .word %P0\n" /* feature bit */ - " .byte 2b - 1b\n" /* src len */ + " .byte 3b - 1b\n" /* src len */ " .byte 0\n" /* repl len */ + " .byte 0\n" /* pad len */ ".previous\n" : : "i" (bit), "i" (X86_FEATURE_ALWAYS) : : t_dynamic, t_no); @@ -525,6 +547,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit) " .word %P2\n" /* always replace */ " .byte 2b - 1b\n" /* source len */ " .byte 4f - 3f\n" /* replacement len */ + " .byte 0\n" /* pad len */ ".previous\n" ".section .discard,\"aw\",@progbits\n" " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */ @@ -539,6 +562,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit) " .word %P1\n" /* feature bit */ " .byte 4b - 3b\n" /* src len */ " .byte 6f - 5f\n" /* repl len */ + " .byte 0\n" /* pad len */ ".previous\n" ".section .discard,\"aw\",@progbits\n" " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */ diff --git a/drivers/include/asm/delay.h b/drivers/include/asm/delay.h index 9b3b4f275..36a760bda 100644 --- a/drivers/include/asm/delay.h +++ b/drivers/include/asm/delay.h @@ -4,5 +4,6 @@ #include void use_tsc_delay(void); +void use_mwaitx_delay(void); #endif /* _ASM_X86_DELAY_H */ diff --git a/drivers/include/asm/e820.h b/drivers/include/asm/e820.h index 779c2efe2..3ab053787 100644 --- a/drivers/include/asm/e820.h +++ b/drivers/include/asm/e820.h @@ -40,14 +40,6 @@ static inline void e820_mark_nosave_regions(unsigned long limit_pfn) } #endif -#ifdef CONFIG_MEMTEST -extern void early_memtest(unsigned long start, unsigned long end); -#else -static inline void early_memtest(unsigned long start, unsigned long end) -{ 
-} -#endif - extern unsigned long e820_end_of_ram_pfn(void); extern unsigned long e820_end_of_low_ram_pfn(void); extern u64 early_reserve_e820(u64 sizet, u64 align); diff --git a/drivers/include/asm/fpu/types.h b/drivers/include/asm/fpu/types.h new file mode 100644 index 000000000..1c6f6ac52 --- /dev/null +++ b/drivers/include/asm/fpu/types.h @@ -0,0 +1,355 @@ +/* + * FPU data structures: + */ +#ifndef _ASM_X86_FPU_H +#define _ASM_X86_FPU_H + +/* + * The legacy x87 FPU state format, as saved by FSAVE and + * restored by the FRSTOR instructions: + */ +struct fregs_state { + u32 cwd; /* FPU Control Word */ + u32 swd; /* FPU Status Word */ + u32 twd; /* FPU Tag Word */ + u32 fip; /* FPU IP Offset */ + u32 fcs; /* FPU IP Selector */ + u32 foo; /* FPU Operand Pointer Offset */ + u32 fos; /* FPU Operand Pointer Selector */ + + /* 8*10 bytes for each FP-reg = 80 bytes: */ + u32 st_space[20]; + + /* Software status information [not touched by FSAVE]: */ + u32 status; +}; + +/* + * The legacy fx SSE/MMX FPU state format, as saved by FXSAVE and + * restored by the FXRSTOR instructions. It's similar to the FSAVE + * format, but differs in some areas, plus has extensions at + * the end for the XMM registers. + */ +struct fxregs_state { + u16 cwd; /* Control Word */ + u16 swd; /* Status Word */ + u16 twd; /* Tag Word */ + u16 fop; /* Last Instruction Opcode */ + union { + struct { + u64 rip; /* Instruction Pointer */ + u64 rdp; /* Data Pointer */ + }; + struct { + u32 fip; /* FPU IP Offset */ + u32 fcs; /* FPU IP Selector */ + u32 foo; /* FPU Operand Offset */ + u32 fos; /* FPU Operand Selector */ + }; + }; + u32 mxcsr; /* MXCSR Register State */ + u32 mxcsr_mask; /* MXCSR Mask */ + + /* 8*16 bytes for each FP-reg = 128 bytes: */ + u32 st_space[32]; + + /* 16*16 bytes for each XMM-reg = 256 bytes: */ + u32 xmm_space[64]; + + u32 padding[12]; + + union { + u32 padding1[12]; + u32 sw_reserved[12]; + }; + +} __attribute__((aligned(16))); + +/* Default value for fxregs_state.mxcsr: */ +#define MXCSR_DEFAULT 0x1f80 + +/* + * Software based FPU emulation state. This is arbitrary really, + * it matches the x87 format to make it easier to understand: + */ +struct swregs_state { + u32 cwd; + u32 swd; + u32 twd; + u32 fip; + u32 fcs; + u32 foo; + u32 fos; + /* 8*10 bytes for each FP-reg = 80 bytes: */ + u32 st_space[20]; + u8 ftop; + u8 changed; + u8 lookahead; + u8 no_update; + u8 rm; + u8 alimit; + struct math_emu_info *info; + u32 entry_eip; +}; + +/* + * List of XSAVE features Linux knows about: + */ +enum xfeature { + XFEATURE_FP, + XFEATURE_SSE, + /* + * Values above here are "legacy states". + * Those below are "extended states". 
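+ *
+ * (Editorial example of the mask encoding below: XFEATURE_YMM is
+ * bit 2, so XFEATURE_MASK_YMM == 0x4, and the three AVX-512
+ * components (bits 5-7) combine into XFEATURE_MASK_AVX512 == 0xe0.)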
+ */ + XFEATURE_YMM, + XFEATURE_BNDREGS, + XFEATURE_BNDCSR, + XFEATURE_OPMASK, + XFEATURE_ZMM_Hi256, + XFEATURE_Hi16_ZMM, + + XFEATURE_MAX, +}; + +#define XFEATURE_MASK_FP (1 << XFEATURE_FP) +#define XFEATURE_MASK_SSE (1 << XFEATURE_SSE) +#define XFEATURE_MASK_YMM (1 << XFEATURE_YMM) +#define XFEATURE_MASK_BNDREGS (1 << XFEATURE_BNDREGS) +#define XFEATURE_MASK_BNDCSR (1 << XFEATURE_BNDCSR) +#define XFEATURE_MASK_OPMASK (1 << XFEATURE_OPMASK) +#define XFEATURE_MASK_ZMM_Hi256 (1 << XFEATURE_ZMM_Hi256) +#define XFEATURE_MASK_Hi16_ZMM (1 << XFEATURE_Hi16_ZMM) + +#define XFEATURE_MASK_FPSSE (XFEATURE_MASK_FP | XFEATURE_MASK_SSE) +#define XFEATURE_MASK_AVX512 (XFEATURE_MASK_OPMASK \ + | XFEATURE_MASK_ZMM_Hi256 \ + | XFEATURE_MASK_Hi16_ZMM) + +#define FIRST_EXTENDED_XFEATURE XFEATURE_YMM + +struct reg_128_bit { + u8 regbytes[128/8]; +}; +struct reg_256_bit { + u8 regbytes[256/8]; +}; +struct reg_512_bit { + u8 regbytes[512/8]; +}; + +/* + * State component 2: + * + * There are 16x 256-bit AVX registers named YMM0-YMM15. + * The low 128 bits are aliased to the 16 SSE registers (XMM0-XMM15) + * and are stored in 'struct fxregs_state::xmm_space[]' in the + * "legacy" area. + * + * The high 128 bits are stored here. + */ +struct ymmh_struct { + struct reg_128_bit hi_ymm[16]; +} __packed; + +/* Intel MPX support: */ + +struct mpx_bndreg { + u64 lower_bound; + u64 upper_bound; +} __packed; +/* + * State component 3 is used for the 4 128-bit bounds registers + */ +struct mpx_bndreg_state { + struct mpx_bndreg bndreg[4]; +} __packed; + +/* + * State component 4 is used for the 64-bit user-mode MPX + * configuration register BNDCFGU and the 64-bit MPX status + * register BNDSTATUS. We call the pair "BNDCSR". + */ +struct mpx_bndcsr { + u64 bndcfgu; + u64 bndstatus; +} __packed; + +/* + * The BNDCSR state is padded out to be 64-bytes in size. + */ +struct mpx_bndcsr_state { + union { + struct mpx_bndcsr bndcsr; + u8 pad_to_64_bytes[64]; + }; +} __packed; + +/* AVX-512 Components: */ + +/* + * State component 5 is used for the 8 64-bit opmask registers + * k0-k7 (opmask state). + */ +struct avx_512_opmask_state { + u64 opmask_reg[8]; +} __packed; + +/* + * State component 6 is used for the upper 256 bits of the + * registers ZMM0-ZMM15. These 16 256-bit values are denoted + * ZMM0_H-ZMM15_H (ZMM_Hi256 state). + */ +struct avx_512_zmm_uppers_state { + struct reg_256_bit zmm_upper[16]; +} __packed; + +/* + * State component 7 is used for the 16 512-bit registers + * ZMM16-ZMM31 (Hi16_ZMM state). + */ +struct avx_512_hi16_state { + struct reg_512_bit hi16_zmm[16]; +} __packed; + +struct xstate_header { + u64 xfeatures; + u64 xcomp_bv; + u64 reserved[6]; +} __attribute__((packed)); + +/* + * This is our most modern FPU state format, as saved by the XSAVE + * and restored by the XRSTOR instructions. + * + * It consists of a legacy fxregs portion, an xstate header and + * subsequent areas as defined by the xstate header. Not all CPUs + * support all the extensions, so the size of the extended area + * can vary quite a bit between CPUs. + */ +struct xregs_state { + struct fxregs_state i387; + struct xstate_header header; + u8 extended_state_area[0]; +} __attribute__ ((packed, aligned (64))); + +/* + * This is a union of all the possible FPU state formats + * put together, so that we can pick the right one runtime. + * + * The size of the structure is determined by the largest + * member - which is the xsave area. 
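+ * (With 4 KiB pages, for example, the __padding member makes the
+ * whole union exactly one page:
+ *	BUILD_BUG_ON(sizeof(union fpregs_state) != PAGE_SIZE);
+ * would hold on such a build.)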
The padding is there + * to ensure that statically-allocated task_structs (just + * the init_task today) have enough space. + */ +union fpregs_state { + struct fregs_state fsave; + struct fxregs_state fxsave; + struct swregs_state soft; + struct xregs_state xsave; + u8 __padding[PAGE_SIZE]; +}; + +/* + * Highest level per task FPU state data structure that + * contains the FPU register state plus various FPU + * state fields: + */ +struct fpu { + /* + * @last_cpu: + * + * Records the last CPU on which this context was loaded into + * FPU registers. (In the lazy-restore case we might be + * able to reuse FPU registers across multiple context switches + * this way, if no intermediate task used the FPU.) + * + * A value of -1 is used to indicate that the FPU state in context + * memory is newer than the FPU state in registers, and that the + * FPU state should be reloaded next time the task is run. + */ + unsigned int last_cpu; + + /* + * @fpstate_active: + * + * This flag indicates whether this context is active: if the task + * is not running then we can restore from this context, if the task + * is running then we should save into this context. + */ + unsigned char fpstate_active; + + /* + * @fpregs_active: + * + * This flag determines whether a given context is actively + * loaded into the FPU's registers and that those registers + * represent the task's current FPU state. + * + * Note the interaction with fpstate_active: + * + * # task does not use the FPU: + * fpstate_active == 0 + * + * # task uses the FPU and regs are active: + * fpstate_active == 1 && fpregs_active == 1 + * + * # the regs are inactive but still match fpstate: + * fpstate_active == 1 && fpregs_active == 0 && fpregs_owner == fpu + * + * The third state is what we use for the lazy restore optimization + * on lazy-switching CPUs. + */ + unsigned char fpregs_active; + + /* + * @counter: + * + * This counter contains the number of consecutive context switches + * during which the FPU stays used. If this is over a threshold, the + * lazy FPU restore logic becomes eager, to save the trap overhead. + * This is an unsigned char so that after 256 iterations the counter + * wraps and the context switch behavior turns lazy again; this is to + * deal with bursty apps that only use the FPU for a short time: + */ + unsigned char counter; + /* + * @state: + * + * In-memory copy of all FPU registers that we save/restore + * over context switches. If the task is using the FPU then + * the registers in the FPU are more recent than this state + * copy. If the task context-switches away then they get + * saved here and represent the FPU state. + * + * After context switches there may be a (short) time period + * during which the in-FPU hardware registers are unchanged + * and still perfectly match this state, if the tasks + * scheduled afterwards are not using the FPU. + * + * This is the 'lazy restore' window of optimization, which + * we track though 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'. + * + * We detect whether a subsequent task uses the FPU via setting + * CR0::TS to 1, which causes any FPU use to raise a #NM fault. + * + * During this window, if the task gets scheduled again, we + * might be able to skip having to do a restore from this + * memory buffer to the hardware registers - at the cost of + * incurring the overhead of #NM fault traps. 
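+	 * (Editorial sketch of the check the lazy-restore path makes,
+	 * roughly, via a fpu_want_lazy_restore()-style helper:
+	 *	fpu == this_cpu_read(fpu_fpregs_owner_ctx)
+	 *		&& fpu->last_cpu == smp_processor_id()
+	 * i.e. the live registers still belong to this task on this
+	 * CPU; only then can the restore be skipped.)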
+	 *
+	 * Note that on modern CPUs that support the XSAVEOPT (or other
+	 * optimized XSAVE instructions), we don't use #NM traps anymore,
+	 * as the hardware can track whether FPU registers need saving
+	 * or not. On such CPUs we activate the non-lazy ('eagerfpu')
+	 * logic, which unconditionally saves/restores all FPU state
+	 * across context switches. (if FPU state exists.)
+	 */
+	union fpregs_state		state;
+	/*
+	 * WARNING: 'state' is dynamically-sized.  Do not put
+	 * anything after it here.
+	 */
+};
+
+#endif /* _ASM_X86_FPU_H */
diff --git a/drivers/include/asm/intel-mid.h b/drivers/include/asm/intel-mid.h
new file mode 100644
index 000000000..6c3933c26
--- /dev/null
+++ b/drivers/include/asm/intel-mid.h
@@ -0,0 +1,151 @@
+/*
+ * intel-mid.h: Intel MID specific setup code
+ *
+ * (C) Copyright 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _ASM_X86_INTEL_MID_H
+#define _ASM_X86_INTEL_MID_H
+
+#include <linux/sfi.h>
+//#include <linux/platform_device.h>
+
+extern int intel_mid_pci_init(void);
+extern int get_gpio_by_name(const char *name);
+extern void intel_scu_device_register(struct platform_device *pdev);
+extern int __init sfi_parse_mrtc(struct sfi_table_header *table);
+extern int __init sfi_parse_mtmr(struct sfi_table_header *table);
+extern int sfi_mrtc_num;
+extern struct sfi_rtc_table_entry sfi_mrtc_array[];
+
+/*
+ * Here defines the array of devices platform data that IAFW would export
+ * through SFI "DEVS" table, we use name and type to match the device and
+ * its platform data.
+ */
+struct devs_id {
+	char name[SFI_NAME_LEN + 1];
+	u8 type;
+	u8 delay;
+	void *(*get_platform_data)(void *info);
+	/* Custom handler for devices */
+	void (*device_handler)(struct sfi_device_table_entry *pentry,
+			       struct devs_id *dev);
+};
+
+#define sfi_device(i)						\
+	static const struct devs_id *const __intel_mid_sfi_##i##_dev __used \
+	__attribute__((__section__(".x86_intel_mid_dev.init"))) = &i
+
+/*
+ * Medfield is the follow-up of Moorestown, it combines two chip solutions into
+ * one. Other than that it also added always-on and constant tsc and lapic
+ * timers. Medfield is the platform name, and the chip name is called Penwell;
+ * we treat Medfield/Penwell as a variant of Moorestown. Penwell can be
+ * identified via MSRs.
+ */
+enum intel_mid_cpu_type {
+	/* 1 was Moorestown */
+	INTEL_MID_CPU_CHIP_PENWELL = 2,
+	INTEL_MID_CPU_CHIP_CLOVERVIEW,
+	INTEL_MID_CPU_CHIP_TANGIER,
+};
+
+extern enum intel_mid_cpu_type __intel_mid_cpu_chip;
+
+/**
+ * struct intel_mid_ops - Interface between intel-mid & sub archs
+ * @arch_setup: arch_setup function to re-initialize platform
+ *              structures (x86_init, x86_platform_init)
+ *
+ * This structure can be extended if any new interface is required
+ * between intel-mid & its sub arch files.
+ */
+struct intel_mid_ops {
+	void (*arch_setup)(void);
+};
+
+/* Helper API's for INTEL_MID_OPS_INIT */
+#define DECLARE_INTEL_MID_OPS_INIT(cpuname, cpuid)	\
+	[cpuid] = get_##cpuname##_ops
+
+/* Maximum number of CPU ops */
+#define MAX_CPU_OPS(a) (sizeof(a)/sizeof(void *))
+
+/*
+ * For every new cpu addition, a weak get_<cpuname>_ops() function needs to be
+ * declared in arch/x86/platform/intel_mid/intel_mid_weak_decls.h.
+ */ +#define INTEL_MID_OPS_INIT {\ + DECLARE_INTEL_MID_OPS_INIT(penwell, INTEL_MID_CPU_CHIP_PENWELL), \ + DECLARE_INTEL_MID_OPS_INIT(cloverview, INTEL_MID_CPU_CHIP_CLOVERVIEW), \ + DECLARE_INTEL_MID_OPS_INIT(tangier, INTEL_MID_CPU_CHIP_TANGIER) \ +}; + +#ifdef CONFIG_X86_INTEL_MID + +static inline enum intel_mid_cpu_type intel_mid_identify_cpu(void) +{ + return __intel_mid_cpu_chip; +} + +static inline bool intel_mid_has_msic(void) +{ + return (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_PENWELL); +} + +#else /* !CONFIG_X86_INTEL_MID */ + +#define intel_mid_identify_cpu() (0) +#define intel_mid_has_msic() (0) + +#endif /* !CONFIG_X86_INTEL_MID */ + +enum intel_mid_timer_options { + INTEL_MID_TIMER_DEFAULT, + INTEL_MID_TIMER_APBT_ONLY, + INTEL_MID_TIMER_LAPIC_APBT, +}; + +extern enum intel_mid_timer_options intel_mid_timer_options; + +/* + * Penwell uses spread spectrum clock, so the freq number is not exactly + * the same as reported by MSR based on SDM. + */ +#define FSB_FREQ_83SKU 83200 +#define FSB_FREQ_100SKU 99840 +#define FSB_FREQ_133SKU 133000 + +#define FSB_FREQ_167SKU 167000 +#define FSB_FREQ_200SKU 200000 +#define FSB_FREQ_267SKU 267000 +#define FSB_FREQ_333SKU 333000 +#define FSB_FREQ_400SKU 400000 + +/* Bus Select SoC Fuse value */ +#define BSEL_SOC_FUSE_MASK 0x7 +#define BSEL_SOC_FUSE_001 0x1 /* FSB 133MHz */ +#define BSEL_SOC_FUSE_101 0x5 /* FSB 100MHz */ +#define BSEL_SOC_FUSE_111 0x7 /* FSB 83MHz */ + +#define SFI_MTMR_MAX_NUM 8 +#define SFI_MRTC_MAX 8 + +extern void intel_scu_devices_create(void); +extern void intel_scu_devices_destroy(void); + +/* VRTC timer */ +#define MRST_VRTC_MAP_SZ (1024) +/*#define MRST_VRTC_PGOFFSET (0xc00) */ + +extern void intel_mid_rtc_init(void); + +/* the offset for the mapping of global gpio pin to irq */ +#define INTEL_MID_IRQ_OFFSET 0x100 + +#endif /* _ASM_X86_INTEL_MID_H */ diff --git a/drivers/include/asm/irqflags.h b/drivers/include/asm/irqflags.h index 0a8b51922..b77f5edb0 100644 --- a/drivers/include/asm/irqflags.h +++ b/drivers/include/asm/irqflags.h @@ -136,10 +136,6 @@ static inline notrace unsigned long arch_local_irq_save(void) #define USERGS_SYSRET32 \ swapgs; \ sysretl -#define ENABLE_INTERRUPTS_SYSEXIT32 \ - swapgs; \ - sti; \ - sysexit #else #define INTERRUPT_RETURN iret @@ -163,33 +159,9 @@ static inline int arch_irqs_disabled(void) return arch_irqs_disabled_flags(flags); } +#endif /* !__ASSEMBLY__ */ -#else - -#ifdef CONFIG_X86_64 -#define ARCH_LOCKDEP_SYS_EXIT call lockdep_sys_exit_thunk -#define ARCH_LOCKDEP_SYS_EXIT_IRQ \ - TRACE_IRQS_ON; \ - sti; \ - SAVE_REST; \ - LOCKDEP_SYS_EXIT; \ - RESTORE_REST; \ - cli; \ - TRACE_IRQS_OFF; - -#else -#define ARCH_LOCKDEP_SYS_EXIT \ - pushl %eax; \ - pushl %ecx; \ - pushl %edx; \ - call lockdep_sys_exit; \ - popl %edx; \ - popl %ecx; \ - popl %eax; - -#define ARCH_LOCKDEP_SYS_EXIT_IRQ -#endif - +#ifdef __ASSEMBLY__ #ifdef CONFIG_TRACE_IRQFLAGS # define TRACE_IRQS_ON call trace_hardirqs_on_thunk; # define TRACE_IRQS_OFF call trace_hardirqs_off_thunk; @@ -198,12 +170,29 @@ static inline int arch_irqs_disabled(void) # define TRACE_IRQS_OFF #endif #ifdef CONFIG_DEBUG_LOCK_ALLOC -# define LOCKDEP_SYS_EXIT ARCH_LOCKDEP_SYS_EXIT -# define LOCKDEP_SYS_EXIT_IRQ ARCH_LOCKDEP_SYS_EXIT_IRQ -# else +# ifdef CONFIG_X86_64 +# define LOCKDEP_SYS_EXIT call lockdep_sys_exit_thunk +# define LOCKDEP_SYS_EXIT_IRQ \ + TRACE_IRQS_ON; \ + sti; \ + call lockdep_sys_exit_thunk; \ + cli; \ + TRACE_IRQS_OFF; +# else +# define LOCKDEP_SYS_EXIT \ + pushl %eax; \ + pushl %ecx; \ + pushl %edx; \ + call 
 lockdep_sys_exit; \
+	popl %edx; \
+	popl %ecx; \
+	popl %eax;
+#  define LOCKDEP_SYS_EXIT_IRQ
+# endif
+#else
 #  define LOCKDEP_SYS_EXIT
 #  define LOCKDEP_SYS_EXIT_IRQ
-# endif
-
-#endif /* __ASSEMBLY__ */
+#endif
+#endif /* __ASSEMBLY__ */
+
 #endif
diff --git a/drivers/include/asm/math_emu.h b/drivers/include/asm/math_emu.h
index 031f6266f..0d9b14f60 100644
--- a/drivers/include/asm/math_emu.h
+++ b/drivers/include/asm/math_emu.h
@@ -2,7 +2,6 @@
 #define _ASM_X86_MATH_EMU_H
 
 #include <asm/ptrace.h>
-#include <asm/vm86.h>
 
 /* This structure matches the layout of the data saved to the stack
    following a device-not-present interrupt, part of it saved
@@ -10,9 +9,6 @@
  */
 struct math_emu_info {
 	long ___orig_eip;
-	union {
-		struct pt_regs *regs;
-		struct kernel_vm86_regs *vm86;
-	};
+	struct pt_regs *regs;
 };
 #endif /* _ASM_X86_MATH_EMU_H */
diff --git a/drivers/include/asm/msr-index.h b/drivers/include/asm/msr-index.h
new file mode 100644
index 000000000..690b4027e
--- /dev/null
+++ b/drivers/include/asm/msr-index.h
@@ -0,0 +1,694 @@
+#ifndef _ASM_X86_MSR_INDEX_H
+#define _ASM_X86_MSR_INDEX_H
+
+/* CPU model specific register (MSR) numbers */
+
+/* x86-64 specific MSRs */
+#define MSR_EFER		0xc0000080 /* extended feature register */
+#define MSR_STAR		0xc0000081 /* legacy mode SYSCALL target */
+#define MSR_LSTAR		0xc0000082 /* long mode SYSCALL target */
+#define MSR_CSTAR		0xc0000083 /* compat mode SYSCALL target */
+#define MSR_SYSCALL_MASK	0xc0000084 /* EFLAGS mask for syscall */
+#define MSR_FS_BASE		0xc0000100 /* 64bit FS base */
+#define MSR_GS_BASE		0xc0000101 /* 64bit GS base */
+#define MSR_KERNEL_GS_BASE	0xc0000102 /* SwapGS GS shadow */
+#define MSR_TSC_AUX		0xc0000103 /* Auxiliary TSC */
+
+/* EFER bits: */
+#define _EFER_SCE		0  /* SYSCALL/SYSRET */
+#define _EFER_LME		8  /* Long mode enable */
+#define _EFER_LMA		10 /* Long mode active (read-only) */
+#define _EFER_NX		11 /* No execute enable */
+#define _EFER_SVME		12 /* Enable virtualization */
+#define _EFER_LMSLE		13 /* Long Mode Segment Limit Enable */
+#define _EFER_FFXSR		14 /* Enable Fast FXSAVE/FXRSTOR */
+
+#define EFER_SCE		(1<<_EFER_SCE)
+#define EFER_LME		(1<<_EFER_LME)
+#define EFER_LMA		(1<<_EFER_LMA)
+#define EFER_NX			(1<<_EFER_NX)
+#define EFER_SVME		(1<<_EFER_SVME)
+#define EFER_LMSLE		(1<<_EFER_LMSLE)
+#define EFER_FFXSR		(1<<_EFER_FFXSR)
+
+/* Intel MSRs.
Some also available on other CPUs */ +#define MSR_IA32_PERFCTR0 0x000000c1 +#define MSR_IA32_PERFCTR1 0x000000c2 +#define MSR_FSB_FREQ 0x000000cd +#define MSR_PLATFORM_INFO 0x000000ce + +#define MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2 +#define NHM_C3_AUTO_DEMOTE (1UL << 25) +#define NHM_C1_AUTO_DEMOTE (1UL << 26) +#define ATM_LNC_C6_AUTO_DEMOTE (1UL << 25) +#define SNB_C1_AUTO_UNDEMOTE (1UL << 27) +#define SNB_C3_AUTO_UNDEMOTE (1UL << 28) + +#define MSR_MTRRcap 0x000000fe +#define MSR_IA32_BBL_CR_CTL 0x00000119 +#define MSR_IA32_BBL_CR_CTL3 0x0000011e + +#define MSR_IA32_SYSENTER_CS 0x00000174 +#define MSR_IA32_SYSENTER_ESP 0x00000175 +#define MSR_IA32_SYSENTER_EIP 0x00000176 + +#define MSR_IA32_MCG_CAP 0x00000179 +#define MSR_IA32_MCG_STATUS 0x0000017a +#define MSR_IA32_MCG_CTL 0x0000017b +#define MSR_IA32_MCG_EXT_CTL 0x000004d0 + +#define MSR_OFFCORE_RSP_0 0x000001a6 +#define MSR_OFFCORE_RSP_1 0x000001a7 +#define MSR_NHM_TURBO_RATIO_LIMIT 0x000001ad +#define MSR_IVT_TURBO_RATIO_LIMIT 0x000001ae +#define MSR_TURBO_RATIO_LIMIT 0x000001ad +#define MSR_TURBO_RATIO_LIMIT1 0x000001ae +#define MSR_TURBO_RATIO_LIMIT2 0x000001af + +#define MSR_LBR_SELECT 0x000001c8 +#define MSR_LBR_TOS 0x000001c9 +#define MSR_LBR_NHM_FROM 0x00000680 +#define MSR_LBR_NHM_TO 0x000006c0 +#define MSR_LBR_CORE_FROM 0x00000040 +#define MSR_LBR_CORE_TO 0x00000060 + +#define MSR_LBR_INFO_0 0x00000dc0 /* ... 0xddf for _31 */ +#define LBR_INFO_MISPRED BIT_ULL(63) +#define LBR_INFO_IN_TX BIT_ULL(62) +#define LBR_INFO_ABORT BIT_ULL(61) +#define LBR_INFO_CYCLES 0xffff + +#define MSR_IA32_PEBS_ENABLE 0x000003f1 +#define MSR_IA32_DS_AREA 0x00000600 +#define MSR_IA32_PERF_CAPABILITIES 0x00000345 +#define MSR_PEBS_LD_LAT_THRESHOLD 0x000003f6 + +#define MSR_IA32_RTIT_CTL 0x00000570 +#define RTIT_CTL_TRACEEN BIT(0) +#define RTIT_CTL_CYCLEACC BIT(1) +#define RTIT_CTL_OS BIT(2) +#define RTIT_CTL_USR BIT(3) +#define RTIT_CTL_CR3EN BIT(7) +#define RTIT_CTL_TOPA BIT(8) +#define RTIT_CTL_MTC_EN BIT(9) +#define RTIT_CTL_TSC_EN BIT(10) +#define RTIT_CTL_DISRETC BIT(11) +#define RTIT_CTL_BRANCH_EN BIT(13) +#define RTIT_CTL_MTC_RANGE_OFFSET 14 +#define RTIT_CTL_MTC_RANGE (0x0full << RTIT_CTL_MTC_RANGE_OFFSET) +#define RTIT_CTL_CYC_THRESH_OFFSET 19 +#define RTIT_CTL_CYC_THRESH (0x0full << RTIT_CTL_CYC_THRESH_OFFSET) +#define RTIT_CTL_PSB_FREQ_OFFSET 24 +#define RTIT_CTL_PSB_FREQ (0x0full << RTIT_CTL_PSB_FREQ_OFFSET) +#define MSR_IA32_RTIT_STATUS 0x00000571 +#define RTIT_STATUS_CONTEXTEN BIT(1) +#define RTIT_STATUS_TRIGGEREN BIT(2) +#define RTIT_STATUS_ERROR BIT(4) +#define RTIT_STATUS_STOPPED BIT(5) +#define MSR_IA32_RTIT_CR3_MATCH 0x00000572 +#define MSR_IA32_RTIT_OUTPUT_BASE 0x00000560 +#define MSR_IA32_RTIT_OUTPUT_MASK 0x00000561 + +#define MSR_MTRRfix64K_00000 0x00000250 +#define MSR_MTRRfix16K_80000 0x00000258 +#define MSR_MTRRfix16K_A0000 0x00000259 +#define MSR_MTRRfix4K_C0000 0x00000268 +#define MSR_MTRRfix4K_C8000 0x00000269 +#define MSR_MTRRfix4K_D0000 0x0000026a +#define MSR_MTRRfix4K_D8000 0x0000026b +#define MSR_MTRRfix4K_E0000 0x0000026c +#define MSR_MTRRfix4K_E8000 0x0000026d +#define MSR_MTRRfix4K_F0000 0x0000026e +#define MSR_MTRRfix4K_F8000 0x0000026f +#define MSR_MTRRdefType 0x000002ff + +#define MSR_IA32_CR_PAT 0x00000277 + +#define MSR_IA32_DEBUGCTLMSR 0x000001d9 +#define MSR_IA32_LASTBRANCHFROMIP 0x000001db +#define MSR_IA32_LASTBRANCHTOIP 0x000001dc +#define MSR_IA32_LASTINTFROMIP 0x000001dd +#define MSR_IA32_LASTINTTOIP 0x000001de + +/* DEBUGCTLMSR bits (others vary by model): */ +#define DEBUGCTLMSR_LBR (1UL << 0) /* 
last branch recording */ +#define DEBUGCTLMSR_BTF (1UL << 1) /* single-step on branches */ +#define DEBUGCTLMSR_TR (1UL << 6) +#define DEBUGCTLMSR_BTS (1UL << 7) +#define DEBUGCTLMSR_BTINT (1UL << 8) +#define DEBUGCTLMSR_BTS_OFF_OS (1UL << 9) +#define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10) +#define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11) + +#define MSR_PEBS_FRONTEND 0x000003f7 + +#define MSR_IA32_POWER_CTL 0x000001fc + +#define MSR_IA32_MC0_CTL 0x00000400 +#define MSR_IA32_MC0_STATUS 0x00000401 +#define MSR_IA32_MC0_ADDR 0x00000402 +#define MSR_IA32_MC0_MISC 0x00000403 + +/* C-state Residency Counters */ +#define MSR_PKG_C3_RESIDENCY 0x000003f8 +#define MSR_PKG_C6_RESIDENCY 0x000003f9 +#define MSR_PKG_C7_RESIDENCY 0x000003fa +#define MSR_CORE_C3_RESIDENCY 0x000003fc +#define MSR_CORE_C6_RESIDENCY 0x000003fd +#define MSR_CORE_C7_RESIDENCY 0x000003fe +#define MSR_KNL_CORE_C6_RESIDENCY 0x000003ff +#define MSR_PKG_C2_RESIDENCY 0x0000060d +#define MSR_PKG_C8_RESIDENCY 0x00000630 +#define MSR_PKG_C9_RESIDENCY 0x00000631 +#define MSR_PKG_C10_RESIDENCY 0x00000632 + +/* Run Time Average Power Limiting (RAPL) Interface */ + +#define MSR_RAPL_POWER_UNIT 0x00000606 + +#define MSR_PKG_POWER_LIMIT 0x00000610 +#define MSR_PKG_ENERGY_STATUS 0x00000611 +#define MSR_PKG_PERF_STATUS 0x00000613 +#define MSR_PKG_POWER_INFO 0x00000614 + +#define MSR_DRAM_POWER_LIMIT 0x00000618 +#define MSR_DRAM_ENERGY_STATUS 0x00000619 +#define MSR_DRAM_PERF_STATUS 0x0000061b +#define MSR_DRAM_POWER_INFO 0x0000061c + +#define MSR_PP0_POWER_LIMIT 0x00000638 +#define MSR_PP0_ENERGY_STATUS 0x00000639 +#define MSR_PP0_POLICY 0x0000063a +#define MSR_PP0_PERF_STATUS 0x0000063b + +#define MSR_PP1_POWER_LIMIT 0x00000640 +#define MSR_PP1_ENERGY_STATUS 0x00000641 +#define MSR_PP1_POLICY 0x00000642 + +#define MSR_CONFIG_TDP_NOMINAL 0x00000648 +#define MSR_CONFIG_TDP_LEVEL_1 0x00000649 +#define MSR_CONFIG_TDP_LEVEL_2 0x0000064A +#define MSR_CONFIG_TDP_CONTROL 0x0000064B +#define MSR_TURBO_ACTIVATION_RATIO 0x0000064C + +#define MSR_PKG_WEIGHTED_CORE_C0_RES 0x00000658 +#define MSR_PKG_ANY_CORE_C0_RES 0x00000659 +#define MSR_PKG_ANY_GFXE_C0_RES 0x0000065A +#define MSR_PKG_BOTH_CORE_GFXE_C0_RES 0x0000065B + +#define MSR_CORE_C1_RES 0x00000660 + +#define MSR_CC6_DEMOTION_POLICY_CONFIG 0x00000668 +#define MSR_MC6_DEMOTION_POLICY_CONFIG 0x00000669 + +#define MSR_CORE_PERF_LIMIT_REASONS 0x00000690 +#define MSR_GFX_PERF_LIMIT_REASONS 0x000006B0 +#define MSR_RING_PERF_LIMIT_REASONS 0x000006B1 + +/* Config TDP MSRs */ +#define MSR_CONFIG_TDP_NOMINAL 0x00000648 +#define MSR_CONFIG_TDP_LEVEL1 0x00000649 +#define MSR_CONFIG_TDP_LEVEL2 0x0000064A +#define MSR_CONFIG_TDP_CONTROL 0x0000064B +#define MSR_TURBO_ACTIVATION_RATIO 0x0000064C + +/* Hardware P state interface */ +#define MSR_PPERF 0x0000064e +#define MSR_PERF_LIMIT_REASONS 0x0000064f +#define MSR_PM_ENABLE 0x00000770 +#define MSR_HWP_CAPABILITIES 0x00000771 +#define MSR_HWP_REQUEST_PKG 0x00000772 +#define MSR_HWP_INTERRUPT 0x00000773 +#define MSR_HWP_REQUEST 0x00000774 +#define MSR_HWP_STATUS 0x00000777 + +/* CPUID.6.EAX */ +#define HWP_BASE_BIT (1<<7) +#define HWP_NOTIFICATIONS_BIT (1<<8) +#define HWP_ACTIVITY_WINDOW_BIT (1<<9) +#define HWP_ENERGY_PERF_PREFERENCE_BIT (1<<10) +#define HWP_PACKAGE_LEVEL_REQUEST_BIT (1<<11) + +/* IA32_HWP_CAPABILITIES */ +#define HWP_HIGHEST_PERF(x) (x & 0xff) +#define HWP_GUARANTEED_PERF(x) ((x & (0xff << 8)) >>8) +#define HWP_MOSTEFFICIENT_PERF(x) ((x & (0xff << 16)) >>16) +#define HWP_LOWEST_PERF(x) ((x & (0xff << 24)) >>24) + +/* IA32_HWP_REQUEST */ +#define 
HWP_MIN_PERF(x)			(x & 0xff)
+#define HWP_MAX_PERF(x)			((x & 0xff) << 8)
+#define HWP_DESIRED_PERF(x)		((x & 0xff) << 16)
+#define HWP_ENERGY_PERF_PREFERENCE(x)	((x & 0xff) << 24)
+#define HWP_ACTIVITY_WINDOW(x)		((x & 0xff3) << 32)
+#define HWP_PACKAGE_CONTROL(x)		((x & 0x1) << 42)
+
+/* IA32_HWP_STATUS */
+#define HWP_GUARANTEED_CHANGE(x)	(x & 0x1)
+#define HWP_EXCURSION_TO_MINIMUM(x)	(x & 0x4)
+
+/* IA32_HWP_INTERRUPT */
+#define HWP_CHANGE_TO_GUARANTEED_INT(x)	(x & 0x1)
+#define HWP_EXCURSION_TO_MINIMUM_INT(x)	(x & 0x2)
+
+#define MSR_AMD64_MC0_MASK		0xc0010044
+
+#define MSR_IA32_MCx_CTL(x)		(MSR_IA32_MC0_CTL + 4*(x))
+#define MSR_IA32_MCx_STATUS(x)		(MSR_IA32_MC0_STATUS + 4*(x))
+#define MSR_IA32_MCx_ADDR(x)		(MSR_IA32_MC0_ADDR + 4*(x))
+#define MSR_IA32_MCx_MISC(x)		(MSR_IA32_MC0_MISC + 4*(x))
+
+#define MSR_AMD64_MCx_MASK(x)		(MSR_AMD64_MC0_MASK + (x))
+
+/* These are consecutive and not in the normal 4er MCE bank block */
+#define MSR_IA32_MC0_CTL2		0x00000280
+#define MSR_IA32_MCx_CTL2(x)		(MSR_IA32_MC0_CTL2 + (x))
+
+#define MSR_P6_PERFCTR0			0x000000c1
+#define MSR_P6_PERFCTR1			0x000000c2
+#define MSR_P6_EVNTSEL0			0x00000186
+#define MSR_P6_EVNTSEL1			0x00000187
+
+#define MSR_KNC_PERFCTR0		0x00000020
+#define MSR_KNC_PERFCTR1		0x00000021
+#define MSR_KNC_EVNTSEL0		0x00000028
+#define MSR_KNC_EVNTSEL1		0x00000029
+
+/* Alternative perfctr range with full access. */
+#define MSR_IA32_PMC0			0x000004c1
+
+/* AMD64 MSRs. Not complete. See the architecture manual for a more
+   complete list. */
+
+#define MSR_AMD64_PATCH_LEVEL		0x0000008b
+#define MSR_AMD64_TSC_RATIO		0xc0000104
+#define MSR_AMD64_NB_CFG		0xc001001f
+#define MSR_AMD64_PATCH_LOADER		0xc0010020
+#define MSR_AMD64_OSVW_ID_LENGTH	0xc0010140
+#define MSR_AMD64_OSVW_STATUS		0xc0010141
+#define MSR_AMD64_LS_CFG		0xc0011020
+#define MSR_AMD64_DC_CFG		0xc0011022
+#define MSR_AMD64_BU_CFG2		0xc001102a
+#define MSR_AMD64_IBSFETCHCTL		0xc0011030
+#define MSR_AMD64_IBSFETCHLINAD		0xc0011031
+#define MSR_AMD64_IBSFETCHPHYSAD	0xc0011032
+#define MSR_AMD64_IBSFETCH_REG_COUNT	3
+#define MSR_AMD64_IBSFETCH_REG_MASK	((1UL<<MSR_AMD64_IBSFETCH_REG_COUNT)-1)
diff --git a/drivers/include/asm/msr.h b/drivers/include/asm/msr.h
--- a/drivers/include/asm/msr.h
+++ b/drivers/include/asm/msr.h
+#include "msr-index.h"
 
 #ifndef __ASSEMBLY__
 
 #include
 #include
 #include
+#include
 
 struct msr {
 	union {
@@ -46,14 +47,13 @@ static inline unsigned long long native_read_tscp(unsigned int *aux)
  * it means rax *or* rdx.
  */
 #ifdef CONFIG_X86_64
-#define DECLARE_ARGS(val, low, high)	unsigned low, high
-#define EAX_EDX_VAL(val, low, high)	((low) | ((u64)(high) << 32))
-#define EAX_EDX_ARGS(val, low, high)	"a" (low), "d" (high)
+/* Using 64-bit values saves one instruction clearing the high half of low */
+#define DECLARE_ARGS(val, low, high)	unsigned long low, high
+#define EAX_EDX_VAL(val, low, high)	((low) | (high) << 32)
 #define EAX_EDX_RET(val, low, high)	"=a" (low), "=d" (high)
 #else
 #define DECLARE_ARGS(val, low, high)	unsigned long long val
 #define EAX_EDX_VAL(val, low, high)	(val)
-#define EAX_EDX_ARGS(val, low, high)	"A" (val)
 #define EAX_EDX_RET(val, low, high)	"=A" (val)
 #endif
 
@@ -105,12 +105,19 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
 	return err;
 }
 
-extern unsigned long long native_read_tsc(void);
-
 extern int rdmsr_safe_regs(u32 regs[8]);
 extern int wrmsr_safe_regs(u32 regs[8]);
 
-static __always_inline unsigned long long __native_read_tsc(void)
+/**
+ * rdtsc() - returns the current TSC without ordering constraints
+ *
+ * rdtsc() returns the result of RDTSC as a 64-bit integer.  The
+ * only ordering constraint it supplies is the ordering implied by
+ * "asm volatile": it will put the RDTSC in the place you expect.  The
+ * CPU can and will speculatively execute that RDTSC, though, so the
+ * results can be non-monotonic if compared on different CPUs.
+ */
+static __always_inline unsigned long long rdtsc(void)
 {
 	DECLARE_ARGS(val, low, high);
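
The rdtsc() comment above deserves a concrete illustration: RDTSC is not a
serializing instruction, so the CPU may execute it before earlier loads have
completed. Callers that need a read which cannot appear to move backwards
usually fence first. A minimal sketch in the style of this header, on the
32-bit targets this tree builds for (the name rdtsc_fenced is illustrative
only and not part of this patch; LFENCE assumes SSE2-capable hardware):

static __always_inline unsigned long long rdtsc_fenced(void)
{
	unsigned long long t;

	/*
	 * LFENCE waits for all prior loads to complete, so the RDTSC
	 * below cannot be speculated ahead of them; the "=A" constraint
	 * returns EDX:EAX as one 64-bit value on 32-bit x86.
	 */
	asm volatile("lfence; rdtsc" : "=A" (t) : : "memory");
	return t;
}
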
@@ -152,8 +159,10 @@ static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
 #define rdmsrl(msr, val)			\
 	((val) = native_read_msr((msr)))
 
-#define wrmsrl(msr, val)						\
-	native_write_msr((msr), (u32)((u64)(val)), (u32)((u64)(val) >> 32))
+static inline void wrmsrl(unsigned msr, u64 val)
+{
+	native_write_msr(msr, (u32)val, (u32)(val >> 32));
+}
 
 /* wrmsr with exception handling */
 static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
@@ -179,12 +188,6 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 	return err;
 }
 
-#define rdtscl(low)						\
-	((low) = (u32)__native_read_tsc())
-
-#define rdtscll(val)						\
-	((val) = __native_read_tsc())
-
 #define rdpmc(counter, low, high)			\
 do {							\
 	u64 _l = native_read_pmc((counter));		\
@@ -194,19 +197,15 @@ do {						\
 
 #define rdpmcl(counter, val) ((val) = native_read_pmc(counter))
 
-#define rdtscp(low, high, aux)					\
-do {								\
-	unsigned long long _val = native_read_tscp(&(aux));	\
-	(low) = (u32)_val;					\
-	(high) = (u32)(_val >> 32);				\
-} while (0)
-
-#define rdtscpll(val, aux) (val) = native_read_tscp(&(aux))
-
 #endif	/* !CONFIG_PARAVIRT */
 
-#define wrmsrl_safe(msr, val) wrmsr_safe((msr), (u32)(val), \
-					     (u32)((val) >> 32))
+/*
+ * 64-bit version of wrmsr_safe():
+ */
+static inline int wrmsrl_safe(u32 msr, u64 val)
+{
+	return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
+}
 
 #define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high))
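
Converting wrmsrl()/wrmsrl_safe() from macros to inline functions gives the
u64 argument real type checking while still splitting it into the low/high
halves that WRMSR expects in EAX/EDX. A hedged usage sketch built only from
helpers visible in this patch (set_branch_trap_flag is an illustrative name;
the _safe variants would be used where the MSR might fault):

static inline void set_branch_trap_flag(void)
{
	u64 debugctl;

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	debugctl |= DEBUGCTLMSR_BTF;	/* single-step on branches, see msr-index.h */
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}
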
diff --git a/drivers/include/asm/page_types.h b/drivers/include/asm/page_types.h
new file mode 100644
index 000000000..cc071c6f7
--- /dev/null
+++ b/drivers/include/asm/page_types.h
@@ -0,0 +1,75 @@
+#ifndef _ASM_X86_PAGE_DEFS_H
+#define _ASM_X86_PAGE_DEFS_H
+
+#include <linux/const.h>
+#include <linux/types.h>
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT	12
+#define PAGE_SIZE	(_AC(1,UL) << PAGE_SHIFT)
+#define PAGE_MASK	(~(PAGE_SIZE-1))
+
+#define PMD_PAGE_SIZE		(_AC(1, UL) << PMD_SHIFT)
+#define PMD_PAGE_MASK		(~(PMD_PAGE_SIZE-1))
+
+#define PUD_PAGE_SIZE		(_AC(1, UL) << PUD_SHIFT)
+#define PUD_PAGE_MASK		(~(PUD_PAGE_SIZE-1))
+
+#define __PHYSICAL_MASK		((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1))
+#define __VIRTUAL_MASK		((1UL << __VIRTUAL_MASK_SHIFT) - 1)
+
+/* Cast *PAGE_MASK to a signed type so that it is sign-extended if
+   virtual addresses are 32-bits but physical addresses are larger
+   (ie, 32-bit PAE). */
+#define PHYSICAL_PAGE_MASK	(((signed long)PAGE_MASK) & __PHYSICAL_MASK)
+#define PHYSICAL_PMD_PAGE_MASK	(((signed long)PMD_PAGE_MASK) & __PHYSICAL_MASK)
+#define PHYSICAL_PUD_PAGE_MASK	(((signed long)PUD_PAGE_MASK) & __PHYSICAL_MASK)
+
+#define HPAGE_SHIFT		PMD_SHIFT
+#define HPAGE_SIZE		(_AC(1,UL) << HPAGE_SHIFT)
+#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
+
+#define HUGE_MAX_HSTATE 2
+
+#define PAGE_OFFSET		((unsigned long)__PAGE_OFFSET)
+
+#define VM_DATA_DEFAULT_FLAGS \
+	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
+	 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#define __PHYSICAL_START	ALIGN(CONFIG_PHYSICAL_START, \
+				      CONFIG_PHYSICAL_ALIGN)
+
+#define __START_KERNEL		(__START_KERNEL_map + __PHYSICAL_START)
+
+#ifdef CONFIG_X86_64
+#include <asm/page_64_types.h>
+#define IOREMAP_MAX_ORDER	(PUD_SHIFT)
+#else
+#include <asm/page_32_types.h>
+#define IOREMAP_MAX_ORDER	(PMD_SHIFT)
+#endif	/* CONFIG_X86_64 */
+
+#ifndef __ASSEMBLY__
+
+extern int devmem_is_allowed(unsigned long pagenr);
+
+extern unsigned long max_low_pfn_mapped;
+extern unsigned long max_pfn_mapped;
+
+static inline phys_addr_t get_max_mapped(void)
+{
+	return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT;
+}
+
+bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn);
+
+extern unsigned long init_memory_mapping(unsigned long start,
+					 unsigned long end);
+
+extern void initmem_init(void);
+
+#endif	/* !__ASSEMBLY__ */
+
+#endif	/* _ASM_X86_PAGE_DEFS_H */
diff --git a/drivers/include/asm/pgtable-2level.h b/drivers/include/asm/pgtable-2level.h
index 206a87fdd..fd74a1195 100644
--- a/drivers/include/asm/pgtable-2level.h
+++ b/drivers/include/asm/pgtable-2level.h
@@ -62,44 +62,8 @@ static inline unsigned long pte_bitop(unsigned long value, unsigned int rightshift
 	return ((value >> rightshift) & mask) << leftshift;
 }
 
-/*
- * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
- * split up the 29 bits of offset into this range.
- */
-#define PTE_FILE_MAX_BITS	29
-#define PTE_FILE_SHIFT1		(_PAGE_BIT_PRESENT + 1)
-#define PTE_FILE_SHIFT2		(_PAGE_BIT_FILE + 1)
-#define PTE_FILE_SHIFT3		(_PAGE_BIT_PROTNONE + 1)
-#define PTE_FILE_BITS1		(PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1)
-#define PTE_FILE_BITS2		(PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1)
-
-#define PTE_FILE_MASK1		((1U << PTE_FILE_BITS1) - 1)
-#define PTE_FILE_MASK2		((1U << PTE_FILE_BITS2) - 1)
-
-#define PTE_FILE_LSHIFT2	(PTE_FILE_BITS1)
-#define PTE_FILE_LSHIFT3	(PTE_FILE_BITS1 + PTE_FILE_BITS2)
-
-static __always_inline pgoff_t pte_to_pgoff(pte_t pte)
-{
-	return (pgoff_t)
-		(pte_bitop(pte.pte_low, PTE_FILE_SHIFT1, PTE_FILE_MASK1, 0) +
-		 pte_bitop(pte.pte_low, PTE_FILE_SHIFT2, PTE_FILE_MASK2, PTE_FILE_LSHIFT2) +
-		 pte_bitop(pte.pte_low, PTE_FILE_SHIFT3, -1UL, PTE_FILE_LSHIFT3));
-}
-
-static __always_inline pte_t pgoff_to_pte(pgoff_t off)
-{
-	return (pte_t){
-		.pte_low =
-			pte_bitop(off, 0, PTE_FILE_MASK1, PTE_FILE_SHIFT1) +
-			pte_bitop(off, PTE_FILE_LSHIFT2, PTE_FILE_MASK2, PTE_FILE_SHIFT2) +
-			pte_bitop(off, PTE_FILE_LSHIFT3, -1UL, PTE_FILE_SHIFT3) +
-			_PAGE_FILE,
-	};
-}
-
 /* Encode and de-code a swap entry */
-#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
+#define SWP_TYPE_BITS 5
 #define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1)
 
 #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
diff --git a/drivers/include/asm/pgtable-2level_types.h b/drivers/include/asm/pgtable-2level_types.h
index daacc23e3..392576433 100644
--- a/drivers/include/asm/pgtable-2level_types.h
+++ b/drivers/include/asm/pgtable-2level_types.h
@@ -17,7 +17,6 @@ typedef union {
 #endif	/* !__ASSEMBLY__ */
 
 #define SHARED_KERNEL_PMD	0
-#define PAGETABLE_LEVELS	2
 
 /*
  * traditional i386 two-level paging structure:
diff --git a/drivers/include/asm/pgtable.h b/drivers/include/asm/pgtable.h
index e99cf6728..df28c63a7 100644
--- a/drivers/include/asm/pgtable.h
+++ b/drivers/include/asm/pgtable.h
@@ -19,6 +19,13 @@
 #include <asm/x86_init.h>
 
 void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
+void ptdump_walk_pgd_level_checkwx(void);
+
+#ifdef CONFIG_DEBUG_WX
+#define debug_checkwx()
ptdump_walk_pgd_level_checkwx() +#else +#define debug_checkwx() do { } while (0) +#endif /* * ZERO_PAGE is a global shared page that is always zero: used @@ -115,11 +122,6 @@ static inline int pte_write(pte_t pte) return pte_flags(pte) & _PAGE_RW; } -static inline int pte_file(pte_t pte) -{ - return pte_flags(pte) & _PAGE_FILE; -} - static inline int pte_huge(pte_t pte) { return pte_flags(pte) & _PAGE_PSE; @@ -137,13 +139,7 @@ static inline int pte_exec(pte_t pte) static inline int pte_special(pte_t pte) { - /* - * See CONFIG_NUMA_BALANCING pte_numa in include/asm-generic/pgtable.h. - * On x86 we have _PAGE_BIT_NUMA == _PAGE_BIT_GLOBAL+1 == - * __PAGE_BIT_SOFTW1 == _PAGE_BIT_SPECIAL. - */ - return (pte_flags(pte) & _PAGE_SPECIAL) && - (pte_flags(pte) & (_PAGE_PRESENT|_PAGE_PROTNONE)); + return pte_flags(pte) & _PAGE_SPECIAL; } static inline unsigned long pte_pfn(pte_t pte) @@ -153,12 +149,12 @@ static inline unsigned long pte_pfn(pte_t pte) static inline unsigned long pmd_pfn(pmd_t pmd) { - return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT; + return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT; } static inline unsigned long pud_pfn(pud_t pud) { - return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT; + return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT; } #define pte_page(pte) pfn_to_page(pte_pfn(pte)) @@ -305,7 +301,7 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd) static inline pmd_t pmd_mknotpresent(pmd_t pmd) { - return pmd_clear_flags(pmd, _PAGE_PRESENT); + return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE); } #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY @@ -329,19 +325,14 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY); } -static inline pte_t pte_file_clear_soft_dirty(pte_t pte) +static inline pte_t pte_clear_soft_dirty(pte_t pte) { return pte_clear_flags(pte, _PAGE_SOFT_DIRTY); } -static inline pte_t pte_file_mksoft_dirty(pte_t pte) +static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd) { - return pte_set_flags(pte, _PAGE_SOFT_DIRTY); -} - -static inline int pte_file_soft_dirty(pte_t pte) -{ - return pte_flags(pte) & _PAGE_SOFT_DIRTY; + return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY); } #endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */ @@ -405,7 +396,9 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) return __pgprot(preservebits | addbits); } -#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK) +#define pte_pgprot(x) __pgprot(pte_flags(x)) +#define pmd_pgprot(x) __pgprot(pmd_flags(x)) +#define pud_pgprot(x) __pgprot(pud_flags(x)) #define canon_pgprot(p) __pgprot(massage_pgprot(p)) @@ -424,11 +417,17 @@ static inline int is_new_memtype_allowed(u64 paddr, unsigned long size, * requested memtype: * - request is uncached, return cannot be write-back * - request is write-combine, return cannot be write-back + * - request is write-through, return cannot be write-back + * - request is write-through, return cannot be write-combine */ if ((pcm == _PAGE_CACHE_MODE_UC_MINUS && new_pcm == _PAGE_CACHE_MODE_WB) || (pcm == _PAGE_CACHE_MODE_WC && - new_pcm == _PAGE_CACHE_MODE_WB)) { + new_pcm == _PAGE_CACHE_MODE_WB) || + (pcm == _PAGE_CACHE_MODE_WT && + new_pcm == _PAGE_CACHE_MODE_WB) || + (pcm == _PAGE_CACHE_MODE_WT && + new_pcm == _PAGE_CACHE_MODE_WC)) { return 0; } @@ -462,13 +461,6 @@ static inline int pte_same(pte_t a, pte_t b) } static inline int pte_present(pte_t a) -{ - return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE | - _PAGE_NUMA); -} - -#define pte_present_nonuma pte_present_nonuma -static inline int 
pte_present_nonuma(pte_t a) { return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE); } @@ -479,7 +471,7 @@ static inline bool pte_accessible(struct mm_struct *mm, pte_t a) if (pte_flags(a) & _PAGE_PRESENT) return true; - if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) && + if ((pte_flags(a) & _PAGE_PROTNONE) && mm_tlb_flush_pending(mm)) return true; @@ -499,10 +491,27 @@ static inline int pmd_present(pmd_t pmd) * the _PAGE_PSE flag will remain set at all times while the * _PAGE_PRESENT bit is clear). */ - return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE | - _PAGE_NUMA); + return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE); } +#ifdef CONFIG_NUMA_BALANCING +/* + * These work without NUMA balancing but the kernel does not care. See the + * comment in include/asm-generic/pgtable.h + */ +static inline int pte_protnone(pte_t pte) +{ + return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT)) + == _PAGE_PROTNONE; +} + +static inline int pmd_protnone(pmd_t pmd) +{ + return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT)) + == _PAGE_PROTNONE; +} +#endif /* CONFIG_NUMA_BALANCING */ + static inline int pmd_none(pmd_t pmd) { /* Only check low word on 32-bit platforms, since it might be @@ -512,14 +521,15 @@ static inline int pmd_none(pmd_t pmd) static inline unsigned long pmd_page_vaddr(pmd_t pmd) { - return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK); + return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd)); } /* * Currently stuck as a macro due to indirect forward reference to * linux/mmzone.h's __section_mem_map_addr() definition: */ -#define pmd_page(pmd) pfn_to_page((pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT) +#define pmd_page(pmd) \ + pfn_to_page((pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT) /* * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD] @@ -559,11 +569,6 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address) static inline int pmd_bad(pmd_t pmd) { -#ifdef CONFIG_NUMA_BALANCING - /* pmd_numa check */ - if ((pmd_flags(pmd) & (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA) - return 0; -#endif return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE; } @@ -572,7 +577,7 @@ static inline unsigned long pages_to_mb(unsigned long npg) return npg >> (20 - PAGE_SHIFT); } -#if PAGETABLE_LEVELS > 2 +#if CONFIG_PGTABLE_LEVELS > 2 static inline int pud_none(pud_t pud) { return native_pud_val(pud) == 0; @@ -585,14 +590,15 @@ static inline int pud_present(pud_t pud) static inline unsigned long pud_page_vaddr(pud_t pud) { - return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK); + return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud)); } /* * Currently stuck as a macro due to indirect forward reference to * linux/mmzone.h's __section_mem_map_addr() definition: */ -#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT) +#define pud_page(pud) \ + pfn_to_page((pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT) /* Find an entry in the second-level page table.. 
 */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
@@ -615,9 +621,9 @@ static inline int pud_large(pud_t pud)
 {
 	return 0;
 }
-#endif	/* PAGETABLE_LEVELS > 2 */
+#endif	/* CONFIG_PGTABLE_LEVELS > 2 */
 
-#if PAGETABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
 static inline int pgd_present(pgd_t pgd)
 {
 	return pgd_flags(pgd) & _PAGE_PRESENT;
@@ -654,7 +660,7 @@ static inline int pgd_none(pgd_t pgd)
 {
 	return !native_pgd_val(pgd);
 }
-#endif	/* PAGETABLE_LEVELS > 3 */
+#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
 
 #endif	/* __ASSEMBLY__ */
 
@@ -820,8 +826,8 @@ static inline int pmd_write(pmd_t pmd)
 	return pmd_flags(pmd) & _PAGE_RW;
 }
 
-#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
-static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr,
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
 				       pmd_t *pmdp)
 {
 	pmd_t pmd = native_pmdp_get_and_clear(pmdp);
@@ -882,19 +888,16 @@ static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
 #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
 static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
 {
-	VM_BUG_ON(pte_present_nonuma(pte));
 	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
 }
 
 static inline int pte_swp_soft_dirty(pte_t pte)
 {
-	VM_BUG_ON(pte_present_nonuma(pte));
 	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
 }
 
 static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
 {
-	VM_BUG_ON(pte_present_nonuma(pte));
 	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
 }
 #endif
diff --git a/drivers/include/asm/pgtable_types.h b/drivers/include/asm/pgtable_types.h
index 25bcd4a89..a471cadb9 100644
--- a/drivers/include/asm/pgtable_types.h
+++ b/drivers/include/asm/pgtable_types.h
@@ -4,7 +4,7 @@
 #include <linux/const.h>
 #include <asm/page_types.h>
 
-#define FIRST_USER_ADDRESS	0
+#define FIRST_USER_ADDRESS	0UL
 
 #define _PAGE_BIT_PRESENT	0	/* is present */
 #define _PAGE_BIT_RW		1	/* writeable */
@@ -27,19 +27,9 @@
 #define _PAGE_BIT_SOFT_DIRTY	_PAGE_BIT_SOFTW3 /* software dirty tracking */
 #define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */
 
-/*
- * Swap offsets on configurations that allow automatic NUMA balancing use the
- * bits after _PAGE_BIT_GLOBAL. To uniquely distinguish NUMA hinting PTEs from
- * swap entries, we use the first bit after _PAGE_BIT_GLOBAL and shrink the
- * maximum possible swap space from 16TB to 8TB.
- */
-#define _PAGE_BIT_NUMA		(_PAGE_BIT_GLOBAL+1)
-
 /* If _PAGE_BIT_PRESENT is clear, we use these: */
 /* - if the user mapped it with PROT_NONE; pte_present gives true */
 #define _PAGE_BIT_PROTNONE	_PAGE_BIT_GLOBAL
-/* - set: nonlinear file mapping, saved PTE; unset:swap */
-#define _PAGE_BIT_FILE		_PAGE_BIT_DIRTY
 
 #define _PAGE_PRESENT	(_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
 #define _PAGE_RW	(_AT(pteval_t, 1) << _PAGE_BIT_RW)
@@ -77,21 +67,6 @@
 #define _PAGE_SOFT_DIRTY	(_AT(pteval_t, 0))
 #endif
 
-/*
- * _PAGE_NUMA distinguishes between a numa hinting minor fault and a page
- * that is not present. The hinting fault gathers numa placement statistics
- * (see pte_numa()). The bit is always zero when the PTE is not present.
- *
- * The bit picked must be always zero when the pmd is present and not
- * present, so that we don't lose information when we set it while
- * atomically clearing the present bit.
- */
-#ifdef CONFIG_NUMA_BALANCING
-#define _PAGE_NUMA	(_AT(pteval_t, 1) << _PAGE_BIT_NUMA)
-#else
-#define _PAGE_NUMA	(_AT(pteval_t, 0))
-#endif
-
 /*
  * Tracking soft dirty bit when a page goes to a swap is tricky.
* We need a bit which can be stored in pte _and_ not conflict @@ -114,7 +89,6 @@ #define _PAGE_NX (_AT(pteval_t, 0)) #endif -#define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE) #define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE) #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \ @@ -125,8 +99,8 @@ /* Set of bits not changed in pte_modify */ #define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \ _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \ - _PAGE_SOFT_DIRTY | _PAGE_NUMA) -#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE | _PAGE_NUMA) + _PAGE_SOFT_DIRTY) +#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE) /* * The cache modes defined here are used to translate between pure SW usage @@ -235,10 +209,10 @@ enum page_cache_mode { #include -/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */ +/* Extracts the PFN from a (pte|pmd|pud|pgd)val_t of a 4KB page */ #define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK) -/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */ +/* Extracts the flags from a (pte|pmd|pud|pgd)val_t of a 4KB page */ #define PTE_FLAGS_MASK (~PTE_PFN_MASK) typedef struct pgprot { pgprotval_t pgprot; } pgprot_t; @@ -260,7 +234,7 @@ static inline pgdval_t pgd_flags(pgd_t pgd) return native_pgd_val(pgd) & PTE_FLAGS_MASK; } -#if PAGETABLE_LEVELS > 3 +#if CONFIG_PGTABLE_LEVELS > 3 typedef struct { pudval_t pud; } pud_t; static inline pud_t native_make_pud(pmdval_t val) @@ -281,7 +255,7 @@ static inline pudval_t native_pud_val(pud_t pud) } #endif -#if PAGETABLE_LEVELS > 2 +#if CONFIG_PGTABLE_LEVELS > 2 typedef struct { pmdval_t pmd; } pmd_t; static inline pmd_t native_make_pmd(pmdval_t val) @@ -302,14 +276,40 @@ static inline pmdval_t native_pmd_val(pmd_t pmd) } #endif +static inline pudval_t pud_pfn_mask(pud_t pud) +{ + if (native_pud_val(pud) & _PAGE_PSE) + return PHYSICAL_PUD_PAGE_MASK; + else + return PTE_PFN_MASK; +} + +static inline pudval_t pud_flags_mask(pud_t pud) +{ + return ~pud_pfn_mask(pud); +} + static inline pudval_t pud_flags(pud_t pud) { - return native_pud_val(pud) & PTE_FLAGS_MASK; + return native_pud_val(pud) & pud_flags_mask(pud); +} + +static inline pmdval_t pmd_pfn_mask(pmd_t pmd) +{ + if (native_pmd_val(pmd) & _PAGE_PSE) + return PHYSICAL_PMD_PAGE_MASK; + else + return PTE_PFN_MASK; +} + +static inline pmdval_t pmd_flags_mask(pmd_t pmd) +{ + return ~pmd_pfn_mask(pmd); } static inline pmdval_t pmd_flags(pmd_t pmd) { - return native_pmd_val(pmd) & PTE_FLAGS_MASK; + return native_pmd_val(pmd) & pmd_flags_mask(pmd); } static inline pte_t native_make_pte(pteval_t val) @@ -327,20 +327,6 @@ static inline pteval_t pte_flags(pte_t pte) return native_pte_val(pte) & PTE_FLAGS_MASK; } -#ifdef CONFIG_NUMA_BALANCING -/* Set of bits that distinguishes present, prot_none and numa ptes */ -#define _PAGE_NUMA_MASK (_PAGE_NUMA|_PAGE_PROTNONE|_PAGE_PRESENT) -static inline pteval_t ptenuma_flags(pte_t pte) -{ - return pte_flags(pte) & _PAGE_NUMA_MASK; -} - -static inline pmdval_t pmdnuma_flags(pmd_t pmd) -{ - return pmd_flags(pmd) & _PAGE_NUMA_MASK; -} -#endif /* CONFIG_NUMA_BALANCING */ - #define pgprot_val(x) ((x).pgprot) #define __pgprot(x) ((pgprot_t) { (x) } ) @@ -407,6 +393,9 @@ extern int nx_enabled; #define pgprot_writecombine pgprot_writecombine extern pgprot_t pgprot_writecombine(pgprot_t prot); +#define pgprot_writethrough pgprot_writethrough +extern pgprot_t pgprot_writethrough(pgprot_t prot); + /* Indicate that x86 has its own track and untrack pfn vma functions */ #define __HAVE_PFNMAP_TRACKING 
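
The pfn/flags split is the subtle part of the pgtable_types.h change above:
when _PAGE_PSE is set, bit 12 of a PMD/PUD entry is the PAT bit rather than
part of the physical frame number, so the old flat PTE_PFN_MASK could fold a
flag bit into the PFN of a huge mapping. A small sketch of how the new
helpers pair up (pmd_demo is illustrative and not from the tree; the casts
are only for printing):

static void pmd_demo(pmd_t pmd)
{
	/*
	 * pmd_pfn_mask() selects PHYSICAL_PMD_PAGE_MASK for a huge entry
	 * and PTE_PFN_MASK for a regular 4K page-table pointer;
	 * pmd_flags_mask() is its exact complement.
	 */
	pmdval_t pfn_bits  = native_pmd_val(pmd) & pmd_pfn_mask(pmd);
	pmdval_t flag_bits = native_pmd_val(pmd) & pmd_flags_mask(pmd);

	printk("pfn=%lx flags=%lx\n",
	       (unsigned long)(pfn_bits >> PAGE_SHIFT),
	       (unsigned long)flag_bits);
}
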
diff --git a/drivers/include/asm/preempt.h b/drivers/include/asm/preempt.h index ff44240a8..85e2ba54c 100644 --- a/drivers/include/asm/preempt.h +++ b/drivers/include/asm/preempt.h @@ -30,12 +30,9 @@ static __always_inline void preempt_count_set(int pc) /* * must be macros to avoid header recursion hell */ -#define init_task_preempt_count(p) do { \ - task_thread_info(p)->saved_preempt_count = PREEMPT_DISABLED; \ -} while (0) +#define init_task_preempt_count(p) do { } while (0) #define init_idle_preempt_count(p, cpu) do { \ - task_thread_info(p)->saved_preempt_count = PREEMPT_ENABLED; \ per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \ } while (0) @@ -90,20 +87,18 @@ static __always_inline bool __preempt_count_dec_and_test(void) /* * Returns true when we need to resched and can (barring IRQ state). */ -static __always_inline bool should_resched(void) +static __always_inline bool should_resched(int preempt_offset) { - return unlikely(!raw_cpu_read_4(__preempt_count)); + return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset); } #ifdef CONFIG_PREEMPT extern asmlinkage void ___preempt_schedule(void); # define __preempt_schedule() asm ("call ___preempt_schedule") extern asmlinkage void preempt_schedule(void); -# ifdef CONFIG_CONTEXT_TRACKING - extern asmlinkage void ___preempt_schedule_context(void); -# define __preempt_schedule_context() asm ("call ___preempt_schedule_context") - extern asmlinkage void preempt_schedule_context(void); -# endif + extern asmlinkage void ___preempt_schedule_notrace(void); +# define __preempt_schedule_notrace() asm ("call ___preempt_schedule_notrace") + extern asmlinkage void preempt_schedule_notrace(void); #endif #endif /* __ASM_PREEMPT_H */ diff --git a/drivers/include/asm/processor.h b/drivers/include/asm/processor.h index 2b9796a0e..afabeda26 100644 --- a/drivers/include/asm/processor.h +++ b/drivers/include/asm/processor.h @@ -6,12 +6,12 @@ /* Forward declaration, a strange C thing */ struct task_struct; struct mm_struct; +struct vm86; -#include #include #include #include -#include +#include #include #include #include @@ -21,6 +21,7 @@ struct mm_struct; #include #include #include +#include #include #include @@ -52,6 +53,11 @@ static inline void *current_text_addr(void) return pc; } +/* + * These alignment constraints are for performance in the vSMP case, + * but in the task_struct case we must also meet hardware imposed + * alignment requirements of the FPU state: + */ #ifdef CONFIG_X86_VSMP # define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT) # define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT) @@ -109,6 +115,9 @@ struct cpuinfo_x86 { /* in KB - valid for CPUS which support this call: */ int x86_cache_size; int x86_cache_alignment; /* In bytes */ + /* Cache QoS architectural values: */ + int x86_cache_max_rmid; /* max index */ + int x86_cache_occ_scale; /* scale to bytes */ int x86_power; unsigned long loops_per_jiffy; /* cpuid returned max cores value: */ @@ -160,10 +169,7 @@ DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info); extern const struct seq_operations cpuinfo_op; -#define cache_line_size() (x86_cache_alignment) - extern void cpu_detect(struct cpuinfo_x86 *c); -extern void fpu_detect(struct cpuinfo_x86 *c); extern void early_cpu_init(void); extern void identify_boot_cpu(void); @@ -210,8 +216,23 @@ struct x86_hw_tss { unsigned long sp0; unsigned short ss0, __ss0h; unsigned long sp1; - /* ss1 caches MSR_IA32_SYSENTER_CS: */ - unsigned short ss1, __ss1h; + + /* + * We don't use ring 1, so ss1 is a convenient scratch 
space in + * the same cacheline as sp0. We use ss1 to cache the value in + * MSR_IA32_SYSENTER_CS. When we context switch + * MSR_IA32_SYSENTER_CS, we first check if the new value being + * written matches ss1, and, if it's not, then we wrmsr the new + * value and update ss1. + * + * The only reason we context switch MSR_IA32_SYSENTER_CS is + * that we set it to zero in vm86 tasks to avoid corrupting the + * stack if we were to go through the sysenter path from vm86 + * mode. + */ + unsigned short ss1; /* MSR_IA32_SYSENTER_CS */ + + unsigned short __ss1h; unsigned long sp2; unsigned short ss2, __ss2h; unsigned long __cr3; @@ -276,13 +297,17 @@ struct tss_struct { unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; /* - * .. and then another 0x100 bytes for the emergency kernel stack: + * Space for the temporary SYSENTER stack: */ - unsigned long stack[64]; + unsigned long SYSENTER_stack[64]; } ____cacheline_aligned; -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss); +DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss); + +#ifdef CONFIG_X86_32 +DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack); +#endif /* * Save the original ist values for checking stack pointers during debugging @@ -291,128 +316,6 @@ struct orig_ist { unsigned long ist[7]; }; -#define MXCSR_DEFAULT 0x1f80 - -struct i387_fsave_struct { - u32 cwd; /* FPU Control Word */ - u32 swd; /* FPU Status Word */ - u32 twd; /* FPU Tag Word */ - u32 fip; /* FPU IP Offset */ - u32 fcs; /* FPU IP Selector */ - u32 foo; /* FPU Operand Pointer Offset */ - u32 fos; /* FPU Operand Pointer Selector */ - - /* 8*10 bytes for each FP-reg = 80 bytes: */ - u32 st_space[20]; - - /* Software status information [not touched by FSAVE ]: */ - u32 status; -}; - -struct i387_fxsave_struct { - u16 cwd; /* Control Word */ - u16 swd; /* Status Word */ - u16 twd; /* Tag Word */ - u16 fop; /* Last Instruction Opcode */ - union { - struct { - u64 rip; /* Instruction Pointer */ - u64 rdp; /* Data Pointer */ - }; - struct { - u32 fip; /* FPU IP Offset */ - u32 fcs; /* FPU IP Selector */ - u32 foo; /* FPU Operand Offset */ - u32 fos; /* FPU Operand Selector */ - }; - }; - u32 mxcsr; /* MXCSR Register State */ - u32 mxcsr_mask; /* MXCSR Mask */ - - /* 8*16 bytes for each FP-reg = 128 bytes: */ - u32 st_space[32]; - - /* 16*16 bytes for each XMM-reg = 256 bytes: */ - u32 xmm_space[64]; - - u32 padding[12]; - - union { - u32 padding1[12]; - u32 sw_reserved[12]; - }; - -} __attribute__((aligned(16))); - -struct i387_soft_struct { - u32 cwd; - u32 swd; - u32 twd; - u32 fip; - u32 fcs; - u32 foo; - u32 fos; - /* 8*10 bytes for each FP-reg = 80 bytes: */ - u32 st_space[20]; - u8 ftop; - u8 changed; - u8 lookahead; - u8 no_update; - u8 rm; - u8 alimit; - struct math_emu_info *info; - u32 entry_eip; -}; - -struct ymmh_struct { - /* 16 * 16 bytes for each YMMH-reg = 256 bytes */ - u32 ymmh_space[64]; -}; - -/* We don't support LWP yet: */ -struct lwp_struct { - u8 reserved[128]; -}; - -struct bndreg { - u64 lower_bound; - u64 upper_bound; -} __packed; - -struct bndcsr { - u64 bndcfgu; - u64 bndstatus; -} __packed; - -struct xsave_hdr_struct { - u64 xstate_bv; - u64 xcomp_bv; - u64 reserved[6]; -} __attribute__((packed)); - -struct xsave_struct { - struct i387_fxsave_struct i387; - struct xsave_hdr_struct xsave_hdr; - struct ymmh_struct ymmh; - struct lwp_struct lwp; - struct bndreg bndreg[4]; - struct bndcsr bndcsr; - /* new processor state extensions will go here */ -} __attribute__ ((packed, aligned (64))); - -union thread_xstate { - struct 
i387_fsave_struct fsave; - struct i387_fxsave_struct fxsave; - struct i387_soft_struct soft; - struct xsave_struct xsave; -}; - -struct fpu { - unsigned int last_cpu; - unsigned int has_fpu; - union thread_xstate *state; -}; - #ifdef CONFIG_X86_64 DECLARE_PER_CPU(struct orig_ist, orig_ist); @@ -461,8 +364,6 @@ DECLARE_PER_CPU(struct irq_stack *, softirq_stack); #endif /* X86_64 */ extern unsigned int xstate_size; -extern void free_thread_xstate(struct task_struct *); -extern struct kmem_cache *task_xstate_cachep; struct perf_event; @@ -474,7 +375,6 @@ struct thread_struct { #ifdef CONFIG_X86_32 unsigned long sysenter_cs; #else - unsigned long usersp; /* Copy from PDA */ unsigned short es; unsigned short ds; unsigned short fsindex; @@ -487,6 +387,7 @@ struct thread_struct { unsigned long fs; #endif unsigned long gs; + /* Save middle states of ptrace breakpoints */ struct perf_event *ptrace_bps[HBP_NUM]; /* Debug status used for traps, single steps, etc... */ @@ -497,32 +398,22 @@ struct thread_struct { unsigned long cr2; unsigned long trap_nr; unsigned long error_code; - /* floating point and extended processor state */ - struct fpu fpu; -#ifdef CONFIG_X86_32 +#ifdef CONFIG_VM86 /* Virtual 86 mode info */ - struct vm86_struct __user *vm86_info; - unsigned long screen_bitmap; - unsigned long v86flags; - unsigned long v86mask; - unsigned long saved_sp0; - unsigned int saved_fs; - unsigned int saved_gs; + struct vm86 *vm86; #endif /* IO permissions: */ unsigned long *io_bitmap_ptr; unsigned long iopl; /* Max allowed port in the bitmap, in bytes: */ unsigned io_bitmap_max; + + /* Floating point and extended processor state */ + struct fpu fpu; /* - * fpu_counter contains the number of consecutive context switches - * that the FPU is used. If this is over a threshold, the lazy fpu - * saving becomes unlazy to save the trap. This is an unsigned char - * so that after 256 times the counter wraps and the behavior turns - * lazy again; this to deal with bursty apps that only use FPU for - * a short time + * WARNING: 'fpu' is dynamically-sized. It *MUST* be at + * the end. */ - unsigned char fpu_counter; }; /* @@ -564,11 +455,13 @@ static inline void native_swapgs(void) #endif } + #ifdef CONFIG_PARAVIRT #include #else #define __cpuid native_cpuid #define paravirt_enabled() 0 +#define paravirt_has(x) 0 static inline void load_sp0(struct tss_struct *tss, struct thread_struct *thread) @@ -579,39 +472,6 @@ static inline void load_sp0(struct tss_struct *tss, #define set_iopl_mask native_set_iopl_mask #endif /* CONFIG_PARAVIRT */ -/* - * Save the cr4 feature set we're using (ie - * Pentium 4MB enable and PPro Global page - * enable), so that any CPU's that boot up - * after us can get the correct flags. - */ -extern unsigned long mmu_cr4_features; -extern u32 *trampoline_cr4_features; - -static inline void set_in_cr4(unsigned long mask) -{ - unsigned long cr4; - - mmu_cr4_features |= mask; - if (trampoline_cr4_features) - *trampoline_cr4_features = mmu_cr4_features; - cr4 = read_cr4(); - cr4 |= mask; - write_cr4(cr4); -} - -static inline void clear_in_cr4(unsigned long mask) -{ - unsigned long cr4; - - mmu_cr4_features &= ~mask; - if (trampoline_cr4_features) - *trampoline_cr4_features = mmu_cr4_features; - cr4 = read_cr4(); - cr4 &= ~mask; - write_cr4(cr4); -} - typedef struct { unsigned long seg; } mm_segment_t; @@ -686,12 +546,12 @@ static inline unsigned int cpuid_edx(unsigned int op) } /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. 
*/ -static inline void rep_nop(void) +static __always_inline void rep_nop(void) { asm volatile("rep; nop" ::: "memory"); } -static inline void cpu_relax(void) +static __always_inline void cpu_relax(void) { rep_nop(); } @@ -775,14 +635,6 @@ static inline void update_debugctlmsr(unsigned long debugctlmsr) extern void set_task_blockstep(struct task_struct *task, bool on); -/* - * from system description table in BIOS. Mostly for MCA use, but - * others may find it useful: - */ -extern unsigned int machine_id; -extern unsigned int machine_submodel_id; -extern unsigned int BIOS_revision; - /* Boot loader type from the setup header: */ extern int bootloader_type; extern int bootloader_version; @@ -794,10 +646,10 @@ extern char ignore_fpu_irq; #define ARCH_HAS_SPINLOCK_PREFETCH #ifdef CONFIG_X86_32 -# define BASE_PREFETCH ASM_NOP4 +# define BASE_PREFETCH "" # define ARCH_HAS_PREFETCH #else -# define BASE_PREFETCH "prefetcht0 (%1)" +# define BASE_PREFETCH "prefetcht0 %P1" #endif /* @@ -832,6 +684,9 @@ static inline void spin_lock_prefetch(const void *x) prefetchw(x); } +#define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \ + TOP_OF_KERNEL_STACK_PADDING) + #ifdef CONFIG_X86_32 /* * User space process size: 3GB (default). @@ -842,39 +697,15 @@ static inline void spin_lock_prefetch(const void *x) #define STACK_TOP_MAX STACK_TOP #define INIT_THREAD { \ - .sp0 = sizeof(init_stack) + (long)&init_stack, \ - .vm86_info = NULL, \ + .sp0 = TOP_OF_INIT_STACK, \ .sysenter_cs = __KERNEL_CS, \ .io_bitmap_ptr = NULL, \ } -/* - * Note that the .io_bitmap member must be extra-big. This is because - * the CPU will access an additional byte beyond the end of the IO - * permission bitmap. The extra byte must be all 1 bits, and must - * be within the limit. - */ -#define INIT_TSS { \ - .x86_tss = { \ - .sp0 = sizeof(init_stack) + (long)&init_stack, \ - .ss0 = __KERNEL_DS, \ - .ss1 = __KERNEL_CS, \ - .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \ - }, \ - .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, \ -} - extern unsigned long thread_saved_pc(struct task_struct *tsk); -#define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long)) -#define KSTK_TOP(info) \ -({ \ - unsigned long *__ptr = (unsigned long *)(info); \ - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \ -}) - /* - * The below -8 is to reserve 8 bytes on top of the ring0 stack. + * TOP_OF_KERNEL_STACK_PADDING reserves 8 bytes on top of the ring0 stack. * This is necessary to guarantee that the entire "struct pt_regs" * is accessible even if the CPU haven't stored the SS/ESP registers * on the stack (interrupt gate does not save these registers @@ -883,11 +714,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk); * "struct pt_regs" is possible, but they may contain the * completely wrong values. 
 */
-#define task_pt_regs(task)                                             \
-({                                                                     \
-       struct pt_regs *__regs__;                                       \
-       __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
-       __regs__ - 1;                                                   \
+#define task_pt_regs(task) \
+({									\
+	unsigned long __ptr = (unsigned long)task_stack_page(task);	\
+	__ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;		\
+	((struct pt_regs *)__ptr) - 1;					\
 })
 
 #define KSTK_ESP(task)		(task_pt_regs(task)->sp)
@@ -919,11 +750,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
 #define STACK_TOP_MAX		TASK_SIZE_MAX
 
 #define INIT_THREAD  { \
-	.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
-}
-
-#define INIT_TSS  { \
-	.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
+	.sp0 = TOP_OF_INIT_STACK \
 }
 
 /*
@@ -935,11 +762,6 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
 #define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
 extern unsigned long KSTK_ESP(struct task_struct *task);
 
-/*
- * User space RSP while inside the SYSCALL fast path
- */
-DECLARE_PER_CPU(unsigned long, old_rsp);
-
 #endif /* CONFIG_X86_64 */
 
 extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
@@ -961,24 +783,25 @@ extern int get_tsc_mode(unsigned long adr);
 extern int set_tsc_mode(unsigned int val);
 
 /* Register/unregister a process' MPX related resource */
-#define MPX_ENABLE_MANAGEMENT(tsk)	mpx_enable_management((tsk))
-#define MPX_DISABLE_MANAGEMENT(tsk)	mpx_disable_management((tsk))
+#define MPX_ENABLE_MANAGEMENT()	mpx_enable_management()
+#define MPX_DISABLE_MANAGEMENT()	mpx_disable_management()
 
 #ifdef CONFIG_X86_INTEL_MPX
-extern int mpx_enable_management(struct task_struct *tsk);
-extern int mpx_disable_management(struct task_struct *tsk);
+extern int mpx_enable_management(void);
+extern int mpx_disable_management(void);
 #else
-static inline int mpx_enable_management(struct task_struct *tsk)
+static inline int mpx_enable_management(void)
 {
 	return -EINVAL;
 }
-static inline int mpx_disable_management(struct task_struct *tsk)
+static inline int mpx_disable_management(void)
 {
 	return -EINVAL;
 }
 #endif /* CONFIG_X86_INTEL_MPX */
 
 extern u16 amd_get_nb_id(int cpu);
+extern u32 amd_get_nodes_per_socket(void);
 
 static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
 {
diff --git a/drivers/include/asm/sigcontext.h b/drivers/include/asm/sigcontext.h
index 9dfce4e04..e6cd2c489 100644
--- a/drivers/include/asm/sigcontext.h
+++ b/drivers/include/asm/sigcontext.h
@@ -1,79 +1,8 @@
 #ifndef _ASM_X86_SIGCONTEXT_H
 #define _ASM_X86_SIGCONTEXT_H
 
+/* This is a legacy header - all kernel code includes <uapi/asm/sigcontext.h> directly. */
+
 #include <uapi/asm/sigcontext.h>
 
-#ifdef __i386__
-struct sigcontext {
-	unsigned short gs, __gsh;
-	unsigned short fs, __fsh;
-	unsigned short es, __esh;
-	unsigned short ds, __dsh;
-	unsigned long di;
-	unsigned long si;
-	unsigned long bp;
-	unsigned long sp;
-	unsigned long bx;
-	unsigned long dx;
-	unsigned long cx;
-	unsigned long ax;
-	unsigned long trapno;
-	unsigned long err;
-	unsigned long ip;
-	unsigned short cs, __csh;
-	unsigned long flags;
-	unsigned long sp_at_signal;
-	unsigned short ss, __ssh;
-
-	/*
-	 * fpstate is really (struct _fpstate *) or (struct _xstate *)
-	 * depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved
-	 * bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end
-	 * of extended memory layout.
See comments at the definition of - * (struct _fpx_sw_bytes) - */ - void __user *fpstate; /* zero when no FPU/extended context */ - unsigned long oldmask; - unsigned long cr2; -}; -#else /* __i386__ */ -struct sigcontext { - unsigned long r8; - unsigned long r9; - unsigned long r10; - unsigned long r11; - unsigned long r12; - unsigned long r13; - unsigned long r14; - unsigned long r15; - unsigned long di; - unsigned long si; - unsigned long bp; - unsigned long bx; - unsigned long dx; - unsigned long ax; - unsigned long cx; - unsigned long sp; - unsigned long ip; - unsigned long flags; - unsigned short cs; - unsigned short gs; - unsigned short fs; - unsigned short __pad0; - unsigned long err; - unsigned long trapno; - unsigned long oldmask; - unsigned long cr2; - - /* - * fpstate is really (struct _fpstate *) or (struct _xstate *) - * depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved - * bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end - * of extended memory layout. See comments at the definition of - * (struct _fpx_sw_bytes) - */ - void __user *fpstate; /* zero when no FPU/extended context */ - unsigned long reserved1[8]; -}; -#endif /* !__i386__ */ #endif /* _ASM_X86_SIGCONTEXT_H */ diff --git a/drivers/include/asm/special_insns.h b/drivers/include/asm/special_insns.h index e820c080a..6a4b00faf 100644 --- a/drivers/include/asm/special_insns.h +++ b/drivers/include/asm/special_insns.h @@ -137,17 +137,17 @@ static inline void write_cr3(unsigned long x) native_write_cr3(x); } -static inline unsigned long read_cr4(void) +static inline unsigned long __read_cr4(void) { return native_read_cr4(); } -static inline unsigned long read_cr4_safe(void) +static inline unsigned long __read_cr4_safe(void) { return native_read_cr4_safe(); } -static inline void write_cr4(unsigned long x) +static inline void __write_cr4(unsigned long x) { native_write_cr4(x); } diff --git a/drivers/include/asm/x86_init.h b/drivers/include/asm/x86_init.h index 90dbf351e..dfd40dab9 100644 --- a/drivers/include/asm/x86_init.h +++ b/drivers/include/asm/x86_init.h @@ -1,7 +1,6 @@ #ifndef _ASM_X86_PLATFORM_H #define _ASM_X86_PLATFORM_H -#include //#include struct mpc_bus; @@ -171,38 +170,17 @@ struct x86_platform_ops { }; struct pci_dev; -struct msi_msg; struct x86_msi_ops { int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type); - void (*compose_msi_msg)(struct pci_dev *dev, unsigned int irq, - unsigned int dest, struct msi_msg *msg, - u8 hpet_id); void (*teardown_msi_irq)(unsigned int irq); void (*teardown_msi_irqs)(struct pci_dev *dev); void (*restore_msi_irqs)(struct pci_dev *dev); - int (*setup_hpet_msi)(unsigned int irq, unsigned int id); }; -struct IO_APIC_route_entry; -struct io_apic_irq_attr; -struct irq_data; -struct cpumask; - struct x86_io_apic_ops { - void (*init) (void); unsigned int (*read) (unsigned int apic, unsigned int reg); - void (*write) (unsigned int apic, unsigned int reg, unsigned int value); - void (*modify) (unsigned int apic, unsigned int reg, unsigned int value); void (*disable)(void); - void (*print_entries)(unsigned int apic, unsigned int nr_entries); - int (*set_affinity)(struct irq_data *data, - const struct cpumask *mask, - bool force); - int (*setup_entry)(int irq, struct IO_APIC_route_entry *entry, - unsigned int destination, int vector, - struct io_apic_irq_attr *attr); - void (*eoi_ioapic_pin)(int apic, int pin, int vector); }; extern struct x86_init_ops x86_init; diff --git a/drivers/include/ddk.h b/drivers/include/ddk.h index c220f948a..80c783411 100644 
--- a/drivers/include/ddk.h +++ b/drivers/include/ddk.h @@ -15,6 +15,7 @@ #define PG_SW 0x003 #define PG_UW 0x007 +#define PG_WRITEC 0x008 #define PG_NOCACHE 0x018 #define PG_SHARED 0x200 @@ -63,7 +64,4 @@ int ddk_init(struct ddk_params *params); u32 drvEntry(int, char *)__asm__("_drvEntry"); - - - #endif /* DDK_H */ diff --git a/drivers/include/drm/drmP.h b/drivers/include/drm/drmP.h index f6414cad0..3bc0467e4 100644 --- a/drivers/include/drm/drmP.h +++ b/drivers/include/drm/drmP.h @@ -45,8 +45,6 @@ #include #include -#include -#include #include #include #include @@ -110,6 +108,12 @@ struct sg_table; * PRIME: used in the prime code. * This is the category used by the DRM_DEBUG_PRIME() macro. * + * ATOMIC: used in the atomic code. + * This is the category used by the DRM_DEBUG_ATOMIC() macro. + * + * VBL: used for verbose debug message in the vblank code + * This is the category used by the DRM_DEBUG_VBL() macro. + * * Enabling verbose debug messages is done through the drm.debug parameter, * each category being enabled by a bit. * @@ -117,7 +121,7 @@ struct sg_table; * drm.debug=0x2 will enable DRIVER messages * drm.debug=0x3 will enable CORE and DRIVER messages * ... - * drm.debug=0xf will enable all messages + * drm.debug=0x3f will enable all messages * * An interesting feature is that it's possible to enable verbose logging at * run-time by echoing the debug value in its sysfs node: @@ -125,12 +129,14 @@ struct sg_table; */ #define DRM_UT_CORE 0x01 #define DRM_UT_DRIVER 0x02 -#define DRM_UT_KMS 0x04 +#define DRM_UT_KMS 0x04 #define DRM_UT_PRIME 0x08 +#define DRM_UT_ATOMIC 0x10 +#define DRM_UT_VBL 0x20 extern __printf(2, 3) void drm_ut_debug_printk(const char *function_name, - const char *format, ...); + const char *format, ...); extern __printf(1, 2) void drm_err(const char *format, ...); @@ -139,16 +145,18 @@ void drm_err(const char *format, ...); /*@{*/ /* driver capabilities and requirements mask */ -#define DRIVER_USE_AGP 0x1 -#define DRIVER_PCI_DMA 0x8 -#define DRIVER_SG 0x10 -#define DRIVER_HAVE_DMA 0x20 -#define DRIVER_HAVE_IRQ 0x40 -#define DRIVER_IRQ_SHARED 0x80 -#define DRIVER_GEM 0x1000 -#define DRIVER_MODESET 0x2000 -#define DRIVER_PRIME 0x4000 -#define DRIVER_RENDER 0x8000 +#define DRIVER_USE_AGP 0x1 +#define DRIVER_PCI_DMA 0x8 +#define DRIVER_SG 0x10 +#define DRIVER_HAVE_DMA 0x20 +#define DRIVER_HAVE_IRQ 0x40 +#define DRIVER_IRQ_SHARED 0x80 +#define DRIVER_GEM 0x1000 +#define DRIVER_MODESET 0x2000 +#define DRIVER_PRIME 0x4000 +#define DRIVER_RENDER 0x8000 +#define DRIVER_ATOMIC 0x10000 +#define DRIVER_KMS_LEGACY_CONTEXT 0x20000 /***********************************************************************/ /** \name Macros to make printk easier */ @@ -161,7 +169,7 @@ void drm_err(const char *format, ...); * \param arg arguments */ #define DRM_ERROR(fmt, ...) \ - drm_err(fmt, ##__VA_ARGS__) + printk("DRM Error" fmt, ##__VA_ARGS__) /** * Rate limited error output. Like DRM_ERROR() but won't flood the log. @@ -209,13 +217,23 @@ void drm_err(const char *format, ...); do { \ printk(KERN_INFO "[" DRM_NAME "] " fmt, ##args); \ } while (0) +#define DRM_DEBUG_ATOMIC(fmt, args...) \ + do { \ + printk(KERN_INFO "[" DRM_NAME "] " fmt, ##args); \ + } while (0) +#define DRM_DEBUG_VBL(fmt, args...) \ + do { \ + printk(KERN_INFO "[" DRM_NAME "] " fmt, ##args); \ + } while (0) + #else #define DRM_DEBUG_DRIVER(fmt, args...) do { } while (0) #define DRM_DEBUG_KMS(fmt, args...) do { } while (0) #define DRM_DEBUG_PRIME(fmt, args...) do { } while (0) #define DRM_DEBUG(fmt, arg...) 
do { } while (0) +#define DRM_DEBUG_ATOMIC(fmt, args...) do { } while (0) +#define DRM_DEBUG_VBL(fmt, args...) do { } while (0) #endif - /*@}*/ /***********************************************************************/ @@ -252,7 +270,6 @@ struct drm_ioctl_desc { unsigned int cmd; int flags; drm_ioctl_t *func; - unsigned int cmd_drv; const char *name; }; @@ -261,8 +278,13 @@ struct drm_ioctl_desc { * ioctl, for use by drm_ioctl(). */ -#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \ - [DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl, .name = #ioctl} +#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \ + [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = { \ + .cmd = DRM_IOCTL_##ioctl, \ + .func = _func, \ + .flags = _flags, \ + .name = #ioctl \ + } /* Event queued up for userspace to read */ struct drm_pending_event { @@ -292,7 +314,10 @@ struct drm_file { * in the plane list */ unsigned universal_planes:1; + /* true if client understands atomic properties */ + unsigned atomic:1; struct list_head lhead; + struct drm_minor *minor; unsigned long lock_count; /** Mapping of mm object handles to object pointers. */ @@ -313,6 +338,10 @@ struct drm_file { struct list_head fbs; struct mutex fbs_lock; + /** User-created blob properties; this retains a reference on the + * property. */ + struct list_head blobs; + wait_queue_head_t event_wait; struct list_head event_list; int event_space; @@ -340,8 +369,7 @@ struct drm_lock_data { * @minor: Link back to minor char device we are master for. Immutable. * @unique: Unique identifier: e.g. busid. Protected by drm_global_mutex. * @unique_len: Length of unique field. Protected by drm_global_mutex. - * @magiclist: Hash of used authentication tokens. Protected by struct_mutex. - * @magicfree: List of used authentication tokens. Protected by struct_mutex. + * @magic_map: Map of used authentication tokens. Protected by struct_mutex. * @lock: DRI lock information. * @driver_priv: Pointer to driver-private information. */ @@ -350,8 +378,7 @@ struct drm_master { struct drm_minor *minor; char *unique; int unique_len; - struct drm_open_hash magiclist; - struct list_head magicfree; + struct idr magic_map; struct drm_lock_data lock; void *driver_priv; }; @@ -383,7 +410,7 @@ struct drm_driver { /** * get_vblank_counter - get raw hardware vblank counter * @dev: DRM device - * @crtc: counter to fetch + * @pipe: counter to fetch * * Driver callback for fetching a raw hardware vblank counter for @crtc. * If a device doesn't have a hardware counter, the driver can simply @@ -397,12 +424,12 @@ struct drm_driver { * RETURNS * Raw vblank counter value. */ - u32 (*get_vblank_counter) (struct drm_device *dev, int crtc); + u32 (*get_vblank_counter) (struct drm_device *dev, unsigned int pipe); /** * enable_vblank - enable vblank interrupt events * @dev: DRM device - * @crtc: which irq to enable + * @pipe: which irq to enable * * Enable vblank interrupts for @crtc. If the device doesn't have * a hardware vblank counter, this routine should be a no-op, since @@ -412,18 +439,32 @@ struct drm_driver { * Zero on success, appropriate errno if the given @crtc's vblank * interrupt cannot be enabled. */ - int (*enable_vblank) (struct drm_device *dev, int crtc); + int (*enable_vblank) (struct drm_device *dev, unsigned int pipe); /** * disable_vblank - disable vblank interrupt events * @dev: DRM device - * @crtc: which irq to enable + * @pipe: which irq to enable * * Disable vblank interrupts for @crtc. 
If the device doesn't have * a hardware vblank counter, this routine should be a no-op, since * interrupts will have to stay on to keep the count accurate. */ - void (*disable_vblank) (struct drm_device *dev, int crtc); + void (*disable_vblank) (struct drm_device *dev, unsigned int pipe); + + /** + * Called by \c drm_device_is_agp. Typically used to determine if a + * card is really attached to AGP or not. + * + * \param dev DRM device handle + * + * \returns + * One of three values is returned depending on whether or not the + * card is absolutely \b not AGP (return of 0), absolutely \b is AGP + * (return of 1), or may or may not be AGP (return of 2). + */ + int (*device_is_agp) (struct drm_device *dev); + /** * Called by vblank timestamping code. * @@ -431,7 +472,7 @@ struct drm_driver { * optional accurate ktime_get timestamp of when position was measured. * * \param dev DRM device. - * \param crtc Id of the crtc to query. + * \param pipe Id of the crtc to query. * \param flags Flags from the caller (DRM_CALLED_FROM_VBLIRQ or 0). * \param *vpos Target location for current vertical scanout position. * \param *hpos Target location for current horizontal scanout position. @@ -439,6 +480,7 @@ struct drm_driver { * scanout position query. Can be NULL to skip timestamp. * \param *etime Target location for timestamp taken immediately after * scanout position query. Can be NULL to skip timestamp. + * \param mode Current display timings. * * Returns vpos as a positive number while in active scanout area. * Returns vpos as a negative number inside vblank, counting the number @@ -454,10 +496,10 @@ struct drm_driver { * but unknown small number of scanlines wrt. real scanout position. * */ - int (*get_scanout_position) (struct drm_device *dev, int crtc, - unsigned int flags, - int *vpos, int *hpos, void *stime, - void *etime); + int (*get_scanout_position) (struct drm_device *dev, unsigned int pipe, + unsigned int flags, int *vpos, int *hpos, + ktime_t *stime, ktime_t *etime, + const struct drm_display_mode *mode); /** * Called by \c drm_get_last_vbltimestamp. Should return a precise @@ -473,7 +515,7 @@ struct drm_driver { * to the OpenML OML_sync_control extension specification. * * \param dev dev DRM device handle. - * \param crtc crtc for which timestamp should be returned. + * \param pipe crtc for which timestamp should be returned. * \param *max_error Maximum allowable timestamp error in nanoseconds. * Implementation should strive to provide timestamp * with an error of at most *max_error nanoseconds. @@ -489,7 +531,7 @@ struct drm_driver { * negative number on failure. A positive status code on success, * which describes how the vblank_time timestamp was computed. */ - int (*get_vblank_timestamp) (struct drm_device *dev, int crtc, + int (*get_vblank_timestamp) (struct drm_device *dev, unsigned int pipe, int *max_error, struct timeval *vblank_time, unsigned flags); @@ -548,7 +590,7 @@ struct drm_info_node { struct drm_minor { int index; /**< Minor device number */ int type; /**< Control or render */ -// struct device kdev; /**< Linux device */ + struct device *kdev; /**< Linux device */ struct drm_device *dev; struct dentry *debugfs_root; @@ -558,28 +600,33 @@ struct drm_minor { /* currently active master for this node. 
Protected by master_mutex */
 	struct drm_master *master;
-	struct drm_mode_group mode_group;
 };
 
 struct drm_pending_vblank_event {
 	struct drm_pending_event base;
-	int pipe;
+	unsigned int pipe;
 	struct drm_event_vblank event;
 };
 
 struct drm_vblank_crtc {
 	struct drm_device *dev;		/* pointer to the drm_device */
 	wait_queue_head_t queue;	/**< VBLANK wait queue */
-	struct timeval time[DRM_VBLANKTIME_RBSIZE];	/**< timestamp of current count */
 	struct timer_list disable_timer;		/* delayed disable timer */
-	atomic_t count;			/**< number of VBLANK interrupts */
+
+	/* vblank counter, protected by dev->vblank_time_lock for writes */
+	u32 count;
+	/* vblank timestamps, protected by dev->vblank_time_lock for writes */
+	struct timeval time[DRM_VBLANKTIME_RBSIZE];
+
 	atomic_t refcount;		/* number of users of vblank interrupts per crtc */
 	u32 last;			/* protected by dev->vbl_lock, used */
 					/* for wraparound handling */
 	u32 last_wait;			/* Last vblank seqno waited per CRTC */
 	unsigned int inmodeset;		/* Display driver is setting mode */
-	int crtc;			/* crtc index */
+	unsigned int pipe;		/* crtc index */
+	int framedur_ns;		/* frame/field duration in ns */
+	int linedur_ns;			/* line duration in ns */
 	bool enabled;			/* so we don't call enable more than once per disable */
 };
 
@@ -598,7 +645,9 @@ struct drm_device {
 	struct device *dev;		/**< Device structure of bus-device */
 	struct drm_driver *driver;	/**< DRM driver managing the device */
 	void *dev_private;		/**< DRM driver private data */
+	struct drm_minor *control;		/**< Control node */
 	struct drm_minor *primary;		/**< Primary node */
+	struct drm_minor *render;		/**< Render node */
 	atomic_t unplugged;			/**< Flag whether dev is dead */
 	/** \name Locks */
 	/*@{ */
@@ -611,7 +660,7 @@ struct drm_device {
 	int open_count;			/**< Outstanding files open, protected by drm_global_mutex. */
 	spinlock_t buf_lock;		/**< For drm_device::buf_use and a few other things.
*/ int buf_use; /**< Buffers in use -- cannot alloc */ - atomic_t buf_alloc; /**< Buffer allocation in progress */ + atomic_t buf_alloc; /**< Buffer allocation in progress */ /*@} */ struct list_head filelist; @@ -638,8 +687,6 @@ struct drm_device { /** \name Context support */ /*@{ */ - bool irq_enabled; /**< True if irq handler is enabled */ - int irq; __volatile__ long context_flag; /**< Context swapping flag */ int last_context; /**< Last current context */ @@ -647,6 +694,8 @@ struct drm_device { /** \name VBLANK IRQ support */ /*@{ */ + bool irq_enabled; + int irq; /* * At load time, disabling the vblank interrupt won't be allowed since @@ -693,7 +742,7 @@ struct drm_device { struct drm_hw_lock *lock; } sigdata; - struct drm_mode_config mode_config; /**< Current mode config */ + struct drm_mode_config mode_config; /**< Current mode config */ /** \name GEM information */ /*@{ */ @@ -734,8 +783,9 @@ static inline int drm_device_is_unplugged(struct drm_device *dev) /*@{*/ /* Driver support (drm_drv.h) */ +extern int drm_ioctl_permit(u32 flags, struct drm_file *file_priv); extern long drm_ioctl(struct file *filp, - unsigned int cmd, unsigned long arg); + unsigned int cmd, unsigned long arg); extern long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); extern bool drm_ioctl_flags(unsigned int nr, unsigned int *flags); @@ -745,13 +795,16 @@ extern int drm_open(struct inode *inode, struct file *filp); extern ssize_t drm_read(struct file *filp, char __user *buffer, size_t count, loff_t *offset); extern int drm_release(struct inode *inode, struct file *filp); +extern int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv); /* Mapping support (drm_vm.h) */ extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); /* Misc. 
IOCTL support (drm_ioctl.c) */ int drm_noop(struct drm_device *dev, void *data, - struct drm_file *file_priv); + struct drm_file *file_priv); +int drm_invalid_op(struct drm_device *dev, void *data, + struct drm_file *file_priv); /* Cache management (drm_cache.c) */ void drm_clflush_pages(struct page *pages[], unsigned long num_pages); @@ -767,32 +820,39 @@ void drm_clflush_virt_range(void *addr, unsigned long length); extern int drm_irq_install(struct drm_device *dev, int irq); extern int drm_irq_uninstall(struct drm_device *dev); -extern int drm_vblank_init(struct drm_device *dev, int num_crtcs); +extern int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs); extern int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *filp); -extern u32 drm_vblank_count(struct drm_device *dev, int crtc); -extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc, +extern u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe); +extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc); +extern u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe, struct timeval *vblanktime); -extern void drm_send_vblank_event(struct drm_device *dev, int crtc, - struct drm_pending_vblank_event *e); -extern bool drm_handle_vblank(struct drm_device *dev, int crtc); -extern int drm_vblank_get(struct drm_device *dev, int crtc); -extern void drm_vblank_put(struct drm_device *dev, int crtc); +extern u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc, + struct timeval *vblanktime); +extern void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe, + struct drm_pending_vblank_event *e); +extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc, + struct drm_pending_vblank_event *e); +extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe); +extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc); +extern int drm_vblank_get(struct drm_device *dev, unsigned int pipe); +extern void drm_vblank_put(struct drm_device *dev, unsigned int pipe); extern int drm_crtc_vblank_get(struct drm_crtc *crtc); extern void drm_crtc_vblank_put(struct drm_crtc *crtc); -extern void drm_wait_one_vblank(struct drm_device *dev, int crtc); +extern void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe); extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc); -extern void drm_vblank_off(struct drm_device *dev, int crtc); -extern void drm_vblank_on(struct drm_device *dev, int crtc); +extern void drm_vblank_off(struct drm_device *dev, unsigned int pipe); +extern void drm_vblank_on(struct drm_device *dev, unsigned int pipe); extern void drm_crtc_vblank_off(struct drm_crtc *crtc); +extern void drm_crtc_vblank_reset(struct drm_crtc *crtc); extern void drm_crtc_vblank_on(struct drm_crtc *crtc); extern void drm_vblank_cleanup(struct drm_device *dev); +extern u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe); extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, - int crtc, int *max_error, + unsigned int pipe, int *max_error, struct timeval *vblank_time, unsigned flags, - const struct drm_crtc *refcrtc, const struct drm_display_mode *mode); extern void drm_calc_timestamping_constants(struct drm_crtc *crtc, const struct drm_display_mode *mode); @@ -810,8 +870,8 @@ static inline wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc } /* Modesetting support */ -extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc); -extern void drm_vblank_post_modeset(struct drm_device *dev, 
int crtc); +extern void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe); +extern void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe); /* Stub support (drm_stub.h) */ extern struct drm_master *drm_master_get(struct drm_master *master); @@ -820,12 +880,13 @@ extern void drm_master_put(struct drm_master **master); extern void drm_put_dev(struct drm_device *dev); extern void drm_unplug_dev(struct drm_device *dev); extern unsigned int drm_debug; +extern bool drm_atomic; /* Debugfs support */ #if defined(CONFIG_DEBUG_FS) extern int drm_debugfs_create_files(const struct drm_info_list *files, int count, struct dentry *root, - struct drm_minor *minor); + struct drm_minor *minor); extern int drm_debugfs_remove_files(const struct drm_info_list *files, int count, struct drm_minor *minor); #else @@ -861,7 +922,7 @@ extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *s extern struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size, - size_t align); + size_t align); extern void drm_pci_free(struct drm_device *dev, struct drm_dma_handle * dmah); /* sysfs support (drm_sysfs.c) */ diff --git a/drivers/include/drm/drm_agpsupport.h b/drivers/include/drm/drm_agpsupport.h index 055dc058d..193ef19df 100644 --- a/drivers/include/drm/drm_agpsupport.h +++ b/drivers/include/drm/drm_agpsupport.h @@ -12,9 +12,6 @@ struct drm_device; struct drm_file; -#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && \ - defined(MODULE))) - struct drm_agp_head { struct agp_kern_info agp_info; struct list_head memory; @@ -28,7 +25,7 @@ struct drm_agp_head { unsigned long page_mask; }; -#if __OS_HAS_AGP +#if IS_ENABLED(CONFIG_AGP) void drm_free_agp(struct agp_memory * handle, int pages); int drm_bind_agp(struct agp_memory * handle, unsigned int start); @@ -66,7 +63,7 @@ int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); int drm_agp_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -#else /* __OS_HAS_AGP */ +#else /* CONFIG_AGP */ static inline void drm_free_agp(struct agp_memory * handle, int pages) { @@ -105,95 +102,47 @@ static inline int drm_agp_acquire(struct drm_device *dev) return -ENODEV; } -static inline int drm_agp_acquire_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - return -ENODEV; -} - static inline int drm_agp_release(struct drm_device *dev) { return -ENODEV; } -static inline int drm_agp_release_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - return -ENODEV; -} - static inline int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode) { return -ENODEV; } -static inline int drm_agp_enable_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - return -ENODEV; -} - static inline int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info) { return -ENODEV; } -static inline int drm_agp_info_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - return -ENODEV; -} - static inline int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request) { return -ENODEV; } -static inline int drm_agp_alloc_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - return -ENODEV; -} - static inline int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request) { return -ENODEV; } -static inline int drm_agp_free_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - return -ENODEV; -} - static inline int 
drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request) { return -ENODEV; } -static inline int drm_agp_unbind_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - return -ENODEV; -} - static inline int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request) { return -ENODEV; } -static inline int drm_agp_bind_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - return -ENODEV; -} - -#endif /* __OS_HAS_AGP */ +#endif /* CONFIG_AGP */ #endif /* _DRM_AGPSUPPORT_H_ */ diff --git a/drivers/include/drm/drm_atomic.h b/drivers/include/drm/drm_atomic.h index ad2229574..4b74c97d2 100644 --- a/drivers/include/drm/drm_atomic.h +++ b/drivers/include/drm/drm_atomic.h @@ -35,19 +35,89 @@ drm_atomic_state_alloc(struct drm_device *dev); void drm_atomic_state_clear(struct drm_atomic_state *state); void drm_atomic_state_free(struct drm_atomic_state *state); +int __must_check +drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state); +void drm_atomic_state_default_clear(struct drm_atomic_state *state); +void drm_atomic_state_default_release(struct drm_atomic_state *state); + struct drm_crtc_state * __must_check drm_atomic_get_crtc_state(struct drm_atomic_state *state, struct drm_crtc *crtc); +int drm_atomic_crtc_set_property(struct drm_crtc *crtc, + struct drm_crtc_state *state, struct drm_property *property, + uint64_t val); struct drm_plane_state * __must_check drm_atomic_get_plane_state(struct drm_atomic_state *state, struct drm_plane *plane); +int drm_atomic_plane_set_property(struct drm_plane *plane, + struct drm_plane_state *state, struct drm_property *property, + uint64_t val); struct drm_connector_state * __must_check drm_atomic_get_connector_state(struct drm_atomic_state *state, struct drm_connector *connector); +int drm_atomic_connector_set_property(struct drm_connector *connector, + struct drm_connector_state *state, struct drm_property *property, + uint64_t val); + +/** + * drm_atomic_get_existing_crtc_state - get crtc state, if it exists + * @state: global atomic state object + * @crtc: crtc to grab + * + * This function returns the crtc state for the given crtc, or NULL + * if the crtc is not part of the global atomic state. + */ +static inline struct drm_crtc_state * +drm_atomic_get_existing_crtc_state(struct drm_atomic_state *state, + struct drm_crtc *crtc) +{ + return state->crtc_states[drm_crtc_index(crtc)]; +} + +/** + * drm_atomic_get_existing_plane_state - get plane state, if it exists + * @state: global atomic state object + * @plane: plane to grab + * + * This function returns the plane state for the given plane, or NULL + * if the plane is not part of the global atomic state. + */ +static inline struct drm_plane_state * +drm_atomic_get_existing_plane_state(struct drm_atomic_state *state, + struct drm_plane *plane) +{ + return state->plane_states[drm_plane_index(plane)]; +} + +/** + * drm_atomic_get_existing_connector_state - get connector state, if it exists + * @state: global atomic state object + * @connector: connector to grab + * + * This function returns the connector state for the given connector, + * or NULL if the connector is not part of the global atomic state. 
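+ *
+ * A minimal usage sketch (illustrative only):
+ *
+ *	struct drm_connector_state *conn_state =
+ *		drm_atomic_get_existing_connector_state(state, connector);
+ *
+ *	if (conn_state)
+ *		encoder = conn_state->best_encoder;
+ *
+ * Unlike drm_atomic_get_connector_state(), this neither allocates a new
+ * state nor acquires any locks.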
+ */ +static inline struct drm_connector_state * +drm_atomic_get_existing_connector_state(struct drm_atomic_state *state, + struct drm_connector *connector) +{ + int index = drm_connector_index(connector); + + if (index >= state->num_connector) + return NULL; + + return state->connector_states[index]; +} int __must_check -drm_atomic_set_crtc_for_plane(struct drm_atomic_state *state, - struct drm_plane *plane, struct drm_crtc *crtc); +drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state, + struct drm_display_mode *mode); +int __must_check +drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state, + struct drm_property_blob *blob); +int __must_check +drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state, + struct drm_crtc *crtc); void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state, struct drm_framebuffer *fb); int __must_check @@ -56,14 +126,52 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state, int __must_check drm_atomic_add_affected_connectors(struct drm_atomic_state *state, struct drm_crtc *crtc); +int __must_check +drm_atomic_add_affected_planes(struct drm_atomic_state *state, + struct drm_crtc *crtc); + int drm_atomic_connectors_for_crtc(struct drm_atomic_state *state, struct drm_crtc *crtc); void drm_atomic_legacy_backoff(struct drm_atomic_state *state); +void +drm_atomic_clean_old_fb(struct drm_device *dev, unsigned plane_mask, int ret); + int __must_check drm_atomic_check_only(struct drm_atomic_state *state); int __must_check drm_atomic_commit(struct drm_atomic_state *state); int __must_check drm_atomic_async_commit(struct drm_atomic_state *state); +#define for_each_connector_in_state(state, connector, connector_state, __i) \ + for ((__i) = 0; \ + (__i) < (state)->num_connector && \ + ((connector) = (state)->connectors[__i], \ + (connector_state) = (state)->connector_states[__i], 1); \ + (__i)++) \ + if (connector) + +#define for_each_crtc_in_state(state, crtc, crtc_state, __i) \ + for ((__i) = 0; \ + (__i) < (state)->dev->mode_config.num_crtc && \ + ((crtc) = (state)->crtcs[__i], \ + (crtc_state) = (state)->crtc_states[__i], 1); \ + (__i)++) \ + if (crtc_state) + +#define for_each_plane_in_state(state, plane, plane_state, __i) \ + for ((__i) = 0; \ + (__i) < (state)->dev->mode_config.num_total_plane && \ + ((plane) = (state)->planes[__i], \ + (plane_state) = (state)->plane_states[__i], 1); \ + (__i)++) \ + if (plane_state) +static inline bool +drm_atomic_crtc_needs_modeset(struct drm_crtc_state *state) +{ + return state->mode_changed || state->active_changed || + state->connectors_changed; +} + + #endif /* DRM_ATOMIC_H_ */ diff --git a/drivers/include/drm/drm_atomic_helper.h b/drivers/include/drm/drm_atomic_helper.h index f956b4133..8cba54a2a 100644 --- a/drivers/include/drm/drm_atomic_helper.h +++ b/drivers/include/drm/drm_atomic_helper.h @@ -30,6 +30,12 @@ #include +struct drm_atomic_state; + +int drm_atomic_helper_check_modeset(struct drm_device *dev, + struct drm_atomic_state *state); +int drm_atomic_helper_check_planes(struct drm_device *dev, + struct drm_atomic_state *state); int drm_atomic_helper_check(struct drm_device *dev, struct drm_atomic_state *state); int drm_atomic_helper_commit(struct drm_device *dev, @@ -39,17 +45,23 @@ int drm_atomic_helper_commit(struct drm_device *dev, void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, struct drm_atomic_state *old_state); -void drm_atomic_helper_commit_pre_planes(struct drm_device *dev, - struct drm_atomic_state *state); -void 
drm_atomic_helper_commit_post_planes(struct drm_device *dev, +void +drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev, + struct drm_atomic_state *old_state); + +void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev, + struct drm_atomic_state *state); +void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev, struct drm_atomic_state *old_state); int drm_atomic_helper_prepare_planes(struct drm_device *dev, struct drm_atomic_state *state); void drm_atomic_helper_commit_planes(struct drm_device *dev, - struct drm_atomic_state *state); + struct drm_atomic_state *state, + bool active_only); void drm_atomic_helper_cleanup_planes(struct drm_device *dev, struct drm_atomic_state *old_state); +void drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state); void drm_atomic_helper_swap_state(struct drm_device *dev, struct drm_atomic_state *state); @@ -63,7 +75,11 @@ int drm_atomic_helper_update_plane(struct drm_plane *plane, uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h); int drm_atomic_helper_disable_plane(struct drm_plane *plane); +int __drm_atomic_helper_disable_plane(struct drm_plane *plane, + struct drm_plane_state *plane_state); int drm_atomic_helper_set_config(struct drm_mode_set *set); +int __drm_atomic_helper_set_config(struct drm_mode_set *set, + struct drm_atomic_state *state); int drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc, struct drm_property *property, @@ -78,23 +94,42 @@ int drm_atomic_helper_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_pending_vblank_event *event, uint32_t flags); +int drm_atomic_helper_connector_dpms(struct drm_connector *connector, + int mode); /* default implementations for state handling */ void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc); +void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc, + struct drm_crtc_state *state); struct drm_crtc_state * drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc); +void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc, + struct drm_crtc_state *state); void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state); void drm_atomic_helper_plane_reset(struct drm_plane *plane); +void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane, + struct drm_plane_state *state); struct drm_plane_state * drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane); +void __drm_atomic_helper_plane_destroy_state(struct drm_plane *plane, + struct drm_plane_state *state); void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane, struct drm_plane_state *state); void drm_atomic_helper_connector_reset(struct drm_connector *connector); +void +__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector, + struct drm_connector_state *state); struct drm_connector_state * drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector); +struct drm_atomic_state * +drm_atomic_helper_duplicate_state(struct drm_device *dev, + struct drm_modeset_acquire_ctx *ctx); +void +__drm_atomic_helper_connector_destroy_state(struct drm_connector *connector, + struct drm_connector_state *state); void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector, struct drm_connector_state *state); @@ -123,4 +158,41 @@ void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector, #define drm_atomic_crtc_state_for_each_plane(plane, crtc_state) \ drm_for_each_plane_mask(plane, 
(crtc_state)->state->dev, (crtc_state)->plane_mask) +/* + * drm_atomic_plane_disabling - check whether a plane is being disabled + * @plane: plane object + * @old_state: previous atomic state + * + * Checks the atomic state of a plane to determine whether it's being disabled + * or not. This also WARNs if it detects an invalid state (both CRTC and FB + * need to either both be NULL or both be non-NULL). + * + * RETURNS: + * True if the plane is being disabled, false otherwise. + */ +static inline bool +drm_atomic_plane_disabling(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + /* + * When disabling a plane, CRTC and FB should always be NULL together. + * Anything else should be considered a bug in the atomic core, so we + * gently warn about it. + */ + WARN_ON((plane->state->crtc == NULL && plane->state->fb != NULL) || + (plane->state->crtc != NULL && plane->state->fb == NULL)); + + /* + * When using the transitional helpers, old_state may be NULL. If so, + * we know nothing about the current state and have to assume that it + * might be enabled. + * + * When using the atomic helpers, old_state won't be NULL. Therefore + * this check assumes that either the driver will have reconstructed + * the correct state in ->reset() or that the driver will have taken + * appropriate measures to disable all planes. + */ + return (!old_state || old_state->crtc) && !plane->state->crtc; +} + #endif /* DRM_ATOMIC_HELPER_H_ */ diff --git a/drivers/include/drm/drm_crtc.h b/drivers/include/drm/drm_crtc.h index fbfced8bf..e4a72db33 100644 --- a/drivers/include/drm/drm_crtc.h +++ b/drivers/include/drm/drm_crtc.h @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -52,7 +53,6 @@ struct fence; #define DRM_MODE_OBJECT_FB 0xfbfbfbfb #define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb #define DRM_MODE_OBJECT_PLANE 0xeeeeeeee -#define DRM_MODE_OBJECT_BRIDGE 0xbdbdbdbd #define DRM_MODE_OBJECT_ANY 0 struct drm_mode_object { @@ -63,8 +63,16 @@ struct drm_mode_object { #define DRM_OBJECT_MAX_PROPERTY 24 struct drm_object_properties { - int count; - uint32_t ids[DRM_OBJECT_MAX_PROPERTY]; + int count, atomic_count; + /* NOTE: if we ever start dynamically destroying properties (ie. 
+ * not at drm_mode_config_cleanup() time), then we'd have to do
+ * a better job of detaching property from mode objects to avoid
+ * dangling property pointers:
+ */
+	struct drm_property *properties[DRM_OBJECT_MAX_PROPERTY];
+	/* do not read/write values directly, but use drm_object_property_get_value()
+	 * and drm_object_property_set_value():
+	 */
 	uint64_t values[DRM_OBJECT_MAX_PROPERTY];
 };
 
@@ -78,10 +86,12 @@ static inline uint64_t I642U64(int64_t val)
 }
 
 /* rotation property bits */
+#define DRM_ROTATE_MASK 0x0f
 #define DRM_ROTATE_0	0
 #define DRM_ROTATE_90	1
 #define DRM_ROTATE_180	2
 #define DRM_ROTATE_270	3
+#define DRM_REFLECT_MASK (~DRM_ROTATE_MASK)
 #define DRM_REFLECT_X	4
 #define DRM_REFLECT_Y	5
 
@@ -131,6 +141,9 @@ struct drm_display_info {
 	enum subpixel_order subpixel_order;
 	u32 color_formats;
 
+	const u32 *bus_formats;
+	unsigned int num_bus_formats;
+
 	/* Mask of supported hdmi deep color modes */
 	u8 edid_hdmi_dc_modes;
 
@@ -190,6 +203,7 @@ struct drm_framebuffer {
 	const struct drm_framebuffer_funcs *funcs;
 	unsigned int pitches[4];
 	unsigned int offsets[4];
+	uint64_t modifier[4];
 	unsigned int width;
 	unsigned int height;
 	/* depth can be 15 or 16 */
@@ -198,13 +212,14 @@ struct drm_framebuffer {
 	int flags;
 	uint32_t pixel_format; /* fourcc format */
 	struct list_head filp_head;
-	/* if you are using the helper */
-	void *helper_private;
 };
 
 struct drm_property_blob {
 	struct drm_mode_object base;
-	struct list_head head;
+	struct drm_device *dev;
+	struct kref refcount;
+	struct list_head head_global;
+	struct list_head head_file;
 	size_t length;
 	unsigned char data[];
 };
 
@@ -216,12 +231,12 @@ struct drm_property_enum {
 };
 
 struct drm_property {
-	struct list_head head;
-	struct drm_mode_object base;
-	uint32_t flags;
-	char name[DRM_PROP_NAME_LEN];
-	uint32_t num_values;
-	uint64_t *values;
+	struct list_head head;
+	struct drm_mode_object base;
+	uint32_t flags;
+	char name[DRM_PROP_NAME_LEN];
+	uint32_t num_values;
+	uint64_t *values;
 	struct drm_device *dev;
 
 	struct list_head enum_list;
 
@@ -237,24 +252,39 @@ struct drm_atomic_state;
 
 /**
  * struct drm_crtc_state - mutable CRTC state
+ * @crtc: backpointer to the CRTC
  * @enable: whether the CRTC should be enabled, gates all other state
- * @mode_changed: for use by helpers and drivers when computing state updates
+ * @active: whether the CRTC is actively displaying (used for DPMS)
+ * @planes_changed: planes on this crtc are updated
+ * @mode_changed: crtc_state->mode or crtc_state->enable has been changed
+ * @active_changed: crtc_state->active has been toggled.
+ * @connectors_changed: connectors to this crtc have been updated
  * @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
  * @last_vblank_count: for helpers and drivers to capture the vblank of the
  *	update to ensure framebuffer cleanup isn't done too early
- * @planes_changed: for use by helpers and drivers when computing state updates
  * @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings
  * @mode: current mode timings
  * @event: optional pointer to a DRM event to signal upon completion of the
  *	state update
  * @state: backpointer to global drm_atomic_state
+ *
+ * Note that the distinction between @enable and @active is rather subtle:
+ * flipping @active while @enable is set, without changing anything else, must
+ * never make the ->atomic_check callback fail. Userspace assumes that a DPMS
+ * On will always succeed. In other words: @enable controls resource
+ * assignment, @active controls the actual hardware state.
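+ *
+ * For example (sketch only), a legacy DPMS Off request maps onto this
+ * state as:
+ *
+ *	crtc_state->enable = true;	// mode and resources stay assigned
+ *	crtc_state->active = false;	// scanout stops, display goes dark
+ *
+ * whereas a full disable clears both flags.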
*/ struct drm_crtc_state { + struct drm_crtc *crtc; + bool enable; + bool active; /* computed state bits used by helpers and drivers */ bool planes_changed : 1; bool mode_changed : 1; + bool active_changed : 1; + bool connectors_changed : 1; /* attached planes bitmask: * WARNING: transitional helpers do not maintain plane_mask so @@ -271,6 +301,9 @@ struct drm_crtc_state { struct drm_display_mode mode; + /* blob property to expose current mode to atomic userspace */ + struct drm_property_blob *mode_blob; + struct drm_pending_vblank_event *event; struct drm_atomic_state *state; @@ -292,6 +325,9 @@ struct drm_crtc_state { * @atomic_duplicate_state: duplicate the atomic state for this CRTC * @atomic_destroy_state: destroy an atomic state for this CRTC * @atomic_set_property: set a property on an atomic state for this CRTC + * (do not call directly, use drm_atomic_crtc_set_property()) + * @atomic_get_property: get a property on an atomic state for this CRTC + * (do not call directly, use drm_atomic_crtc_get_property()) * * The drm_crtc_funcs structure is the central CRTC management structure * in the DRM. Each CRTC controls one or more connectors (note that the name @@ -351,6 +387,10 @@ struct drm_crtc_funcs { struct drm_crtc_state *state, struct drm_property *property, uint64_t val); + int (*atomic_get_property)(struct drm_crtc *crtc, + const struct drm_crtc_state *state, + struct drm_property *property, + uint64_t *val); }; /** @@ -367,17 +407,11 @@ struct drm_crtc_funcs { * @enabled: is this CRTC enabled? * @mode: current mode timings * @hwmode: mode timings as programmed to hw regs - * @invert_dimensions: for purposes of error checking crtc vs fb sizes, - * invert the width/height of the crtc. This is used if the driver - * is performing 90 or 270 degree rotated scanout * @x: x position on screen * @y: y position on screen * @funcs: CRTC control functions * @gamma_size: size of gamma ramp * @gamma_store: gamma ramp values - * @framedur_ns: precise frame timing - * @linedur_ns: precise line timing - * @pixeldur_ns: precise pixel timing * @helper_private: mid-layer private data * @properties: property tracking for this CRTC * @state: current atomic state for this CRTC @@ -421,8 +455,6 @@ struct drm_crtc { */ struct drm_display_mode hwmode; - bool invert_dimensions; - int x, y; const struct drm_crtc_funcs *funcs; @@ -430,11 +462,8 @@ struct drm_crtc { uint32_t gamma_size; uint16_t *gamma_store; - /* Constants needed for precise vblank and swap timestamping. */ - int framedur_ns, linedur_ns, pixeldur_ns; - /* if you are using the helper */ - void *helper_private; + const void *helper_private; struct drm_object_properties properties; @@ -449,11 +478,14 @@ struct drm_crtc { /** * struct drm_connector_state - mutable connector state + * @connector: backpointer to the connector * @crtc: CRTC to connect connector to, NULL if disabled * @best_encoder: can be used by helpers and drivers to select the encoder * @state: backpointer to global drm_atomic_state */ struct drm_connector_state { + struct drm_connector *connector; + struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_connector() */ struct drm_encoder *best_encoder; @@ -463,7 +495,7 @@ struct drm_connector_state { /** * struct drm_connector_funcs - control connectors on a given device - * @dpms: set power state (see drm_crtc_funcs above) + * @dpms: set power state * @save: save connector state * @restore: restore connector state * @reset: reset connector after state has been invalidated (e.g. 
resume)
@@ -475,13 +507,16 @@ struct drm_connector_state {
  * @atomic_duplicate_state: duplicate the atomic state for this connector
  * @atomic_destroy_state: destroy an atomic state for this connector
  * @atomic_set_property: set a property on an atomic state for this connector
+ *	(do not call directly, use drm_atomic_connector_set_property())
+ * @atomic_get_property: get a property on an atomic state for this connector
+ *	(do not call directly, use drm_atomic_connector_get_property())
  *
  * Each CRTC may have one or more connectors attached to it. The functions
  * below allow the core DRM code to control connectors, enumerate available modes,
  * etc.
  */
 struct drm_connector_funcs {
-	void (*dpms)(struct drm_connector *connector, int mode);
+	int (*dpms)(struct drm_connector *connector, int mode);
 	void (*save)(struct drm_connector *connector);
 	void (*restore)(struct drm_connector *connector);
 	void (*reset)(struct drm_connector *connector);
@@ -508,6 +543,10 @@
 			struct drm_connector_state *state,
 			struct drm_property *property,
 			uint64_t val);
+	int (*atomic_get_property)(struct drm_connector *connector,
+			const struct drm_connector_state *state,
+			struct drm_property *property,
+			uint64_t *val);
 };
 
 /**
@@ -554,7 +593,7 @@ struct drm_encoder {
 	struct drm_crtc *crtc;
 	struct drm_bridge *bridge;
 	const struct drm_encoder_funcs *funcs;
-	void *helper_private;
+	const void *helper_private;
 };
 
 /* should we poll this connector for connects and disconnects */
@@ -605,6 +644,7 @@ struct drm_encoder {
  * @audio_latency: audio latency info from ELD, if found
  * @null_edid_counter: track sinks that give us all zeros for the EDID
  * @bad_edid_counter: track sinks that give us an EDID with invalid checksum
+ * @edid_corrupt: indicates whether the last read EDID was corrupt
  * @debugfs_entry: debugfs directory for this connector
  * @state: current atomic state for this connector
  * @has_tile: is this connector connected to a tiled monitor
@@ -658,7 +698,7 @@ struct drm_connector {
 	/* requested DPMS state */
 	int dpms;
 
-	void *helper_private;
+	const void *helper_private;
 
 	/* forced on connector */
 	struct drm_cmdline_mode cmdline_mode;
@@ -677,6 +717,11 @@ struct drm_connector {
 	int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
 	unsigned bad_edid_counter;
 
+	/* Flag for raw EDID header corruption - used in Displayport
+	 * compliance testing - Displayport Link CTS Core 1.2 rev1.1 4.2.2.6
+	 */
+	bool edid_corrupt;
+
 	struct dentry *debugfs_entry;
 
 	struct drm_connector_state *state;
@@ -693,6 +738,7 @@ struct drm_connector {
 
 /**
  * struct drm_plane_state - mutable plane state
+ * @plane: backpointer to the plane
  * @crtc: currently bound CRTC, NULL if disabled
  * @fb: currently bound framebuffer
  * @fence: optional fence to wait for before scanning out @fb
@@ -709,6 +755,8 @@ struct drm_connector {
  * @state: backpointer to global drm_atomic_state
  */
 struct drm_plane_state {
+	struct drm_plane *plane;
+
 	struct drm_crtc *crtc;   /* do not write directly, use drm_atomic_set_crtc_for_plane() */
 	struct drm_framebuffer *fb;  /* do not write directly, use drm_atomic_set_fb_for_plane() */
 	struct fence *fence;
@@ -721,6 +769,9 @@ struct drm_plane_state {
 	uint32_t src_x, src_y;
 	uint32_t src_h, src_w;
 
+	/* Plane rotation */
+	unsigned int rotation;
+
 	struct drm_atomic_state *state;
 };
 
@@ -735,6 +786,9 @@ struct drm_plane_state {
  * @atomic_duplicate_state: duplicate the atomic state for this plane
  * @atomic_destroy_state: destroy an atomic state for this plane
  * @atomic_set_property:
set a property on an atomic state for this plane + * (do not call directly, use drm_atomic_plane_set_property()) + * @atomic_get_property: get a property on an atomic state for this plane + * (do not call directly, use drm_atomic_plane_get_property()) */ struct drm_plane_funcs { int (*update_plane)(struct drm_plane *plane, @@ -758,6 +812,10 @@ struct drm_plane_funcs { struct drm_plane_state *state, struct drm_property *property, uint64_t val); + int (*atomic_get_property)(struct drm_plane *plane, + const struct drm_plane_state *state, + struct drm_property *property, + uint64_t *val); }; enum drm_plane_type { @@ -774,6 +832,7 @@ enum drm_plane_type { * @possible_crtcs: pipes this plane can be bound to * @format_types: array of formats supported by this plane * @format_count: number of formats supported + * @format_default: driver hasn't supplied supported formats for the plane * @crtc: currently bound CRTC * @fb: currently bound fb * @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by @@ -793,7 +852,8 @@ struct drm_plane { uint32_t possible_crtcs; uint32_t *format_types; - uint32_t format_count; + unsigned int format_count; + bool format_default; struct drm_crtc *crtc; struct drm_framebuffer *fb; @@ -806,22 +866,23 @@ struct drm_plane { enum drm_plane_type type; - void *helper_private; + const void *helper_private; struct drm_plane_state *state; }; /** * struct drm_bridge_funcs - drm_bridge control functions + * @attach: Called during drm_bridge_attach * @mode_fixup: Try to fixup (or reject entirely) proposed mode for this bridge * @disable: Called right before encoder prepare, disables the bridge * @post_disable: Called right after encoder prepare, for lockstepped disable * @mode_set: Set this mode to the bridge * @pre_enable: Called right before encoder commit, for lockstepped commit * @enable: Called right after encoder commit, enables the bridge - * @destroy: make object go away */ struct drm_bridge_funcs { + int (*attach)(struct drm_bridge *bridge); bool (*mode_fixup)(struct drm_bridge *bridge, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); @@ -832,31 +893,36 @@ struct drm_bridge_funcs { struct drm_display_mode *adjusted_mode); void (*pre_enable)(struct drm_bridge *bridge); void (*enable)(struct drm_bridge *bridge); - void (*destroy)(struct drm_bridge *bridge); }; /** * struct drm_bridge - central DRM bridge control structure * @dev: DRM device this bridge belongs to - * @head: list management - * @base: base mode object + * @encoder: encoder to which this bridge is connected + * @next: the next bridge in the encoder chain + * @of_node: device node pointer to the bridge + * @list: to keep track of all added bridges * @funcs: control functions * @driver_private: pointer to the bridge driver's internal context */ struct drm_bridge { struct drm_device *dev; - struct list_head head; - - struct drm_mode_object base; + struct drm_encoder *encoder; + struct drm_bridge *next; +#ifdef CONFIG_OF + struct device_node *of_node; +#endif + struct list_head list; const struct drm_bridge_funcs *funcs; void *driver_private; }; /** - * struct struct drm_atomic_state - the global state object for atomic updates + * struct drm_atomic_state - the global state object for atomic updates * @dev: parent DRM device - * @flags: state flags like async update + * @allow_modeset: allow full modeset + * @legacy_cursor_update: hint to enforce legacy cursor ioctl semantics * @planes: pointer to array of plane pointers * @plane_states: pointer to array of plane 
states pointers * @crtcs: pointer to array of CRTC pointers @@ -868,7 +934,8 @@ struct drm_bridge { */ struct drm_atomic_state { struct drm_device *dev; - uint32_t flags; + bool allow_modeset : 1; + bool legacy_cursor_update : 1; struct drm_plane **planes; struct drm_plane_state **plane_states; struct drm_crtc **crtcs; @@ -912,9 +979,12 @@ struct drm_mode_set { * struct drm_mode_config_funcs - basic driver provided mode setting functions * @fb_create: create a new framebuffer object * @output_poll_changed: function to handle output configuration changes - * @atomic_check: check whether a give atomic state update is possible + * @atomic_check: check whether a given atomic state update is possible * @atomic_commit: commit an atomic state update previously verified with * atomic_check() + * @atomic_state_alloc: allocate a new atomic state + * @atomic_state_clear: clear the atomic state + * @atomic_state_free: free the atomic state * * Some global (i.e. not per-CRTC, connector, etc) mode setting functions that * involve drivers. @@ -930,30 +1000,9 @@ struct drm_mode_config_funcs { int (*atomic_commit)(struct drm_device *dev, struct drm_atomic_state *a, bool async); -}; - -/** - * struct drm_mode_group - group of mode setting resources for potential sub-grouping - * @num_crtcs: CRTC count - * @num_encoders: encoder count - * @num_connectors: connector count - * @num_bridges: bridge count - * @id_list: list of KMS object IDs in this group - * - * Currently this simply tracks the global mode setting state. But in the - * future it could allow groups of objects to be set aside into independent - * control groups for use by different user level processes (e.g. two X servers - * running simultaneously on different heads, each with their own mode - * configuration and freedom of mode setting). 
- */ -struct drm_mode_group { - uint32_t num_crtcs; - uint32_t num_encoders; - uint32_t num_connectors; - uint32_t num_bridges; - - /* list of object IDs for this group */ - uint32_t *id_list; + struct drm_atomic_state *(*atomic_state_alloc)(struct drm_device *dev); + void (*atomic_state_clear)(struct drm_atomic_state *state); + void (*atomic_state_free)(struct drm_atomic_state *state); }; /** @@ -969,8 +1018,6 @@ struct drm_mode_group { * @fb_list: list of framebuffers available * @num_connector: number of connectors on this device * @connector_list: list of connector objects - * @num_bridge: number of bridges on this device - * @bridge_list: list of bridge objects * @num_encoder: number of encoders on this device * @encoder_list: list of encoder objects * @num_overlay_plane: number of overlay planes on this device @@ -989,6 +1036,7 @@ struct drm_mode_group { * @poll_running: track polling status for this device * @output_poll_work: delayed work for polling in process context * @property_blob_list: list of all the blob property objects + * @blob_lock: mutex for blob property allocation and management * @*_property: core property tracking * @preferred_depth: preferred RBG pixel depth, used by fb helpers * @prefer_shadow: hint to userspace to prefer shadow-fb rendering @@ -1005,7 +1053,7 @@ struct drm_mode_config { struct drm_modeset_lock connection_mutex; /* protects connector->encoder and encoder->crtc links */ struct drm_modeset_acquire_ctx *acquire_ctx; /* for legacy _lock_all() / _unlock_all() */ struct mutex idr_mutex; /* for IDR management */ - struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */ + struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */ struct idr tile_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */ /* this is limited to one for now */ @@ -1015,8 +1063,6 @@ struct drm_mode_config { int num_connector; struct list_head connector_list; - int num_bridge; - struct list_head bridge_list; int num_encoder; struct list_head encoder_list; @@ -1043,8 +1089,11 @@ struct drm_mode_config { /* output poll support */ bool poll_enabled; bool poll_running; + bool delayed_event; struct delayed_work output_poll_work; + struct mutex blob_lock; + /* pointers to standard properties */ struct list_head property_blob_list; struct drm_property *edid_property; @@ -1053,6 +1102,18 @@ struct drm_mode_config { struct drm_property *tile_property; struct drm_property *plane_type_property; struct drm_property *rotation_property; + struct drm_property *prop_src_x; + struct drm_property *prop_src_y; + struct drm_property *prop_src_w; + struct drm_property *prop_src_h; + struct drm_property *prop_crtc_x; + struct drm_property *prop_crtc_y; + struct drm_property *prop_crtc_w; + struct drm_property *prop_crtc_h; + struct drm_property *prop_fb_id; + struct drm_property *prop_crtc_id; + struct drm_property *prop_active; + struct drm_property *prop_mode_id; /* DVI-I properties */ struct drm_property *dvi_i_subconnector_property; @@ -1088,6 +1149,9 @@ struct drm_mode_config { /* whether async page flip is supported or not */ bool async_page_flip; + /* whether the driver supports fb modifiers */ + bool allow_fb_modifiers; + /* cursor size */ uint32_t cursor_width, cursor_height; }; @@ -1142,9 +1206,9 @@ static inline uint32_t drm_crtc_mask(struct drm_crtc *crtc) extern void drm_connector_ida_init(void); extern void drm_connector_ida_destroy(void); extern int 
drm_connector_init(struct drm_device *dev, - struct drm_connector *connector, - const struct drm_connector_funcs *funcs, - int connector_type); + struct drm_connector *connector, + const struct drm_connector_funcs *funcs, + int connector_type); int drm_connector_register(struct drm_connector *connector); void drm_connector_unregister(struct drm_connector *connector); @@ -1153,14 +1217,26 @@ extern unsigned int drm_connector_index(struct drm_connector *connector); /* helper to unplug all connectors from sysfs for device */ extern void drm_connector_unplug_all(struct drm_device *dev); -extern int drm_bridge_init(struct drm_device *dev, struct drm_bridge *bridge, - const struct drm_bridge_funcs *funcs); -extern void drm_bridge_cleanup(struct drm_bridge *bridge); +extern int drm_bridge_add(struct drm_bridge *bridge); +extern void drm_bridge_remove(struct drm_bridge *bridge); +extern struct drm_bridge *of_drm_find_bridge(struct device_node *np); +extern int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge); + +bool drm_bridge_mode_fixup(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); +void drm_bridge_disable(struct drm_bridge *bridge); +void drm_bridge_post_disable(struct drm_bridge *bridge); +void drm_bridge_mode_set(struct drm_bridge *bridge, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); +void drm_bridge_pre_enable(struct drm_bridge *bridge); +void drm_bridge_enable(struct drm_bridge *bridge); extern int drm_encoder_init(struct drm_device *dev, - struct drm_encoder *encoder, - const struct drm_encoder_funcs *funcs, - int encoder_type); + struct drm_encoder *encoder, + const struct drm_encoder_funcs *funcs, + int encoder_type); /** * drm_encoder_crtc_ok - can a given crtc drive a given encoder? 
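The bridge entry points above replace the old drm_bridge_init()/drm_bridge_cleanup() pair: a bridge is now registered globally with drm_bridge_add() and then bound to a device with drm_bridge_attach(). A minimal sketch of that flow, with hypothetical example_* callbacks filling the drm_bridge_funcs table declared earlier in this header:

	static int example_attach(struct drm_bridge *bridge)
	{
		return 0;	/* nothing to claim in this sketch */
	}

	static void example_enable(struct drm_bridge *bridge)
	{
		/* power up whatever sits behind the bridge */
	}

	static const struct drm_bridge_funcs example_bridge_funcs = {
		.attach = example_attach,
		.enable = example_enable,
	};

	static int example_bridge_register(struct drm_device *dev,
					   struct drm_bridge *bridge)
	{
		int ret;

		bridge->funcs = &example_bridge_funcs;

		ret = drm_bridge_add(bridge);		/* global bridge list */
		if (ret < 0)
			return ret;

		ret = drm_bridge_attach(dev, bridge);	/* bind to this device */
		if (ret < 0)
			drm_bridge_remove(bridge);

		return ret;
	}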
@@ -1180,17 +1256,22 @@ extern int drm_universal_plane_init(struct drm_device *dev, unsigned long possible_crtcs, const struct drm_plane_funcs *funcs, const uint32_t *formats, - uint32_t format_count, + unsigned int format_count, enum drm_plane_type type); extern int drm_plane_init(struct drm_device *dev, struct drm_plane *plane, unsigned long possible_crtcs, const struct drm_plane_funcs *funcs, - const uint32_t *formats, uint32_t format_count, + const uint32_t *formats, unsigned int format_count, bool is_primary); extern void drm_plane_cleanup(struct drm_plane *plane); extern unsigned int drm_plane_index(struct drm_plane *plane); +extern struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx); extern void drm_plane_force_disable(struct drm_plane *plane); +extern int drm_plane_check_pixel_format(const struct drm_plane *plane, + u32 format); +extern void drm_crtc_get_hv_timing(const struct drm_display_mode *mode, + int *hdisplay, int *vdisplay); extern int drm_crtc_check_viewport(const struct drm_crtc *crtc, int x, int y, const struct drm_display_mode *mode, @@ -1206,9 +1287,8 @@ extern const char *drm_get_dvi_i_select_name(int val); extern const char *drm_get_tv_subconnector_name(int val); extern const char *drm_get_tv_select_name(int val); extern void drm_fb_release(struct drm_file *file_priv); -extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group); -extern void drm_mode_group_destroy(struct drm_mode_group *group); -extern void drm_reinit_primary_mode_group(struct drm_device *dev); +extern void drm_property_destroy_user_blobs(struct drm_device *dev, + struct drm_file *file_priv); extern bool drm_probe_ddc(struct i2c_adapter *adapter); extern struct edid *drm_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter); @@ -1224,6 +1304,10 @@ int drm_mode_connector_set_tile_property(struct drm_connector *connector); extern int drm_mode_connector_update_edid_property(struct drm_connector *connector, const struct edid *edid); +extern int drm_display_info_set_bus_formats(struct drm_display_info *info, + const u32 *formats, + unsigned int num_formats); + static inline bool drm_property_type_is(struct drm_property *property, uint32_t type) { @@ -1279,17 +1363,30 @@ struct drm_property *drm_property_create_signed_range(struct drm_device *dev, int64_t min, int64_t max); struct drm_property *drm_property_create_object(struct drm_device *dev, int flags, const char *name, uint32_t type); +struct drm_property *drm_property_create_bool(struct drm_device *dev, int flags, + const char *name); +struct drm_property_blob *drm_property_create_blob(struct drm_device *dev, + size_t length, + const void *data); +struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev, + uint32_t id); +struct drm_property_blob *drm_property_reference_blob(struct drm_property_blob *blob); +void drm_property_unreference_blob(struct drm_property_blob *blob); extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property); extern int drm_property_add_enum(struct drm_property *property, int index, uint64_t value, const char *name); extern int drm_mode_create_dvi_i_properties(struct drm_device *dev); extern int drm_mode_create_tv_properties(struct drm_device *dev, unsigned int num_modes, - char *modes[]); + const char * const modes[]); extern int drm_mode_create_scaling_mode_property(struct drm_device *dev); extern int drm_mode_create_aspect_ratio_property(struct drm_device *dev); extern int 
drm_mode_create_dirty_info_property(struct drm_device *dev); extern int drm_mode_create_suggested_offset_properties(struct drm_device *dev); +extern bool drm_property_change_valid_get(struct drm_property *property, + uint64_t value, struct drm_mode_object **ref); +extern void drm_property_change_valid_put(struct drm_property *property, + struct drm_mode_object *ref); extern int drm_mode_connector_attach_encoder(struct drm_connector *connector, struct drm_encoder *encoder); @@ -1334,6 +1431,10 @@ extern int drm_mode_getproperty_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mode_getblob_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_mode_createblob_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); +extern int drm_mode_destroyblob_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); extern int drm_mode_connector_property_set_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mode_getencoder(struct drm_device *dev, @@ -1355,7 +1456,8 @@ extern void drm_set_preferred_mode(struct drm_connector *connector, int hpref, int vpref); extern int drm_edid_header_is_valid(const u8 *raw_edid); -extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid); +extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid, + bool *edid_corrupt); extern bool drm_edid_is_valid(struct edid *edid); extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev, @@ -1381,6 +1483,8 @@ extern int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data, extern int drm_mode_plane_set_obj_prop(struct drm_plane *plane, struct drm_property *property, uint64_t value); +extern int drm_mode_atomic_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, int *bpp); @@ -1436,17 +1540,46 @@ static inline struct drm_property *drm_property_find(struct drm_device *dev, return mo ? obj_to_property(mo) : NULL; } -static inline struct drm_property_blob * -drm_property_blob_find(struct drm_device *dev, uint32_t id) -{ - struct drm_mode_object *mo; - mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_BLOB); - return mo ? obj_to_blob(mo) : NULL; -} - /* Plane list iterator for legacy (overlay only) planes. */ -#define drm_for_each_legacy_plane(plane, planelist) \ - list_for_each_entry(plane, planelist, head) \ +#define drm_for_each_legacy_plane(plane, dev) \ + list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \ if (plane->type == DRM_PLANE_TYPE_OVERLAY) +#define drm_for_each_plane(plane, dev) \ + list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) + +#define drm_for_each_crtc(crtc, dev) \ + list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head) + +static inline void +assert_drm_connector_list_read_locked(struct drm_mode_config *mode_config) +{ + /* + * The connector hotadd/remove code currently grabs both locks when + * updating lists. Hence readers need only hold either of them to be + * safe and the check amounts to + * + * WARN_ON(not_holding(A) && not_holding(B)). 
+ */ + WARN_ON(!mutex_is_locked(&mode_config->mutex) && + !drm_modeset_is_locked(&mode_config->connection_mutex)); +} + +#define drm_for_each_connector(connector, dev) \ + for (assert_drm_connector_list_read_locked(&(dev)->mode_config), \ + connector = list_first_entry(&(dev)->mode_config.connector_list, \ + struct drm_connector, head); \ + &connector->head != (&(dev)->mode_config.connector_list); \ + connector = list_next_entry(connector, head)) + +#define drm_for_each_encoder(encoder, dev) \ + list_for_each_entry(encoder, &(dev)->mode_config.encoder_list, head) + +#define drm_for_each_fb(fb, dev) \ + for (WARN_ON(!mutex_is_locked(&(dev)->mode_config.fb_lock)), \ + fb = list_first_entry(&(dev)->mode_config.fb_list, \ + struct drm_framebuffer, head); \ + &fb->head != (&(dev)->mode_config.fb_list); \ + fb = list_next_entry(fb, head)) + #endif /* __DRM_CRTC_H__ */ diff --git a/drivers/include/drm/drm_crtc_helper.h b/drivers/include/drm/drm_crtc_helper.h index d56c1b49c..3002d3153 100644 --- a/drivers/include/drm/drm_crtc_helper.h +++ b/drivers/include/drm/drm_crtc_helper.h @@ -39,17 +39,38 @@ #include +#include + enum mode_set_atomic { LEAVE_ATOMIC_MODE_SET, ENTER_ATOMIC_MODE_SET, }; /** - * drm_crtc_helper_funcs - helper operations for CRTCs - * @mode_fixup: try to fixup proposed mode for this connector + * struct drm_crtc_helper_funcs - helper operations for CRTCs + * @dpms: set power state + * @prepare: prepare the CRTC, called before @mode_set + * @commit: commit changes to CRTC, called after @mode_set + * @mode_fixup: try to fixup proposed mode for this CRTC * @mode_set: set this mode + * @mode_set_nofb: set mode only (no scanout buffer attached) + * @mode_set_base: update the scanout buffer + * @mode_set_base_atomic: non-blocking mode set (used for kgdb support) + * @load_lut: load color palette + * @disable: disable CRTC when no longer in use + * @enable: enable CRTC + * @atomic_check: check for validity of an atomic state + * @atomic_begin: begin atomic update + * @atomic_flush: flush atomic update * * The helper operations are called by the mid-layer CRTC helper. + * + * Note that with atomic helpers @dpms, @prepare and @commit hooks are + * deprecated. Use @enable and @disable exclusively instead. + * + * With legacy crtc helpers there's a big semantic difference between @disable + * and the other hooks: @disable also needs to release any resources acquired in + * @mode_set (like shared PLLs). 
*/ struct drm_crtc_helper_funcs { /* @@ -68,6 +89,7 @@ struct drm_crtc_helper_funcs { int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, int x, int y, struct drm_framebuffer *old_fb); + /* Actually set the mode for atomic helpers, optional */ void (*mode_set_nofb)(struct drm_crtc *crtc); /* Move the crtc on the current fb to the given position *optional* */ @@ -80,22 +102,41 @@ struct drm_crtc_helper_funcs { /* reload the current crtc LUT */ void (*load_lut)(struct drm_crtc *crtc); - /* disable crtc when not in use - more explicit than dpms off */ void (*disable)(struct drm_crtc *crtc); + void (*enable)(struct drm_crtc *crtc); /* atomic helpers */ int (*atomic_check)(struct drm_crtc *crtc, struct drm_crtc_state *state); - void (*atomic_begin)(struct drm_crtc *crtc); - void (*atomic_flush)(struct drm_crtc *crtc); + void (*atomic_begin)(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state); + void (*atomic_flush)(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state); }; /** - * drm_encoder_helper_funcs - helper operations for encoders + * struct drm_encoder_helper_funcs - helper operations for encoders + * @dpms: set power state + * @save: save connector state + * @restore: restore connector state * @mode_fixup: try to fixup proposed mode for this connector - * @mode_set: set this mode + * @prepare: part of the disable sequence, called before the CRTC modeset + * @commit: called after the CRTC modeset + * @mode_set: set this mode, optional for atomic helpers + * @get_crtc: return CRTC that the encoder is currently attached to + * @detect: connection status detection + * @disable: disable encoder when not in use (overrides DPMS off) + * @enable: enable encoder + * @atomic_check: check for validity of an atomic update * * The helper operations are called by the mid-layer CRTC helper. + * + * Note that with atomic helpers @dpms, @prepare and @commit hooks are + * deprecated. Use @enable and @disable exclusively instead. + * + * With legacy crtc helpers there's a big semantic difference between @disable + * and the other hooks: @disable also needs to release any resources acquired in + * @mode_set (like shared PLLs). */ struct drm_encoder_helper_funcs { void (*dpms)(struct drm_encoder *encoder, int mode); @@ -114,22 +155,32 @@ struct drm_encoder_helper_funcs { /* detect for DAC style encoders */ enum drm_connector_status (*detect)(struct drm_encoder *encoder, struct drm_connector *connector); - /* disable encoder when not in use - more explicit than dpms off */ void (*disable)(struct drm_encoder *encoder); + + void (*enable)(struct drm_encoder *encoder); + + /* atomic helpers */ + int (*atomic_check)(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state); }; /** - * drm_connector_helper_funcs - helper operations for connectors + * struct drm_connector_helper_funcs - helper operations for connectors * @get_modes: get mode list for this connector - * @mode_valid (optional): is this mode valid on the given connector? + * @mode_valid: is this mode valid on the given connector? (optional) + * @best_encoder: return the preferred encoder for this connector + * @atomic_best_encoder: atomic version of @best_encoder * * The helper operations are called by the mid-layer CRTC helper. 
*/ struct drm_connector_helper_funcs { int (*get_modes)(struct drm_connector *connector); enum drm_mode_status (*mode_valid)(struct drm_connector *connector, - struct drm_display_mode *mode); + struct drm_display_mode *mode); struct drm_encoder *(*best_encoder)(struct drm_connector *connector); + struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector, + struct drm_connector_state *connector_state); }; extern void drm_helper_disable_unused_functions(struct drm_device *dev); @@ -141,29 +192,29 @@ extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc); extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder); -extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode); +extern int drm_helper_connector_dpms(struct drm_connector *connector, int mode); extern void drm_helper_move_panel_connectors_to_head(struct drm_device *); extern void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, - struct drm_mode_fb_cmd2 *mode_cmd); + struct drm_mode_fb_cmd2 *mode_cmd); static inline void drm_crtc_helper_add(struct drm_crtc *crtc, const struct drm_crtc_helper_funcs *funcs) { - crtc->helper_private = (void *)funcs; + crtc->helper_private = funcs; } static inline void drm_encoder_helper_add(struct drm_encoder *encoder, const struct drm_encoder_helper_funcs *funcs) { - encoder->helper_private = (void *)funcs; + encoder->helper_private = funcs; } static inline void drm_connector_helper_add(struct drm_connector *connector, const struct drm_connector_helper_funcs *funcs) { - connector->helper_private = (void *)funcs; + connector->helper_private = funcs; } extern void drm_helper_resume_force_mode(struct drm_device *dev); @@ -189,5 +240,6 @@ extern void drm_kms_helper_hotplug_event(struct drm_device *dev); extern void drm_kms_helper_poll_disable(struct drm_device *dev); extern void drm_kms_helper_poll_enable(struct drm_device *dev); +extern void drm_kms_helper_poll_enable_locked(struct drm_device *dev); #endif diff --git a/drivers/include/drm/drm_dp_helper.h b/drivers/include/drm/drm_dp_helper.h index ac69ba2b3..8e3d5cbcc 100644 --- a/drivers/include/drm/drm_dp_helper.h +++ b/drivers/include/drm/drm_dp_helper.h @@ -42,9 +42,11 @@ * 1.2 formally includes both eDP and DPI definitions. */ +#define DP_AUX_MAX_PAYLOAD_BYTES 16 + #define DP_AUX_I2C_WRITE 0x0 #define DP_AUX_I2C_READ 0x1 -#define DP_AUX_I2C_STATUS 0x2 +#define DP_AUX_I2C_WRITE_STATUS_UPDATE 0x2 #define DP_AUX_I2C_MOT 0x4 #define DP_AUX_NATIVE_WRITE 0x8 #define DP_AUX_NATIVE_READ 0x9 @@ -92,6 +94,15 @@ # define DP_MSA_TIMING_PAR_IGNORED (1 << 6) /* eDP */ # define DP_OUI_SUPPORT (1 << 7) +#define DP_RECEIVE_PORT_0_CAP_0 0x008 +# define DP_LOCAL_EDID_PRESENT (1 << 1) +# define DP_ASSOCIATED_TO_PRECEDING_PORT (1 << 2) + +#define DP_RECEIVE_PORT_0_BUFFER_SIZE 0x009 + +#define DP_RECEIVE_PORT_1_CAP_0 0x00a +#define DP_RECEIVE_PORT_1_BUFFER_SIZE 0x00b + #define DP_I2C_SPEED_CAP 0x00c /* DPI */ # define DP_I2C_SPEED_1K 0x01 # define DP_I2C_SPEED_5K 0x02 @@ -101,8 +112,19 @@ # define DP_I2C_SPEED_1M 0x20 #define DP_EDP_CONFIGURATION_CAP 0x00d /* XXX 1.2? */ +# define DP_ALTERNATE_SCRAMBLER_RESET_CAP (1 << 0) +# define DP_FRAMING_CHANGE_CAP (1 << 1) +# define DP_DPCD_DISPLAY_CONTROL_CAPABLE (1 << 3) /* edp v1.2 or higher */ + #define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? 
*/ +#define DP_ADAPTER_CAP 0x00f /* 1.2 */ +# define DP_FORCE_LOAD_SENSE_CAP (1 << 0) +# define DP_ALTERNATE_I2C_PATTERN_CAP (1 << 1) + +#define DP_SUPPORTED_LINK_RATES 0x010 /* eDP 1.4 */ +# define DP_MAX_SUPPORTED_RATES 8 /* 16-bit little-endian */ + /* Multiple stream transport */ #define DP_FAUX_CAP 0x020 /* 1.2 */ # define DP_FAUX_CAP_1 (1 << 0) @@ -110,10 +132,56 @@ #define DP_MSTM_CAP 0x021 /* 1.2 */ # define DP_MST_CAP (1 << 0) +#define DP_NUMBER_OF_AUDIO_ENDPOINTS 0x022 /* 1.2 */ + +/* AV_SYNC_DATA_BLOCK 1.2 */ +#define DP_AV_GRANULARITY 0x023 +# define DP_AG_FACTOR_MASK (0xf << 0) +# define DP_AG_FACTOR_3MS (0 << 0) +# define DP_AG_FACTOR_2MS (1 << 0) +# define DP_AG_FACTOR_1MS (2 << 0) +# define DP_AG_FACTOR_500US (3 << 0) +# define DP_AG_FACTOR_200US (4 << 0) +# define DP_AG_FACTOR_100US (5 << 0) +# define DP_AG_FACTOR_10US (6 << 0) +# define DP_AG_FACTOR_1US (7 << 0) +# define DP_VG_FACTOR_MASK (0xf << 4) +# define DP_VG_FACTOR_3MS (0 << 4) +# define DP_VG_FACTOR_2MS (1 << 4) +# define DP_VG_FACTOR_1MS (2 << 4) +# define DP_VG_FACTOR_500US (3 << 4) +# define DP_VG_FACTOR_200US (4 << 4) +# define DP_VG_FACTOR_100US (5 << 4) + +#define DP_AUD_DEC_LAT0 0x024 +#define DP_AUD_DEC_LAT1 0x025 + +#define DP_AUD_PP_LAT0 0x026 +#define DP_AUD_PP_LAT1 0x027 + +#define DP_VID_INTER_LAT 0x028 + +#define DP_VID_PROG_LAT 0x029 + +#define DP_REP_LAT 0x02a + +#define DP_AUD_DEL_INS0 0x02b +#define DP_AUD_DEL_INS1 0x02c +#define DP_AUD_DEL_INS2 0x02d +/* End of AV_SYNC_DATA_BLOCK */ + +#define DP_RECEIVER_ALPM_CAP 0x02e /* eDP 1.4 */ +# define DP_ALPM_CAP (1 << 0) + +#define DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP 0x02f /* eDP 1.4 */ +# define DP_AUX_FRAME_SYNC_CAP (1 << 0) + #define DP_GUID 0x030 /* 1.2 */ #define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */ # define DP_PSR_IS_SUPPORTED 1 +# define DP_PSR2_IS_SUPPORTED 2 /* eDP 1.4 */ + #define DP_PSR_CAPS 0x071 /* XXX 1.2? */ # define DP_PSR_NO_TRAIN_ON_EXIT 1 # define DP_PSR_SETUP_TIME_330 (0 << 1) @@ -153,6 +221,7 @@ /* link configuration */ #define DP_LINK_BW_SET 0x100 +# define DP_LINK_RATE_TABLE 0x00 /* eDP 1.4 */ # define DP_LINK_BW_1_62 0x06 # define DP_LINK_BW_2_7 0x0a # define DP_LINK_BW_5_4 0x14 /* 1.2 */ @@ -168,11 +237,12 @@ # define DP_TRAINING_PATTERN_3 3 /* 1.2 */ # define DP_TRAINING_PATTERN_MASK 0x3 -# define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2) -# define DP_LINK_QUAL_PATTERN_D10_2 (1 << 2) -# define DP_LINK_QUAL_PATTERN_ERROR_RATE (2 << 2) -# define DP_LINK_QUAL_PATTERN_PRBS7 (3 << 2) -# define DP_LINK_QUAL_PATTERN_MASK (3 << 2) +/* DPCD 1.1 only. For DPCD >= 1.2 see per-lane DP_LINK_QUAL_LANEn_SET */ +# define DP_LINK_QUAL_PATTERN_11_DISABLE (0 << 2) +# define DP_LINK_QUAL_PATTERN_11_D10_2 (1 << 2) +# define DP_LINK_QUAL_PATTERN_11_ERROR_RATE (2 << 2) +# define DP_LINK_QUAL_PATTERN_11_PRBS7 (3 << 2) +# define DP_LINK_QUAL_PATTERN_11_MASK (3 << 2) # define DP_RECOVERED_CLOCK_OUT_EN (1 << 4) # define DP_LINK_SCRAMBLING_DISABLE (1 << 5) @@ -215,17 +285,63 @@ /* bitmask as for DP_I2C_SPEED_CAP */ #define DP_EDP_CONFIGURATION_SET 0x10a /* XXX 1.2? 
*/ +# define DP_ALTERNATE_SCRAMBLER_RESET_ENABLE (1 << 0) +# define DP_FRAMING_CHANGE_ENABLE (1 << 1) +# define DP_PANEL_SELF_TEST_ENABLE (1 << 7) + +#define DP_LINK_QUAL_LANE0_SET 0x10b /* DPCD >= 1.2 */ +#define DP_LINK_QUAL_LANE1_SET 0x10c +#define DP_LINK_QUAL_LANE2_SET 0x10d +#define DP_LINK_QUAL_LANE3_SET 0x10e +# define DP_LINK_QUAL_PATTERN_DISABLE 0 +# define DP_LINK_QUAL_PATTERN_D10_2 1 +# define DP_LINK_QUAL_PATTERN_ERROR_RATE 2 +# define DP_LINK_QUAL_PATTERN_PRBS7 3 +# define DP_LINK_QUAL_PATTERN_80BIT_CUSTOM 4 +# define DP_LINK_QUAL_PATTERN_HBR2_EYE 5 +# define DP_LINK_QUAL_PATTERN_MASK 7 + +#define DP_TRAINING_LANE0_1_SET2 0x10f +#define DP_TRAINING_LANE2_3_SET2 0x110 +# define DP_LANE02_POST_CURSOR2_SET_MASK (3 << 0) +# define DP_LANE02_MAX_POST_CURSOR2_REACHED (1 << 2) +# define DP_LANE13_POST_CURSOR2_SET_MASK (3 << 4) +# define DP_LANE13_MAX_POST_CURSOR2_REACHED (1 << 6) #define DP_MSTM_CTRL 0x111 /* 1.2 */ # define DP_MST_EN (1 << 0) # define DP_UP_REQ_EN (1 << 1) # define DP_UPSTREAM_IS_SRC (1 << 2) +#define DP_AUDIO_DELAY0 0x112 /* 1.2 */ +#define DP_AUDIO_DELAY1 0x113 +#define DP_AUDIO_DELAY2 0x114 + +#define DP_LINK_RATE_SET 0x115 /* eDP 1.4 */ +# define DP_LINK_RATE_SET_SHIFT 0 +# define DP_LINK_RATE_SET_MASK (7 << 0) + +#define DP_RECEIVER_ALPM_CONFIG 0x116 /* eDP 1.4 */ +# define DP_ALPM_ENABLE (1 << 0) +# define DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE (1 << 1) + +#define DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF 0x117 /* eDP 1.4 */ +# define DP_AUX_FRAME_SYNC_ENABLE (1 << 0) +# define DP_IRQ_HPD_ENABLE (1 << 1) + +#define DP_UPSTREAM_DEVICE_DP_PWR_NEED 0x118 /* 1.2 */ +# define DP_PWR_NOT_NEEDED (1 << 0) + +#define DP_AUX_FRAME_SYNC_VALUE 0x15c /* eDP 1.4 */ +# define DP_AUX_FRAME_SYNC_VALID (1 << 0) + #define DP_PSR_EN_CFG 0x170 /* XXX 1.2? 
*/ # define DP_PSR_ENABLE (1 << 0) # define DP_PSR_MAIN_LINK_ACTIVE (1 << 1) # define DP_PSR_CRC_VERIFICATION (1 << 2) # define DP_PSR_FRAME_CAPTURE (1 << 3) +# define DP_PSR_SELECTIVE_UPDATE (1 << 4) +# define DP_PSR_IRQ_HPD_WITH_CRC_ERRORS (1 << 5) #define DP_ADAPTER_CTRL 0x1a0 # define DP_ADAPTER_CTRL_FORCE_LOAD_SENSE (1 << 0) @@ -304,7 +420,7 @@ #define DP_TEST_SINK_MISC 0x246 # define DP_TEST_CRC_SUPPORTED (1 << 5) -# define DP_TEST_COUNT_MASK 0x7 +# define DP_TEST_COUNT_MASK 0xf #define DP_TEST_RESPONSE 0x260 # define DP_TEST_ACK (1 << 0) @@ -332,6 +448,49 @@ # define DP_SET_POWER_D3 0x2 # define DP_SET_POWER_MASK 0x3 +#define DP_EDP_DPCD_REV 0x700 /* eDP 1.2 */ +# define DP_EDP_11 0x00 +# define DP_EDP_12 0x01 +# define DP_EDP_13 0x02 +# define DP_EDP_14 0x03 + +#define DP_EDP_GENERAL_CAP_1 0x701 + +#define DP_EDP_BACKLIGHT_ADJUSTMENT_CAP 0x702 + +#define DP_EDP_GENERAL_CAP_2 0x703 + +#define DP_EDP_GENERAL_CAP_3 0x704 /* eDP 1.4 */ + +#define DP_EDP_DISPLAY_CONTROL_REGISTER 0x720 + +#define DP_EDP_BACKLIGHT_MODE_SET_REGISTER 0x721 + +#define DP_EDP_BACKLIGHT_BRIGHTNESS_MSB 0x722 +#define DP_EDP_BACKLIGHT_BRIGHTNESS_LSB 0x723 + +#define DP_EDP_PWMGEN_BIT_COUNT 0x724 +#define DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN 0x725 +#define DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX 0x726 + +#define DP_EDP_BACKLIGHT_CONTROL_STATUS 0x727 + +#define DP_EDP_BACKLIGHT_FREQ_SET 0x728 + +#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_MSB 0x72a +#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_MID 0x72b +#define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_LSB 0x72c + +#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_MSB 0x72d +#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_MID 0x72e +#define DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB 0x72f + +#define DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET 0x732 +#define DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET 0x733 + +#define DP_EDP_REGIONAL_BACKLIGHT_BASE 0x740 /* eDP 1.4 */ +#define DP_EDP_REGIONAL_BACKLIGHT_0 0x741 /* eDP 1.4 */ + #define DP_SIDEBAND_MSG_DOWN_REQ_BASE 0x1000 /* 1.2 MST */ #define DP_SIDEBAND_MSG_UP_REP_BASE 0x1200 /* 1.2 MST */ #define DP_SIDEBAND_MSG_DOWN_REP_BASE 0x1400 /* 1.2 MST */ @@ -350,6 +509,7 @@ #define DP_PSR_ERROR_STATUS 0x2006 /* XXX 1.2? */ # define DP_PSR_LINK_CRC_ERROR (1 << 0) # define DP_PSR_RFB_STORAGE_ERROR (1 << 1) +# define DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR (1 << 2) /* eDP 1.4 */ #define DP_PSR_ESI 0x2007 /* XXX 1.2? 
*/ # define DP_PSR_CAPS_CHANGE (1 << 0) @@ -363,6 +523,9 @@ # define DP_PSR_SINK_INTERNAL_ERROR 7 # define DP_PSR_SINK_STATE_MASK 0x07 +#define DP_RECEIVER_ALPM_STATUS 0x200b /* eDP 1.4 */ +# define DP_ALPM_LOCK_TIMEOUT_ERROR (1 << 0) + /* DP 1.2 Sideband message defines */ /* peer device type - DP 1.2a Table 2-92 */ #define DP_PEER_DEVICE_NONE 0x0 @@ -405,6 +568,10 @@ #define MODE_I2C_READ 4 #define MODE_I2C_STOP 8 +/* DP 1.2 MST PORTs - Section 2.5.1 v1.2a spec */ +#define DP_MST_PHYSICAL_PORT_0 0 +#define DP_MST_LOGICAL_PORT_0 8 + #define DP_LINK_STATUS_SIZE 6 bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE], int lane_count); @@ -415,7 +582,8 @@ u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE], u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE], int lane); -#define DP_RECEIVER_CAP_SIZE 0xf +#define DP_BRANCH_OUI_HEADER_SIZE 0xc +#define DP_RECEIVER_CAP_SIZE 0xf #define EDP_PSR_RECEIVER_CAP_SIZE 2 void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]); @@ -470,6 +638,13 @@ drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP); } +static inline bool +drm_dp_tps3_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + return dpcd[DP_DPCD_REV] >= 0x12 && + dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED; +} + /* * DisplayPort AUX channel */ @@ -516,9 +691,12 @@ struct drm_dp_aux_msg { * An AUX channel can also be used to transport I2C messages to a sink. A * typical application of that is to access an EDID that's present in the * sink device. The .transfer() function can also be used to execute such - * transactions. The drm_dp_aux_register_i2c_bus() function registers an - * I2C adapter that can be passed to drm_probe_ddc(). Upon removal, drivers - * should call drm_dp_aux_unregister_i2c_bus() to remove the I2C adapter. + * transactions. The drm_dp_aux_register() function registers an I2C + * adapter that can be passed to drm_probe_ddc(). Upon removal, drivers + * should call drm_dp_aux_unregister() to remove the I2C adapter. + * The I2C adapter uses long transfers by default; if a partial response is + * received, the adapter will drop down to the size given by the partial + * response for this transaction only. * * Note that the aux helper code assumes that the .transfer() function * only modifies the reply field of the drm_dp_aux_msg structure. 
The @@ -586,6 +764,7 @@ struct drm_dp_link { int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link); int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link); +int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link); int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link); int drm_dp_aux_register(struct drm_dp_aux *aux); diff --git a/drivers/include/drm/drm_dp_mst_helper.h b/drivers/include/drm/drm_dp_mst_helper.h index 69cfd1f49..534009974 100644 --- a/drivers/include/drm/drm_dp_mst_helper.h +++ b/drivers/include/drm/drm_dp_mst_helper.h @@ -253,6 +253,7 @@ struct drm_dp_remote_dpcd_write { u8 *bytes; }; +#define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4 struct drm_dp_remote_i2c_read { u8 num_transactions; u8 port_number; @@ -262,7 +263,7 @@ struct drm_dp_remote_i2c_read { u8 *bytes; u8 no_stop_bit; u8 i2c_transaction_delay; - } transactions[4]; + } transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS]; u8 read_i2c_device_id; u8 num_bytes_read; }; @@ -374,6 +375,7 @@ struct drm_dp_mst_topology_mgr; struct drm_dp_mst_topology_cbs { /* create a connector for a port */ struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path); + void (*register_connector)(struct drm_connector *connector); void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_connector *connector); void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr); @@ -446,7 +448,7 @@ struct drm_dp_mst_topology_mgr { /* messages to be transmitted */ /* qlock protects the upq/downq and in_progress, the mstb tx_slots and txmsg->state once they are queued */ - struct mutex qlock; + struct mutex qlock; struct list_head tx_msg_downq; struct list_head tx_msg_upq; bool tx_down_in_progress; @@ -463,6 +465,10 @@ struct drm_dp_mst_topology_mgr { struct work_struct work; struct work_struct tx_work; + + struct list_head destroy_connector_list; + struct mutex destroy_connector_lock; + struct work_struct destroy_connector_work; }; int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, struct device *dev, struct drm_dp_aux *aux, int max_dpcd_transaction_bytes, int max_payloads, int conn_base_id); @@ -486,6 +492,8 @@ int drm_dp_calc_pbn_mode(int clock, int bpp); bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots); +int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); + void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); diff --git a/drivers/include/drm/drm_edid.h b/drivers/include/drm/drm_edid.h index 3df68df20..2af97691e 100644 --- a/drivers/include/drm/drm_edid.h +++ b/drivers/include/drm/drm_edid.h @@ -96,11 +96,11 @@ struct detailed_data_monitor_range { union { struct { u8 reserved; - u8 hfreq_start_khz; /* need to multiply by 2 */ - u8 c; /* need to divide by 2 */ - __le16 m; - u8 k; - u8 j; /* need to divide by 2 */ + u8 hfreq_start_khz; /* need to multiply by 2 */ + u8 c; /* need to divide by 2 */ + __le16 m; + u8 k; + u8 j; /* need to divide by 2 */ } __attribute__((packed)) gtf2; struct { u8 version; @@ -215,6 +215,8 @@ struct detailed_timing { #define DRM_ELD_VER 0 # define DRM_ELD_VER_SHIFT 3 # define DRM_ELD_VER_MASK (0x1f << 3) +# define DRM_ELD_VER_CEA861D (2 << 3) /* supports 861D or below */ +# define DRM_ELD_VER_CANNED (0x1f << 3) #define DRM_ELD_BASELINE_ELD_LEN 2 /* in dwords! 
*/ @@ -324,9 +326,8 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid); int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads); int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb); int drm_av_sync_delay(struct drm_connector *connector, - struct drm_display_mode *mode); -struct drm_connector *drm_select_eld(struct drm_encoder *encoder, - struct drm_display_mode *mode); + const struct drm_display_mode *mode); +struct drm_connector *drm_select_eld(struct drm_encoder *encoder); int drm_load_edid_firmware(struct drm_connector *connector); int @@ -345,6 +346,25 @@ static inline int drm_eld_mnl(const uint8_t *eld) return (eld[DRM_ELD_CEA_EDID_VER_MNL] & DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT; } +/** + * drm_eld_sad - Get ELD SAD structures. + * @eld: pointer to an eld memory structure with sad_count set + */ +static inline const uint8_t *drm_eld_sad(const uint8_t *eld) +{ + unsigned int ver, mnl; + + ver = (eld[DRM_ELD_VER] & DRM_ELD_VER_MASK) >> DRM_ELD_VER_SHIFT; + if (ver != 2 && ver != 31) + return NULL; + + mnl = drm_eld_mnl(eld); + if (mnl > 16) + return NULL; + + return eld + DRM_ELD_CEA_SAD(mnl, 0); +} + /** * drm_eld_sad_count - Get ELD SAD count. * @eld: pointer to an eld memory structure with sad_count set diff --git a/drivers/include/drm/drm_fb_helper.h b/drivers/include/drm/drm_fb_helper.h index 2e7d6322d..f42ca2aa8 100644 --- a/drivers/include/drm/drm_fb_helper.h +++ b/drivers/include/drm/drm_fb_helper.h @@ -44,6 +44,25 @@ struct drm_fb_helper_crtc { int x, y; }; +/** + * struct drm_fb_helper_surface_size - describes fbdev size and scanout surface size + * @fb_width: fbdev width + * @fb_height: fbdev height + * @surface_width: scanout buffer width + * @surface_height: scanout buffer height + * @surface_bpp: scanout buffer bpp + * @surface_depth: scanout buffer depth + * + * Note that the scanout surface width/height may be larger than the fbdev + * width/height. In case of multiple displays, the scanout surface is sized + * according to the largest width/height (so it is large enough for all CRTCs + * to scanout). But the fbdev width/height is sized to the minimum width/ + * height of all the displays. This ensures that fbcon fits on the smallest + * of the attached displays. + * + * So what is passed to drm_fb_helper_fill_var() should be fb_width/fb_height, + * rather than the surface size. + */ struct drm_fb_helper_surface_size { u32 fb_width; u32 fb_height; @@ -57,11 +76,11 @@ struct drm_fb_helper_surface_size { * struct drm_fb_helper_funcs - driver callbacks for the fbdev emulation library * @gamma_set: Set the given gamma lut register on the given crtc. * @gamma_get: Read the given gamma lut register on the given crtc, used to - * save the current lut when force-restoring the fbdev for e.g. - * kdbg. + * save the current lut when force-restoring the fbdev for e.g. + * kgdb. * @fb_probe: Driver callback to allocate and initialize the fbdev info * structure. Furthermore it also needs to allocate the drm - * framebuffer used to back the fbdev. + * framebuffer used to back the fbdev. * @initial_config: Setup an initial fbdev display configuration * * Driver callbacks used by the fbdev emulation helper library. 
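[Editor's note: the surface-size semantics documented above are easiest to see in an @fb_probe skeleton. A hedged sketch follows — not part of this patch; it assumes the ERR_PTR convention of drm_fb_helper_alloc_fbi() declared further below and stubs out the driver-specific scanout allocation.]

/* Illustration only: the backing fb is sized by surface_*, fbcon sees fb_*. */
static int example_fb_probe(struct drm_fb_helper *helper,
                            struct drm_fb_helper_surface_size *sizes)
{
        struct fb_info *info;

        info = drm_fb_helper_alloc_fbi(helper);
        if (IS_ERR(info))
                return PTR_ERR(info);

        /*
         * Driver-specific step, omitted here: allocate a scanout buffer of
         * surface_width x surface_height at surface_bpp and point
         * helper->fb at the resulting drm_framebuffer.
         */

        drm_fb_helper_fill_fix(info, helper->fb->pitches[0],
                               helper->fb->depth);
        /* fbdev size here, not the (possibly larger) surface size */
        drm_fb_helper_fill_var(info, helper, sizes->fb_width,
                               sizes->fb_height);
        return 0;
}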
@@ -85,6 +104,20 @@ struct drm_fb_helper_connector { struct drm_connector *connector; }; +/** + * struct drm_fb_helper - helper to emulate fbdev on top of kms + * @fb: Scanout framebuffer object + * @dev: DRM device + * @crtc_count: number of possible CRTCs + * @crtc_info: per-CRTC helper state (mode, x/y offset, etc) + * @connector_count: number of connected connectors + * @connector_info_alloc_count: size of connector_info + * @funcs: driver callbacks for fb helper + * @fbdev: emulated fbdev device info struct + * @pseudo_palette: fake palette of 16 colors + * @kernel_fb_list: list_head in kernel_fb_helper_list + * @delayed_hotplug: was there a hotplug while kms master active? + */ struct drm_fb_helper { struct drm_framebuffer *fb; struct drm_device *dev; @@ -101,13 +134,25 @@ struct drm_fb_helper { /* we got a hotplug but fbdev wasn't running the console delay until next set_par */ bool delayed_hotplug; + + /** + * @atomic: + * + * Use atomic updates for restore_fbdev_mode(), etc. This defaults to + * true if driver has DRIVER_ATOMIC feature flag, but drivers can + * override it to true after drm_fb_helper_init() if they support atomic + * modeset but do not yet advertise DRIVER_ATOMIC (note that fb-helper + * does not require ASYNC commits). + */ + bool atomic; }; +#ifdef CONFIG_DRM_FBDEV_EMULATION void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper, const struct drm_fb_helper_funcs *funcs); int drm_fb_helper_init(struct drm_device *dev, struct drm_fb_helper *helper, int crtc_count, - int max_conn); + int max_conn); void drm_fb_helper_fini(struct drm_fb_helper *helper); int drm_fb_helper_blank(int blank, struct fb_info *info); int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, @@ -116,16 +161,43 @@ int drm_fb_helper_set_par(struct fb_info *info); int drm_fb_helper_check_var(struct fb_var_screeninfo *var, struct fb_info *info); -bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper); +int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper); + +struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper); +void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper); +void drm_fb_helper_release_fbi(struct drm_fb_helper *fb_helper); void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper, uint32_t fb_width, uint32_t fb_height); void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, uint32_t depth); +void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper); + +ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf, + size_t count, loff_t *ppos); +ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf, + size_t count, loff_t *ppos); + +void drm_fb_helper_sys_fillrect(struct fb_info *info, + const struct fb_fillrect *rect); +void drm_fb_helper_sys_copyarea(struct fb_info *info, + const struct fb_copyarea *area); +void drm_fb_helper_sys_imageblit(struct fb_info *info, + const struct fb_image *image); + +void drm_fb_helper_cfb_fillrect(struct fb_info *info, + const struct fb_fillrect *rect); +void drm_fb_helper_cfb_copyarea(struct fb_info *info, + const struct fb_copyarea *area); +void drm_fb_helper_cfb_imageblit(struct fb_info *info, + const struct fb_image *image); + +void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, int state); + int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info); int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper); -bool 
drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel); +int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel); int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper); int drm_fb_helper_debug_enter(struct fb_info *info); int drm_fb_helper_debug_leave(struct fb_info *info); @@ -139,4 +211,188 @@ drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn, int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector); int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector); +#else +static inline void drm_fb_helper_prepare(struct drm_device *dev, + struct drm_fb_helper *helper, + const struct drm_fb_helper_funcs *funcs) +{ +} + +static inline int drm_fb_helper_init(struct drm_device *dev, + struct drm_fb_helper *helper, int crtc_count, + int max_conn) +{ + return 0; +} + +static inline void drm_fb_helper_fini(struct drm_fb_helper *helper) +{ +} + +static inline int drm_fb_helper_blank(int blank, struct fb_info *info) +{ + return 0; +} + +static inline int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + return 0; +} + +static inline int drm_fb_helper_set_par(struct fb_info *info) +{ + return 0; +} + +static inline int drm_fb_helper_check_var(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + return 0; +} + +static inline int +drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper) +{ + return 0; +} + +static inline struct fb_info * +drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper) +{ + return NULL; +} + +static inline void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper) +{ +} +static inline void drm_fb_helper_release_fbi(struct drm_fb_helper *fb_helper) +{ +} + +static inline void drm_fb_helper_fill_var(struct fb_info *info, + struct drm_fb_helper *fb_helper, + uint32_t fb_width, uint32_t fb_height) +{ +} + +static inline void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, + uint32_t depth) +{ +} + +static inline int drm_fb_helper_setcmap(struct fb_cmap *cmap, + struct fb_info *info) +{ + return 0; +} + +static inline void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper) +{ +} + +static inline ssize_t drm_fb_helper_sys_read(struct fb_info *info, + char __user *buf, size_t count, + loff_t *ppos) +{ + return -ENODEV; +} + +static inline ssize_t drm_fb_helper_sys_write(struct fb_info *info, + const char __user *buf, + size_t count, loff_t *ppos) +{ + return -ENODEV; +} + +static inline void drm_fb_helper_sys_fillrect(struct fb_info *info, + const struct fb_fillrect *rect) +{ +} + +static inline void drm_fb_helper_sys_copyarea(struct fb_info *info, + const struct fb_copyarea *area) +{ +} + +static inline void drm_fb_helper_sys_imageblit(struct fb_info *info, + const struct fb_image *image) +{ +} + +static inline void drm_fb_helper_cfb_fillrect(struct fb_info *info, + const struct fb_fillrect *rect) +{ +} + +static inline void drm_fb_helper_cfb_copyarea(struct fb_info *info, + const struct fb_copyarea *area) +{ +} + +static inline void drm_fb_helper_cfb_imageblit(struct fb_info *info, + const struct fb_image *image) +{ +} + +static inline void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, + int state) +{ +} + +static inline int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) +{ + return 0; +} + +static inline int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, + int bpp_sel) +{ + return 
0; +} + +static inline int +drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) +{ + return 0; +} + +static inline int drm_fb_helper_debug_enter(struct fb_info *info) +{ + return 0; +} + +static inline int drm_fb_helper_debug_leave(struct fb_info *info) +{ + return 0; +} + +static inline struct drm_display_mode * +drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, + int width, int height) +{ + return NULL; +} + +static inline struct drm_display_mode * +drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn, + int width, int height) +{ + return NULL; +} + +static inline int +drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, + struct drm_connector *connector) +{ + return 0; +} + +static inline int +drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, + struct drm_connector *connector) +{ + return 0; +} +#endif #endif diff --git a/drivers/include/drm/drm_gem.h b/drivers/include/drm/drm_gem.h index 780511a45..1e6ae1458 100644 --- a/drivers/include/drm/drm_gem.h +++ b/drivers/include/drm/drm_gem.h @@ -119,13 +119,6 @@ struct drm_gem_object { * simply leave it as NULL. */ struct dma_buf_attachment *import_attach; - - /** - * dumb - created as dumb buffer - * Whether the gem object was created using the dumb buffer interface - * as such it may not be used for GPU rendering. - */ - bool dumb; }; void drm_gem_object_release(struct drm_gem_object *obj); diff --git a/drivers/include/drm/drm_mipi_dsi.h b/drivers/include/drm/drm_mipi_dsi.h new file mode 100644 index 000000000..f1d8d0dbb --- /dev/null +++ b/drivers/include/drm/drm_mipi_dsi.h @@ -0,0 +1,259 @@ +/* + * MIPI DSI Bus + * + * Copyright (C) 2012-2013, Samsung Electronics, Co., Ltd. + * Andrzej Hajda + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __DRM_MIPI_DSI_H__ +#define __DRM_MIPI_DSI_H__ + +#include + +struct mipi_dsi_host; +struct mipi_dsi_device; + +/* request ACK from peripheral */ +#define MIPI_DSI_MSG_REQ_ACK BIT(0) +/* use Low Power Mode to transmit message */ +#define MIPI_DSI_MSG_USE_LPM BIT(1) + +/** + * struct mipi_dsi_msg - read/write DSI buffer + * @channel: virtual channel id + * @type: payload data type + * @flags: flags controlling this message transmission + * @tx_len: length of @tx_buf + * @tx_buf: data to be written + * @rx_len: length of @rx_buf + * @rx_buf: data to be read, or NULL + */ +struct mipi_dsi_msg { + u8 channel; + u8 type; + u16 flags; + + size_t tx_len; + const void *tx_buf; + + size_t rx_len; + void *rx_buf; +}; + +bool mipi_dsi_packet_format_is_short(u8 type); +bool mipi_dsi_packet_format_is_long(u8 type); + +/** + * struct mipi_dsi_packet - represents a MIPI DSI packet in protocol format + * @size: size (in bytes) of the packet + * @header: the four bytes that make up the header (Data ID, Word Count or + * Packet Data, and ECC) + * @payload_length: number of bytes in the payload + * @payload: a pointer to a buffer containing the payload, if any + */ +struct mipi_dsi_packet { + size_t size; + u8 header[4]; + size_t payload_length; + const u8 *payload; +}; + +int mipi_dsi_create_packet(struct mipi_dsi_packet *packet, + const struct mipi_dsi_msg *msg); + +/** + * struct mipi_dsi_host_ops - DSI bus operations + * @attach: attach DSI device to DSI host + * @detach: detach DSI device from DSI host + * @transfer: transmit a DSI packet + * + * DSI packets transmitted by .transfer() are passed in as mipi_dsi_msg + * structures. This structure contains information about the type of packet + * being transmitted as well as the transmit and receive buffers. When an + * error is encountered during transmission, this function will return a + * negative error code. On success it shall return the number of bytes + * transmitted for write packets or the number of bytes received for read + * packets. + * + * Note that typically DSI packet transmission is atomic, so the .transfer() + * function will seldom return anything other than the number of bytes + * contained in the transmit buffer on success. 
+ */ +struct mipi_dsi_host_ops { + int (*attach)(struct mipi_dsi_host *host, + struct mipi_dsi_device *dsi); + int (*detach)(struct mipi_dsi_host *host, + struct mipi_dsi_device *dsi); + ssize_t (*transfer)(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg); +}; + +/** + * struct mipi_dsi_host - DSI host device + * @dev: driver model device node for this DSI host + * @ops: DSI host operations + */ +struct mipi_dsi_host { + struct device *dev; + const struct mipi_dsi_host_ops *ops; +}; + +int mipi_dsi_host_register(struct mipi_dsi_host *host); +void mipi_dsi_host_unregister(struct mipi_dsi_host *host); + +/* DSI mode flags */ + +/* video mode */ +#define MIPI_DSI_MODE_VIDEO BIT(0) +/* video burst mode */ +#define MIPI_DSI_MODE_VIDEO_BURST BIT(1) +/* video pulse mode */ +#define MIPI_DSI_MODE_VIDEO_SYNC_PULSE BIT(2) +/* enable auto vertical count mode */ +#define MIPI_DSI_MODE_VIDEO_AUTO_VERT BIT(3) +/* enable hsync-end packets in vsync-pulse and v-porch area */ +#define MIPI_DSI_MODE_VIDEO_HSE BIT(4) +/* disable hfront-porch area */ +#define MIPI_DSI_MODE_VIDEO_HFP BIT(5) +/* disable hback-porch area */ +#define MIPI_DSI_MODE_VIDEO_HBP BIT(6) +/* disable hsync-active area */ +#define MIPI_DSI_MODE_VIDEO_HSA BIT(7) +/* flush display FIFO on vsync pulse */ +#define MIPI_DSI_MODE_VSYNC_FLUSH BIT(8) +/* disable EoT packets in HS mode */ +#define MIPI_DSI_MODE_EOT_PACKET BIT(9) +/* device supports non-continuous clock behavior (DSI spec 5.6.1) */ +#define MIPI_DSI_CLOCK_NON_CONTINUOUS BIT(10) +/* transmit data in low power */ +#define MIPI_DSI_MODE_LPM BIT(11) + +enum mipi_dsi_pixel_format { + MIPI_DSI_FMT_RGB888, + MIPI_DSI_FMT_RGB666, + MIPI_DSI_FMT_RGB666_PACKED, + MIPI_DSI_FMT_RGB565, +}; + +/** + * struct mipi_dsi_device - DSI peripheral device + * @host: DSI host for this peripheral + * @dev: driver model device node for this peripheral + * @channel: virtual channel assigned to the peripheral + * @format: pixel format for video mode + * @lanes: number of active data lanes + * @mode_flags: DSI operation mode related flags + */ +struct mipi_dsi_device { + struct mipi_dsi_host *host; + struct device dev; + + unsigned int channel; + unsigned int lanes; + enum mipi_dsi_pixel_format format; + unsigned long mode_flags; +}; + +static inline struct mipi_dsi_device *to_mipi_dsi_device(struct device *dev) +{ + return container_of(dev, struct mipi_dsi_device, dev); +} + +struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np); +int mipi_dsi_attach(struct mipi_dsi_device *dsi); +int mipi_dsi_detach(struct mipi_dsi_device *dsi); +int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi, + u16 value); + +ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload, + size_t size); +ssize_t mipi_dsi_generic_read(struct mipi_dsi_device *dsi, const void *params, + size_t num_params, void *data, size_t size); + +/** + * enum mipi_dsi_dcs_tear_mode - Tearing Effect Output Line mode + * @MIPI_DSI_DCS_TEAR_MODE_VBLANK: the TE output line consists of V-Blanking + * information only + * @MIPI_DSI_DCS_TEAR_MODE_VHBLANK : the TE output line consists of both + * V-Blanking and H-Blanking information + */ +enum mipi_dsi_dcs_tear_mode { + MIPI_DSI_DCS_TEAR_MODE_VBLANK, + MIPI_DSI_DCS_TEAR_MODE_VHBLANK, +}; + +#define MIPI_DSI_DCS_POWER_MODE_DISPLAY (1 << 2) +#define MIPI_DSI_DCS_POWER_MODE_NORMAL (1 << 3) +#define MIPI_DSI_DCS_POWER_MODE_SLEEP (1 << 4) +#define MIPI_DSI_DCS_POWER_MODE_PARTIAL (1 << 5) +#define MIPI_DSI_DCS_POWER_MODE_IDLE (1 << 6) + 
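[Editor's note: as a usage illustration for the DCS helpers declared next — panel drivers typically chain a few of them with datasheet-mandated delays. The sketch below is not part of this patch; the 120 ms / 20 ms settle times are placeholders, and kernel msleep() is assumed to be available.]

/* Illustrative DCS power-on sequence; delay values are panel-specific. */
static int example_panel_power_on(struct mipi_dsi_device *dsi)
{
        int ret;

        ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
        if (ret < 0)
                return ret;
        msleep(120);            /* sleep-out settle time, see panel datasheet */

        ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
        if (ret < 0)
                return ret;

        ret = mipi_dsi_dcs_set_display_on(dsi);
        if (ret < 0)
                return ret;
        msleep(20);             /* display-on settle time, see panel datasheet */

        return 0;
}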
+ssize_t mipi_dsi_dcs_write_buffer(struct mipi_dsi_device *dsi, + const void *data, size_t len); +ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, u8 cmd, + const void *data, size_t len); +ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data, + size_t len); +int mipi_dsi_dcs_nop(struct mipi_dsi_device *dsi); +int mipi_dsi_dcs_soft_reset(struct mipi_dsi_device *dsi); +int mipi_dsi_dcs_get_power_mode(struct mipi_dsi_device *dsi, u8 *mode); +int mipi_dsi_dcs_get_pixel_format(struct mipi_dsi_device *dsi, u8 *format); +int mipi_dsi_dcs_enter_sleep_mode(struct mipi_dsi_device *dsi); +int mipi_dsi_dcs_exit_sleep_mode(struct mipi_dsi_device *dsi); +int mipi_dsi_dcs_set_display_off(struct mipi_dsi_device *dsi); +int mipi_dsi_dcs_set_display_on(struct mipi_dsi_device *dsi); +int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start, + u16 end); +int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start, + u16 end); +int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi); +int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi, + enum mipi_dsi_dcs_tear_mode mode); +int mipi_dsi_dcs_set_pixel_format(struct mipi_dsi_device *dsi, u8 format); + +/** + * struct mipi_dsi_driver - DSI driver + * @driver: device driver model driver + * @probe: callback for device binding + * @remove: callback for device unbinding + * @shutdown: called at shutdown time to quiesce the device + */ +struct mipi_dsi_driver { + struct device_driver driver; + int(*probe)(struct mipi_dsi_device *dsi); + int(*remove)(struct mipi_dsi_device *dsi); + void (*shutdown)(struct mipi_dsi_device *dsi); +}; + +static inline struct mipi_dsi_driver * +to_mipi_dsi_driver(struct device_driver *driver) +{ + return container_of(driver, struct mipi_dsi_driver, driver); +} + +static inline void *mipi_dsi_get_drvdata(const struct mipi_dsi_device *dsi) +{ + return dev_get_drvdata(&dsi->dev); +} + +static inline void mipi_dsi_set_drvdata(struct mipi_dsi_device *dsi, void *data) +{ + dev_set_drvdata(&dsi->dev, data); +} + +int mipi_dsi_driver_register_full(struct mipi_dsi_driver *driver, + struct module *owner); +void mipi_dsi_driver_unregister(struct mipi_dsi_driver *driver); + +#define mipi_dsi_driver_register(driver) \ + mipi_dsi_driver_register_full(driver, THIS_MODULE) + +#define module_mipi_dsi_driver(__mipi_dsi_driver) \ + module_driver(__mipi_dsi_driver, mipi_dsi_driver_register, \ + mipi_dsi_driver_unregister) + +#endif /* __DRM_MIPI_DSI__ */ diff --git a/drivers/include/drm/drm_mm.h b/drivers/include/drm/drm_mm.h index 5651d5ac3..62c9546f7 100644 --- a/drivers/include/drm/drm_mm.h +++ b/drivers/include/drm/drm_mm.h @@ -68,8 +68,8 @@ struct drm_mm_node { unsigned scanned_preceeds_hole : 1; unsigned allocated : 1; unsigned long color; - unsigned long start; - unsigned long size; + u64 start; + u64 size; struct drm_mm *mm; }; @@ -82,16 +82,16 @@ struct drm_mm { unsigned int scan_check_range : 1; unsigned scan_alignment; unsigned long scan_color; - unsigned long scan_size; - unsigned long scan_hit_start; - unsigned long scan_hit_end; + u64 scan_size; + u64 scan_hit_start; + u64 scan_hit_end; unsigned scanned_blocks; - unsigned long scan_start; - unsigned long scan_end; + u64 scan_start; + u64 scan_end; struct drm_mm_node *prev_scanned_node; void (*color_adjust)(struct drm_mm_node *node, unsigned long color, - unsigned long *start, unsigned long *end); + u64 *start, u64 *end); }; /** @@ -124,7 +124,7 @@ static inline bool drm_mm_initialized(struct drm_mm *mm) return 
mm->hole_stack.next; } -static inline unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_node) +static inline u64 __drm_mm_hole_node_start(struct drm_mm_node *hole_node) { return hole_node->start + hole_node->size; } @@ -140,13 +140,13 @@ static inline unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_no * Returns: * Start of the subsequent hole. */ -static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node) +static inline u64 drm_mm_hole_node_start(struct drm_mm_node *hole_node) { BUG_ON(!hole_node->hole_follows); return __drm_mm_hole_node_start(hole_node); } -static inline unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node) +static inline u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node) { return list_entry(hole_node->node_list.next, struct drm_mm_node, node_list)->start; @@ -163,7 +163,7 @@ static inline unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node * Returns: * End of the subsequent hole. */ -static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node) +static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node) { return __drm_mm_hole_node_end(hole_node); } @@ -221,10 +221,10 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node) int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node); int drm_mm_insert_node_generic(struct drm_mm *mm, - struct drm_mm_node *node, - unsigned long size, - unsigned alignment, - unsigned long color, + struct drm_mm_node *node, + u64 size, + unsigned alignment, + unsigned long color, enum drm_mm_search_flags sflags, enum drm_mm_allocator_flags aflags); /** @@ -244,9 +244,9 @@ int drm_mm_insert_node_generic(struct drm_mm *mm, * 0 on success, -ENOSPC if there's no suitable hole. 
*/ static inline int drm_mm_insert_node(struct drm_mm *mm, - struct drm_mm_node *node, - unsigned long size, - unsigned alignment, + struct drm_mm_node *node, + u64 size, + unsigned alignment, enum drm_mm_search_flags flags) { return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags, @@ -254,12 +254,12 @@ static inline int drm_mm_insert_node(struct drm_mm *mm, } int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, - struct drm_mm_node *node, - unsigned long size, - unsigned alignment, - unsigned long color, - unsigned long start, - unsigned long end, + struct drm_mm_node *node, + u64 size, + unsigned alignment, + unsigned long color, + u64 start, + u64 end, enum drm_mm_search_flags sflags, enum drm_mm_allocator_flags aflags); /** @@ -282,10 +282,10 @@ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, */ static inline int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node, - unsigned long size, - unsigned alignment, - unsigned long start, - unsigned long end, + u64 size, + unsigned alignment, + u64 start, + u64 end, enum drm_mm_search_flags flags) { return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, @@ -296,21 +296,21 @@ static inline int drm_mm_insert_node_in_range(struct drm_mm *mm, void drm_mm_remove_node(struct drm_mm_node *node); void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new); void drm_mm_init(struct drm_mm *mm, - unsigned long start, - unsigned long size); + u64 start, + u64 size); void drm_mm_takedown(struct drm_mm *mm); bool drm_mm_clean(struct drm_mm *mm); void drm_mm_init_scan(struct drm_mm *mm, - unsigned long size, - unsigned alignment, + u64 size, + unsigned alignment, unsigned long color); void drm_mm_init_scan_with_range(struct drm_mm *mm, - unsigned long size, + u64 size, unsigned alignment, unsigned long color, - unsigned long start, - unsigned long end); + u64 start, + u64 end); bool drm_mm_scan_add_block(struct drm_mm_node *node); bool drm_mm_scan_remove_block(struct drm_mm_node *node); diff --git a/drivers/include/drm/drm_modes.h b/drivers/include/drm/drm_modes.h index 91d0582f9..08a8cac9e 100644 --- a/drivers/include/drm/drm_modes.h +++ b/drivers/include/drm/drm_modes.h @@ -90,6 +90,9 @@ enum drm_mode_status { #define CRTC_INTERLACE_HALVE_V (1 << 0) /* halve V values for interlacing */ #define CRTC_STEREO_DOUBLE (1 << 1) /* adjust timings for stereo modes */ +#define CRTC_NO_DBLSCAN (1 << 2) /* don't adjust doublescan */ +#define CRTC_NO_VSCAN (1 << 3) /* don't adjust vscan */ +#define CRTC_STEREO_DOUBLE_ONLY (CRTC_STEREO_DOUBLE | CRTC_NO_DBLSCAN | CRTC_NO_VSCAN) #define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF @@ -179,6 +182,10 @@ struct drm_cmdline_mode; struct drm_display_mode *drm_mode_create(struct drm_device *dev); void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode); +void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out, + const struct drm_display_mode *in); +int drm_mode_convert_umode(struct drm_display_mode *out, + const struct drm_mode_modeinfo *in); void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode); void drm_mode_debug_printmodeline(const struct drm_display_mode *mode); @@ -197,6 +204,8 @@ struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev, int GTF_K, int GTF_2J); void drm_display_mode_from_videomode(const struct videomode *vm, struct drm_display_mode *dmode); +void drm_display_mode_to_videomode(const struct drm_display_mode *dmode, + struct videomode *vm); 
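[Editor's note: the new drm_display_mode_to_videomode() declared just above is the inverse of the existing drm_display_mode_from_videomode(). A round-trip sketch follows — illustration only, not part of this patch; the zero-initialized stack-local scratch mode is an assumption of the sketch.]

/* Converting a drm_display_mode to the generic struct videomode and back. */
static void example_mode_round_trip(const struct drm_display_mode *dmode)
{
        struct videomode vm;
        struct drm_display_mode copy = { 0 };

        drm_display_mode_to_videomode(dmode, &vm);
        drm_display_mode_from_videomode(&vm, &copy);
        /* copy now carries the same pixel clock and h/v timings as dmode */
}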
int of_get_drm_display_mode(struct device_node *np, struct drm_display_mode *dmode, int index); @@ -217,9 +226,9 @@ bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2); /* for use by the crtc helper probe functions */ -void drm_mode_validate_size(struct drm_device *dev, - struct list_head *mode_list, - int maxX, int maxY); +enum drm_mode_status drm_mode_validate_basic(const struct drm_display_mode *mode); +enum drm_mode_status drm_mode_validate_size(const struct drm_display_mode *mode, + int maxX, int maxY); void drm_mode_prune_invalid(struct drm_device *dev, struct list_head *mode_list, bool verbose); void drm_mode_sort(struct list_head *mode_list); diff --git a/drivers/include/drm/drm_modeset_lock.h b/drivers/include/drm/drm_modeset_lock.h index 70595ff56..94938d893 100644 --- a/drivers/include/drm/drm_modeset_lock.h +++ b/drivers/include/drm/drm_modeset_lock.h @@ -43,19 +43,19 @@ struct drm_modeset_acquire_ctx { struct ww_acquire_ctx ww_ctx; - /** + /* * Contended lock: if a lock is contended you should only call * drm_modeset_backoff() which drops locks and slow-locks the * contended lock. */ struct drm_modeset_lock *contended; - /** + /* * list of held locks (drm_modeset_lock) */ struct list_head locked; - /** + /* * Trylock mode, use only for panic handlers! */ bool trylock_only; @@ -70,12 +70,12 @@ struct drm_modeset_acquire_ctx { * Used for locking CRTCs and other modeset resources. */ struct drm_modeset_lock { - /** + /* * modeset lock */ struct ww_mutex mutex; - /** + /* * Resources that are locked as part of an atomic update are added * to a list (so we know what to unlock at the end). */ @@ -130,7 +130,6 @@ struct drm_crtc; struct drm_plane; void drm_modeset_lock_all(struct drm_device *dev); -int __drm_modeset_lock_all(struct drm_device *dev, bool trylock); void drm_modeset_unlock_all(struct drm_device *dev); void drm_modeset_lock_crtc(struct drm_crtc *crtc, struct drm_plane *plane); diff --git a/drivers/include/drm/drm_panel.h b/drivers/include/drm/drm_panel.h new file mode 100644 index 000000000..13ff44b28 --- /dev/null +++ b/drivers/include/drm/drm_panel.h @@ -0,0 +1,145 @@ +/* + * Copyright (C) 2013, NVIDIA Corporation. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sub license, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __DRM_PANEL_H__ +#define __DRM_PANEL_H__ + +#include <linux/list.h> + +struct drm_connector; +struct drm_device; +struct drm_panel; +struct display_timing; + +/** + * struct drm_panel_funcs - perform operations on a given panel + * @disable: disable panel (turn off backlight, etc.) + * @unprepare: turn off panel + * @prepare: turn on panel and perform setup + * @enable: enable panel (turn on backlight, etc.) + * @get_modes: add modes to the connector that the panel is attached to and + * return the number of modes added + * @get_timings: copy display timings into the provided array and return + * the number of display timings available + * + * The .prepare() function is typically called before the display controller + * starts to transmit video data. Panel drivers can use this to turn the panel + * on and wait for it to become ready. If additional configuration is required + * (via a control bus such as I2C, SPI or DSI for example) this is a good time + * to do that. + * + * After the display controller has started transmitting video data, it's safe + * to call the .enable() function. This will typically enable the backlight to + * make the image on screen visible. Some panels require a certain amount of + * time or frames before the image is displayed. This function is responsible + * for taking this into account before enabling the backlight to avoid visual + * glitches. + * + * Before stopping video transmission from the display controller it can be + * necessary to turn off the panel to avoid visual glitches. This is done in + * the .disable() function. Analogously to .enable() this typically involves + * turning off the backlight and waiting for some time to make sure no image + * is visible on the panel. It is then safe for the display controller to + * cease transmission of video data. + * + * To save power when no video data is transmitted, a driver can power down + * the panel. This is the job of the .unprepare() function. + */ +struct drm_panel_funcs { + int (*disable)(struct drm_panel *panel); + int (*unprepare)(struct drm_panel *panel); + int (*prepare)(struct drm_panel *panel); + int (*enable)(struct drm_panel *panel); + int (*get_modes)(struct drm_panel *panel); + int (*get_timings)(struct drm_panel *panel, unsigned int num_timings, + struct display_timing *timings); +}; + +struct drm_panel { + struct drm_device *drm; + struct drm_connector *connector; + struct device *dev; + + const struct drm_panel_funcs *funcs; + + struct list_head list; +}; + +static inline int drm_panel_unprepare(struct drm_panel *panel) +{ + if (panel && panel->funcs && panel->funcs->unprepare) + return panel->funcs->unprepare(panel); + + return panel ? -ENOSYS : -EINVAL; +} + +static inline int drm_panel_disable(struct drm_panel *panel) +{ + if (panel && panel->funcs && panel->funcs->disable) + return panel->funcs->disable(panel); + + return panel ? -ENOSYS : -EINVAL; +} + +static inline int drm_panel_prepare(struct drm_panel *panel) +{ + if (panel && panel->funcs && panel->funcs->prepare) + return panel->funcs->prepare(panel); + + return panel ? -ENOSYS : -EINVAL; +} + +static inline int drm_panel_enable(struct drm_panel *panel) +{ + if (panel && panel->funcs && panel->funcs->enable) + return panel->funcs->enable(panel); + + return panel ? -ENOSYS : -EINVAL; +} + +static inline int drm_panel_get_modes(struct drm_panel *panel) +{ + if (panel && panel->funcs && panel->funcs->get_modes) + return panel->funcs->get_modes(panel); + + return panel ?
-ENOSYS : -EINVAL; +} + +void drm_panel_init(struct drm_panel *panel); + +int drm_panel_add(struct drm_panel *panel); +void drm_panel_remove(struct drm_panel *panel); + +int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector); +int drm_panel_detach(struct drm_panel *panel); + +#ifdef CONFIG_OF +struct drm_panel *of_drm_find_panel(struct device_node *np); +#else +static inline struct drm_panel *of_drm_find_panel(struct device_node *np) +{ + return NULL; +} +#endif + +#endif diff --git a/drivers/include/drm/drm_pciids.h b/drivers/include/drm/drm_pciids.h index 97d572555..56ffa88fc 100644 --- a/drivers/include/drm/drm_pciids.h +++ b/drivers/include/drm/drm_pciids.h @@ -172,6 +172,7 @@ {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ @@ -186,6 +187,7 @@ {0x1002, 0x6658, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ {0x1002, 0x665c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ {0x1002, 0x665d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x665f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ diff --git a/drivers/include/drm/drm_plane_helper.h b/drivers/include/drm/drm_plane_helper.h index a185392ca..5a7f9d4ef 100644 --- a/drivers/include/drm/drm_plane_helper.h +++ b/drivers/include/drm/drm_plane_helper.h @@ -43,62 +43,60 @@ * planes. */ -extern int drm_crtc_init(struct drm_device *dev, - struct drm_crtc *crtc, - const struct drm_crtc_funcs *funcs); +int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, + const struct drm_crtc_funcs *funcs); /** * drm_plane_helper_funcs - helper operations for planes * @prepare_fb: prepare a framebuffer for use by the plane * @cleanup_fb: cleanup a framebuffer when it's no longer used by the plane * @atomic_check: check that a given atomic state is valid and can be applied - * @atomic_update: apply an atomic state to the plane + * @atomic_update: apply an atomic state to the plane (mandatory) + * @atomic_disable: disable the plane * * The helper operations are called by the mid-layer CRTC helper.
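+ *
+ * A driver typically wires these up once at plane init time with
+ * drm_plane_helper_add(); a minimal sketch (the my_plane_* callbacks are
+ * illustrative, not part of this patch):
+ *
+ *	static const struct drm_plane_helper_funcs my_plane_helpers = {
+ *		.atomic_check = my_plane_atomic_check,
+ *		.atomic_update = my_plane_atomic_update,
+ *	};
+ *
+ *	drm_plane_helper_add(plane, &my_plane_helpers);
+ *
+ * Only the mandatory atomic_update hook must be non-NULL here.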
*/ struct drm_plane_helper_funcs { int (*prepare_fb)(struct drm_plane *plane, - struct drm_framebuffer *fb); + const struct drm_plane_state *new_state); void (*cleanup_fb)(struct drm_plane *plane, - struct drm_framebuffer *fb); + const struct drm_plane_state *old_state); int (*atomic_check)(struct drm_plane *plane, struct drm_plane_state *state); void (*atomic_update)(struct drm_plane *plane, struct drm_plane_state *old_state); + void (*atomic_disable)(struct drm_plane *plane, + struct drm_plane_state *old_state); }; static inline void drm_plane_helper_add(struct drm_plane *plane, const struct drm_plane_helper_funcs *funcs) { - plane->helper_private = (void *)funcs; + plane->helper_private = funcs; } -extern int drm_plane_helper_check_update(struct drm_plane *plane, - struct drm_crtc *crtc, - struct drm_framebuffer *fb, - struct drm_rect *src, - struct drm_rect *dest, - const struct drm_rect *clip, - int min_scale, - int max_scale, - bool can_position, - bool can_update_disabled, - bool *visible); -extern int drm_primary_helper_update(struct drm_plane *plane, - struct drm_crtc *crtc, - struct drm_framebuffer *fb, - int crtc_x, int crtc_y, - unsigned int crtc_w, unsigned int crtc_h, - uint32_t src_x, uint32_t src_y, - uint32_t src_w, uint32_t src_h); -extern int drm_primary_helper_disable(struct drm_plane *plane); -extern void drm_primary_helper_destroy(struct drm_plane *plane); +int drm_plane_helper_check_update(struct drm_plane *plane, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_rect *src, + struct drm_rect *dest, + const struct drm_rect *clip, + int min_scale, + int max_scale, + bool can_position, + bool can_update_disabled, + bool *visible); +int drm_primary_helper_update(struct drm_plane *plane, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h); +int drm_primary_helper_disable(struct drm_plane *plane); +void drm_primary_helper_destroy(struct drm_plane *plane); extern const struct drm_plane_funcs drm_primary_helper_funcs; -extern struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev, - const uint32_t *formats, - int num_formats); - int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, diff --git a/drivers/include/drm/drm_vma_manager.h b/drivers/include/drm/drm_vma_manager.h index 681d3a055..6534554e6 100644 --- a/drivers/include/drm/drm_vma_manager.h +++ b/drivers/include/drm/drm_vma_manager.h @@ -54,9 +54,6 @@ void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr, unsigned long page_offset, unsigned long size); void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr); -struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr, - unsigned long start, - unsigned long pages); struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr, unsigned long start, unsigned long pages); @@ -71,25 +68,25 @@ bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node, struct file *filp); /** - * drm_vma_offset_exact_lookup() - Look up node by exact address + * drm_vma_offset_exact_lookup_locked() - Look up node by exact address * @mgr: Manager object * @start: Start address (page-based, not byte-based) * @pages: Size of object (page-based) * - * Same as drm_vma_offset_lookup() but does not allow any offset into the node. 
+ * Same as drm_vma_offset_lookup_locked() but does not allow any offset into the node. * It only returns the exact object with the given start address. * * RETURNS: * Node at exact start address @start. */ static inline struct drm_vma_offset_node * -drm_vma_offset_exact_lookup(struct drm_vma_offset_manager *mgr, - unsigned long start, - unsigned long pages) +drm_vma_offset_exact_lookup_locked(struct drm_vma_offset_manager *mgr, + unsigned long start, + unsigned long pages) { struct drm_vma_offset_node *node; - node = drm_vma_offset_lookup(mgr, start, pages); + node = drm_vma_offset_lookup_locked(mgr, start, pages); return (node && node->vm_node.start == start) ? node : NULL; } @@ -97,7 +94,7 @@ drm_vma_offset_exact_lookup(struct drm_vma_offset_manager *mgr, * drm_vma_offset_lock_lookup() - Lock lookup for extended private use * @mgr: Manager object * - * Lock VMA manager for extended lookups. Only *_locked() VMA function calls + * Lock VMA manager for extended lookups. Only locked VMA function calls * are allowed while holding this lock. All other contexts are blocked from VMA * until the lock is released via drm_vma_offset_unlock_lookup(). * @@ -108,13 +105,6 @@ drm_vma_offset_exact_lookup(struct drm_vma_offset_manager *mgr, * not call any other VMA helpers while holding this lock. * * Note: You're in atomic-context while holding this lock! - * - * Example: - * drm_vma_offset_lock_lookup(mgr); - * node = drm_vma_offset_lookup_locked(mgr); - * if (node) - * kref_get_unless_zero(container_of(node, sth, entr)); - * drm_vma_offset_unlock_lookup(mgr); */ static inline void drm_vma_offset_lock_lookup(struct drm_vma_offset_manager *mgr) { diff --git a/drivers/include/drm/i915_component.h b/drivers/include/drm/i915_component.h new file mode 100644 index 000000000..30d89e0da --- /dev/null +++ b/drivers/include/drm/i915_component.h @@ -0,0 +1,78 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef _I915_COMPONENT_H_ +#define _I915_COMPONENT_H_ + +/* MAX_PORTS is the number of ports. + * It must be kept in sync with I915_MAX_PORTS defined in i915_drv.h. + * 5 should be enough, as only HSW, BDW and SKL need such a fix.
+ */ +#define MAX_PORTS 5 + +/** + * struct i915_audio_component_ops - callbacks defined in gfx driver + * @owner: the module owner + * @get_power: get the POWER_DOMAIN_AUDIO power well + * @put_power: put the POWER_DOMAIN_AUDIO power well + * @codec_wake_override: Enable/Disable generating the codec wake signal + * @get_cdclk_freq: get the Core Display Clock in kHz + * @sync_audio_rate: set n/cts based on the sample rate + */ +struct i915_audio_component_ops { + struct module *owner; + void (*get_power)(struct device *); + void (*put_power)(struct device *); + void (*codec_wake_override)(struct device *, bool enable); + int (*get_cdclk_freq)(struct device *); + int (*sync_audio_rate)(struct device *, int port, int rate); +}; + +struct i915_audio_component_audio_ops { + void *audio_ptr; + /** + * Called from the i915 driver to notify the HDA driver that + * pin sense and/or ELD information has changed. + * @audio_ptr: HDA driver object + * @port: Which port has changed (PORTA / PORTB / PORTC etc) + */ + void (*pin_eld_notify)(void *audio_ptr, int port); +}; + +/** + * struct i915_audio_component - used for audio video interaction + * @dev: the device from gfx driver + * @aud_sample_rate: the array of audio sample rates, one per port + * @ops: callbacks for the audio driver to call + * @audio_ops: callbacks called from the i915 driver + */ +struct i915_audio_component { + struct device *dev; + int aud_sample_rate[MAX_PORTS]; + + const struct i915_audio_component_ops *ops; + + const struct i915_audio_component_audio_ops *audio_ops; +}; + +#endif /* _I915_COMPONENT_H_ */ diff --git a/drivers/include/drm/i915_pciids.h b/drivers/include/drm/i915_pciids.h index 180ad0e6d..17c445612 100644 --- a/drivers/include/drm/i915_pciids.h +++ b/drivers/include/drm/i915_pciids.h @@ -208,40 +208,41 @@ #define INTEL_VLV_D_IDS(info) \ INTEL_VGA_DEVICE(0x0155, info) -#define _INTEL_BDW_M(gt, id, info) \ - INTEL_VGA_DEVICE((((gt) - 1) << 4) | (id), info) -#define _INTEL_BDW_D(gt, id, info) \ - INTEL_VGA_DEVICE((((gt) - 1) << 4) | (id), info) - -#define _INTEL_BDW_M_IDS(gt, info) \ - _INTEL_BDW_M(gt, 0x1602, info), /* ULT */ \ - _INTEL_BDW_M(gt, 0x1606, info), /* ULT */ \ - _INTEL_BDW_M(gt, 0x160B, info), /* Iris */ \ - _INTEL_BDW_M(gt, 0x160E, info) /* ULX */ - -#define _INTEL_BDW_D_IDS(gt, info) \ - _INTEL_BDW_D(gt, 0x160A, info), /* Server */ \ - _INTEL_BDW_D(gt, 0x160D, info) /* Workstation */ - -#define INTEL_BDW_GT12M_IDS(info) \ - _INTEL_BDW_M_IDS(1, info), \ - _INTEL_BDW_M_IDS(2, info) +#define INTEL_BDW_GT12M_IDS(info) \ + INTEL_VGA_DEVICE(0x1602, info), /* GT1 ULT */ \ + INTEL_VGA_DEVICE(0x1606, info), /* GT1 ULT */ \ + INTEL_VGA_DEVICE(0x160B, info), /* GT1 Iris */ \ + INTEL_VGA_DEVICE(0x160E, info), /* GT1 ULX */ \ + INTEL_VGA_DEVICE(0x1612, info), /* GT2 Halo */ \ + INTEL_VGA_DEVICE(0x1616, info), /* GT2 ULT */ \ + INTEL_VGA_DEVICE(0x161B, info), /* GT2 ULT */ \ + INTEL_VGA_DEVICE(0x161E, info) /* GT2 ULX */ #define INTEL_BDW_GT12D_IDS(info) \ - _INTEL_BDW_D_IDS(1, info), \ - _INTEL_BDW_D_IDS(2, info) + INTEL_VGA_DEVICE(0x160A, info), /* GT1 Server */ \ + INTEL_VGA_DEVICE(0x160D, info), /* GT1 Workstation */ \ + INTEL_VGA_DEVICE(0x161A, info), /* GT2 Server */ \ + INTEL_VGA_DEVICE(0x161D, info) /* GT2 Workstation */ #define INTEL_BDW_GT3M_IDS(info) \ - _INTEL_BDW_M_IDS(3, info) + INTEL_VGA_DEVICE(0x1622, info), /* ULT */ \ + INTEL_VGA_DEVICE(0x1626, info), /* ULT */ \ + INTEL_VGA_DEVICE(0x162B, info), /* Iris */ \ + INTEL_VGA_DEVICE(0x162E, info) /* ULX */ #define INTEL_BDW_GT3D_IDS(info) \ - _INTEL_BDW_D_IDS(3, info) +
INTEL_VGA_DEVICE(0x162A, info), /* Server */ \ + INTEL_VGA_DEVICE(0x162D, info) /* Workstation */ #define INTEL_BDW_RSVDM_IDS(info) \ - _INTEL_BDW_M_IDS(4, info) + INTEL_VGA_DEVICE(0x1632, info), /* ULT */ \ + INTEL_VGA_DEVICE(0x1636, info), /* ULT */ \ + INTEL_VGA_DEVICE(0x163B, info), /* Iris */ \ + INTEL_VGA_DEVICE(0x163E, info) /* ULX */ #define INTEL_BDW_RSVDD_IDS(info) \ - _INTEL_BDW_D_IDS(4, info) + INTEL_VGA_DEVICE(0x163A, info), /* Server */ \ + INTEL_VGA_DEVICE(0x163D, info) /* Workstation */ #define INTEL_BDW_M_IDS(info) \ INTEL_BDW_GT12M_IDS(info), \ @@ -259,21 +260,35 @@ INTEL_VGA_DEVICE(0x22b2, info), \ INTEL_VGA_DEVICE(0x22b3, info) -#define INTEL_SKL_IDS(info) \ - INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \ +#define INTEL_SKL_GT1_IDS(info) \ INTEL_VGA_DEVICE(0x1906, info), /* ULT GT1 */ \ - INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \ - INTEL_VGA_DEVICE(0x1921, info), /* ULT GT2F */ \ INTEL_VGA_DEVICE(0x190E, info), /* ULX GT1 */ \ + INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \ + INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \ + INTEL_VGA_DEVICE(0x190A, info) /* SRV GT1 */ + +#define INTEL_SKL_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \ + INTEL_VGA_DEVICE(0x1921, info), /* ULT GT2F */ \ INTEL_VGA_DEVICE(0x191E, info), /* ULX GT2 */ \ INTEL_VGA_DEVICE(0x1912, info), /* DT GT2 */ \ - INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \ INTEL_VGA_DEVICE(0x191B, info), /* Halo GT2 */ \ - INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \ - INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \ INTEL_VGA_DEVICE(0x191A, info), /* SRV GT2 */ \ - INTEL_VGA_DEVICE(0x192A, info), /* SRV GT3 */ \ - INTEL_VGA_DEVICE(0x190A, info), /* SRV GT1 */ \ INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */ +#define INTEL_SKL_GT3_IDS(info) \ + INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \ + INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \ + INTEL_VGA_DEVICE(0x192A, info) /* SRV GT3 */ + +#define INTEL_SKL_IDS(info) \ + INTEL_SKL_GT1_IDS(info), \ + INTEL_SKL_GT2_IDS(info), \ + INTEL_SKL_GT3_IDS(info) + +#define INTEL_BXT_IDS(info) \ + INTEL_VGA_DEVICE(0x0A84, info), \ + INTEL_VGA_DEVICE(0x1A84, info), \ + INTEL_VGA_DEVICE(0x5A84, info) + #endif /* _I915_PCIIDS_H */ diff --git a/drivers/include/drm/intel-gtt.h b/drivers/include/drm/intel-gtt.h index b08bdade6..9e9bddaa5 100644 --- a/drivers/include/drm/intel-gtt.h +++ b/drivers/include/drm/intel-gtt.h @@ -3,8 +3,8 @@ #ifndef _DRM_INTEL_GTT_H #define _DRM_INTEL_GTT_H -void intel_gtt_get(size_t *gtt_total, size_t *stolen_size, - phys_addr_t *mappable_base, unsigned long *mappable_end); +void intel_gtt_get(u64 *gtt_total, size_t *stolen_size, + phys_addr_t *mappable_base, u64 *mappable_end); int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, struct agp_bridge_data *bridge); diff --git a/drivers/include/drm/ttm/ttm_bo_api.h b/drivers/include/drm/ttm/ttm_bo_api.h index 176bd401f..c768ddfbe 100644 --- a/drivers/include/drm/ttm/ttm_bo_api.h +++ b/drivers/include/drm/ttm/ttm_bo_api.h @@ -71,9 +71,9 @@ struct ttm_place { * Structure indicating the placement you request for an object. */ struct ttm_placement { - unsigned num_placement; + unsigned num_placement; const struct ttm_place *placement; - unsigned num_busy_placement; + unsigned num_busy_placement; const struct ttm_place *busy_placement; }; @@ -249,7 +249,7 @@ struct ttm_buffer_object { * either of these locks held.
*/ - unsigned long offset; + uint64_t offset; /* GPU address space is independent of CPU word size */ uint32_t cur_placement; struct sg_table *sg; diff --git a/drivers/include/drm/ttm/ttm_bo_driver.h b/drivers/include/drm/ttm/ttm_bo_driver.h index 3c667e9dd..c445bda52 100644 --- a/drivers/include/drm/ttm/ttm_bo_driver.h +++ b/drivers/include/drm/ttm/ttm_bo_driver.h @@ -277,7 +277,7 @@ struct ttm_mem_type_manager { bool has_type; bool use_type; uint32_t flags; - unsigned long gpu_offset; + uint64_t gpu_offset; /* GPU address space is independent of CPU word size */ uint64_t size; uint32_t available_caching; uint32_t default_caching; @@ -441,7 +441,7 @@ struct ttm_bo_driver { */ struct ttm_bo_global_ref { - struct drm_global_reference ref; + struct drm_global_reference ref; struct ttm_mem_global *mem_glob; }; @@ -776,9 +776,9 @@ extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); * be returned if @use_ticket is set to true. */ static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo, - bool interruptible, - bool no_wait, bool use_ticket, - struct ww_acquire_ctx *ticket) + bool interruptible, + bool no_wait, bool use_ticket, + struct ww_acquire_ctx *ticket) { int ret = 0; @@ -846,7 +846,7 @@ static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo, * be returned if @use_ticket is set to true. */ static inline int ttm_bo_reserve(struct ttm_buffer_object *bo, - bool interruptible, + bool interruptible, bool no_wait, bool use_ticket, struct ww_acquire_ctx *ticket) { @@ -872,7 +872,7 @@ static inline int ttm_bo_reserve(struct ttm_buffer_object *bo, * held by us, this function cannot deadlock any more. */ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, - bool interruptible, + bool interruptible, struct ww_acquire_ctx *ticket) { int ret = 0; diff --git a/drivers/include/drm/ttm/ttm_lock.h b/drivers/include/drm/ttm/ttm_lock.h index 7cd6fb886..2902beb5f 100644 --- a/drivers/include/drm/ttm/ttm_lock.h +++ b/drivers/include/drm/ttm/ttm_lock.h @@ -51,7 +51,7 @@ #include #include -//#include +#include /** * struct ttm_lock diff --git a/drivers/include/drm/ttm/ttm_memory.h b/drivers/include/drm/ttm/ttm_memory.h index 8c1bd961a..72dcbe81d 100644 --- a/drivers/include/drm/ttm/ttm_memory.h +++ b/drivers/include/drm/ttm/ttm_memory.h @@ -75,7 +75,7 @@ struct ttm_mem_shrink { #define TTM_MEM_MAX_ZONES 2 struct ttm_mem_zone; struct ttm_mem_global { - struct kobject kobj; + struct kobject kobj; struct ttm_mem_shrink *shrink; struct workqueue_struct *swap_queue; struct work_struct work; diff --git a/drivers/include/linux/atomic.h b/drivers/include/linux/atomic.h index 5b08a8540..301de78d6 100644 --- a/drivers/include/linux/atomic.h +++ b/drivers/include/linux/atomic.h @@ -2,6 +2,426 @@ #ifndef _LINUX_ATOMIC_H #define _LINUX_ATOMIC_H #include +#include + +/* + * Relaxed variants of xchg, cmpxchg and some atomic operations. + * + * We support four variants: + * + * - Fully ordered: The default implementation, no suffix required. + * - Acquire: Provides ACQUIRE semantics, _acquire suffix. + * - Release: Provides RELEASE semantics, _release suffix. + * - Relaxed: No ordering guarantees, _relaxed suffix. + * + * For compound atomics performing both a load and a store, ACQUIRE + * semantics apply only to the load and RELEASE semantics only to the + * store portion of the operation. Note that a failed cmpxchg_acquire + * does -not- imply any memory ordering constraints. + * + * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions. 
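+ *
+ * Illustrative pairing (a sketch, not part of this header): a hand-rolled
+ * spinlock can use the ACQUIRE variant on the winning cmpxchg and the
+ * RELEASE variant on the store that drops the lock, instead of two fully
+ * ordered operations:
+ *
+ *	while (atomic_cmpxchg_acquire(&lock, 0, 1) != 0)
+ *		cpu_relax();
+ *	... critical section ...
+ *	atomic_set_release(&lock, 0);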
+ */ + +#ifndef atomic_read_acquire +#define atomic_read_acquire(v) smp_load_acquire(&(v)->counter) +#endif + +#ifndef atomic_set_release +#define atomic_set_release(v, i) smp_store_release(&(v)->counter, (i)) +#endif + +/* + * The idea here is to build acquire/release variants by adding explicit + * barriers on top of the relaxed variant. In the case where the relaxed + * variant is already fully ordered, no additional barriers are needed. + */ +#define __atomic_op_acquire(op, args...) \ +({ \ + typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \ + smp_mb__after_atomic(); \ + __ret; \ +}) + +#define __atomic_op_release(op, args...) \ +({ \ + smp_mb__before_atomic(); \ + op##_relaxed(args); \ +}) + +#define __atomic_op_fence(op, args...) \ +({ \ + typeof(op##_relaxed(args)) __ret; \ + smp_mb__before_atomic(); \ + __ret = op##_relaxed(args); \ + smp_mb__after_atomic(); \ + __ret; \ +}) + +/* atomic_add_return_relaxed */ +#ifndef atomic_add_return_relaxed +#define atomic_add_return_relaxed atomic_add_return +#define atomic_add_return_acquire atomic_add_return +#define atomic_add_return_release atomic_add_return + +#else /* atomic_add_return_relaxed */ + +#ifndef atomic_add_return_acquire +#define atomic_add_return_acquire(...) \ + __atomic_op_acquire(atomic_add_return, __VA_ARGS__) +#endif + +#ifndef atomic_add_return_release +#define atomic_add_return_release(...) \ + __atomic_op_release(atomic_add_return, __VA_ARGS__) +#endif + +#ifndef atomic_add_return +#define atomic_add_return(...) \ + __atomic_op_fence(atomic_add_return, __VA_ARGS__) +#endif +#endif /* atomic_add_return_relaxed */ + +/* atomic_inc_return_relaxed */ +#ifndef atomic_inc_return_relaxed +#define atomic_inc_return_relaxed atomic_inc_return +#define atomic_inc_return_acquire atomic_inc_return +#define atomic_inc_return_release atomic_inc_return + +#else /* atomic_inc_return_relaxed */ + +#ifndef atomic_inc_return_acquire +#define atomic_inc_return_acquire(...) \ + __atomic_op_acquire(atomic_inc_return, __VA_ARGS__) +#endif + +#ifndef atomic_inc_return_release +#define atomic_inc_return_release(...) \ + __atomic_op_release(atomic_inc_return, __VA_ARGS__) +#endif + +#ifndef atomic_inc_return +#define atomic_inc_return(...) \ + __atomic_op_fence(atomic_inc_return, __VA_ARGS__) +#endif +#endif /* atomic_inc_return_relaxed */ + +/* atomic_sub_return_relaxed */ +#ifndef atomic_sub_return_relaxed +#define atomic_sub_return_relaxed atomic_sub_return +#define atomic_sub_return_acquire atomic_sub_return +#define atomic_sub_return_release atomic_sub_return + +#else /* atomic_sub_return_relaxed */ + +#ifndef atomic_sub_return_acquire +#define atomic_sub_return_acquire(...) \ + __atomic_op_acquire(atomic_sub_return, __VA_ARGS__) +#endif + +#ifndef atomic_sub_return_release +#define atomic_sub_return_release(...) \ + __atomic_op_release(atomic_sub_return, __VA_ARGS__) +#endif + +#ifndef atomic_sub_return +#define atomic_sub_return(...) \ + __atomic_op_fence(atomic_sub_return, __VA_ARGS__) +#endif +#endif /* atomic_sub_return_relaxed */ + +/* atomic_dec_return_relaxed */ +#ifndef atomic_dec_return_relaxed +#define atomic_dec_return_relaxed atomic_dec_return +#define atomic_dec_return_acquire atomic_dec_return +#define atomic_dec_return_release atomic_dec_return + +#else /* atomic_dec_return_relaxed */ + +#ifndef atomic_dec_return_acquire +#define atomic_dec_return_acquire(...) \ + __atomic_op_acquire(atomic_dec_return, __VA_ARGS__) +#endif + +#ifndef atomic_dec_return_release +#define atomic_dec_return_release(...) 
\ + __atomic_op_release(atomic_dec_return, __VA_ARGS__) +#endif + +#ifndef atomic_dec_return +#define atomic_dec_return(...) \ + __atomic_op_fence(atomic_dec_return, __VA_ARGS__) +#endif +#endif /* atomic_dec_return_relaxed */ + +/* atomic_xchg_relaxed */ +#ifndef atomic_xchg_relaxed +#define atomic_xchg_relaxed atomic_xchg +#define atomic_xchg_acquire atomic_xchg +#define atomic_xchg_release atomic_xchg + +#else /* atomic_xchg_relaxed */ + +#ifndef atomic_xchg_acquire +#define atomic_xchg_acquire(...) \ + __atomic_op_acquire(atomic_xchg, __VA_ARGS__) +#endif + +#ifndef atomic_xchg_release +#define atomic_xchg_release(...) \ + __atomic_op_release(atomic_xchg, __VA_ARGS__) +#endif + +#ifndef atomic_xchg +#define atomic_xchg(...) \ + __atomic_op_fence(atomic_xchg, __VA_ARGS__) +#endif +#endif /* atomic_xchg_relaxed */ + +/* atomic_cmpxchg_relaxed */ +#ifndef atomic_cmpxchg_relaxed +#define atomic_cmpxchg_relaxed atomic_cmpxchg +#define atomic_cmpxchg_acquire atomic_cmpxchg +#define atomic_cmpxchg_release atomic_cmpxchg + +#else /* atomic_cmpxchg_relaxed */ + +#ifndef atomic_cmpxchg_acquire +#define atomic_cmpxchg_acquire(...) \ + __atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__) +#endif + +#ifndef atomic_cmpxchg_release +#define atomic_cmpxchg_release(...) \ + __atomic_op_release(atomic_cmpxchg, __VA_ARGS__) +#endif + +#ifndef atomic_cmpxchg +#define atomic_cmpxchg(...) \ + __atomic_op_fence(atomic_cmpxchg, __VA_ARGS__) +#endif +#endif /* atomic_cmpxchg_relaxed */ + +#ifndef atomic64_read_acquire +#define atomic64_read_acquire(v) smp_load_acquire(&(v)->counter) +#endif + +#ifndef atomic64_set_release +#define atomic64_set_release(v, i) smp_store_release(&(v)->counter, (i)) +#endif + +/* atomic64_add_return_relaxed */ +#ifndef atomic64_add_return_relaxed +#define atomic64_add_return_relaxed atomic64_add_return +#define atomic64_add_return_acquire atomic64_add_return +#define atomic64_add_return_release atomic64_add_return + +#else /* atomic64_add_return_relaxed */ + +#ifndef atomic64_add_return_acquire +#define atomic64_add_return_acquire(...) \ + __atomic_op_acquire(atomic64_add_return, __VA_ARGS__) +#endif + +#ifndef atomic64_add_return_release +#define atomic64_add_return_release(...) \ + __atomic_op_release(atomic64_add_return, __VA_ARGS__) +#endif + +#ifndef atomic64_add_return +#define atomic64_add_return(...) \ + __atomic_op_fence(atomic64_add_return, __VA_ARGS__) +#endif +#endif /* atomic64_add_return_relaxed */ + +/* atomic64_inc_return_relaxed */ +#ifndef atomic64_inc_return_relaxed +#define atomic64_inc_return_relaxed atomic64_inc_return +#define atomic64_inc_return_acquire atomic64_inc_return +#define atomic64_inc_return_release atomic64_inc_return + +#else /* atomic64_inc_return_relaxed */ + +#ifndef atomic64_inc_return_acquire +#define atomic64_inc_return_acquire(...) \ + __atomic_op_acquire(atomic64_inc_return, __VA_ARGS__) +#endif + +#ifndef atomic64_inc_return_release +#define atomic64_inc_return_release(...) \ + __atomic_op_release(atomic64_inc_return, __VA_ARGS__) +#endif + +#ifndef atomic64_inc_return +#define atomic64_inc_return(...) 
\ + __atomic_op_fence(atomic64_inc_return, __VA_ARGS__) +#endif +#endif /* atomic64_inc_return_relaxed */ + + +/* atomic64_sub_return_relaxed */ +#ifndef atomic64_sub_return_relaxed +#define atomic64_sub_return_relaxed atomic64_sub_return +#define atomic64_sub_return_acquire atomic64_sub_return +#define atomic64_sub_return_release atomic64_sub_return + +#else /* atomic64_sub_return_relaxed */ + +#ifndef atomic64_sub_return_acquire +#define atomic64_sub_return_acquire(...) \ + __atomic_op_acquire(atomic64_sub_return, __VA_ARGS__) +#endif + +#ifndef atomic64_sub_return_release +#define atomic64_sub_return_release(...) \ + __atomic_op_release(atomic64_sub_return, __VA_ARGS__) +#endif + +#ifndef atomic64_sub_return +#define atomic64_sub_return(...) \ + __atomic_op_fence(atomic64_sub_return, __VA_ARGS__) +#endif +#endif /* atomic64_sub_return_relaxed */ + +/* atomic64_dec_return_relaxed */ +#ifndef atomic64_dec_return_relaxed +#define atomic64_dec_return_relaxed atomic64_dec_return +#define atomic64_dec_return_acquire atomic64_dec_return +#define atomic64_dec_return_release atomic64_dec_return + +#else /* atomic64_dec_return_relaxed */ + +#ifndef atomic64_dec_return_acquire +#define atomic64_dec_return_acquire(...) \ + __atomic_op_acquire(atomic64_dec_return, __VA_ARGS__) +#endif + +#ifndef atomic64_dec_return_release +#define atomic64_dec_return_release(...) \ + __atomic_op_release(atomic64_dec_return, __VA_ARGS__) +#endif + +#ifndef atomic64_dec_return +#define atomic64_dec_return(...) \ + __atomic_op_fence(atomic64_dec_return, __VA_ARGS__) +#endif +#endif /* atomic64_dec_return_relaxed */ + +/* atomic64_xchg_relaxed */ +#ifndef atomic64_xchg_relaxed +#define atomic64_xchg_relaxed atomic64_xchg +#define atomic64_xchg_acquire atomic64_xchg +#define atomic64_xchg_release atomic64_xchg + +#else /* atomic64_xchg_relaxed */ + +#ifndef atomic64_xchg_acquire +#define atomic64_xchg_acquire(...) \ + __atomic_op_acquire(atomic64_xchg, __VA_ARGS__) +#endif + +#ifndef atomic64_xchg_release +#define atomic64_xchg_release(...) \ + __atomic_op_release(atomic64_xchg, __VA_ARGS__) +#endif + +#ifndef atomic64_xchg +#define atomic64_xchg(...) \ + __atomic_op_fence(atomic64_xchg, __VA_ARGS__) +#endif +#endif /* atomic64_xchg_relaxed */ + +/* atomic64_cmpxchg_relaxed */ +#ifndef atomic64_cmpxchg_relaxed +#define atomic64_cmpxchg_relaxed atomic64_cmpxchg +#define atomic64_cmpxchg_acquire atomic64_cmpxchg +#define atomic64_cmpxchg_release atomic64_cmpxchg + +#else /* atomic64_cmpxchg_relaxed */ + +#ifndef atomic64_cmpxchg_acquire +#define atomic64_cmpxchg_acquire(...) \ + __atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__) +#endif + +#ifndef atomic64_cmpxchg_release +#define atomic64_cmpxchg_release(...) \ + __atomic_op_release(atomic64_cmpxchg, __VA_ARGS__) +#endif + +#ifndef atomic64_cmpxchg +#define atomic64_cmpxchg(...) \ + __atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__) +#endif +#endif /* atomic64_cmpxchg_relaxed */ + +/* cmpxchg_relaxed */ +#ifndef cmpxchg_relaxed +#define cmpxchg_relaxed cmpxchg +#define cmpxchg_acquire cmpxchg +#define cmpxchg_release cmpxchg + +#else /* cmpxchg_relaxed */ + +#ifndef cmpxchg_acquire +#define cmpxchg_acquire(...) \ + __atomic_op_acquire(cmpxchg, __VA_ARGS__) +#endif + +#ifndef cmpxchg_release +#define cmpxchg_release(...) \ + __atomic_op_release(cmpxchg, __VA_ARGS__) +#endif + +#ifndef cmpxchg +#define cmpxchg(...) 
\ + __atomic_op_fence(cmpxchg, __VA_ARGS__) +#endif +#endif /* cmpxchg_relaxed */ + +/* cmpxchg64_relaxed */ +#ifndef cmpxchg64_relaxed +#define cmpxchg64_relaxed cmpxchg64 +#define cmpxchg64_acquire cmpxchg64 +#define cmpxchg64_release cmpxchg64 + +#else /* cmpxchg64_relaxed */ + +#ifndef cmpxchg64_acquire +#define cmpxchg64_acquire(...) \ + __atomic_op_acquire(cmpxchg64, __VA_ARGS__) +#endif + +#ifndef cmpxchg64_release +#define cmpxchg64_release(...) \ + __atomic_op_release(cmpxchg64, __VA_ARGS__) +#endif + +#ifndef cmpxchg64 +#define cmpxchg64(...) \ + __atomic_op_fence(cmpxchg64, __VA_ARGS__) +#endif +#endif /* cmpxchg64_relaxed */ + +/* xchg_relaxed */ +#ifndef xchg_relaxed +#define xchg_relaxed xchg +#define xchg_acquire xchg +#define xchg_release xchg + +#else /* xchg_relaxed */ + +#ifndef xchg_acquire +#define xchg_acquire(...) __atomic_op_acquire(xchg, __VA_ARGS__) +#endif + +#ifndef xchg_release +#define xchg_release(...) __atomic_op_release(xchg, __VA_ARGS__) +#endif + +#ifndef xchg +#define xchg(...) __atomic_op_fence(xchg, __VA_ARGS__) +#endif +#endif /* xchg_relaxed */ /** * atomic_add_unless - add unless the number is already a given value @@ -28,6 +448,23 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) #endif +#ifndef atomic_andnot +static inline void atomic_andnot(int i, atomic_t *v) +{ + atomic_and(~i, v); +} +#endif + +static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v) +{ + atomic_andnot(mask, v); +} + +static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v) +{ + atomic_or(mask, v); +} + /** * atomic_inc_not_zero_hint - increment if not null * @v: pointer of type atomic_t @@ -111,21 +548,17 @@ static inline int atomic_dec_if_positive(atomic_t *v) } #endif -#ifndef CONFIG_ARCH_HAS_ATOMIC_OR -static inline void atomic_or(int i, atomic_t *v) -{ - int old; - int new; - - do { - old = atomic_read(v); - new = old | i; - } while (atomic_cmpxchg(v, old, new) != old); -} -#endif /* #ifndef CONFIG_ARCH_HAS_ATOMIC_OR */ - -#include #ifdef CONFIG_GENERIC_ATOMIC64 #include #endif + +#ifndef atomic64_andnot +static inline void atomic64_andnot(long long i, atomic64_t *v) +{ + atomic64_and(~i, v); +} +#endif + +#include + #endif /* _LINUX_ATOMIC_H */ diff --git a/drivers/include/linux/bitmap.h b/drivers/include/linux/bitmap.h index f1953d5ae..1165e841d 100644 --- a/drivers/include/linux/bitmap.h +++ b/drivers/include/linux/bitmap.h @@ -52,16 +52,13 @@ * bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit) * bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap * bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz - * bitmap_scnprintf(buf, len, src, nbits) Print bitmap src to buf * bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf * bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf - * bitmap_scnlistprintf(buf, len, src, nbits) Print bitmap src as list to buf * bitmap_parselist(buf, dst, nbits) Parse bitmap dst from kernel buf * bitmap_parselist_user(buf, dst, nbits) Parse bitmap dst from user buf * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region * bitmap_release_region(bitmap, pos, order) Free specified bit region * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region - * bitmap_print_to_pagebuf(list, buf, mask, nbits) Print bitmap src as list/hex */ /* @@ -96,10 +93,10 @@ extern int __bitmap_equal(const unsigned long 
*bitmap1, const unsigned long *bitmap2, unsigned int nbits); extern void __bitmap_complement(unsigned long *dst, const unsigned long *src, unsigned int nbits); -extern void __bitmap_shift_right(unsigned long *dst, - const unsigned long *src, int shift, int bits); -extern void __bitmap_shift_left(unsigned long *dst, - const unsigned long *src, int shift, int bits); +extern void __bitmap_shift_right(unsigned long *dst, const unsigned long *src, + unsigned int shift, unsigned int nbits); +extern void __bitmap_shift_left(unsigned long *dst, const unsigned long *src, + unsigned int shift, unsigned int nbits); extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, const unsigned long *bitmap2, unsigned int nbits); extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, @@ -147,71 +144,67 @@ bitmap_find_next_zero_area(unsigned long *map, align_mask, 0); } -extern int bitmap_scnprintf(char *buf, unsigned int len, - const unsigned long *src, int nbits); extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user, unsigned long *dst, int nbits); extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen, unsigned long *dst, int nbits); -extern int bitmap_scnlistprintf(char *buf, unsigned int len, - const unsigned long *src, int nbits); extern int bitmap_parselist(const char *buf, unsigned long *maskp, int nmaskbits); extern int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen, unsigned long *dst, int nbits); extern void bitmap_remap(unsigned long *dst, const unsigned long *src, - const unsigned long *old, const unsigned long *new, int bits); + const unsigned long *old, const unsigned long *new, unsigned int nbits); extern int bitmap_bitremap(int oldbit, const unsigned long *old, const unsigned long *new, int bits); extern void bitmap_onto(unsigned long *dst, const unsigned long *orig, - const unsigned long *relmap, int bits); + const unsigned long *relmap, unsigned int bits); extern void bitmap_fold(unsigned long *dst, const unsigned long *orig, - int sz, int bits); + unsigned int sz, unsigned int nbits); extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order); extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order); extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order); -extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits); -extern int bitmap_ord_to_pos(const unsigned long *bitmap, int n, int bits); +#ifdef __BIG_ENDIAN +extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits); +#else +#define bitmap_copy_le bitmap_copy +#endif +extern unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int ord, unsigned int nbits); extern int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp, int nmaskbits); -#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG)) -#define BITMAP_LAST_WORD_MASK(nbits) \ -( \ - ((nbits) % BITS_PER_LONG) ? 
\ - (1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL \ -) +#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1))) +#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1))) #define small_const_nbits(nbits) \ (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG) -static inline void bitmap_zero(unsigned long *dst, int nbits) +static inline void bitmap_zero(unsigned long *dst, unsigned int nbits) { if (small_const_nbits(nbits)) *dst = 0UL; else { - int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); memset(dst, 0, len); } } -static inline void bitmap_fill(unsigned long *dst, int nbits) +static inline void bitmap_fill(unsigned long *dst, unsigned int nbits) { - size_t nlongs = BITS_TO_LONGS(nbits); + unsigned int nlongs = BITS_TO_LONGS(nbits); if (!small_const_nbits(nbits)) { - int len = (nlongs - 1) * sizeof(unsigned long); + unsigned int len = (nlongs - 1) * sizeof(unsigned long); memset(dst, 0xff, len); } dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits); } static inline void bitmap_copy(unsigned long *dst, const unsigned long *src, - int nbits) + unsigned int nbits) { if (small_const_nbits(nbits)) *dst = *src; else { - int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); memcpy(dst, src, len); } } @@ -290,41 +283,41 @@ static inline int bitmap_empty(const unsigned long *src, unsigned nbits) { if (small_const_nbits(nbits)) return ! (*src & BITMAP_LAST_WORD_MASK(nbits)); - else - return __bitmap_empty(src, nbits); + + return find_first_bit(src, nbits) == nbits; } static inline int bitmap_full(const unsigned long *src, unsigned int nbits) { if (small_const_nbits(nbits)) return ! 
(~(*src) & BITMAP_LAST_WORD_MASK(nbits)); - else - return __bitmap_full(src, nbits); + + return find_first_zero_bit(src, nbits) == nbits; } -static inline int bitmap_weight(const unsigned long *src, unsigned int nbits) +static __always_inline int bitmap_weight(const unsigned long *src, unsigned int nbits) { if (small_const_nbits(nbits)) return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits)); return __bitmap_weight(src, nbits); } -static inline void bitmap_shift_right(unsigned long *dst, - const unsigned long *src, int n, int nbits) +static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src, + unsigned int shift, int nbits) { if (small_const_nbits(nbits)) - *dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> n; + *dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> shift; else - __bitmap_shift_right(dst, src, n, nbits); + __bitmap_shift_right(dst, src, shift, nbits); } -static inline void bitmap_shift_left(unsigned long *dst, - const unsigned long *src, int n, int nbits) +static inline void bitmap_shift_left(unsigned long *dst, const unsigned long *src, + unsigned int shift, unsigned int nbits) { if (small_const_nbits(nbits)) - *dst = (*src << n) & BITMAP_LAST_WORD_MASK(nbits); + *dst = (*src << shift) & BITMAP_LAST_WORD_MASK(nbits); else - __bitmap_shift_left(dst, src, n, nbits); + __bitmap_shift_left(dst, src, shift, nbits); } static inline int bitmap_parse(const char *buf, unsigned int buflen, diff --git a/drivers/include/linux/bitops.h b/drivers/include/linux/bitops.h index bea716309..daf99f36b 100644 --- a/drivers/include/linux/bitops.h +++ b/drivers/include/linux/bitops.h @@ -3,7 +3,7 @@ #include #ifdef __KERNEL__ -#define BIT(nr) (1UL << (nr)) +#define BIT(nr) (1UL << (nr)) #define BIT_ULL(nr) (1ULL << (nr)) #define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) #define BIT_WORD(nr) ((nr) / BITS_PER_LONG) @@ -36,8 +36,8 @@ extern unsigned long __sw_hweight64(__u64 w); #include #define for_each_set_bit(bit, addr, size) \ - for ((bit) = find_first_bit((addr), (size)); \ - (bit) < (size); \ + for ((bit) = find_first_bit((addr), (size)); \ + (bit) < (size); \ (bit) = find_next_bit((addr), (size), (bit) + 1)) /* same as for_each_set_bit() but use bit as value to start with */ @@ -57,7 +57,7 @@ extern unsigned long __sw_hweight64(__u64 w); (bit) < (size); \ (bit) = find_next_zero_bit((addr), (size), (bit) + 1)) -static __inline__ int get_bitmask_order(unsigned int count) +static inline int get_bitmask_order(unsigned int count) { int order; @@ -65,7 +65,7 @@ static __inline__ int get_bitmask_order(unsigned int count) return order; /* We could be slightly more clever with -1 here... */ } -static __inline__ int get_count_order(unsigned int count) +static inline int get_count_order(unsigned int count) { int order; @@ -75,7 +75,7 @@ static __inline__ int get_count_order(unsigned int count) return order; } -static inline unsigned long hweight_long(unsigned long w) +static __always_inline unsigned long hweight_long(unsigned long w) { return sizeof(w) == 4 ? hweight32(w) : hweight64(w); } @@ -164,6 +164,8 @@ static inline __u8 ror8(__u8 word, unsigned int shift) * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit * @value: value to sign extend * @index: 0 based bit index (0<=index<32) to sign bit + * + * This is safe to use for 16- and 8-bit types as well. 
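+ *
+ * Worked example (illustrative): sign_extend32(0x80, 7) shifts the value
+ * left by 31 - 7 = 24 bits, giving 0x80000000, then arithmetic-shifts it
+ * back down, yielding 0xffffff80, i.e. -128.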
*/ static inline __s32 sign_extend32(__u32 value, int index) { @@ -171,6 +173,17 @@ static inline __s32 sign_extend32(__u32 value, int index) return (__s32)(value << shift) >> shift; } +/** + * sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit + * @value: value to sign extend + * @index: 0 based bit index (0<=index<64) to sign bit + */ +static inline __s64 sign_extend64(__u64 value, int index) +{ + __u8 shift = 63 - index; + return (__s64)(value << shift) >> shift; +} + static inline unsigned fls_long(unsigned long l) { if (sizeof(l) == 4) @@ -218,9 +231,9 @@ static inline unsigned long __ffs64(u64 word) /** * find_last_bit - find the last set bit in a memory region * @addr: The address to start the search at - * @size: The maximum size to search + * @size: The number of bits to search * - * Returns the bit number of the first set bit, or size. + * Returns the bit number of the last set bit, or size. */ extern unsigned long find_last_bit(const unsigned long *addr, unsigned long size); diff --git a/drivers/include/linux/bottom_half.h b/drivers/include/linux/bottom_half.h index 86c12c93e..8fdcb7831 100644 --- a/drivers/include/linux/bottom_half.h +++ b/drivers/include/linux/bottom_half.h @@ -2,7 +2,6 @@ #define _LINUX_BH_H #include -#include #ifdef CONFIG_TRACE_IRQFLAGS extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt); diff --git a/drivers/include/linux/bug.h b/drivers/include/linux/bug.h index 5e65bc092..0960328fb 100644 --- a/drivers/include/linux/bug.h +++ b/drivers/include/linux/bug.h @@ -51,8 +51,8 @@ #define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0) /* Force a compilation error if a constant expression is not a power of 2 */ -#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \ - BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0)) +#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \ + BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0)) /* Force a compilation error if condition is true, but also produce a result (of value 0 and type size_t), so the expression can be used diff --git a/drivers/include/linux/circ_buf.h b/drivers/include/linux/circ_buf.h new file mode 100644 index 000000000..90f2471dc --- /dev/null +++ b/drivers/include/linux/circ_buf.h @@ -0,0 +1,36 @@ +/* + * See Documentation/circular-buffers.txt for more information. + */ + +#ifndef _LINUX_CIRC_BUF_H +#define _LINUX_CIRC_BUF_H 1 + +struct circ_buf { + char *buf; + int head; + int tail; +}; + +/* Return count in buffer. */ +#define CIRC_CNT(head,tail,size) (((head) - (tail)) & ((size)-1)) + +/* Return space available, 0..size-1. We always leave one free char + as a completely full buffer has head == tail, which is the same as + empty. */ +#define CIRC_SPACE(head,tail,size) CIRC_CNT((tail),((head)+1),(size)) + +/* Return count up to the end of the buffer. Carefully avoid + accessing head and tail more than once, so they can change + underneath us without returning inconsistent results. */ +#define CIRC_CNT_TO_END(head,tail,size) \ + ({int end = (size) - (tail); \ + int n = ((head) + end) & ((size)-1); \ + n < end ? n : end;}) + +/* Return space available up to the end of the buffer. */ +#define CIRC_SPACE_TO_END(head,tail,size) \ + ({int end = (size) - 1 - (head); \ + int n = (end + (tail)) & ((size)-1); \ + n <= end ? 
n : end+1;}) + +#endif /* _LINUX_CIRC_BUF_H */ diff --git a/drivers/include/linux/compiler-gcc.h b/drivers/include/linux/compiler-gcc.h index 12dd9a7b7..09f649114 100644 --- a/drivers/include/linux/compiler-gcc.h +++ b/drivers/include/linux/compiler-gcc.h @@ -5,14 +5,28 @@ /* * Common definitions for all gcc versions go here. */ -#define GCC_VERSION (__GNUC__ * 10000 \ - + __GNUC_MINOR__ * 100 \ - + __GNUC_PATCHLEVEL__) - +#define GCC_VERSION (__GNUC__ * 10000 \ + + __GNUC_MINOR__ * 100 \ + + __GNUC_PATCHLEVEL__) /* Optimization barrier */ + /* The "volatile" is due to gcc bugs */ #define barrier() __asm__ __volatile__("": : :"memory") +/* + * This version exists, for example, to prevent dead store elimination on @ptr + * where gcc and llvm may behave differently when otherwise using + * normal barrier(): while gcc behavior gets along with a normal + * barrier(), llvm needs an explicit input variable to be assumed + * clobbered. The issue is as follows: while the inline asm might + * access any memory it wants, the compiler could have fit all of + * @ptr into machine registers instead, and since @ptr never escaped + * from that, it proved that the inline asm wasn't touching any of + * it. This version works well with both compilers, i.e. we're telling + * the compiler that the inline asm absolutely may see the contents + * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495 + */ +#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory") /* * This macro obfuscates arithmetic on a variable address so that gcc @@ -32,54 +46,63 @@ * the inline assembly constraint from =g to =r, in this particular * case either is valid. */ -#define RELOC_HIDE(ptr, off) \ - ({ unsigned long __ptr; \ - __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \ - (typeof(ptr)) (__ptr + (off)); }) +#define RELOC_HIDE(ptr, off) \ +({ \ + unsigned long __ptr; \ + __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \ + (typeof(ptr)) (__ptr + (off)); \ +}) /* Make the optimizer believe the variable can be manipulated arbitrarily.
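+ *
+ * A sketch of the effect (x is illustrative): after
+ *
+ *	int x = 0;
+ *	OPTIMIZER_HIDE_VAR(x);
+ *
+ * the compiler must treat x as unknown, so a later "if (x)" branch can no
+ * longer be folded away as provably dead.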
*/ -#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var)) +#define OPTIMIZER_HIDE_VAR(var) \ + __asm__ ("" : "=r" (var) : "0" (var)) #ifdef __CHECKER__ -#define __must_be_array(arr) 0 +#define __must_be_array(a) 0 #else /* &a[0] degrades to a pointer: a different type from an array */ -#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) +#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) #endif /* * Force always-inline if the user requests it so via the .config, * or if gcc is too old: */ -#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \ +#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \ !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4) -# define inline inline __attribute__((always_inline)) notrace -# define __inline__ __inline__ __attribute__((always_inline)) notrace -# define __inline __inline __attribute__((always_inline)) notrace +#define inline inline __attribute__((always_inline)) notrace +#define __inline__ __inline__ __attribute__((always_inline)) notrace +#define __inline __inline __attribute__((always_inline)) notrace #else /* A lot of inline functions can cause havoc with function tracing */ -# define inline inline notrace -# define __inline__ __inline__ notrace -# define __inline __inline notrace +#define inline inline notrace +#define __inline__ __inline__ notrace +#define __inline __inline notrace #endif -#define __deprecated __attribute__((deprecated)) -#define __packed __attribute__((packed)) -#define __weak __attribute__((weak)) +#define __always_inline inline __attribute__((always_inline)) +#define noinline __attribute__((noinline)) + +#define __deprecated __attribute__((deprecated)) +#define __packed __attribute__((packed)) +#define __weak __attribute__((weak)) +#define __alias(symbol) __attribute__((alias(#symbol))) /* - * it doesn't make sense on ARM (currently the only user of __naked) to trace - * naked functions because then mcount is called without stack and frame pointer - * being set up and there is no chance to restore the lr register to the value - * before mcount was called. + * it doesn't make sense on ARM (currently the only user of __naked) + * to trace naked functions because then mcount is called without + * stack and frame pointer being set up and there is no chance to + * restore the lr register to the value before mcount was called. * - * The asm() bodies of naked functions often depend on standard calling conventions, - * therefore they must be noinline and noclone. GCC 4.[56] currently fail to enforce - * this, so we must do so ourselves. See GCC PR44290. + * The asm() bodies of naked functions often depend on standard calling + * conventions, therefore they must be noinline and noclone. + * + * GCC 4.[56] currently fail to enforce this, so we must do so ourselves. + * See GCC PR44290. */ -#define __naked __attribute__((naked)) noinline __noclone notrace +#define __naked __attribute__((naked)) noinline __noclone notrace -#define __noreturn __attribute__((noreturn)) +#define __noreturn __attribute__((noreturn)) /* * From the GCC manual: @@ -91,28 +114,170 @@ * would be. * [...] 
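+ *
+ * A hedged illustration (not from this header): a bit-counting helper
+ * whose result depends only on its argument can be declared __pure, so
+ * the compiler may merge repeated calls with the same argument:
+ *
+ *	static unsigned int __pure count_bits(unsigned long word);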
*/ -#define __pure __attribute__((pure)) -#define __aligned(x) __attribute__((aligned(x))) -#define __printf(a, b) __attribute__((format(printf, a, b))) -#define __scanf(a, b) __attribute__((format(scanf, a, b))) -#define noinline __attribute__((noinline)) -#define __attribute_const__ __attribute__((__const__)) -#define __maybe_unused __attribute__((unused)) -#define __always_unused __attribute__((unused)) +#define __pure __attribute__((pure)) +#define __aligned(x) __attribute__((aligned(x))) +#define __printf(a, b) __attribute__((format(printf, a, b))) +#define __scanf(a, b) __attribute__((format(scanf, a, b))) +#define __attribute_const__ __attribute__((__const__)) +#define __maybe_unused __attribute__((unused)) +#define __always_unused __attribute__((unused)) -#define __gcc_header(x) #x -#define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h) -#define gcc_header(x) _gcc_header(x) -#include gcc_header(__GNUC__) +/* gcc version specific checks */ + +#if GCC_VERSION < 30200 +# error Sorry, your compiler is too old - please upgrade it. +#endif + +#if GCC_VERSION < 30300 +# define __used __attribute__((__unused__)) +#else +# define __used __attribute__((__used__)) +#endif + +#ifdef CONFIG_GCOV_KERNEL +# if GCC_VERSION < 30400 +# error "GCOV profiling support for gcc versions below 3.4 not included" +# endif /* __GNUC_MINOR__ */ +#endif /* CONFIG_GCOV_KERNEL */ + +#if GCC_VERSION >= 30400 +#define __must_check __attribute__((warn_unused_result)) +#endif + +#if GCC_VERSION >= 40000 + +/* GCC 4.1.[01] miscompiles __weak */ +#ifdef __KERNEL__ +# if GCC_VERSION >= 40100 && GCC_VERSION <= 40101 +# error Your version of gcc miscompiles the __weak directive +# endif +#endif + +#define __used __attribute__((__used__)) +#define __compiler_offsetof(a, b) \ + __builtin_offsetof(a, b) + +#if GCC_VERSION >= 40100 && GCC_VERSION < 40600 +# define __compiletime_object_size(obj) __builtin_object_size(obj, 0) +#endif + +#if GCC_VERSION >= 40300 +/* Mark functions as cold. gcc will assume any path leading to a call + * to them will be unlikely. This means a lot of manual unlikely()s + * are unnecessary now for any paths leading to the usual suspects + * like BUG(), printk(), panic() etc. [but let's keep them for now for + * older compilers] + * + * Early snapshots of gcc 4.3 don't support this and we can't detect this + * in the preprocessor, but we can live with this because they're unreleased. + * Maketime probing would be overkill here. + * + * gcc also has a __attribute__((__hot__)) to move hot functions into + * a special section, but I don't see any sense in this right now in + * the kernel context + */ +#define __cold __attribute__((__cold__)) + +#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) + +#ifndef __CHECKER__ +# define __compiletime_warning(message) __attribute__((warning(message))) +# define __compiletime_error(message) __attribute__((error(message))) +#endif /* __CHECKER__ */ +#endif /* GCC_VERSION >= 40300 */ + +#if GCC_VERSION >= 40500 +/* + * Mark a position in code as unreachable. This can be used to + * suppress control flow warnings after asm blocks that transfer + * control elsewhere. + * + * Early snapshots of gcc 4.5 don't support this and we can't detect + * this in the preprocessor, but we can live with this because they're + * unreleased. Really, we need to have autoconf for the kernel. + */ +#define unreachable() __builtin_unreachable() + +/* Mark a function definition as prohibited from being cloned. 
*/ +#define __noclone __attribute__((__noclone__)) + +#endif /* GCC_VERSION >= 40500 */ + +#if GCC_VERSION >= 40600 +/* + * When used with Link Time Optimization, gcc can optimize away C functions or + * variables which are referenced only from assembly code. __visible tells the + * optimizer that something else uses this function or variable, thus preventing + * this. + */ +#define __visible __attribute__((externally_visible)) +#endif + + +#if GCC_VERSION >= 40900 && !defined(__CHECKER__) +/* + * __assume_aligned(n, k): Tell the optimizer that the returned + * pointer can be assumed to be k modulo n. The second argument is + * optional (default 0), so we use a variadic macro to make the + * shorthand. + * + * Beware: Do not apply this to functions which may return + * ERR_PTRs. Also, it is probably unwise to apply it to functions + * returning extra information in the low bits (but in that case the + * compiler should see some alignment anyway, when the return value is + * massaged by 'flags = ptr & 3; ptr &= ~3;'). + */ +#define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__))) +#endif + +/* + * GCC 'asm goto' miscompiles certain code sequences: + * + * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670 + * + * Work it around via a compiler barrier quirk suggested by Jakub Jelinek. + * + * (asm goto is automatically volatile - the naming reflects this.) + */ +#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0) + +#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP +#if GCC_VERSION >= 40400 +#define __HAVE_BUILTIN_BSWAP32__ +#define __HAVE_BUILTIN_BSWAP64__ +#endif +#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600) +#define __HAVE_BUILTIN_BSWAP16__ +#endif +#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */ + +#if GCC_VERSION >= 50000 +#define KASAN_ABI_VERSION 4 +#elif GCC_VERSION >= 40902 +#define KASAN_ABI_VERSION 3 +#endif + +#if GCC_VERSION >= 40902 +/* + * Tell the compiler that address safety instrumentation (KASAN) + * should not be applied to that function. + * Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 + */ +#define __no_sanitize_address __attribute__((no_sanitize_address)) +#endif + +#endif /* gcc version >= 40000 specific checks */ #if !defined(__noclone) #define __noclone /* not needed */ #endif +#if !defined(__no_sanitize_address) +#define __no_sanitize_address +#endif + /* * A trick to suppress uninitialized variable warning without generating any * code */ #define uninitialized_var(x) x = x - -#define __always_inline inline __attribute__((always_inline)) diff --git a/drivers/include/linux/compiler.h b/drivers/include/linux/compiler.h index 51ef68fc9..85d201eb8 100644 --- a/drivers/include/linux/compiler.h +++ b/drivers/include/linux/compiler.h @@ -17,6 +17,7 @@ # define __release(x) __context__(x,-1) # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) # define __percpu __attribute__((noderef, address_space(3))) +# define __pmem __attribute__((noderef, address_space(5))) #ifdef CONFIG_SPARSE_RCU_POINTER # define __rcu __attribute__((noderef, address_space(4))) #else @@ -42,6 +43,7 @@ extern void __chk_io_ptr(const volatile void __iomem *); # define __cond_lock(x,c) (c) # define __percpu # define __rcu +# define __pmem #endif /* Indirect macros required for expanded argument pasting, eg. __LINE__. 
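A minimal sketch of __assume_aligned() on a declaration; the allocator names are hypothetical. The attribute tells gcc the returned pointer's alignment (optionally with an offset), so it can drop runtime alignment fixups:

    void *ring_alloc(unsigned long n) __assume_aligned(64);        /* p % 64 == 0 */
    void *ring_alloc_hdr(unsigned long n) __assume_aligned(64, 8); /* p % 64 == 8 */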
*/ @@ -54,7 +56,11 @@ extern void __chk_io_ptr(const volatile void __iomem *); #include #endif +#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__) +#define notrace __attribute__((hotpatch(0,0))) +#else #define notrace __attribute__((no_instrument_function)) +#endif /* Intel compiler defines __GNUC__. So we will overwrite implementations * coming from above header files here @@ -165,6 +171,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); # define barrier() __memory_barrier() #endif +#ifndef barrier_data +# define barrier_data(ptr) barrier() +#endif + /* Unreachable code */ #ifndef unreachable # define unreachable() do { } while (1) @@ -188,46 +198,56 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); #include -static __always_inline void data_access_exceeds_word_size(void) -#ifdef __compiletime_warning -__compiletime_warning("data access exceeds word size and won't be atomic") -#endif -; +#define __READ_ONCE_SIZE \ +({ \ + switch (size) { \ + case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \ + case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \ + case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \ + case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \ + default: \ + barrier(); \ + __builtin_memcpy((void *)res, (const void *)p, size); \ + barrier(); \ + } \ +}) -static __always_inline void data_access_exceeds_word_size(void) +static __always_inline +void __read_once_size(const volatile void *p, void *res, int size) { + __READ_ONCE_SIZE; } -static __always_inline void __read_once_size(volatile void *p, void *res, int size) +#ifdef CONFIG_KASAN +/* + * This function is not 'inline' because __no_sanitize_address conflicts + * with inlining. Attempting to inline it may cause a build failure. + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 + * '__maybe_unused' allows us to avoid defined-but-not-used warnings. + */ +static __no_sanitize_address __maybe_unused +void __read_once_size_nocheck(const volatile void *p, void *res, int size) { - switch (size) { - case 1: *(__u8 *)res = *(volatile __u8 *)p; break; - case 2: *(__u16 *)res = *(volatile __u16 *)p; break; - case 4: *(__u32 *)res = *(volatile __u32 *)p; break; -#ifdef CONFIG_64BIT - case 8: *(__u64 *)res = *(volatile __u64 *)p; break; -#endif - default: - barrier(); - __builtin_memcpy((void *)res, (const void *)p, size); - data_access_exceeds_word_size(); - barrier(); - } + __READ_ONCE_SIZE; } +#else +static __always_inline +void __read_once_size_nocheck(const volatile void *p, void *res, int size) +{ + __READ_ONCE_SIZE; +} +#endif -static __always_inline void __assign_once_size(volatile void *p, void *res, int size) +static __always_inline void __write_once_size(volatile void *p, void *res, int size) { switch (size) { case 1: *(volatile __u8 *)p = *(__u8 *)res; break; case 2: *(volatile __u16 *)p = *(__u16 *)res; break; case 4: *(volatile __u32 *)p = *(__u32 *)res; break; -#ifdef CONFIG_64BIT case 8: *(volatile __u64 *)p = *(__u64 *)res; break; -#endif default: barrier(); __builtin_memcpy((void *)p, (const void *)res, size); - data_access_exceeds_word_size(); barrier(); } } @@ -235,15 +255,15 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int /* * Prevent the compiler from merging or refetching reads or writes.
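barrier_data() exists mainly so a memset() of sensitive data cannot be elided as a dead store; this is the memzero_explicit() pattern from mainline (a sketch, assuming the usual string/type headers are available):

    static inline void wipe_key(void *buf, size_t len)
    {
            memset(buf, 0, len);
            barrier_data(buf);  /* "buf" escapes into asm: the memset must stay */
    }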
The * compiler is also forbidden from reordering successive instances of - * READ_ONCE, ASSIGN_ONCE and ACCESS_ONCE (see below), but only when the + * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the * compiler is aware of some particular ordering. One way to make the * compiler aware of ordering is to put the two invocations of READ_ONCE, - * ASSIGN_ONCE or ACCESS_ONCE() in different C statements. + * WRITE_ONCE or ACCESS_ONCE() in different C statements. * * In contrast to ACCESS_ONCE these two macros will also work on aggregate * data types like structs or unions. If the size of the accessed data * type exceeds the word size of the machine (e.g., 32 bits or 64 bits) - * READ_ONCE() and ASSIGN_ONCE() will fall back to memcpy and print a - * compile-time warning. + * READ_ONCE() and WRITE_ONCE() will fall back to memcpy. * * Their two major use cases are: (1) Mediating communication between @@ -254,11 +274,30 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int * required ordering. */ -#define READ_ONCE(x) \ - ({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; }) +#define __READ_ONCE(x, check) \ +({ \ + union { typeof(x) __val; char __c[1]; } __u; \ + if (check) \ + __read_once_size(&(x), __u.__c, sizeof(x)); \ + else \ + __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \ + __u.__val; \ +}) +#define READ_ONCE(x) __READ_ONCE(x, 1) -#define ASSIGN_ONCE(val, x) \ - ({ typeof(x) __val; __val = val; __assign_once_size(&x, &__val, sizeof(__val)); __val; }) +/* + * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need + * to hide memory access from KASAN. + */ +#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0) + +#define WRITE_ONCE(x, val) \ +({ \ + union { typeof(x) __val; char __c[1]; } __u = \ + { .__val = (__force typeof(x)) (val) }; \ + __write_once_size(&(x), __u.__c, sizeof(x)); \ + __u.__val; \ +}) #endif /* __KERNEL__ */ @@ -378,6 +417,14 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int #define __visible #endif +/* + * Assume alignment of return value. + */ +#ifndef __assume_aligned +#define __assume_aligned(a, ...) +#endif + + /* Are two types/vars the same type (ignoring qualifiers)? */ #ifndef __same_type # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) @@ -385,7 +432,7 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int /* Is this type a native word size -- useful for atomic operations */ #ifndef __native_word -# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long)) +# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long)) #endif /* Compile time object size, -1 for unknown */ @@ -404,7 +451,7 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether. */ # ifndef __CHECKER__ -# define __compiletime_error_fallback(condition) \ +# define __compiletime_error_fallback(condition) \ do { ((void)sizeof(char[1 - 2 * condition])); } while (0) # endif #endif @@ -447,12 +494,38 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int * to make the compiler aware of ordering is to put the two invocations of * ACCESS_ONCE() in different C statements. * - * This macro does absolutely -nothing- to prevent the CPU from reordering, - * merging, or refetching absolutely anything at any time.
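A usage sketch for the pair defined above: one side publishes a flag, the other polls it, and READ_ONCE()/WRITE_ONCE() keep the compiler from caching, tearing, or fusing the accesses (names hypothetical; cpu_relax() assumed from asm/processor.h):

    static int data_ready;              /* shared with an interrupt handler */

    static void producer(void)          /* e.g. called from the IRQ path */
    {
            WRITE_ONCE(data_ready, 1);  /* one non-torn volatile store */
    }

    static void consumer(void)
    {
            while (!READ_ONCE(data_ready))  /* re-read on every iteration */
                    cpu_relax();
    }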
Its main intended - * use is to mediate communication between process-level code and irq/NMI - * handlers, all running on the same CPU. + * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE + * on a union member will work as long as the size of the member matches the + * size of the union and the size is smaller than word size. + * + * The major use cases of ACCESS_ONCE used to be (1) Mediating communication + * between process-level code and irq/NMI handlers, all running on the same CPU, + * and (2) Ensuring that the compiler does not fold, spindle, or otherwise + * mutilate accesses that either do not require ordering or that interact + * with an explicit memory barrier or atomic instruction that provides the + * required ordering. + * + * If possible use READ_ONCE()/WRITE_ONCE() instead. */ -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) +#define __ACCESS_ONCE(x) ({ \ + __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \ + (volatile typeof(x) *)&(x); }) +#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x)) + +/** + * lockless_dereference() - safely load a pointer for later dereference + * @p: The pointer to load + * + * Similar to rcu_dereference(), but for situations where the pointed-to + * object's lifetime is managed by something other than RCU. That + * "something other" might be reference counting or simple immortality. + */ +#define lockless_dereference(p) \ +({ \ + typeof(p) _________p1 = READ_ONCE(p); \ + smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ + (_________p1); \ +}) /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */ #ifdef CONFIG_KPROBES diff --git a/drivers/include/linux/component.h b/drivers/include/linux/component.h new file mode 100644 index 000000000..c00dcc302 --- /dev/null +++ b/drivers/include/linux/component.h @@ -0,0 +1,39 @@ +#ifndef COMPONENT_H +#define COMPONENT_H + +struct device; + +struct component_ops { + int (*bind)(struct device *, struct device *, void *); + void (*unbind)(struct device *, struct device *, void *); +}; + +int component_add(struct device *, const struct component_ops *); +void component_del(struct device *, const struct component_ops *); + +int component_bind_all(struct device *, void *); +void component_unbind_all(struct device *, void *); + +struct master; + +struct component_master_ops { + int (*add_components)(struct device *, struct master *); + int (*bind)(struct device *); + void (*unbind)(struct device *); +}; + +int component_master_add(struct device *, const struct component_master_ops *); +void component_master_del(struct device *, + const struct component_master_ops *); + +int component_master_add_child(struct master *master, + int (*compare)(struct device *, void *), void *compare_data); + +struct component_match; + +int component_master_add_with_match(struct device *, + const struct component_master_ops *, struct component_match *); +void component_match_add(struct device *, struct component_match **, + int (*compare)(struct device *, void *), void *compare_data); + +#endif diff --git a/drivers/include/linux/cpumask.h b/drivers/include/linux/cpumask.h index b950e9d60..59915ea53 100644 --- a/drivers/include/linux/cpumask.h +++ b/drivers/include/linux/cpumask.h @@ -11,6 +11,7 @@ #include #include +/* Don't assign or return these: may not be this big! 
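A sketch of lockless_dereference() for a pointer that is published once and never freed, so neither locks nor RCU are needed; the config structure is hypothetical:

    struct my_config { int timeout_ms; };  /* hypothetical */
    static struct my_config *cfg;          /* written once during init */

    static int get_timeout(void)
    {
            struct my_config *c = lockless_dereference(cfg);

            return c ? c->timeout_ms : -1;  /* dependent load is ordered */
    }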
*/ typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t; /** @@ -22,6 +23,14 @@ typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t; */ #define cpumask_bits(maskp) ((maskp)->bits) +/** + * cpumask_pr_args - printf args to output a cpumask + * @maskp: cpumask to be printed + * + * Can be used to provide arguments for '%*pb[l]' when printing a cpumask. + */ +#define cpumask_pr_args(maskp) nr_cpu_ids, cpumask_bits(maskp) + #if NR_CPUS == 1 #define nr_cpu_ids 1 #else @@ -142,10 +151,8 @@ static inline unsigned int cpumask_any_but(const struct cpumask *mask, return 1; } -static inline int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp) +static inline unsigned int cpumask_local_spread(unsigned int i, int node) { - set_bit(0, cpumask_bits(dstp)); return 0; } @@ -199,7 +206,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *); int cpumask_any_but(const struct cpumask *mask, unsigned int cpu); -int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp); +unsigned int cpumask_local_spread(unsigned int i, int node); /** * for_each_cpu - iterate over every cpu in a mask @@ -281,11 +288,11 @@ static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp) * @cpumask: the cpumask pointer * * Returns 1 if @cpu is set in @cpumask, else returns 0 - * - * No static inline type checking - see Subtlety (1) above. */ -#define cpumask_test_cpu(cpu, cpumask) \ - test_bit(cpumask_check(cpu), cpumask_bits((cpumask))) +static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask) +{ + return test_bit(cpumask_check(cpu), cpumask_bits((cpumask))); +} /** * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask @@ -538,21 +545,6 @@ static inline void cpumask_copy(struct cpumask *dstp, */ #define cpumask_of(cpu) (get_cpu_mask(cpu)) -/** - * cpumask_scnprintf - print a cpumask into a string as comma-separated hex - * @buf: the buffer to sprintf into - * @len: the length of the buffer - * @srcp: the cpumask to print - * - * If len is zero, returns zero. Otherwise returns the length of the - * (nul-terminated) @buf string. - */ -static inline int cpumask_scnprintf(char *buf, int len, - const struct cpumask *srcp) -{ - return bitmap_scnprintf(buf, len, cpumask_bits(srcp), nr_cpumask_bits); -} - /** * cpumask_parse_user - extract a cpumask from a user string * @buf: the buffer to extract from @@ -564,7 +556,7 @@ static inline int cpumask_scnprintf(char *buf, int len, static inline int cpumask_parse_user(const char __user *buf, int len, struct cpumask *dstp) { - return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits); + return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpu_ids); } /** @@ -579,23 +571,7 @@ static inline int cpumask_parselist_user(const char __user *buf, int len, struct cpumask *dstp) { return bitmap_parselist_user(buf, len, cpumask_bits(dstp), - nr_cpumask_bits); -} - -/** - * cpulist_scnprintf - print a cpumask into a string as comma-separated list - * @buf: the buffer to sprintf into - * @len: the length of the buffer - * @srcp: the cpumask to print - * - * If len is zero, returns zero. Otherwise returns the length of the - * (nul-terminated) @buf string.
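cpumask_pr_args() pairs with the '%*pb'/'%*pbl' vsprintf extensions; a one-line usage sketch:

    static void show_online(void)
    {
            /* "%*pb" prints hex words, "%*pbl" a range list such as "0-3,8" */
            pr_info("online cpus: %*pbl\n", cpumask_pr_args(cpu_online_mask));
    }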
- */ -static inline int cpulist_scnprintf(char *buf, int len, - const struct cpumask *srcp) -{ - return bitmap_scnlistprintf(buf, len, cpumask_bits(srcp), - nr_cpumask_bits); + nr_cpu_ids); } /** @@ -610,7 +586,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp) char *nl = strchr(buf, '\n'); unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf); - return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits); + return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpu_ids); } /** @@ -622,7 +598,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp) */ static inline int cpulist_parse(const char *buf, struct cpumask *dstp) { - return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits); + return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpu_ids); } /** @@ -632,9 +608,7 @@ static inline int cpulist_parse(const char *buf, struct cpumask *dstp) */ static inline size_t cpumask_size(void) { - /* FIXME: Once all cpumask assignments are eliminated, this - * can be nr_cpumask_bits */ - return BITS_TO_LONGS(NR_CPUS) * sizeof(long); + return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long); } /* @@ -791,7 +765,7 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu) #if NR_CPUS <= BITS_PER_LONG #define CPU_BITS_ALL \ { \ - [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ + [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \ } #else /* NR_CPUS > BITS_PER_LONG */ @@ -799,7 +773,7 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu) #define CPU_BITS_ALL \ { \ [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \ - [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ + [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \ } #endif /* NR_CPUS > BITS_PER_LONG */ @@ -817,35 +791,21 @@ static inline ssize_t cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask) { return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask), - nr_cpumask_bits); + nr_cpu_ids); } -/* - * - * From here down, all obsolete. Use cpumask_ variants! - * - */ -#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS -#define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu)) - -#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS) - #if NR_CPUS <= BITS_PER_LONG - #define CPU_MASK_ALL \ (cpumask_t) { { \ - [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ + [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \ } } - #else - #define CPU_MASK_ALL \ (cpumask_t) { { \ [0 ... 
BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \ - [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ + [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \ } } - -#endif +#endif /* NR_CPUS > BITS_PER_LONG */ #define CPU_MASK_NONE \ (cpumask_t) { { \ @@ -857,143 +817,4 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask) [0] = 1UL \ } } -#if NR_CPUS == 1 -#define first_cpu(src) ({ (void)(src); 0; }) -#define next_cpu(n, src) ({ (void)(src); 1; }) -#define any_online_cpu(mask) 0 -#define for_each_cpu_mask(cpu, mask) \ - for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) -#else /* NR_CPUS > 1 */ -int __first_cpu(const cpumask_t *srcp); -int __next_cpu(int n, const cpumask_t *srcp); - -#define first_cpu(src) __first_cpu(&(src)) -#define next_cpu(n, src) __next_cpu((n), &(src)) -#define any_online_cpu(mask) cpumask_any_and(&mask, cpu_online_mask) -#define for_each_cpu_mask(cpu, mask) \ - for ((cpu) = -1; \ - (cpu) = next_cpu((cpu), (mask)), \ - (cpu) < NR_CPUS; ) -#endif /* SMP */ - -#if NR_CPUS <= 64 - -#define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask) - -#else /* NR_CPUS > 64 */ - -int __next_cpu_nr(int n, const cpumask_t *srcp); -#define for_each_cpu_mask_nr(cpu, mask) \ - for ((cpu) = -1; \ - (cpu) = __next_cpu_nr((cpu), &(mask)), \ - (cpu) < nr_cpu_ids; ) - -#endif /* NR_CPUS > 64 */ - -#define cpus_addr(src) ((src).bits) - -#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst)) -static inline void __cpu_set(int cpu, volatile cpumask_t *dstp) -{ - set_bit(cpu, dstp->bits); -} - -#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst)) -static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp) -{ - clear_bit(cpu, dstp->bits); -} - -#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS) -static inline void __cpus_setall(cpumask_t *dstp, int nbits) -{ - bitmap_fill(dstp->bits, nbits); -} - -#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS) -static inline void __cpus_clear(cpumask_t *dstp, int nbits) -{ - bitmap_zero(dstp->bits, nbits); -} - -/* No static inline type checking - see Subtlety (1) above. 
*/ -#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits) - -#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask)) -static inline int __cpu_test_and_set(int cpu, cpumask_t *addr) -{ - return test_and_set_bit(cpu, addr->bits); -} - -#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS) -static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p, - const cpumask_t *src2p, int nbits) -{ - return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits); -} - -#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS) -static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p, - const cpumask_t *src2p, int nbits) -{ - bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits); -} - -#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS) -static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p, - const cpumask_t *src2p, int nbits) -{ - bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits); -} - -#define cpus_andnot(dst, src1, src2) \ - __cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS) -static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p, - const cpumask_t *src2p, int nbits) -{ - return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits); -} - -#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS) -static inline int __cpus_equal(const cpumask_t *src1p, - const cpumask_t *src2p, int nbits) -{ - return bitmap_equal(src1p->bits, src2p->bits, nbits); -} - -#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS) -static inline int __cpus_intersects(const cpumask_t *src1p, - const cpumask_t *src2p, int nbits) -{ - return bitmap_intersects(src1p->bits, src2p->bits, nbits); -} - -#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS) -static inline int __cpus_subset(const cpumask_t *src1p, - const cpumask_t *src2p, int nbits) -{ - return bitmap_subset(src1p->bits, src2p->bits, nbits); -} - -#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS) -static inline int __cpus_empty(const cpumask_t *srcp, int nbits) -{ - return bitmap_empty(srcp->bits, nbits); -} - -#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS) -static inline int __cpus_weight(const cpumask_t *srcp, int nbits) -{ - return bitmap_weight(srcp->bits, nbits); -} - -#define cpus_shift_left(dst, src, n) \ - __cpus_shift_left(&(dst), &(src), (n), NR_CPUS) -static inline void __cpus_shift_left(cpumask_t *dstp, - const cpumask_t *srcp, int n, int nbits) -{ - bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); -} -#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ - #endif /* __LINUX_CPUMASK_H */ diff --git a/drivers/include/linux/device.h b/drivers/include/linux/device.h new file mode 100644 index 000000000..59c06dab9 --- /dev/null +++ b/drivers/include/linux/device.h @@ -0,0 +1,99 @@ +/* + * device.h - generic, centralized driver model + * + * Copyright (c) 2001-2003 Patrick Mochel + * Copyright (c) 2004-2009 Greg Kroah-Hartman + * Copyright (c) 2008-2009 Novell Inc. + * + * This file is released under the GPLv2 + * + * See Documentation/driver-model/ for more information. 
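For code still using the cpus_*/cpu_* helpers removed above, the replacements are the pointer-based cpumask_* variants; a migration sketch:

    static int and_then_first(struct cpumask *dst,
                              const struct cpumask *a, const struct cpumask *b)
    {
            /* was: cpus_and(*dst, *a, *b); cpus_empty(*dst); first_cpu(*dst); */
            if (!cpumask_and(dst, a, b))    /* returns 0 when the result is empty */
                    return -1;
            return cpumask_first(dst);
    }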
+ */ + +#ifndef _DEVICE_H_ +#define _DEVICE_H_ + +#include +#include +#include +#include +struct device; +enum probe_type { + PROBE_DEFAULT_STRATEGY, + PROBE_PREFER_ASYNCHRONOUS, + PROBE_FORCE_SYNCHRONOUS, +}; + +struct device_driver { + const char *name; + const char *mod_name; /* used for built-in modules */ + + bool suppress_bind_attrs; /* disables bind/unbind via sysfs */ + enum probe_type probe_type; +}; + +struct device { + struct device *parent; + + const char *init_name; /* initial name of the device */ + struct device_driver *driver; /* which driver has allocated this + device */ + void *platform_data; /* Platform specific data, device + core doesn't touch it */ + void *driver_data; /* Driver data, set and get with + dev_set/get_drvdata */ +#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN + struct irq_domain *msi_domain; +#endif +#ifdef CONFIG_PINCTRL + struct dev_pin_info *pins; +#endif +#ifdef CONFIG_GENERIC_MSI_IRQ + struct list_head msi_list; +#endif + +#ifdef CONFIG_NUMA + int numa_node; /* NUMA node this device is close to */ +#endif +#ifdef CONFIG_DMA_CMA + struct cma *cma_area; /* contiguous memory area for dma + allocations */ +#endif +}; + +extern __printf(2, 3) +int dev_set_name(struct device *dev, const char *name, ...); + +#ifdef CONFIG_NUMA +static inline int dev_to_node(struct device *dev) +{ + return dev->numa_node; +} +static inline void set_dev_node(struct device *dev, int node) +{ + dev->numa_node = node; +} +#else +static inline int dev_to_node(struct device *dev) +{ + return -1; +} +static inline void set_dev_node(struct device *dev, int node) +{ +} +#endif + + +static inline void *dev_get_drvdata(const struct device *dev) +{ + return dev->driver_data; +} + +static inline void dev_set_drvdata(struct device *dev, void *data) +{ + dev->driver_data = data; +} + + + +#endif /* _DEVICE_H_ */ diff --git a/drivers/include/linux/dma-buf.h b/drivers/include/linux/dma-buf.h index 4d898c43b..f8658833c 100644 --- a/drivers/include/linux/dma-buf.h +++ b/drivers/include/linux/dma-buf.h @@ -115,6 +115,8 @@ struct dma_buf_ops { * @attachments: list of dma_buf_attachment that denotes all devices attached. * @ops: dma_buf_ops associated with this buffer object. * @exp_name: name of the exporter; useful for debugging. + * @owner: pointer to exporter module; used for refcounting when exporter is a + * kernel module. * @list_node: node for dma_buf accounting and debugging. * @priv: exporter specific private data for this buffer object. 
* @resv: reservation object linked to this dma-buf @@ -170,12 +172,7 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *dmabuf_attach); -struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops, - size_t size, int flags, const char *, - struct reservation_object *); - -#define dma_buf_export(priv, ops, size, flags, resv) \ - dma_buf_export_named(priv, ops, size, flags, KBUILD_MODNAME, resv) +struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info); int dma_buf_fd(struct dma_buf *dmabuf, int flags); struct dma_buf *dma_buf_get(int fd); diff --git a/drivers/include/linux/dma_remapping.h b/drivers/include/linux/dma_remapping.h index 7ac17f572..187c10299 100644 --- a/drivers/include/linux/dma_remapping.h +++ b/drivers/include/linux/dma_remapping.h @@ -20,6 +20,14 @@ #define CONTEXT_TT_MULTI_LEVEL 0 #define CONTEXT_TT_DEV_IOTLB 1 #define CONTEXT_TT_PASS_THROUGH 2 +/* Extended context entry types */ +#define CONTEXT_TT_PT_PASID 4 +#define CONTEXT_TT_PT_PASID_DEV_IOTLB 5 +#define CONTEXT_TT_MASK (7ULL << 2) + +#define CONTEXT_DINVE (1ULL << 8) +#define CONTEXT_PRS (1ULL << 9) +#define CONTEXT_PASIDE (1ULL << 11) struct intel_iommu; struct dmar_domain; diff --git a/drivers/include/linux/dmapool.h b/drivers/include/linux/dmapool.h index 3d2f2fdab..4c002b0a9 100644 --- a/drivers/include/linux/dmapool.h +++ b/drivers/include/linux/dmapool.h @@ -3,8 +3,8 @@ * * Allocation pools for DMAable (coherent) memory. * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. 
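The dma_buf_export() change above replaces the long argument list with a single descriptor. A sketch of the new calling convention; the field names follow the mainline struct dma_buf_export_info, which this hunk does not show, so treat them as assumptions:

    struct my_obj { size_t size; };                /* hypothetical exporter object */
    static const struct dma_buf_ops my_dmabuf_ops; /* exporter callbacks (elided)  */

    static struct dma_buf *my_export(struct my_obj *obj)
    {
            struct dma_buf_export_info exp_info = {
                    .exp_name = KBUILD_MODNAME,  /* what the old wrapper passed */
                    .ops      = &my_dmabuf_ops,
                    .size     = obj->size,
                    .flags    = O_RDWR,
                    .priv     = obj,
            };

            return dma_buf_export(&exp_info);
    }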
*/ @@ -19,6 +19,12 @@ void dma_pool_destroy(struct dma_pool *pool); void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle); +static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags, + dma_addr_t *handle) +{ + return dma_pool_alloc(pool, mem_flags | __GFP_ZERO, handle); +} + void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr); /* diff --git a/drivers/include/linux/dmi.h b/drivers/include/linux/dmi.h index f820f0a33..919deb6cd 100644 --- a/drivers/include/linux/dmi.h +++ b/drivers/include/linux/dmi.h @@ -74,7 +74,7 @@ struct dmi_header { u8 type; u8 length; u16 handle; -}; +} __packed; struct dmi_device { struct list_head list; diff --git a/drivers/include/linux/errno.h b/drivers/include/linux/errno.h index 9d14d1a98..3abd50c64 100644 --- a/drivers/include/linux/errno.h +++ b/drivers/include/linux/errno.h @@ -22,7 +22,7 @@ #define EBADHANDLE 521 /* Illegal NFS file handle */ #define ENOTSYNC 522 /* Update synchronization mismatch */ #define EBADCOOKIE 523 /* Cookie is stale */ -#define ENOTSUPP 524 /* Operation is not supported */ +#define ENOTSUPP 524 /* Operation is not supported */ #define ETOOSMALL 525 /* Buffer or request is too small */ #define ESERVERFAULT 526 /* An untranslatable error occurred */ #define EBADTYPE 527 /* Type not supported by server */ diff --git a/drivers/include/linux/fb.h b/drivers/include/linux/fb.h index bf536d6c1..cf69b2f2c 100644 --- a/drivers/include/linux/fb.h +++ b/drivers/include/linux/fb.h @@ -1111,7 +1111,9 @@ extern int fb_videomode_from_videomode(const struct videomode *vm, struct fb_videomode *fbmode); /* drivers/video/modedb.c */ -#define VESA_MODEDB_SIZE 34 +#define VESA_MODEDB_SIZE 43 +#define DMT_SIZE 0x50 + extern void fb_var_to_videomode(struct fb_videomode *mode, const struct fb_var_screeninfo *var); extern void fb_videomode_to_var(struct fb_var_screeninfo *var, @@ -1162,9 +1164,17 @@ struct fb_videomode { u32 flag; }; +struct dmt_videomode { + u32 dmt_id; + u32 std_2byte_code; + u32 cvt_3byte_code; + const struct fb_videomode *mode; +}; + extern const char *fb_mode_option; extern const struct fb_videomode vesa_modes[]; extern const struct fb_videomode cea_modes[64]; +extern const struct dmt_videomode dmt_modes[]; struct fb_modelist { struct list_head list; @@ -1178,4 +1188,16 @@ extern int fb_find_mode(struct fb_var_screeninfo *var, const struct fb_videomode *default_mode, unsigned int default_bpp); +/* Convenience logging macros */ +#define fb_err(fb_info, fmt, ...) \ + pr_err("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__) +#define fb_notice(fb_info, fmt, ...) \ + pr_notice("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__) +#define fb_warn(fb_info, fmt, ...) \ + pr_warn("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__) +#define fb_info(fb_info, fmt, ...) \ + pr_info("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__) +#define fb_dbg(fb_info, fmt, ...) 
\ + pr_debug("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__) + #endif /* _LINUX_FB_H */ diff --git a/drivers/include/linux/fence.h b/drivers/include/linux/fence.h index 493539813..f964b05bb 100644 --- a/drivers/include/linux/fence.h +++ b/drivers/include/linux/fence.h @@ -77,7 +77,7 @@ struct fence { spinlock_t *lock; unsigned context, seqno; unsigned long flags; -// ktime_t timestamp; + ktime_t timestamp; int status; }; @@ -279,6 +279,22 @@ fence_is_signaled(struct fence *fence) return false; } +/** + * fence_is_later - return if f1 is chronologically later than f2 + * @f1: [in] the first fence from the same context + * @f2: [in] the second fence from the same context + * + * Returns true if f1 is chronologically later than f2. Both fences must be + * from the same context, since a seqno is not re-used across contexts. + */ +static inline bool fence_is_later(struct fence *f1, struct fence *f2) +{ + if (WARN_ON(f1->context != f2->context)) + return false; + + return f1->seqno - f2->seqno < INT_MAX; +} + /** * fence_later - return the chronologically later fence * @f1: [in] the first fence from the same context @@ -298,14 +314,15 @@ static inline struct fence *fence_later(struct fence *f1, struct fence *f2) * set if enable_signaling wasn't called, and enabling that here is * overkill. */ - if (f2->seqno - f1->seqno <= INT_MAX) - return fence_is_signaled(f2) ? NULL : f2; - else + if (fence_is_later(f1, f2)) return fence_is_signaled(f1) ? NULL : f1; + else + return fence_is_signaled(f2) ? NULL : f2; } signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout); - +signed long fence_wait_any_timeout(struct fence **fences, uint32_t count, + bool intr, signed long timeout); /** * fence_wait - sleep until the fence gets signaled diff --git a/drivers/include/linux/firmware.h b/drivers/include/linux/firmware.h index 109afddbf..9f1eb081b 100644 --- a/drivers/include/linux/firmware.h +++ b/drivers/include/linux/firmware.h @@ -1,10 +1,11 @@ #ifndef _LINUX_FIRMWARE_H #define _LINUX_FIRMWARE_H -#include #include #include #include +#include +#include #define FW_ACTION_NOHOTPLUG 0 #define FW_ACTION_HOTPLUG 1 diff --git a/drivers/include/linux/gfp.h b/drivers/include/linux/gfp.h index ae3783265..ac4d70f07 100644 --- a/drivers/include/linux/gfp.h +++ b/drivers/include/linux/gfp.h @@ -13,7 +13,7 @@ struct vm_area_struct; #define ___GFP_HIGHMEM 0x02u #define ___GFP_DMA32 0x04u #define ___GFP_MOVABLE 0x08u -#define ___GFP_WAIT 0x10u +#define ___GFP_RECLAIMABLE 0x10u #define ___GFP_HIGH 0x20u #define ___GFP_IO 0x40u #define ___GFP_FS 0x80u @@ -28,17 +28,17 @@ struct vm_area_struct; #define ___GFP_NOMEMALLOC 0x10000u #define ___GFP_HARDWALL 0x20000u #define ___GFP_THISNODE 0x40000u -#define ___GFP_RECLAIMABLE 0x80000u +#define ___GFP_ATOMIC 0x80000u +#define ___GFP_NOACCOUNT 0x100000u #define ___GFP_NOTRACK 0x200000u -#define ___GFP_NO_KSWAPD 0x400000u +#define ___GFP_DIRECT_RECLAIM 0x400000u #define ___GFP_OTHER_NODE 0x800000u #define ___GFP_WRITE 0x1000000u +#define ___GFP_KSWAPD_RECLAIM 0x2000000u /* If the above are modified, __GFP_BITS_SHIFT may need updating */ /* - * GFP bitmasks.. - * - * Zone modifiers (see linux/mmzone.h - low three bits) + * Physical address zone modifiers (see linux/mmzone.h - low four bits) * * Do not put any conditional on these. If necessary modify the definitions * without the underscores and use them consistently. 
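Worth spelling out why fence_is_later() subtracts seqnos instead of comparing them: the counters are 32-bit and wrap, and unsigned subtraction stays correct across the wrap. A small sketch:

    /* Same context, wrapped counter: f1->seqno == 2, f2->seqno == 0xfffffffe.
     * 2 - 0xfffffffe == 4 (mod 2^32), which is < INT_MAX, so f1 is later. */
    static struct fence *newer(struct fence *f1, struct fence *f2)
    {
            return fence_is_later(f1, f2) ? f1 : f2;
    }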
The definitions here may * be used in bit comparisons. */ @@ -48,109 +48,218 @@ struct vm_area_struct; #define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM) #define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32) -#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* Page is movable */ +#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */ #define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE) + /* - * Action modifiers - doesn't change the zoning + * Page mobility and placement hints + * + * These flags provide hints about how mobile the page is. Pages with similar + * mobility are placed within the same pageblocks to minimise problems due + * to external fragmentation. + * + * __GFP_MOVABLE (also a zone modifier) indicates that the page can be + * moved by page migration during memory compaction or can be reclaimed. + * + * __GFP_RECLAIMABLE is used for slab allocations that specify + * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers. + * + * __GFP_WRITE indicates the caller intends to dirty the page. Where possible, + * these pages will be spread between local zones to avoid all the dirty + * pages being in one zone (fair zone allocation policy). + * + * __GFP_HARDWALL enforces the cpuset memory allocation policy. + * + * __GFP_THISNODE forces the allocation to be satisfied from the requested + * node with no fallbacks or placement policy enforcements. + */ +#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) +#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) +#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) +#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE) + +/* + * Watermark modifiers -- controls access to emergency reserves + * + * __GFP_HIGH indicates that the caller is high-priority and that granting + * the request is necessary before the system can make forward progress. + * For example, creating an IO context to clean pages. + * + * __GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is + * high priority. Users are typically interrupt handlers. This may be + * used in conjunction with __GFP_HIGH. + * + * __GFP_MEMALLOC allows access to all memory. This should only be used when + * the caller guarantees the allocation will allow more memory to be freed + * very shortly e.g. process exiting or swapping. Users should either be + * the MM or co-ordinate closely with the VM (e.g. swap over NFS). + * + * __GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves. + * This takes precedence over the __GFP_MEMALLOC flag if both are set. + * + * __GFP_NOACCOUNT ignores the accounting for kmemcg limit enforcement. + */ +#define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC) +#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) +#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC) +#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) +#define __GFP_NOACCOUNT ((__force gfp_t)___GFP_NOACCOUNT) + +/* + * Reclaim modifiers + * + * __GFP_IO can start physical IO. + * + * __GFP_FS can call down to the low-level FS. Clearing the flag avoids the + * allocator recursing into the filesystem which might already be holding + * locks. + * + * __GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim. + * This flag can be cleared to avoid unnecessary delays when a fallback + * option is available. + * + * __GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when + * the low watermark is reached and have it reclaim pages until the high + * watermark is reached.
A caller may wish to clear this flag when fallback + * options are available and the reclaim is likely to disrupt the system. The + * canonical example is THP allocation where a fallback is cheap but + * reclaim/compaction may cause indirect stalls. + * + * __GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim. * * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt - * _might_ fail. This depends upon the particular VM implementation. + * _might_ fail. This depends upon the particular VM implementation. * * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller - * cannot handle allocation failures. This modifier is deprecated and no new - * users should be added. + * cannot handle allocation failures. New users should be evaluated carefully + * (and the flag should be used only when there is no reasonable failure + * policy) but it is definitely preferable to use the flag rather than + * open-code an endless loop around the allocator. * - * __GFP_NORETRY: The VM implementation must not retry indefinitely. - * - * __GFP_MOVABLE: Flag that this page will be movable by the page migration - * mechanism or reclaimed + * __GFP_NORETRY: The VM implementation must not retry indefinitely and will + * return NULL when direct reclaim and memory compaction have failed to allow + * the allocation to succeed. The OOM killer is not called with the current + * implementation. */ -#define __GFP_WAIT ((__force gfp_t)___GFP_WAIT) /* Can wait and reschedule? */ -#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) /* Should access emergency pools? */ -#define __GFP_IO ((__force gfp_t)___GFP_IO) /* Can start physical IO? */ -#define __GFP_FS ((__force gfp_t)___GFP_FS) /* Can call down to low-level FS? */ -#define __GFP_COLD ((__force gfp_t)___GFP_COLD) /* Cache-cold page required */ -#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) /* Suppress page allocation failure warning */ -#define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT) /* See above */ -#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL) /* See above */ -#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY) /* See above */ -#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)/* Allow access to emergency reserves */ -#define __GFP_COMP ((__force gfp_t)___GFP_COMP) /* Add compound page metadata */ -#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) /* Return zeroed page on success */ -#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves.
- * This takes precedence over the - * __GFP_MEMALLOC flag if both are - * set - */ -#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */ -#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */ -#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */ -#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */ - -#define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD) -#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */ -#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */ +#define __GFP_IO ((__force gfp_t)___GFP_IO) +#define __GFP_FS ((__force gfp_t)___GFP_FS) +#define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */ +#define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */ +#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM)) +#define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT) +#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL) +#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY) /* - * This may seem redundant, but it's a way of annotating false positives vs. - * allocations that simply cannot be supported (e.g. page tables). + * Action modifiers + * + * __GFP_COLD indicates that the caller does not expect the page to be used + * in the near future. Where possible, a cache-cold page will be returned. + * + * __GFP_NOWARN suppresses allocation failure reports. + * + * __GFP_COMP adds compound page metadata. + * + * __GFP_ZERO returns a zeroed page on success. + * + * __GFP_NOTRACK avoids tracking with kmemcheck. + * + * __GFP_NOTRACK_FALSE_POSITIVE is an alias of __GFP_NOTRACK. It's a means of + * distinguishing in the source between false positives and allocations that + * cannot be supported (e.g. page tables). + * + * __GFP_OTHER_NODE is for allocations that are on a remote node but that + * should not be accounted for as a remote allocation in vmstat. A + * typical user would be khugepaged collapsing a huge page on a remote + * node. */ +#define __GFP_COLD ((__force gfp_t)___GFP_COLD) +#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) +#define __GFP_COMP ((__force gfp_t)___GFP_COMP) +#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) +#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK) +#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) -#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */ +/* Room for N __GFP_FOO bits */ +#define __GFP_BITS_SHIFT 26 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) -/* This equals 0, but use constants in case they ever change */ -#define GFP_NOWAIT (GFP_ATOMIC & ~__GFP_HIGH) -/* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */ -#define GFP_ATOMIC (__GFP_HIGH) -#define GFP_NOIO (__GFP_WAIT) -#define GFP_NOFS (__GFP_WAIT | __GFP_IO) -#define GFP_KERNEL (__GFP_WAIT | __GFP_IO | __GFP_FS) -#define GFP_TEMPORARY (__GFP_WAIT | __GFP_IO | __GFP_FS | \ +/* + * Useful GFP flag combinations that are commonly used. It is recommended + * that subsystems start with one of these combinations and then set/clear + * __GFP_FOO flags as necessary. + * + * GFP_ATOMIC users cannot sleep and need the allocation to succeed. 
A lower + * watermark is applied to allow access to "atomic reserves" + * + * GFP_KERNEL is typical for kernel-internal allocations. The caller requires + * ZONE_NORMAL or a lower zone for direct access but can direct reclaim. + * + * GFP_NOWAIT is for kernel allocations that should not stall for direct + * reclaim, start physical IO or use any filesystem callback. + * + * GFP_NOIO will use direct reclaim to discard clean pages or slab pages + * that do not require the starting of any physical IO. + * + * GFP_NOFS will use direct reclaim but will not use any filesystem interfaces. + * + * GFP_USER is for userspace allocations that also need to be directly + * accessible by the kernel or hardware. It is typically used by hardware + * for buffers that are mapped to userspace (e.g. graphics) that hardware + * still must DMA to. cpuset limits are enforced for these allocations. + * + * GFP_DMA exists for historical reasons and should be avoided where possible. + * The flag indicates that the caller requires that the lowest zone be + * used (ZONE_DMA or 16M on x86-64). Ideally, this would be removed but + * it would require careful auditing as some users really require it and + * others use the flag to avoid lowmem reserves in ZONE_DMA and treat the + * lowest zone as a type of emergency reserve. + * + * GFP_DMA32 is similar to GFP_DMA except that the caller requires a 32-bit + * address. + * + * GFP_HIGHUSER is for userspace allocations that may be mapped to userspace, + * do not need to be directly accessible by the kernel but that cannot + * move once in use. An example may be a hardware allocation that maps + * data directly into userspace but has no addressing limitations. + * + * GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not + * need direct access to but can use kmap() when access is required. They + * are expected to be movable via page reclaim or page migration. Typically, + * pages on the LRU would also be allocated with GFP_HIGHUSER_MOVABLE. + * + * GFP_TRANSHUGE is used for THP allocations. They are compound allocations + * that will fail quickly if memory is not available and will not wake + * kswapd on failure. + */ +#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM) +#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS) +#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM) +#define GFP_NOIO (__GFP_RECLAIM) +#define GFP_NOFS (__GFP_RECLAIM | __GFP_IO) +#define GFP_TEMPORARY (__GFP_RECLAIM | __GFP_IO | __GFP_FS | \ __GFP_RECLAIMABLE) -#define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL) +#define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL) +#define GFP_DMA __GFP_DMA +#define GFP_DMA32 __GFP_DMA32 #define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM) #define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE) -#define GFP_IOFS (__GFP_IO | __GFP_FS) -#define GFP_TRANSHUGE (GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ - __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \ - __GFP_NO_KSWAPD) +#define GFP_TRANSHUGE ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ + __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN) & \ + ~__GFP_KSWAPD_RECLAIM) -/* - * GFP_THISNODE does not perform any reclaim, you most likely want to - * use __GFP_THISNODE to allocate from a given node without fallback! 
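A sketch of how callers pick among the rebuilt combinations (kmalloc() assumed from linux/slab.h):

    static void *grab(unsigned long len, int in_atomic_path)
    {
            if (in_atomic_path)
                    return kmalloc(len, GFP_ATOMIC);  /* no sleeping; may dip into
                                                         atomic reserves */
            return kmalloc(len, GFP_KERNEL);          /* may sleep and direct-reclaim */
    }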
- */ -#ifdef CONFIG_NUMA -#define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY) -#else -#define GFP_THISNODE ((__force gfp_t)0) -#endif - -/* This mask makes up all the page movable related flags */ +/* Convert GFP flags to their corresponding migrate type */ #define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE) +#define GFP_MOVABLE_SHIFT 3 -/* Control page allocator reclaim behavior */ -#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\ - __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\ - __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC) - -/* Control slab gfp mask during early boot */ -#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS)) - -/* Control allocation constraints */ -#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE) - -/* Do not use these with a slab allocator */ -#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK) - -/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some - platforms, used as appropriate on others */ - -#define GFP_DMA __GFP_DMA - -/* 4GB DMA on some platforms */ -#define GFP_DMA32 __GFP_DMA32 +#undef GFP_MOVABLE_MASK +#undef GFP_MOVABLE_SHIFT +static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags) +{ + return gfp_flags & __GFP_DIRECT_RECLAIM; +} #ifdef CONFIG_HIGHMEM #define OPT_ZONE_HIGHMEM ZONE_HIGHMEM diff --git a/drivers/include/linux/gpio/consumer.h b/drivers/include/linux/gpio/consumer.h new file mode 100644 index 000000000..fb0fde686 --- /dev/null +++ b/drivers/include/linux/gpio/consumer.h @@ -0,0 +1,455 @@ +#ifndef __LINUX_GPIO_CONSUMER_H +#define __LINUX_GPIO_CONSUMER_H + +#include +#include +#include + +struct device; + +/** + * Opaque descriptor for a GPIO. These are obtained using gpiod_get() and are + * preferable to the old integer-based handles. + * + * Contrary to integers, a pointer to a gpio_desc is guaranteed to be valid + * until the GPIO is released. + */ +struct gpio_desc; + +/** + * Struct containing an array of descriptors that can be obtained using + * gpiod_get_array(). + */ +struct gpio_descs { + unsigned int ndescs; + struct gpio_desc *desc[]; +}; + +#define GPIOD_FLAGS_BIT_DIR_SET BIT(0) +#define GPIOD_FLAGS_BIT_DIR_OUT BIT(1) +#define GPIOD_FLAGS_BIT_DIR_VAL BIT(2) + +/** + * Optional flags that can be passed to one of gpiod_* to configure direction + * and output value. These values cannot be OR'd. 
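A consumer-side sketch using the descriptor API and the gpiod_flags defined just below; the "reset" function name is hypothetical, and udelay() is assumed from linux/delay.h:

    static int board_reset(struct device *dev)
    {
            struct gpio_desc *rst = gpiod_get(dev, "reset", GPIOD_OUT_LOW);

            if (IS_ERR(rst))
                    return PTR_ERR(rst);

            gpiod_set_value(rst, 1);    /* assert reset */
            udelay(10);
            gpiod_set_value(rst, 0);    /* release */
            gpiod_put(rst);
            return 0;
    }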
+ */ +enum gpiod_flags { + GPIOD_ASIS = 0, + GPIOD_IN = GPIOD_FLAGS_BIT_DIR_SET, + GPIOD_OUT_LOW = GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT, + GPIOD_OUT_HIGH = GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT | + GPIOD_FLAGS_BIT_DIR_VAL, +}; + +#ifdef CONFIG_GPIOLIB + +/* Return the number of GPIOs associated with a device / function */ +int gpiod_count(struct device *dev, const char *con_id); + +/* Acquire and dispose GPIOs */ +struct gpio_desc *__must_check gpiod_get(struct device *dev, + const char *con_id, + enum gpiod_flags flags); +struct gpio_desc *__must_check gpiod_get_index(struct device *dev, + const char *con_id, + unsigned int idx, + enum gpiod_flags flags); +struct gpio_desc *__must_check gpiod_get_optional(struct device *dev, + const char *con_id, + enum gpiod_flags flags); +struct gpio_desc *__must_check gpiod_get_index_optional(struct device *dev, + const char *con_id, + unsigned int index, + enum gpiod_flags flags); +struct gpio_descs *__must_check gpiod_get_array(struct device *dev, + const char *con_id, + enum gpiod_flags flags); +struct gpio_descs *__must_check gpiod_get_array_optional(struct device *dev, + const char *con_id, + enum gpiod_flags flags); +void gpiod_put(struct gpio_desc *desc); +void gpiod_put_array(struct gpio_descs *descs); + +struct gpio_desc *__must_check devm_gpiod_get(struct device *dev, + const char *con_id, + enum gpiod_flags flags); +struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev, + const char *con_id, + unsigned int idx, + enum gpiod_flags flags); +struct gpio_desc *__must_check devm_gpiod_get_optional(struct device *dev, + const char *con_id, + enum gpiod_flags flags); +struct gpio_desc *__must_check +devm_gpiod_get_index_optional(struct device *dev, const char *con_id, + unsigned int index, enum gpiod_flags flags); +struct gpio_descs *__must_check devm_gpiod_get_array(struct device *dev, + const char *con_id, + enum gpiod_flags flags); +struct gpio_descs *__must_check +devm_gpiod_get_array_optional(struct device *dev, const char *con_id, + enum gpiod_flags flags); +void devm_gpiod_put(struct device *dev, struct gpio_desc *desc); +void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs); + +int gpiod_get_direction(struct gpio_desc *desc); +int gpiod_direction_input(struct gpio_desc *desc); +int gpiod_direction_output(struct gpio_desc *desc, int value); +int gpiod_direction_output_raw(struct gpio_desc *desc, int value); + +/* Value get/set from non-sleeping context */ +int gpiod_get_value(const struct gpio_desc *desc); +void gpiod_set_value(struct gpio_desc *desc, int value); +void gpiod_set_array_value(unsigned int array_size, + struct gpio_desc **desc_array, int *value_array); +int gpiod_get_raw_value(const struct gpio_desc *desc); +void gpiod_set_raw_value(struct gpio_desc *desc, int value); +void gpiod_set_raw_array_value(unsigned int array_size, + struct gpio_desc **desc_array, + int *value_array); + +/* Value get/set from sleeping context */ +int gpiod_get_value_cansleep(const struct gpio_desc *desc); +void gpiod_set_value_cansleep(struct gpio_desc *desc, int value); +void gpiod_set_array_value_cansleep(unsigned int array_size, + struct gpio_desc **desc_array, + int *value_array); +int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc); +void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value); +void gpiod_set_raw_array_value_cansleep(unsigned int array_size, + struct gpio_desc **desc_array, + int *value_array); + +int gpiod_set_debounce(struct gpio_desc *desc, unsigned 
debounce); + +int gpiod_is_active_low(const struct gpio_desc *desc); +int gpiod_cansleep(const struct gpio_desc *desc); + +int gpiod_to_irq(const struct gpio_desc *desc); + +/* Convert between the old gpio_ and new gpiod_ interfaces */ +struct gpio_desc *gpio_to_desc(unsigned gpio); +int desc_to_gpio(const struct gpio_desc *desc); + +/* Child properties interface */ +struct fwnode_handle; + +struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, + const char *propname); +struct gpio_desc *devm_get_gpiod_from_child(struct device *dev, + const char *con_id, + struct fwnode_handle *child); +#else /* CONFIG_GPIOLIB */ + +static inline int gpiod_count(struct device *dev, const char *con_id) +{ + return 0; +} + +static inline struct gpio_desc *__must_check gpiod_get(struct device *dev, + const char *con_id, + enum gpiod_flags flags) +{ + return ERR_PTR(-ENOSYS); +} +static inline struct gpio_desc *__must_check +gpiod_get_index(struct device *dev, + const char *con_id, + unsigned int idx, + enum gpiod_flags flags) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct gpio_desc *__must_check +gpiod_get_optional(struct device *dev, const char *con_id, + enum gpiod_flags flags) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct gpio_desc *__must_check +gpiod_get_index_optional(struct device *dev, const char *con_id, + unsigned int index, enum gpiod_flags flags) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct gpio_descs *__must_check +gpiod_get_array(struct device *dev, const char *con_id, + enum gpiod_flags flags) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct gpio_descs *__must_check +gpiod_get_array_optional(struct device *dev, const char *con_id, + enum gpiod_flags flags) +{ + return ERR_PTR(-ENOSYS); +} + +static inline void gpiod_put(struct gpio_desc *desc) +{ + might_sleep(); + + /* GPIO can never have been requested */ + WARN_ON(1); +} + +static inline void gpiod_put_array(struct gpio_descs *descs) +{ + might_sleep(); + + /* GPIO can never have been requested */ + WARN_ON(1); +} + +static inline struct gpio_desc *__must_check +devm_gpiod_get(struct device *dev, + const char *con_id, + enum gpiod_flags flags) +{ + return ERR_PTR(-ENOSYS); +} +static inline +struct gpio_desc *__must_check +devm_gpiod_get_index(struct device *dev, + const char *con_id, + unsigned int idx, + enum gpiod_flags flags) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct gpio_desc *__must_check +devm_gpiod_get_optional(struct device *dev, const char *con_id, + enum gpiod_flags flags) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct gpio_desc *__must_check +devm_gpiod_get_index_optional(struct device *dev, const char *con_id, + unsigned int index, enum gpiod_flags flags) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct gpio_descs *__must_check +devm_gpiod_get_array(struct device *dev, const char *con_id, + enum gpiod_flags flags) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct gpio_descs *__must_check +devm_gpiod_get_array_optional(struct device *dev, const char *con_id, + enum gpiod_flags flags) +{ + return ERR_PTR(-ENOSYS); +} + +static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc) +{ + might_sleep(); + + /* GPIO can never have been requested */ + WARN_ON(1); +} + +static inline void devm_gpiod_put_array(struct device *dev, + struct gpio_descs *descs) +{ + might_sleep(); + + /* GPIO can never have been requested */ + WARN_ON(1); +} + + +static inline int gpiod_get_direction(const struct gpio_desc *desc) +{ + /* GPIO can 
never have been requested */ + WARN_ON(1); + return -ENOSYS; +} +static inline int gpiod_direction_input(struct gpio_desc *desc) +{ + /* GPIO can never have been requested */ + WARN_ON(1); + return -ENOSYS; +} +static inline int gpiod_direction_output(struct gpio_desc *desc, int value) +{ + /* GPIO can never have been requested */ + WARN_ON(1); + return -ENOSYS; +} +static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value) +{ + /* GPIO can never have been requested */ + WARN_ON(1); + return -ENOSYS; +} + + +static inline int gpiod_get_value(const struct gpio_desc *desc) +{ + /* GPIO can never have been requested */ + WARN_ON(1); + return 0; +} +static inline void gpiod_set_value(struct gpio_desc *desc, int value) +{ + /* GPIO can never have been requested */ + WARN_ON(1); +} +static inline void gpiod_set_array_value(unsigned int array_size, + struct gpio_desc **desc_array, + int *value_array) +{ + /* GPIO can never have been requested */ + WARN_ON(1); +} +static inline int gpiod_get_raw_value(const struct gpio_desc *desc) +{ + /* GPIO can never have been requested */ + WARN_ON(1); + return 0; +} +static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value) +{ + /* GPIO can never have been requested */ + WARN_ON(1); +} +static inline void gpiod_set_raw_array_value(unsigned int array_size, + struct gpio_desc **desc_array, + int *value_array) +{ + /* GPIO can never have been requested */ + WARN_ON(1); +} + +static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc) +{ + /* GPIO can never have been requested */ + WARN_ON(1); + return 0; +} +static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value) +{ + /* GPIO can never have been requested */ + WARN_ON(1); +} +static inline void gpiod_set_array_value_cansleep(unsigned int array_size, + struct gpio_desc **desc_array, + int *value_array) +{ + /* GPIO can never have been requested */ + WARN_ON(1); +} +static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc) +{ + /* GPIO can never have been requested */ + WARN_ON(1); + return 0; +} +static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, + int value) +{ + /* GPIO can never have been requested */ + WARN_ON(1); +} +static inline void gpiod_set_raw_array_value_cansleep(unsigned int array_size, + struct gpio_desc **desc_array, + int *value_array) +{ + /* GPIO can never have been requested */ + WARN_ON(1); +} + +static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce) +{ + /* GPIO can never have been requested */ + WARN_ON(1); + return -ENOSYS; +} + +static inline int gpiod_is_active_low(const struct gpio_desc *desc) +{ + /* GPIO can never have been requested */ + WARN_ON(1); + return 0; +} +static inline int gpiod_cansleep(const struct gpio_desc *desc) +{ + /* GPIO can never have been requested */ + WARN_ON(1); + return 0; +} + +static inline int gpiod_to_irq(const struct gpio_desc *desc) +{ + /* GPIO can never have been requested */ + WARN_ON(1); + return -EINVAL; +} + +static inline struct gpio_desc *gpio_to_desc(unsigned gpio) +{ + return ERR_PTR(-EINVAL); +} + +static inline int desc_to_gpio(const struct gpio_desc *desc) +{ + /* GPIO can never have been requested */ + WARN_ON(1); + return -EINVAL; +} + +/* Child properties interface */ +struct fwnode_handle; + +static inline struct gpio_desc *fwnode_get_named_gpiod( + struct fwnode_handle *fwnode, const char *propname) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct gpio_desc *devm_get_gpiod_from_child( + struct 
device *dev, const char *con_id, struct fwnode_handle *child) +{ + return ERR_PTR(-ENOSYS); +} + +#endif /* CONFIG_GPIOLIB */ + +#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS) + +int gpiod_export(struct gpio_desc *desc, bool direction_may_change); +int gpiod_export_link(struct device *dev, const char *name, + struct gpio_desc *desc); +void gpiod_unexport(struct gpio_desc *desc); + +#else /* CONFIG_GPIOLIB && CONFIG_GPIO_SYSFS */ + +static inline int gpiod_export(struct gpio_desc *desc, + bool direction_may_change) +{ + return -ENOSYS; +} + +static inline int gpiod_export_link(struct device *dev, const char *name, + struct gpio_desc *desc) +{ + return -ENOSYS; +} + +static inline void gpiod_unexport(struct gpio_desc *desc) +{ +} + +#endif /* CONFIG_GPIOLIB && CONFIG_GPIO_SYSFS */ + +#endif diff --git a/drivers/include/linux/hdmi.h b/drivers/include/linux/hdmi.h index cbb5790a3..e9744202f 100644 --- a/drivers/include/linux/hdmi.h +++ b/drivers/include/linux/hdmi.h @@ -25,6 +25,7 @@ #define __LINUX_HDMI_H_ #include +#include enum hdmi_infoframe_type { HDMI_INFOFRAME_TYPE_VENDOR = 0x81, @@ -52,12 +53,18 @@ enum hdmi_colorspace { HDMI_COLORSPACE_RGB, HDMI_COLORSPACE_YUV422, HDMI_COLORSPACE_YUV444, + HDMI_COLORSPACE_YUV420, + HDMI_COLORSPACE_RESERVED4, + HDMI_COLORSPACE_RESERVED5, + HDMI_COLORSPACE_RESERVED6, + HDMI_COLORSPACE_IDO_DEFINED, }; enum hdmi_scan_mode { HDMI_SCAN_MODE_NONE, HDMI_SCAN_MODE_OVERSCAN, HDMI_SCAN_MODE_UNDERSCAN, + HDMI_SCAN_MODE_RESERVED, }; enum hdmi_colorimetry { @@ -71,6 +78,7 @@ enum hdmi_picture_aspect { HDMI_PICTURE_ASPECT_NONE, HDMI_PICTURE_ASPECT_4_3, HDMI_PICTURE_ASPECT_16_9, + HDMI_PICTURE_ASPECT_RESERVED, }; enum hdmi_active_aspect { @@ -92,12 +100,18 @@ enum hdmi_extended_colorimetry { HDMI_EXTENDED_COLORIMETRY_S_YCC_601, HDMI_EXTENDED_COLORIMETRY_ADOBE_YCC_601, HDMI_EXTENDED_COLORIMETRY_ADOBE_RGB, + + /* The following EC values are only defined in CEA-861-F. */ + HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM, + HDMI_EXTENDED_COLORIMETRY_BT2020, + HDMI_EXTENDED_COLORIMETRY_RESERVED, }; enum hdmi_quantization_range { HDMI_QUANTIZATION_RANGE_DEFAULT, HDMI_QUANTIZATION_RANGE_LIMITED, HDMI_QUANTIZATION_RANGE_FULL, + HDMI_QUANTIZATION_RANGE_RESERVED, }; /* non-uniform picture scaling */ @@ -114,7 +128,7 @@ enum hdmi_ycc_quantization_range { }; enum hdmi_content_type { - HDMI_CONTENT_TYPE_NONE, + HDMI_CONTENT_TYPE_GRAPHICS, HDMI_CONTENT_TYPE_PHOTO, HDMI_CONTENT_TYPE_CINEMA, HDMI_CONTENT_TYPE_GAME, @@ -194,6 +208,7 @@ enum hdmi_audio_coding_type { HDMI_AUDIO_CODING_TYPE_MLP, HDMI_AUDIO_CODING_TYPE_DST, HDMI_AUDIO_CODING_TYPE_WMA_PRO, + HDMI_AUDIO_CODING_TYPE_CXT, }; enum hdmi_audio_sample_size { @@ -215,10 +230,25 @@ enum hdmi_audio_sample_frequency { }; enum hdmi_audio_coding_type_ext { - HDMI_AUDIO_CODING_TYPE_EXT_STREAM, + /* Refer to Audio Coding Type (CT) field in Data Byte 1 */ + HDMI_AUDIO_CODING_TYPE_EXT_CT, + + /* + * The next three CXT values are defined in CEA-861-E only. + * They do not exist in older versions, and in CEA-861-F they are + * defined as 'Not in use'. + */ HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC, HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC_V2, HDMI_AUDIO_CODING_TYPE_EXT_MPEG_SURROUND, + + /* The following CXT values are only defined in CEA-861-F. 
*/ + HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC, + HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_V2, + HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC, + HDMI_AUDIO_CODING_TYPE_EXT_DRA, + HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_SURROUND, + HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC_SURROUND = 10, }; struct hdmi_audio_infoframe { @@ -299,5 +329,8 @@ union hdmi_infoframe { ssize_t hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size); +int hdmi_infoframe_unpack(union hdmi_infoframe *frame, void *buffer); +void hdmi_infoframe_log(const char *level, struct device *dev, + union hdmi_infoframe *frame); #endif /* _DRM_HDMI_H */ diff --git a/drivers/include/linux/i2c.h b/drivers/include/linux/i2c.h index b9e668148..0dc397fbb 100644 --- a/drivers/include/linux/i2c.h +++ b/drivers/include/linux/i2c.h @@ -26,11 +26,8 @@ #ifndef _LINUX_I2C_H #define _LINUX_I2C_H -#include -#ifdef __KERNEL__ -#include -#include #include +#include /* for struct device */ #include /* for completion */ #include #include @@ -61,8 +58,6 @@ extern int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, * @probe: Callback for device binding * @remove: Callback for device unbinding * @shutdown: Callback for device shutdown - * @suspend: Callback for device suspend - * @resume: Callback for device resume * @alert: Alert callback, for example for the SMBus alert protocol * @command: Callback for bus-wide signaling (optional) * @driver: Device driver model driver @@ -105,8 +100,6 @@ struct i2c_driver { /* driver model interfaces that don't relate to enumeration */ void (*shutdown)(struct i2c_client *); -// int (*suspend)(struct i2c_client *, pm_message_t mesg); - int (*resume)(struct i2c_client *); /* Alert callback, for example for the SMBus alert protocol. * The format and meaning of the data value depends on the protocol. @@ -166,10 +159,10 @@ extern struct i2c_client *i2c_verify_client(struct device *dev); extern struct i2c_adapter *i2c_verify_adapter(struct device *dev); enum i2c_slave_event { - I2C_SLAVE_REQ_READ_START, - I2C_SLAVE_REQ_READ_END, - I2C_SLAVE_REQ_WRITE_START, - I2C_SLAVE_REQ_WRITE_END, + I2C_SLAVE_READ_REQUESTED, + I2C_SLAVE_WRITE_REQUESTED, + I2C_SLAVE_READ_PROCESSED, + I2C_SLAVE_WRITE_RECEIVED, I2C_SLAVE_STOP, }; /** @@ -180,7 +173,7 @@ enum i2c_slave_event { * @platform_data: stored in i2c_client.dev.platform_data * @archdata: copied into i2c_client.dev.archdata * @of_node: pointer to OpenFirmware device node - * @acpi_node: ACPI device node + * @fwnode: device node supplied by the platform firmware * @irq: stored in i2c_client.irq * * I2C doesn't actually support hardware probing, although controllers and @@ -306,7 +299,7 @@ void i2c_unlock_adapter(struct i2c_adapter *); ((const unsigned short []){ addr, ## addrs, I2C_CLIENT_END }) -#endif /* __KERNEL__ */ + /** * struct i2c_msg - an I2C transaction segment beginning with START diff --git a/drivers/include/linux/idr.h b/drivers/include/linux/idr.h index 7de2bba57..fb66c9914 100644 --- a/drivers/include/linux/idr.h +++ b/drivers/include/linux/idr.h @@ -1,6 +1,6 @@ /* * include/linux/idr.h - * + * * 2002-10-18 written by Jim Houston jim.houston@ccur.com * Copyright (C) 2002 by Concurrent Computer Corporation * Distributed under the GNU GPL license version 2. 
@@ -32,17 +32,17 @@ struct idr_layer { int prefix; /* the ID prefix of this idr_layer */ int layer; /* distance from leaf */ struct idr_layer __rcu *ary[1<<IDR_BITS]; [...] return res->end - res->start + 1; diff --git a/drivers/include/linux/irqflags.h b/drivers/include/linux/irqflags.h index d176d658f..5dd1272d1 100644 --- a/drivers/include/linux/irqflags.h +++ b/drivers/include/linux/irqflags.h @@ -85,7 +85,7 @@ * The local_irq_*() APIs are equal to the raw_local_irq*() * if !TRACE_IRQFLAGS. */ -#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT +#ifdef CONFIG_TRACE_IRQFLAGS #define local_irq_enable() \ do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0) #define local_irq_disable() \ @@ -107,22 +107,6 @@ raw_local_irq_restore(flags); \ } \ } while (0) -#define local_save_flags(flags) \ - do { \ - raw_local_save_flags(flags); \ - } while (0) - -#define irqs_disabled_flags(flags) \ - ({ \ - raw_irqs_disabled_flags(flags); \ - }) - -#define irqs_disabled() \ - ({ \ - unsigned long _flags; \ - raw_local_save_flags(_flags); \ - raw_irqs_disabled_flags(_flags); \ - }) #define safe_halt() \ do { \ @@ -131,7 +115,7 @@ } while (0) -#else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */ +#else /* !CONFIG_TRACE_IRQFLAGS */ #define local_irq_enable() do { raw_local_irq_enable(); } while (0) #define local_irq_disable() do { raw_local_irq_disable(); } while (0) @@ -140,11 +124,28 @@ raw_local_irq_save(flags); \ } while (0) #define local_irq_restore(flags) do { raw_local_irq_restore(flags); } while (0) -#define local_save_flags(flags) do { raw_local_save_flags(flags); } while (0) -#define irqs_disabled() (raw_irqs_disabled()) -#define irqs_disabled_flags(flags) (raw_irqs_disabled_flags(flags)) #define safe_halt() do { raw_safe_halt(); } while (0) +#endif /* CONFIG_TRACE_IRQFLAGS */ + +#define local_save_flags(flags) raw_local_save_flags(flags) + +/* + * Some architectures don't define arch_irqs_disabled(), so even if either + * definition would be fine we need to use different ones for the time being + * to avoid build issues. + */ +#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT +#define irqs_disabled() \ + ({ \ + unsigned long _flags; \ + raw_local_save_flags(_flags); \ + raw_irqs_disabled_flags(_flags); \ + }) +#else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */ +#define irqs_disabled() raw_irqs_disabled() #endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */ +#define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags) + #endif diff --git a/drivers/include/linux/irqreturn.h b/drivers/include/linux/irqreturn.h index 4efaf45a6..85815756f 100644 --- a/drivers/include/linux/irqreturn.h +++ b/drivers/include/linux/irqreturn.h @@ -3,7 +3,7 @@ /** * enum irqreturn - * @IRQ_NONE interrupt was not from this device + * @IRQ_NONE interrupt was not from this device or was not handled * @IRQ_HANDLED interrupt was handled by this device * @IRQ_WAKE_THREAD handler requests to wake the handler thread */ diff --git a/drivers/include/linux/jiffies.h b/drivers/include/linux/jiffies.h index 8a97d3b15..610dd88e4 100644 --- a/drivers/include/linux/jiffies.h +++ b/drivers/include/linux/jiffies.h @@ -90,7 +90,7 @@ static inline u64 get_jiffies_64(void) #endif /* - * These inlines deal with timer wrapping correctly. You are + * These inlines deal with timer wrapping correctly. You are * strongly encouraged to use them * 1. Because people otherwise forget * 2.
Because if the timer wrap changes in future you won't have to @@ -292,11 +292,145 @@ static inline u64 jiffies_to_nsecs(const unsigned long j) return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC; } -extern unsigned long msecs_to_jiffies(const unsigned int m); -extern unsigned long usecs_to_jiffies(const unsigned int u); -extern unsigned long timespec_to_jiffies(const struct timespec *value); -extern void jiffies_to_timespec(const unsigned long jiffies, - struct timespec *value); +extern unsigned long __msecs_to_jiffies(const unsigned int m); +#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) +/* + * HZ is equal to or smaller than 1000, and 1000 is a nice round + * multiple of HZ, divide with the factor between them, but round + * upwards: + */ +static inline unsigned long _msecs_to_jiffies(const unsigned int m) +{ + return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ); +} +#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) +/* + * HZ is larger than 1000, and HZ is a nice round multiple of 1000 - + * simply multiply with the factor between them. + * + * But first make sure the multiplication result cannot overflow: + */ +static inline unsigned long _msecs_to_jiffies(const unsigned int m) +{ + if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET)) + return MAX_JIFFY_OFFSET; + return m * (HZ / MSEC_PER_SEC); +} +#else +/* + * Generic case - multiply, round and divide. But first check that if + * we are doing a net multiplication, that we wouldn't overflow: + */ +static inline unsigned long _msecs_to_jiffies(const unsigned int m) +{ + if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET)) + return MAX_JIFFY_OFFSET; + + return (MSEC_TO_HZ_MUL32 * m + MSEC_TO_HZ_ADJ32) >> MSEC_TO_HZ_SHR32; +} +#endif +/** + * msecs_to_jiffies: - convert milliseconds to jiffies + * @m: time in milliseconds + * + * conversion is done as follows: + * + * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET) + * + * - 'too large' values [that would result in larger than + * MAX_JIFFY_OFFSET values] mean 'infinite timeout' too. + * + * - all other values are converted to jiffies by either multiplying + * the input value by a factor or dividing it with a factor and + * handling any 32-bit overflows. + * for the details see __msecs_to_jiffies() + * + * msecs_to_jiffies() checks for the passed in value being a constant + * via __builtin_constant_p() allowing gcc to eliminate most of the + * code, __msecs_to_jiffies() is called if the value passed does not + * allow constant folding and the actual conversion must be done at + * runtime. + * the HZ range specific helpers _msecs_to_jiffies() are called both + * directly here and from __msecs_to_jiffies() in the case where + * constant folding is not possible. 
+ */ +static inline unsigned long msecs_to_jiffies(const unsigned int m) +{ + if (__builtin_constant_p(m)) { + if ((int)m < 0) + return MAX_JIFFY_OFFSET; + return _msecs_to_jiffies(m); + } else { + return __msecs_to_jiffies(m); + } +} + +extern unsigned long __usecs_to_jiffies(const unsigned int u); +#if !(USEC_PER_SEC % HZ) +static inline unsigned long _usecs_to_jiffies(const unsigned int u) +{ + return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ); +} +#else +static inline unsigned long _usecs_to_jiffies(const unsigned int u) +{ + return (USEC_TO_HZ_MUL32 * u + USEC_TO_HZ_ADJ32) + >> USEC_TO_HZ_SHR32; +} +#endif + +/** + * usecs_to_jiffies: - convert microseconds to jiffies + * @u: time in microseconds + * + * conversion is done as follows: + * + * - 'too large' values [that would result in larger than + * MAX_JIFFY_OFFSET values] mean 'infinite timeout' too. + * + * - all other values are converted to jiffies by either multiplying + * the input value by a factor or dividing it with a factor and + * handling any 32-bit overflows as for msecs_to_jiffies. + * + * usecs_to_jiffies() checks for the passed in value being a constant + * via __builtin_constant_p() allowing gcc to eliminate most of the + * code, __usecs_to_jiffies() is called if the value passed does not + * allow constant folding and the actual conversion must be done at + * runtime. + * the HZ range specific helpers _usecs_to_jiffies() are called both + * directly here and from __usecs_to_jiffies() in the case where + * constant folding is not possible. + */ +static __always_inline unsigned long usecs_to_jiffies(const unsigned int u) +{ + if (__builtin_constant_p(u)) { + if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET)) + return MAX_JIFFY_OFFSET; + return _usecs_to_jiffies(u); + } else { + return __usecs_to_jiffies(u); + } +} + +extern unsigned long timespec64_to_jiffies(const struct timespec64 *value); +extern void jiffies_to_timespec64(const unsigned long jiffies, + struct timespec64 *value); +static inline unsigned long timespec_to_jiffies(const struct timespec *value) +{ + struct timespec64 ts = timespec_to_timespec64(*value); + + return timespec64_to_jiffies(&ts); +} + +static inline void jiffies_to_timespec(const unsigned long jiffies, + struct timespec *value) +{ + struct timespec64 ts; + + jiffies_to_timespec64(jiffies, &ts); + *value = timespec64_to_timespec(ts); +} + extern unsigned long timeval_to_jiffies(const struct timeval *value); extern void jiffies_to_timeval(const unsigned long jiffies, struct timeval *value); diff --git a/drivers/include/linux/kernel.h b/drivers/include/linux/kernel.h index 6bc966792..276950253 100644 --- a/drivers/include/linux/kernel.h +++ b/drivers/include/linux/kernel.h @@ -17,15 +17,15 @@ #define USHRT_MAX ((u16)(~0U)) #define SHRT_MAX ((s16)(USHRT_MAX>>1)) #define SHRT_MIN ((s16)(-SHRT_MAX - 1)) -#define INT_MAX ((int)(~0U>>1)) -#define INT_MIN (-INT_MAX - 1) -#define UINT_MAX (~0U) -#define LONG_MAX ((long)(~0UL>>1)) -#define LONG_MIN (-LONG_MAX - 1) -#define ULONG_MAX (~0UL) -#define LLONG_MAX ((long long)(~0ULL>>1)) -#define LLONG_MIN (-LLONG_MAX - 1) -#define ULLONG_MAX (~0ULL) +#define INT_MAX ((int)(~0U>>1)) +#define INT_MIN (-INT_MAX - 1) +#define UINT_MAX (~0U) +#define LONG_MAX ((long)(~0UL>>1)) +#define LONG_MIN (-LONG_MAX - 1) +#define ULONG_MAX (~0UL) +#define LLONG_MAX ((long long)(~0ULL>>1)) +#define LLONG_MIN (-LLONG_MAX - 1) +#define ULLONG_MAX (~0ULL) #define SIZE_MAX (~(size_t)0) #define U8_MAX ((u8)~0U) @@ -47,8 +47,8 @@ #define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask)) -#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) -#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0) +#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) +#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0) #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) @@ -74,11 +74,11 @@ #endif /* The `const' in roundup() prevents gcc-3.3 from calling __divdi3 */ -#define roundup(x, y) ( \ -{ \ - const typeof(y) __y = y; \ - (((x) + (__y - 1)) / __y) * __y; \ -} \ +#define roundup(x, y) ( \ +{ \ + const typeof(y) __y = y; \ + (((x) + (__y - 1)) / __y) * __y; \ +} \ ) #define rounddown(x, y) ( \ { \ @@ -92,15 +92,27 @@ * to closest integer. Result is undefined for negative divisors and * for negative dividends if the divisor variable type is unsigned. */ -#define DIV_ROUND_CLOSEST(x, divisor)( \ -{ \ +#define DIV_ROUND_CLOSEST(x, divisor)( \ +{ \ typeof(x) __x = x; \ typeof(divisor) __d = divisor; \ (((typeof(x))-1) > 0 || \ ((typeof(divisor))-1) > 0 || (__x) > 0) ? \ (((__x) + ((__d) / 2)) / (__d)) : \ (((__x) - ((__d) / 2)) / (__d)); \ -} \ +} \ +) +/* + * Same as above but for u64 dividends. divisor must be a 32-bit + * number. + */ +#define DIV_ROUND_CLOSEST_ULL(x, divisor)( \ +{ \ + typeof(divisor) __d = divisor; \ + unsigned long long _tmp = (x) + (__d) / 2; \ + do_div(_tmp, __d); \ + _tmp; \ +} \ ) /* @@ -149,29 +161,89 @@ */ #define lower_32_bits(n) ((u32)(n)) +#ifdef CONFIG_PREEMPT_VOLUNTARY +extern int _cond_resched(void); +# define might_resched() _cond_resched() +#else +# define might_resched() do { } while (0) +#endif -/* - * abs() handles unsigned and signed longs, ints, shorts and chars. For all - * input types abs() returns a signed long. - * abs() should not be used for 64-bit types (s64, u64, long long) - use abs64() - * for those. +#ifdef CONFIG_DEBUG_ATOMIC_SLEEP + void ___might_sleep(const char *file, int line, int preempt_offset); + void __might_sleep(const char *file, int line, int preempt_offset); +/** + * might_sleep - annotation for functions that can sleep + * + * this macro will print a stack trace if it is executed in an atomic + * context (spinlock, irq-handler, ...). + * + * This is a useful debugging help to be able to catch problems early and not + * be bitten later when the calling function happens to sleep when it is not + * supposed to. */ -#define abs(x) ({ \ - long ret; \ - if (sizeof(x) == sizeof(long)) { \ - long __x = (x); \ - ret = (__x < 0) ? -__x : __x; \ - } else { \ - int __x = (x); \ - ret = (__x < 0) ? -__x : __x; \ - } \ - ret; \ - }) +# define might_sleep() \ + do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) +# define sched_annotate_sleep() (current->task_state_change = 0) +#else + static inline void ___might_sleep(const char *file, int line, + int preempt_offset) { } + static inline void __might_sleep(const char *file, int line, + int preempt_offset) { } +# define might_sleep() do { might_resched(); } while (0) +# define sched_annotate_sleep() do { } while (0) +#endif -#define abs64(x) ({ \ - s64 __x = (x); \ - (__x < 0) ? -__x : __x; \ - }) +#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0) + +/** + * abs - return absolute value of an argument + * @x: the value. If it is unsigned type, it is converted to signed type first + * (s64, long or int depending on its size). + * + * Return: an absolute value of x. 
If x is 64-bit, macro's return type is s64, + * otherwise it is signed long. + */ +#define abs(x) __builtin_choose_expr(sizeof(x) == sizeof(s64), ({ \ + s64 __x = (x); \ + (__x < 0) ? -__x : __x; \ + }), ({ \ + long ret; \ + if (sizeof(x) == sizeof(long)) { \ + long __x = (x); \ + ret = (__x < 0) ? -__x : __x; \ + } else { \ + int __x = (x); \ + ret = (__x < 0) ? -__x : __x; \ + } \ + ret; \ + })) + +/** + * reciprocal_scale - "scale" a value into range [0, ep_ro) + * @val: value + * @ep_ro: right open interval endpoint + * + * Perform a "reciprocal multiplication" in order to "scale" a value into + * range [0, ep_ro), where the upper interval endpoint is right-open. + * This is useful, e.g. for accessing an index of an array containing + * ep_ro elements. Think of it as sort of modulus, only that + * the result isn't that of modulo. ;) Note that if the initial input is a + * small value, then the result will be 0. + * + * Return: a result based on val in interval [0, ep_ro). + */ +static inline u32 reciprocal_scale(u32 val, u32 ep_ro) +{ + return (u32)(((u64) val * ep_ro) >> 32); +} + +#if defined(CONFIG_MMU) && \ + (defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)) +#define might_fault() __might_fault(__FILE__, __LINE__) +void __might_fault(const char *file, int line); +#else +static inline void might_fault(void) { } +#endif #define KERN_EMERG "<0>" /* system is unusable */ #define KERN_ALERT "<1>" /* action must be taken immediately */ @@ -181,6 +253,15 @@ #define KERN_NOTICE "<5>" /* normal but significant condition */ #define KERN_INFO "<6>" /* informational */ #define KERN_DEBUG "<7>" /* debug-level messages */ +extern unsigned long simple_strtoul(const char *,char **,unsigned int); +extern long simple_strtol(const char *,char **,unsigned int); +extern unsigned long long simple_strtoull(const char *,char **,unsigned int); +extern long long simple_strtoll(const char *,char **,unsigned int); + +extern int num_to_str(char *buf, int size, unsigned long long num); + +/* lib/printf utilities */ + extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...); extern __printf(2, 0) int vsprintf(char *buf, const char *, va_list); extern __printf(3, 4) @@ -193,7 +274,16 @@ extern __printf(3, 0) int vscnprintf(char *buf, size_t size, const char *fmt, va_list args); extern __printf(2, 3) char *kasprintf(gfp_t gfp, const char *fmt, ...); -extern char *kvasprintf(gfp_t gfp, const char *fmt, va_list args); +extern __printf(2, 0) +char *kvasprintf(gfp_t gfp, const char *fmt, va_list args); +extern __printf(2, 0) +const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args); + +extern __scanf(2, 3) +int sscanf(const char *, const char *, ...); +extern __scanf(2, 0) +int vsscanf(const char *, const char *, va_list); +extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */ enum lockdep_ok { LOCKDEP_STILL_OK, LOCKDEP_NOW_UNRELIABLE @@ -229,6 +319,7 @@ extern enum system_states { #define TAINT_OOT_MODULE 12 #define TAINT_UNSIGNED_MODULE 13 #define TAINT_SOFTLOCKUP 14 +#define TAINT_LIVEPATCH 15 extern const char hex_asc[]; #define hex_asc_lo(x) hex_asc[((x) & 0x0f)] @@ -277,12 +368,6 @@ bool mac_pton(const char *s, u8 *mac); * * Most likely, you want to use tracing_on/tracing_off.
*/ -#ifdef CONFIG_RING_BUFFER -/* trace_off_permanent stops recording with no way to bring it back */ -void tracing_off_permanent(void); -#else -static inline void tracing_off_permanent(void) { } -#endif enum ftrace_dump_mode { DUMP_NONE, @@ -426,10 +511,10 @@ do { \ __ftrace_vprintk(_THIS_IP_, fmt, vargs); \ } while (0) -extern int +extern __printf(2, 0) int __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap); -extern int +extern __printf(2, 0) int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap); extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode); @@ -449,7 +534,7 @@ int trace_printk(const char *fmt, ...) { return 0; } -static inline int +static __printf(1, 0) inline int ftrace_vprintk(const char *fmt, va_list ap) { return 0; @@ -462,17 +547,17 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } * strict type-checking.. See the * "unnecessary" pointer comparison. */ -#define min(x, y) ({ \ - typeof(x) _min1 = (x); \ - typeof(y) _min2 = (y); \ - (void) (&_min1 == &_min2); \ - _min1 < _min2 ? _min1 : _min2; }) +#define min(x, y) ({ \ + typeof(x) _min1 = (x); \ + typeof(y) _min2 = (y); \ + (void) (&_min1 == &_min2); \ + _min1 < _min2 ? _min1 : _min2; }) -#define max(x, y) ({ \ - typeof(x) _max1 = (x); \ - typeof(y) _max2 = (y); \ - (void) (&_max1 == &_max2); \ - _max1 > _max2 ? _max1 : _max2; }) +#define max(x, y) ({ \ + typeof(x) _max1 = (x); \ + typeof(y) _max2 = (y); \ + (void) (&_max1 == &_max2); \ + _max1 > _max2 ? _max1 : _max2; }) #define min3(x, y, z) min((typeof(x))min(x, y), z) #define max3(x, y, z) max((typeof(x))max(x, y), z) @@ -504,15 +589,15 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } * * Or not use min/max/clamp at all, of course. */ -#define min_t(type, x, y) ({ \ - type __min1 = (x); \ - type __min2 = (y); \ - __min1 < __min2 ? __min1: __min2; }) +#define min_t(type, x, y) ({ \ + type __min1 = (x); \ + type __min2 = (y); \ + __min1 < __min2 ? __min1: __min2; }) -#define max_t(type, x, y) ({ \ - type __max1 = (x); \ - type __max2 = (y); \ - __max1 > __max2 ? __max1: __max2; }) +#define max_t(type, x, y) ({ \ + type __max1 = (x); \ + type __max2 = (y); \ + __max1 > __max2 ? __max1: __max2; }) /** * clamp_t - return a value clamped to a given range using a given type @@ -548,14 +633,14 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } /** * container_of - cast a member of a structure out to the containing structure - * @ptr: the pointer to the member. - * @type: the type of the container struct this is embedded in. - * @member: the name of the member within the struct. + * @ptr: the pointer to the member. + * @type: the type of the container struct this is embedded in. + * @member: the name of the member within the struct. * */ -#define container_of(ptr, type, member) ({ \ - const typeof( ((type *)0)->member ) *__mptr = (ptr); \ - (type *)( (char *)__mptr - offsetof(type,member) );}) +#define container_of(ptr, type, member) ({ \ + const typeof( ((type *)0)->member ) *__mptr = (ptr); \ + (type *)( (char *)__mptr - offsetof(type,member) );}) /* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */ #ifdef CONFIG_FTRACE_MCOUNT_RECORD @@ -563,14 +648,16 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } #endif /* Permissions on a sysfs file: you didn't miss the 0 prefix did you? 
*/ -#define VERIFY_OCTAL_PERMISSIONS(perms) \ - (BUILD_BUG_ON_ZERO((perms) < 0) + \ - BUILD_BUG_ON_ZERO((perms) > 0777) + \ - /* User perms >= group perms >= other perms */ \ - BUILD_BUG_ON_ZERO(((perms) >> 6) < (((perms) >> 3) & 7)) + \ - BUILD_BUG_ON_ZERO((((perms) >> 3) & 7) < ((perms) & 7)) + \ - /* Other writable? Generally considered a bad idea. */ \ - BUILD_BUG_ON_ZERO((perms) & 2) + \ +#define VERIFY_OCTAL_PERMISSIONS(perms) \ + (BUILD_BUG_ON_ZERO((perms) < 0) + \ + BUILD_BUG_ON_ZERO((perms) > 0777) + \ + /* USER_READABLE >= GROUP_READABLE >= OTHER_READABLE */ \ + BUILD_BUG_ON_ZERO((((perms) >> 6) & 4) < (((perms) >> 3) & 4)) + \ + BUILD_BUG_ON_ZERO((((perms) >> 3) & 4) < ((perms) & 4)) + \ + /* USER_WRITABLE >= GROUP_WRITABLE */ \ + BUILD_BUG_ON_ZERO((((perms) >> 6) & 2) < (((perms) >> 3) & 2)) + \ + /* OTHER_WRITABLE? Generally considered a bad idea. */ \ + BUILD_BUG_ON_ZERO((perms) & 2) + \ (perms)) @@ -591,22 +678,6 @@ struct file struct vm_area_struct {}; struct address_space {}; -struct device -{ - struct device *parent; - void *driver_data; -}; - -static inline void dev_set_drvdata(struct device *dev, void *data) -{ - dev->driver_data = data; -} - -static inline void *dev_get_drvdata(struct device *dev) -{ - return dev->driver_data; -} - #define in_dbg_master() (0) @@ -728,9 +799,6 @@ struct pagelist { #define IS_ENABLED(a) 0 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) - - #define cpufreq_quick_get_max(x) GetCpuFreq() @@ -771,8 +839,6 @@ static inline __must_check long __copy_to_user(void __user *to, return 0; } -struct seq_file; - void *kmap(struct page *page); void *kmap_atomic(struct page *page); void kunmap(struct page *page); @@ -787,5 +853,4 @@ typedef u64 async_cookie_t; #define CONFIG_PAGE_OFFSET 0 - #endif diff --git a/drivers/include/linux/kobject.h b/drivers/include/linux/kobject.h index 5020ba322..4db6a083e 100644 --- a/drivers/include/linux/kobject.h +++ b/drivers/include/linux/kobject.h @@ -61,7 +61,7 @@ enum kobject_action { }; struct kobject { - const char *name; + const char *name; struct list_head entry; struct kobject *parent; // struct kset *kset; @@ -80,8 +80,9 @@ struct kobject { extern __printf(2, 3) int kobject_set_name(struct kobject *kobj, const char *name, ...); -extern int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, - va_list vargs); +extern __printf(2, 0) +int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, + va_list vargs); static inline const char *kobject_name(const struct kobject *kobj) { @@ -121,6 +122,7 @@ struct kobj_type { }; struct kobj_uevent_env { + char *argv[3]; char *envp[UEVENT_NUM_ENVP]; int envp_idx; char buf[UEVENT_BUFFER_SIZE]; diff --git a/drivers/include/linux/kref.h b/drivers/include/linux/kref.h index a8abd32d0..16c6de296 100644 --- a/drivers/include/linux/kref.h +++ b/drivers/include/linux/kref.h @@ -25,10 +25,147 @@ struct kref { atomic_t refcount; }; -void kref_init(struct kref *kref); -void kref_get(struct kref *kref); -int kref_put(struct kref *kref, void (*release) (struct kref *kref)); -int kref_sub(struct kref *kref, unsigned int count, - void (*release) (struct kref *kref)); +/** + * kref_init - initialize object. + * @kref: object in question. + */ +static inline void kref_init(struct kref *kref) +{ + atomic_set(&kref->refcount, 1); +} +/** + * kref_get - increment refcount for object. + * @kref: object. 
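+ * + * Illustrative use (hypothetical field name): code that publishes the object + * to another user takes its own reference first, e.g. + * + *	kref_get(&obj->refcount); + * + * and that user later drops it with kref_put().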
+ */ +static inline void kref_get(struct kref *kref) +{ + /* If refcount was 0 before incrementing then we have a race + * condition when this kref is being freed by some other thread right now. + * In this case one should use kref_get_unless_zero() + */ + WARN_ON_ONCE(atomic_inc_return(&kref->refcount) < 2); +} + +/** + * kref_sub - subtract a number of refcounts for object. + * @kref: object. + * @count: Number of refcounts to subtract. + * @release: pointer to the function that will clean up the object when the + * last reference to the object is released. + * This pointer is required, and it is not acceptable to pass kfree + * in as this function. If the caller does pass kfree to this + * function, you will be publicly mocked mercilessly by the kref + * maintainer, and anyone else who happens to notice it. You have + * been warned. + * + * Subtract @count from the refcount, and if 0, call release(). + * Return 1 if the object was removed, otherwise return 0. Beware, if this + * function returns 0, you still can not count on the kref remaining in + * memory. Only use the return value if you want to see if the kref is now + * gone, not present. + */ +static inline int kref_sub(struct kref *kref, unsigned int count, + void (*release)(struct kref *kref)) +{ + WARN_ON(release == NULL); + + if (atomic_sub_and_test((int) count, &kref->refcount)) { + release(kref); + return 1; + } + return 0; +} + +/** + * kref_put - decrement refcount for object. + * @kref: object. + * @release: pointer to the function that will clean up the object when the + * last reference to the object is released. + * This pointer is required, and it is not acceptable to pass kfree + * in as this function. If the caller does pass kfree to this + * function, you will be publicly mocked mercilessly by the kref + * maintainer, and anyone else who happens to notice it. You have + * been warned. + * + * Decrement the refcount, and if 0, call release(). + * Return 1 if the object was removed, otherwise return 0. Beware, if this + * function returns 0, you still can not count on the kref remaining in + * memory. Only use the return value if you want to see if the kref is now + * gone, not present. + */ +static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)) +{ + return kref_sub(kref, 1, release); +} + +/** + * kref_put_spinlock_irqsave - decrement refcount for object. + * @kref: object. + * @release: pointer to the function that will clean up the object when the + * last reference to the object is released. + * This pointer is required, and it is not acceptable to pass kfree + * in as this function. + * @lock: lock to take in release case + * + * Behaves identically to kref_put with one exception. If the reference count + * drops to zero, the lock will be taken atomically wrt dropping the reference + * count. The release function has to call spin_unlock() without _irqrestore.
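+ * + * A sketch of a conforming release callback (hypothetical names): + * + *	static void obj_release(struct kref *kref) + *	{ + *		struct obj *o = container_of(kref, struct obj, refcount); + * + *		list_del(&o->node); + *		spin_unlock(&obj_lock); + *		kfree(o); + *	} + * + * The saved IRQ flags are restored here after release() returns, which is why + * release() must not use spin_unlock_irqrestore() itself.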
+ */ +static inline int kref_put_spinlock_irqsave(struct kref *kref, + void (*release)(struct kref *kref), + spinlock_t *lock) +{ + unsigned long flags; + + WARN_ON(release == NULL); + if (atomic_add_unless(&kref->refcount, -1, 1)) + return 0; + spin_lock_irqsave(lock, flags); + if (atomic_dec_and_test(&kref->refcount)) { + release(kref); + local_irq_restore(flags); + return 1; + } + spin_unlock_irqrestore(lock, flags); + return 0; +} + +static inline int kref_put_mutex(struct kref *kref, + void (*release)(struct kref *kref), + struct mutex *lock) +{ + WARN_ON(release == NULL); + if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) { + mutex_lock(lock); + if (unlikely(!atomic_dec_and_test(&kref->refcount))) { + mutex_unlock(lock); + return 0; + } + release(kref); + return 1; + } + return 0; +} + +/** + * kref_get_unless_zero - Increment refcount for object unless it is zero. + * @kref: object. + * + * Return non-zero if the increment succeeded. Otherwise return 0. + * + * This function is intended to simplify locking around refcounting for + * objects that can be looked up from a lookup structure, and which are + * removed from that lookup structure in the object destructor. + * Operations on such objects require at least a read lock around + * lookup + kref_get, and a write lock around kref_put + remove from lookup + * structure. Furthermore, RCU implementations become extremely tricky. + * With a lookup followed by a kref_get_unless_zero *with return value check* + * locking in the kref_put path can be deferred to the actual removal from + * the lookup structure and RCU lookups become trivial. + */ +static inline int __must_check kref_get_unless_zero(struct kref *kref) +{ + return atomic_add_unless(&kref->refcount, 1, 0); +} #endif /* _KREF_H_ */ diff --git a/drivers/include/linux/ktime.h b/drivers/include/linux/ktime.h new file mode 100644 index 000000000..ba95feafd --- /dev/null +++ b/drivers/include/linux/ktime.h @@ -0,0 +1,304 @@ +/* + * include/linux/ktime.h + * + * ktime_t - nanosecond-resolution time format. + * + * Copyright(C) 2005, Thomas Gleixner + * Copyright(C) 2005, Red Hat, Inc., Ingo Molnar + * + * data type definitions, declarations, prototypes and macros. + * + * Started by: Thomas Gleixner and Ingo Molnar + * + * Credits: + * + * Roman Zippel provided the ideas and primary code snippets of + * the ktime_t union and further simplifications of the original + * code. + * + * For licencing details see kernel-base/COPYING + */ +#ifndef _LINUX_KTIME_H +#define _LINUX_KTIME_H + +#include +#include + +/* + * ktime_t: + * + * A single 64-bit variable is used to store the hrtimers + * internal representation of time values in scalar nanoseconds. The + * design plays out best on 64-bit CPUs, where most conversions are + * NOPs and most arithmetic ktime_t operations are plain arithmetic + * operations. + * + */ +union ktime { + s64 tv64; +}; + +typedef union ktime ktime_t; /* Kill this */ + +/** + * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value + * @secs: seconds to set + * @nsecs: nanoseconds to set + * + * Return: The ktime_t representation of the value. + */ +static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs) +{ + if (unlikely(secs >= KTIME_SEC_MAX)) + return (ktime_t){ .tv64 = KTIME_MAX }; + + return (ktime_t) { .tv64 = secs * NSEC_PER_SEC + (s64)nsecs }; +} + +/* Subtract two ktime_t variables. 
rem = lhs - rhs: */ +#define ktime_sub(lhs, rhs) \ + ({ (ktime_t){ .tv64 = (lhs).tv64 - (rhs).tv64 }; }) + +/* Add two ktime_t variables. res = lhs + rhs: */ +#define ktime_add(lhs, rhs) \ + ({ (ktime_t){ .tv64 = (lhs).tv64 + (rhs).tv64 }; }) + +/* + * Add a ktime_t variable and a scalar nanosecond value. + * res = kt + nsval: + */ +#define ktime_add_ns(kt, nsval) \ + ({ (ktime_t){ .tv64 = (kt).tv64 + (nsval) }; }) + +/* + * Subtract a scalar nanosecond from a ktime_t variable + * res = kt - nsval: + */ +#define ktime_sub_ns(kt, nsval) \ + ({ (ktime_t){ .tv64 = (kt).tv64 - (nsval) }; }) + +/* convert a timespec to ktime_t format: */ +static inline ktime_t timespec_to_ktime(struct timespec ts) +{ + return ktime_set(ts.tv_sec, ts.tv_nsec); +} + +/* convert a timespec64 to ktime_t format: */ +static inline ktime_t timespec64_to_ktime(struct timespec64 ts) +{ + return ktime_set(ts.tv_sec, ts.tv_nsec); +} + +/* convert a timeval to ktime_t format: */ +static inline ktime_t timeval_to_ktime(struct timeval tv) +{ + return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC); +} + +/* Map the ktime_t to timespec conversion to ns_to_timespec function */ +#define ktime_to_timespec(kt) ns_to_timespec((kt).tv64) + +/* Map the ktime_t to timespec64 conversion to ns_to_timespec64 function */ +#define ktime_to_timespec64(kt) ns_to_timespec64((kt).tv64) + +/* Map the ktime_t to timeval conversion to ns_to_timeval function */ +#define ktime_to_timeval(kt) ns_to_timeval((kt).tv64) + +/* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */ +#define ktime_to_ns(kt) ((kt).tv64) + + +/** + * ktime_equal - Compares two ktime_t variables to see if they are equal + * @cmp1: comparable1 + * @cmp2: comparable2 + * + * Compare two ktime_t variables. + * + * Return: 1 if equal. + */ +static inline int ktime_equal(const ktime_t cmp1, const ktime_t cmp2) +{ + return cmp1.tv64 == cmp2.tv64; +} + +/** + * ktime_compare - Compares two ktime_t variables for less, greater or equal + * @cmp1: comparable1 + * @cmp2: comparable2 + * + * Return: ... + * cmp1 < cmp2: return <0 + * cmp1 == cmp2: return 0 + * cmp1 > cmp2: return >0 + */ +static inline int ktime_compare(const ktime_t cmp1, const ktime_t cmp2) +{ + if (cmp1.tv64 < cmp2.tv64) + return -1; + if (cmp1.tv64 > cmp2.tv64) + return 1; + return 0; +} + +/** + * ktime_after - Compare if a ktime_t value is bigger than another one. + * @cmp1: comparable1 + * @cmp2: comparable2 + * + * Return: true if cmp1 happened after cmp2. + */ +static inline bool ktime_after(const ktime_t cmp1, const ktime_t cmp2) +{ + return ktime_compare(cmp1, cmp2) > 0; +} + +/** + * ktime_before - Compare if a ktime_t value is smaller than another one. + * @cmp1: comparable1 + * @cmp2: comparable2 + * + * Return: true if cmp1 happened before cmp2. + */ +static inline bool ktime_before(const ktime_t cmp1, const ktime_t cmp2) +{ + return ktime_compare(cmp1, cmp2) < 0; +} + +#if BITS_PER_LONG < 64 +extern s64 __ktime_divns(const ktime_t kt, s64 div); +static inline s64 ktime_divns(const ktime_t kt, s64 div) +{ + /* + * Negative divisors could cause an infinite loop, + * so bug out here. + */ + BUG_ON(div < 0); + if (__builtin_constant_p(div) && !(div >> 32)) { + s64 ns = kt.tv64; + u64 tmp = ns < 0 ? -ns : ns; + + do_div(tmp, div); + return ns < 0 ? -tmp : tmp; + } else { + return __ktime_divns(kt, div); + } +} +#else /* BITS_PER_LONG < 64 */ +static inline s64 ktime_divns(const ktime_t kt, s64 div) +{ + /* + * 32-bit implementation cannot handle negative divisors, + * so catch them on 64bit as well.
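+ * + * Typical divisors here are positive constants, e.g. the + * ktime_to_us()/ktime_to_ms() helpers below pass NSEC_PER_USEC and + * NSEC_PER_MSEC respectively.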
+ */ + WARN_ON(div < 0); + return kt.tv64 / div; +} +#endif + +static inline s64 ktime_to_us(const ktime_t kt) +{ + return ktime_divns(kt, NSEC_PER_USEC); +} + +static inline s64 ktime_to_ms(const ktime_t kt) +{ + return ktime_divns(kt, NSEC_PER_MSEC); +} + +static inline s64 ktime_us_delta(const ktime_t later, const ktime_t earlier) +{ + return ktime_to_us(ktime_sub(later, earlier)); +} + +static inline s64 ktime_ms_delta(const ktime_t later, const ktime_t earlier) +{ + return ktime_to_ms(ktime_sub(later, earlier)); +} + +static inline ktime_t ktime_add_us(const ktime_t kt, const u64 usec) +{ + return ktime_add_ns(kt, usec * NSEC_PER_USEC); +} + +static inline ktime_t ktime_add_ms(const ktime_t kt, const u64 msec) +{ + return ktime_add_ns(kt, msec * NSEC_PER_MSEC); +} + +static inline ktime_t ktime_sub_us(const ktime_t kt, const u64 usec) +{ + return ktime_sub_ns(kt, usec * NSEC_PER_USEC); +} + +extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs); + +/** + * ktime_to_timespec_cond - convert a ktime_t variable to timespec + * format only if the variable contains data + * @kt: the ktime_t variable to convert + * @ts: the timespec variable to store the result in + * + * Return: %true if there was a successful conversion, %false if kt was 0. + */ +static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt, + struct timespec *ts) +{ + if (kt.tv64) { + *ts = ktime_to_timespec(kt); + return true; + } else { + return false; + } +} + +/** + * ktime_to_timespec64_cond - convert a ktime_t variable to timespec64 + * format only if the variable contains data + * @kt: the ktime_t variable to convert + * @ts: the timespec64 variable to store the result in + * + * Return: %true if there was a successful conversion, %false if kt was 0. + */ +static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt, + struct timespec64 *ts) +{ + if (kt.tv64) { + *ts = ktime_to_timespec64(kt); + return true; + } else { + return false; + } +} + +/* + * The resolution of the clocks. The resolution value is returned in + * the clock_getres() system call to give application programmers an + * idea of the (in)accuracy of timers. Timer values are rounded up to + * these resolution values.
+ */ +#define LOW_RES_NSEC TICK_NSEC +#define KTIME_LOW_RES (ktime_t){ .tv64 = LOW_RES_NSEC } + +ktime_t ktime_get(void); + +static inline ktime_t ns_to_ktime(u64 ns) +{ + static const ktime_t ktime_zero = { .tv64 = 0 }; + + return ktime_add_ns(ktime_zero, ns); +} + +static inline ktime_t ms_to_ktime(u64 ms) +{ + static const ktime_t ktime_zero = { .tv64 = 0 }; + + return ktime_add_ms(ktime_zero, ms); +} + +static inline u64 ktime_get_raw_ns(void) +{ + return ktime_get().tv64; +} + +#endif diff --git a/drivers/include/linux/list.h b/drivers/include/linux/list.h index 290d0e502..9579ff026 100644 --- a/drivers/include/linux/list.h +++ b/drivers/include/linux/list.h @@ -204,7 +204,7 @@ static inline int list_empty(const struct list_head *head) */ static inline int list_empty_careful(const struct list_head *head) { - struct list_head *next = head->next; + struct list_head *next = head->next; return (next == head) && (next == head->prev); } @@ -445,7 +445,7 @@ static inline void list_splice_tail_init(struct list_head *list, */ #define list_for_each_entry(pos, head, member) \ for (pos = list_first_entry(head, typeof(*pos), member); \ - &pos->member != (head); \ + &pos->member != (head); \ pos = list_next_entry(pos, member)) /** @@ -456,7 +456,7 @@ static inline void list_splice_tail_init(struct list_head *list, */ #define list_for_each_entry_reverse(pos, head, member) \ for (pos = list_last_entry(head, typeof(*pos), member); \ - &pos->member != (head); \ + &pos->member != (head); \ pos = list_prev_entry(pos, member)) /** @@ -481,7 +481,7 @@ static inline void list_splice_tail_init(struct list_head *list, */ #define list_for_each_entry_continue(pos, head, member) \ for (pos = list_next_entry(pos, member); \ - &pos->member != (head); \ + &pos->member != (head); \ pos = list_next_entry(pos, member)) /** @@ -495,7 +495,7 @@ static inline void list_splice_tail_init(struct list_head *list, */ #define list_for_each_entry_continue_reverse(pos, head, member) \ for (pos = list_prev_entry(pos, member); \ - &pos->member != (head); \ + &pos->member != (head); \ pos = list_prev_entry(pos, member)) /** @@ -507,7 +507,7 @@ static inline void list_splice_tail_init(struct list_head *list, * Iterate over list of given type, continuing from current position. */ #define list_for_each_entry_from(pos, head, member) \ - for (; &pos->member != (head); \ + for (; &pos->member != (head); \ pos = list_next_entry(pos, member)) /** @@ -672,6 +672,11 @@ static inline void hlist_add_fake(struct hlist_node *n) n->pprev = &n->next; } +static inline bool hlist_fake(struct hlist_node *h) +{ + return h->pprev == &h->next; +} + /* * Move a list from one list head to another. Fixup the pprev * reference of the first entry if it exists. 
diff --git a/drivers/include/linux/lockdep.h b/drivers/include/linux/lockdep.h index 48cc79c45..598780ca8 100644 --- a/drivers/include/linux/lockdep.h +++ b/drivers/include/linux/lockdep.h @@ -130,8 +130,8 @@ enum bounce_type { }; struct lock_class_stats { - unsigned long contention_point[4]; - unsigned long contending_point[4]; + unsigned long contention_point[LOCKSTAT_POINTS]; + unsigned long contending_point[LOCKSTAT_POINTS]; struct lock_time read_waittime; struct lock_time write_waittime; struct lock_time read_holdtime; @@ -255,6 +255,7 @@ struct held_lock { unsigned int check:1; /* see lock_acquire() comment */ unsigned int hardirqs_off:1; unsigned int references:12; /* 32 bits */ + unsigned int pin_count; }; /* @@ -354,6 +355,9 @@ extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask); extern void lockdep_clear_current_reclaim_state(void); extern void lockdep_trace_alloc(gfp_t mask); +extern void lock_pin_lock(struct lockdep_map *lock); +extern void lock_unpin_lock(struct lockdep_map *lock); + # define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0, #define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0) @@ -368,6 +372,9 @@ extern void lockdep_trace_alloc(gfp_t mask); #define lockdep_recursing(tsk) ((tsk)->lockdep_recursion) +#define lockdep_pin_lock(l) lock_pin_lock(&(l)->dep_map) +#define lockdep_unpin_lock(l) lock_unpin_lock(&(l)->dep_map) + #else /* !CONFIG_LOCKDEP */ static inline void lockdep_off(void) @@ -420,6 +427,9 @@ struct lock_class_key { }; #define lockdep_recursing(tsk) (0) +#define lockdep_pin_lock(l) do { (void)(l); } while (0) +#define lockdep_unpin_lock(l) do { (void)(l); } while (0) + #endif /* !LOCKDEP */ #ifdef CONFIG_LOCK_STAT @@ -531,8 +541,13 @@ do { \ # define might_lock_read(lock) do { } while (0) #endif -#ifdef CONFIG_PROVE_RCU +#ifdef CONFIG_LOCKDEP void lockdep_rcu_suspicious(const char *file, const int line, const char *s); +#else +static inline void +lockdep_rcu_suspicious(const char *file, const int line, const char *s) +{ +} #endif #endif /* __LINUX_LOCKDEP_H */ diff --git a/drivers/include/linux/math64.h b/drivers/include/linux/math64.h index c45c089bf..6e8b5b270 100644 --- a/drivers/include/linux/math64.h +++ b/drivers/include/linux/math64.h @@ -142,6 +142,13 @@ static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift) } #endif /* mul_u64_u32_shr */ +#ifndef mul_u64_u64_shr +static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift) +{ + return (u64)(((unsigned __int128)a * mul) >> shift); +} +#endif /* mul_u64_u64_shr */ + #else #ifndef mul_u64_u32_shr @@ -161,6 +168,79 @@ static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift) } #endif /* mul_u64_u32_shr */ +#ifndef mul_u64_u64_shr +static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift) +{ + union { + u64 ll; + struct { +#ifdef __BIG_ENDIAN + u32 high, low; +#else + u32 low, high; +#endif + } l; + } rl, rm, rn, rh, a0, b0; + u64 c; + + a0.ll = a; + b0.ll = b; + + rl.ll = (u64)a0.l.low * b0.l.low; + rm.ll = (u64)a0.l.low * b0.l.high; + rn.ll = (u64)a0.l.high * b0.l.low; + rh.ll = (u64)a0.l.high * b0.l.high; + + /* + * Each of these lines computes a 64-bit intermediate result into "c", + * starting at bits 32-95. The low 32-bits go into the result of the + * multiplication, the high 32-bits are carried into the next step. 
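+	 * + * Schematically, with rl/rm/rn/rh the 64-bit partial products formed + * above: + * + *	a * b = rl.ll + ((rm.ll + rn.ll) << 32) + (rh.ll << 64)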
+ */ + rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low; + rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low; + rh.l.high = (c >> 32) + rh.l.high; + + /* + * The 128-bit result of the multiplication is in rl.ll and rh.ll, + * shift it right and throw away the high part of the result. + */ + if (shift == 0) + return rl.ll; + if (shift < 64) + return (rl.ll >> shift) | (rh.ll << (64 - shift)); + return rh.ll >> (shift & 63); +} +#endif /* mul_u64_u64_shr */ + #endif +#ifndef mul_u64_u32_div +static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor) +{ + union { + u64 ll; + struct { +#ifdef __BIG_ENDIAN + u32 high, low; +#else + u32 low, high; +#endif + } l; + } u, rl, rh; + + u.ll = a; + rl.ll = (u64)u.l.low * mul; + rh.ll = (u64)u.l.high * mul + rl.l.high; + + /* Bits 32-63 of the result will be in rh.l.low. */ + rl.l.high = do_div(rh.ll, divisor); + + /* Bits 0-31 of the result will be in rl.l.low. */ + do_div(rl.ll, divisor); + + rl.l.high = rh.l.low; + return rl.ll; +} +#endif /* mul_u64_u32_div */ + #endif /* _LINUX_MATH64_H */ diff --git a/drivers/include/linux/mod_devicetable.h b/drivers/include/linux/mod_devicetable.h index b1f27dbb1..82dc8537e 100644 --- a/drivers/include/linux/mod_devicetable.h +++ b/drivers/include/linux/mod_devicetable.h @@ -53,9 +53,9 @@ struct ieee1394_device_id { /** * struct usb_device_id - identifies USB devices for probing and hotplugging - * @match_flags: Bit mask controlling of the other fields are used to match - * against new devices. Any field except for driver_info may be used, - * although some only make sense in conjunction with other fields. + * @match_flags: Bit mask controlling which of the other fields are used to + * match against new devices. Any field except for driver_info may be + * used, although some only make sense in conjunction with other fields. * This is usually set by a USB_DEVICE_*() macro, which sets all * other fields in this structure except for driver_info. * @idVendor: USB vendor ID for a device; numbers are assigned @@ -189,6 +189,8 @@ struct css_device_id { struct acpi_device_id { __u8 id[ACPI_ID_LEN]; kernel_ulong_t driver_data; + __u32 cls; + __u32 cls_msk; }; #define PNP_ID_LEN 8 @@ -217,11 +219,18 @@ struct serio_device_id { __u8 proto; }; +struct hda_device_id { + __u32 vendor_id; + __u32 rev_id; + __u8 api_version; + const char *name; + unsigned long driver_data; +}; + /* * Struct used for matching a device */ -struct of_device_id -{ +struct of_device_id { char name[32]; char type[32]; char compatible[128]; @@ -252,7 +261,7 @@ struct pcmcia_device_id { __u32 prod_id_hash[4]; - /* not matched against in kernelspace*/ + /* not matched against in kernelspace */ const char * prod_id[4]; /* not matched against */ @@ -365,8 +374,6 @@ struct ssb_device_id { } __attribute__((packed, aligned(2))); #define SSB_DEVICE(_vendor, _coreid, _revision) \ { .vendor = _vendor, .coreid = _coreid, .revision = _revision, } -#define SSB_DEVTABLE_END \ - { 0, }, #define SSB_ANY_VENDOR 0xFFFF #define SSB_ANY_ID 0xFFFF @@ -381,8 +388,6 @@ struct bcma_device_id { } __attribute__((packed,aligned(2))); #define BCMA_CORE(_manuf, _id, _rev, _class) \ { .manuf = _manuf, .id = _id, .rev = _rev, .class = _class, } -#define BCMA_CORETABLE_END \ - { 0, }, #define BCMA_ANY_MANUF 0xFFFF #define BCMA_ANY_ID 0xFFFF @@ -551,6 +556,14 @@ struct amba_id { void *data; }; +/** + * struct mips_cdmm_device_id - identifies devices in MIPS CDMM bus + * @type: Device type identifier. 
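+ * + * A driver's match table is a zero-terminated array keyed on @type, e.g. + * this hypothetical sketch: + * + *	static const struct mips_cdmm_device_id foo_cdmm_ids[] = { + *		{ .type = 0xfd }, + *		{ } + *	};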
+ */ +struct mips_cdmm_device_id { + __u8 type; +}; + /* * Match x86 CPUs for CPU specific drivers. * See documentation of "x86_match_cpu" for details. @@ -596,9 +609,21 @@ struct ipack_device_id { #define MEI_CL_MODULE_PREFIX "mei:" #define MEI_CL_NAME_SIZE 32 +#define MEI_CL_VERSION_ANY 0xff +/** + * struct mei_cl_device_id - MEI client device identifier + * @name: helper name + * @uuid: client uuid + * @version: client protocol version + * @driver_info: information used by the driver. + * + * identifies mei client device by uuid and name + */ struct mei_cl_device_id { char name[MEI_CL_NAME_SIZE]; + uuid_le uuid; + __u8 version; kernel_ulong_t driver_info; }; @@ -626,4 +651,10 @@ struct mcb_device_id { kernel_ulong_t driver_data; }; +struct ulpi_device_id { + __u16 vendor; + __u16 product; + kernel_ulong_t driver_data; +}; + #endif /* LINUX_MOD_DEVICETABLE_H */ diff --git a/drivers/include/linux/module.h b/drivers/include/linux/module.h index 2da0885fb..10e354647 100644 --- a/drivers/include/linux/module.h +++ b/drivers/include/linux/module.h @@ -9,9 +9,8 @@ #include #include #include -#include -#include +#include #include #include #include diff --git a/drivers/include/linux/moduleparam.h b/drivers/include/linux/moduleparam.h index b4692cb33..a51cd5966 100644 --- a/drivers/include/linux/moduleparam.h +++ b/drivers/include/linux/moduleparam.h @@ -2,9 +2,69 @@ #define _LINUX_MODULE_PARAMS_H /* (C) Copyright 2001, 2002 Rusty Russell IBM Corporation */ #include +/** + * module_param - typesafe helper for a module/cmdline parameter + * @value: the variable to alter, and exposed parameter name. + * @type: the type of the parameter + * @perm: visibility in sysfs. + * + * @value becomes the module parameter, or (prefixed by KBUILD_MODNAME and a + * ".") the kernel commandline parameter. Note that - is changed to _, so + * the user can use "foo-bar=1" even for variable "foo_bar". + * + * @perm is 0 if the variable is not to appear in sysfs, or 0444 + * for world-readable, 0644 for root-writable, etc. Note that if it + * is writable, you may need to use kernel_param_lock() around + * accesses (esp. charp, which can be kfreed when it changes). + * + * The @type is simply pasted to refer to a param_ops_##type and a + * param_check_##type: for convenience many standard types are provided but + * you can create your own by defining those variables. + * + * Standard types are: + * byte, short, ushort, int, uint, long, ulong + * charp: a character pointer + * bool: a bool, values 0/1, y/n, Y/N. + * invbool: the above, only sense-reversed (N = true). + */ +#define module_param(name, type, perm) \ + module_param_named(name, name, type, perm) -#define MODULE_PARM_DESC(_parm, desc) +/** + * module_param_unsafe - same as module_param but taints kernel + */ +#define module_param_unsafe(name, type, perm) \ + module_param_named_unsafe(name, name, type, perm) + +/** + * module_param_named - typesafe helper for a renamed module/cmdline parameter + * @name: a valid C identifier which is the parameter name. + * @value: the actual lvalue to alter. + * @type: the type of the parameter + * @perm: visibility in sysfs. + * + * Usually it's a good idea to have variable names and user-exposed names the + * same, but that's harder if the variable must be non-static or is inside a + * structure. This allows exposure under a different name.
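+ * + * For example (sketch with a hypothetical variable): + * + *	static int debug_level; + *	module_param_named(debug, debug_level, int, 0644); + * + * exposes the variable as parameter "debug" ("foo.debug" on the kernel + * command line when built into a module named "foo").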
+ */ #define module_param_named(name, value, type, perm) +/** + * module_param_named_unsafe - same as module_param_named but taints kernel + */ + #define module_param_named_unsafe(name, value, type, perm) +#define MODULE_PARM_DESC(_parm, desc) + +#ifdef CONFIG_SYSFS +extern void kernel_param_lock(struct module *mod); +extern void kernel_param_unlock(struct module *mod); +#else +static inline void kernel_param_lock(struct module *mod) +{ +} +static inline void kernel_param_unlock(struct module *mod) +{ +} +#endif #endif diff --git a/drivers/include/linux/mutex.h b/drivers/include/linux/mutex.h index efded740b..ab93b892f 100644 --- a/drivers/include/linux/mutex.h +++ b/drivers/include/linux/mutex.h @@ -49,7 +49,7 @@ struct mutex { /* 1: unlocked, 0: locked, negative: locked, possible waiters */ struct list_head wait_list; - atomic_t count; + atomic_t count; }; /* @@ -58,7 +58,7 @@ struct mutex { */ struct mutex_waiter { struct list_head list; - int *task; + int *task; }; diff --git a/drivers/include/linux/pci.h b/drivers/include/linux/pci.h index ab4d39c41..9be52a6a0 100644 --- a/drivers/include/linux/pci.h +++ b/drivers/include/linux/pci.h @@ -25,6 +25,16 @@ #include /* The pci register defines */ #include +#include + +#if 0 +struct device +{ + struct device *parent; + void *driver_data; +}; + +#endif #define PCI_CFG_SPACE_SIZE 256 #define PCI_CFG_SPACE_EXP_SIZE 4096 @@ -204,6 +214,10 @@ * * 7:3 = slot * 2:0 = function + * + * PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined in uapi/linux/pci.h. + * In the interest of not exposing interfaces to user-space unnecessarily, + * the following kernel-only defines are being added here. */ #define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07)) #define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f) @@ -427,11 +441,12 @@ struct pci_dev { D3cold, not set for devices powered on/off by the corresponding bridge */ + unsigned int ignore_hotplug:1; /* Ignore hotplug events */ unsigned int d3_delay; /* D3->D0 transition time in ms */ unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */ #ifdef CONFIG_PCIEASPM - struct pcie_link_state *link_state; /* ASPM link state. */ + struct pcie_link_state *link_state; /* ASPM link state */ #endif pci_channel_state_t error_state; /* current connectivity state */ @@ -446,13 +461,15 @@ struct pci_dev { unsigned int irq; struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */ + bool match_driver; /* Skip attaching driver */ /* These fields are used by common fixups */ - unsigned int transparent:1; /* Transparent PCI bridge */ + unsigned int transparent:1; /* Subtractive decode PCI bridge */ unsigned int multifunction:1;/* Part of multi-function device */ /* keep track of device state */ unsigned int is_added:1; unsigned int is_busmaster:1; /* device is busmaster */ unsigned int no_msi:1; /* device may not use msi */ + unsigned int no_64bit_msi:1; /* device may only use 32-bit MSIs */ unsigned int block_cfg_access:1; /* config space access is blocked */ unsigned int broken_parity_status:1; /* Device generates false positive parity */ unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */ @@ -460,8 +477,6 @@ struct pci_dev { unsigned int msix_enabled:1; unsigned int ari_enabled:1; /* ARI forwarding */ unsigned int is_managed:1; - unsigned int is_pcie:1; /* Obsolete. Will be removed. 
- Use pci_is_pcie() instead */ unsigned int needs_freset:1; /* Dev requires fundamental reset */ unsigned int state_saved:1; unsigned int is_physfn:1; diff --git a/drivers/include/linux/percpu-defs.h b/drivers/include/linux/percpu-defs.h index 57f3a1c55..8f16299ca 100644 --- a/drivers/include/linux/percpu-defs.h +++ b/drivers/include/linux/percpu-defs.h @@ -488,10 +488,8 @@ do { \ #define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1) /* - * Operations with implied preemption protection. These operations can be - * used without worrying about preemption. Note that interrupts may still - * occur while an operation is in progress and if the interrupt modifies - * the variable too then RMW actions may not be reliable. + * Operations with implied preemption/interrupt protection. These + * operations can be used without worrying about preemption or interrupt. */ #define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, pcp) #define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, pcp, val) diff --git a/drivers/include/linux/personality.h b/drivers/include/linux/personality.h index 646c0a7d5..709f49f19 100644 --- a/drivers/include/linux/personality.h +++ b/drivers/include/linux/personality.h @@ -44,11 +44,9 @@ struct exec_domain { */ #define personality(pers) (pers & PER_MASK) - /* * Change personality of the currently running process. */ -#define set_personality(pers) \ - ((current->personality == (pers)) ? 0 : __set_personality(pers)) +#define set_personality(pers) (current->personality = (pers)) #endif /* _LINUX_PERSONALITY_H */ diff --git a/drivers/include/linux/pm.h b/drivers/include/linux/pm.h new file mode 100644 index 000000000..08b3d3469 --- /dev/null +++ b/drivers/include/linux/pm.h @@ -0,0 +1,788 @@ +/* + * pm.h - Power management interface + * + * Copyright (C) 2000 Andrew Henroid + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _LINUX_PM_H +#define _LINUX_PM_H + +#include +#include +#include +#include +//#include +#include + +/* + * Callbacks for platform drivers to implement. 
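+ * A platform driver installs these by plain assignment; e.g., with a
+ * hypothetical board_power_off() routine:
+ *
+ *	pm_power_off = board_power_off;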
+ */ +extern void (*pm_power_off)(void); +extern void (*pm_power_off_prepare)(void); + +struct device; /* we have a circular dep with device.h */ +#ifdef CONFIG_VT_CONSOLE_SLEEP +extern void pm_vt_switch_required(struct device *dev, bool required); +extern void pm_vt_switch_unregister(struct device *dev); +#else +static inline void pm_vt_switch_required(struct device *dev, bool required) +{ +} +static inline void pm_vt_switch_unregister(struct device *dev) +{ +} +#endif /* CONFIG_VT_CONSOLE_SLEEP */ + +/* + * Device power management + */ + +struct device; + +#ifdef CONFIG_PM +extern const char power_group_name[]; /* = "power" */ +#else +#define power_group_name NULL +#endif + +typedef struct pm_message { + int event; +} pm_message_t; + +/** + * struct dev_pm_ops - device PM callbacks + * + * Several device power state transitions are externally visible, affecting + * the state of pending I/O queues and (for drivers that touch hardware) + * interrupts, wakeups, DMA, and other hardware state. There may also be + * internal transitions to various low-power modes which are transparent + * to the rest of the driver stack (such as a driver that's ON gating off + * clocks which are not in active use). + * + * The externally visible transitions are handled with the help of callbacks + * included in this structure in such a way that two levels of callbacks are + * involved. First, the PM core executes callbacks provided by PM domains, + * device types, classes and bus types. They are the subsystem-level callbacks + * supposed to execute callbacks provided by device drivers, although they may + * choose not to do that. If the driver callbacks are executed, they have to + * collaborate with the subsystem-level callbacks to achieve the goals + * appropriate for the given system transition, given transition phase and the + * subsystem the device belongs to. + * + * @prepare: The principal role of this callback is to prevent new children of + * the device from being registered after it has returned (the driver's + * subsystem and generally the rest of the kernel is supposed to prevent + * new calls to the probe method from being made too once @prepare() has + * succeeded). If @prepare() detects a situation it cannot handle (e.g. + * registration of a child already in progress), it may return -EAGAIN, so + * that the PM core can execute it once again (e.g. after a new child has + * been registered) to recover from the race condition. + * This method is executed for all kinds of suspend transitions and is + * followed by one of the suspend callbacks: @suspend(), @freeze(), or + * @poweroff(). If the transition is a suspend to memory or standby (that + * is, not related to hibernation), the return value of @prepare() may be + * used to indicate to the PM core to leave the device in runtime suspend + * if applicable. Namely, if @prepare() returns a positive number, the PM + * core will understand that as a declaration that the device appears to be + * runtime-suspended and it may be left in that state during the entire + * transition and during the subsequent resume if all of its descendants + * are left in runtime suspend too. If that happens, @complete() will be + * executed directly after @prepare() and it must ensure the proper + * functioning of the device after the system resume. 
+ * The PM core executes subsystem-level @prepare() for all devices before + * starting to invoke suspend callbacks for any of them, so generally + * devices may be assumed to be functional or to respond to runtime resume + * requests while @prepare() is being executed. However, device drivers + * may NOT assume anything about the availability of user space at that + * time and it is NOT valid to request firmware from within @prepare() + * (it's too late to do that). It also is NOT valid to allocate + * substantial amounts of memory from @prepare() in the GFP_KERNEL mode. + * [To work around these limitations, drivers may register suspend and + * hibernation notifiers to be executed before the freezing of tasks.] + * + * @complete: Undo the changes made by @prepare(). This method is executed for + * all kinds of resume transitions, following one of the resume callbacks: + * @resume(), @thaw(), @restore(). Also called if the state transition + * fails before the driver's suspend callback: @suspend(), @freeze() or + * @poweroff(), can be executed (e.g. if the suspend callback fails for one + * of the other devices that the PM core has unsuccessfully attempted to + * suspend earlier). + * The PM core executes subsystem-level @complete() after it has executed + * the appropriate resume callbacks for all devices. If the corresponding + * @prepare() at the beginning of the suspend transition returned a + * positive number and the device was left in runtime suspend (without + * executing any suspend and resume callbacks for it), @complete() will be + * the only callback executed for the device during resume. In that case, + * @complete() must be prepared to do whatever is necessary to ensure the + * proper functioning of the device after the system resume. To this end, + * @complete() can check the power.direct_complete flag of the device to + * learn whether (unset) or not (set) the previous suspend and resume + * callbacks have been executed for it. + * + * @suspend: Executed before putting the system into a sleep state in which the + * contents of main memory are preserved. The exact action to perform + * depends on the device's subsystem (PM domain, device type, class or bus + * type), but generally the device must be quiescent after subsystem-level + * @suspend() has returned, so that it doesn't do any I/O or DMA. + * Subsystem-level @suspend() is executed for all devices after invoking + * subsystem-level @prepare() for all of them. + * + * @suspend_late: Continue operations started by @suspend(). For a number of + * devices @suspend_late() may point to the same callback routine as the + * runtime suspend callback. + * + * @resume: Executed after waking the system up from a sleep state in which the + * contents of main memory were preserved. The exact action to perform + * depends on the device's subsystem, but generally the driver is expected + * to start working again, responding to hardware events and software + * requests (the device itself may be left in a low-power state, waiting + * for a runtime resume to occur). The state of the device at the time its + * driver's @resume() callback is run depends on the platform and subsystem + * the device belongs to. On most platforms, there are no restrictions on + * availability of resources like clocks during @resume(). + * Subsystem-level @resume() is executed for all devices after invoking + * subsystem-level @resume_noirq() for all of them. + * + * @resume_early: Prepare to execute @resume(). 
For a number of devices + * @resume_early() may point to the same callback routine as the runtime + * resume callback. + * + * @freeze: Hibernation-specific, executed before creating a hibernation image. + * Analogous to @suspend(), but it should not enable the device to signal + * wakeup events or change its power state. The majority of subsystems + * (with the notable exception of the PCI bus type) expect the driver-level + * @freeze() to save the device settings in memory to be used by @restore() + * during the subsequent resume from hibernation. + * Subsystem-level @freeze() is executed for all devices after invoking + * subsystem-level @prepare() for all of them. + * + * @freeze_late: Continue operations started by @freeze(). Analogous to + * @suspend_late(), but it should not enable the device to signal wakeup + * events or change its power state. + * + * @thaw: Hibernation-specific, executed after creating a hibernation image OR + * if the creation of an image has failed. Also executed after a failing + * attempt to restore the contents of main memory from such an image. + * Undo the changes made by the preceding @freeze(), so the device can be + * operated in the same way as immediately before the call to @freeze(). + * Subsystem-level @thaw() is executed for all devices after invoking + * subsystem-level @thaw_noirq() for all of them. It also may be executed + * directly after @freeze() in case of a transition error. + * + * @thaw_early: Prepare to execute @thaw(). Undo the changes made by the + * preceding @freeze_late(). + * + * @poweroff: Hibernation-specific, executed after saving a hibernation image. + * Analogous to @suspend(), but it need not save the device's settings in + * memory. + * Subsystem-level @poweroff() is executed for all devices after invoking + * subsystem-level @prepare() for all of them. + * + * @poweroff_late: Continue operations started by @poweroff(). Analogous to + * @suspend_late(), but it need not save the device's settings in memory. + * + * @restore: Hibernation-specific, executed after restoring the contents of main + * memory from a hibernation image, analogous to @resume(). + * + * @restore_early: Prepare to execute @restore(), analogous to @resume_early(). + * + * @suspend_noirq: Complete the actions started by @suspend(). Carry out any + * additional operations required for suspending the device that might be + * racing with its driver's interrupt handler, which is guaranteed not to + * run while @suspend_noirq() is being executed. + * It generally is expected that the device will be in a low-power state + * (appropriate for the target system sleep state) after subsystem-level + * @suspend_noirq() has returned successfully. If the device can generate + * system wakeup signals and is enabled to wake up the system, it should be + * configured to do so at that time. However, depending on the platform + * and device's subsystem, @suspend() or @suspend_late() may be allowed to + * put the device into the low-power state and configure it to generate + * wakeup signals, in which case it generally is not necessary to define + * @suspend_noirq(). + * + * @resume_noirq: Prepare for the execution of @resume() by carrying out any + * operations required for resuming the device that might be racing with + * its driver's interrupt handler, which is guaranteed not to run while + * @resume_noirq() is being executed. + * + * @freeze_noirq: Complete the actions started by @freeze(). 
Carry out any + * additional operations required for freezing the device that might be + * racing with its driver's interrupt handler, which is guaranteed not to + * run while @freeze_noirq() is being executed. + * The power state of the device should not be changed by either @freeze(), + * or @freeze_late(), or @freeze_noirq() and it should not be configured to + * signal system wakeup by any of these callbacks. + * + * @thaw_noirq: Prepare for the execution of @thaw() by carrying out any + * operations required for thawing the device that might be racing with its + * driver's interrupt handler, which is guaranteed not to run while + * @thaw_noirq() is being executed. + * + * @poweroff_noirq: Complete the actions started by @poweroff(). Analogous to + * @suspend_noirq(), but it need not save the device's settings in memory. + * + * @restore_noirq: Prepare for the execution of @restore() by carrying out any + * operations required for thawing the device that might be racing with its + * driver's interrupt handler, which is guaranteed not to run while + * @restore_noirq() is being executed. Analogous to @resume_noirq(). + * + * All of the above callbacks, except for @complete(), return error codes. + * However, the error codes returned by the resume operations, @resume(), + * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq(), do + * not cause the PM core to abort the resume transition during which they are + * returned. The error codes returned in those cases are only printed by the PM + * core to the system logs for debugging purposes. Still, it is recommended + * that drivers only return error codes from their resume methods in case of an + * unrecoverable failure (i.e. when the device being handled refuses to resume + * and becomes unusable) to allow us to modify the PM core in the future, so + * that it can avoid attempting to handle devices that failed to resume and + * their children. + * + * It is allowed to unregister devices while the above callbacks are being + * executed. However, a callback routine must NOT try to unregister the device + * it was called for, although it may unregister children of that device (for + * example, if it detects that a child was unplugged while the system was + * asleep). + * + * Refer to Documentation/power/devices.txt for more information about the role + * of the above callbacks in the system suspend process. + * + * There also are callbacks related to runtime power management of devices. + * Again, these callbacks are executed by the PM core only for subsystems + * (PM domains, device types, classes and bus types) and the subsystem-level + * callbacks are supposed to invoke the driver callbacks. Moreover, the exact + * actions to be performed by a device driver's callbacks generally depend on + * the platform and subsystem the device belongs to. + * + * @runtime_suspend: Prepare the device for a condition in which it won't be + * able to communicate with the CPU(s) and RAM due to power management. + * This need not mean that the device should be put into a low-power state. + * For example, if the device is behind a link which is about to be turned + * off, the device may remain at full power. If the device does go to low + * power and is capable of generating runtime wakeup events, remote wakeup + * (i.e., a hardware mechanism allowing the device to request a change of + * its power state via an interrupt) should be enabled for it. 
+ * + * @runtime_resume: Put the device into the fully active state in response to a + * wakeup event generated by hardware or at the request of software. If + * necessary, put the device into the full-power state and restore its + * registers, so that it is fully operational. + * + * @runtime_idle: Device appears to be inactive and it might be put into a + * low-power state if all of the necessary conditions are satisfied. + * Check these conditions, and return 0 if it's appropriate to let the PM + * core queue a suspend request for the device. + * + * Refer to Documentation/power/runtime_pm.txt for more information about the + * role of the above callbacks in device runtime power management. + * + */ + +struct dev_pm_ops { + int (*prepare)(struct device *dev); + void (*complete)(struct device *dev); + int (*suspend)(struct device *dev); + int (*resume)(struct device *dev); + int (*freeze)(struct device *dev); + int (*thaw)(struct device *dev); + int (*poweroff)(struct device *dev); + int (*restore)(struct device *dev); + int (*suspend_late)(struct device *dev); + int (*resume_early)(struct device *dev); + int (*freeze_late)(struct device *dev); + int (*thaw_early)(struct device *dev); + int (*poweroff_late)(struct device *dev); + int (*restore_early)(struct device *dev); + int (*suspend_noirq)(struct device *dev); + int (*resume_noirq)(struct device *dev); + int (*freeze_noirq)(struct device *dev); + int (*thaw_noirq)(struct device *dev); + int (*poweroff_noirq)(struct device *dev); + int (*restore_noirq)(struct device *dev); + int (*runtime_suspend)(struct device *dev); + int (*runtime_resume)(struct device *dev); + int (*runtime_idle)(struct device *dev); +}; + +#ifdef CONFIG_PM_SLEEP +#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ + .suspend = suspend_fn, \ + .resume = resume_fn, \ + .freeze = suspend_fn, \ + .thaw = resume_fn, \ + .poweroff = suspend_fn, \ + .restore = resume_fn, +#else +#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) +#endif + +#ifdef CONFIG_PM_SLEEP +#define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ + .suspend_late = suspend_fn, \ + .resume_early = resume_fn, \ + .freeze_late = suspend_fn, \ + .thaw_early = resume_fn, \ + .poweroff_late = suspend_fn, \ + .restore_early = resume_fn, +#else +#define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) +#endif + +#ifdef CONFIG_PM_SLEEP +#define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ + .suspend_noirq = suspend_fn, \ + .resume_noirq = resume_fn, \ + .freeze_noirq = suspend_fn, \ + .thaw_noirq = resume_fn, \ + .poweroff_noirq = suspend_fn, \ + .restore_noirq = resume_fn, +#else +#define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) +#endif + +#ifdef CONFIG_PM +#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ + .runtime_suspend = suspend_fn, \ + .runtime_resume = resume_fn, \ + .runtime_idle = idle_fn, +#else +#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) +#endif + +/* + * Use this if you want to use the same suspend and resume callbacks for suspend + * to RAM and hibernation. + */ +#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \ +const struct dev_pm_ops name = { \ + SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ +} + +/* + * Use this for defining a set of PM operations to be used in all situations + * (system suspend, hibernation or runtime PM). 
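+ * Typical usage, with hypothetical foo_suspend()/foo_resume() driver
+ * callbacks:
+ *
+ *	static UNIVERSAL_DEV_PM_OPS(foo_pm_ops, foo_suspend,
+ *				    foo_resume, NULL);
+ *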
+ * NOTE: In general, system suspend callbacks, .suspend() and .resume(), should + * be different from the corresponding runtime PM callbacks, .runtime_suspend(), + * and .runtime_resume(), because .runtime_suspend() always works on an already + * quiescent device, while .suspend() should assume that the device may be doing + * something when it is called (it should ensure that the device will be + * quiescent after it has returned). Therefore it's better to point the "late" + * suspend and "early" resume callback pointers, .suspend_late() and + * .resume_early(), to the same routines as .runtime_suspend() and + * .runtime_resume(), respectively (and analogously for hibernation). + */ +#define UNIVERSAL_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \ +const struct dev_pm_ops name = { \ + SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ + SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ +} + +/** + * PM_EVENT_ messages + * + * The following PM_EVENT_ messages are defined for the internal use of the PM + * core, in order to provide a mechanism allowing the high level suspend and + * hibernation code to convey the necessary information to the device PM core + * code: + * + * ON No transition. + * + * FREEZE System is going to hibernate, call ->prepare() and ->freeze() + * for all devices. + * + * SUSPEND System is going to suspend, call ->prepare() and ->suspend() + * for all devices. + * + * HIBERNATE Hibernation image has been saved, call ->prepare() and + * ->poweroff() for all devices. + * + * QUIESCE Contents of main memory are going to be restored from a (loaded) + * hibernation image, call ->prepare() and ->freeze() for all + * devices. + * + * RESUME System is resuming, call ->resume() and ->complete() for all + * devices. + * + * THAW Hibernation image has been created, call ->thaw() and + * ->complete() for all devices. + * + * RESTORE Contents of main memory have been restored from a hibernation + * image, call ->restore() and ->complete() for all devices. + * + * RECOVER Creation of a hibernation image or restoration of the main + * memory contents from a hibernation image has failed, call + * ->thaw() and ->complete() for all devices. + * + * The following PM_EVENT_ messages are defined for internal use by + * kernel subsystems. They are never issued by the PM core. + * + * USER_SUSPEND Manual selective suspend was issued by userspace. + * + * USER_RESUME Manual selective resume was issued by userspace. + * + * REMOTE_WAKEUP Remote-wakeup request was received from the device. + * + * AUTO_SUSPEND Automatic (device idle) runtime suspend was + * initiated by the subsystem. + * + * AUTO_RESUME Automatic (device needed) runtime resume was + * requested by a driver. 
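+ *
+ * Subsystems can tell the AUTO_* variants apart from user-initiated
+ * transitions with the PMSG_IS_AUTO(msg) helper defined below; e.g. a
+ * hypothetical subsystem fast path might do:
+ *
+ *	if (PMSG_IS_AUTO(msg))
+ *		return foo_fast_suspend(dev);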
+ */ + +#define PM_EVENT_INVALID (-1) +#define PM_EVENT_ON 0x0000 +#define PM_EVENT_FREEZE 0x0001 +#define PM_EVENT_SUSPEND 0x0002 +#define PM_EVENT_HIBERNATE 0x0004 +#define PM_EVENT_QUIESCE 0x0008 +#define PM_EVENT_RESUME 0x0010 +#define PM_EVENT_THAW 0x0020 +#define PM_EVENT_RESTORE 0x0040 +#define PM_EVENT_RECOVER 0x0080 +#define PM_EVENT_USER 0x0100 +#define PM_EVENT_REMOTE 0x0200 +#define PM_EVENT_AUTO 0x0400 + +#define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE) +#define PM_EVENT_USER_SUSPEND (PM_EVENT_USER | PM_EVENT_SUSPEND) +#define PM_EVENT_USER_RESUME (PM_EVENT_USER | PM_EVENT_RESUME) +#define PM_EVENT_REMOTE_RESUME (PM_EVENT_REMOTE | PM_EVENT_RESUME) +#define PM_EVENT_AUTO_SUSPEND (PM_EVENT_AUTO | PM_EVENT_SUSPEND) +#define PM_EVENT_AUTO_RESUME (PM_EVENT_AUTO | PM_EVENT_RESUME) + +#define PMSG_INVALID ((struct pm_message){ .event = PM_EVENT_INVALID, }) +#define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, }) +#define PMSG_FREEZE ((struct pm_message){ .event = PM_EVENT_FREEZE, }) +#define PMSG_QUIESCE ((struct pm_message){ .event = PM_EVENT_QUIESCE, }) +#define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, }) +#define PMSG_HIBERNATE ((struct pm_message){ .event = PM_EVENT_HIBERNATE, }) +#define PMSG_RESUME ((struct pm_message){ .event = PM_EVENT_RESUME, }) +#define PMSG_THAW ((struct pm_message){ .event = PM_EVENT_THAW, }) +#define PMSG_RESTORE ((struct pm_message){ .event = PM_EVENT_RESTORE, }) +#define PMSG_RECOVER ((struct pm_message){ .event = PM_EVENT_RECOVER, }) +#define PMSG_USER_SUSPEND ((struct pm_message) \ + { .event = PM_EVENT_USER_SUSPEND, }) +#define PMSG_USER_RESUME ((struct pm_message) \ + { .event = PM_EVENT_USER_RESUME, }) +#define PMSG_REMOTE_RESUME ((struct pm_message) \ + { .event = PM_EVENT_REMOTE_RESUME, }) +#define PMSG_AUTO_SUSPEND ((struct pm_message) \ + { .event = PM_EVENT_AUTO_SUSPEND, }) +#define PMSG_AUTO_RESUME ((struct pm_message) \ + { .event = PM_EVENT_AUTO_RESUME, }) + +#define PMSG_IS_AUTO(msg) (((msg).event & PM_EVENT_AUTO) != 0) + +/** + * Device run-time power management status. + * + * These status labels are used internally by the PM core to indicate the + * current status of a device with respect to the PM core operations. They do + * not reflect the actual power state of the device or its status as seen by the + * driver. + * + * RPM_ACTIVE Device is fully operational. Indicates that the device + * bus type's ->runtime_resume() callback has completed + * successfully. + * + * RPM_SUSPENDED Device bus type's ->runtime_suspend() callback has + * completed successfully. The device is regarded as + * suspended. + * + * RPM_RESUMING Device bus type's ->runtime_resume() callback is being + * executed. + * + * RPM_SUSPENDING Device bus type's ->runtime_suspend() callback is being + * executed. + */ + +enum rpm_status { + RPM_ACTIVE = 0, + RPM_RESUMING, + RPM_SUSPENDED, + RPM_SUSPENDING, +}; + +/** + * Device run-time power management request types. + * + * RPM_REQ_NONE Do nothing. 
+ * + * RPM_REQ_IDLE Run the device bus type's ->runtime_idle() callback + * + * RPM_REQ_SUSPEND Run the device bus type's ->runtime_suspend() callback + * + * RPM_REQ_AUTOSUSPEND Same as RPM_REQ_SUSPEND, but not until the device has + * been inactive for as long as power.autosuspend_delay + * + * RPM_REQ_RESUME Run the device bus type's ->runtime_resume() callback + */ + +enum rpm_request { + RPM_REQ_NONE = 0, + RPM_REQ_IDLE, + RPM_REQ_SUSPEND, + RPM_REQ_AUTOSUSPEND, + RPM_REQ_RESUME, +}; + +struct wakeup_source; +struct wake_irq; +struct pm_domain_data; + +struct pm_subsys_data { + spinlock_t lock; + unsigned int refcount; +#ifdef CONFIG_PM_CLK + struct list_head clock_list; +#endif +#ifdef CONFIG_PM_GENERIC_DOMAINS + struct pm_domain_data *domain_data; +#endif +}; + +struct dev_pm_info { + pm_message_t power_state; + unsigned int can_wakeup:1; + unsigned int async_suspend:1; + bool is_prepared:1; /* Owned by the PM core */ + bool is_suspended:1; /* Ditto */ + bool is_noirq_suspended:1; + bool is_late_suspended:1; + bool ignore_children:1; + bool early_init:1; /* Owned by the PM core */ + bool direct_complete:1; /* Owned by the PM core */ + spinlock_t lock; +#ifdef CONFIG_PM_SLEEP + struct list_head entry; + struct completion completion; + struct wakeup_source *wakeup; + bool wakeup_path:1; + bool syscore:1; +#else + unsigned int should_wakeup:1; +#endif +#ifdef CONFIG_PM + struct timer_list suspend_timer; + unsigned long timer_expires; + struct work_struct work; + wait_queue_head_t wait_queue; + struct wake_irq *wakeirq; + atomic_t usage_count; + atomic_t child_count; + unsigned int disable_depth:3; + unsigned int idle_notification:1; + unsigned int request_pending:1; + unsigned int deferred_resume:1; + unsigned int run_wake:1; + unsigned int runtime_auto:1; + unsigned int no_callbacks:1; + unsigned int irq_safe:1; + unsigned int use_autosuspend:1; + unsigned int timer_autosuspends:1; + unsigned int memalloc_noio:1; + enum rpm_request request; + enum rpm_status runtime_status; + int runtime_error; + int autosuspend_delay; + unsigned long last_busy; + unsigned long active_jiffies; + unsigned long suspended_jiffies; + unsigned long accounting_timestamp; +#endif + struct pm_subsys_data *subsys_data; /* Owned by the subsystem. */ + void (*set_latency_tolerance)(struct device *, s32); + struct dev_pm_qos *qos; +}; + +extern void update_pm_runtime_accounting(struct device *dev); +extern int dev_pm_get_subsys_data(struct device *dev); +extern void dev_pm_put_subsys_data(struct device *dev); + +/* + * Power domains provide callbacks that are executed during system suspend, + * hibernation, system resume and during runtime PM transitions along with + * subsystem-level and driver-level callbacks. + * + * @detach: Called when removing a device from the domain. + * @activate: Called before executing probe routines for bus types and drivers. + * @sync: Called after successful driver probe. + * @dismiss: Called after unsuccessful driver probe and after driver removal. + */ +struct dev_pm_domain { + struct dev_pm_ops ops; + void (*detach)(struct device *dev, bool power_off); + int (*activate)(struct device *dev); + void (*sync)(struct device *dev); + void (*dismiss)(struct device *dev); +}; + +/* + * The PM_EVENT_ messages are also used by drivers implementing the legacy + * suspend framework, based on the ->suspend() and ->resume() callbacks common + * for suspend and hibernation transitions, according to the rules below. 
+ */ + +/* Necessary, because several drivers use PM_EVENT_PRETHAW */ +#define PM_EVENT_PRETHAW PM_EVENT_QUIESCE + +/* + * One transition is triggered by resume(), after a suspend() call; the + * message is implicit: + * + * ON Driver starts working again, responding to hardware events + * and software requests. The hardware may have gone through + * a power-off reset, or it may have maintained state from the + * previous suspend() which the driver will rely on while + * resuming. On most platforms, there are no restrictions on + * availability of resources like clocks during resume(). + * + * Other transitions are triggered by messages sent using suspend(). All + * these transitions quiesce the driver, so that I/O queues are inactive. + * That commonly entails turning off IRQs and DMA; there may be rules + * about how to quiesce that are specific to the bus or the device's type. + * (For example, network drivers mark the link state.) Other details may + * differ according to the message: + * + * SUSPEND Quiesce, enter a low power device state appropriate for + * the upcoming system state (such as PCI_D3hot), and enable + * wakeup events as appropriate. + * + * HIBERNATE Enter a low power device state appropriate for the hibernation + * state (eg. ACPI S4) and enable wakeup events as appropriate. + * + * FREEZE Quiesce operations so that a consistent image can be saved; + * but do NOT otherwise enter a low power device state, and do + * NOT emit system wakeup events. + * + * PRETHAW Quiesce as if for FREEZE; additionally, prepare for restoring + * the system from a snapshot taken after an earlier FREEZE. + * Some drivers will need to reset their hardware state instead + * of preserving it, to ensure that it's never mistaken for the + * state which that earlier snapshot had set up. + * + * A minimally power-aware driver treats all messages as SUSPEND, fully + * reinitializes its device during resume() -- whether or not it was reset + * during the suspend/resume cycle -- and can't issue wakeup events. + * + * More power-aware drivers may also use low power states at runtime as + * well as during system sleep states like PM_SUSPEND_STANDBY. They may + * be able to use wakeup events to exit from runtime low-power states, + * or from system low-power states such as standby or suspend-to-RAM. 
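+ *
+ * Under this legacy scheme a minimally power-aware driver reduces to a
+ * sketch like the following (foo_quiesce()/foo_reinit() stand in for
+ * driver internals):
+ *
+ *	static int foo_suspend(struct device *dev, pm_message_t state)
+ *	{
+ *		foo_quiesce(dev);
+ *		return 0;
+ *	}
+ *
+ *	static int foo_resume(struct device *dev)
+ *	{
+ *		foo_reinit(dev);
+ *		return 0;
+ *	}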
+ */ + +#ifdef CONFIG_PM_SLEEP +extern void device_pm_lock(void); +extern void dpm_resume_start(pm_message_t state); +extern void dpm_resume_end(pm_message_t state); +extern void dpm_resume_noirq(pm_message_t state); +extern void dpm_resume_early(pm_message_t state); +extern void dpm_resume(pm_message_t state); +extern void dpm_complete(pm_message_t state); + +extern void device_pm_unlock(void); +extern int dpm_suspend_end(pm_message_t state); +extern int dpm_suspend_start(pm_message_t state); +extern int dpm_suspend_noirq(pm_message_t state); +extern int dpm_suspend_late(pm_message_t state); +extern int dpm_suspend(pm_message_t state); +extern int dpm_prepare(pm_message_t state); + +extern void __suspend_report_result(const char *function, void *fn, int ret); + +#define suspend_report_result(fn, ret) \ + do { \ + __suspend_report_result(__func__, fn, ret); \ + } while (0) + +extern int device_pm_wait_for_dev(struct device *sub, struct device *dev); +extern void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *)); + +extern int pm_generic_prepare(struct device *dev); +extern int pm_generic_suspend_late(struct device *dev); +extern int pm_generic_suspend_noirq(struct device *dev); +extern int pm_generic_suspend(struct device *dev); +extern int pm_generic_resume_early(struct device *dev); +extern int pm_generic_resume_noirq(struct device *dev); +extern int pm_generic_resume(struct device *dev); +extern int pm_generic_freeze_noirq(struct device *dev); +extern int pm_generic_freeze_late(struct device *dev); +extern int pm_generic_freeze(struct device *dev); +extern int pm_generic_thaw_noirq(struct device *dev); +extern int pm_generic_thaw_early(struct device *dev); +extern int pm_generic_thaw(struct device *dev); +extern int pm_generic_restore_noirq(struct device *dev); +extern int pm_generic_restore_early(struct device *dev); +extern int pm_generic_restore(struct device *dev); +extern int pm_generic_poweroff_noirq(struct device *dev); +extern int pm_generic_poweroff_late(struct device *dev); +extern int pm_generic_poweroff(struct device *dev); +extern void pm_generic_complete(struct device *dev); +extern void pm_complete_with_resume_check(struct device *dev); + +#else /* !CONFIG_PM_SLEEP */ + +#define device_pm_lock() do {} while (0) +#define device_pm_unlock() do {} while (0) + +static inline int dpm_suspend_start(pm_message_t state) +{ + return 0; +} + +#define suspend_report_result(fn, ret) do {} while (0) + +static inline int device_pm_wait_for_dev(struct device *a, struct device *b) +{ + return 0; +} + +static inline void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *)) +{ +} + +#define pm_generic_prepare NULL +#define pm_generic_suspend_late NULL +#define pm_generic_suspend_noirq NULL +#define pm_generic_suspend NULL +#define pm_generic_resume_early NULL +#define pm_generic_resume_noirq NULL +#define pm_generic_resume NULL +#define pm_generic_freeze_noirq NULL +#define pm_generic_freeze_late NULL +#define pm_generic_freeze NULL +#define pm_generic_thaw_noirq NULL +#define pm_generic_thaw_early NULL +#define pm_generic_thaw NULL +#define pm_generic_restore_noirq NULL +#define pm_generic_restore_early NULL +#define pm_generic_restore NULL +#define pm_generic_poweroff_noirq NULL +#define pm_generic_poweroff_late NULL +#define pm_generic_poweroff NULL +#define pm_generic_complete NULL +#endif /* !CONFIG_PM_SLEEP */ + +/* How to reorder dpm_list after device_move() */ +enum dpm_order { + DPM_ORDER_NONE, + DPM_ORDER_DEV_AFTER_PARENT, + 
DPM_ORDER_PARENT_BEFORE_DEV, + DPM_ORDER_DEV_LAST, +}; + +#endif /* _LINUX_PM_H */ diff --git a/drivers/include/linux/pm_runtime.h b/drivers/include/linux/pm_runtime.h new file mode 100644 index 000000000..2d66664b0 --- /dev/null +++ b/drivers/include/linux/pm_runtime.h @@ -0,0 +1,276 @@ +/* + * pm_runtime.h - Device run-time power management helper functions. + * + * Copyright (C) 2009 Rafael J. Wysocki + * + * This file is released under the GPLv2. + */ + +#ifndef _LINUX_PM_RUNTIME_H +#define _LINUX_PM_RUNTIME_H + +#include +#include + +#include + +/* Runtime PM flag argument bits */ +#define RPM_ASYNC 0x01 /* Request is asynchronous */ +#define RPM_NOWAIT 0x02 /* Don't wait for concurrent + state change */ +#define RPM_GET_PUT 0x04 /* Increment/decrement the + usage_count */ +#define RPM_AUTO 0x08 /* Use autosuspend_delay */ + +#ifdef CONFIG_PM +extern struct workqueue_struct *pm_wq; + +static inline bool queue_pm_work(struct work_struct *work) +{ + return queue_work(pm_wq, work); +} + +extern int pm_generic_runtime_suspend(struct device *dev); +extern int pm_generic_runtime_resume(struct device *dev); +extern int pm_runtime_force_suspend(struct device *dev); +extern int pm_runtime_force_resume(struct device *dev); + +extern int __pm_runtime_idle(struct device *dev, int rpmflags); +extern int __pm_runtime_suspend(struct device *dev, int rpmflags); +extern int __pm_runtime_resume(struct device *dev, int rpmflags); +extern int pm_schedule_suspend(struct device *dev, unsigned int delay); +extern int __pm_runtime_set_status(struct device *dev, unsigned int status); +extern int pm_runtime_barrier(struct device *dev); +extern void pm_runtime_enable(struct device *dev); +extern void __pm_runtime_disable(struct device *dev, bool check_resume); +extern void pm_runtime_allow(struct device *dev); +extern void pm_runtime_forbid(struct device *dev); +extern void pm_runtime_no_callbacks(struct device *dev); +extern void pm_runtime_irq_safe(struct device *dev); +extern void __pm_runtime_use_autosuspend(struct device *dev, bool use); +extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay); +extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev); +extern void pm_runtime_update_max_time_suspended(struct device *dev, + s64 delta_ns); +extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable); + +static inline bool pm_children_suspended(struct device *dev) +{ + return dev->power.ignore_children + || !atomic_read(&dev->power.child_count); +} + +static inline void pm_runtime_get_noresume(struct device *dev) +{ + atomic_inc(&dev->power.usage_count); +} + +static inline void pm_runtime_put_noidle(struct device *dev) +{ + atomic_add_unless(&dev->power.usage_count, -1, 0); +} + +static inline bool device_run_wake(struct device *dev) +{ + return dev->power.run_wake; +} + +static inline void device_set_run_wake(struct device *dev, bool enable) +{ + dev->power.run_wake = enable; +} + +static inline bool pm_runtime_suspended(struct device *dev) +{ + return dev->power.runtime_status == RPM_SUSPENDED + && !dev->power.disable_depth; +} + +static inline bool pm_runtime_active(struct device *dev) +{ + return dev->power.runtime_status == RPM_ACTIVE + || dev->power.disable_depth; +} + +static inline bool pm_runtime_status_suspended(struct device *dev) +{ + return dev->power.runtime_status == RPM_SUSPENDED; +} + +static inline bool pm_runtime_enabled(struct device *dev) +{ + return !dev->power.disable_depth; +} + +static inline bool pm_runtime_callbacks_present(struct 
device *dev) +{ + return !dev->power.no_callbacks; +} + +static inline void pm_runtime_mark_last_busy(struct device *dev) +{ + ACCESS_ONCE(dev->power.last_busy) = jiffies; +} + +static inline bool pm_runtime_is_irq_safe(struct device *dev) +{ + return dev->power.irq_safe; +} + +#else /* !CONFIG_PM */ + +static inline bool queue_pm_work(struct work_struct *work) { return false; } + +static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; } +static inline int pm_generic_runtime_resume(struct device *dev) { return 0; } +static inline int pm_runtime_force_suspend(struct device *dev) { return 0; } +static inline int pm_runtime_force_resume(struct device *dev) { return 0; } + +static inline int __pm_runtime_idle(struct device *dev, int rpmflags) +{ + return -ENOSYS; +} +static inline int __pm_runtime_suspend(struct device *dev, int rpmflags) +{ + return -ENOSYS; +} +static inline int __pm_runtime_resume(struct device *dev, int rpmflags) +{ + return 1; +} +static inline int pm_schedule_suspend(struct device *dev, unsigned int delay) +{ + return -ENOSYS; +} +static inline int __pm_runtime_set_status(struct device *dev, + unsigned int status) { return 0; } +static inline int pm_runtime_barrier(struct device *dev) { return 0; } +static inline void pm_runtime_enable(struct device *dev) {} +static inline void __pm_runtime_disable(struct device *dev, bool c) {} +static inline void pm_runtime_allow(struct device *dev) {} +static inline void pm_runtime_forbid(struct device *dev) {} + +static inline bool pm_children_suspended(struct device *dev) { return false; } +static inline void pm_runtime_get_noresume(struct device *dev) {} +static inline void pm_runtime_put_noidle(struct device *dev) {} +static inline bool device_run_wake(struct device *dev) { return false; } +static inline void device_set_run_wake(struct device *dev, bool enable) {} +static inline bool pm_runtime_suspended(struct device *dev) { return false; } +static inline bool pm_runtime_active(struct device *dev) { return true; } +static inline bool pm_runtime_status_suspended(struct device *dev) { return false; } +static inline bool pm_runtime_enabled(struct device *dev) { return false; } + +static inline void pm_runtime_no_callbacks(struct device *dev) {} +static inline void pm_runtime_irq_safe(struct device *dev) {} +static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; } + +static inline bool pm_runtime_callbacks_present(struct device *dev) { return false; } +static inline void pm_runtime_mark_last_busy(struct device *dev) {} +static inline void __pm_runtime_use_autosuspend(struct device *dev, + bool use) {} +static inline void pm_runtime_set_autosuspend_delay(struct device *dev, + int delay) {} +static inline unsigned long pm_runtime_autosuspend_expiration( + struct device *dev) { return 0; } +static inline void pm_runtime_set_memalloc_noio(struct device *dev, + bool enable){} + +#endif /* !CONFIG_PM */ + +static inline int pm_runtime_idle(struct device *dev) +{ + return __pm_runtime_idle(dev, 0); +} + +static inline int pm_runtime_suspend(struct device *dev) +{ + return __pm_runtime_suspend(dev, 0); +} + +static inline int pm_runtime_autosuspend(struct device *dev) +{ + return __pm_runtime_suspend(dev, RPM_AUTO); +} + +static inline int pm_runtime_resume(struct device *dev) +{ + return __pm_runtime_resume(dev, 0); +} + +static inline int pm_request_idle(struct device *dev) +{ + return __pm_runtime_idle(dev, RPM_ASYNC); +} + +static inline int pm_request_resume(struct device *dev) +{ + return 
__pm_runtime_resume(dev, RPM_ASYNC); +} + +static inline int pm_request_autosuspend(struct device *dev) +{ + return __pm_runtime_suspend(dev, RPM_ASYNC | RPM_AUTO); +} + +static inline int pm_runtime_get(struct device *dev) +{ + return __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC); +} + +static inline int pm_runtime_get_sync(struct device *dev) +{ + return __pm_runtime_resume(dev, RPM_GET_PUT); +} + +static inline int pm_runtime_put(struct device *dev) +{ + return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC); +} + +static inline int pm_runtime_put_autosuspend(struct device *dev) +{ + return __pm_runtime_suspend(dev, + RPM_GET_PUT | RPM_ASYNC | RPM_AUTO); +} + +static inline int pm_runtime_put_sync(struct device *dev) +{ + return __pm_runtime_idle(dev, RPM_GET_PUT); +} + +static inline int pm_runtime_put_sync_suspend(struct device *dev) +{ + return __pm_runtime_suspend(dev, RPM_GET_PUT); +} + +static inline int pm_runtime_put_sync_autosuspend(struct device *dev) +{ + return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_AUTO); +} + +static inline int pm_runtime_set_active(struct device *dev) +{ + return __pm_runtime_set_status(dev, RPM_ACTIVE); +} + +static inline void pm_runtime_set_suspended(struct device *dev) +{ + __pm_runtime_set_status(dev, RPM_SUSPENDED); +} + +static inline void pm_runtime_disable(struct device *dev) +{ + __pm_runtime_disable(dev, true); +} + +static inline void pm_runtime_use_autosuspend(struct device *dev) +{ + __pm_runtime_use_autosuspend(dev, true); +} + +static inline void pm_runtime_dont_use_autosuspend(struct device *dev) +{ + __pm_runtime_use_autosuspend(dev, false); +} + +#endif
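Taken together, these wrappers implement the usual get/put discipline around hardware access. A minimal sketch, assuming a hypothetical foo_xfer() I/O path (only the helpers declared above are real; the error-handling policy is illustrative):

static int foo_xfer(struct device *dev)
{
	int ret;

	/* Take a usage-count reference; resume the device if needed. */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* Drop the reference without queueing an idle check. */
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... program the hardware here ... */

	/* Restart the autosuspend clock, then drop the reference. */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}

diff --git a/drivers/include/linux/poison.h b/drivers/include/linux/poison.h index 76899636d..ba6e2eb69 100644 --- a/drivers/include/linux/poison.h +++ b/drivers/include/linux/poison.h @@ -19,8 +19,8 @@ * under normal circumstances, used to verify that nobody uses * non-initialized list entries. */ -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA) -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA) +#define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA) +#define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA) /********** include/linux/timer.h **********/ /* @@ -69,10 +69,6 @@ #define ATM_POISON_FREE 0x12 #define ATM_POISON 0xdeadbeef -/********** net/ **********/ -#define NEIGHBOR_DEAD 0xdeadbeef -#define NETFILTER_LINK_POISON 0xdead57ac - /********** kernel/mutexes **********/ #define MUTEX_DEBUG_INIT 0x11 #define MUTEX_DEBUG_FREE 0x22 @@ -83,7 +79,4 @@ /********** security/ **********/ #define KEY_DESTROY 0xbd -/********** sound/oss/ **********/ -#define OSS_POISON_FREE 0xAB - #endif diff --git a/drivers/include/linux/preempt.h b/drivers/include/linux/preempt.h index de83b4eb1..75e4e3067 100644 --- a/drivers/include/linux/preempt.h +++ b/drivers/include/linux/preempt.h @@ -10,17 +10,124 @@ #include /* - * We use the MSB mostly because its available; see for - * the other bits -- can't include that header due to inclusion hell. + * We put the hardirq and softirq counter into the preemption + * counter. The bitmask has the following meaning: + * + * - bits 0-7 are the preemption count (max preemption depth: 256) + * - bits 8-15 are the softirq count (max # of softirqs: 256) + * + * The hardirq count could in theory be the same as the number of + * interrupts in the system, but we run all interrupt handlers with + * interrupts disabled, so we cannot have nesting interrupts.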
Though + * there are a few palaeontologic drivers which reenable interrupts in + * the handler, so we need more than one bit here. + * + * PREEMPT_MASK: 0x000000ff + * SOFTIRQ_MASK: 0x0000ff00 + * HARDIRQ_MASK: 0x000f0000 + * NMI_MASK: 0x00100000 + * PREEMPT_NEED_RESCHED: 0x80000000 */ +#define PREEMPT_BITS 8 +#define SOFTIRQ_BITS 8 +#define HARDIRQ_BITS 4 +#define NMI_BITS 1 + +#define PREEMPT_SHIFT 0 +#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS) +#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS) +#define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS) + +#define __IRQ_MASK(x) ((1UL << (x))-1) + +#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT) +#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) +#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT) +#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT) + +#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT) +#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT) +#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) +#define NMI_OFFSET (1UL << NMI_SHIFT) + +#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) + +/* We use the MSB mostly because its available */ #define PREEMPT_NEED_RESCHED 0x80000000 +/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */ #include +#define hardirq_count() (preempt_count() & HARDIRQ_MASK) +#define softirq_count() (preempt_count() & SOFTIRQ_MASK) +#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \ + | NMI_MASK)) + +/* + * Are we doing bottom half or hardware interrupt processing? + * Are we in a softirq context? Interrupt context? + * in_softirq - Are we currently processing softirq or have bh disabled? + * in_serving_softirq - Are we currently processing softirq? + */ +#define in_irq() (hardirq_count()) +#define in_softirq() (softirq_count()) +#define in_interrupt() (irq_count()) +#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) + +/* + * Are we in NMI context? + */ +#define in_nmi() (preempt_count() & NMI_MASK) + +/* + * The preempt_count offset after preempt_disable(); + */ +#if defined(CONFIG_PREEMPT_COUNT) +# define PREEMPT_DISABLE_OFFSET PREEMPT_OFFSET +#else +# define PREEMPT_DISABLE_OFFSET 0 +#endif + +/* + * The preempt_count offset after spin_lock() + */ +#define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET + +/* + * The preempt_count offset needed for things like: + * + * spin_lock_bh() + * + * Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and + * softirqs, such that unlock sequences of: + * + * spin_unlock(); + * local_bh_enable(); + * + * Work as expected. + */ +#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET) + +/* + * Are we running in atomic context? WARNING: this macro cannot + * always detect atomic context; in particular, it cannot know about + * held spinlocks in non-preemptible kernels. Thus it should not be + * used in the general case to determine whether sleeping is possible. + * Do not use in_atomic() in driver code. 
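+ * (Core code restricts it to debug assertions, such as the scheduler's
+ * in_atomic_preempt_off() check defined just below.)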
+ */ +#define in_atomic() (preempt_count() != 0) + +/* + * Check whether we were atomic before we did preempt_disable(): + * (used by the scheduler) + */ +#define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET) + #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER) extern void preempt_count_add(int val); extern void preempt_count_sub(int val); -#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); }) +#define preempt_count_dec_and_test() \ + ({ preempt_count_sub(1); should_resched(0); }) #else #define preempt_count_add(val) __preempt_count_add(val) #define preempt_count_sub(val) __preempt_count_sub(val) @@ -49,6 +156,8 @@ do { \ #define preempt_enable_no_resched() sched_preempt_enable_no_resched() +#define preemptible() (preempt_count() == 0 && !irqs_disabled()) + #ifdef CONFIG_PREEMPT #define preempt_enable() \ do { \ @@ -57,20 +166,34 @@ do { \ __preempt_schedule(); \ } while (0) +#define preempt_enable_notrace() \ +do { \ + barrier(); \ + if (unlikely(__preempt_count_dec_and_test())) \ + __preempt_schedule_notrace(); \ +} while (0) + #define preempt_check_resched() \ do { \ - if (should_resched()) \ + if (should_resched(0)) \ __preempt_schedule(); \ } while (0) -#else +#else /* !CONFIG_PREEMPT */ #define preempt_enable() \ do { \ barrier(); \ preempt_count_dec(); \ } while (0) + +#define preempt_enable_notrace() \ +do { \ + barrier(); \ + __preempt_count_dec(); \ +} while (0) + #define preempt_check_resched() do { } while (0) -#endif +#endif /* CONFIG_PREEMPT */ #define preempt_disable_notrace() \ do { \ @@ -84,26 +207,6 @@ do { \ __preempt_count_dec(); \ } while (0) -#ifdef CONFIG_PREEMPT - -#ifndef CONFIG_CONTEXT_TRACKING -#define __preempt_schedule_context() __preempt_schedule() -#endif - -#define preempt_enable_notrace() \ -do { \ - barrier(); \ - if (unlikely(__preempt_count_dec_and_test())) \ - __preempt_schedule_context(); \ -} while (0) -#else -#define preempt_enable_notrace() \ -do { \ - barrier(); \ - __preempt_count_dec(); \ -} while (0) -#endif - #else /* !CONFIG_PREEMPT_COUNT */ /* @@ -121,6 +224,7 @@ do { \ #define preempt_disable_notrace() barrier() #define preempt_enable_no_resched_notrace() barrier() #define preempt_enable_notrace() barrier() +#define preemptible() 0 #endif /* CONFIG_PREEMPT_COUNT */ @@ -180,6 +284,8 @@ struct preempt_notifier { struct preempt_ops *ops; }; +void preempt_notifier_inc(void); +void preempt_notifier_dec(void); void preempt_notifier_register(struct preempt_notifier *notifier); void preempt_notifier_unregister(struct preempt_notifier *notifier); diff --git a/drivers/include/linux/printk.h b/drivers/include/linux/printk.h index 09130023e..2e95be7ee 100644 --- a/drivers/include/linux/printk.h +++ b/drivers/include/linux/printk.h @@ -65,6 +65,10 @@ struct va_format { */ #define DEPRECATED "[Deprecated]: " +/* + * Dummy printk for disabled debugging statements to use whilst maintaining + * gcc's format and side-effect checking. + */ static inline __printf(1, 2) int no_printk(const char *fmt, ...) { @@ -103,6 +107,11 @@ __printf(1, 2) int dbgprintf(const char *fmt, ...); printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) #define pr_info(fmt, ...) \ printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) +/* + * Like KERN_CONT, pr_cont() should only be used when continuing + * a line with no newline ('\n') enclosed. Otherwise it defaults + * back to KERN_DEFAULT. + */ #define pr_cont(fmt, ...) 
\ printk(KERN_CONT fmt, ##__VA_ARGS__) @@ -250,9 +259,9 @@ enum { DUMP_PREFIX_ADDRESS, DUMP_PREFIX_OFFSET }; -extern void hex_dump_to_buffer(const void *buf, size_t len, - int rowsize, int groupsize, - char *linebuf, size_t linebuflen, bool ascii); +extern int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, + int groupsize, char *linebuf, size_t linebuflen, + bool ascii); extern void print_hex_dump(const char *level, const char *prefix_str, int prefix_type, int rowsize, int groupsize, diff --git a/drivers/include/linux/pwm.h b/drivers/include/linux/pwm.h new file mode 100644 index 000000000..7de60f0b8 --- /dev/null +++ b/drivers/include/linux/pwm.h @@ -0,0 +1,219 @@ +#ifndef __LINUX_PWM_H +#define __LINUX_PWM_H + +#include +#include +//#include + +struct device; +struct pwm_device; +struct seq_file; + +#if IS_ENABLED(CONFIG_PWM) +/* + * pwm_request - request a PWM device + */ +struct pwm_device *pwm_request(int pwm_id, const char *label); + +/* + * pwm_free - free a PWM device + */ +void pwm_free(struct pwm_device *pwm); + +/* + * pwm_config - change a PWM device configuration + */ +int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns); + +/* + * pwm_enable - start a PWM output toggling + */ +int pwm_enable(struct pwm_device *pwm); + +/* + * pwm_disable - stop a PWM output toggling + */ +void pwm_disable(struct pwm_device *pwm); +#else +static inline struct pwm_device *pwm_request(int pwm_id, const char *label) +{ + return ERR_PTR(-ENODEV); +} + +static inline void pwm_free(struct pwm_device *pwm) +{ +} + +static inline int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns) +{ + return -EINVAL; +} + +static inline int pwm_enable(struct pwm_device *pwm) +{ + return -EINVAL; +} + +static inline void pwm_disable(struct pwm_device *pwm) +{ +} +#endif + +struct pwm_chip; + +/** + * enum pwm_polarity - polarity of a PWM signal + * @PWM_POLARITY_NORMAL: a high signal for the duration of the duty- + * cycle, followed by a low signal for the remainder of the pulse + * period + * @PWM_POLARITY_INVERSED: a low signal for the duration of the duty- + * cycle, followed by a high signal for the remainder of the pulse + * period + */ +enum pwm_polarity { + PWM_POLARITY_NORMAL, + PWM_POLARITY_INVERSED, +}; + +enum { + PWMF_REQUESTED = 1 << 0, + PWMF_ENABLED = 1 << 1, + PWMF_EXPORTED = 1 << 2, +}; + +/** + * struct pwm_device - PWM channel object + * @label: name of the PWM device + * @flags: flags associated with the PWM device + * @hwpwm: per-chip relative index of the PWM device + * @pwm: global index of the PWM device + * @chip: PWM chip providing this PWM device + * @chip_data: chip-private data associated with the PWM device + * @lock: used to serialize accesses to the PWM device where necessary + * @period: period of the PWM signal (in nanoseconds) + * @duty_cycle: duty cycle of the PWM signal (in nanoseconds) + * @polarity: polarity of the PWM signal + */ +struct pwm_device { + const char *label; + unsigned long flags; + unsigned int hwpwm; + unsigned int pwm; + struct pwm_chip *chip; + void *chip_data; + struct mutex lock; + + unsigned int period; + unsigned int duty_cycle; + enum pwm_polarity polarity; +}; + +static inline bool pwm_is_enabled(const struct pwm_device *pwm) +{ + return test_bit(PWMF_ENABLED, &pwm->flags); +} + +static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period) +{ + if (pwm) + pwm->period = period; +} + +static inline unsigned int pwm_get_period(const struct pwm_device *pwm) +{ + return pwm ? 
pwm->period : 0; +} + +static inline void pwm_set_duty_cycle(struct pwm_device *pwm, unsigned int duty) +{ + if (pwm) + pwm->duty_cycle = duty; +} + +static inline unsigned int pwm_get_duty_cycle(const struct pwm_device *pwm) +{ + return pwm ? pwm->duty_cycle : 0; +} + +/* + * pwm_set_polarity - configure the polarity of a PWM signal + */ +int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity); + +static inline enum pwm_polarity pwm_get_polarity(const struct pwm_device *pwm) +{ + return pwm ? pwm->polarity : PWM_POLARITY_NORMAL; +} + +/** + * struct pwm_ops - PWM controller operations + * @request: optional hook for requesting a PWM + * @free: optional hook for freeing a PWM + * @config: configure duty cycles and period length for this PWM + * @set_polarity: configure the polarity of this PWM + * @enable: enable PWM output toggling + * @disable: disable PWM output toggling + * @dbg_show: optional routine to show contents in debugfs + * @owner: helps prevent removal of modules exporting active PWMs + */ +struct pwm_ops { + int (*request)(struct pwm_chip *chip, struct pwm_device *pwm); + void (*free)(struct pwm_chip *chip, struct pwm_device *pwm); + int (*config)(struct pwm_chip *chip, struct pwm_device *pwm, + int duty_ns, int period_ns); + int (*set_polarity)(struct pwm_chip *chip, struct pwm_device *pwm, + enum pwm_polarity polarity); + int (*enable)(struct pwm_chip *chip, struct pwm_device *pwm); + void (*disable)(struct pwm_chip *chip, struct pwm_device *pwm); +#ifdef CONFIG_DEBUG_FS + void (*dbg_show)(struct pwm_chip *chip, struct seq_file *s); +#endif + struct module *owner; +}; + +#if IS_ENABLED(CONFIG_PWM) +int pwm_set_chip_data(struct pwm_device *pwm, void *data); +void *pwm_get_chip_data(struct pwm_device *pwm); +struct pwm_device *pwm_get(struct device *dev, const char *con_id); +struct pwm_device *of_pwm_get(struct device_node *np, const char *con_id); +void pwm_put(struct pwm_device *pwm); + +struct pwm_device *devm_pwm_get(struct device *dev, const char *con_id); +void devm_pwm_put(struct device *dev, struct pwm_device *pwm); + +bool pwm_can_sleep(struct pwm_device *pwm); +#else +static inline int pwm_set_chip_data(struct pwm_device *pwm, void *data) +{ + return -EINVAL; +} + +static inline void *pwm_get_chip_data(struct pwm_device *pwm) +{ + return NULL; +} +static inline struct pwm_device *pwm_get(struct device *dev, + const char *consumer) +{ + return ERR_PTR(-ENODEV); +} +static inline void pwm_put(struct pwm_device *pwm) +{ +} + +static inline struct pwm_device *devm_pwm_get(struct device *dev, + const char *consumer) +{ + return ERR_PTR(-ENODEV); +} +static inline void devm_pwm_put(struct device *dev, struct pwm_device *pwm) +{ +} + +static inline bool pwm_can_sleep(struct pwm_device *pwm) +{ + return false; +} +#endif + +#endif /* __LINUX_PWM_H */ diff --git a/drivers/include/linux/rbtree.h b/drivers/include/linux/rbtree.h index 57e75ae99..f714a1b66 100644 --- a/drivers/include/linux/rbtree.h +++ b/drivers/include/linux/rbtree.h @@ -51,7 +51,7 @@ struct rb_root { #define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL) -/* 'empty' nodes are nodes that are known not to be inserted in an rbree */ +/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */ #define RB_EMPTY_NODE(node) \ ((node)->__rb_parent_color == (unsigned long)(node)) #define RB_CLEAR_NODE(node) \ @@ -73,11 +73,11 @@ extern struct rb_node *rb_first_postorder(const struct rb_root *); extern struct rb_node *rb_next_postorder(const struct rb_node *); /* Fast replacement of 
a single node without remove/rebalance/add/rebalance */ -extern void rb_replace_node(struct rb_node *victim, struct rb_node *new, +extern void rb_replace_node(struct rb_node *victim, struct rb_node *new, struct rb_root *root); -static inline void rb_link_node(struct rb_node * node, struct rb_node * parent, - struct rb_node ** rb_link) +static inline void rb_link_node(struct rb_node *node, struct rb_node *parent, + struct rb_node **rb_link) { node->__rb_parent_color = (unsigned long)parent; node->rb_left = node->rb_right = NULL; @@ -85,19 +85,36 @@ static inline void rb_link_node(struct rb_node * node, struct rb_node * parent, *rb_link = node; } +static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent, + struct rb_node **rb_link) +{ + node->__rb_parent_color = (unsigned long)parent; + node->rb_left = node->rb_right = NULL; + + rcu_assign_pointer(*rb_link, node); +} + #define rb_entry_safe(ptr, type, member) \ ({ typeof(ptr) ____ptr = (ptr); \ ____ptr ? rb_entry(____ptr, type, member) : NULL; \ }) /** - * rbtree_postorder_for_each_entry_safe - iterate over rb_root in post order of - * given type safe against removal of rb_node entry + * rbtree_postorder_for_each_entry_safe - iterate in post-order over rb_root of + * given type allowing the backing memory of @pos to be invalidated * * @pos: the 'type *' to use as a loop cursor. * @n: another 'type *' to use as temporary storage * @root: 'rb_root *' of the rbtree. * @field: the name of the rb_node field within 'type'. + * + * rbtree_postorder_for_each_entry_safe() provides a similar guarantee as + * list_for_each_entry_safe() and allows the iteration to continue independent + * of changes to @pos by the body of the loop. + * + * Note, however, that it cannot handle other modifications that re-order the + * rbtree it is iterating over. This includes calling rb_erase() on @pos, as + * rb_erase() may rebalance the tree, causing us to miss some nodes. 
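+ *
+ * A minimal sketch of the intended use, assuming a caller-defined
+ * struct my_node that embeds a struct rb_node named "node": tearing
+ * down an entire tree without rebalancing it after every removal:
+ *
+ *	struct my_node *pos, *n;
+ *
+ *	rbtree_postorder_for_each_entry_safe(pos, n, &root, node)
+ *		kfree(pos);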
*/ #define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \ for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \ diff --git a/drivers/include/linux/rbtree_augmented.h b/drivers/include/linux/rbtree_augmented.h index 378c5ee75..14d7b831b 100644 --- a/drivers/include/linux/rbtree_augmented.h +++ b/drivers/include/linux/rbtree_augmented.h @@ -123,11 +123,11 @@ __rb_change_child(struct rb_node *old, struct rb_node *new, { if (parent) { if (parent->rb_left == old) - parent->rb_left = new; + WRITE_ONCE(parent->rb_left, new); else - parent->rb_right = new; + WRITE_ONCE(parent->rb_right, new); } else - root->rb_node = new; + WRITE_ONCE(root->rb_node, new); } extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root, @@ -137,7 +137,8 @@ static __always_inline struct rb_node * __rb_erase_augmented(struct rb_node *node, struct rb_root *root, const struct rb_augment_callbacks *augment) { - struct rb_node *child = node->rb_right, *tmp = node->rb_left; + struct rb_node *child = node->rb_right; + struct rb_node *tmp = node->rb_left; struct rb_node *parent, *rebalance; unsigned long pc; @@ -167,6 +168,7 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root, tmp = parent; } else { struct rb_node *successor = child, *child2; + tmp = child->rb_left; if (!tmp) { /* @@ -180,6 +182,7 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root, */ parent = successor; child2 = successor->rb_right; + augment->copy(node, successor); } else { /* @@ -201,19 +204,23 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root, successor = tmp; tmp = tmp->rb_left; } while (tmp); - parent->rb_left = child2 = successor->rb_right; - successor->rb_right = child; + child2 = successor->rb_right; + WRITE_ONCE(parent->rb_left, child2); + WRITE_ONCE(successor->rb_right, child); rb_set_parent(child, successor); + augment->copy(node, successor); augment->propagate(parent, successor); } - successor->rb_left = tmp = node->rb_left; + tmp = node->rb_left; + WRITE_ONCE(successor->rb_left, tmp); rb_set_parent(tmp, successor); pc = node->__rb_parent_color; tmp = __rb_parent(pc); __rb_change_child(node, successor, tmp, root); + if (child2) { successor->__rb_parent_color = pc; rb_set_parent_color(child2, parent, RB_BLACK); diff --git a/drivers/include/linux/rculist.h b/drivers/include/linux/rculist.h index 226bb2067..5ed540986 100644 --- a/drivers/include/linux/rculist.h +++ b/drivers/include/linux/rculist.h @@ -29,8 +29,8 @@ */ static inline void INIT_LIST_HEAD_RCU(struct list_head *list) { - ACCESS_ONCE(list->next) = list; - ACCESS_ONCE(list->prev) = list; + WRITE_ONCE(list->next, list); + WRITE_ONCE(list->prev, list); } /* @@ -56,7 +56,7 @@ static inline void __list_add_rcu(struct list_head *new, } #else void __list_add_rcu(struct list_head *new, - struct list_head *prev, struct list_head *next); + struct list_head *prev, struct list_head *next); #endif /** @@ -247,10 +247,7 @@ static inline void list_splice_init_rcu(struct list_head *list, * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). */ #define list_entry_rcu(ptr, type, member) \ -({ \ - typeof(*ptr) __rcu *__ptr = (typeof(*ptr) __rcu __force *)ptr; \ - container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member); \ -}) + container_of(lockless_dereference(ptr), type, member) /** * Where are list_empty_rcu() and list_first_entry_rcu()? 
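
The list_entry_rcu() rework above folds the open-coded cast-and-dereference
sequence into lockless_dereference(). A minimal reader sketch, assuming a
hypothetical struct my_struct linked through a "list" member on an
RCU-protected list (it uses the list_first_or_null_rcu() helper touched in
the next hunk):

	struct my_struct {
		struct list_head list;
		int data;
	};

	/* Return the first entry's payload, or -1 if the list is empty. */
	static int first_data(struct list_head *head)
	{
		struct my_struct *e;
		int val = -1;

		rcu_read_lock();
		e = list_first_or_null_rcu(head, struct my_struct, list);
		if (e)
			val = e->data;
		rcu_read_unlock();
		return val;
	}
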
@@ -288,7 +285,7 @@ static inline void list_splice_init_rcu(struct list_head *list, #define list_first_or_null_rcu(ptr, type, member) \ ({ \ struct list_head *__ptr = (ptr); \ - struct list_head *__next = ACCESS_ONCE(__ptr->next); \ + struct list_head *__next = READ_ONCE(__ptr->next); \ likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \ }) @@ -524,11 +521,11 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n, * @member: the name of the hlist_node within the struct. */ #define hlist_for_each_entry_continue_rcu(pos, member) \ - for (pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\ - typeof(*(pos)), member); \ + for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \ + &(pos)->member)), typeof(*(pos)), member); \ pos; \ - pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\ - typeof(*(pos)), member)) + pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \ + &(pos)->member)), typeof(*(pos)), member)) /** * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point @@ -536,11 +533,11 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n, * @member: the name of the hlist_node within the struct. */ #define hlist_for_each_entry_continue_rcu_bh(pos, member) \ - for (pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\ - typeof(*(pos)), member); \ + for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \ + &(pos)->member)), typeof(*(pos)), member); \ pos; \ - pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\ - typeof(*(pos)), member)) + pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \ + &(pos)->member)), typeof(*(pos)), member)) /** * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point @@ -549,8 +546,8 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n, */ #define hlist_for_each_entry_from_rcu(pos, member) \ for (; pos; \ - pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\ - typeof(*(pos)), member)) + pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \ + &(pos)->member)), typeof(*(pos)), member)) #endif /* __KERNEL__ */ #endif diff --git a/drivers/include/linux/rcupdate.h b/drivers/include/linux/rcupdate.h index f5338080b..ae59886be 100644 --- a/drivers/include/linux/rcupdate.h +++ b/drivers/include/linux/rcupdate.h @@ -44,10 +44,32 @@ //#include #include #include +#include + #include extern int rcu_expedited; /* for sysctl */ +#ifdef CONFIG_TINY_RCU +/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */ +static inline bool rcu_gp_is_expedited(void) /* Internal RCU use. */ +{ + return false; +} + +static inline void rcu_expedite_gp(void) +{ +} + +static inline void rcu_unexpedite_gp(void) +{ +} +#else /* #ifdef CONFIG_TINY_RCU */ +bool rcu_gp_is_expedited(void); /* Internal RCU use. */ +void rcu_expedite_gp(void); +void rcu_unexpedite_gp(void); +#endif /* #else #ifdef CONFIG_TINY_RCU */ + enum rcutorture_type { RCU_FLAVOR, RCU_BH_FLAVOR, @@ -138,7 +160,7 @@ void do_trace_rcu_torture_read(const char *rcutorturename, * more than one CPU). */ void call_rcu(struct rcu_head *head, - void (*func)(struct rcu_head *head)); + rcu_callback_t func); #else /* #ifdef CONFIG_PREEMPT_RCU */ @@ -169,7 +191,7 @@ void call_rcu(struct rcu_head *head, * memory ordering guarantees. */ void call_rcu_bh(struct rcu_head *head, - void (*func)(struct rcu_head *head)); + rcu_callback_t func); /** * call_rcu_sched() - Queue an RCU for invocation after sched grace period. 
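
The call_rcu() and call_rcu_bh() prototypes above now take the rcu_callback_t
typedef rather than a spelled-out function-pointer type; the hunks that follow
do the same for call_rcu_sched() and call_rcu_tasks(). A hedged sketch of the
usual callback pattern (struct foo and its helpers are illustrative only):

	struct foo {
		int data;
		struct rcu_head rcu;
	};

	/* Signature matches rcu_callback_t: void (*)(struct rcu_head *). */
	static void foo_rcu_free(struct rcu_head *head)
	{
		struct foo *fp = container_of(head, struct foo, rcu);

		kfree(fp);
	}

	/* Defer the actual free until a grace period has elapsed. */
	static void foo_release(struct foo *fp)
	{
		call_rcu(&fp->rcu, foo_rcu_free);
	}
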
@@ -191,7 +213,7 @@ void call_rcu_bh(struct rcu_head *head, * memory ordering guarantees. */ void call_rcu_sched(struct rcu_head *head, - void (*func)(struct rcu_head *rcu)); + rcu_callback_t func); void synchronize_sched(void); @@ -213,7 +235,7 @@ void synchronize_sched(void); * See the description of call_rcu() for more detailed information on * memory ordering guarantees. */ -void call_rcu_tasks(struct rcu_head *head, void (*func)(struct rcu_head *head)); +void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func); void synchronize_rcu_tasks(void); void rcu_barrier_tasks(void); @@ -236,12 +258,14 @@ void synchronize_rcu(void); static inline void __rcu_read_lock(void) { - preempt_disable(); + if (IS_ENABLED(CONFIG_PREEMPT_COUNT)) + preempt_disable(); } static inline void __rcu_read_unlock(void) { - preempt_enable(); + if (IS_ENABLED(CONFIG_PREEMPT_COUNT)) + preempt_enable(); } static inline void synchronize_rcu(void) @@ -258,14 +282,13 @@ static inline int rcu_preempt_depth(void) /* Internal to kernel */ void rcu_init(void); +void rcu_end_inkernel_boot(void); void rcu_sched_qs(void); void rcu_bh_qs(void); void rcu_check_callbacks(int user); struct notifier_block; -void rcu_idle_enter(void); -void rcu_idle_exit(void); -void rcu_irq_enter(void); -void rcu_irq_exit(void); +int rcu_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu); #ifdef CONFIG_RCU_STALL_COMMON void rcu_sysrq_start(void); @@ -279,7 +302,7 @@ static inline void rcu_sysrq_end(void) } #endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */ -#ifdef CONFIG_RCU_USER_QS +#ifdef CONFIG_NO_HZ_FULL void rcu_user_enter(void); void rcu_user_exit(void); #else @@ -287,7 +310,7 @@ static inline void rcu_user_enter(void) { } static inline void rcu_user_exit(void) { } static inline void rcu_user_hooks_switch(struct task_struct *prev, struct task_struct *next) { } -#endif /* CONFIG_RCU_USER_QS */ +#endif /* CONFIG_NO_HZ_FULL */ #ifdef CONFIG_RCU_NOCB_CPU void rcu_init_nohz(void); @@ -331,12 +354,13 @@ static inline void rcu_init_nohz(void) extern struct srcu_struct tasks_rcu_exit_srcu; #define rcu_note_voluntary_context_switch(t) \ do { \ - if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \ - ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \ + rcu_all_qs(); \ + if (READ_ONCE((t)->rcu_tasks_holdout)) \ + WRITE_ONCE((t)->rcu_tasks_holdout, false); \ } while (0) #else /* #ifdef CONFIG_TASKS_RCU */ #define TASKS_RCU(x) do { } while (0) -#define rcu_note_voluntary_context_switch(t) do { } while (0) +#define rcu_note_voluntary_context_switch(t) rcu_all_qs() #endif /* #else #ifdef CONFIG_TASKS_RCU */ /** @@ -361,10 +385,6 @@ bool __rcu_is_watching(void); * TREE_RCU and rcu_barrier_() primitives in TINY_RCU. */ -typedef void call_rcu_func_t(struct rcu_head *head, - void (*func)(struct rcu_head *head)); -void wait_rcu_gp(call_rcu_func_t crf); - #if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) #include #elif defined(CONFIG_TINY_RCU) @@ -576,17 +596,17 @@ static inline void rcu_preempt_sleep_check(void) #define __rcu_access_pointer(p, space) \ ({ \ - typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \ + typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \ rcu_dereference_sparse(p, space); \ ((typeof(*p) __force __kernel *)(_________p1)); \ }) #define __rcu_dereference_check(p, c, space) \ ({ \ - typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \ + /* Dependency order vs. p above. 
*/ \ + typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \ rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \ rcu_dereference_sparse(p, space); \ - smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ - ((typeof(*p) __force __kernel *)(_________p1)); \ + ((typeof(*p) __force __kernel *)(________p1)); \ }) #define __rcu_dereference_protected(p, c, space) \ ({ \ @@ -595,42 +615,12 @@ static inline void rcu_preempt_sleep_check(void) ((typeof(*p) __force __kernel *)(p)); \ }) -#define __rcu_access_index(p, space) \ -({ \ - typeof(p) _________p1 = ACCESS_ONCE(p); \ - rcu_dereference_sparse(p, space); \ - (_________p1); \ -}) -#define __rcu_dereference_index_check(p, c) \ -({ \ - typeof(p) _________p1 = ACCESS_ONCE(p); \ - rcu_lockdep_assert(c, \ - "suspicious rcu_dereference_index_check() usage"); \ - smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ - (_________p1); \ -}) - /** * RCU_INITIALIZER() - statically initialize an RCU-protected global variable * @v: The value to statically initialize with. */ #define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v) -/** - * lockless_dereference() - safely load a pointer for later dereference - * @p: The pointer to load - * - * Similar to rcu_dereference(), but for situations where the pointed-to - * object's lifetime is managed by something other than RCU. That - * "something other" might be reference counting or simple immortality. - */ -#define lockless_dereference(p) \ -({ \ - typeof(p) _________p1 = ACCESS_ONCE(p); \ - smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ - (_________p1); \ -}) - /** * rcu_assign_pointer() - assign to RCU-protected pointer * @p: pointer to assign to @@ -669,7 +659,7 @@ static inline void rcu_preempt_sleep_check(void) * @p: The pointer to read * * Return the value of the specified RCU-protected pointer, but omit the - * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful + * smp_read_barrier_depends() and keep the READ_ONCE(). This is useful * when the value of this pointer is accessed, but the pointer is not * dereferenced, for example, when testing an RCU-protected pointer against * NULL. Although rcu_access_pointer() may also be used in cases where @@ -719,7 +709,7 @@ static inline void rcu_preempt_sleep_check(void) * annotated as __rcu. */ #define rcu_dereference_check(p, c) \ - __rcu_dereference_check((p), rcu_read_lock_held() || (c), __rcu) + __rcu_dereference_check((p), (c) || rcu_read_lock_held(), __rcu) /** * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking @@ -729,7 +719,7 @@ static inline void rcu_preempt_sleep_check(void) * This is the RCU-bh counterpart to rcu_dereference_check(). */ #define rcu_dereference_bh_check(p, c) \ - __rcu_dereference_check((p), rcu_read_lock_bh_held() || (c), __rcu) + __rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu) /** * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking @@ -739,7 +729,7 @@ static inline void rcu_preempt_sleep_check(void) * This is the RCU-sched counterpart to rcu_dereference_check(). */ #define rcu_dereference_sched_check(p, c) \ - __rcu_dereference_check((p), rcu_read_lock_sched_held() || (c), \ + __rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \ __rcu) #define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? 
@@@*/ @@ -753,48 +743,13 @@ static inline void rcu_preempt_sleep_check(void) */ #define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu) -/** - * rcu_access_index() - fetch RCU index with no dereferencing - * @p: The index to read - * - * Return the value of the specified RCU-protected index, but omit the - * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful - * when the value of this index is accessed, but the index is not - * dereferenced, for example, when testing an RCU-protected index against - * -1. Although rcu_access_index() may also be used in cases where - * update-side locks prevent the value of the index from changing, you - * should instead use rcu_dereference_index_protected() for this use case. - */ -#define rcu_access_index(p) __rcu_access_index((p), __rcu) - -/** - * rcu_dereference_index_check() - rcu_dereference for indices with debug checking - * @p: The pointer to read, prior to dereferencing - * @c: The conditions under which the dereference will take place - * - * Similar to rcu_dereference_check(), but omits the sparse checking. - * This allows rcu_dereference_index_check() to be used on integers, - * which can then be used as array indices. Attempting to use - * rcu_dereference_check() on an integer will give compiler warnings - * because the sparse address-space mechanism relies on dereferencing - * the RCU-protected pointer. Dereferencing integers is not something - * that even gcc will put up with. - * - * Note that this function does not implicitly check for RCU read-side - * critical sections. If this function gains lots of uses, it might - * make sense to provide versions for each flavor of RCU, but it does - * not make sense as of early 2010. - */ -#define rcu_dereference_index_check(p, c) \ - __rcu_dereference_index_check((p), (c)) - /** * rcu_dereference_protected() - fetch RCU pointer when updates prevented * @p: The pointer to read, prior to dereferencing * @c: The conditions under which the dereference will take place * * Return the value of the specified RCU-protected pointer, but omit - * both the smp_read_barrier_depends() and the ACCESS_ONCE(). This + * both the smp_read_barrier_depends() and the READ_ONCE(). This * is useful in cases where update-side locks prevent the value of the * pointer from changing. Please note that this primitive does -not- * prevent the compiler from repeating this reference or combining it @@ -932,9 +887,9 @@ static inline void rcu_read_unlock(void) { rcu_lockdep_assert(rcu_is_watching(), "rcu_read_unlock() used illegally while idle"); - rcu_lock_release(&rcu_lock_map); __release(RCU); __rcu_read_unlock(); + rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. 
*/ } /** @@ -1120,13 +1075,13 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) #define kfree_rcu(ptr, rcu_head) \ __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head)) -#if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL) -static inline int rcu_needs_cpu(unsigned long *delta_jiffies) +#ifdef CONFIG_TINY_RCU +static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt) { - *delta_jiffies = ULONG_MAX; + *nextevt = KTIME_MAX; return 0; } -#endif /* #if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL) */ +#endif /* #ifdef CONFIG_TINY_RCU */ #if defined(CONFIG_RCU_NOCB_CPU_ALL) static inline bool rcu_is_nocb_cpu(int cpu) { return true; } diff --git a/drivers/include/linux/rcutiny.h b/drivers/include/linux/rcutiny.h index 0e5366200..3df6c1ec4 100644 --- a/drivers/include/linux/rcutiny.h +++ b/drivers/include/linux/rcutiny.h @@ -92,17 +92,49 @@ static inline void rcu_virt_note_context_switch(int cpu) } /* - * Return the number of grace periods. + * Return the number of grace periods started. */ -static inline long rcu_batches_completed(void) +static inline unsigned long rcu_batches_started(void) { return 0; } /* - * Return the number of bottom-half grace periods. + * Return the number of bottom-half grace periods started. */ -static inline long rcu_batches_completed_bh(void) +static inline unsigned long rcu_batches_started_bh(void) +{ + return 0; +} + +/* + * Return the number of sched grace periods started. + */ +static inline unsigned long rcu_batches_started_sched(void) +{ + return 0; +} + +/* + * Return the number of grace periods completed. + */ +static inline unsigned long rcu_batches_completed(void) +{ + return 0; +} + +/* + * Return the number of bottom-half grace periods completed. + */ +static inline unsigned long rcu_batches_completed_bh(void) +{ + return 0; +} + +/* + * Return the number of sched grace periods completed. + */ +static inline unsigned long rcu_batches_completed_sched(void) { return 0; } @@ -127,6 +159,22 @@ static inline void rcu_cpu_stall_reset(void) { } +static inline void rcu_idle_enter(void) +{ +} + +static inline void rcu_idle_exit(void) +{ +} + +static inline void rcu_irq_enter(void) +{ +} + +static inline void rcu_irq_exit(void) +{ +} + static inline void exit_rcu(void) { } @@ -154,7 +202,10 @@ static inline bool rcu_is_watching(void) return true; } - #endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */ +static inline void rcu_all_qs(void) +{ +} + #endif /* __LINUX_RCUTINY_H */ diff --git a/drivers/include/linux/scatterlist.h b/drivers/include/linux/scatterlist.h index 9cbc764e6..494afd84c 100644 --- a/drivers/include/linux/scatterlist.h +++ b/drivers/include/linux/scatterlist.h @@ -2,12 +2,37 @@ #define _LINUX_SCATTERLIST_H #include +#include #include #include -#include -#include -//#include +struct scatterlist { +#ifdef CONFIG_DEBUG_SG + unsigned long sg_magic; +#endif + unsigned long page_link; + unsigned int offset; + unsigned int length; + dma_addr_t dma_address; +#ifdef CONFIG_NEED_SG_DMA_LENGTH + unsigned int dma_length; +#endif +}; + +/* + * These macros should be used after a dma_map_sg call has been done + * to get bus addresses of each of the SG entries and their lengths. + * You should only work with the number of sg entries dma_map_sg + * returns, or alternatively stop on the first sg_dma_len(sg) which + * is 0. 
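+ *
+ * Roughly, as a sketch only ("dev", "sgl" and the DMA direction are
+ * properties of the example, not of this header):
+ *
+ *	struct scatterlist *sg;
+ *	int i, count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
+ *
+ *	for_each_sg(sgl, sg, count, i)
+ *		program_hw(sg_dma_address(sg), sg_dma_len(sg));
+ *
+ * where program_hw() stands in for whatever consumes the bus addresses.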
+ */ +#define sg_dma_address(sg) ((sg)->dma_address) + +#ifdef CONFIG_NEED_SG_DMA_LENGTH +#define sg_dma_len(sg) ((sg)->dma_length) +#else +#define sg_dma_len(sg) ((sg)->length) +#endif struct sg_table { struct scatterlist *sgl; /* the list */ @@ -18,10 +43,9 @@ struct sg_table { /* * Notes on SG table design. * - * Architectures must provide an unsigned long page_link field in the - * scatterlist struct. We use that to place the page pointer AND encode - * information about the sg table as well. The two lower bits are reserved - * for this information. + * We use the unsigned long page_link field in the scatterlist struct to place + * the page pointer AND encode information about the sg table as well. The two + * lower bits are reserved for this information. * * If bit 0 is set, then the page_link contains a pointer to the next sg * table list. Otherwise the next entry is at sg + 1. @@ -108,14 +132,14 @@ static inline struct page *sg_page(struct scatterlist *sg) * @buflen: Data length * **/ -//static inline void sg_set_buf(struct scatterlist *sg, const void *buf, -// unsigned int buflen) -//{ -//#ifdef CONFIG_DEBUG_SG -// BUG_ON(!virt_addr_valid(buf)); -//#endif -// sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf)); -//} +static inline void sg_set_buf(struct scatterlist *sg, const void *buf, + unsigned int buflen) +{ +#ifdef CONFIG_DEBUG_SG + BUG_ON(!virt_addr_valid(buf)); +#endif + sg_set_page(sg, (struct page*)((unsigned)buf&0xFFFFF000), buflen, offset_in_page(buf)); +} /* * Loop over each sg element, following the pointer to a new list if necessary @@ -136,10 +160,6 @@ static inline struct page *sg_page(struct scatterlist *sg) static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents, struct scatterlist *sgl) { -#ifndef CONFIG_ARCH_HAS_SG_CHAIN - BUG(); -#endif - /* * offset and length are unused for chain entry. Clear them. 
*/ @@ -221,10 +241,16 @@ static inline dma_addr_t sg_phys(struct scatterlist *sg) //} int sg_nents(struct scatterlist *sg); +int sg_nents_for_len(struct scatterlist *sg, u64 len); struct scatterlist *sg_next(struct scatterlist *); struct scatterlist *sg_last(struct scatterlist *s, unsigned int); void sg_init_table(struct scatterlist *, unsigned int); void sg_init_one(struct scatterlist *, const void *, unsigned int); +int sg_split(struct scatterlist *in, const int in_mapped_nents, + const off_t skip, const int nb_splits, + const size_t *split_sizes, + struct scatterlist **out, int *out_mapped_nents, + gfp_t gfp_mask); typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t); typedef void (sg_free_fn)(struct scatterlist *, unsigned int); @@ -239,13 +265,16 @@ int sg_alloc_table_from_pages(struct sg_table *sgt, unsigned long offset, unsigned long size, gfp_t gfp_mask); +size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, + size_t buflen, off_t skip, bool to_buffer); + size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents, - void *buf, size_t buflen); + const void *buf, size_t buflen); size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, size_t buflen); size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents, - void *buf, size_t buflen, off_t skip); + const void *buf, size_t buflen, off_t skip); size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, size_t buflen, off_t skip); @@ -349,4 +378,6 @@ bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset); bool sg_miter_next(struct sg_mapping_iter *miter); void sg_miter_stop(struct sg_mapping_iter *miter); +#define dma_unmap_sg(d, s, n, r) + #endif /* _LINUX_SCATTERLIST_H */ diff --git a/drivers/include/linux/sched.h b/drivers/include/linux/sched.h index 483107a22..ff833f6f1 100644 --- a/drivers/include/linux/sched.h +++ b/drivers/include/linux/sched.h @@ -2,9 +2,49 @@ #define _LINUX_SCHED_H -#define TASK_UNINTERRUPTIBLE 2 +/* + * Task state bitmask. NOTE! These bits are also + * encoded in fs/proc/array.c: get_task_state(). + * + * We have two separate sets of flags: task->state + * is about runnability, while task->exit_state are + * about the task exiting. Confusing, but this way + * modifying one set can't modify the other one by + * mistake. 
+ */ +#define TASK_RUNNING 0 +#define TASK_INTERRUPTIBLE 1 +#define TASK_UNINTERRUPTIBLE 2 +#define __TASK_STOPPED 4 +#define __TASK_TRACED 8 +/* in tsk->exit_state */ +#define EXIT_DEAD 16 +#define EXIT_ZOMBIE 32 +#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD) +/* in tsk->state again */ +#define TASK_DEAD 64 +#define TASK_WAKEKILL 128 +#define TASK_WAKING 256 +#define TASK_PARKED 512 +#define TASK_NOLOAD 1024 +#define TASK_STATE_MAX 2048 +/* Convenience macros for the sake of set_task_state */ +#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) +#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED) +#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED) + +#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD) + +/* Convenience macros for the sake of wake_up */ +#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE) +#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED) + +/* get_task_state() */ +#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \ + TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \ + __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD) /* Task command name length */ -#define TASK_COMM_LEN 16 +#define TASK_COMM_LEN 16 #define schedule_timeout(x) delay(x) #define MAX_SCHEDULE_TIMEOUT LONG_MAX diff --git a/drivers/include/linux/seq_file.h b/drivers/include/linux/seq_file.h index aa4dcf8c7..d9ad7937f 100644 --- a/drivers/include/linux/seq_file.h +++ b/drivers/include/linux/seq_file.h @@ -1,9 +1,26 @@ -/* stub */ #ifndef _LINUX_SEQ_FILE_H #define _LINUX_SEQ_FILE_H +#include +#include +#include +#include +struct seq_file { + char *buf; + size_t size; + size_t from; + size_t count; + size_t pad_until; + loff_t index; + loff_t read_pos; + u64 version; + void *private; +}; +int seq_puts(struct seq_file *m, const char *s); + +__printf(2, 3) int seq_printf(struct seq_file *, const char *, ...); #endif diff --git a/drivers/include/linux/seqlock.h b/drivers/include/linux/seqlock.h index 0db6ec166..d97f93aa3 100644 --- a/drivers/include/linux/seqlock.h +++ b/drivers/include/linux/seqlock.h @@ -28,13 +28,14 @@ * to increment the sequence variables because an interrupt routine could * change the state of the data. * - * Based on x86_64 vsyscall gettimeofday + * Based on x86_64 vsyscall gettimeofday * by Keith Owens and Andrea Arcangeli */ #include //#include #include +#include #include /* @@ -108,7 +109,7 @@ static inline unsigned __read_seqcount_begin(const seqcount_t *s) unsigned ret; repeat: - ret = ACCESS_ONCE(s->sequence); + ret = READ_ONCE(s->sequence); if (unlikely(ret & 1)) { cpu_relax(); goto repeat; @@ -127,7 +128,7 @@ repeat: */ static inline unsigned raw_read_seqcount(const seqcount_t *s) { - unsigned ret = ACCESS_ONCE(s->sequence); + unsigned ret = READ_ONCE(s->sequence); smp_rmb(); return ret; } @@ -179,7 +180,7 @@ static inline unsigned read_seqcount_begin(const seqcount_t *s) */ static inline unsigned raw_seqcount_begin(const seqcount_t *s) { - unsigned ret = ACCESS_ONCE(s->sequence); + unsigned ret = READ_ONCE(s->sequence); smp_rmb(); return ret & ~1; } @@ -236,6 +237,79 @@ static inline void raw_write_seqcount_end(seqcount_t *s) /* * raw_write_seqcount_latch - redirect readers to even/odd copy * @s: pointer to seqcount_t + * + * The latch technique is a multiversion concurrency control method that allows + * queries during non-atomic modifications. If you can guarantee queries never + * interrupt the modification -- e.g. the concurrency is strictly between CPUs + * -- you most likely do not need this. 
+ * + * Where the traditional RCU/lockless data structures rely on atomic + * modifications to ensure queries observe either the old or the new state the + * latch allows the same for non-atomic updates. The trade-off is doubling the + * cost of storage; we have to maintain two copies of the entire data + * structure. + * + * Very simply put: we first modify one copy and then the other. This ensures + * there is always one copy in a stable state, ready to give us an answer. + * + * The basic form is a data structure like: + * + * struct latch_struct { + * seqcount_t seq; + * struct data_struct data[2]; + * }; + * + * Where a modification, which is assumed to be externally serialized, does the + * following: + * + * void latch_modify(struct latch_struct *latch, ...) + * { + * smp_wmb(); <- Ensure that the last data[1] update is visible + * latch->seq++; + * smp_wmb(); <- Ensure that the seqcount update is visible + * + * modify(latch->data[0], ...); + * + * smp_wmb(); <- Ensure that the data[0] update is visible + * latch->seq++; + * smp_wmb(); <- Ensure that the seqcount update is visible + * + * modify(latch->data[1], ...); + * } + * + * The query will have a form like: + * + * struct entry *latch_query(struct latch_struct *latch, ...) + * { + * struct entry *entry; + * unsigned seq, idx; + * + * do { + * seq = lockless_dereference(latch->seq); + * + * idx = seq & 0x01; + * entry = data_query(latch->data[idx], ...); + * + * smp_rmb(); + * } while (seq != latch->seq); + * + * return entry; + * } + * + * So during the modification, queries are first redirected to data[1]. Then we + * modify data[0]. When that is complete, we redirect queries back to data[0] + * and we can modify data[1]. + * + * NOTE: The non-requirement for atomic modifications does _NOT_ include + * the publishing of new entries in the case where data is a dynamic + * data structure. + * + * An iteration might start in data[0] and get suspended long enough + * to miss an entire modification sequence, once it resumes it might + * observe the new entry. + * + * NOTE: When data is a dynamic data structure; one should use regular RCU + * patterns to manage the lifetimes of the objects within. */ static inline void raw_write_seqcount_latch(seqcount_t *s) { @@ -266,13 +340,13 @@ static inline void write_seqcount_end(seqcount_t *s) } /** - * write_seqcount_barrier - invalidate in-progress read-side seq operations + * write_seqcount_invalidate - invalidate in-progress read-side seq operations * @s: pointer to seqcount_t * - * After write_seqcount_barrier, no read-side seq operations will complete + * After write_seqcount_invalidate, no read-side seq operations will complete * successfully and see data older than this. */ -static inline void write_seqcount_barrier(seqcount_t *s) +static inline void write_seqcount_invalidate(seqcount_t *s) { smp_wmb(); s->sequence+=2; diff --git a/drivers/include/linux/sfi.h b/drivers/include/linux/sfi.h new file mode 100644 index 000000000..9a6c99903 --- /dev/null +++ b/drivers/include/linux/sfi.h @@ -0,0 +1,209 @@ +/* sfi.h Simple Firmware Interface */ + +/* + + This file is provided under a dual BSD/GPLv2 license. When using or + redistributing this file, you may do so under either license. + + GPL LICENSE SUMMARY + + Copyright(c) 2009 Intel Corporation. All rights reserved. + + This program is free software; you can redistribute it and/or modify + it under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. 
+ + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + The full GNU General Public License is included in this distribution + in the file called LICENSE.GPL. + + BSD LICENSE + + Copyright(c) 2009 Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +#ifndef _LINUX_SFI_H +#define _LINUX_SFI_H + +//#include +#include + +/* Table signatures reserved by the SFI specification */ +#define SFI_SIG_SYST "SYST" +#define SFI_SIG_FREQ "FREQ" +#define SFI_SIG_IDLE "IDLE" +#define SFI_SIG_CPUS "CPUS" +#define SFI_SIG_MTMR "MTMR" +#define SFI_SIG_MRTC "MRTC" +#define SFI_SIG_MMAP "MMAP" +#define SFI_SIG_APIC "APIC" +#define SFI_SIG_XSDT "XSDT" +#define SFI_SIG_WAKE "WAKE" +#define SFI_SIG_DEVS "DEVS" +#define SFI_SIG_GPIO "GPIO" + +#define SFI_SIGNATURE_SIZE 4 +#define SFI_OEM_ID_SIZE 6 +#define SFI_OEM_TABLE_ID_SIZE 8 + +#define SFI_NAME_LEN 16 + +#define SFI_SYST_SEARCH_BEGIN 0x000E0000 +#define SFI_SYST_SEARCH_END 0x000FFFFF + +#define SFI_GET_NUM_ENTRIES(ptable, entry_type) \ + ((ptable->header.len - sizeof(struct sfi_table_header)) / \ + (sizeof(entry_type))) +/* + * Table structures must be byte-packed to match the SFI specification, + * as they are provided by the BIOS. 
+ */ +struct sfi_table_header { + char sig[SFI_SIGNATURE_SIZE]; + u32 len; + u8 rev; + u8 csum; + char oem_id[SFI_OEM_ID_SIZE]; + char oem_table_id[SFI_OEM_TABLE_ID_SIZE]; +} __packed; + +struct sfi_table_simple { + struct sfi_table_header header; + u64 pentry[1]; +} __packed; + +/* Comply with UEFI spec 2.1 */ +struct sfi_mem_entry { + u32 type; + u64 phys_start; + u64 virt_start; + u64 pages; + u64 attrib; +} __packed; + +struct sfi_cpu_table_entry { + u32 apic_id; +} __packed; + +struct sfi_cstate_table_entry { + u32 hint; /* MWAIT hint */ + u32 latency; /* latency in ms */ +} __packed; + +struct sfi_apic_table_entry { + u64 phys_addr; /* phy base addr for APIC reg */ +} __packed; + +struct sfi_freq_table_entry { + u32 freq_mhz; /* in MHZ */ + u32 latency; /* transition latency in ms */ + u32 ctrl_val; /* value to write to PERF_CTL */ +} __packed; + +struct sfi_wake_table_entry { + u64 phys_addr; /* pointer to where the wake vector locates */ +} __packed; + +struct sfi_timer_table_entry { + u64 phys_addr; /* phy base addr for the timer */ + u32 freq_hz; /* in HZ */ + u32 irq; +} __packed; + +struct sfi_rtc_table_entry { + u64 phys_addr; /* phy base addr for the RTC */ + u32 irq; +} __packed; + +struct sfi_device_table_entry { + u8 type; /* bus type, I2C, SPI or ...*/ +#define SFI_DEV_TYPE_SPI 0 +#define SFI_DEV_TYPE_I2C 1 +#define SFI_DEV_TYPE_UART 2 +#define SFI_DEV_TYPE_HSI 3 +#define SFI_DEV_TYPE_IPC 4 + + u8 host_num; /* attached to host 0, 1...*/ + u16 addr; + u8 irq; + u32 max_freq; + char name[SFI_NAME_LEN]; +} __packed; + +struct sfi_gpio_table_entry { + char controller_name[SFI_NAME_LEN]; + u16 pin_no; + char pin_name[SFI_NAME_LEN]; +} __packed; + +typedef int (*sfi_table_handler) (struct sfi_table_header *table); + +#ifdef CONFIG_SFI +extern void __init sfi_init(void); +extern int __init sfi_platform_init(void); +extern void __init sfi_init_late(void); +extern int sfi_table_parse(char *signature, char *oem_id, char *oem_table_id, + sfi_table_handler handler); + +extern int sfi_disabled; +static inline void disable_sfi(void) +{ + sfi_disabled = 1; +} + +#else /* !CONFIG_SFI */ + +static inline void sfi_init(void) +{ +} + +static inline void sfi_init_late(void) +{ +} + +#define sfi_disabled 0 + +static inline int sfi_table_parse(char *signature, char *oem_id, + char *oem_table_id, + sfi_table_handler handler) +{ + return -1; +} + +#endif /* !CONFIG_SFI */ + +#endif /*_LINUX_SFI_H*/ diff --git a/drivers/include/linux/slab.h b/drivers/include/linux/slab.h index 6525bec9e..bb19b0eda 100644 --- a/drivers/include/linux/slab.h +++ b/drivers/include/linux/slab.h @@ -18,7 +18,7 @@ /* * Flags to pass to kmem_cache_create(). - * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set. + * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set. 
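+ *
+ * The flags are OR'ed into the fourth argument of kmem_cache_create(),
+ * e.g. (a sketch; the cache name and object type are illustrative):
+ *
+ *	cache = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
+ *				  SLAB_RED_ZONE, NULL);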
*/ #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */ #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */ @@ -104,7 +104,11 @@ (unsigned long)ZERO_SIZE_PTR) void __init kmem_cache_init(void); -int slab_is_available(void); +bool slab_is_available(void); + +struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, + unsigned long, + void (*)(void *)); void kmem_cache_destroy(struct kmem_cache *); int kmem_cache_shrink(struct kmem_cache *); void kmem_cache_free(struct kmem_cache *, void *); @@ -120,7 +124,9 @@ static inline void kfree(void *p) } static __always_inline void *kmalloc(size_t size, gfp_t flags) { - return __builtin_malloc(size); + void *ret = __builtin_malloc(size); + memset(ret, 0, size); + return ret; } /** diff --git a/drivers/include/linux/spinlock.h b/drivers/include/linux/spinlock.h index ca9d27246..8387a7aa4 100644 --- a/drivers/include/linux/spinlock.h +++ b/drivers/include/linux/spinlock.h @@ -90,7 +90,7 @@ #ifdef CONFIG_DEBUG_SPINLOCK extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, - struct lock_class_key *key); + struct lock_class_key *key); # define raw_spin_lock_init(lock) \ do { \ static struct lock_class_key __key; \ @@ -119,7 +119,7 @@ do { \ /* * Despite its name it doesn't necessarily has to be a full barrier. * It should only guarantee that a STORE before the critical section - * can not be reordered with a LOAD inside this section. + * can not be reordered with LOADs and STOREs inside this section. * spin_lock() is the one-way barrier, this LOAD can not escape out * of the region. So the default implementation simply ensures that * a STORE can not move into the critical section, smp_wmb() should @@ -129,16 +129,6 @@ do { \ #define smp_mb__before_spinlock() smp_wmb() #endif -/* - * Place this after a lock-acquisition primitive to guarantee that - * an UNLOCK+LOCK pair act as a full barrier. This guarantee applies - * if the UNLOCK and LOCK are executed by the same CPU or if the - * UNLOCK and LOCK operate on the same lock variable. - */ -#ifndef smp_mb__after_unlock_lock -#define smp_mb__after_unlock_lock() do { } while (0) -#endif - /** * raw_spin_unlock_wait - wait until the spinlock gets unlocked * @lock: the spinlock in question. 
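
The barrier comment above is strengthened: smp_mb__before_spinlock() must now
order a prior STORE against both LOADs and STOREs inside the critical section,
and the separate smp_mb__after_unlock_lock() helper is dropped. For
orientation, a sketch of the canonical critical section these primitives
protect (the lock and counter are illustrative; DEFINE_SPINLOCK() is assumed
from spinlock_types.h):

	static DEFINE_SPINLOCK(foo_lock);
	static unsigned long foo_count;

	static void foo_inc(void)
	{
		unsigned long flags;

		/* IRQ-safe variant, needed when an interrupt
		 * handler can take the same lock. */
		spin_lock_irqsave(&foo_lock, flags);
		foo_count++;
		spin_unlock_irqrestore(&foo_lock, flags);
	}
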
@@ -189,6 +179,8 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) #ifdef CONFIG_DEBUG_LOCK_ALLOC # define raw_spin_lock_nested(lock, subclass) \ _raw_spin_lock_nested(lock, subclass) +# define raw_spin_lock_bh_nested(lock, subclass) \ + _raw_spin_lock_bh_nested(lock, subclass) # define raw_spin_lock_nest_lock(lock, nest_lock) \ do { \ @@ -204,6 +196,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) # define raw_spin_lock_nested(lock, subclass) \ _raw_spin_lock(((void)(subclass), (lock))) # define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock) +# define raw_spin_lock_bh_nested(lock, subclass) _raw_spin_lock_bh(lock) #endif #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) @@ -247,8 +240,8 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) #define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock) #define raw_spin_unlock_irqrestore(lock, flags) \ - do { \ - typecheck(unsigned long, flags); \ + do { \ + typecheck(unsigned long, flags); \ _raw_spin_unlock_irqrestore(lock, flags); \ } while (0) #define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock) @@ -292,7 +285,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) * Map the spin_lock functions to the raw variants for PREEMPT_RT=n */ -static inline raw_spinlock_t *spinlock_check(spinlock_t *lock) +static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock) { return &lock->rlock; } @@ -303,17 +296,17 @@ do { \ raw_spin_lock_init(&(_lock)->rlock); \ } while (0) -static inline void spin_lock(spinlock_t *lock) +static __always_inline void spin_lock(spinlock_t *lock) { raw_spin_lock(&lock->rlock); } -static inline void spin_lock_bh(spinlock_t *lock) +static __always_inline void spin_lock_bh(spinlock_t *lock) { raw_spin_lock_bh(&lock->rlock); } -static inline int spin_trylock(spinlock_t *lock) +static __always_inline int spin_trylock(spinlock_t *lock) { return raw_spin_trylock(&lock->rlock); } @@ -323,12 +316,17 @@ do { \ raw_spin_lock_nested(spinlock_check(lock), subclass); \ } while (0) +#define spin_lock_bh_nested(lock, subclass) \ +do { \ + raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\ +} while (0) + #define spin_lock_nest_lock(lock, nest_lock) \ do { \ raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \ } while (0) -static inline void spin_lock_irq(spinlock_t *lock) +static __always_inline void spin_lock_irq(spinlock_t *lock) { raw_spin_lock_irq(&lock->rlock); } @@ -343,57 +341,57 @@ do { \ raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \ } while (0) -static inline void spin_unlock(spinlock_t *lock) +static __always_inline void spin_unlock(spinlock_t *lock) { raw_spin_unlock(&lock->rlock); } -static inline void spin_unlock_bh(spinlock_t *lock) +static __always_inline void spin_unlock_bh(spinlock_t *lock) { raw_spin_unlock_bh(&lock->rlock); } -static inline void spin_unlock_irq(spinlock_t *lock) +static __always_inline void spin_unlock_irq(spinlock_t *lock) { raw_spin_unlock_irq(&lock->rlock); } -static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) +static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) { raw_spin_unlock_irqrestore(&lock->rlock, flags); } -static inline int spin_trylock_bh(spinlock_t *lock) +static __always_inline int spin_trylock_bh(spinlock_t *lock) { return raw_spin_trylock_bh(&lock->rlock); } -static inline int spin_trylock_irq(spinlock_t *lock) +static 
__always_inline int spin_trylock_irq(spinlock_t *lock) { return raw_spin_trylock_irq(&lock->rlock); } #define spin_trylock_irqsave(lock, flags) \ -({ \ +({ \ raw_spin_trylock_irqsave(spinlock_check(lock), flags); \ }) -static inline void spin_unlock_wait(spinlock_t *lock) +static __always_inline void spin_unlock_wait(spinlock_t *lock) { raw_spin_unlock_wait(&lock->rlock); } -static inline int spin_is_locked(spinlock_t *lock) +static __always_inline int spin_is_locked(spinlock_t *lock) { return raw_spin_is_locked(&lock->rlock); } -static inline int spin_is_contended(spinlock_t *lock) +static __always_inline int spin_is_contended(spinlock_t *lock) { return raw_spin_is_contended(&lock->rlock); } -static inline int spin_can_lock(spinlock_t *lock) +static __always_inline int spin_can_lock(spinlock_t *lock) { return raw_spin_can_lock(&lock->rlock); } diff --git a/drivers/include/linux/spinlock_api_up.h b/drivers/include/linux/spinlock_api_up.h index eb5ea06ff..8541c720f 100644 --- a/drivers/include/linux/spinlock_api_up.h +++ b/drivers/include/linux/spinlock_api_up.h @@ -52,11 +52,12 @@ #define __UNLOCK_IRQ(lock) \ do { local_irq_enable(); __UNLOCK(lock); } while (0) -#define __UNLOCK_IRQRESTORE(lock, flags) \ +#define __UNLOCK_IRQRESTORE(lock, flags) \ do { local_irq_restore(flags); __UNLOCK(lock); } while (0) #define _raw_spin_lock(lock) __LOCK(lock) #define _raw_spin_lock_nested(lock, subclass) __LOCK(lock) +#define _raw_spin_lock_bh_nested(lock, subclass) __LOCK(lock) #define _raw_read_lock(lock) __LOCK(lock) #define _raw_write_lock(lock) __LOCK(lock) #define _raw_spin_lock_bh(lock) __LOCK_BH(lock) diff --git a/drivers/include/linux/spinlock_types.h b/drivers/include/linux/spinlock_types.h index c1a720aa2..96f4e64d2 100644 --- a/drivers/include/linux/spinlock_types.h +++ b/drivers/include/linux/spinlock_types.h @@ -43,8 +43,8 @@ typedef struct raw_spinlock { #ifdef CONFIG_DEBUG_SPINLOCK # define SPIN_DEBUG_INIT(lockname) \ - .magic = SPINLOCK_MAGIC, \ - .owner_cpu = -1, \ + .magic = SPINLOCK_MAGIC, \ + .owner_cpu = -1, \ .owner = SPINLOCK_OWNER_INIT, #else # define SPIN_DEBUG_INIT(lockname) @@ -54,7 +54,7 @@ typedef struct raw_spinlock { { \ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ SPIN_DEBUG_INIT(lockname) \ - SPIN_DEP_MAP_INIT(lockname) } + SPIN_DEP_MAP_INIT(lockname) } #define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) diff --git a/drivers/include/linux/stddef.h b/drivers/include/linux/stddef.h index e5e98202a..8bd5f1728 100644 --- a/drivers/include/linux/stddef.h +++ b/drivers/include/linux/stddef.h @@ -3,7 +3,6 @@ #include - #undef NULL #define NULL ((void *)0) @@ -14,8 +13,18 @@ enum { #undef offsetof #ifdef __compiler_offsetof -#define offsetof(TYPE,MEMBER) __compiler_offsetof(TYPE,MEMBER) +#define offsetof(TYPE, MEMBER) __compiler_offsetof(TYPE, MEMBER) #else -#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) +#define offsetof(TYPE, MEMBER) ((size_t)&((TYPE *)0)->MEMBER) #endif + +/** + * offsetofend(TYPE, MEMBER) + * + * @TYPE: The type of the structure + * @MEMBER: The member within the structure to get the end offset of + */ +#define offsetofend(TYPE, MEMBER) \ + (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER)) + #endif diff --git a/drivers/include/linux/string.h b/drivers/include/linux/string.h index 4d60e1e66..6778fd916 100644 --- a/drivers/include/linux/string.h +++ b/drivers/include/linux/string.h @@ -25,6 +25,9 @@ extern char * strncpy(char *,const char *, __kernel_size_t); #ifndef __HAVE_ARCH_STRLCPY size_t 
strlcpy(char *, const char *, size_t); #endif +#ifndef __HAVE_ARCH_STRSCPY +ssize_t __must_check strscpy(char *, const char *, size_t); +#endif #ifndef __HAVE_ARCH_STRCAT extern char * strcat(char *, const char *); #endif @@ -40,9 +43,6 @@ extern int strcmp(const char *,const char *); #ifndef __HAVE_ARCH_STRNCMP extern int strncmp(const char *,const char *,__kernel_size_t); #endif -#ifndef __HAVE_ARCH_STRNICMP -#define strnicmp strncasecmp -#endif #ifndef __HAVE_ARCH_STRCASECMP extern int strcasecmp(const char *s1, const char *s2); #endif @@ -114,8 +114,12 @@ extern int memcmp(const void *,const void *,__kernel_size_t); extern void * memchr(const void *,int,__kernel_size_t); #endif void *memchr_inv(const void *s, int c, size_t n); +char *strreplace(char *s, char old, char new); + +extern void kfree_const(const void *x); extern char *kstrdup(const char *s, gfp_t gfp); +extern const char *kstrdup_const(const char *s, gfp_t gfp); extern char *kstrndup(const char *s, size_t len, gfp_t gfp); extern void *kmemdup(const void *src, size_t len, gfp_t gfp); @@ -132,7 +136,7 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4); #endif extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos, - const void *from, size_t available); + const void *from, size_t available); /** * strstarts - does @str start with @prefix? diff --git a/drivers/include/linux/swab.h b/drivers/include/linux/swab.h index bb792534d..f68663f28 100644 --- a/drivers/include/linux/swab.h +++ b/drivers/include/linux/swab.h @@ -1,284 +1,8 @@ #ifndef _LINUX_SWAB_H #define _LINUX_SWAB_H -#include -#include -#include +#include -/* - * casts are necessary for constants, because we never know how for sure - * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way. 
- */ -#define ___constant_swab16(x) ((__u16)( \ - (((__u16)(x) & (__u16)0x00ffU) << 8) | \ - (((__u16)(x) & (__u16)0xff00U) >> 8))) - -#define ___constant_swab32(x) ((__u32)( \ - (((__u32)(x) & (__u32)0x000000ffUL) << 24) | \ - (((__u32)(x) & (__u32)0x0000ff00UL) << 8) | \ - (((__u32)(x) & (__u32)0x00ff0000UL) >> 8) | \ - (((__u32)(x) & (__u32)0xff000000UL) >> 24))) - -#define ___constant_swab64(x) ((__u64)( \ - (((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \ - (((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \ - (((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \ - (((__u64)(x) & (__u64)0x00000000ff000000ULL) << 8) | \ - (((__u64)(x) & (__u64)0x000000ff00000000ULL) >> 8) | \ - (((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \ - (((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \ - (((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56))) - -#define ___constant_swahw32(x) ((__u32)( \ - (((__u32)(x) & (__u32)0x0000ffffUL) << 16) | \ - (((__u32)(x) & (__u32)0xffff0000UL) >> 16))) - -#define ___constant_swahb32(x) ((__u32)( \ - (((__u32)(x) & (__u32)0x00ff00ffUL) << 8) | \ - (((__u32)(x) & (__u32)0xff00ff00UL) >> 8))) - -/* - * Implement the following as inlines, but define the interface using - * macros to allow constant folding when possible: - * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32 - */ - -static inline __attribute_const__ __u16 __fswab16(__u16 val) -{ -#ifdef __arch_swab16 - return __arch_swab16(val); -#else - return ___constant_swab16(val); -#endif -} - -static inline __attribute_const__ __u32 __fswab32(__u32 val) -{ -#ifdef __arch_swab32 - return __arch_swab32(val); -#else - return ___constant_swab32(val); -#endif -} - -static inline __attribute_const__ __u64 __fswab64(__u64 val) -{ -#ifdef __arch_swab64 - return __arch_swab64(val); -#elif defined(__SWAB_64_THRU_32__) - __u32 h = val >> 32; - __u32 l = val & ((1ULL << 32) - 1); - return (((__u64)__fswab32(l)) << 32) | ((__u64)(__fswab32(h))); -#else - return ___constant_swab64(val); -#endif -} - -static inline __attribute_const__ __u32 __fswahw32(__u32 val) -{ -#ifdef __arch_swahw32 - return __arch_swahw32(val); -#else - return ___constant_swahw32(val); -#endif -} - -static inline __attribute_const__ __u32 __fswahb32(__u32 val) -{ -#ifdef __arch_swahb32 - return __arch_swahb32(val); -#else - return ___constant_swahb32(val); -#endif -} - -/** - * __swab16 - return a byteswapped 16-bit value - * @x: value to byteswap - */ -#define __swab16(x) \ - (__builtin_constant_p((__u16)(x)) ? \ - ___constant_swab16(x) : \ - __fswab16(x)) - -/** - * __swab32 - return a byteswapped 32-bit value - * @x: value to byteswap - */ -#define __swab32(x) \ - (__builtin_constant_p((__u32)(x)) ? \ - ___constant_swab32(x) : \ - __fswab32(x)) - -/** - * __swab64 - return a byteswapped 64-bit value - * @x: value to byteswap - */ -#define __swab64(x) \ - (__builtin_constant_p((__u64)(x)) ? \ - ___constant_swab64(x) : \ - __fswab64(x)) - -/** - * __swahw32 - return a word-swapped 32-bit value - * @x: value to wordswap - * - * __swahw32(0x12340000) is 0x00001234 - */ -#define __swahw32(x) \ - (__builtin_constant_p((__u32)(x)) ? \ - ___constant_swahw32(x) : \ - __fswahw32(x)) - -/** - * __swahb32 - return a high and low byte-swapped 32-bit value - * @x: value to byteswap - * - * __swahb32(0x12345678) is 0x34127856 - */ -#define __swahb32(x) \ - (__builtin_constant_p((__u32)(x)) ? 
\ - ___constant_swahb32(x) : \ - __fswahb32(x)) - -/** - * __swab16p - return a byteswapped 16-bit value from a pointer - * @p: pointer to a naturally-aligned 16-bit value - */ -static inline __u16 __swab16p(const __u16 *p) -{ -#ifdef __arch_swab16p - return __arch_swab16p(p); -#else - return __swab16(*p); -#endif -} - -/** - * __swab32p - return a byteswapped 32-bit value from a pointer - * @p: pointer to a naturally-aligned 32-bit value - */ -static inline __u32 __swab32p(const __u32 *p) -{ -#ifdef __arch_swab32p - return __arch_swab32p(p); -#else - return __swab32(*p); -#endif -} - -/** - * __swab64p - return a byteswapped 64-bit value from a pointer - * @p: pointer to a naturally-aligned 64-bit value - */ -static inline __u64 __swab64p(const __u64 *p) -{ -#ifdef __arch_swab64p - return __arch_swab64p(p); -#else - return __swab64(*p); -#endif -} - -/** - * __swahw32p - return a wordswapped 32-bit value from a pointer - * @p: pointer to a naturally-aligned 32-bit value - * - * See __swahw32() for details of wordswapping. - */ -static inline __u32 __swahw32p(const __u32 *p) -{ -#ifdef __arch_swahw32p - return __arch_swahw32p(p); -#else - return __swahw32(*p); -#endif -} - -/** - * __swahb32p - return a high and low byteswapped 32-bit value from a pointer - * @p: pointer to a naturally-aligned 32-bit value - * - * See __swahb32() for details of high/low byteswapping. - */ -static inline __u32 __swahb32p(const __u32 *p) -{ -#ifdef __arch_swahb32p - return __arch_swahb32p(p); -#else - return __swahb32(*p); -#endif -} - -/** - * __swab16s - byteswap a 16-bit value in-place - * @p: pointer to a naturally-aligned 16-bit value - */ -static inline void __swab16s(__u16 *p) -{ -#ifdef __arch_swab16s - __arch_swab16s(p); -#else - *p = __swab16p(p); -#endif -} -/** - * __swab32s - byteswap a 32-bit value in-place - * @p: pointer to a naturally-aligned 32-bit value - */ -static inline void __swab32s(__u32 *p) -{ -#ifdef __arch_swab32s - __arch_swab32s(p); -#else - *p = __swab32p(p); -#endif -} - -/** - * __swab64s - byteswap a 64-bit value in-place - * @p: pointer to a naturally-aligned 64-bit value - */ -static inline void __swab64s(__u64 *p) -{ -#ifdef __arch_swab64s - __arch_swab64s(p); -#else - *p = __swab64p(p); -#endif -} - -/** - * __swahw32s - wordswap a 32-bit value in-place - * @p: pointer to a naturally-aligned 32-bit value - * - * See __swahw32() for details of wordswapping - */ -static inline void __swahw32s(__u32 *p) -{ -#ifdef __arch_swahw32s - __arch_swahw32s(p); -#else - *p = __swahw32p(p); -#endif -} - -/** - * __swahb32s - high and low byteswap a 32-bit value in-place - * @p: pointer to a naturally-aligned 32-bit value - * - * See __swahb32() for details of high and low byte swapping - */ -static inline void __swahb32s(__u32 *p) -{ -#ifdef __arch_swahb32s - __arch_swahb32s(p); -#else - *p = __swahb32p(p); -#endif -} - -#ifdef __KERNEL__ # define swab16 __swab16 # define swab32 __swab32 # define swab64 __swab64 @@ -294,6 +18,4 @@ static inline void __swahb32s(__u32 *p) # define swab64s __swab64s # define swahw32s __swahw32s # define swahb32s __swahb32s -#endif /* __KERNEL__ */ - #endif /* _LINUX_SWAB_H */ diff --git a/drivers/include/linux/time.h b/drivers/include/linux/time.h index a45e24099..beebe3a02 100644 --- a/drivers/include/linux/time.h +++ b/drivers/include/linux/time.h @@ -40,8 +40,8 @@ static inline int timeval_compare(const struct timeval *lhs, const struct timeva } extern time64_t mktime64(const unsigned int year, const unsigned int mon, - const unsigned int day, const 
unsigned int hour,
-			const unsigned int min, const unsigned int sec);
+			const unsigned int day, const unsigned int hour,
+			const unsigned int min, const unsigned int sec);
 
 /**
  * Deprecated. Use mktime64().
@@ -110,6 +110,19 @@ static inline bool timespec_valid_strict(const struct timespec *ts)
 	return true;
 }
 
+static inline bool timeval_valid(const struct timeval *tv)
+{
+	/* Dates before 1970 are bogus */
+	if (tv->tv_sec < 0)
+		return false;
+
+	/* Can't have more microseconds than a second */
+	if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
+		return false;
+
+	return true;
+}
+
 extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
 
 #define CURRENT_TIME		(current_kernel_time())
diff --git a/drivers/include/linux/time64.h b/drivers/include/linux/time64.h
index a3831478d..367d5af89 100644
--- a/drivers/include/linux/time64.h
+++ b/drivers/include/linux/time64.h
@@ -2,6 +2,7 @@
 #define _LINUX_TIME64_H
 
 #include <uapi/linux/time.h>
+#include <linux/math64.h>
 
 typedef __s64 time64_t;
 
@@ -11,11 +12,18 @@ typedef __s64 time64_t;
  */
 #if __BITS_PER_LONG == 64
 # define timespec64 timespec
+#define itimerspec64 itimerspec
 #else
 struct timespec64 {
 	time64_t	tv_sec;		/* seconds */
 	long		tv_nsec;	/* nanoseconds */
 };
+
+struct itimerspec64 {
+	struct timespec64 it_interval;
+	struct timespec64 it_value;
+};
+
 #endif
 
 /* Parameters used to convert the timespec values: */
@@ -28,6 +36,7 @@ struct timespec64 {
 #define FSEC_PER_SEC	1000000000000000LL
 
 /* Located here for timespec[64]_valid_strict */
+#define TIME64_MAX			((s64)~((u64)1 << 63))
 #define KTIME_MAX			((s64)~((u64)1 << 63))
 #define KTIME_SEC_MAX			(KTIME_MAX / NSEC_PER_SEC)
 
@@ -43,6 +52,16 @@ static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
 	return ts;
 }
 
+static inline struct itimerspec itimerspec64_to_itimerspec(struct itimerspec64 *its64)
+{
+	return *its64;
+}
+
+static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec *its)
+{
+	return *its;
+}
+
 # define timespec64_equal		timespec_equal
 # define timespec64_compare		timespec_compare
 # define set_normalized_timespec64	set_normalized_timespec
@@ -75,6 +94,24 @@ static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
 	return ret;
 }
 
+static inline struct itimerspec itimerspec64_to_itimerspec(struct itimerspec64 *its64)
+{
+	struct itimerspec ret;
+
+	ret.it_interval = timespec64_to_timespec(its64->it_interval);
+	ret.it_value = timespec64_to_timespec(its64->it_value);
+	return ret;
+}
+
+static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec *its)
+{
+	struct itimerspec64 ret;
+
+	ret.it_interval = timespec_to_timespec64(its->it_interval);
+	ret.it_value = timespec_to_timespec64(its->it_value);
+	return ret;
+}
+
 static inline int timespec64_equal(const struct timespec64 *a,
 				   const struct timespec64 *b)
 {
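The itimerspec64 helpers above follow the same pattern as the existing
timespec64 ones: on 64-bit builds the 64-bit types alias the classic ones and
the conversions collapse to plain copies, while 32-bit builds convert member
by member. A compact, self-contained sketch of the 32-bit side of that
pattern (all *_demo names here are hypothetical stand-ins, not the kernel
types):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef int64_t time64_demo_t;

struct timespec_demo   { long tv_sec; long tv_nsec; };          /* legacy time_t width */
struct timespec64_demo { time64_demo_t tv_sec; long tv_nsec; }; /* always 64-bit secs  */

static struct timespec_demo
timespec64_to_timespec_demo(struct timespec64_demo ts64)
{
	struct timespec_demo ret;

	ret.tv_sec = (long)ts64.tv_sec;	/* truncates past 2038 when long is 32-bit */
	ret.tv_nsec = ts64.tv_nsec;
	return ret;
}

int main(void)
{
	struct timespec64_demo ts64 = { .tv_sec = 1LL << 31, .tv_nsec = 0 };
	struct timespec_demo ts = timespec64_to_timespec_demo(ts64);

	printf("64-bit seconds %lld -> legacy seconds %ld\n",
	       (long long)ts64.tv_sec, ts.tv_sec);
	return 0;
}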
diff --git a/drivers/include/linux/types.h b/drivers/include/linux/types.h
index 06dcb43dc..d5bf11b4a 100644
--- a/drivers/include/linux/types.h
+++ b/drivers/include/linux/types.h
@@ -7,7 +7,7 @@
 #ifndef __ASSEMBLY__
 
 #define DECLARE_BITMAP(name,bits) \
-    unsigned long name[BITS_TO_LONGS(bits)]
+	unsigned long name[BITS_TO_LONGS(bits)]
 
 typedef __u32 __kernel_dev_t;
 
@@ -26,7 +26,7 @@ typedef __kernel_timer_t	timer_t;
 typedef __kernel_clockid_t	clockid_t;
 typedef __kernel_mqd_t		mqd_t;
 
-typedef _Bool   bool;
+typedef _Bool			bool;
 
 typedef __kernel_uid32_t	uid_t;
 typedef __kernel_gid32_t	gid_t;
@@ -135,26 +135,25 @@ typedef unsigned long blkcnt_t;
 #endif
 
 /*
- * The type of an index into the pagecache.  Use a #define so asm/types.h
- * can override it.
+ * The type of an index into the pagecache.
  */
-#ifndef pgoff_t
 #define pgoff_t unsigned long
-#endif
 
-/* A dma_addr_t can hold any valid DMA or bus address for the platform */
+/*
+ * A dma_addr_t can hold any valid DMA address, i.e., any address returned
+ * by the DMA API.
+ *
+ * If the DMA API only uses 32-bit addresses, dma_addr_t need only be 32
+ * bits wide.  Bus addresses, e.g., PCI BARs, may be wider than 32 bits,
+ * but drivers do memory-mapped I/O to ioremapped kernel virtual addresses,
+ * so they don't care about the size of the actual bus addresses.
+ */
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 typedef u64 dma_addr_t;
 #else
 typedef u32 dma_addr_t;
-#endif /* dma_addr_t */
+#endif
 
-#ifdef __CHECKER__
-#else
-#endif
-#ifdef __CHECK_ENDIAN__
-#else
-#endif
 typedef unsigned __bitwise__ gfp_t;
 typedef unsigned __bitwise__ fmode_t;
 typedef unsigned __bitwise__ oom_flags_t;
@@ -206,12 +205,32 @@ struct ustat {
  * struct callback_head - callback structure for use with RCU and task_work
  * @next: next update requests in a list
  * @func: actual update function to call after the grace period.
+ *
+ * The struct is aligned to the size of a pointer. On most architectures this
+ * happens naturally due to ABI requirements, but some architectures (like
+ * CRIS) have a weird ABI and we need to ask for it explicitly.
+ *
+ * The alignment is required to guarantee that bits 0 and 1 of @next will be
+ * clear under normal conditions -- as long as we use call_rcu(),
+ * call_rcu_bh(), call_rcu_sched(), or call_srcu() to queue the callback.
+ *
+ * This guarantee is important for a few reasons:
+ * - a future call_rcu_lazy() will make use of the lower bits in the pointer;
+ * - the structure shares storage space in struct page with @compound_head,
+ *   which encodes PageTail() in bit 0. The guarantee is needed to avoid
+ *   false-positive PageTail().
 */
 struct callback_head {
-    struct callback_head *next;
-    void (*func)(struct callback_head *head);
-};
+	struct callback_head *next;
+	void (*func)(struct callback_head *head);
+} __attribute__((aligned(sizeof(void *))));
 #define rcu_head callback_head
 
+typedef void (*rcu_callback_t)(struct rcu_head *head);
+typedef void (*call_rcu_func_t)(struct rcu_head *head, rcu_callback_t func);
+
+/* clocksource cycle base type */
+typedef u64 cycle_t;
+
 #endif /*  __ASSEMBLY__ */
 #endif /* _LINUX_TYPES_H */
diff --git a/drivers/include/linux/vgaarb.h b/drivers/include/linux/vgaarb.h
index 3324693ef..7ae1d82c9 100644
--- a/drivers/include/linux/vgaarb.h
+++ b/drivers/include/linux/vgaarb.h
@@ -65,8 +65,13 @@ struct pci_dev;
 *     out of the arbitration process (and can be safe to take
 *     interrupts at any time.
*/ +#if defined(CONFIG_VGA_ARB) extern void vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes); +#else +static inline void vga_set_legacy_decoding(struct pci_dev *pdev, + unsigned int decodes) { }; +#endif /** * vga_get - acquire & locks VGA resources diff --git a/drivers/include/linux/workqueue.h b/drivers/include/linux/workqueue.h index 7ca4c5e84..94c9bf43d 100644 --- a/drivers/include/linux/workqueue.h +++ b/drivers/include/linux/workqueue.h @@ -61,7 +61,7 @@ struct delayed_work { static inline struct delayed_work *to_delayed_work(struct work_struct *work) { - return container_of(work, struct delayed_work, work); + return container_of(work, struct delayed_work, work); } extern struct workqueue_struct *system_wq; diff --git a/drivers/include/linux/ww_mutex.h b/drivers/include/linux/ww_mutex.h index 9bbedf7c7..60b9e03ee 100644 --- a/drivers/include/linux/ww_mutex.h +++ b/drivers/include/linux/ww_mutex.h @@ -120,7 +120,7 @@ static inline void ww_mutex_init(struct ww_mutex *lock, static inline void ww_acquire_init(struct ww_acquire_ctx *ctx, struct ww_class *ww_class) { - ctx->task = current; + ctx->task = current; ctx->stamp = atomic_long_inc_return(&ww_class->stamp); ctx->acquired = 0; #ifdef CONFIG_DEBUG_MUTEXES diff --git a/drivers/include/syscall.h b/drivers/include/syscall.h index 9e7924502..9f5a39d6a 100644 --- a/drivers/include/syscall.h +++ b/drivers/include/syscall.h @@ -6,6 +6,14 @@ typedef u32 addr_t; typedef u32 count_t; +typedef struct +{ + int width; + int height; + int bpp; + int freq; +}videomode_t; + /////////////////////////////////////////////////////////////////////////////// #define STDCALL __attribute__ ((stdcall)) __attribute__ ((dllimport)) @@ -36,6 +44,7 @@ int STDCALL UserFree(void *mem)__asm__("UserFree"); void* STDCALL GetDisplay(void)__asm__("GetDisplay"); u32 IMPORT GetTimerTicks(void)__asm__("GetTimerTicks"); +u64 IMPORT GetClockNs(void)__asm__("GetClockNs"); addr_t STDCALL AllocPage(void)__asm__("AllocPage"); addr_t STDCALL AllocPages(count_t count)__asm__("AllocPages"); @@ -512,9 +521,14 @@ static inline void __iomem *ioremap(u32 offset, size_t size) return (void __iomem*) MapIoMem(offset, size, PG_SW|PG_NOCACHE|0x100); } +static inline void __iomem *ioremap_nocache(u32 offset, size_t size) +{ + return (void __iomem*) MapIoMem(offset, size, PG_SW|PG_NOCACHE|0x100); +} + static inline void __iomem *ioremap_wc(u32 offset, size_t size) { - return (void __iomem*) MapIoMem(offset, size, PG_SW|0x100); + return (void __iomem*) MapIoMem(offset, size, PG_SW|PG_WRITEC|0x100); } @@ -546,7 +560,7 @@ static inline void vfree(void *addr) KernelFree(addr); } -static inline int power_supply_is_system_supplied(void) { return -1; } +static inline int power_supply_is_system_supplied(void) { return -1; }; #endif diff --git a/drivers/include/uapi/asm-generic/errno.h b/drivers/include/uapi/asm-generic/errno.h index 1e1ea6e6e..88e0914cf 100644 --- a/drivers/include/uapi/asm-generic/errno.h +++ b/drivers/include/uapi/asm-generic/errno.h @@ -6,7 +6,16 @@ #define EDEADLK 35 /* Resource deadlock would occur */ #define ENAMETOOLONG 36 /* File name too long */ #define ENOLCK 37 /* No record locks available */ -#define ENOSYS 38 /* Function not implemented */ + +/* + * This error code is special: arch syscall entry code will return + * -ENOSYS if users try to call a syscall that doesn't exist. 
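+ *
+ * (Illustrative aside, not part of this header: a driver rejecting an
+ * unsupported ioctl should use -ENOTTY, e.g. with a hypothetical
+ * command MY_IOCTL:
+ *
+ *	if (cmd != MY_IOCTL)
+ *		return -ENOTTY;
+ *
+ * keeping -ENOSYS reserved for "no such system call".)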
To keep
+ * failures of syscalls that really do exist distinguishable from
+ * failures due to attempts to use a nonexistent syscall, syscall
+ * implementations should refrain from returning -ENOSYS.
+ */
+#define ENOSYS		38	/* Invalid system call number */
+
 #define ENOTEMPTY	39	/* Directory not empty */
 #define ELOOP		40	/* Too many symbolic links encountered */
 #define EWOULDBLOCK	EAGAIN	/* Operation would block */
diff --git a/drivers/include/uapi/asm/e820.h b/drivers/include/uapi/asm/e820.h
index d993e33f5..9dafe59cf 100644
--- a/drivers/include/uapi/asm/e820.h
+++ b/drivers/include/uapi/asm/e820.h
@@ -32,7 +32,18 @@
 #define E820_ACPI	3
 #define E820_NVS	4
 #define E820_UNUSABLE	5
+#define E820_PMEM	7
 
+/*
+ * This is a non-standardized way to represent ADR or NVDIMM regions that
+ * persist over a reboot. The kernel will ignore their special capabilities
+ * unless the CONFIG_X86_PMEM_LEGACY option is set.
+ *
+ * ( Note that older platforms also used 6 for the same type of memory,
+ *   but newer versions switched to 12 as 6 was assigned differently. Some
+ *   time they will learn... )
+ */
+#define E820_PRAM	12
 
 /*
  * reserved RAM used by kernel itself
diff --git a/drivers/include/uapi/asm/msr.h b/drivers/include/uapi/asm/msr.h
index 155e51048..c41f4fe25 100644
--- a/drivers/include/uapi/asm/msr.h
+++ b/drivers/include/uapi/asm/msr.h
@@ -1,8 +1,6 @@
 #ifndef _UAPI_ASM_X86_MSR_H
 #define _UAPI_ASM_X86_MSR_H
 
-#include <asm/msr-index.h>
-
 #ifndef __ASSEMBLY__
 
 #include <linux/types.h>
diff --git a/drivers/include/uapi/asm/posix_types.h b/drivers/include/uapi/asm/posix_types.h
index f565f6dd5..85506b383 100644
--- a/drivers/include/uapi/asm/posix_types.h
+++ b/drivers/include/uapi/asm/posix_types.h
@@ -1,5 +1,9 @@
-# ifdef CONFIG_X86_32
+#ifndef __KERNEL__
+# ifdef __i386__
 #  include <asm/posix_types_32.h>
+# elif defined(__ILP32__)
+#  include <asm/posix_types_x32.h>
 # else
 #  include <asm/posix_types_64.h>
 # endif
+#endif
diff --git a/drivers/include/uapi/asm/processor-flags.h b/drivers/include/uapi/asm/processor-flags.h
index 180a0c3c2..79887abcb 100644
--- a/drivers/include/uapi/asm/processor-flags.h
+++ b/drivers/include/uapi/asm/processor-flags.h
@@ -37,8 +37,6 @@
 #define X86_EFLAGS_VM		_BITUL(X86_EFLAGS_VM_BIT)
 #define X86_EFLAGS_AC_BIT	18 /* Alignment Check/Access Control */
 #define X86_EFLAGS_AC		_BITUL(X86_EFLAGS_AC_BIT)
-#define X86_EFLAGS_AC_BIT	18 /* Alignment Check/Access Control */
-#define X86_EFLAGS_AC		_BITUL(X86_EFLAGS_AC_BIT)
 #define X86_EFLAGS_VIF_BIT	19 /* Virtual Interrupt Flag */
 #define X86_EFLAGS_VIF		_BITUL(X86_EFLAGS_VIF_BIT)
 #define X86_EFLAGS_VIP_BIT	20 /* Virtual Interrupt Pending */
diff --git a/drivers/include/uapi/asm/ptrace-abi.h b/drivers/include/uapi/asm/ptrace-abi.h
new file mode 100644
index 000000000..580aee307
--- /dev/null
+++ b/drivers/include/uapi/asm/ptrace-abi.h
@@ -0,0 +1,93 @@
+#ifndef _ASM_X86_PTRACE_ABI_H
+#define _ASM_X86_PTRACE_ABI_H
+
+#ifdef __i386__
+
+#define EBX 0
+#define ECX 1
+#define EDX 2
+#define ESI 3
+#define EDI 4
+#define EBP 5
+#define EAX 6
+#define DS 7
+#define ES 8
+#define FS 9
+#define GS 10
+#define ORIG_EAX 11
+#define EIP 12
+#define CS 13
+#define EFL 14
+#define UESP 15
+#define SS 16
+#define FRAME_SIZE 17
+
+#else /* __i386__ */
+
+#if defined(__ASSEMBLY__) || defined(__FRAME_OFFSETS)
+/*
+ * C ABI says these regs are callee-preserved. They aren't saved on kernel entry
+ * unless syscall needs a complete, fully filled "struct pt_regs".
+ */
+#define R15 0
+#define R14 8
+#define R13 16
+#define R12 24
+#define RBP 32
+#define RBX 40
+/* These regs are callee-clobbered. Always saved on kernel entry.
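+ *
+ * (Illustrative, not part of the header: a 64-bit tracer can use these
+ * byte offsets with PTRACE_PEEKUSER, e.g. reading a stopped tracee's
+ * instruction pointer:
+ *
+ *	errno = 0;
+ *	long ip = ptrace(PTRACE_PEEKUSER, pid, (void *)RIP, NULL);
+ *
+ * where pid identifies the tracee.)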
*/ +#define R11 48 +#define R10 56 +#define R9 64 +#define R8 72 +#define RAX 80 +#define RCX 88 +#define RDX 96 +#define RSI 104 +#define RDI 112 +/* + * On syscall entry, this is syscall#. On CPU exception, this is error code. + * On hw interrupt, it's IRQ number: + */ +#define ORIG_RAX 120 +/* Return frame for iretq */ +#define RIP 128 +#define CS 136 +#define EFLAGS 144 +#define RSP 152 +#define SS 160 +#endif /* __ASSEMBLY__ */ + +/* top of stack page */ +#define FRAME_SIZE 168 + +#endif /* !__i386__ */ + +/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */ +#define PTRACE_GETREGS 12 +#define PTRACE_SETREGS 13 +#define PTRACE_GETFPREGS 14 +#define PTRACE_SETFPREGS 15 +#define PTRACE_GETFPXREGS 18 +#define PTRACE_SETFPXREGS 19 + +#define PTRACE_OLDSETOPTIONS 21 + +/* only useful for access 32bit programs / kernels */ +#define PTRACE_GET_THREAD_AREA 25 +#define PTRACE_SET_THREAD_AREA 26 + +#ifdef __x86_64__ +# define PTRACE_ARCH_PRCTL 30 +#endif + +#define PTRACE_SYSEMU 31 +#define PTRACE_SYSEMU_SINGLESTEP 32 + +#define PTRACE_SINGLEBLOCK 33 /* resume execution until next branch */ + +#ifndef __ASSEMBLY__ +#include +#endif + +#endif /* _ASM_X86_PTRACE_ABI_H */ diff --git a/drivers/include/uapi/asm/ptrace.h b/drivers/include/uapi/asm/ptrace.h index 86fc2bb82..bc16115af 100644 --- a/drivers/include/uapi/asm/ptrace.h +++ b/drivers/include/uapi/asm/ptrace.h @@ -1,262 +1,85 @@ -#ifndef _ASM_X86_PTRACE_H -#define _ASM_X86_PTRACE_H +#ifndef _UAPI_ASM_X86_PTRACE_H +#define _UAPI_ASM_X86_PTRACE_H + +#include /* For __user */ +#include +#include -#include -#include -#include #ifndef __ASSEMBLY__ + #ifdef __i386__ +/* this struct defines the way the registers are stored on the + stack during a system call. */ + +#ifndef __KERNEL__ struct pt_regs { - unsigned long bx; - unsigned long cx; - unsigned long dx; - unsigned long si; - unsigned long di; - unsigned long bp; - unsigned long ax; - unsigned long ds; - unsigned long es; - unsigned long fs; - unsigned long gs; - unsigned long orig_ax; - unsigned long ip; - unsigned long cs; - unsigned long flags; - unsigned long sp; - unsigned long ss; + long ebx; + long ecx; + long edx; + long esi; + long edi; + long ebp; + long eax; + int xds; + int xes; + int xfs; + int xgs; + long orig_eax; + long eip; + int xcs; + long eflags; + long esp; + int xss; }; +#endif /* __KERNEL__ */ + #else /* __i386__ */ +#ifndef __KERNEL__ + struct pt_regs { +/* + * C ABI says these regs are callee-preserved. They aren't saved on kernel entry + * unless syscall needs a complete, fully filled "struct pt_regs". + */ unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; - unsigned long bp; - unsigned long bx; -/* arguments: non interrupts/non tracing syscalls only save up to here*/ + unsigned long rbp; + unsigned long rbx; +/* These regs are callee-clobbered. Always saved on kernel entry. */ unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; - unsigned long ax; - unsigned long cx; - unsigned long dx; - unsigned long si; - unsigned long di; - unsigned long orig_ax; -/* end of arguments */ -/* cpu exception frame or undefined */ - unsigned long ip; + unsigned long rax; + unsigned long rcx; + unsigned long rdx; + unsigned long rsi; + unsigned long rdi; +/* + * On syscall entry, this is syscall#. On CPU exception, this is error code. 
+ * On hw interrupt, it's IRQ number: + */ + unsigned long orig_rax; +/* Return frame for iretq */ + unsigned long rip; unsigned long cs; - unsigned long flags; - unsigned long sp; + unsigned long eflags; + unsigned long rsp; unsigned long ss; /* top of stack page */ }; +#endif /* __KERNEL__ */ #endif /* !__i386__ */ -#ifdef CONFIG_PARAVIRT -#include -#endif -struct cpuinfo_x86; -struct task_struct; - -extern unsigned long profile_pc(struct pt_regs *regs); -#define profile_pc profile_pc - -extern unsigned long -convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs); -extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, - int error_code, int si_code); - - -extern unsigned long syscall_trace_enter_phase1(struct pt_regs *, u32 arch); -extern long syscall_trace_enter_phase2(struct pt_regs *, u32 arch, - unsigned long phase1_result); - -extern long syscall_trace_enter(struct pt_regs *); -extern void syscall_trace_leave(struct pt_regs *); - -static inline unsigned long regs_return_value(struct pt_regs *regs) -{ - return regs->ax; -} - -/* - * user_mode_vm(regs) determines whether a register set came from user mode. - * This is true if V8086 mode was enabled OR if the register set was from - * protected mode with RPL-3 CS value. This tricky test checks that with - * one comparison. Many places in the kernel can bypass this full check - * if they have already ruled out V8086 mode, so user_mode(regs) can be used. - */ -static inline int user_mode(struct pt_regs *regs) -{ -#ifdef CONFIG_X86_32 - return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL; -#else - return !!(regs->cs & 3); -#endif -} - -static inline int user_mode_vm(struct pt_regs *regs) -{ -#ifdef CONFIG_X86_32 - return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >= - USER_RPL; -#else - return user_mode(regs); -#endif -} - -static inline int v8086_mode(struct pt_regs *regs) -{ -#ifdef CONFIG_X86_32 - return (regs->flags & X86_VM_MASK); -#else - return 0; /* No V86 mode support in long mode */ -#endif -} - -#ifdef CONFIG_X86_64 -static inline bool user_64bit_mode(struct pt_regs *regs) -{ -#ifndef CONFIG_PARAVIRT - /* - * On non-paravirt systems, this is the only long mode CPL 3 - * selector. We do not allow long mode selectors in the LDT. - */ - return regs->cs == __USER_CS; -#else - /* Headers are too twisted for this to go in paravirt.h. */ - return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs; -#endif -} - -#define current_user_stack_pointer() this_cpu_read(old_rsp) -/* ia32 vs. x32 difference */ -#define compat_user_stack_pointer() \ - (test_thread_flag(TIF_IA32) \ - ? current_pt_regs()->sp \ - : this_cpu_read(old_rsp)) -#endif - -#ifdef CONFIG_X86_32 -extern unsigned long kernel_stack_pointer(struct pt_regs *regs); -#else -static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) -{ - return regs->sp; -} -#endif - -#define GET_IP(regs) ((regs)->ip) -#define GET_FP(regs) ((regs)->bp) -#define GET_USP(regs) ((regs)->sp) - -#include - -/* Query offset/name of register from its name/offset */ -extern int regs_query_register_offset(const char *name); -extern const char *regs_query_register_name(unsigned int offset); -#define MAX_REG_OFFSET (offsetof(struct pt_regs, ss)) - -/** - * regs_get_register() - get register value from its offset - * @regs: pt_regs from which register value is gotten. - * @offset: offset number of the register. - * - * regs_get_register returns the value of a register. 
The @offset is the - * offset of the register in struct pt_regs address which specified by @regs. - * If @offset is bigger than MAX_REG_OFFSET, this returns 0. - */ -static inline unsigned long regs_get_register(struct pt_regs *regs, - unsigned int offset) -{ - if (unlikely(offset > MAX_REG_OFFSET)) - return 0; -#ifdef CONFIG_X86_32 - /* - * Traps from the kernel do not save sp and ss. - * Use the helper function to retrieve sp. - */ - if (offset == offsetof(struct pt_regs, sp) && - regs->cs == __KERNEL_CS) - return kernel_stack_pointer(regs); -#endif - return *(unsigned long *)((unsigned long)regs + offset); -} - -/** - * regs_within_kernel_stack() - check the address in the stack - * @regs: pt_regs which contains kernel stack pointer. - * @addr: address which is checked. - * - * regs_within_kernel_stack() checks @addr is within the kernel stack page(s). - * If @addr is within the kernel stack, it returns true. If not, returns false. - */ -static inline int regs_within_kernel_stack(struct pt_regs *regs, - unsigned long addr) -{ - return ((addr & ~(THREAD_SIZE - 1)) == - (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))); -} - -/** - * regs_get_kernel_stack_nth() - get Nth entry of the stack - * @regs: pt_regs which contains kernel stack pointer. - * @n: stack entry number. - * - * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which - * is specified by @regs. If the @n th entry is NOT in the kernel stack, - * this returns 0. - */ -static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, - unsigned int n) -{ - unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); - addr += n; - if (regs_within_kernel_stack(regs, (unsigned long)addr)) - return *addr; - else - return 0; -} - -#define arch_has_single_step() (1) -#ifdef CONFIG_X86_DEBUGCTLMSR -#define arch_has_block_step() (1) -#else -#define arch_has_block_step() (boot_cpu_data.x86 >= 6) -#endif - -#define ARCH_HAS_USER_SINGLE_STEP_INFO - -/* - * When hitting ptrace_stop(), we cannot return using SYSRET because - * that does not restore the full CPU state, only a minimal set. The - * ptracer can change arbitrary register values, which is usually okay - * because the usual ptrace stops run off the signal delivery path which - * forces IRET; however, ptrace_event() stops happen in arbitrary places - * in the kernel and don't force IRET path. - * - * So force IRET path after a ptrace stop. - */ -#define arch_ptrace_stop_needed(code, info) \ -({ \ - set_thread_flag(TIF_NOTIFY_RESUME); \ - false; \ -}) - -struct user_desc; -extern int do_get_thread_area(struct task_struct *p, int idx, - struct user_desc __user *info); -extern int do_set_thread_area(struct task_struct *p, int idx, - struct user_desc __user *info, int can_allocate); #endif /* !__ASSEMBLY__ */ -#endif /* _ASM_X86_PTRACE_H */ + +#endif /* _UAPI_ASM_X86_PTRACE_H */ diff --git a/drivers/include/uapi/asm/sigcontext.h b/drivers/include/uapi/asm/sigcontext.h index d8b9f9081..d485232f1 100644 --- a/drivers/include/uapi/asm/sigcontext.h +++ b/drivers/include/uapi/asm/sigcontext.h @@ -1,221 +1,360 @@ #ifndef _UAPI_ASM_X86_SIGCONTEXT_H #define _UAPI_ASM_X86_SIGCONTEXT_H +/* + * Linux signal context definitions. The sigcontext includes a complex + * hierarchy of CPU and FPU state, available to user-space (on the stack) when + * a signal handler is executed. 
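+ *
+ * (Illustrative, not part of the header: user-space normally reaches this
+ * state from a handler installed with sigaction() and SA_SIGINFO, through
+ * the third handler argument, e.g.
+ *
+ *	void handler(int sig, siginfo_t *si, void *ctx)
+ *	{
+ *		ucontext_t *uc = ctx;
+ *		... uc->uc_mcontext wraps the register state ...
+ *	}
+ *
+ * with the exact mcontext layout defined by the C library.)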
+ * + * As over the years this ABI grew from its very simple roots towards + * supporting more and more CPU state organically, some of the details (which + * were rather clever hacks back in the days) became a bit quirky by today. + * + * The current ABI includes flexible provisions for future extensions, so we + * won't have to grow new quirks for quite some time. Promise! + */ + #include #include -#define FP_XSTATE_MAGIC1 0x46505853U -#define FP_XSTATE_MAGIC2 0x46505845U -#define FP_XSTATE_MAGIC2_SIZE sizeof(FP_XSTATE_MAGIC2) +#define FP_XSTATE_MAGIC1 0x46505853U +#define FP_XSTATE_MAGIC2 0x46505845U +#define FP_XSTATE_MAGIC2_SIZE sizeof(FP_XSTATE_MAGIC2) /* - * bytes 464..511 in the current 512byte layout of fxsave/fxrstor frame - * are reserved for SW usage. On cpu's supporting xsave/xrstor, these bytes - * are used to extended the fpstate pointer in the sigcontext, which now - * includes the extended state information along with fpstate information. + * Bytes 464..511 in the current 512-byte layout of the FXSAVE/FXRSTOR frame + * are reserved for SW usage. On CPUs supporting XSAVE/XRSTOR, these bytes are + * used to extend the fpstate pointer in the sigcontext, which now includes the + * extended state information along with fpstate information. * - * Presence of FP_XSTATE_MAGIC1 at the beginning of this SW reserved - * area and FP_XSTATE_MAGIC2 at the end of memory layout - * (extended_size - FP_XSTATE_MAGIC2_SIZE) indicates the presence of the - * extended state information in the memory layout pointed by the fpstate - * pointer in sigcontext. + * If sw_reserved.magic1 == FP_XSTATE_MAGIC1 then there's a + * sw_reserved.extended_size bytes large extended context area present. (The + * last 32-bit word of this extended area (at the + * fpstate+extended_size-FP_XSTATE_MAGIC2_SIZE address) is set to + * FP_XSTATE_MAGIC2 so that you can sanity check your size calculations.) + * + * This extended area typically grows with newer CPUs that have larger and + * larger XSAVE areas. */ struct _fpx_sw_bytes { - __u32 magic1; /* FP_XSTATE_MAGIC1 */ - __u32 extended_size; /* total size of the layout referred by - * fpstate pointer in the sigcontext. - */ - __u64 xstate_bv; - /* feature bit mask (including fp/sse/extended - * state) that is present in the memory - * layout. - */ - __u32 xstate_size; /* actual xsave state size, based on the - * features saved in the layout. - * 'extended_size' will be greater than - * 'xstate_size'. - */ - __u32 padding[7]; /* for future use. */ + /* + * If set to FP_XSTATE_MAGIC1 then this is an xstate context. + * 0 if a legacy frame. + */ + __u32 magic1; + + /* + * Total size of the fpstate area: + * + * - if magic1 == 0 then it's sizeof(struct _fpstate) + * - if magic1 == FP_XSTATE_MAGIC1 then it's sizeof(struct _xstate) + * plus extensions (if any) + */ + __u32 extended_size; + + /* + * Feature bit mask (including FP/SSE/extended state) that is present + * in the memory layout: + */ + __u64 xfeatures; + + /* + * Actual XSAVE state size, based on the xfeatures saved in the layout. + * 'extended_size' is greater than 'xstate_size': + */ + __u32 xstate_size; + + /* For future use: */ + __u32 padding[7]; +}; + +/* + * As documented in the iBCS2 standard: + * + * The first part of "struct _fpstate" is just the normal i387 hardware setup, + * the extra "status" word is used to save the coprocessor status word before + * entering the handler. 
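+ *
+ * (Illustrative aside: each 10-byte _fpreg below holds an x87 80-bit
+ * value, which x86 user-space can view as a long double, e.g.
+ *
+ *	long double v;
+ *	memcpy(&v, &fp->_st[i], 10);
+ *
+ * where fp is a hypothetical struct _fpstate pointer.)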
+ * + * The FPU state data structure has had to grow to accommodate the extended FPU + * state required by the Streaming SIMD Extensions. There is no documented + * standard to accomplish this at the moment. + */ + +/* 10-byte legacy floating point register: */ +struct _fpreg { + __u16 significand[4]; + __u16 exponent; +}; + +/* 16-byte floating point register: */ +struct _fpxreg { + __u16 significand[4]; + __u16 exponent; + __u16 padding[3]; +}; + +/* 16-byte XMM register: */ +struct _xmmreg { + __u32 element[4]; +}; + +#define X86_FXSR_MAGIC 0x0000 + +/* + * The 32-bit FPU frame: + */ +struct _fpstate_32 { + /* Legacy FPU environment: */ + __u32 cw; + __u32 sw; + __u32 tag; + __u32 ipoff; + __u32 cssel; + __u32 dataoff; + __u32 datasel; + struct _fpreg _st[8]; + __u16 status; + __u16 magic; /* 0xffff: regular FPU data only */ + /* 0x0000: FXSR FPU data */ + + /* FXSR FPU environment */ + __u32 _fxsr_env[6]; /* FXSR FPU env is ignored */ + __u32 mxcsr; + __u32 reserved; + struct _fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */ + struct _xmmreg _xmm[8]; /* First 8 XMM registers */ + union { + __u32 padding1[44]; /* Second 8 XMM registers plus padding */ + __u32 padding[44]; /* Alias name for old user-space */ + }; + + union { + __u32 padding2[12]; + struct _fpx_sw_bytes sw_reserved; /* Potential extended state is encoded here */ + }; +}; + +/* + * The 64-bit FPU frame. (FXSAVE format and later) + * + * Note1: If sw_reserved.magic1 == FP_XSTATE_MAGIC1 then the structure is + * larger: 'struct _xstate'. Note that 'struct _xstate' embedds + * 'struct _fpstate' so that you can always assume the _fpstate portion + * exists so that you can check the magic value. + * + * Note2: Reserved fields may someday contain valuable data. Always + * save/restore them when you change signal frames. + */ +struct _fpstate_64 { + __u16 cwd; + __u16 swd; + /* Note this is not the same as the 32-bit/x87/FSAVE twd: */ + __u16 twd; + __u16 fop; + __u64 rip; + __u64 rdp; + __u32 mxcsr; + __u32 mxcsr_mask; + __u32 st_space[32]; /* 8x FP registers, 16 bytes each */ + __u32 xmm_space[64]; /* 16x XMM registers, 16 bytes each */ + __u32 reserved2[12]; + union { + __u32 reserved3[12]; + struct _fpx_sw_bytes sw_reserved; /* Potential extended state is encoded here */ + }; }; #ifdef __i386__ -/* - * As documented in the iBCS2 standard.. - * - * The first part of "struct _fpstate" is just the normal i387 - * hardware setup, the extra "status" word is used to save the - * coprocessor status word before entering the handler. - * - * Pentium III FXSR, SSE support - * Gareth Hughes , May 2000 - * - * The FPU state data structure has had to grow to accommodate the - * extended FPU state required by the Streaming SIMD Extensions. - * There is no documented standard to accomplish this at the moment. 
- */ -struct _fpreg { - unsigned short significand[4]; - unsigned short exponent; -}; - -struct _fpxreg { - unsigned short significand[4]; - unsigned short exponent; - unsigned short padding[3]; -}; - -struct _xmmreg { - unsigned long element[4]; -}; - -struct _fpstate { - /* Regular FPU environment */ - unsigned long cw; - unsigned long sw; - unsigned long tag; - unsigned long ipoff; - unsigned long cssel; - unsigned long dataoff; - unsigned long datasel; - struct _fpreg _st[8]; - unsigned short status; - unsigned short magic; /* 0xffff = regular FPU data only */ - - /* FXSR FPU environment */ - unsigned long _fxsr_env[6]; /* FXSR FPU env is ignored */ - unsigned long mxcsr; - unsigned long reserved; - struct _fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */ - struct _xmmreg _xmm[8]; - unsigned long padding1[44]; - - union { - unsigned long padding2[12]; - struct _fpx_sw_bytes sw_reserved; /* represents the extended - * state info */ - }; -}; - -#define X86_FXSR_MAGIC 0x0000 - -#ifndef __KERNEL__ -/* - * User-space might still rely on the old definition: - */ -struct sigcontext { - unsigned short gs, __gsh; - unsigned short fs, __fsh; - unsigned short es, __esh; - unsigned short ds, __dsh; - unsigned long edi; - unsigned long esi; - unsigned long ebp; - unsigned long esp; - unsigned long ebx; - unsigned long edx; - unsigned long ecx; - unsigned long eax; - unsigned long trapno; - unsigned long err; - unsigned long eip; - unsigned short cs, __csh; - unsigned long eflags; - unsigned long esp_at_signal; - unsigned short ss, __ssh; - struct _fpstate __user *fpstate; - unsigned long oldmask; - unsigned long cr2; -}; -#endif /* !__KERNEL__ */ - -#else /* __i386__ */ - -/* FXSAVE frame */ -/* Note: reserved1/2 may someday contain valuable data. Always save/restore - them when you change signal frames. */ -struct _fpstate { - __u16 cwd; - __u16 swd; - __u16 twd; /* Note this is not the same as the - 32bit/x87/FSAVE twd */ - __u16 fop; - __u64 rip; - __u64 rdp; - __u32 mxcsr; - __u32 mxcsr_mask; - __u32 st_space[32]; /* 8*16 bytes for each FP-reg */ - __u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg */ - __u32 reserved2[12]; - union { - __u32 reserved3[12]; - struct _fpx_sw_bytes sw_reserved; /* represents the extended - * state information */ - }; -}; - -#ifndef __KERNEL__ -/* - * User-space might still rely on the old definition: - */ -struct sigcontext { - __u64 r8; - __u64 r9; - __u64 r10; - __u64 r11; - __u64 r12; - __u64 r13; - __u64 r14; - __u64 r15; - __u64 rdi; - __u64 rsi; - __u64 rbp; - __u64 rbx; - __u64 rdx; - __u64 rax; - __u64 rcx; - __u64 rsp; - __u64 rip; - __u64 eflags; /* RFLAGS */ - __u16 cs; - __u16 gs; - __u16 fs; - __u16 __pad0; - __u64 err; - __u64 trapno; - __u64 oldmask; - __u64 cr2; - struct _fpstate __user *fpstate; /* zero when no FPU context */ -#ifdef __ILP32__ - __u32 __fpstate_pad; +# define _fpstate _fpstate_32 +#else +# define _fpstate _fpstate_64 #endif - __u64 reserved1[8]; -}; -#endif /* !__KERNEL__ */ -#endif /* !__i386__ */ - -struct _xsave_hdr { - __u64 xstate_bv; - __u64 reserved1[2]; - __u64 reserved2[5]; +struct _header { + __u64 xfeatures; + __u64 reserved1[2]; + __u64 reserved2[5]; }; struct _ymmh_state { - /* 16 * 16 bytes for each YMMH-reg */ - __u32 ymmh_space[64]; + /* 16x YMM registers, 16 bytes each: */ + __u32 ymmh_space[64]; }; /* - * Extended state pointed by the fpstate pointer in the sigcontext. 
- * In addition to the fpstate, information encoded in the xstate_hdr - * indicates the presence of other extended state information - * supported by the processor and OS. + * Extended state pointed to by sigcontext::fpstate. + * + * In addition to the fpstate, information encoded in _xstate::xstate_hdr + * indicates the presence of other extended state information supported + * by the CPU and kernel: */ struct _xstate { - struct _fpstate fpstate; - struct _xsave_hdr xstate_hdr; - struct _ymmh_state ymmh; - /* new processor state extensions go here */ + struct _fpstate fpstate; + struct _header xstate_hdr; + struct _ymmh_state ymmh; + /* New processor state extensions go here: */ }; +/* + * The 32-bit signal frame: + */ +struct sigcontext_32 { + __u16 gs, __gsh; + __u16 fs, __fsh; + __u16 es, __esh; + __u16 ds, __dsh; + __u32 di; + __u32 si; + __u32 bp; + __u32 sp; + __u32 bx; + __u32 dx; + __u32 cx; + __u32 ax; + __u32 trapno; + __u32 err; + __u32 ip; + __u16 cs, __csh; + __u32 flags; + __u32 sp_at_signal; + __u16 ss, __ssh; + + /* + * fpstate is really (struct _fpstate *) or (struct _xstate *) + * depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved + * bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end + * of extended memory layout. See comments at the definition of + * (struct _fpx_sw_bytes) + */ + __u32 fpstate; /* Zero when no FPU/extended context */ + __u32 oldmask; + __u32 cr2; +}; + +/* + * The 64-bit signal frame: + */ +struct sigcontext_64 { + __u64 r8; + __u64 r9; + __u64 r10; + __u64 r11; + __u64 r12; + __u64 r13; + __u64 r14; + __u64 r15; + __u64 di; + __u64 si; + __u64 bp; + __u64 bx; + __u64 dx; + __u64 ax; + __u64 cx; + __u64 sp; + __u64 ip; + __u64 flags; + __u16 cs; + __u16 gs; + __u16 fs; + __u16 __pad0; + __u64 err; + __u64 trapno; + __u64 oldmask; + __u64 cr2; + + /* + * fpstate is really (struct _fpstate *) or (struct _xstate *) + * depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved + * bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end + * of extended memory layout. See comments at the definition of + * (struct _fpx_sw_bytes) + */ + __u64 fpstate; /* Zero when no FPU/extended context */ + __u64 reserved1[8]; +}; + +/* + * Create the real 'struct sigcontext' type: + */ +#ifdef __KERNEL__ +# ifdef __i386__ +# define sigcontext sigcontext_32 +# else +# define sigcontext sigcontext_64 +# endif +#endif + +/* + * The old user-space sigcontext definition, just in case user-space still + * relies on it. The kernel definition (in asm/sigcontext.h) has unified + * field names but otherwise the same layout. 
+ */ +#ifndef __KERNEL__ + +#define _fpstate_ia32 _fpstate_32 +#define sigcontext_ia32 sigcontext_32 + + +# ifdef __i386__ +struct sigcontext { + __u16 gs, __gsh; + __u16 fs, __fsh; + __u16 es, __esh; + __u16 ds, __dsh; + __u32 edi; + __u32 esi; + __u32 ebp; + __u32 esp; + __u32 ebx; + __u32 edx; + __u32 ecx; + __u32 eax; + __u32 trapno; + __u32 err; + __u32 eip; + __u16 cs, __csh; + __u32 eflags; + __u32 esp_at_signal; + __u16 ss, __ssh; + struct _fpstate __user *fpstate; + __u32 oldmask; + __u32 cr2; +}; +# else /* __x86_64__: */ +struct sigcontext { + __u64 r8; + __u64 r9; + __u64 r10; + __u64 r11; + __u64 r12; + __u64 r13; + __u64 r14; + __u64 r15; + __u64 rdi; + __u64 rsi; + __u64 rbp; + __u64 rbx; + __u64 rdx; + __u64 rax; + __u64 rcx; + __u64 rsp; + __u64 rip; + __u64 eflags; /* RFLAGS */ + __u16 cs; + __u16 gs; + __u16 fs; + __u16 __pad0; + __u64 err; + __u64 trapno; + __u64 oldmask; + __u64 cr2; + struct _fpstate __user *fpstate; /* Zero when no FPU context */ +# ifdef __ILP32__ + __u32 __fpstate_pad; +# endif + __u64 reserved1[8]; +}; +# endif /* __x86_64__ */ +#endif /* !__KERNEL__ */ + #endif /* _UAPI_ASM_X86_SIGCONTEXT_H */ diff --git a/drivers/include/uapi/drm/drm.h b/drivers/include/uapi/drm/drm.h index 9634a1bce..ad4871f4a 100644 --- a/drivers/include/uapi/drm/drm.h +++ b/drivers/include/uapi/drm/drm.h @@ -630,6 +630,7 @@ struct drm_gem_open { */ #define DRM_CAP_CURSOR_WIDTH 0x8 #define DRM_CAP_CURSOR_HEIGHT 0x9 +#define DRM_CAP_ADDFB2_MODIFIERS 0x10 /** DRM_IOCTL_GET_CAP ioctl argument type */ struct drm_get_cap { @@ -654,6 +655,13 @@ struct drm_get_cap { */ #define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2 +/** + * DRM_CLIENT_CAP_ATOMIC + * + * If set to 1, the DRM core will expose atomic properties to userspace + */ +#define DRM_CLIENT_CAP_ATOMIC 3 + /** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */ struct drm_set_client_cap { __u64 capability; @@ -777,6 +785,9 @@ struct drm_prime_handle { #define DRM_IOCTL_MODE_OBJ_GETPROPERTIES DRM_IOWR(0xB9, struct drm_mode_obj_get_properties) #define DRM_IOCTL_MODE_OBJ_SETPROPERTY DRM_IOWR(0xBA, struct drm_mode_obj_set_property) #define DRM_IOCTL_MODE_CURSOR2 DRM_IOWR(0xBB, struct drm_mode_cursor2) +#define DRM_IOCTL_MODE_ATOMIC DRM_IOWR(0xBC, struct drm_mode_atomic) +#define DRM_IOCTL_MODE_CREATEPROPBLOB DRM_IOWR(0xBD, struct drm_mode_create_blob) +#define DRM_IOCTL_MODE_DESTROYPROPBLOB DRM_IOWR(0xBE, struct drm_mode_destroy_blob) /** * Device specific ioctls should only be in their respective headers diff --git a/drivers/include/uapi/drm/drm_fourcc.h b/drivers/include/uapi/drm/drm_fourcc.h index 646ae5f39..0b69a7753 100644 --- a/drivers/include/uapi/drm/drm_fourcc.h +++ b/drivers/include/uapi/drm/drm_fourcc.h @@ -34,6 +34,13 @@ /* color index */ #define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */ +/* 8 bpp Red */ +#define DRM_FORMAT_R8 fourcc_code('R', '8', ' ', ' ') /* [7:0] R */ + +/* 16 bpp RG */ +#define DRM_FORMAT_RG88 fourcc_code('R', 'G', '8', '8') /* [15:0] R:G 8:8 little endian */ +#define DRM_FORMAT_GR88 fourcc_code('G', 'R', '8', '8') /* [15:0] G:R 8:8 little endian */ + /* 8 bpp RGB */ #define DRM_FORMAT_RGB332 fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */ #define DRM_FORMAT_BGR233 fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */ @@ -109,9 +116,6 @@ #define DRM_FORMAT_NV24 fourcc_code('N', 'V', '2', '4') /* non-subsampled Cr:Cb plane */ #define DRM_FORMAT_NV42 fourcc_code('N', 'V', '4', '2') /* non-subsampled Cb:Cr plane */ -/* special NV12 tiled format */ -#define DRM_FORMAT_NV12MT 
fourcc_code('T', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane 64x32 macroblocks */ - /* * 3 plane YCbCr * index 0: Y plane, [7:0] Y @@ -132,4 +136,97 @@ #define DRM_FORMAT_YUV444 fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */ #define DRM_FORMAT_YVU444 fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */ + +/* + * Format Modifiers: + * + * Format modifiers describe, typically, a re-ordering or modification + * of the data in a plane of an FB. This can be used to express tiled/ + * swizzled formats, or compression, or a combination of the two. + * + * The upper 8 bits of the format modifier are a vendor-id as assigned + * below. The lower 56 bits are assigned as vendor sees fit. + */ + +/* Vendor Ids: */ +#define DRM_FORMAT_MOD_NONE 0 +#define DRM_FORMAT_MOD_VENDOR_INTEL 0x01 +#define DRM_FORMAT_MOD_VENDOR_AMD 0x02 +#define DRM_FORMAT_MOD_VENDOR_NV 0x03 +#define DRM_FORMAT_MOD_VENDOR_SAMSUNG 0x04 +#define DRM_FORMAT_MOD_VENDOR_QCOM 0x05 +/* add more to the end as needed */ + +#define fourcc_mod_code(vendor, val) \ + ((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | (val & 0x00ffffffffffffffULL)) + +/* + * Format Modifier tokens: + * + * When adding a new token please document the layout with a code comment, + * similar to the fourcc codes above. drm_fourcc.h is considered the + * authoritative source for all of these. + */ + +/* Intel framebuffer modifiers */ + +/* + * Intel X-tiling layout + * + * This is a tiled layout using 4Kb tiles (except on gen2 where the tiles 2Kb) + * in row-major layout. Within the tile bytes are laid out row-major, with + * a platform-dependent stride. On top of that the memory can apply + * platform-depending swizzling of some higher address bits into bit6. + * + * This format is highly platforms specific and not useful for cross-driver + * sharing. It exists since on a given platform it does uniquely identify the + * layout in a simple way for i915-specific userspace. + */ +#define I915_FORMAT_MOD_X_TILED fourcc_mod_code(INTEL, 1) + +/* + * Intel Y-tiling layout + * + * This is a tiled layout using 4Kb tiles (except on gen2 where the tiles 2Kb) + * in row-major layout. Within the tile bytes are laid out in OWORD (16 bytes) + * chunks column-major, with a platform-dependent height. On top of that the + * memory can apply platform-depending swizzling of some higher address bits + * into bit6. + * + * This format is highly platforms specific and not useful for cross-driver + * sharing. It exists since on a given platform it does uniquely identify the + * layout in a simple way for i915-specific userspace. + */ +#define I915_FORMAT_MOD_Y_TILED fourcc_mod_code(INTEL, 2) + +/* + * Intel Yf-tiling layout + * + * This is a tiled layout using 4Kb tiles in row-major layout. + * Within the tile pixels are laid out in 16 256 byte units / sub-tiles which + * are arranged in four groups (two wide, two high) with column-major layout. + * Each group therefore consits out of four 256 byte units, which are also laid + * out as 2x2 column-major. + * 256 byte units are made out of four 64 byte blocks of pixels, producing + * either a square block or a 2:1 unit. + * 64 byte blocks of pixels contain four pixel rows of 16 bytes, where the width + * in pixel depends on the pixel depth. 
+ */ +#define I915_FORMAT_MOD_Yf_TILED fourcc_mod_code(INTEL, 3) + +/* + * Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks + * + * Macroblocks are laid in a Z-shape, and each pixel data is following the + * standard NV12 style. + * As for NV12, an image is the result of two frame buffers: one for Y, + * one for the interleaved Cb/Cr components (1/2 the height of the Y buffer). + * Alignment requirements are (for each buffer): + * - multiple of 128 pixels for the width + * - multiple of 32 pixels for the height + * + * For more information: see http://linuxtv.org/downloads/v4l-dvb-apis/re32.html + */ +#define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE fourcc_mod_code(SAMSUNG, 1) + #endif /* DRM_FOURCC_H */ diff --git a/drivers/include/uapi/drm/drm_mode.h b/drivers/include/uapi/drm/drm_mode.h index 86574b000..6c11ca401 100644 --- a/drivers/include/uapi/drm/drm_mode.h +++ b/drivers/include/uapi/drm/drm_mode.h @@ -105,8 +105,16 @@ struct drm_mode_modeinfo { __u32 clock; - __u16 hdisplay, hsync_start, hsync_end, htotal, hskew; - __u16 vdisplay, vsync_start, vsync_end, vtotal, vscan; + __u16 hdisplay; + __u16 hsync_start; + __u16 hsync_end; + __u16 htotal; + __u16 hskew; + __u16 vdisplay; + __u16 vsync_start; + __u16 vsync_end; + __u16 vtotal; + __u16 vscan; __u32 vrefresh; @@ -124,8 +132,10 @@ struct drm_mode_card_res { __u32 count_crtcs; __u32 count_connectors; __u32 count_encoders; - __u32 min_width, max_width; - __u32 min_height, max_height; + __u32 min_width; + __u32 max_width; + __u32 min_height; + __u32 max_height; }; struct drm_mode_crtc { @@ -135,7 +145,8 @@ struct drm_mode_crtc { __u32 crtc_id; /**< Id */ __u32 fb_id; /**< Id of framebuffer */ - __u32 x, y; /**< Position on the frameuffer */ + __u32 x; /**< x Position on the framebuffer */ + __u32 y; /**< y Position on the framebuffer */ __u32 gamma_size; __u32 mode_valid; @@ -153,12 +164,16 @@ struct drm_mode_set_plane { __u32 flags; /* see above flags */ /* Signed dest location allows it to be partially off screen */ - __s32 crtc_x, crtc_y; - __u32 crtc_w, crtc_h; + __s32 crtc_x; + __s32 crtc_y; + __u32 crtc_w; + __u32 crtc_h; /* Source values are 16.16 fixed point */ - __u32 src_x, src_y; - __u32 src_h, src_w; + __u32 src_x; + __u32 src_y; + __u32 src_h; + __u32 src_w; }; struct drm_mode_get_plane { @@ -244,7 +259,8 @@ struct drm_mode_get_connector { __u32 connector_type_id; __u32 connection; - __u32 mm_width, mm_height; /**< HxW in millimeters */ + __u32 mm_width; /**< width in millimeters */ + __u32 mm_height; /**< height in millimeters */ __u32 subpixel; __u32 pad; @@ -272,6 +288,13 @@ struct drm_mode_get_connector { #define DRM_MODE_PROP_OBJECT DRM_MODE_PROP_TYPE(1) #define DRM_MODE_PROP_SIGNED_RANGE DRM_MODE_PROP_TYPE(2) +/* the PROP_ATOMIC flag is used to hide properties from userspace that + * is not aware of atomic properties. This is mostly to work around + * older userspace (DDX drivers) that read/write each prop they find, + * witout being aware that this could be triggering a lengthy modeset. 
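+ *
+ * (Illustrative: a client-side guard over an enumerated property list
+ * would be, e.g.
+ *
+ *	if (props->flags[i] & DRM_MODE_PROP_ATOMIC)
+ *		continue;
+ *
+ * with props a hypothetical enumeration result; unaware clients are
+ * instead filtered by the core based on DRM_CLIENT_CAP_ATOMIC.)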
+ */ +#define DRM_MODE_PROP_ATOMIC 0x80000000 + struct drm_mode_property_enum { __u64 value; char name[DRM_PROP_NAME_LEN]; @@ -320,7 +343,8 @@ struct drm_mode_get_blob { struct drm_mode_fb_cmd { __u32 fb_id; - __u32 width, height; + __u32 width; + __u32 height; __u32 pitch; __u32 bpp; __u32 depth; @@ -329,16 +353,18 @@ struct drm_mode_fb_cmd { }; #define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */ +#define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifer[] */ struct drm_mode_fb_cmd2 { __u32 fb_id; - __u32 width, height; + __u32 width; + __u32 height; __u32 pixel_format; /* fourcc code from drm_fourcc.h */ __u32 flags; /* see above flags */ /* * In case of planar formats, this ioctl allows up to 4 - * buffer objects with offets and pitches per plane. + * buffer objects with offsets and pitches per plane. * The pitch and offset order is dictated by the fourcc, * e.g. NV12 (http://fourcc.org/yuv.php#NV12) is described as: * @@ -346,13 +372,21 @@ struct drm_mode_fb_cmd2 { * followed by an interleaved U/V plane containing * 8 bit 2x2 subsampled colour difference samples. * - * So it would consist of Y as offset[0] and UV as - * offeset[1]. Note that offset[0] will generally - * be 0. + * So it would consist of Y as offsets[0] and UV as + * offsets[1]. Note that offsets[0] will generally + * be 0 (but this is not required). + * + * To accommodate tiled, compressed, etc formats, a per-plane + * modifier can be specified. The default value of zero + * indicates "native" format as specified by the fourcc. + * Vendor specific modifier token. This allows, for example, + * different tiling/swizzling pattern on different planes. + * See discussion above of DRM_FORMAT_MOD_xxx. */ __u32 handles[4]; __u32 pitches[4]; /* pitch for each plane */ __u32 offsets[4]; /* offset of each plane */ + __u64 modifier[4]; /* ie, tiling, compressed (per plane) */ }; #define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01 @@ -519,4 +553,47 @@ struct drm_mode_destroy_dumb { uint32_t handle; }; +/* page-flip flags are valid, plus: */ +#define DRM_MODE_ATOMIC_TEST_ONLY 0x0100 +#define DRM_MODE_ATOMIC_NONBLOCK 0x0200 +#define DRM_MODE_ATOMIC_ALLOW_MODESET 0x0400 + +#define DRM_MODE_ATOMIC_FLAGS (\ + DRM_MODE_PAGE_FLIP_EVENT |\ + DRM_MODE_PAGE_FLIP_ASYNC |\ + DRM_MODE_ATOMIC_TEST_ONLY |\ + DRM_MODE_ATOMIC_NONBLOCK |\ + DRM_MODE_ATOMIC_ALLOW_MODESET) + +struct drm_mode_atomic { + __u32 flags; + __u32 count_objs; + __u64 objs_ptr; + __u64 count_props_ptr; + __u64 props_ptr; + __u64 prop_values_ptr; + __u64 reserved; + __u64 user_data; +}; + +/** + * Create a new 'blob' data property, copying length bytes from data pointer, + * and returning new blob ID. + */ +struct drm_mode_create_blob { + /** Pointer to data to copy. */ + __u64 data; + /** Length of data to copy. */ + __u32 length; + /** Return: new property ID. */ + __u32 blob_id; +}; + +/** + * Destroy a user-created blob property. + */ +struct drm_mode_destroy_blob { + __u32 blob_id; +}; + #endif diff --git a/drivers/include/uapi/drm/i915_drm.h b/drivers/include/uapi/drm/i915_drm.h index 9d6fc0eea..c20aad19b 100644 --- a/drivers/include/uapi/drm/i915_drm.h +++ b/drivers/include/uapi/drm/i915_drm.h @@ -171,8 +171,12 @@ typedef struct _drm_i915_sarea { #define I915_BOX_TEXTURE_LOAD 0x8 #define I915_BOX_LOST_CONTEXT 0x10 -/* I915 specific ioctls - * The device specific ioctl range is 0x40 to 0x79. +/* + * i915 specific ioctls. + * + * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END) ie + * [0x40, 0xa0) (a0 is excluded). 
The numbers below are defined as offset + * against DRM_COMMAND_BASE and should be between [0x0, 0x60). */ #define DRM_I915_INIT 0x00 #define DRM_I915_FLUSH 0x01 @@ -224,6 +228,8 @@ typedef struct _drm_i915_sarea { #define DRM_I915_REG_READ 0x31 #define DRM_I915_GET_RESET_STATS 0x32 #define DRM_I915_GEM_USERPTR 0x33 +#define DRM_I915_GEM_CONTEXT_GETPARAM 0x34 +#define DRM_I915_GEM_CONTEXT_SETPARAM 0x35 #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) @@ -268,13 +274,15 @@ typedef struct _drm_i915_sarea { #define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image) #define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs) #define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) -#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) +#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) #define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait) #define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create) #define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy) #define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read) #define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats) #define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr) +#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param) +#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param) /* Allow drivers to submit batchbuffers directly to hardware, relying * on the security mechanisms provided by hardware. @@ -341,9 +349,20 @@ typedef struct drm_i915_irq_wait { #define I915_PARAM_HAS_WT 27 #define I915_PARAM_CMD_PARSER_VERSION 28 #define I915_PARAM_HAS_COHERENT_PHYS_GTT 29 +#define I915_PARAM_MMAP_VERSION 30 +#define I915_PARAM_HAS_BSD2 31 +#define I915_PARAM_REVISION 32 +#define I915_PARAM_SUBSLICE_TOTAL 33 +#define I915_PARAM_EU_TOTAL 34 +#define I915_PARAM_HAS_GPU_RESET 35 +#define I915_PARAM_HAS_RESOURCE_STREAMER 36 typedef struct drm_i915_getparam { - int param; + __s32 param; + /* + * WARNING: Using pointers instead of fixed-size u64 means we need to write + * compat32 code. Don't repeat this mistake. + */ int __user *value; } drm_i915_getparam_t; @@ -488,6 +507,14 @@ struct drm_i915_gem_mmap { * This is a fixed-size type for 32/64 compatibility. */ __u64 addr_ptr; + + /** + * Flags for extended behaviour. + * + * Added in version 2. 
+ */ + __u64 flags; +#define I915_MMAP_WC 0x1 }; struct drm_i915_gem_mmap_gtt { @@ -663,7 +690,8 @@ struct drm_i915_gem_exec_object2 { #define EXEC_OBJECT_NEEDS_FENCE (1<<0) #define EXEC_OBJECT_NEEDS_GTT (1<<1) #define EXEC_OBJECT_WRITE (1<<2) -#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_WRITE<<1) +#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3) +#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_SUPPORTS_48B_ADDRESS<<1) __u64 flags; __u64 rsvd1; @@ -737,7 +765,18 @@ struct drm_i915_gem_execbuffer2 { */ #define I915_EXEC_HANDLE_LUT (1<<12) -#define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_HANDLE_LUT<<1) +/** Used for switching BSD rings on the platforms with two BSD rings */ +#define I915_EXEC_BSD_MASK (3<<13) +#define I915_EXEC_BSD_DEFAULT (0<<13) /* default ping-pong mode */ +#define I915_EXEC_BSD_RING1 (1<<13) +#define I915_EXEC_BSD_RING2 (2<<13) + +/** Tell the kernel that the batchbuffer is processed by + * the resource streamer. + */ +#define I915_EXEC_RESOURCE_STREAMER (1<<15) + +#define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_RESOURCE_STREAMER<<1) #define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) #define i915_execbuffer2_set_context_id(eb2, context) \ @@ -973,6 +1012,7 @@ struct drm_intel_overlay_put_image { /* flags */ #define I915_OVERLAY_UPDATE_ATTRS (1<<0) #define I915_OVERLAY_UPDATE_GAMMA (1<<1) +#define I915_OVERLAY_DISABLE_DEST_COLORKEY (1<<2) struct drm_intel_overlay_attrs { __u32 flags; __u32 color_key; @@ -1042,6 +1082,14 @@ struct drm_i915_reg_read { __u64 offset; __u64 val; /* Return value */ }; +/* Known registers: + * + * Render engine timestamp - 0x2358 + 64bit - gen7+ + * - Note this register returns an invalid value if using the default + * single instruction 8byte read, in order to workaround that use + * offset (0x2538 | 1) instead. 
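+ *
+ * (Illustrative: assuming the render timestamp register at 0x2358, a
+ * user-space read via this interface would look like
+ *
+ *	struct drm_i915_reg_read rr = { .offset = 0x2358 | 1 };
+ *	int ret = drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &rr);
+ *
+ * with rr.val holding the 64-bit value on success.)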
diff --git a/drivers/include/uapi/drm/radeon_drm.h b/drivers/include/uapi/drm/radeon_drm.h
index 50d0fb41a..01aa2a8e3 100644
--- a/drivers/include/uapi/drm/radeon_drm.h
+++ b/drivers/include/uapi/drm/radeon_drm.h
@@ -33,7 +33,7 @@
 #ifndef __RADEON_DRM_H__
 #define __RADEON_DRM_H__
 
-#include <drm/drm.h>
+#include "drm.h"
 
 /* WARNING: If you change any of these defines, make sure to change the
  * defines in the X server file (radeon_sarea.h)
@@ -1034,6 +1034,12 @@ struct drm_radeon_cs {
 #define RADEON_INFO_VRAM_USAGE		0x1e
 #define RADEON_INFO_GTT_USAGE		0x1f
 #define RADEON_INFO_ACTIVE_CU_COUNT	0x20
+#define RADEON_INFO_CURRENT_GPU_TEMP	0x21
+#define RADEON_INFO_CURRENT_GPU_SCLK	0x22
+#define RADEON_INFO_CURRENT_GPU_MCLK	0x23
+#define RADEON_INFO_READ_REG		0x24
+#define RADEON_INFO_VA_UNMAP_WORKING	0x25
+#define RADEON_INFO_GPU_RESET_COUNTER	0x26
 
 struct drm_radeon_info {
 	uint32_t		request;
diff --git a/drivers/include/uapi/drm/vmwgfx_drm.h b/drivers/include/uapi/drm/vmwgfx_drm.h
index c472bedbe..05b204954 100644
--- a/drivers/include/uapi/drm/vmwgfx_drm.h
+++ b/drivers/include/uapi/drm/vmwgfx_drm.h
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -64,6 +64,7 @@
 #define DRM_VMW_GB_SURFACE_CREATE	23
 #define DRM_VMW_GB_SURFACE_REF		24
 #define DRM_VMW_SYNCCPU			25
+#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
 
 /*************************************************************************/
 /**
@@ -88,6 +89,8 @@
 #define DRM_VMW_PARAM_3D_CAPS_SIZE     8
 #define DRM_VMW_PARAM_MAX_MOB_MEMORY   9
 #define DRM_VMW_PARAM_MAX_MOB_SIZE     10
+#define DRM_VMW_PARAM_SCREEN_TARGET    11
+#define DRM_VMW_PARAM_DX               12
 
 /**
  * enum drm_vmw_handle_type - handle type for ref ioctls
@@ -296,7 +299,7 @@ union drm_vmw_surface_reference_arg {
  * Argument to the DRM_VMW_EXECBUF Ioctl.
  */
 
-#define DRM_VMW_EXECBUF_VERSION 1
+#define DRM_VMW_EXECBUF_VERSION 2
 
 struct drm_vmw_execbuf_arg {
 	uint64_t commands;
@@ -305,6 +308,8 @@ struct drm_vmw_execbuf_arg {
 	uint64_t fence_rep;
 	uint32_t version;
 	uint32_t flags;
+	uint32_t context_handle;
+	uint32_t pad64;
 };
 
 /**
@@ -825,7 +830,6 @@ struct drm_vmw_update_layout_arg {
 
 enum drm_vmw_shader_type {
 	drm_vmw_shader_type_vs = 0,
 	drm_vmw_shader_type_ps,
-	drm_vmw_shader_type_gs
 };
 
@@ -907,6 +911,8 @@ enum drm_vmw_surface_flags {
  * @buffer_handle     Buffer handle of backup buffer. SVGA3D_INVALID_ID
  *                    if none.
  * @base_size         Size of the base mip level for all faces.
+ * @array_size        Must be zero for non-DX hardware, and if non-zero
+ *                    svga3d_flags must have proper bind flags setup.
  *
  * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
  * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
@@ -919,7 +925,7 @@ struct drm_vmw_gb_surface_create_req {
 	uint32_t multisample_count;
 	uint32_t autogen_filter;
 	uint32_t buffer_handle;
-	uint32_t pad64;
+	uint32_t array_size;
 	struct drm_vmw_size base_size;
 };
 
@@ -1059,4 +1065,28 @@ struct drm_vmw_synccpu_arg {
 	uint32_t pad64;
 };
 
+/*************************************************************************/
+/**
+ * DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context.
+ *
+ * Allocates a device unique context id, and queues a create context command
+ * for the host. Does not wait for host completion.
+ */
+enum drm_vmw_extended_context {
+	drm_vmw_context_legacy,
+	drm_vmw_context_dx
+};
+
+/**
+ * union drm_vmw_extended_context_arg
+ *
+ * @req: Context type.
+ * @rep: Context identifier.
+ *
+ * Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl.
+ */
+union drm_vmw_extended_context_arg {
+	enum drm_vmw_extended_context req;
+	struct drm_vmw_context_arg rep;
+};
 #endif
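The new request takes an enum drm_vmw_extended_context in and returns a struct drm_vmw_context_arg. A hedged userspace sketch using libdrm's drmCommandWriteRead(), which composes the ioctl number from the DRM_VMW_CREATE_EXTENDED_CONTEXT offset; the cid field name comes from struct drm_vmw_context_arg, and the helper is illustrative only:

```c
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "vmwgfx_drm.h"

/* Ask the host for a DX-capable context; stores the new context id. */
static int vmw_create_dx_context(int fd, int32_t *cid)
{
	union drm_vmw_extended_context_arg arg;
	int ret;

	memset(&arg, 0, sizeof(arg));
	arg.req = drm_vmw_context_dx;	/* or drm_vmw_context_legacy */

	ret = drmCommandWriteRead(fd, DRM_VMW_CREATE_EXTENDED_CONTEXT,
				  &arg, sizeof(arg));
	if (ret == 0)
		*cid = arg.rep.cid;	/* does not wait for host completion */
	return ret;
}
```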
diff --git a/drivers/include/uapi/linux/media-bus-format.h b/drivers/include/uapi/linux/media-bus-format.h
new file mode 100644
index 000000000..190d491d5
--- /dev/null
+++ b/drivers/include/uapi/linux/media-bus-format.h
@@ -0,0 +1,137 @@
+/*
+ * Media Bus API header
+ *
+ * Copyright (C) 2009, Guennadi Liakhovetski
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_MEDIA_BUS_FORMAT_H
+#define __LINUX_MEDIA_BUS_FORMAT_H
+
+/*
+ * These bus formats uniquely identify data formats on the data bus. Format 0
+ * is reserved, MEDIA_BUS_FMT_FIXED shall be used by host-client pairs, where
+ * the data format is fixed. Additionally, "2X8" means that one pixel is
+ * transferred in two 8-bit samples, "BE" or "LE" specify in which order those
+ * samples are transferred over the bus: "LE" means that the least significant
+ * bits are transferred first, "BE" means that the most significant bits are
+ * transferred first, and "PADHI" and "PADLO" define which bits - low or high,
+ * in the incomplete high byte, are filled with padding bits.
+ *
+ * The bus formats are grouped by type, bus_width, bits per component, samples
+ * per pixel and order of subsamples. Numerical values are sorted using generic
+ * numerical sort order (8 thus comes before 10).
+ *
+ * As their value can't change when a new bus format is inserted in the
+ * enumeration, the bus formats are explicitly given a numerical value. The next
+ * free values for each category are listed below, update them when inserting
+ * new pixel codes.
+ */
+
+#define MEDIA_BUS_FMT_FIXED			0x0001
+
+/* RGB - next is 0x1018 */
+#define MEDIA_BUS_FMT_RGB444_1X12		0x1016
+#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE	0x1001
+#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE	0x1002
+#define MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE	0x1003
+#define MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE	0x1004
+#define MEDIA_BUS_FMT_RGB565_1X16		0x1017
+#define MEDIA_BUS_FMT_BGR565_2X8_BE		0x1005
+#define MEDIA_BUS_FMT_BGR565_2X8_LE		0x1006
+#define MEDIA_BUS_FMT_RGB565_2X8_BE		0x1007
+#define MEDIA_BUS_FMT_RGB565_2X8_LE		0x1008
+#define MEDIA_BUS_FMT_RGB666_1X18		0x1009
+#define MEDIA_BUS_FMT_RBG888_1X24		0x100e
+#define MEDIA_BUS_FMT_RGB666_1X24_CPADHI	0x1015
+#define MEDIA_BUS_FMT_RGB666_1X7X3_SPWG		0x1010
+#define MEDIA_BUS_FMT_BGR888_1X24		0x1013
+#define MEDIA_BUS_FMT_GBR888_1X24		0x1014
+#define MEDIA_BUS_FMT_RGB888_1X24		0x100a
+#define MEDIA_BUS_FMT_RGB888_2X12_BE		0x100b
+#define MEDIA_BUS_FMT_RGB888_2X12_LE		0x100c
+#define MEDIA_BUS_FMT_RGB888_1X7X4_SPWG		0x1011
+#define MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA	0x1012
+#define MEDIA_BUS_FMT_ARGB8888_1X32		0x100d
+#define MEDIA_BUS_FMT_RGB888_1X32_PADHI		0x100f
+
+/* YUV (including grey) - next is 0x2026 */
+#define MEDIA_BUS_FMT_Y8_1X8			0x2001
+#define MEDIA_BUS_FMT_UV8_1X8			0x2015
+#define MEDIA_BUS_FMT_UYVY8_1_5X8		0x2002
+#define MEDIA_BUS_FMT_VYUY8_1_5X8		0x2003
+#define MEDIA_BUS_FMT_YUYV8_1_5X8		0x2004
+#define MEDIA_BUS_FMT_YVYU8_1_5X8		0x2005
+#define MEDIA_BUS_FMT_UYVY8_2X8			0x2006
+#define MEDIA_BUS_FMT_VYUY8_2X8			0x2007
+#define MEDIA_BUS_FMT_YUYV8_2X8			0x2008
+#define MEDIA_BUS_FMT_YVYU8_2X8			0x2009
+#define MEDIA_BUS_FMT_Y10_1X10			0x200a
+#define MEDIA_BUS_FMT_UYVY10_2X10		0x2018
+#define MEDIA_BUS_FMT_VYUY10_2X10		0x2019
+#define MEDIA_BUS_FMT_YUYV10_2X10		0x200b
+#define MEDIA_BUS_FMT_YVYU10_2X10		0x200c
+#define MEDIA_BUS_FMT_Y12_1X12			0x2013
+#define MEDIA_BUS_FMT_UYVY12_2X12		0x201c
+#define MEDIA_BUS_FMT_VYUY12_2X12		0x201d
+#define MEDIA_BUS_FMT_YUYV12_2X12		0x201e
+#define MEDIA_BUS_FMT_YVYU12_2X12		0x201f
+#define MEDIA_BUS_FMT_UYVY8_1X16		0x200f
+#define MEDIA_BUS_FMT_VYUY8_1X16		0x2010
+#define MEDIA_BUS_FMT_YUYV8_1X16		0x2011
+#define MEDIA_BUS_FMT_YVYU8_1X16		0x2012
+#define MEDIA_BUS_FMT_YDYUYDYV8_1X16		0x2014
+#define MEDIA_BUS_FMT_UYVY10_1X20		0x201a
+#define MEDIA_BUS_FMT_VYUY10_1X20		0x201b
+#define MEDIA_BUS_FMT_YUYV10_1X20		0x200d
+#define MEDIA_BUS_FMT_YVYU10_1X20		0x200e
+#define MEDIA_BUS_FMT_VUY8_1X24			0x2024
+#define MEDIA_BUS_FMT_YUV8_1X24			0x2025
+#define MEDIA_BUS_FMT_UYVY12_1X24		0x2020
+#define MEDIA_BUS_FMT_VYUY12_1X24		0x2021
+#define MEDIA_BUS_FMT_YUYV12_1X24		0x2022
+#define MEDIA_BUS_FMT_YVYU12_1X24		0x2023
+#define MEDIA_BUS_FMT_YUV10_1X30		0x2016
+#define MEDIA_BUS_FMT_AYUV8_1X32		0x2017
+
+/* Bayer - next is 0x3019 */
+#define MEDIA_BUS_FMT_SBGGR8_1X8		0x3001
+#define MEDIA_BUS_FMT_SGBRG8_1X8		0x3013
+#define MEDIA_BUS_FMT_SGRBG8_1X8		0x3002
+#define MEDIA_BUS_FMT_SRGGB8_1X8		0x3014
+#define MEDIA_BUS_FMT_SBGGR10_ALAW8_1X8		0x3015
+#define MEDIA_BUS_FMT_SGBRG10_ALAW8_1X8		0x3016
+#define MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8		0x3017
+#define MEDIA_BUS_FMT_SRGGB10_ALAW8_1X8		0x3018
+#define MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8		0x300b
+#define MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8		0x300c
+#define MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8		0x3009
+#define MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8		0x300d
+#define MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE	0x3003
+#define MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE	0x3004
+#define MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE	0x3005
+#define MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE	0x3006
+#define MEDIA_BUS_FMT_SBGGR10_1X10		0x3007
+#define MEDIA_BUS_FMT_SGBRG10_1X10		0x300e
+#define MEDIA_BUS_FMT_SGRBG10_1X10		0x300a
+#define MEDIA_BUS_FMT_SRGGB10_1X10		0x300f
+#define MEDIA_BUS_FMT_SBGGR12_1X12		0x3008
+#define MEDIA_BUS_FMT_SGBRG12_1X12		0x3010
+#define MEDIA_BUS_FMT_SGRBG12_1X12		0x3011
+#define MEDIA_BUS_FMT_SRGGB12_1X12		0x3012
+
+/* JPEG compressed formats - next is 0x4002 */
+#define MEDIA_BUS_FMT_JPEG_1X8			0x4001
+
+/* Vendor specific formats - next is 0x5002 */
+
+/* S5C73M3 sensor specific interleaved UYVY and JPEG */
+#define MEDIA_BUS_FMT_S5C_UYVY_JPEG_1X8		0x5001
+
+/* HSV - next is 0x6002 */
+#define MEDIA_BUS_FMT_AHSV8888_1X32		0x6001
+
+#endif /* __LINUX_MEDIA_BUS_FORMAT_H */
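The naming convention described in the header comment can be decoded mechanically. An illustrative helper, not part of the header, showing what "2X8" versus "1X16" means for the RGB565 codes:

```c
#include <linux/types.h>
#include <linux/media-bus-format.h>

/* Samples per pixel implied by the format name: "2X8" is one pixel in
 * two 8-bit bus samples, "1X16" is a single 16-bit sample. */
static unsigned int rgb565_samples_per_pixel(__u32 code)
{
	switch (code) {
	case MEDIA_BUS_FMT_RGB565_2X8_BE:	/* MSBs transferred first */
	case MEDIA_BUS_FMT_RGB565_2X8_LE:	/* LSBs transferred first */
		return 2;
	case MEDIA_BUS_FMT_RGB565_1X16:
		return 1;
	default:
		return 0;	/* not an RGB565 bus code */
	}
}
```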
diff --git a/drivers/include/uapi/linux/swab.h b/drivers/include/uapi/linux/swab.h
new file mode 100644
index 000000000..0e011eb91
--- /dev/null
+++ b/drivers/include/uapi/linux/swab.h
@@ -0,0 +1,288 @@
+#ifndef _UAPI_LINUX_SWAB_H
+#define _UAPI_LINUX_SWAB_H
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <asm/swab.h>
+
+/*
+ * casts are necessary for constants, because we never know for sure how
+ * U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
+ */
+#define ___constant_swab16(x) ((__u16)(			\
+	(((__u16)(x) & (__u16)0x00ffU) << 8) |		\
+	(((__u16)(x) & (__u16)0xff00U) >> 8)))
+
+#define ___constant_swab32(x) ((__u32)(			\
+	(((__u32)(x) & (__u32)0x000000ffUL) << 24) |	\
+	(((__u32)(x) & (__u32)0x0000ff00UL) << 8) |	\
+	(((__u32)(x) & (__u32)0x00ff0000UL) >> 8) |	\
+	(((__u32)(x) & (__u32)0xff000000UL) >> 24)))
+
+#define ___constant_swab64(x) ((__u64)(				\
+	(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) |	\
+	(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) |	\
+	(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) |	\
+	(((__u64)(x) & (__u64)0x00000000ff000000ULL) << 8) |	\
+	(((__u64)(x) & (__u64)0x000000ff00000000ULL) >> 8) |	\
+	(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) |	\
+	(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) |	\
+	(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56)))
+
+#define ___constant_swahw32(x) ((__u32)(		\
+	(((__u32)(x) & (__u32)0x0000ffffUL) << 16) |	\
+	(((__u32)(x) & (__u32)0xffff0000UL) >> 16)))
+
+#define ___constant_swahb32(x) ((__u32)(		\
+	(((__u32)(x) & (__u32)0x00ff00ffUL) << 8) |	\
+	(((__u32)(x) & (__u32)0xff00ff00UL) >> 8)))
+
+/*
+ * Implement the following as inlines, but define the interface using
+ * macros to allow constant folding when possible:
+ * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
+ */
+
+static inline __attribute_const__ __u16 __fswab16(__u16 val)
+{
+#ifdef __HAVE_BUILTIN_BSWAP16__
+	return __builtin_bswap16(val);
+#elif defined (__arch_swab16)
+	return __arch_swab16(val);
+#else
+	return ___constant_swab16(val);
+#endif
+}
+
+static inline __attribute_const__ __u32 __fswab32(__u32 val)
+{
+#ifdef __HAVE_BUILTIN_BSWAP32__
+	return __builtin_bswap32(val);
+#elif defined(__arch_swab32)
+	return __arch_swab32(val);
+#else
+	return ___constant_swab32(val);
+#endif
+}
+
+static inline __attribute_const__ __u64 __fswab64(__u64 val)
+{
+#ifdef __HAVE_BUILTIN_BSWAP64__
+	return __builtin_bswap64(val);
+#elif defined (__arch_swab64)
+	return __arch_swab64(val);
+#elif defined(__SWAB_64_THRU_32__)
+	__u32 h = val >> 32;
+	__u32 l = val & ((1ULL << 32) - 1);
+	return (((__u64)__fswab32(l)) << 32) | ((__u64)(__fswab32(h)));
+#else
+	return ___constant_swab64(val);
+#endif
+}
+
+static inline __attribute_const__ __u32 __fswahw32(__u32 val)
+{
+#ifdef __arch_swahw32
+	return __arch_swahw32(val);
+#else
+	return ___constant_swahw32(val);
+#endif
+}
+
+static inline __attribute_const__ __u32 __fswahb32(__u32 val)
+{
+#ifdef __arch_swahb32
+	return __arch_swahb32(val);
+#else
+	return ___constant_swahb32(val);
+#endif
+}
+
+/**
+ * __swab16 - return a byteswapped 16-bit value
+ * @x: value to byteswap
+ */
+#define __swab16(x)				\
+	(__builtin_constant_p((__u16)(x)) ?	\
+	___constant_swab16(x) :			\
+	__fswab16(x))
+
+/**
+ * __swab32 - return a byteswapped 32-bit value
+ * @x: value to byteswap
+ */
+#define __swab32(x)				\
+	(__builtin_constant_p((__u32)(x)) ?	\
+	___constant_swab32(x) :			\
+	__fswab32(x))
+
+/**
+ * __swab64 - return a byteswapped 64-bit value
+ * @x: value to byteswap
+ */
+#define __swab64(x)				\
+	(__builtin_constant_p((__u64)(x)) ?	\
+	___constant_swab64(x) :			\
+	__fswab64(x))
+
+/**
+ * __swahw32 - return a word-swapped 32-bit value
+ * @x: value to wordswap
+ *
+ * __swahw32(0x12340000) is 0x00001234
+ */
+#define __swahw32(x)				\
+	(__builtin_constant_p((__u32)(x)) ?	\
+	___constant_swahw32(x) :		\
+	__fswahw32(x))
+
+/**
+ * __swahb32 - return a high and low byte-swapped 32-bit value
+ * @x: value to byteswap
+ *
+ * __swahb32(0x12345678) is 0x34127856
+ */
+#define __swahb32(x)				\
+	(__builtin_constant_p((__u32)(x)) ?	\
+	___constant_swahb32(x) :		\
+	__fswahb32(x))
+
+/**
+ * __swab16p - return a byteswapped 16-bit value from a pointer
+ * @p: pointer to a naturally-aligned 16-bit value
+ */
+static inline __u16 __swab16p(const __u16 *p)
+{
+#ifdef __arch_swab16p
+	return __arch_swab16p(p);
+#else
+	return __swab16(*p);
+#endif
+}
+
+/**
+ * __swab32p - return a byteswapped 32-bit value from a pointer
+ * @p: pointer to a naturally-aligned 32-bit value
+ */
+static inline __u32 __swab32p(const __u32 *p)
+{
+#ifdef __arch_swab32p
+	return __arch_swab32p(p);
+#else
+	return __swab32(*p);
+#endif
+}
+
+/**
+ * __swab64p - return a byteswapped 64-bit value from a pointer
+ * @p: pointer to a naturally-aligned 64-bit value
+ */
+static inline __u64 __swab64p(const __u64 *p)
+{
+#ifdef __arch_swab64p
+	return __arch_swab64p(p);
+#else
+	return __swab64(*p);
+#endif
+}
+
+/**
+ * __swahw32p - return a wordswapped 32-bit value from a pointer
+ * @p: pointer to a naturally-aligned 32-bit value
+ *
+ * See __swahw32() for details of wordswapping.
+ */
+static inline __u32 __swahw32p(const __u32 *p)
+{
+#ifdef __arch_swahw32p
+	return __arch_swahw32p(p);
+#else
+	return __swahw32(*p);
+#endif
+}
+
+/**
+ * __swahb32p - return a high and low byteswapped 32-bit value from a pointer
+ * @p: pointer to a naturally-aligned 32-bit value
+ *
+ * See __swahb32() for details of high/low byteswapping.
+ */
+static inline __u32 __swahb32p(const __u32 *p)
+{
+#ifdef __arch_swahb32p
+	return __arch_swahb32p(p);
+#else
+	return __swahb32(*p);
+#endif
+}
+
+/**
+ * __swab16s - byteswap a 16-bit value in-place
+ * @p: pointer to a naturally-aligned 16-bit value
+ */
+static inline void __swab16s(__u16 *p)
+{
+#ifdef __arch_swab16s
+	__arch_swab16s(p);
+#else
+	*p = __swab16p(p);
+#endif
+}
+/**
+ * __swab32s - byteswap a 32-bit value in-place
+ * @p: pointer to a naturally-aligned 32-bit value
+ */
+static inline void __swab32s(__u32 *p)
+{
+#ifdef __arch_swab32s
+	__arch_swab32s(p);
+#else
+	*p = __swab32p(p);
+#endif
+}
+
+/**
+ * __swab64s - byteswap a 64-bit value in-place
+ * @p: pointer to a naturally-aligned 64-bit value
+ */
+static inline void __swab64s(__u64 *p)
+{
+#ifdef __arch_swab64s
+	__arch_swab64s(p);
+#else
+	*p = __swab64p(p);
+#endif
+}
+
+/**
+ * __swahw32s - wordswap a 32-bit value in-place
+ * @p: pointer to a naturally-aligned 32-bit value
+ *
+ * See __swahw32() for details of wordswapping
+ */
+static inline void __swahw32s(__u32 *p)
+{
+#ifdef __arch_swahw32s
+	__arch_swahw32s(p);
+#else
+	*p = __swahw32p(p);
+#endif
+}
+
+/**
+ * __swahb32s - high and low byteswap a 32-bit value in-place
+ * @p: pointer to a naturally-aligned 32-bit value
+ *
+ * See __swahb32() for details of high and low byte swapping
+ */
+static inline void __swahb32s(__u32 *p)
+{
+#ifdef __arch_swahb32s
+	__arch_swahb32s(p);
+#else
+	*p = __swahb32p(p);
+#endif
+}
+
+
+#endif /* _UAPI_LINUX_SWAB_H */
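Usage is uniform across widths. A brief sketch, assuming a kernel-side caller on a host whose byte order is opposite to the wire format, of the three flavours: the constant-folding macro, the pointer helper, and the in-place variant:

```c
#include <linux/swab.h>

/* 0x1234 folds to 0x3412 at compile time via ___constant_swab16(). */
#define WIRE_MAGIC	__swab16(0x1234)

static __u32 parse_wire32(const __u32 *raw)
{
	return __swab32p(raw);		/* byteswap a value loaded via pointer */
}

static void fix_stamp_endianness(__u64 *stamp)
{
	__swab64s(stamp);		/* swap the 64-bit value in place */
}
```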
diff --git a/drivers/include/video/display_timing.h b/drivers/include/video/display_timing.h
new file mode 100644
index 000000000..28d9d0d56
--- /dev/null
+++ b/drivers/include/video/display_timing.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2012 Steffen Trumtrar
+ *
+ * description of display timings
+ *
+ * This file is released under the GPLv2
+ */
+
+#ifndef __LINUX_DISPLAY_TIMING_H
+#define __LINUX_DISPLAY_TIMING_H
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+
+enum display_flags {
+	DISPLAY_FLAGS_HSYNC_LOW		= BIT(0),
+	DISPLAY_FLAGS_HSYNC_HIGH	= BIT(1),
+	DISPLAY_FLAGS_VSYNC_LOW		= BIT(2),
+	DISPLAY_FLAGS_VSYNC_HIGH	= BIT(3),
+
+	/* data enable flag */
+	DISPLAY_FLAGS_DE_LOW		= BIT(4),
+	DISPLAY_FLAGS_DE_HIGH		= BIT(5),
+	/* drive data on pos. edge */
+	DISPLAY_FLAGS_PIXDATA_POSEDGE	= BIT(6),
+	/* drive data on neg. edge */
+	DISPLAY_FLAGS_PIXDATA_NEGEDGE	= BIT(7),
+	DISPLAY_FLAGS_INTERLACED	= BIT(8),
+	DISPLAY_FLAGS_DOUBLESCAN	= BIT(9),
+	DISPLAY_FLAGS_DOUBLECLK		= BIT(10),
+};
+
+/*
+ * A single signal can be specified via a range of minimal and maximal values
+ * with a typical value, that lies somewhere in between.
+ */
+struct timing_entry {
+	u32 min;
+	u32 typ;
+	u32 max;
+};
+
+/*
+ * Single "mode" entry. This describes one set of signal timings a display can
+ * have in one setting. This struct can later be converted to struct videomode
+ * (see include/video/videomode.h). As each timing_entry can be defined as a
+ * range, one struct display_timing may become multiple struct videomodes.
+ *
+ * Example: hsync active high, vsync active low
+ *
+ *				    Active Video
+ * Video  ______________________XXXXXXXXXXXXXXXXXXXXXX_____________________
+ *	  |<- sync ->|<- back ->|<----- active ----->|<- front ->|<- sync..
+ *	  |	     |	porch  |		     |	 porch	 |
+ *
+ * HSync _|¯¯¯¯¯¯¯¯¯¯|___________________________________________|¯¯¯¯¯¯¯¯¯
+ *
+ * VSync ¯|__________|¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|_________
+ */
+struct display_timing {
+	struct timing_entry pixelclock;
+
+	struct timing_entry hactive;		/* hor. active video */
+	struct timing_entry hfront_porch;	/* hor. front porch */
+	struct timing_entry hback_porch;	/* hor. back porch */
+	struct timing_entry hsync_len;		/* hor. sync len */
+
+	struct timing_entry vactive;		/* ver. active video */
+	struct timing_entry vfront_porch;	/* ver. front porch */
+	struct timing_entry vback_porch;	/* ver. back porch */
+	struct timing_entry vsync_len;		/* ver. sync len */
+
+	enum display_flags flags;		/* display flags */
+};
+
+/*
+ * This describes all timing settings a display provides.
+ * The native_mode is the default setting for this display.
+ * Drivers that can handle multiple videomodes should work with this struct and
+ * convert each entry to the desired end result.
+ */
+struct display_timings {
+	unsigned int num_timings;
+	unsigned int native_mode;
+
+	struct display_timing **timings;
+};
+
+/* get one entry from struct display_timings */
+static inline struct display_timing *display_timings_get(const struct
+							  display_timings *disp,
+							  unsigned int index)
+{
+	if (disp->num_timings > index)
+		return disp->timings[index];
+	else
+		return NULL;
+}
+
+void display_timings_release(struct display_timings *disp);
+
+#endif
diff --git a/drivers/include/video/of_videomode.h b/drivers/include/video/of_videomode.h
new file mode 100644
index 000000000..a07efcc51
--- /dev/null
+++ b/drivers/include/video/of_videomode.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2012 Steffen Trumtrar
+ *
+ * videomode of-helpers
+ *
+ * This file is released under the GPLv2
+ */
+
+#ifndef __LINUX_OF_VIDEOMODE_H
+#define __LINUX_OF_VIDEOMODE_H
+
+struct device_node;
+struct videomode;
+
+int of_get_videomode(struct device_node *np, struct videomode *vm,
+		     int index);
+
+#endif /* __LINUX_OF_VIDEOMODE_H */
diff --git a/drivers/include/video/videomode.h b/drivers/include/video/videomode.h
new file mode 100644
index 000000000..3f1049d87
--- /dev/null
+++ b/drivers/include/video/videomode.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2012 Steffen Trumtrar
+ *
+ * generic videomode description
+ *
+ * This file is released under the GPLv2
+ */
+
+#ifndef __LINUX_VIDEOMODE_H
+#define __LINUX_VIDEOMODE_H
+
+#include <linux/types.h>
+#include <video/display_timing.h>
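A minimal sketch of consuming these structures, e.g. from a panel driver: display_timings_get() bounds-checks the index, and every timing_entry carries a min/typ/max range, so a driver would typically start from the typical value of the native mode (helper name illustrative):

```c
#include <video/display_timing.h>

/* Typical pixel clock of the panel's native mode, or 0 if unavailable. */
static u32 native_pixelclock_typ(const struct display_timings *disp)
{
	struct display_timing *t =
		display_timings_get(disp, disp->native_mode);

	return t ? t->pixelclock.typ : 0;
}
```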