diff --git a/drivers/ddk/linux/div64.c b/drivers/ddk/linux/div64.c index 7a96b80a66..ce9ac7a836 100644 --- a/drivers/ddk/linux/div64.c +++ b/drivers/ddk/linux/div64.c @@ -13,7 +13,8 @@ * * Code generated for this function might be very inefficient * for some CPUs. __div64_32() can be overridden by linking arch-specific - * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S. + * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S + * or by defining a preprocessor macro in arch/include/asm/div64.h. */ #include @@ -23,6 +24,7 @@ /* Not needed on 64bit architectures */ #if BITS_PER_LONG == 32 +#ifndef __div64_32 uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base) { uint64_t rem = *n; @@ -55,8 +57,8 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base) *n = res; return rem; } - EXPORT_SYMBOL(__div64_32); +#endif #ifndef div_s64_rem s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder) diff --git a/drivers/ddk/linux/kasprintf.c b/drivers/ddk/linux/kasprintf.c index 04b6cb8f8c..b414dc0c4d 100644 --- a/drivers/ddk/linux/kasprintf.c +++ b/drivers/ddk/linux/kasprintf.c @@ -13,24 +13,42 @@ /* Simplified asprintf. */ char *kvasprintf(gfp_t gfp, const char *fmt, va_list ap) { - unsigned int len; + unsigned int first, second; char *p; va_list aq; va_copy(aq, ap); - len = vsnprintf(NULL, 0, fmt, aq); + first = vsnprintf(NULL, 0, fmt, aq); va_end(aq); - p = kmalloc(len+1, gfp); + p = kmalloc(first+1, gfp); if (!p) return NULL; - vsnprintf(p, len+1, fmt, ap); + second = vsnprintf(p, first+1, fmt, ap); + WARN(first != second, "different return values (%u and %u) from vsnprintf(\"%s\", ...)", + first, second, fmt); return p; } EXPORT_SYMBOL(kvasprintf); +/* + * If fmt contains no % (or is exactly %s), use kstrdup_const. If fmt + * (or the sole vararg) points to rodata, we will then save a memory + * allocation and string copy. In any case, the return value should be + * freed using kfree_const(). + */ +const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list ap) +{ + if (!strchr(fmt, '%')) + return strdup(fmt); + if (!strcmp(fmt, "%s")) + return strdup(va_arg(ap, const char*)); + return kvasprintf(gfp, fmt, ap); +} +EXPORT_SYMBOL(kvasprintf_const); + char *kasprintf(gfp_t gfp, const char *fmt, ...) { va_list ap; diff --git a/drivers/ddk/linux/scatterlist.c b/drivers/ddk/linux/scatterlist.c index 547777bf20..d5cd3c8660 100644 --- a/drivers/ddk/linux/scatterlist.c +++ b/drivers/ddk/linux/scatterlist.c @@ -578,9 +578,9 @@ EXPORT_SYMBOL(sg_miter_next); * * Description: * Stops mapping iterator @miter. @miter should have been started - * started using sg_miter_start(). A stopped iteration can be - * resumed by calling sg_miter_next() on it. This is useful when - * resources (kmap) need to be released during iteration. + * using sg_miter_start(). A stopped iteration can be resumed by + * calling sg_miter_next() on it. This is useful when resources (kmap) + * need to be released during iteration. * * Context: * Preemption disabled if the SG_MITER_ATOMIC is set. 
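The kvasprintf() change above keeps the usual two-pass vsnprintf() idiom (size with a NULL buffer, then format for real), now with a WARN if the two passes disagree. A minimal userspace sketch of the same sizing idiom, not from the patch and with illustrative names:

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/* Sketch: measure first, then format into an exactly-sized buffer.
 * Returns NULL on formatting or allocation failure. */
static char *vasprintf_sketch(const char *fmt, va_list ap)
{
	va_list aq;
	int len;
	char *p;

	va_copy(aq, ap);
	len = vsnprintf(NULL, 0, fmt, aq);	/* sizing pass only */
	va_end(aq);
	if (len < 0)
		return NULL;

	p = malloc(len + 1);
	if (!p)
		return NULL;

	vsnprintf(p, len + 1, fmt, ap);		/* real formatting pass */
	return p;
}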
Don't care diff --git a/drivers/ddk/linux/string.c b/drivers/ddk/linux/string.c index 30f04ffc94..2cf32ad782 100644 --- a/drivers/ddk/linux/string.c +++ b/drivers/ddk/linux/string.c @@ -28,6 +28,34 @@ #include +#ifndef __HAVE_ARCH_STRNCPY +/** + * strncpy - Copy a length-limited, C-string + * @dest: Where to copy the string to + * @src: Where to copy the string from + * @count: The maximum number of bytes to copy + * + * The result is not %NUL-terminated if the source exceeds + * @count bytes. + * + * In the case where the length of @src is less than that of + * count, the remainder of @dest will be padded with %NUL. + * + */ +char *strncpy(char *dest, const char *src, size_t count) +{ + char *tmp = dest; + + while (count) { + if ((*tmp = *src) != 0) + src++; + tmp++; + count--; + } + return dest; +} +EXPORT_SYMBOL(strncpy); +#endif #ifndef __HAVE_ARCH_STRLCPY /** diff --git a/drivers/include/asm-generic/barrier.h b/drivers/include/asm-generic/barrier.h new file mode 100644 index 0000000000..1cceca1469 --- /dev/null +++ b/drivers/include/asm-generic/barrier.h @@ -0,0 +1,211 @@ +/* + * Generic barrier definitions, originally based on MN10300 definitions. + * + * It should be possible to use these on really simple architectures, + * but it serves more as a starting point for new ports. + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ +#ifndef __ASM_GENERIC_BARRIER_H +#define __ASM_GENERIC_BARRIER_H + +#ifndef __ASSEMBLY__ + +#include + +#ifndef nop +#define nop() asm volatile ("nop") +#endif + +/* + * Force strict CPU ordering. And yes, this is required on UP too when we're + * talking to devices. + * + * Fall back to compiler barriers if nothing better is provided. 
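As the kernel-doc for the strncpy() added above notes, the result is NUL-padded for short sources but not NUL-terminated when the source fills the whole buffer. A small usage sketch (buffer and source names are made up) showing the explicit termination callers typically add:

	char name[8];

	strncpy(name, some_long_source, sizeof(name) - 1);
	name[sizeof(name) - 1] = '\0';	/* strncpy() won't add this if the source is 7+ chars */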
+ */ + +#ifndef mb +#define mb() barrier() +#endif + +#ifndef rmb +#define rmb() mb() +#endif + +#ifndef wmb +#define wmb() mb() +#endif + +#ifndef dma_rmb +#define dma_rmb() rmb() +#endif + +#ifndef dma_wmb +#define dma_wmb() wmb() +#endif + +#ifndef read_barrier_depends +#define read_barrier_depends() do { } while (0) +#endif + +#ifndef __smp_mb +#define __smp_mb() mb() +#endif + +#ifndef __smp_rmb +#define __smp_rmb() rmb() +#endif + +#ifndef __smp_wmb +#define __smp_wmb() wmb() +#endif + +#ifndef __smp_read_barrier_depends +#define __smp_read_barrier_depends() read_barrier_depends() +#endif + +#ifdef CONFIG_SMP + +#ifndef smp_mb +#define smp_mb() __smp_mb() +#endif + +#ifndef smp_rmb +#define smp_rmb() __smp_rmb() +#endif + +#ifndef smp_wmb +#define smp_wmb() __smp_wmb() +#endif + +#ifndef smp_read_barrier_depends +#define smp_read_barrier_depends() __smp_read_barrier_depends() +#endif + +#else /* !CONFIG_SMP */ + +#ifndef smp_mb +#define smp_mb() barrier() +#endif + +#ifndef smp_rmb +#define smp_rmb() barrier() +#endif + +#ifndef smp_wmb +#define smp_wmb() barrier() +#endif + +#ifndef smp_read_barrier_depends +#define smp_read_barrier_depends() do { } while (0) +#endif + +#endif /* CONFIG_SMP */ + +#ifndef __smp_store_mb +#define __smp_store_mb(var, value) do { WRITE_ONCE(var, value); __smp_mb(); } while (0) +#endif + +#ifndef __smp_mb__before_atomic +#define __smp_mb__before_atomic() __smp_mb() +#endif + +#ifndef __smp_mb__after_atomic +#define __smp_mb__after_atomic() __smp_mb() +#endif + +#ifndef __smp_store_release +#define __smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + __smp_mb(); \ + WRITE_ONCE(*p, v); \ +} while (0) +#endif + +#ifndef __smp_load_acquire +#define __smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = READ_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + __smp_mb(); \ + ___p1; \ +}) +#endif + +#ifdef CONFIG_SMP + +#ifndef smp_store_mb +#define smp_store_mb(var, value) __smp_store_mb(var, value) +#endif + +#ifndef smp_mb__before_atomic +#define smp_mb__before_atomic() __smp_mb__before_atomic() +#endif + +#ifndef smp_mb__after_atomic +#define smp_mb__after_atomic() __smp_mb__after_atomic() +#endif + +#ifndef smp_store_release +#define smp_store_release(p, v) __smp_store_release(p, v) +#endif + +#ifndef smp_load_acquire +#define smp_load_acquire(p) __smp_load_acquire(p) +#endif + +#else /* !CONFIG_SMP */ + +#ifndef smp_store_mb +#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0) +#endif + +#ifndef smp_mb__before_atomic +#define smp_mb__before_atomic() barrier() +#endif + +#ifndef smp_mb__after_atomic +#define smp_mb__after_atomic() barrier() +#endif + +#ifndef smp_store_release +#define smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + barrier(); \ + WRITE_ONCE(*p, v); \ +} while (0) +#endif + +#ifndef smp_load_acquire +#define smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = READ_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + barrier(); \ + ___p1; \ +}) +#endif + +#endif + +/* Barriers for virtual machine guests when talking to an SMP host */ +#define virt_mb() __smp_mb() +#define virt_rmb() __smp_rmb() +#define virt_wmb() __smp_wmb() +#define virt_read_barrier_depends() __smp_read_barrier_depends() +#define virt_store_mb(var, value) __smp_store_mb(var, value) +#define virt_mb__before_atomic() __smp_mb__before_atomic() +#define virt_mb__after_atomic() __smp_mb__after_atomic() +#define virt_store_release(p, v) __smp_store_release(p, v) +#define virt_load_acquire(p) 
__smp_load_acquire(p) + +#endif /* !__ASSEMBLY__ */ +#endif /* __ASM_GENERIC_BARRIER_H */ diff --git a/drivers/include/asm-generic/bug.h b/drivers/include/asm-generic/bug.h new file mode 100644 index 0000000000..630dd23722 --- /dev/null +++ b/drivers/include/asm-generic/bug.h @@ -0,0 +1,211 @@ +#ifndef _ASM_GENERIC_BUG_H +#define _ASM_GENERIC_BUG_H + +#include + +#ifdef CONFIG_GENERIC_BUG +#define BUGFLAG_WARNING (1 << 0) +#define BUGFLAG_TAINT(taint) (BUGFLAG_WARNING | ((taint) << 8)) +#define BUG_GET_TAINT(bug) ((bug)->flags >> 8) +#endif + +#ifndef __ASSEMBLY__ +#include + +#ifdef CONFIG_BUG + +#ifdef CONFIG_GENERIC_BUG +struct bug_entry { +#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS + unsigned long bug_addr; +#else + signed int bug_addr_disp; +#endif +#ifdef CONFIG_DEBUG_BUGVERBOSE +#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS + const char *file; +#else + signed int file_disp; +#endif + unsigned short line; +#endif + unsigned short flags; +}; +#endif /* CONFIG_GENERIC_BUG */ + +/* + * Don't use BUG() or BUG_ON() unless there's really no way out; one + * example might be detecting data structure corruption in the middle + * of an operation that can't be backed out of. If the (sub)system + * can somehow continue operating, perhaps with reduced functionality, + * it's probably not BUG-worthy. + * + * If you're tempted to BUG(), think again: is completely giving up + * really the *only* solution? There are usually better options, where + * users don't need to reboot ASAP and can mostly shut down cleanly. + */ +#ifndef HAVE_ARCH_BUG +#define BUG() do { \ + printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \ + panic("BUG!"); \ +} while (0) +#endif + +#ifndef HAVE_ARCH_BUG_ON +#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0) +#endif + +/* + * WARN(), WARN_ON(), WARN_ON_ONCE, and so on can be used to report + * significant issues that need prompt attention if they should ever + * appear at runtime. Use the versions with printk format strings + * to provide better diagnostics. + */ +#ifndef __WARN_TAINT +extern __printf(3, 4) +void warn_slowpath_fmt(const char *file, const int line, + const char *fmt, ...); +extern __printf(4, 5) +void warn_slowpath_fmt_taint(const char *file, const int line, unsigned taint, + const char *fmt, ...); +extern void warn_slowpath_null(const char *file, const int line); +#define WANT_WARN_ON_SLOWPATH +#define __WARN() warn_slowpath_null(__FILE__, __LINE__) +#define __WARN_printf(arg...) warn_slowpath_fmt(__FILE__, __LINE__, arg) +#define __WARN_printf_taint(taint, arg...) \ + warn_slowpath_fmt_taint(__FILE__, __LINE__, taint, arg) +#else +#define __WARN() __WARN_TAINT(TAINT_WARN) +#define __WARN_printf(arg...) do { printk(arg); __WARN(); } while (0) +#define __WARN_printf_taint(taint, arg...) \ + do { printk(arg); __WARN_TAINT(taint); } while (0) +#endif + +#ifndef WARN_ON +#define WARN_ON(condition) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + __WARN(); \ + unlikely(__ret_warn_on); \ +}) +#endif + +#ifndef WARN +#define WARN(condition, format...) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + __WARN_printf(format); \ + unlikely(__ret_warn_on); \ +}) +#endif + +#define WARN_TAINT(condition, taint, format...) 
({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + __WARN_printf_taint(taint, format); \ + unlikely(__ret_warn_on); \ +}) + +#define WARN_ON_ONCE(condition) ({ \ + static bool __section(.data.unlikely) __warned; \ + int __ret_warn_once = !!(condition); \ + \ + if (unlikely(__ret_warn_once)) \ + if (WARN_ON(!__warned)) \ + __warned = true; \ + unlikely(__ret_warn_once); \ +}) + +#define WARN_ONCE(condition, format...) ({ \ + static bool __section(.data.unlikely) __warned; \ + int __ret_warn_once = !!(condition); \ + \ + if (unlikely(__ret_warn_once)) \ + if (WARN(!__warned, format)) \ + __warned = true; \ + unlikely(__ret_warn_once); \ +}) + +#define WARN_TAINT_ONCE(condition, taint, format...) ({ \ + static bool __section(.data.unlikely) __warned; \ + int __ret_warn_once = !!(condition); \ + \ + if (unlikely(__ret_warn_once)) \ + if (WARN_TAINT(!__warned, taint, format)) \ + __warned = true; \ + unlikely(__ret_warn_once); \ +}) + +#else /* !CONFIG_BUG */ +#ifndef HAVE_ARCH_BUG +#define BUG() do {} while (1) +#endif + +#ifndef HAVE_ARCH_BUG_ON +#define BUG_ON(condition) do { if (condition) ; } while (0) +#endif + +#ifndef HAVE_ARCH_WARN_ON +#define WARN_ON(condition) ({ \ + int __ret_warn_on = !!(condition); \ + unlikely(__ret_warn_on); \ +}) +#endif + +#ifndef WARN +#define WARN(condition, format...) ({ \ + int __ret_warn_on = !!(condition); \ + no_printk(format); \ + unlikely(__ret_warn_on); \ +}) +#endif + +#define WARN_ON_ONCE(condition) WARN_ON(condition) +#define WARN_ONCE(condition, format...) WARN(condition, format) +#define WARN_TAINT(condition, taint, format...) WARN(condition, format) +#define WARN_TAINT_ONCE(condition, taint, format...) WARN(condition, format) + +#endif + +/* + * WARN_ON_SMP() is for cases that the warning is either + * meaningless for !SMP or may even cause failures. + * This is usually used for cases that we have + * WARN_ON(!spin_is_locked(&lock)) checks, as spin_is_locked() + * returns 0 for uniprocessor settings. + * It can also be used with values that are only defined + * on SMP: + * + * struct foo { + * [...] + * #ifdef CONFIG_SMP + * int bar; + * #endif + * }; + * + * void func(struct foo *zoot) + * { + * WARN_ON_SMP(!zoot->bar); + * + * For CONFIG_SMP, WARN_ON_SMP() should act the same as WARN_ON(), + * and should be a nop and return false for uniprocessor. + * + * if (WARN_ON_SMP(x)) returns true only when CONFIG_SMP is set + * and x is true. + */ +#ifdef CONFIG_SMP +# define WARN_ON_SMP(x) WARN_ON(x) +#else +/* + * Use of ({0;}) because WARN_ON_SMP(x) may be used either as + * a stand alone line statement or as a condition in an if () + * statement. + * A simple "0" would cause gcc to give a "statement has no effect" + * warning. + */ +# define WARN_ON_SMP(x) ({0;}) +#endif + +#endif /* __ASSEMBLY__ */ + +#endif diff --git a/drivers/include/asm-generic/iomap.h b/drivers/include/asm-generic/iomap.h new file mode 100644 index 0000000000..4b160bba3e --- /dev/null +++ b/drivers/include/asm-generic/iomap.h @@ -0,0 +1,102 @@ +#ifndef __GENERIC_IO_H +#define __GENERIC_IO_H + +#include +#include + +/* + * These are the "generic" interfaces for doing new-style + * memory-mapped or PIO accesses. Architectures may do + * their own arch-optimized versions, these just act as + * wrappers around the old-style IO register access functions: + * read[bwl]/write[bwl]/in[bwl]/out[bwl] + * + * Don't include this directly, include it from . + */ + +/* + * Read/write from/to an (offsettable) iomem cookie. 
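WARN_ON_ONCE() above uses a function-local static flag so the report fires only the first time a call site trips. A minimal sketch of the same once-only idiom outside the kernel macros (plain GNU C, names made up):

#include <stdbool.h>
#include <stdio.h>

/* Report a condition at most once per call site, mirroring the
 * static __warned flag used by WARN_ON_ONCE(). */
#define report_once(cond, msg) ({				\
	static bool __reported;					\
	bool __c = !!(cond);					\
	if (__c && !__reported) {				\
		__reported = true;				\
		fprintf(stderr, "warning: %s\n", (msg));	\
	}							\
	__c;							\
})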
It might be a PIO + * access or a MMIO access, these functions don't care. The info is + * encoded in the hardware mapping set up by the mapping functions + * (or the cookie itself, depending on implementation and hw). + * + * The generic routines just encode the PIO/MMIO as part of the + * cookie, and coldly assume that the MMIO IO mappings are not + * in the low address range. Architectures for which this is not + * true can't use this generic implementation. + */ +extern unsigned int ioread8(void __iomem *); +extern unsigned int ioread16(void __iomem *); +extern unsigned int ioread16be(void __iomem *); +//extern unsigned int ioread32(void __iomem *); + +#ifndef ioread32 +#define ioread32 ioread32 +static inline u32 ioread32(const volatile void __iomem *addr) +{ + return readl(addr); +} +#endif + +extern unsigned int ioread32be(void __iomem *); + +extern void iowrite8(u8, void __iomem *); +extern void iowrite16(u16, void __iomem *); +extern void iowrite16be(u16, void __iomem *); +//extern void iowrite32(u32, void __iomem *); +#ifndef iowrite32 +//#define iowrite32 iowrite32 +static inline void iowrite32(u32 value, volatile void __iomem *addr) +{ + writel(value, addr); +} +#endif + +extern void iowrite32be(u32, void __iomem *); + +/* + * "string" versions of the above. Note that they + * use native byte ordering for the accesses (on + * the assumption that IO and memory agree on a + * byte order, and CPU byteorder is irrelevant). + * + * They do _not_ update the port address. If you + * want MMIO that copies stuff laid out in MMIO + * memory across multiple ports, use "memcpy_toio()" + * and friends. + */ +extern void ioread8_rep(void __iomem *port, void *buf, unsigned long count); +extern void ioread16_rep(void __iomem *port, void *buf, unsigned long count); +extern void ioread32_rep(void __iomem *port, void *buf, unsigned long count); + +extern void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count); +extern void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count); +extern void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count); + +#ifdef CONFIG_HAS_IOPORT_MAP +/* Create a virtual mapping cookie for an IO port range */ +extern void __iomem *ioport_map(unsigned long port, unsigned int nr); +extern void ioport_unmap(void __iomem *); +#endif + +#ifndef ARCH_HAS_IOREMAP_WC +#define ioremap_wc ioremap_nocache +#endif + +#ifndef ARCH_HAS_IOREMAP_WT +#define ioremap_wt ioremap_nocache +#endif + +#ifdef CONFIG_PCI +/* Destroy a virtual mapping cookie for a PCI BAR (memory or IO) */ +struct pci_dev; +extern void pci_iounmap(struct pci_dev *dev, void __iomem *); +#elif defined(CONFIG_GENERIC_IOMAP) +struct pci_dev; +static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) +{ } +#endif + +#include + +#endif diff --git a/drivers/include/asm-generic/pci_iomap.h b/drivers/include/asm-generic/pci_iomap.h new file mode 100644 index 0000000000..b1e17fcee2 --- /dev/null +++ b/drivers/include/asm-generic/pci_iomap.h @@ -0,0 +1,59 @@ +/* Generic I/O port emulation, based on MN10300 code + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. 
+ */ +#ifndef __ASM_GENERIC_PCI_IOMAP_H +#define __ASM_GENERIC_PCI_IOMAP_H + +struct pci_dev; +#ifdef CONFIG_PCI +/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */ +extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); +extern void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long max); +extern void __iomem *pci_iomap_range(struct pci_dev *dev, int bar, + unsigned long offset, + unsigned long maxlen); +extern void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar, + unsigned long offset, + unsigned long maxlen); +/* Create a virtual mapping cookie for a port on a given PCI device. + * Do not call this directly, it exists to make it easier for architectures + * to override */ +#ifdef CONFIG_NO_GENERIC_PCI_IOPORT_MAP +extern void __iomem *__pci_ioport_map(struct pci_dev *dev, unsigned long port, + unsigned int nr); +#else +#define __pci_ioport_map(dev, port, nr) ioport_map((port), (nr)) +#endif + +#elif defined(CONFIG_GENERIC_PCI_IOMAP) +static inline void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max) +{ + return NULL; +} + +static inline void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long max) +{ + return NULL; +} +static inline void __iomem *pci_iomap_range(struct pci_dev *dev, int bar, + unsigned long offset, + unsigned long maxlen) +{ + return NULL; +} +static inline void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar, + unsigned long offset, + unsigned long maxlen) +{ + return NULL; +} +#endif + +#endif /* __ASM_GENERIC_IO_H */ diff --git a/drivers/include/asm/atomic.h b/drivers/include/asm/atomic.h index e733c99991..b796127f71 100644 --- a/drivers/include/asm/atomic.h +++ b/drivers/include/asm/atomic.h @@ -3,7 +3,6 @@ #include #include -#include #include #include #include diff --git a/drivers/include/asm/barrier.h b/drivers/include/asm/barrier.h index ae973ec5ce..dd237805c8 100644 --- a/drivers/include/asm/barrier.h +++ b/drivers/include/asm/barrier.h @@ -53,31 +53,31 @@ * model and we should fall back to full barriers. 
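The pci_iomap() interface declared above pairs with the ioread32()/iowrite32() accessors from asm-generic/iomap.h. A hedged sketch of typical use in a PCI probe path, not from the patch; the BAR number and register offset are arbitrary examples:

static int example_probe_bars(struct pci_dev *pdev)
{
	void __iomem *regs;
	u32 status;

	regs = pci_iomap(pdev, 0, 0);		/* maxlen 0: map all of BAR 0 */
	if (!regs)
		return -ENOMEM;

	status = ioread32(regs + 0x10);		/* 0x10 is a made-up register offset */
	iowrite32(status | 0x1, regs + 0x10);

	pci_iounmap(pdev, regs);
	return 0;
}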
*/ -#define smp_store_release(p, v) \ +#define __smp_store_release(p, v) \ do { \ compiletime_assert_atomic_type(*p); \ - smp_mb(); \ + __smp_mb(); \ WRITE_ONCE(*p, v); \ } while (0) -#define smp_load_acquire(p) \ +#define __smp_load_acquire(p) \ ({ \ typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ - smp_mb(); \ + __smp_mb(); \ ___p1; \ }) #else /* regular x86 TSO memory ordering */ -#define smp_store_release(p, v) \ +#define __smp_store_release(p, v) \ do { \ compiletime_assert_atomic_type(*p); \ barrier(); \ WRITE_ONCE(*p, v); \ } while (0) -#define smp_load_acquire(p) \ +#define __smp_load_acquire(p) \ ({ \ typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ @@ -88,7 +88,9 @@ do { \ #endif /* Atomic operations are already serializing on x86 */ -#define smp_mb__before_atomic() barrier() -#define smp_mb__after_atomic() barrier() +#define __smp_mb__before_atomic() barrier() +#define __smp_mb__after_atomic() barrier() + +#include #endif /* _ASM_X86_BARRIER_H */ diff --git a/drivers/include/asm/bug.h b/drivers/include/asm/bug.h new file mode 100644 index 0000000000..ba38ebbace --- /dev/null +++ b/drivers/include/asm/bug.h @@ -0,0 +1,37 @@ +#ifndef _ASM_X86_BUG_H +#define _ASM_X86_BUG_H + +#define HAVE_ARCH_BUG + +#ifdef CONFIG_DEBUG_BUGVERBOSE + +#ifdef CONFIG_X86_32 +# define __BUG_C0 "2:\t.long 1b, %c0\n" +#else +# define __BUG_C0 "2:\t.long 1b - 2b, %c0 - 2b\n" +#endif + +#define BUG() \ +do { \ + asm volatile("1:\tud2\n" \ + ".pushsection __bug_table,\"a\"\n" \ + __BUG_C0 \ + "\t.word %c1, 0\n" \ + "\t.org 2b+%c2\n" \ + ".popsection" \ + : : "i" (__FILE__), "i" (__LINE__), \ + "i" (sizeof(struct bug_entry))); \ + unreachable(); \ +} while (0) + +#else +#define BUG() \ +do { \ + asm volatile("ud2"); \ + unreachable(); \ +} while (0) +#endif + +#include + +#endif /* _ASM_X86_BUG_H */ diff --git a/drivers/include/asm/cpufeature.h b/drivers/include/asm/cpufeature.h index 3ec7286c86..e77b74a0d2 100644 --- a/drivers/include/asm/cpufeature.h +++ b/drivers/include/asm/cpufeature.h @@ -12,7 +12,7 @@ #include #endif -#define NCAPINTS 14 /* N 32-bit words worth of info */ +#define NCAPINTS 16 /* N 32-bit words worth of info */ #define NBUGINTS 1 /* N 32-bit bug flags */ /* @@ -181,22 +181,17 @@ /* * Auxiliary flags: Linux defined - For features scattered in various - * CPUID levels like 0x6, 0xA etc, word 7 + * CPUID levels like 0x6, 0xA etc, word 7. + * + * Reuse free bits when adding new feature flags! 
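The smp_store_release()/smp_load_acquire() pair reworked above implements the usual publish/consume pattern: the release store orders all earlier stores before the flag, and the acquire load orders the flag before all later loads. A minimal sketch (the payload and flag variables are made up):

static int payload;
static int ready;

static void publish(void)
{
	payload = 42;			/* ordinary store */
	smp_store_release(&ready, 1);	/* 'payload' becomes visible before 'ready' */
}

static int consume(void)
{
	if (smp_load_acquire(&ready))	/* pairs with the release above */
		return payload;		/* guaranteed to observe 42 */
	return -1;
}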
*/ -#define X86_FEATURE_IDA ( 7*32+ 0) /* Intel Dynamic Acceleration */ -#define X86_FEATURE_ARAT ( 7*32+ 1) /* Always Running APIC Timer */ + #define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */ #define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */ -#define X86_FEATURE_PLN ( 7*32+ 5) /* Intel Power Limit Notification */ -#define X86_FEATURE_PTS ( 7*32+ 6) /* Intel Package Thermal Status */ -#define X86_FEATURE_DTHERM ( 7*32+ 7) /* Digital Thermal Sensor */ + #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ -#define X86_FEATURE_HWP ( 7*32+ 10) /* "hwp" Intel HWP */ -#define X86_FEATURE_HWP_NOTIFY ( 7*32+ 11) /* Intel HWP_NOTIFY */ -#define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */ -#define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */ -#define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */ + #define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */ /* Virtualization flags: Linux defined, word 8 */ @@ -205,16 +200,7 @@ #define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */ #define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */ #define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */ -#define X86_FEATURE_NPT ( 8*32+ 5) /* AMD Nested Page Table support */ -#define X86_FEATURE_LBRV ( 8*32+ 6) /* AMD LBR Virtualization support */ -#define X86_FEATURE_SVML ( 8*32+ 7) /* "svm_lock" AMD SVM locking MSR */ -#define X86_FEATURE_NRIPS ( 8*32+ 8) /* "nrip_save" AMD SVM next_rip save */ -#define X86_FEATURE_TSCRATEMSR ( 8*32+ 9) /* "tsc_scale" AMD TSC scaling support */ -#define X86_FEATURE_VMCBCLEAN ( 8*32+10) /* "vmcb_clean" AMD VMCB clean bits support */ -#define X86_FEATURE_FLUSHBYASID ( 8*32+11) /* AMD flush-by-ASID support */ -#define X86_FEATURE_DECODEASSISTS ( 8*32+12) /* AMD Decode Assists support */ -#define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */ -#define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */ + #define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */ #define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */ @@ -259,6 +245,30 @@ /* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */ #define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */ +/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */ +#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ +#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */ +#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */ +#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */ +#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */ +#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */ +#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */ +#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */ +#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. 
Preference */ +#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */ + +/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */ +#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */ +#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */ +#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */ +#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */ +#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */ +#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */ +#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */ +#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */ +#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */ +#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */ + /* * BUG word(s) */ @@ -279,6 +289,26 @@ #include #include +enum cpuid_leafs +{ + CPUID_1_EDX = 0, + CPUID_8000_0001_EDX, + CPUID_8086_0001_EDX, + CPUID_LNX_1, + CPUID_1_ECX, + CPUID_C000_0001_EDX, + CPUID_8000_0001_ECX, + CPUID_LNX_2, + CPUID_LNX_3, + CPUID_7_0_EBX, + CPUID_D_1_EAX, + CPUID_F_0_EDX, + CPUID_F_1_EDX, + CPUID_8000_0008_EBX, + CPUID_6_EAX, + CPUID_8000_000A_EDX, +}; + #ifdef CONFIG_X86_FEATURE_NAMES extern const char * const x86_cap_flags[NCAPINTS*32]; extern const char * const x86_power_flags[32]; @@ -356,60 +386,31 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; } while (0) #define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU) -#define cpu_has_de boot_cpu_has(X86_FEATURE_DE) #define cpu_has_pse boot_cpu_has(X86_FEATURE_PSE) #define cpu_has_tsc boot_cpu_has(X86_FEATURE_TSC) #define cpu_has_pge boot_cpu_has(X86_FEATURE_PGE) #define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC) -#define cpu_has_sep boot_cpu_has(X86_FEATURE_SEP) -#define cpu_has_mtrr boot_cpu_has(X86_FEATURE_MTRR) -#define cpu_has_mmx boot_cpu_has(X86_FEATURE_MMX) #define cpu_has_fxsr boot_cpu_has(X86_FEATURE_FXSR) #define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM) #define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2) -#define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3) -#define cpu_has_ssse3 boot_cpu_has(X86_FEATURE_SSSE3) #define cpu_has_aes boot_cpu_has(X86_FEATURE_AES) #define cpu_has_avx boot_cpu_has(X86_FEATURE_AVX) #define cpu_has_avx2 boot_cpu_has(X86_FEATURE_AVX2) -#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT) -#define cpu_has_nx boot_cpu_has(X86_FEATURE_NX) -#define cpu_has_xstore boot_cpu_has(X86_FEATURE_XSTORE) -#define cpu_has_xstore_enabled boot_cpu_has(X86_FEATURE_XSTORE_EN) -#define cpu_has_xcrypt boot_cpu_has(X86_FEATURE_XCRYPT) -#define cpu_has_xcrypt_enabled boot_cpu_has(X86_FEATURE_XCRYPT_EN) -#define cpu_has_ace2 boot_cpu_has(X86_FEATURE_ACE2) -#define cpu_has_ace2_enabled boot_cpu_has(X86_FEATURE_ACE2_EN) -#define cpu_has_phe boot_cpu_has(X86_FEATURE_PHE) -#define cpu_has_phe_enabled boot_cpu_has(X86_FEATURE_PHE_EN) -#define cpu_has_pmm boot_cpu_has(X86_FEATURE_PMM) -#define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN) -#define cpu_has_ds boot_cpu_has(X86_FEATURE_DS) -#define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS) #define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLUSH) -#define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS) #define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES) #define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON) #define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT) -#define cpu_has_xmm4_1 boot_cpu_has(X86_FEATURE_XMM4_1) -#define cpu_has_xmm4_2 
boot_cpu_has(X86_FEATURE_XMM4_2) #define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC) #define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE) -#define cpu_has_xsaveopt boot_cpu_has(X86_FEATURE_XSAVEOPT) #define cpu_has_xsaves boot_cpu_has(X86_FEATURE_XSAVES) #define cpu_has_osxsave boot_cpu_has(X86_FEATURE_OSXSAVE) #define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR) -#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ) -#define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE) -#define cpu_has_perfctr_nb boot_cpu_has(X86_FEATURE_PERFCTR_NB) -#define cpu_has_perfctr_l2 boot_cpu_has(X86_FEATURE_PERFCTR_L2) -#define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8) -#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16) -#define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU) -#define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT) -#define cpu_has_bpext boot_cpu_has(X86_FEATURE_BPEXT) +/* + * Do not add any more of those clumsy macros - use static_cpu_has_safe() for + * fast paths and boot_cpu_has() otherwise! + */ -#if __GNUC__ >= 4 +#if __GNUC__ >= 4 && defined(CONFIG_X86_FAST_FEATURE_TESTS) extern void warn_pre_alternatives(void); extern bool __static_cpu_has_safe(u16 bit); diff --git a/drivers/include/asm/fixmap.h b/drivers/include/asm/fixmap.h index f80d70009f..6d7d0e52ed 100644 --- a/drivers/include/asm/fixmap.h +++ b/drivers/include/asm/fixmap.h @@ -19,7 +19,6 @@ #include #include #include -#include #ifdef CONFIG_X86_32 #include #include @@ -72,10 +71,6 @@ enum fixed_addresses { #ifdef CONFIG_X86_VSYSCALL_EMULATION VSYSCALL_PAGE = (FIXADDR_TOP - VSYSCALL_ADDR) >> PAGE_SHIFT, #endif -#ifdef CONFIG_PARAVIRT_CLOCK - PVCLOCK_FIXMAP_BEGIN, - PVCLOCK_FIXMAP_END = PVCLOCK_FIXMAP_BEGIN+PVCLOCK_VSYSCALL_NR_PAGES-1, -#endif #endif FIX_DBGP_BASE, FIX_EARLYCON_MEM_BASE, diff --git a/drivers/include/asm/io.h b/drivers/include/asm/io.h index e69de29bb2..4d24105e6b 100644 --- a/drivers/include/asm/io.h +++ b/drivers/include/asm/io.h @@ -0,0 +1,328 @@ +#ifndef _ASM_X86_IO_H +#define _ASM_X86_IO_H + +/* + * This file contains the definitions for the x86 IO instructions + * inb/inw/inl/outb/outw/outl and the "string versions" of the same + * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing" + * versions of the single-IO instructions (inb_p/inw_p/..). + * + * This file is not meant to be obfuscating: it's just complicated + * to (a) handle it all in a way that makes gcc able to optimize it + * as well as possible and (b) trying to avoid writing the same thing + * over and over again with slight variations and possibly making a + * mistake somewhere. + */ + +/* + * Thanks to James van Artsdalen for a better timing-fix than + * the two short jumps: using outb's to a nonexistent port seems + * to guarantee better timings even on fast machines. + * + * On the other hand, I'd like to be sure of a non-existent port: + * I feel a bit unsafe about using 0x80 (should be safe, though) + * + * Linus + */ + + /* + * Bit simplified and optimized by Jan Hubicka + * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999. 
+ * + * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added, + * isa_read[wl] and isa_write[wl] fixed + * - Arnaldo Carvalho de Melo + */ + +#define ARCH_HAS_IOREMAP_WC +#define ARCH_HAS_IOREMAP_WT + +#include +#include + +#define build_mmio_read(name, size, type, reg, barrier) \ +static inline type name(const volatile void __iomem *addr) \ +{ type ret; asm volatile("mov" size " %1,%0":reg (ret) \ +:"m" (*(volatile type __force *)addr) barrier); return ret; } + +#define build_mmio_write(name, size, type, reg, barrier) \ +static inline void name(type val, volatile void __iomem *addr) \ +{ asm volatile("mov" size " %0,%1": :reg (val), \ +"m" (*(volatile type __force *)addr) barrier); } + +build_mmio_read(readb, "b", unsigned char, "=q", :"memory") +build_mmio_read(readw, "w", unsigned short, "=r", :"memory") +build_mmio_read(readl, "l", unsigned int, "=r", :"memory") + +build_mmio_read(__readb, "b", unsigned char, "=q", ) +build_mmio_read(__readw, "w", unsigned short, "=r", ) +build_mmio_read(__readl, "l", unsigned int, "=r", ) + +build_mmio_write(writeb, "b", unsigned char, "q", :"memory") +build_mmio_write(writew, "w", unsigned short, "r", :"memory") +build_mmio_write(writel, "l", unsigned int, "r", :"memory") + +build_mmio_write(__writeb, "b", unsigned char, "q", ) +build_mmio_write(__writew, "w", unsigned short, "r", ) +build_mmio_write(__writel, "l", unsigned int, "r", ) + +#define readb_relaxed(a) __readb(a) +#define readw_relaxed(a) __readw(a) +#define readl_relaxed(a) __readl(a) +#define __raw_readb __readb +#define __raw_readw __readw +#define __raw_readl __readl + +#define writeb_relaxed(v, a) __writeb(v, a) +#define writew_relaxed(v, a) __writew(v, a) +#define writel_relaxed(v, a) __writel(v, a) +#define __raw_writeb __writeb +#define __raw_writew __writew +#define __raw_writel __writel + +#define mmiowb() barrier() + +#ifdef CONFIG_X86_64 + +build_mmio_read(readq, "q", unsigned long, "=r", :"memory") +build_mmio_write(writeq, "q", unsigned long, "r", :"memory") + +#define readq_relaxed(a) readq(a) +#define writeq_relaxed(v, a) writeq(v, a) + +#define __raw_readq(a) readq(a) +#define __raw_writeq(val, addr) writeq(val, addr) + +/* Let people know that we have them */ +#define readq readq +#define writeq writeq + +#endif + +/** + * virt_to_phys - map virtual addresses to physical + * @address: address to remap + * + * The returned physical address is the physical (CPU) mapping for + * the memory address given. It is only valid to use this function on + * addresses directly mapped or allocated via kmalloc. + * + * This function does not give bus mappings for DMA transfers. In + * almost all conceivable cases a device driver should not be using + * this function + */ + + +/** + * phys_to_virt - map physical address to virtual + * @address: address to remap + * + * The returned virtual address is a current CPU mapping for + * the memory address given. It is only valid to use this function on + * addresses that have a kernel mapping + * + * This function does not handle bus mappings for DMA transfers. In + * almost all conceivable cases a device driver should not be using + * this function + */ + +#define isa_page_to_bus(page) ((unsigned int)page_to_phys(page)) +#define isa_bus_to_virt phys_to_virt + +/* + * However PCI ones are not necessarily 1:1 and therefore these interfaces + * are forbidden in portable PCI drivers. + * + * Allow them on x86 for legacy drivers, though. 
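For reference, build_mmio_read(readl, "l", unsigned int, "=r", :"memory") above expands to roughly the following (hand-expanded here, not part of the patch):

static inline unsigned int readl(const volatile void __iomem *addr)
{
	unsigned int ret;
	asm volatile("movl %1,%0"
		     : "=r" (ret)
		     : "m" (*(volatile unsigned int __force *)addr)
		     : "memory");
	return ret;
}

The __readl variant is generated the same way but without the "memory" clobber, which is what readl_relaxed() and __raw_readl map to.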
+ */ +#define virt_to_bus virt_to_phys +#define bus_to_virt phys_to_virt + +/** + * ioremap - map bus memory into CPU space + * @offset: bus address of the memory + * @size: size of the resource to map + * + * ioremap performs a platform specific sequence of operations to + * make bus memory CPU accessible via the readb/readw/readl/writeb/ + * writew/writel functions and the other mmio helpers. The returned + * address is not guaranteed to be usable directly as a virtual + * address. + * + * If the area you are trying to map is a PCI BAR you should have a + * look at pci_iomap(). + */ +//extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size); +extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size); +#define ioremap_uc ioremap_uc + +extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size); +extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, + unsigned long prot_val); + + /* + * The default ioremap() behavior is non-cached: + */ +//static inline void __iomem *ioremap(resource_size_t offset, unsigned long size) +//{ +// return ioremap_nocache(offset, size); +//} + +//extern void iounmap(volatile void __iomem *addr); + +extern void set_iounmap_nonlazy(void); + +#ifdef __KERNEL__ + +#include + +/* + * Convert a virtual cached pointer to an uncached pointer + */ +#define xlate_dev_kmem_ptr(p) p + +static inline void +memset_io(volatile void __iomem *addr, unsigned char val, size_t count) +{ + memset((void __force *)addr, val, count); +} + +static inline void +memcpy_fromio(void *dst, const volatile void __iomem *src, size_t count) +{ + memcpy(dst, (const void __force *)src, count); +} + +static inline void +memcpy_toio(volatile void __iomem *dst, const void *src, size_t count) +{ + memcpy((void __force *)dst, src, count); +} + +/* + * ISA space is 'always mapped' on a typical x86 system, no need to + * explicitly ioremap() it. The fact that the ISA IO space is mapped + * to PAGE_OFFSET is pure coincidence - it does not mean ISA values + * are physical addresses. The following constant pointer can be + * used as the IO-area pointer (it can be iounmapped as well, so the + * analogy with PCI is quite large): + */ +#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET)) + +/* + * Cache management + * + * This needed for two cases + * 1. Out of order aware processors + * 2. 
Accidentally out of order processors (PPro errata #51) + */ + +static inline void flush_write_buffers(void) +{ +#if defined(CONFIG_X86_PPRO_FENCE) + asm volatile("lock; addl $0,0(%%esp)": : :"memory"); +#endif +} + +#endif /* __KERNEL__ */ + +extern void native_io_delay(void); + +extern int io_delay_type; +extern void io_delay_init(void); + +#if defined(CONFIG_PARAVIRT) +#include +#else + +static inline void slow_down_io(void) +{ + native_io_delay(); +#ifdef REALLY_SLOW_IO + native_io_delay(); + native_io_delay(); + native_io_delay(); +#endif +} + +#endif + +#define BUILDIO(bwl, bw, type) \ +static inline void out##bwl(unsigned type value, int port) \ +{ \ + asm volatile("out" #bwl " %" #bw "0, %w1" \ + : : "a"(value), "Nd"(port)); \ +} \ + \ +static inline unsigned type in##bwl(int port) \ +{ \ + unsigned type value; \ + asm volatile("in" #bwl " %w1, %" #bw "0" \ + : "=a"(value) : "Nd"(port)); \ + return value; \ +} \ + \ +static inline void out##bwl##_p(unsigned type value, int port) \ +{ \ + out##bwl(value, port); \ + slow_down_io(); \ +} \ + \ +static inline unsigned type in##bwl##_p(int port) \ +{ \ + unsigned type value = in##bwl(port); \ + slow_down_io(); \ + return value; \ +} \ + \ +static inline void outs##bwl(int port, const void *addr, unsigned long count) \ +{ \ + asm volatile("rep; outs" #bwl \ + : "+S"(addr), "+c"(count) : "d"(port)); \ +} \ + \ +static inline void ins##bwl(int port, void *addr, unsigned long count) \ +{ \ + asm volatile("rep; ins" #bwl \ + : "+D"(addr), "+c"(count) : "d"(port)); \ +} + +BUILDIO(b, b, char) +BUILDIO(w, w, short) +BUILDIO(l, , int) + +extern void *xlate_dev_mem_ptr(phys_addr_t phys); +extern void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr); + +extern int ioremap_change_attr(unsigned long vaddr, unsigned long size, + enum page_cache_mode pcm); +//extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size); +extern void __iomem *ioremap_wt(resource_size_t offset, unsigned long size); + +extern bool is_early_ioremap_ptep(pte_t *ptep); + +#ifdef CONFIG_XEN +#include +struct bio_vec; + +extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1, + const struct bio_vec *vec2); + +#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \ + (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \ + (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2))) +#endif /* CONFIG_XEN */ + +#define IO_SPACE_LIMIT 0xffff + +#ifdef CONFIG_MTRR +extern int __must_check arch_phys_wc_index(int handle); +#define arch_phys_wc_index arch_phys_wc_index + +extern int __must_check arch_phys_wc_add(unsigned long base, + unsigned long size); +extern void arch_phys_wc_del(int handle); +#define arch_phys_wc_add arch_phys_wc_add +#endif + +#endif /* _ASM_X86_IO_H */ diff --git a/drivers/include/asm/msr-index.h b/drivers/include/asm/msr-index.h index 690b4027e1..b05402ef3b 100644 --- a/drivers/include/asm/msr-index.h +++ b/drivers/include/asm/msr-index.h @@ -321,6 +321,7 @@ #define MSR_F15H_PERF_CTR 0xc0010201 #define MSR_F15H_NB_PERF_CTL 0xc0010240 #define MSR_F15H_NB_PERF_CTR 0xc0010241 +#define MSR_F15H_IC_CFG 0xc0011021 /* Fam 10h MSRs */ #define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058 diff --git a/drivers/include/asm/msr.h b/drivers/include/asm/msr.h index a7b1622cdd..6999188788 100644 --- a/drivers/include/asm/msr.h +++ b/drivers/include/asm/msr.h @@ -32,6 +32,16 @@ struct msr_regs_info { int err; }; +struct saved_msr { + bool valid; + struct msr_info info; +}; + +struct saved_msrs { + unsigned int num; + struct saved_msr *array; +}; + static inline unsigned long long 
native_read_tscp(unsigned int *aux) { unsigned long low, high; @@ -161,7 +171,7 @@ static inline void wrmsr(unsigned msr, unsigned low, unsigned high) static inline void wrmsrl(unsigned msr, u64 val) { - native_write_msr(msr, (u32)val, (u32)(val >> 32)); + native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32)); } /* wrmsr with exception handling */ diff --git a/drivers/include/asm/pgtable.h b/drivers/include/asm/pgtable.h index df28c63a7c..f4e9ab8bd1 100644 --- a/drivers/include/asm/pgtable.h +++ b/drivers/include/asm/pgtable.h @@ -69,9 +69,6 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page); #define pmd_clear(pmd) native_pmd_clear(pmd) #define pte_update(mm, addr, ptep) do { } while (0) -#define pte_update_defer(mm, addr, ptep) do { } while (0) -#define pmd_update(mm, addr, ptep) do { } while (0) -#define pmd_update_defer(mm, addr, ptep) do { } while (0) #define pgd_val(x) native_pgd_val(x) #define __pgd(x) native_make_pgd(x) @@ -165,20 +162,22 @@ static inline int pmd_large(pmd_t pte) } #ifdef CONFIG_TRANSPARENT_HUGEPAGE -static inline int pmd_trans_splitting(pmd_t pmd) -{ - return pmd_val(pmd) & _PAGE_SPLITTING; -} - static inline int pmd_trans_huge(pmd_t pmd) { - return pmd_val(pmd) & _PAGE_PSE; + return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE; } static inline int has_transparent_hugepage(void) { return cpu_has_pse; } + +#ifdef __HAVE_ARCH_PTE_DEVMAP +static inline int pmd_devmap(pmd_t pmd) +{ + return !!(pmd_val(pmd) & _PAGE_DEVMAP); +} +#endif #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ static inline pte_t pte_set_flags(pte_t pte, pteval_t set) @@ -255,6 +254,11 @@ static inline pte_t pte_mkspecial(pte_t pte) return pte_set_flags(pte, _PAGE_SPECIAL); } +static inline pte_t pte_mkdevmap(pte_t pte) +{ + return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP); +} + static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set) { pmdval_t v = native_pmd_val(pmd); @@ -274,6 +278,11 @@ static inline pmd_t pmd_mkold(pmd_t pmd) return pmd_clear_flags(pmd, _PAGE_ACCESSED); } +static inline pmd_t pmd_mkclean(pmd_t pmd) +{ + return pmd_clear_flags(pmd, _PAGE_DIRTY); +} + static inline pmd_t pmd_wrprotect(pmd_t pmd) { return pmd_clear_flags(pmd, _PAGE_RW); @@ -284,6 +293,11 @@ static inline pmd_t pmd_mkdirty(pmd_t pmd) return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY); } +static inline pmd_t pmd_mkdevmap(pmd_t pmd) +{ + return pmd_set_flags(pmd, _PAGE_DEVMAP); +} + static inline pmd_t pmd_mkhuge(pmd_t pmd) { return pmd_set_flags(pmd, _PAGE_PSE); @@ -465,6 +479,13 @@ static inline int pte_present(pte_t a) return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE); } +#ifdef __HAVE_ARCH_PTE_DEVMAP +static inline int pte_devmap(pte_t a) +{ + return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP; +} +#endif + #define pte_accessible pte_accessible static inline bool pte_accessible(struct mm_struct *mm, pte_t a) { @@ -731,14 +752,9 @@ static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr, * updates should either be sets, clears, or set_pte_atomic for P->P * transitions, which means this hook should only be called for user PTEs. * This hook implies a P->P protection or access change has taken place, which - * requires a subsequent TLB flush. 
The notification can optionally be delayed - * until the TLB flush event by using the pte_update_defer form of the - * interface, but care must be taken to assure that the flush happens while - * still holding the same page table lock so that the shadow and primary pages - * do not become out of sync on SMP. + * requires a subsequent TLB flush. */ #define pte_update(mm, addr, ptep) do { } while (0) -#define pte_update_defer(mm, addr, ptep) do { } while (0) #endif /* @@ -816,10 +832,6 @@ extern int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp); -#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH -extern void pmdp_splitting_flush(struct vm_area_struct *vma, - unsigned long addr, pmd_t *pmdp); - #define __HAVE_ARCH_PMD_WRITE static inline int pmd_write(pmd_t pmd) { @@ -830,9 +842,7 @@ static inline int pmd_write(pmd_t pmd) static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp) { - pmd_t pmd = native_pmdp_get_and_clear(pmdp); - pmd_update(mm, addr, pmdp); - return pmd; + return native_pmdp_get_and_clear(pmdp); } #define __HAVE_ARCH_PMDP_SET_WRPROTECT @@ -840,7 +850,6 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp) { clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp); - pmd_update(mm, addr, pmdp); } /* diff --git a/drivers/include/asm/pgtable_types.h b/drivers/include/asm/pgtable_types.h index 79c91853e5..ac2ac485ed 100644 --- a/drivers/include/asm/pgtable_types.h +++ b/drivers/include/asm/pgtable_types.h @@ -22,9 +22,10 @@ #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */ #define _PAGE_BIT_SPECIAL _PAGE_BIT_SOFTW1 #define _PAGE_BIT_CPA_TEST _PAGE_BIT_SOFTW1 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_SOFTW2 /* only valid on a PSE pmd */ #define _PAGE_BIT_HIDDEN _PAGE_BIT_SOFTW3 /* hidden by kmemcheck */ #define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_SOFTW3 /* software dirty tracking */ +#define _PAGE_BIT_SOFTW4 58 /* available for programmer */ +#define _PAGE_BIT_DEVMAP _PAGE_BIT_SOFTW4 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */ /* If _PAGE_BIT_PRESENT is clear, we use these: */ @@ -46,7 +47,6 @@ #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE) #define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL) #define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST) -#define _PAGE_SPLITTING (_AT(pteval_t, 1) << _PAGE_BIT_SPLITTING) #define __HAVE_ARCH_PTE_SPECIAL #ifdef CONFIG_KMEMCHECK @@ -85,8 +85,11 @@ #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) +#define _PAGE_DEVMAP (_AT(u64, 1) << _PAGE_BIT_DEVMAP) +#define __HAVE_ARCH_PTE_DEVMAP #else #define _PAGE_NX (_AT(pteval_t, 0)) +#define _PAGE_DEVMAP (_AT(pteval_t, 0)) #endif #define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE) diff --git a/drivers/include/asm/x86_init.h b/drivers/include/asm/x86_init.h index dfd40dab99..cc5c18a2e0 100644 --- a/drivers/include/asm/x86_init.h +++ b/drivers/include/asm/x86_init.h @@ -82,13 +82,11 @@ struct x86_init_paging { * struct x86_init_timers - platform specific timer setup * @setup_perpcu_clockev: set up the per cpu clock event device for the * boot cpu - * @tsc_pre_init: platform function called before TSC init * @timer_init: initialize the platform timer (default PIT/HPET) * @wallclock_init: init the wallclock device */ struct x86_init_timers { void (*setup_percpu_clockev)(void); - void (*tsc_pre_init)(void); void (*timer_init)(void); void (*wallclock_init)(void); }; diff --git 
a/drivers/include/drm/drmP.h b/drivers/include/drm/drmP.h index c86bede8b5..59c783a9eb 100644 --- a/drivers/include/drm/drmP.h +++ b/drivers/include/drm/drmP.h @@ -50,6 +50,7 @@ #include #include #include +#include #include #include @@ -919,8 +920,7 @@ static inline int drm_debugfs_remove_files(const struct drm_info_list *files, #endif extern struct dma_buf *drm_gem_prime_export(struct drm_device *dev, - struct drm_gem_object *obj, - int flags); + struct drm_gem_object *obj, int flags); extern int drm_gem_prime_handle_to_fd(struct drm_device *dev, struct drm_file *file_priv, uint32_t handle, uint32_t flags, int *prime_fd); @@ -950,7 +950,7 @@ void drm_dev_ref(struct drm_device *dev); void drm_dev_unref(struct drm_device *dev); int drm_dev_register(struct drm_device *dev, unsigned long flags); void drm_dev_unregister(struct drm_device *dev); -int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...); +int drm_dev_set_unique(struct drm_device *dev, const char *name); struct drm_minor *drm_minor_acquire(unsigned int minor_id); void drm_minor_release(struct drm_minor *minor); @@ -971,6 +971,11 @@ extern void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver); extern int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_driver *driver); +static inline int drm_pci_set_busid(struct drm_device *dev, + struct drm_master *master) +{ + return -ENOSYS; +} #endif #define DRM_PCIE_SPEED_25 1 @@ -978,6 +983,11 @@ extern int drm_get_pci_dev(struct pci_dev *pdev, #define DRM_PCIE_SPEED_80 4 extern int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *speed_mask); +extern int drm_pcie_get_max_link_width(struct drm_device *dev, u32 *mlw); + +/* platform section */ +extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device); +extern int drm_platform_set_busid(struct drm_device *d, struct drm_master *m); /* returns true if currently okay to sleep */ static __inline__ bool drm_can_sleep(void) @@ -985,6 +995,9 @@ static __inline__ bool drm_can_sleep(void) return true; } +/* helper for handling conditionals in various for_each macros */ +#define for_each_if(condition) if (!(condition)) {} else + static __inline__ int drm_device_is_pcie(struct drm_device *dev) { return pci_find_capability(dev->pdev, PCI_CAP_ID_EXP); diff --git a/drivers/include/drm/drm_atomic.h b/drivers/include/drm/drm_atomic.h index 4b74c97d29..d3eaa5df18 100644 --- a/drivers/include/drm/drm_atomic.h +++ b/drivers/include/drm/drm_atomic.h @@ -130,10 +130,6 @@ int __must_check drm_atomic_add_affected_planes(struct drm_atomic_state *state, struct drm_crtc *crtc); -int -drm_atomic_connectors_for_crtc(struct drm_atomic_state *state, - struct drm_crtc *crtc); - void drm_atomic_legacy_backoff(struct drm_atomic_state *state); void @@ -149,7 +145,7 @@ int __must_check drm_atomic_async_commit(struct drm_atomic_state *state); ((connector) = (state)->connectors[__i], \ (connector_state) = (state)->connector_states[__i], 1); \ (__i)++) \ - if (connector) + for_each_if (connector) #define for_each_crtc_in_state(state, crtc, crtc_state, __i) \ for ((__i) = 0; \ @@ -157,7 +153,7 @@ int __must_check drm_atomic_async_commit(struct drm_atomic_state *state); ((crtc) = (state)->crtcs[__i], \ (crtc_state) = (state)->crtc_states[__i], 1); \ (__i)++) \ - if (crtc_state) + for_each_if (crtc_state) #define for_each_plane_in_state(state, plane, plane_state, __i) \ for ((__i) = 0; \ @@ -165,7 +161,7 @@ int __must_check drm_atomic_async_commit(struct drm_atomic_state 
*state); ((plane) = (state)->planes[__i], \ (plane_state) = (state)->plane_states[__i], 1); \ (__i)++) \ - if (plane_state) + for_each_if (plane_state) static inline bool drm_atomic_crtc_needs_modeset(struct drm_crtc_state *state) { diff --git a/drivers/include/drm/drm_atomic_helper.h b/drivers/include/drm/drm_atomic_helper.h index 8cba54a2a0..fe5efada9d 100644 --- a/drivers/include/drm/drm_atomic_helper.h +++ b/drivers/include/drm/drm_atomic_helper.h @@ -42,6 +42,10 @@ int drm_atomic_helper_commit(struct drm_device *dev, struct drm_atomic_state *state, bool async); +bool drm_atomic_helper_framebuffer_changed(struct drm_device *dev, + struct drm_atomic_state *old_state, + struct drm_crtc *crtc); + void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, struct drm_atomic_state *old_state); @@ -62,6 +66,8 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev, void drm_atomic_helper_cleanup_planes(struct drm_device *dev, struct drm_atomic_state *old_state); void drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state); +void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc *crtc, + bool atomic); void drm_atomic_helper_swap_state(struct drm_device *dev, struct drm_atomic_state *state); @@ -81,6 +87,12 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set); int __drm_atomic_helper_set_config(struct drm_mode_set *set, struct drm_atomic_state *state); +int drm_atomic_helper_disable_all(struct drm_device *dev, + struct drm_modeset_acquire_ctx *ctx); +struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev); +int drm_atomic_helper_resume(struct drm_device *dev, + struct drm_atomic_state *state); + int drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc, struct drm_property *property, uint64_t val); @@ -118,6 +130,8 @@ void __drm_atomic_helper_plane_destroy_state(struct drm_plane *plane, void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane, struct drm_plane_state *state); +void __drm_atomic_helper_connector_reset(struct drm_connector *connector, + struct drm_connector_state *conn_state); void drm_atomic_helper_connector_reset(struct drm_connector *connector); void __drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector, diff --git a/drivers/include/drm/drm_crtc.h b/drivers/include/drm/drm_crtc.h index e4a72db33a..6caf6e5387 100644 --- a/drivers/include/drm/drm_crtc.h +++ b/drivers/include/drm/drm_crtc.h @@ -85,7 +85,11 @@ static inline uint64_t I642U64(int64_t val) return (uint64_t)*((uint64_t *)&val); } -/* rotation property bits */ +/* + * Rotation property bits. DRM_ROTATE_ rotates the image by the + * specified amount in degrees in counter clockwise direction. DRM_REFLECT_X and + * DRM_REFLECT_Y reflects the image along the specified axis prior to rotation + */ #define DRM_ROTATE_MASK 0x0f #define DRM_ROTATE_0 0 #define DRM_ROTATE_90 1 @@ -158,23 +162,60 @@ struct drm_tile_group { u8 group_data[8]; }; +/** + * struct drm_framebuffer_funcs - framebuffer hooks + */ struct drm_framebuffer_funcs { - /* note: use drm_framebuffer_remove() */ + /** + * @destroy: + * + * Clean up framebuffer resources, specifically also unreference the + * backing storage. The core guarantees to call this function for every + * framebuffer successfully created by ->fb_create() in + * &drm_mode_config_funcs. Drivers must also call + * drm_framebuffer_cleanup() to release DRM core resources for this + * framebuffer. 
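The for_each_if() conversion above addresses the dangling-else hazard of ending an iterator macro with a bare if: since for_each_if(), defined in the drmP.h hunk earlier as "if (!(condition)) {} else", already owns its else, an else written by the macro's caller can no longer bind to the macro's hidden if by accident. A small illustration, with a made-up iterator modeled on the drm_atomic macros:

/* Iterator ending in a bare 'if': a caller's 'else' after the loop body
 * silently binds to this 'if' instead of the caller's own 'if'. */
#define for_each_nonnull_bad(p, arr, i) \
	for ((i) = 0; (i) < 4 && ((p) = (arr)[i], 1); (i)++) \
		if (p)

/* Same iterator using for_each_if(): the hidden 'if' is already paired
 * with an 'else', so the caller's 'else' binds where intended. */
#define for_each_nonnull_good(p, arr, i) \
	for ((i) = 0; (i) < 4 && ((p) = (arr)[i], 1); (i)++) \
		for_each_if (p)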
+ */ void (*destroy)(struct drm_framebuffer *framebuffer); + + /** + * @create_handle: + * + * Create a buffer handle in the driver-specific buffer manager (either + * GEM or TTM) valid for the passed-in struct &drm_file. This is used by + * the core to implement the GETFB IOCTL, which returns (for + * sufficiently priviledged user) also a native buffer handle. This can + * be used for seamless transitions between modesetting clients by + * copying the current screen contents to a private buffer and blending + * between that and the new contents. + * + * GEM based drivers should call drm_gem_handle_create() to create the + * handle. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ int (*create_handle)(struct drm_framebuffer *fb, struct drm_file *file_priv, unsigned int *handle); - /* - * Optional callback for the dirty fb ioctl. + /** + * @dirty: * - * Userspace can notify the driver via this callback - * that a area of the framebuffer has changed and should - * be flushed to the display hardware. + * Optional callback for the dirty fb IOCTL. * - * See documentation in drm_mode.h for the struct - * drm_mode_fb_dirty_cmd for more information as all - * the semantics and arguments have a one to one mapping - * on this function. + * Userspace can notify the driver via this callback that an area of the + * framebuffer has changed and should be flushed to the display + * hardware. This can also be used internally, e.g. by the fbdev + * emulation, though that's not the case currently. + * + * See documentation in drm_mode.h for the struct drm_mode_fb_dirty_cmd + * for more information as all the semantics and arguments have a one to + * one mapping on this function. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. */ int (*dirty)(struct drm_framebuffer *framebuffer, struct drm_file *file_priv, unsigned flags, @@ -250,6 +291,11 @@ struct drm_plane; struct drm_bridge; struct drm_atomic_state; +struct drm_crtc_helper_funcs; +struct drm_encoder_helper_funcs; +struct drm_connector_helper_funcs; +struct drm_plane_helper_funcs; + /** * struct drm_crtc_state - mutable CRTC state * @crtc: backpointer to the CRTC @@ -260,6 +306,7 @@ struct drm_atomic_state; * @active_changed: crtc_state->active has been toggled. * @connectors_changed: connectors to this crtc have been updated * @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes + * @connector_mask: bitmask of (1 << drm_connector_index(connector)) of attached connectors * @last_vblank_count: for helpers and drivers to capture the vblank of the * update to ensure framebuffer cleanup isn't done too early * @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings @@ -293,6 +340,8 @@ struct drm_crtc_state { */ u32 plane_mask; + u32 connector_mask; + /* last_vblank_count: for vblank waits before cleanup */ u32 last_vblank_count; @@ -311,23 +360,6 @@ struct drm_crtc_state { /** * struct drm_crtc_funcs - control CRTCs for a given device - * @save: save CRTC state - * @restore: restore CRTC state - * @reset: reset CRTC after state has been invalidated (e.g. 
resume) - * @cursor_set: setup the cursor - * @cursor_set2: setup the cursor with hotspot, superseeds @cursor_set if set - * @cursor_move: move the cursor - * @gamma_set: specify color ramp for CRTC - * @destroy: deinit and free object - * @set_property: called when a property is changed - * @set_config: apply a new CRTC configuration - * @page_flip: initiate a page flip - * @atomic_duplicate_state: duplicate the atomic state for this CRTC - * @atomic_destroy_state: destroy an atomic state for this CRTC - * @atomic_set_property: set a property on an atomic state for this CRTC - * (do not call directly, use drm_atomic_crtc_set_property()) - * @atomic_get_property: get a property on an atomic state for this CRTC - * (do not call directly, use drm_atomic_crtc_get_property()) * * The drm_crtc_funcs structure is the central CRTC management structure * in the DRM. Each CRTC controls one or more connectors (note that the name @@ -339,54 +371,317 @@ struct drm_crtc_state { * bus accessors. */ struct drm_crtc_funcs { - /* Save CRTC state */ - void (*save)(struct drm_crtc *crtc); /* suspend? */ - /* Restore CRTC state */ - void (*restore)(struct drm_crtc *crtc); /* resume? */ - /* Reset CRTC state */ + /** + * @reset: + * + * Reset CRTC hardware and software state to off. This function isn't + * called by the core directly, only through drm_mode_config_reset(). + * It's not a helper hook only for historical reasons. + * + * Atomic drivers can use drm_atomic_helper_crtc_reset() to reset + * atomic state using this hook. + */ void (*reset)(struct drm_crtc *crtc); - /* cursor controls */ + /** + * @cursor_set: + * + * Update the cursor image. The cursor position is relative to the CRTC + * and can be partially or fully outside of the visible area. + * + * Note that contrary to all other KMS functions the legacy cursor entry + * points don't take a framebuffer object, but instead take directly a + * raw buffer object id from the driver's buffer manager (which is + * either GEM or TTM for current drivers). + * + * This entry point is deprecated, drivers should instead implement + * universal plane support and register a proper cursor plane using + * drm_crtc_init_with_planes(). + * + * This callback is optional + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ int (*cursor_set)(struct drm_crtc *crtc, struct drm_file *file_priv, uint32_t handle, uint32_t width, uint32_t height); + + /** + * @cursor_set2: + * + * Update the cursor image, including hotspot information. The hotspot + * must not affect the cursor position in CRTC coordinates, but is only + * meant as a hint for virtualized display hardware to coordinate the + * guests and hosts cursor position. The cursor hotspot is relative to + * the cursor image. Otherwise this works exactly like @cursor_set. + * + * This entry point is deprecated, drivers should instead implement + * universal plane support and register a proper cursor plane using + * drm_crtc_init_with_planes(). + * + * This callback is optional. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ int (*cursor_set2)(struct drm_crtc *crtc, struct drm_file *file_priv, uint32_t handle, uint32_t width, uint32_t height, int32_t hot_x, int32_t hot_y); + + /** + * @cursor_move: + * + * Update the cursor position. The cursor does not need to be visible + * when this hook is called. 
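All three cursor hooks are marked deprecated here in favour of a real cursor plane. A minimal sketch of that registration, assuming a hypothetical foo driver (struct foo_crtc, foo_cursor_plane_funcs and foo_crtc_funcs are placeholders, not part of this patch); the trailing printf-style name arguments are the ones this patch adds to the init functions:

#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>

struct foo_crtc {
	struct drm_crtc base;
	struct drm_plane cursor;
	int pipe;
};

static const uint32_t foo_cursor_formats[] = { DRM_FORMAT_ARGB8888 };

/* foo_cursor_plane_funcs and foo_crtc_funcs are assumed to exist elsewhere. */
static int foo_crtc_create(struct drm_device *dev, struct foo_crtc *fcrtc,
			   struct drm_plane *primary)
{
	int ret;

	/* Register a proper cursor plane instead of the legacy cursor hooks. */
	ret = drm_universal_plane_init(dev, &fcrtc->cursor, 1 << fcrtc->pipe,
				       &foo_cursor_plane_funcs,
				       foo_cursor_formats,
				       ARRAY_SIZE(foo_cursor_formats),
				       DRM_PLANE_TYPE_CURSOR,
				       "cursor-%d", fcrtc->pipe);
	if (ret)
		return ret;

	/* The cursor plane is handed to the CRTC at init time. */
	return drm_crtc_init_with_planes(dev, &fcrtc->base, primary,
					 &fcrtc->cursor, &foo_crtc_funcs,
					 "crtc-%d", fcrtc->pipe);
}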
+ * + * This entry point is deprecated, drivers should instead implement + * universal plane support and register a proper cursor plane using + * drm_crtc_init_with_planes(). + * + * This callback is optional. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ int (*cursor_move)(struct drm_crtc *crtc, int x, int y); - /* Set gamma on the CRTC */ + /** + * @gamma_set: + * + * Set gamma on the CRTC. + * + * This callback is optional. + * + * NOTE: + * + * Drivers that support gamma tables and also fbdev emulation through + * the provided helper library need to take care to fill out the gamma + * hooks for both. Currently there's a bit an unfortunate duplication + * going on, which should eventually be unified to just one set of + * hooks. + */ void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start, uint32_t size); - /* Object destroy routine */ + + /** + * @destroy: + * + * Clean up plane resources. This is only called at driver unload time + * through drm_mode_config_cleanup() since a CRTC cannot be hotplugged + * in DRM. + */ void (*destroy)(struct drm_crtc *crtc); + /** + * @set_config: + * + * This is the main legacy entry point to change the modeset state on a + * CRTC. All the details of the desired configuration are passed in a + * struct &drm_mode_set - see there for details. + * + * Drivers implementing atomic modeset should use + * drm_atomic_helper_set_config() to implement this hook. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ int (*set_config)(struct drm_mode_set *set); - /* - * Flip to the given framebuffer. This implements the page - * flip ioctl described in drm_mode.h, specifically, the - * implementation must return immediately and block all - * rendering to the current fb until the flip has completed. - * If userspace set the event flag in the ioctl, the event - * argument will point to an event to send back when the flip - * completes, otherwise it will be NULL. + /** + * @page_flip: + * + * Legacy entry point to schedule a flip to the given framebuffer. + * + * Page flipping is a synchronization mechanism that replaces the frame + * buffer being scanned out by the CRTC with a new frame buffer during + * vertical blanking, avoiding tearing (except when requested otherwise + * through the DRM_MODE_PAGE_FLIP_ASYNC flag). When an application + * requests a page flip the DRM core verifies that the new frame buffer + * is large enough to be scanned out by the CRTC in the currently + * configured mode and then calls the CRTC ->page_flip() operation with a + * pointer to the new frame buffer. + * + * The driver must wait for any pending rendering to the new framebuffer + * to complete before executing the flip. It should also wait for any + * pending rendering from other drivers if the underlying buffer is a + * shared dma-buf. + * + * An application can request to be notified when the page flip has + * completed. The drm core will supply a struct &drm_event in the event + * parameter in this case. This can be handled by the + * drm_crtc_send_vblank_event() function, which the driver should call on + * the provided event upon completion of the flip. Note that if + * the driver supports vblank signalling and timestamping the vblank + * counters and timestamps must agree with the ones returned from page + * flip events. 
With the current vblank helper infrastructure this can + * be achieved by holding a vblank reference while the page flip is + * pending, acquired through drm_crtc_vblank_get() and released with + * drm_crtc_vblank_put(). Drivers are free to implement their own vblank + * counter and timestamp tracking though, e.g. if they have accurate + * timestamp registers in hardware. + * + * FIXME: + * + * Up to that point drivers need to manage events themselves and can use + * even->base.list freely for that. Specifically they need to ensure + * that they don't send out page flip (or vblank) events for which the + * corresponding drm file has been closed already. The drm core + * unfortunately does not (yet) take care of that. Therefore drivers + * currently must clean up and release pending events in their + * ->preclose driver function. + * + * This callback is optional. + * + * NOTE: + * + * Very early versions of the KMS ABI mandated that the driver must + * block (but not reject) any rendering to the old framebuffer until the + * flip operation has completed and the old framebuffer is no longer + * visible. This requirement has been lifted, and userspace is instead + * expected to request delivery of an event and wait with recycling old + * buffers until such has been received. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. Note that if a + * ->page_flip() operation is already pending the callback should return + * -EBUSY. Pageflips on a disabled CRTC (either by setting a NULL mode + * or just runtime disabled through DPMS respectively the new atomic + * "ACTIVE" state) should result in an -EINVAL error code. Note that + * drm_atomic_helper_page_flip() checks this already for atomic drivers. */ int (*page_flip)(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_pending_vblank_event *event, uint32_t flags); + /** + * @set_property: + * + * This is the legacy entry point to update a property attached to the + * CRTC. + * + * Drivers implementing atomic modeset should use + * drm_atomic_helper_crtc_set_property() to implement this hook. + * + * This callback is optional if the driver does not support any legacy + * driver-private properties. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ int (*set_property)(struct drm_crtc *crtc, struct drm_property *property, uint64_t val); - /* atomic update handling */ + /** + * @atomic_duplicate_state: + * + * Duplicate the current atomic state for this CRTC and return it. + * The core and helpers gurantee that any atomic state duplicated with + * this hook and still owned by the caller (i.e. not transferred to the + * driver by calling ->atomic_commit() from struct + * &drm_mode_config_funcs) will be cleaned up by calling the + * @atomic_destroy_state hook in this structure. + * + * Atomic drivers which don't subclass struct &drm_crtc should use + * drm_atomic_helper_crtc_duplicate_state(). Drivers that subclass the + * state structure to extend it with driver-private state should use + * __drm_atomic_helper_crtc_duplicate_state() to make sure shared state is + * duplicated in a consistent fashion across drivers. + * + * It is an error to call this hook before crtc->state has been + * initialized correctly. + * + * NOTE: + * + * If the duplicate state references refcounted resources this hook must + * acquire a reference for each of them. The driver must release these + * references again in @atomic_destroy_state. 
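The subclassing pattern described for @atomic_duplicate_state and @atomic_destroy_state, sketched for a hypothetical foo driver; only the __drm_atomic_helper_crtc_*() calls are core API, everything named foo_* (including the pll_id member) is illustrative:

#include <drm/drm_atomic_helper.h>

struct foo_crtc_state {
	struct drm_crtc_state base;
	int pll_id;	/* example of driver-private state */
};

static struct drm_crtc_state *foo_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct foo_crtc_state *cur = container_of(crtc->state,
						  struct foo_crtc_state, base);
	struct foo_crtc_state *copy = kmemdup(cur, sizeof(*copy), GFP_KERNEL);

	if (!copy)
		return NULL;

	/* Copies the current base state and clears one-shot fields such as
	 * ->event and the *_changed flags. */
	__drm_atomic_helper_crtc_duplicate_state(crtc, &copy->base);

	/* Any refcounted resource carried in the private part would need a
	 * reference taken here and dropped again in destroy_state. */
	return &copy->base;
}

static void foo_crtc_destroy_state(struct drm_crtc *crtc,
				   struct drm_crtc_state *state)
{
	__drm_atomic_helper_crtc_destroy_state(crtc, state);
	kfree(container_of(state, struct foo_crtc_state, base));
}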
+ * + * RETURNS: + * + * Duplicated atomic state or NULL when the allocation failed. + */ struct drm_crtc_state *(*atomic_duplicate_state)(struct drm_crtc *crtc); + + /** + * @atomic_destroy_state: + * + * Destroy a state duplicated with @atomic_duplicate_state and release + * or unreference all resources it references + */ void (*atomic_destroy_state)(struct drm_crtc *crtc, struct drm_crtc_state *state); + + /** + * @atomic_set_property: + * + * Decode a driver-private property value and store the decoded value + * into the passed-in state structure. Since the atomic core decodes all + * standardized properties (even for extensions beyond the core set of + * properties which might not be implemented by all drivers) this + * requires drivers to subclass the state structure. + * + * Such driver-private properties should really only be implemented for + * truly hardware/vendor specific state. Instead it is preferred to + * standardize atomic extension and decode the properties used to expose + * such an extension in the core. + * + * Do not call this function directly, use + * drm_atomic_crtc_set_property() instead. + * + * This callback is optional if the driver does not support any + * driver-private atomic properties. + * + * NOTE: + * + * This function is called in the state assembly phase of atomic + * modesets, which can be aborted for any reason (including on + * userspace's request to just check whether a configuration would be + * possible). Drivers MUST NOT touch any persistent state (hardware or + * software) or data structures except the passed in @state parameter. + * + * Also since userspace controls in which order properties are set this + * function must not do any input validation (since the state update is + * incomplete and hence likely inconsistent). Instead any such input + * validation must be done in the various atomic_check callbacks. + * + * RETURNS: + * + * 0 if the property has been found, -EINVAL if the property isn't + * implemented by the driver (which should never happen, the core only + * asks for properties attached to this CRTC). No other validation is + * allowed by the driver. The core already checks that the property + * value is within the range (integer, valid enum value, ...) the driver + * set when registering the property. + */ int (*atomic_set_property)(struct drm_crtc *crtc, struct drm_crtc_state *state, struct drm_property *property, uint64_t val); + /** + * @atomic_get_property: + * + * Reads out the decoded driver-private property. This is used to + * implement the GETCRTC IOCTL. + * + * Do not call this function directly, use + * drm_atomic_crtc_get_property() instead. + * + * This callback is optional if the driver does not support any + * driver-private atomic properties. + * + * RETURNS: + * + * 0 on success, -EINVAL if the property isn't implemented by the + * driver (which should never happen, the core only asks for + * properties attached to this CRTC). + */ int (*atomic_get_property)(struct drm_crtc *crtc, const struct drm_crtc_state *state, struct drm_property *property, @@ -416,7 +711,7 @@ struct drm_crtc_funcs { * @properties: property tracking for this CRTC * @state: current atomic state for this CRTC * @acquire_ctx: per-CRTC implicit acquire context used by atomic drivers for - * legacy ioctls + * legacy IOCTLs * * Each CRTC may have one or more connectors associated with it. This structure * allows the CRTC to be controlled. 
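Taken together, the hooks above collapse into a small vtable for a driver built on the atomic helpers. A sketch, assuming only a driver-specific foo_crtc_destroy() for cleanup; the helper entry points are the ones named in the comments above. The legacy cursor and gamma hooks are optional and omitted since a cursor plane is registered instead:

#include <drm/drm_atomic_helper.h>

static const struct drm_crtc_funcs foo_crtc_funcs = {
	.reset			= drm_atomic_helper_crtc_reset,
	.destroy		= foo_crtc_destroy,
	.set_config		= drm_atomic_helper_set_config,
	.page_flip		= drm_atomic_helper_page_flip,
	.set_property		= drm_atomic_helper_crtc_set_property,
	.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
};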
@@ -426,6 +721,8 @@ struct drm_crtc { struct device_node *port; struct list_head head; + char *name; + /* * crtc mutex * @@ -463,14 +760,14 @@ struct drm_crtc { uint16_t *gamma_store; /* if you are using the helper */ - const void *helper_private; + const struct drm_crtc_helper_funcs *helper_private; struct drm_object_properties properties; struct drm_crtc_state *state; /* - * For legacy crtc ioctls so that atomic drivers can get at the locking + * For legacy crtc IOCTLs so that atomic drivers can get at the locking * acquire context. */ struct drm_modeset_acquire_ctx *acquire_ctx; @@ -495,54 +792,239 @@ struct drm_connector_state { /** * struct drm_connector_funcs - control connectors on a given device - * @dpms: set power state - * @save: save connector state - * @restore: restore connector state - * @reset: reset connector after state has been invalidated (e.g. resume) - * @detect: is this connector active? - * @fill_modes: fill mode list for this connector - * @set_property: property for this connector may need an update - * @destroy: make object go away - * @force: notify the driver that the connector is forced on - * @atomic_duplicate_state: duplicate the atomic state for this connector - * @atomic_destroy_state: destroy an atomic state for this connector - * @atomic_set_property: set a property on an atomic state for this connector - * (do not call directly, use drm_atomic_connector_set_property()) - * @atomic_get_property: get a property on an atomic state for this connector - * (do not call directly, use drm_atomic_connector_get_property()) * * Each CRTC may have one or more connectors attached to it. The functions * below allow the core DRM code to control connectors, enumerate available modes, * etc. */ struct drm_connector_funcs { + /** + * @dpms: + * + * Legacy entry point to set the per-connector DPMS state. Legacy DPMS + * is exposed as a standard property on the connector, but diverted to + * this callback in the drm core. Note that atomic drivers don't + * implement the 4 level DPMS support on the connector any more, but + * instead only have an on/off "ACTIVE" property on the CRTC object. + * + * Drivers implementing atomic modeset should use + * drm_atomic_helper_connector_dpms() to implement this hook. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ int (*dpms)(struct drm_connector *connector, int mode); - void (*save)(struct drm_connector *connector); - void (*restore)(struct drm_connector *connector); + + /** + * @reset: + * + * Reset connector hardware and software state to off. This function isn't + * called by the core directly, only through drm_mode_config_reset(). + * It's not a helper hook only for historical reasons. + * + * Atomic drivers can use drm_atomic_helper_connector_reset() to reset + * atomic state using this hook. + */ void (*reset)(struct drm_connector *connector); - /* Check to see if anything is attached to the connector. - * @force is set to false whilst polling, true when checking the - * connector due to user request. @force can be used by the driver - * to avoid expensive, destructive operations during automated - * probing. + /** + * @detect: + * + * Check to see if anything is attached to the connector. The parameter + * force is set to false whilst polling, true when checking the + * connector due to a user request. force can be used by the driver to + * avoid expensive, destructive operations during automated probing. + * + * FIXME: + * + * Note that this hook is only called by the probe helper. 
It's not in + * the helper library vtable purely for historical reasons. The only DRM + * core entry point to probe connector state is @fill_modes. + * + * RETURNS: + * + * drm_connector_status indicating the connector's status. */ enum drm_connector_status (*detect)(struct drm_connector *connector, bool force); - int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height); - int (*set_property)(struct drm_connector *connector, struct drm_property *property, - uint64_t val); - void (*destroy)(struct drm_connector *connector); + + /** + * @force: + * + * This function is called to update internal encoder state when the + * connector is forced to a certain state by userspace, either through + * the sysfs interfaces or on the kernel cmdline. In that case the + * @detect callback isn't called. + * + * FIXME: + * + * Note that this hook is only called by the probe helper. It's not in + * the helper library vtable purely for historical reasons. The only DRM + * core entry point to probe connector state is @fill_modes. + */ void (*force)(struct drm_connector *connector); - /* atomic update handling */ + /** + * @fill_modes: + * + * Entry point for output detection and basic mode validation. The + * driver should reprobe the output if needed (e.g. when hotplug + * handling is unreliable), add all detected modes to connector->modes + * and filter out any the device can't support in any configuration. It + * also needs to filter out any modes wider or higher than the + * parameters max_width and max_height indicate. + * + * The drivers must also prune any modes no longer valid from + * connector->modes. Furthermore it must update connector->status and + * connector->edid. If no EDID has been received for this output + * connector->edid must be NULL. + * + * Drivers using the probe helpers should use + * drm_helper_probe_single_connector_modes() or + * drm_helper_probe_single_connector_modes_nomerge() to implement this + * function. + * + * RETURNS: + * + * The number of modes detected and filled into connector->modes. + */ + int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height); + + /** + * @set_property: + * + * This is the legacy entry point to update a property attached to the + * connector. + * + * Drivers implementing atomic modeset should use + * drm_atomic_helper_connector_set_property() to implement this hook. + * + * This callback is optional if the driver does not support any legacy + * driver-private properties. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*set_property)(struct drm_connector *connector, struct drm_property *property, + uint64_t val); + + /** + * @destroy: + * + * Clean up connector resources. This is called at driver unload time + * through drm_mode_config_cleanup(). It can also be called at runtime + * when a connector is being hot-unplugged for drivers that support + * connector hotplugging (e.g. DisplayPort MST). + */ + void (*destroy)(struct drm_connector *connector); + + /** + * @atomic_duplicate_state: + * + * Duplicate the current atomic state for this connector and return it. + * The core and helpers gurantee that any atomic state duplicated with + * this hook and still owned by the caller (i.e. not transferred to the + * driver by calling ->atomic_commit() from struct + * &drm_mode_config_funcs) will be cleaned up by calling the + * @atomic_destroy_state hook in this structure. 
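For reference, the same connector hooks wired up for an atomic driver using the helpers named in these comments; only detect() and destroy() (hypothetical foo_connector_* functions) remain driver-specific in this sketch:

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>

static const struct drm_connector_funcs foo_connector_funcs = {
	.dpms			= drm_atomic_helper_connector_dpms,
	.reset			= drm_atomic_helper_connector_reset,
	.detect			= foo_connector_detect,
	.fill_modes		= drm_helper_probe_single_connector_modes,
	.set_property		= drm_atomic_helper_connector_set_property,
	.destroy		= foo_connector_destroy,
	.atomic_duplicate_state	= drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_connector_destroy_state,
};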
+ * + * Atomic drivers which don't subclass struct &drm_connector_state should use + * drm_atomic_helper_connector_duplicate_state(). Drivers that subclass the + * state structure to extend it with driver-private state should use + * __drm_atomic_helper_connector_duplicate_state() to make sure shared state is + * duplicated in a consistent fashion across drivers. + * + * It is an error to call this hook before connector->state has been + * initialized correctly. + * + * NOTE: + * + * If the duplicate state references refcounted resources this hook must + * acquire a reference for each of them. The driver must release these + * references again in @atomic_destroy_state. + * + * RETURNS: + * + * Duplicated atomic state or NULL when the allocation failed. + */ struct drm_connector_state *(*atomic_duplicate_state)(struct drm_connector *connector); + + /** + * @atomic_destroy_state: + * + * Destroy a state duplicated with @atomic_duplicate_state and release + * or unreference all resources it references + */ void (*atomic_destroy_state)(struct drm_connector *connector, struct drm_connector_state *state); + + /** + * @atomic_set_property: + * + * Decode a driver-private property value and store the decoded value + * into the passed-in state structure. Since the atomic core decodes all + * standardized properties (even for extensions beyond the core set of + * properties which might not be implemented by all drivers) this + * requires drivers to subclass the state structure. + * + * Such driver-private properties should really only be implemented for + * truly hardware/vendor specific state. Instead it is preferred to + * standardize atomic extension and decode the properties used to expose + * such an extension in the core. + * + * Do not call this function directly, use + * drm_atomic_connector_set_property() instead. + * + * This callback is optional if the driver does not support any + * driver-private atomic properties. + * + * NOTE: + * + * This function is called in the state assembly phase of atomic + * modesets, which can be aborted for any reason (including on + * userspace's request to just check whether a configuration would be + * possible). Drivers MUST NOT touch any persistent state (hardware or + * software) or data structures except the passed in @state parameter. + * + * Also since userspace controls in which order properties are set this + * function must not do any input validation (since the state update is + * incomplete and hence likely inconsistent). Instead any such input + * validation must be done in the various atomic_check callbacks. + * + * RETURNS: + * + * 0 if the property has been found, -EINVAL if the property isn't + * implemented by the driver (which shouldn't ever happen, the core only + * asks for properties attached to this connector). No other validation + * is allowed by the driver. The core already checks that the property + * value is within the range (integer, valid enum value, ...) the driver + * set when registering the property. + */ int (*atomic_set_property)(struct drm_connector *connector, struct drm_connector_state *state, struct drm_property *property, uint64_t val); + + /** + * @atomic_get_property: + * + * Reads out the decoded driver-private property. This is used to + * implement the GETCONNECTOR IOCTL. + * + * Do not call this function directly, use + * drm_atomic_connector_get_property() instead. + * + * This callback is optional if the driver does not support any + * driver-private atomic properties. 
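A sketch of the decode/read-back pair this describes, for a hypothetical driver-private connector property; struct foo_connector, struct foo_connector_state, the margin_property pointer and the margin value are all illustrative, only the hook signatures come from this header:

struct foo_connector {
	struct drm_connector base;
	struct drm_property *margin_property;
};

struct foo_connector_state {
	struct drm_connector_state base;
	unsigned int margin;	/* decoded driver-private value */
};

static int foo_connector_atomic_set_property(struct drm_connector *connector,
					     struct drm_connector_state *state,
					     struct drm_property *property,
					     uint64_t val)
{
	struct foo_connector *fcon = container_of(connector,
						  struct foo_connector, base);
	struct foo_connector_state *fstate = container_of(state,
						struct foo_connector_state, base);

	if (property == fcon->margin_property) {
		/* Decode and store only; no hardware access and no validation
		 * against other (possibly not yet updated) state. */
		fstate->margin = val;
		return 0;
	}
	return -EINVAL;
}

static int foo_connector_atomic_get_property(struct drm_connector *connector,
					     const struct drm_connector_state *state,
					     struct drm_property *property,
					     uint64_t *val)
{
	struct foo_connector *fcon = container_of(connector,
						  struct foo_connector, base);
	const struct foo_connector_state *fstate = container_of(state,
						struct foo_connector_state, base);

	if (property == fcon->margin_property) {
		*val = fstate->margin;
		return 0;
	}
	return -EINVAL;
}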
+ * + * RETURNS: + * + * 0 on success, -EINVAL if the property isn't implemented by the + * driver (which shouldn't ever happen, the core only asks for + * properties attached to this connector). + */ int (*atomic_get_property)(struct drm_connector *connector, const struct drm_connector_state *state, struct drm_property *property, @@ -551,13 +1033,26 @@ struct drm_connector_funcs { /** * struct drm_encoder_funcs - encoder controls - * @reset: reset state (e.g. at init or resume time) - * @destroy: cleanup and free associated data * * Encoders sit between CRTCs and connectors. */ struct drm_encoder_funcs { + /** + * @reset: + * + * Reset encoder hardware and software state to off. This function isn't + * called by the core directly, only through drm_mode_config_reset(). + * It's not a helper hook only for historical reasons. + */ void (*reset)(struct drm_encoder *encoder); + + /** + * @destroy: + * + * Clean up encoder resources. This is only called at driver unload time + * through drm_mode_config_cleanup() since an encoder cannot be + * hotplugged in DRM. + */ void (*destroy)(struct drm_encoder *encoder); }; @@ -593,7 +1088,7 @@ struct drm_encoder { struct drm_crtc *crtc; struct drm_bridge *bridge; const struct drm_encoder_funcs *funcs; - const void *helper_private; + const struct drm_encoder_helper_funcs *helper_private; }; /* should we poll this connector for connects and disconnects */ @@ -671,6 +1166,7 @@ struct drm_connector { struct drm_mode_object base; char *name; + int connector_id; int connector_type; int connector_type_id; bool interlace_allowed; @@ -698,7 +1194,7 @@ struct drm_connector { /* requested DPMS state */ int dpms; - const void *helper_private; + const struct drm_connector_helper_funcs *helper_private; /* forced on connector */ struct drm_cmdline_mode cmdline_mode; @@ -778,40 +1274,203 @@ struct drm_plane_state { /** * struct drm_plane_funcs - driver plane control functions - * @update_plane: update the plane configuration - * @disable_plane: shut down the plane - * @destroy: clean up plane resources - * @reset: reset plane after state has been invalidated (e.g. resume) - * @set_property: called when a property is changed - * @atomic_duplicate_state: duplicate the atomic state for this plane - * @atomic_destroy_state: destroy an atomic state for this plane - * @atomic_set_property: set a property on an atomic state for this plane - * (do not call directly, use drm_atomic_plane_set_property()) - * @atomic_get_property: get a property on an atomic state for this plane - * (do not call directly, use drm_atomic_plane_get_property()) */ struct drm_plane_funcs { + /** + * @update_plane: + * + * This is the legacy entry point to enable and configure the plane for + * the given CRTC and framebuffer. It is never called to disable the + * plane, i.e. the passed-in crtc and fb paramters are never NULL. + * + * The source rectangle in frame buffer memory coordinates is given by + * the src_x, src_y, src_w and src_h parameters (as 16.16 fixed point + * values). Devices that don't support subpixel plane coordinates can + * ignore the fractional part. + * + * The destination rectangle in CRTC coordinates is given by the + * crtc_x, crtc_y, crtc_w and crtc_h parameters (as integer values). + * Devices scale the source rectangle to the destination rectangle. If + * scaling is not supported, and the source rectangle size doesn't match + * the destination rectangle size, the driver must return a + * -EINVAL error. 
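The 16.16 fixed-point convention noted above applies both to these legacy parameters and to the src_* fields of struct drm_plane_state. A small helper sketch (the foo_ name is hypothetical) for hardware without subpixel positioning:

static void foo_plane_get_src_rect(const struct drm_plane_state *state,
				   uint32_t *x, uint32_t *y,
				   uint32_t *w, uint32_t *h)
{
	/* Drop the fractional 16 bits; the hardware scans out whole pixels. */
	*x = state->src_x >> 16;
	*y = state->src_y >> 16;
	*w = state->src_w >> 16;
	*h = state->src_h >> 16;
}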
+ * + * Drivers implementing atomic modeset should use + * drm_atomic_helper_update_plane() to implement this hook. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ int (*update_plane)(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h); + + /** + * @disable_plane: + * + * This is the legacy entry point to disable the plane. The DRM core + * calls this method in response to a DRM_IOCTL_MODE_SETPLANE IOCTL call + * with the frame buffer ID set to 0. Disabled planes must not be + * processed by the CRTC. + * + * Drivers implementing atomic modeset should use + * drm_atomic_helper_disable_plane() to implement this hook. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ int (*disable_plane)(struct drm_plane *plane); + + /** + * @destroy: + * + * Clean up plane resources. This is only called at driver unload time + * through drm_mode_config_cleanup() since a plane cannot be hotplugged + * in DRM. + */ void (*destroy)(struct drm_plane *plane); + + /** + * @reset: + * + * Reset plane hardware and software state to off. This function isn't + * called by the core directly, only through drm_mode_config_reset(). + * It's not a helper hook only for historical reasons. + * + * Atomic drivers can use drm_atomic_helper_plane_reset() to reset + * atomic state using this hook. + */ void (*reset)(struct drm_plane *plane); + /** + * @set_property: + * + * This is the legacy entry point to update a property attached to the + * plane. + * + * Drivers implementing atomic modeset should use + * drm_atomic_helper_plane_set_property() to implement this hook. + * + * This callback is optional if the driver does not support any legacy + * driver-private properties. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ int (*set_property)(struct drm_plane *plane, struct drm_property *property, uint64_t val); - /* atomic update handling */ + /** + * @atomic_duplicate_state: + * + * Duplicate the current atomic state for this plane and return it. + * The core and helpers gurantee that any atomic state duplicated with + * this hook and still owned by the caller (i.e. not transferred to the + * driver by calling ->atomic_commit() from struct + * &drm_mode_config_funcs) will be cleaned up by calling the + * @atomic_destroy_state hook in this structure. + * + * Atomic drivers which don't subclass struct &drm_plane_state should use + * drm_atomic_helper_plane_duplicate_state(). Drivers that subclass the + * state structure to extend it with driver-private state should use + * __drm_atomic_helper_plane_duplicate_state() to make sure shared state is + * duplicated in a consistent fashion across drivers. + * + * It is an error to call this hook before plane->state has been + * initialized correctly. + * + * NOTE: + * + * If the duplicate state references refcounted resources this hook must + * acquire a reference for each of them. The driver must release these + * references again in @atomic_destroy_state. + * + * RETURNS: + * + * Duplicated atomic state or NULL when the allocation failed. 
+ */ struct drm_plane_state *(*atomic_duplicate_state)(struct drm_plane *plane); + + /** + * @atomic_destroy_state: + * + * Destroy a state duplicated with @atomic_duplicate_state and release + * or unreference all resources it references + */ void (*atomic_destroy_state)(struct drm_plane *plane, struct drm_plane_state *state); + + /** + * @atomic_set_property: + * + * Decode a driver-private property value and store the decoded value + * into the passed-in state structure. Since the atomic core decodes all + * standardized properties (even for extensions beyond the core set of + * properties which might not be implemented by all drivers) this + * requires drivers to subclass the state structure. + * + * Such driver-private properties should really only be implemented for + * truly hardware/vendor specific state. Instead it is preferred to + * standardize atomic extension and decode the properties used to expose + * such an extension in the core. + * + * Do not call this function directly, use + * drm_atomic_plane_set_property() instead. + * + * This callback is optional if the driver does not support any + * driver-private atomic properties. + * + * NOTE: + * + * This function is called in the state assembly phase of atomic + * modesets, which can be aborted for any reason (including on + * userspace's request to just check whether a configuration would be + * possible). Drivers MUST NOT touch any persistent state (hardware or + * software) or data structures except the passed in @state parameter. + * + * Also since userspace controls in which order properties are set this + * function must not do any input validation (since the state update is + * incomplete and hence likely inconsistent). Instead any such input + * validation must be done in the various atomic_check callbacks. + * + * RETURNS: + * + * 0 if the property has been found, -EINVAL if the property isn't + * implemented by the driver (which shouldn't ever happen, the core only + * asks for properties attached to this plane). No other validation is + * allowed by the driver. The core already checks that the property + * value is within the range (integer, valid enum value, ...) the driver + * set when registering the property. + */ int (*atomic_set_property)(struct drm_plane *plane, struct drm_plane_state *state, struct drm_property *property, uint64_t val); + + /** + * @atomic_get_property: + * + * Reads out the decoded driver-private property. This is used to + * implement the GETPLANE IOCTL. + * + * Do not call this function directly, use + * drm_atomic_plane_get_property() instead. + * + * This callback is optional if the driver does not support any + * driver-private atomic properties. + * + * RETURNS: + * + * 0 on success, -EINVAL if the property isn't implemented by the + * driver (which should never happen, the core only asks for + * properties attached to this plane). 
+ */ int (*atomic_get_property)(struct drm_plane *plane, const struct drm_plane_state *state, struct drm_property *property, @@ -824,6 +1483,7 @@ enum drm_plane_type { DRM_PLANE_TYPE_CURSOR, }; + /** * struct drm_plane - central DRM plane control structure * @dev: DRM device this plane belongs to @@ -846,6 +1506,8 @@ struct drm_plane { struct drm_device *dev; struct list_head head; + char *name; + struct drm_modeset_lock mutex; struct drm_mode_object base; @@ -866,7 +1528,7 @@ struct drm_plane { enum drm_plane_type type; - const void *helper_private; + const struct drm_plane_helper_funcs *helper_private; struct drm_plane_state *state; }; @@ -874,24 +1536,114 @@ struct drm_plane { /** * struct drm_bridge_funcs - drm_bridge control functions * @attach: Called during drm_bridge_attach - * @mode_fixup: Try to fixup (or reject entirely) proposed mode for this bridge - * @disable: Called right before encoder prepare, disables the bridge - * @post_disable: Called right after encoder prepare, for lockstepped disable - * @mode_set: Set this mode to the bridge - * @pre_enable: Called right before encoder commit, for lockstepped commit - * @enable: Called right after encoder commit, enables the bridge */ struct drm_bridge_funcs { int (*attach)(struct drm_bridge *bridge); + + /** + * @mode_fixup: + * + * This callback is used to validate and adjust a mode. The paramater + * mode is the display mode that should be fed to the next element in + * the display chain, either the final &drm_connector or the next + * &drm_bridge. The parameter adjusted_mode is the input mode the bridge + * requires. It can be modified by this callback and does not need to + * match mode. + * + * This is the only hook that allows a bridge to reject a modeset. If + * this function passes all other callbacks must succeed for this + * configuration. + * + * NOTE: + * + * This function is called in the check phase of atomic modesets, which + * can be aborted for any reason (including on userspace's request to + * just check whether a configuration would be possible). Drivers MUST + * NOT touch any persistent state (hardware or software) or data + * structures except the passed in @state parameter. + * + * RETURNS: + * + * True if an acceptable configuration is possible, false if the modeset + * operation should be rejected. + */ bool (*mode_fixup)(struct drm_bridge *bridge, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); + /** + * @disable: + * + * This callback should disable the bridge. It is called right before + * the preceding element in the display pipe is disabled. If the + * preceding element is a bridge this means it's called before that + * bridge's ->disable() function. If the preceding element is a + * &drm_encoder it's called right before the encoder's ->disable(), + * ->prepare() or ->dpms() hook from struct &drm_encoder_helper_funcs. + * + * The bridge can assume that the display pipe (i.e. clocks and timing + * signals) feeding it is still running when this callback is called. + */ void (*disable)(struct drm_bridge *bridge); + + /** + * @post_disable: + * + * This callback should disable the bridge. It is called right after + * the preceding element in the display pipe is disabled. If the + * preceding element is a bridge this means it's called after that + * bridge's ->post_disable() function. If the preceding element is a + * &drm_encoder it's called right after the encoder's ->disable(), + * ->prepare() or ->dpms() hook from struct &drm_encoder_helper_funcs. 
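Summarising the ordering spelled out in these bridge hooks (and in @pre_enable/@enable documented below): a bridge sees its input pipe still running in @disable, stopped in @post_disable, not yet running in @pre_enable and running again in @enable. A skeleton with hypothetical foo_bridge_* handlers:

static const struct drm_bridge_funcs foo_bridge_funcs = {
	.attach		= foo_bridge_attach,
	.mode_fixup	= foo_bridge_mode_fixup,
	.disable	= foo_bridge_disable,      /* input pipe still running */
	.post_disable	= foo_bridge_post_disable, /* input pipe stopped */
	.mode_set	= foo_bridge_mode_set,     /* pipe off while this runs */
	.pre_enable	= foo_bridge_pre_enable,   /* input pipe not yet running */
	.enable		= foo_bridge_enable,       /* input pipe running */
};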
+ * + * The bridge must assume that the display pipe (i.e. clocks and timing + * singals) feeding it is no longer running when this callback is + * called. + */ void (*post_disable)(struct drm_bridge *bridge); + + /** + * @mode_set: + * + * This callback should set the given mode on the bridge. It is called + * after the ->mode_set() callback for the preceding element in the + * display pipeline has been called already. The display pipe (i.e. + * clocks and timing signals) is off when this function is called. + */ void (*mode_set)(struct drm_bridge *bridge, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); + /** + * @pre_enable: + * + * This callback should enable the bridge. It is called right before + * the preceding element in the display pipe is enabled. If the + * preceding element is a bridge this means it's called before that + * bridge's ->pre_enable() function. If the preceding element is a + * &drm_encoder it's called right before the encoder's ->enable(), + * ->commit() or ->dpms() hook from struct &drm_encoder_helper_funcs. + * + * The display pipe (i.e. clocks and timing signals) feeding this bridge + * will not yet be running when this callback is called. The bridge must + * not enable the display link feeding the next bridge in the chain (if + * there is one) when this callback is called. + */ void (*pre_enable)(struct drm_bridge *bridge); + + /** + * @enable: + * + * This callback should enable the bridge. It is called right after + * the preceding element in the display pipe is enabled. If the + * preceding element is a bridge this means it's called after that + * bridge's ->enable() function. If the preceding element is a + * &drm_encoder it's called right after the encoder's ->enable(), + * ->commit() or ->dpms() hook from struct &drm_encoder_helper_funcs. + * + * The bridge can assume that the display pipe (i.e. clocks and timing + * signals) feeding it is running when this callback is called. This + * callback must enable the display link feeding the next bridge in the + * chain if there is one. + */ void (*enable)(struct drm_bridge *bridge); }; @@ -922,7 +1674,7 @@ struct drm_bridge { * struct drm_atomic_state - the global state object for atomic updates * @dev: parent DRM device * @allow_modeset: allow full modeset - * @legacy_cursor_update: hint to enforce legacy cursor ioctl semantics + * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics * @planes: pointer to array of plane pointers * @plane_states: pointer to array of plane states pointers * @crtcs: pointer to array of CRTC pointers @@ -977,31 +1729,265 @@ struct drm_mode_set { /** * struct drm_mode_config_funcs - basic driver provided mode setting functions - * @fb_create: create a new framebuffer object - * @output_poll_changed: function to handle output configuration changes - * @atomic_check: check whether a given atomic state update is possible - * @atomic_commit: commit an atomic state update previously verified with - * atomic_check() - * @atomic_state_alloc: allocate a new atomic state - * @atomic_state_clear: clear the atomic state - * @atomic_state_free: free the atomic state * * Some global (i.e. not per-CRTC, connector, etc) mode setting functions that * involve drivers. */ struct drm_mode_config_funcs { + /** + * @fb_create: + * + * Create a new framebuffer object. The core does basic checks on the + * requested metadata, but most of that is left to the driver. See + * struct &drm_mode_fb_cmd2 for details. 
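A rough sketch of the shape an ->fb_create() implementation takes, following the steps spelled out in the remainder of this comment; struct foo_framebuffer, foo_fb_funcs and the single-plane assumption are illustrative, and the lookup/fill/init calls use the upstream signatures of this era, which may differ in a given port:

#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem.h>

struct foo_framebuffer {
	struct drm_framebuffer base;
	struct drm_gem_object *obj;
};

/* foo_fb_funcs is assumed to provide ->destroy() and ->create_handle(). */
static struct drm_framebuffer *
foo_fb_create(struct drm_device *dev, struct drm_file *file_priv,
	      const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct foo_framebuffer *fb;
	struct drm_gem_object *obj;
	int ret;

	/* Resolve the backing storage object (single plane assumed). */
	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
	if (!obj)
		return ERR_PTR(-ENOENT);

	fb = kzalloc(sizeof(*fb), GFP_KERNEL);
	if (!fb) {
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(-ENOMEM);
	}
	fb->obj = obj;

	/* Fill out the generic metadata from the userspace request. */
	drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);

	/* Register the framebuffer; after this it is visible to other threads. */
	ret = drm_framebuffer_init(dev, &fb->base, &foo_fb_funcs);
	if (ret) {
		drm_gem_object_unreference_unlocked(obj);
		kfree(fb);
		return ERR_PTR(ret);
	}

	return &fb->base;
}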
+ * + * If the parameters are deemed valid and the backing storage objects in + * the underlying memory manager all exist, then the driver allocates + * a new &drm_framebuffer structure, subclassed to contain + * driver-specific information (like the internal native buffer object + * references). It also needs to fill out all relevant metadata, which + * should be done by calling drm_helper_mode_fill_fb_struct(). + * + * The initialization is finalized by calling drm_framebuffer_init(), + * which registers the framebuffer and makes it accessible to other + * threads. + * + * RETURNS: + * + * A new framebuffer with an initial reference count of 1 or a negative + * error code encoded with ERR_PTR(). + */ struct drm_framebuffer *(*fb_create)(struct drm_device *dev, struct drm_file *file_priv, - struct drm_mode_fb_cmd2 *mode_cmd); + const struct drm_mode_fb_cmd2 *mode_cmd); + + /** + * @output_poll_changed: + * + * Callback used by helpers to inform the driver of output configuration + * changes. + * + * Drivers implementing fbdev emulation with the helpers can call + * drm_fb_helper_hotplug_changed from this hook to inform the fbdev + * helper of output changes. + * + * FIXME: + * + * Except that there's no vtable for device-level helper callbacks + * there's no reason this is a core function. + */ void (*output_poll_changed)(struct drm_device *dev); + /** + * @atomic_check: + * + * This is the only hook to validate an atomic modeset update. This + * function must reject any modeset and state changes which the hardware + * or driver doesn't support. This includes but is of course not limited + * to: + * + * - Checking that the modes, framebuffers, scaling and placement + * requirements and so on are within the limits of the hardware. + * + * - Checking that any hidden shared resources are not oversubscribed. + * This can be shared PLLs, shared lanes, overall memory bandwidth, + * display fifo space (where shared between planes or maybe even + * CRTCs). + * + * - Checking that virtualized resources exported to userspace are not + * oversubscribed. For various reasons it can make sense to expose + * more planes, crtcs or encoders than which are physically there. One + * example is dual-pipe operations (which generally should be hidden + * from userspace if when lockstepped in hardware, exposed otherwise), + * where a plane might need 1 hardware plane (if it's just on one + * pipe), 2 hardware planes (when it spans both pipes) or maybe even + * shared a hardware plane with a 2nd plane (if there's a compatible + * plane requested on the area handled by the other pipe). + * + * - Check that any transitional state is possible and that if + * requested, the update can indeed be done in the vblank period + * without temporarily disabling some functions. + * + * - Check any other constraints the driver or hardware might have. + * + * - This callback also needs to correctly fill out the &drm_crtc_state + * in this update to make sure that drm_atomic_crtc_needs_modeset() + * reflects the nature of the possible update and returns true if and + * only if the update cannot be applied without tearing within one + * vblank on that CRTC. The core uses that information to reject + * updates which require a full modeset (i.e. blanking the screen, or + * at least pausing updates for a substantial amount of time) if + * userspace has disallowed that in its request. + * + * - The driver also does not need to repeat basic input validation + * like done for the corresponding legacy entry points. 
The core does + * that before calling this hook. + * + * See the documentation of @atomic_commit for an exhaustive list of + * error conditions which don't have to be checked at the + * ->atomic_check() stage? + * + * See the documentation for struct &drm_atomic_state for how exactly + * an atomic modeset update is described. + * + * Drivers using the atomic helpers can implement this hook using + * drm_atomic_helper_check(), or one of the exported sub-functions of + * it. + * + * RETURNS: + * + * 0 on success or one of the below negative error codes: + * + * - -EINVAL, if any of the above constraints are violated. + * + * - -EDEADLK, when returned from an attempt to acquire an additional + * &drm_modeset_lock through drm_modeset_lock(). + * + * - -ENOMEM, if allocating additional state sub-structures failed due + * to lack of memory. + * + * - -EINTR, -EAGAIN or -ERESTARTSYS, if the IOCTL should be restarted. + * This can either be due to a pending signal, or because the driver + * needs to completely bail out to recover from an exceptional + * situation like a GPU hang. From a userspace point all errors are + * treated equally. + */ int (*atomic_check)(struct drm_device *dev, - struct drm_atomic_state *a); + struct drm_atomic_state *state); + + /** + * @atomic_commit: + * + * This is the only hook to commit an atomic modeset update. The core + * guarantees that @atomic_check has been called successfully before + * calling this function, and that nothing has been changed in the + * interim. + * + * See the documentation for struct &drm_atomic_state for how exactly + * an atomic modeset update is described. + * + * Drivers using the atomic helpers can implement this hook using + * drm_atomic_helper_commit(), or one of the exported sub-functions of + * it. + * + * Asynchronous commits (as indicated with the async parameter) must + * do any preparatory work which might result in an unsuccessful commit + * in the context of this callback. The only exceptions are hardware + * errors resulting in -EIO. But even in that case the driver must + * ensure that the display pipe is at least running, to avoid + * compositors crashing when pageflips don't work. Anything else, + * specifically committing the update to the hardware, should be done + * without blocking the caller. For updates which do not require a + * modeset this must be guaranteed. + * + * The driver must wait for any pending rendering to the new + * framebuffers to complete before executing the flip. It should also + * wait for any pending rendering from other drivers if the underlying + * buffer is a shared dma-buf. Asynchronous commits must not wait for + * rendering in the context of this callback. + * + * An application can request to be notified when the atomic commit has + * completed. These events are per-CRTC and can be distinguished by the + * CRTC index supplied in &drm_event to userspace. + * + * The drm core will supply a struct &drm_event in the event + * member of each CRTC's &drm_crtc_state structure. This can be handled by the + * drm_crtc_send_vblank_event() function, which the driver should call on + * the provided event upon completion of the atomic commit. Note that if + * the driver supports vblank signalling and timestamping the vblank + * counters and timestamps must agree with the ones returned from page + * flip events. 
With the current vblank helper infrastructure this can + * be achieved by holding a vblank reference while the page flip is + * pending, acquired through drm_crtc_vblank_get() and released with + * drm_crtc_vblank_put(). Drivers are free to implement their own vblank + * counter and timestamp tracking though, e.g. if they have accurate + * timestamp registers in hardware. + * + * NOTE: + * + * Drivers are not allowed to shut down any display pipe successfully + * enabled through an atomic commit on their own. Doing so can result in + * compositors crashing if a page flip is suddenly rejected because the + * pipe is off. + * + * RETURNS: + * + * 0 on success or one of the below negative error codes: + * + * - -EBUSY, if an asynchronous updated is requested and there is + * an earlier updated pending. Drivers are allowed to support a queue + * of outstanding updates, but currently no driver supports that. + * Note that drivers must wait for preceding updates to complete if a + * synchronous update is requested, they are not allowed to fail the + * commit in that case. + * + * - -ENOMEM, if the driver failed to allocate memory. Specifically + * this can happen when trying to pin framebuffers, which must only + * be done when committing the state. + * + * - -ENOSPC, as a refinement of the more generic -ENOMEM to indicate + * that the driver has run out of vram, iommu space or similar GPU + * address space needed for framebuffer. + * + * - -EIO, if the hardware completely died. + * + * - -EINTR, -EAGAIN or -ERESTARTSYS, if the IOCTL should be restarted. + * This can either be due to a pending signal, or because the driver + * needs to completely bail out to recover from an exceptional + * situation like a GPU hang. From a userspace point of view all errors are + * treated equally. + * + * This list is exhaustive. Specifically this hook is not allowed to + * return -EINVAL (any invalid requests should be caught in + * @atomic_check) or -EDEADLK (this function must not acquire + * additional modeset locks). + */ int (*atomic_commit)(struct drm_device *dev, - struct drm_atomic_state *a, + struct drm_atomic_state *state, bool async); + + /** + * @atomic_state_alloc: + * + * This optional hook can be used by drivers that want to subclass struct + * &drm_atomic_state to be able to track their own driver-private global + * state easily. If this hook is implemented, drivers must also + * implement @atomic_state_clear and @atomic_state_free. + * + * RETURNS: + * + * A new &drm_atomic_state on success or NULL on failure. + */ struct drm_atomic_state *(*atomic_state_alloc)(struct drm_device *dev); + + /** + * @atomic_state_clear: + * + * This hook must clear any driver private state duplicated into the + * passed-in &drm_atomic_state. This hook is called when the caller + * encountered a &drm_modeset_lock deadlock and needs to drop all + * already acquired locks as part of the deadlock avoidance dance + * implemented in drm_modeset_lock_backoff(). + * + * Any duplicated state must be invalidated since a concurrent atomic + * update might change it, and the drm atomic interfaces always apply + * updates as relative changes to the current state. + * + * Drivers that implement this must call drm_atomic_state_default_clear() + * to clear common state. + */ void (*atomic_state_clear)(struct drm_atomic_state *state); + + /** + * @atomic_state_free: + * + * This hook needs driver private resources and the &drm_atomic_state + * itself. 
Note that the core first calls drm_atomic_state_clear() to + * avoid code duplicate between the clear and free hooks. + * + * Drivers that implement this must call drm_atomic_state_default_free() + * to release common resources. + */ void (*atomic_state_free)(struct drm_atomic_state *state); }; @@ -1010,7 +1996,7 @@ struct drm_mode_config_funcs { * @mutex: mutex protecting KMS related lists and structures * @connection_mutex: ww mutex protecting connector state and routing * @acquire_ctx: global implicit acquire context used by atomic drivers for - * legacy ioctls + * legacy IOCTLs * @idr_mutex: mutex for KMS ID allocation and management * @crtc_idr: main KMS ID tracking object * @fb_lock: mutex to protect fb state and lists @@ -1062,6 +2048,7 @@ struct drm_mode_config { struct list_head fb_list; int num_connector; + struct ida connector_ida; struct list_head connector_list; int num_encoder; struct list_head encoder_list; @@ -1166,7 +2153,7 @@ struct drm_mode_config { */ #define drm_for_each_plane_mask(plane, dev, plane_mask) \ list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \ - if ((plane_mask) & (1 << drm_plane_index(plane))) + for_each_if ((plane_mask) & (1 << drm_plane_index(plane))) #define obj_to_crtc(x) container_of(x, struct drm_crtc, base) @@ -1183,11 +2170,13 @@ struct drm_prop_enum_list { char *name; }; -extern int drm_crtc_init_with_planes(struct drm_device *dev, +extern __printf(6, 7) +int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc, struct drm_plane *primary, struct drm_plane *cursor, - const struct drm_crtc_funcs *funcs); + const struct drm_crtc_funcs *funcs, + const char *name, ...); extern void drm_crtc_cleanup(struct drm_crtc *crtc); extern unsigned int drm_crtc_index(struct drm_crtc *crtc); @@ -1213,7 +2202,11 @@ int drm_connector_register(struct drm_connector *connector); void drm_connector_unregister(struct drm_connector *connector); extern void drm_connector_cleanup(struct drm_connector *connector); -extern unsigned int drm_connector_index(struct drm_connector *connector); +static inline unsigned drm_connector_index(struct drm_connector *connector) +{ + return connector->connector_id; +} + /* helper to unplug all connectors from sysfs for device */ extern void drm_connector_unplug_all(struct drm_device *dev); @@ -1233,10 +2226,11 @@ void drm_bridge_mode_set(struct drm_bridge *bridge, void drm_bridge_pre_enable(struct drm_bridge *bridge); void drm_bridge_enable(struct drm_bridge *bridge); -extern int drm_encoder_init(struct drm_device *dev, +extern __printf(5, 6) +int drm_encoder_init(struct drm_device *dev, struct drm_encoder *encoder, const struct drm_encoder_funcs *funcs, - int encoder_type); + int encoder_type, const char *name, ...); /** * drm_encoder_crtc_ok - can a given crtc drive a given encoder? 
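The @atomic_state_alloc/@atomic_state_clear/@atomic_state_free trio documented in drm_mode_config_funcs above, sketched for a driver tracking one piece of global private state; foo_atomic_state and its reserved_bandwidth member are hypothetical, drm_atomic_state_default_clear() and drm_atomic_state_default_free() are the helpers named in those comments, and drm_atomic_state_init() is the matching core initialiser:

struct foo_atomic_state {
	struct drm_atomic_state base;
	unsigned int reserved_bandwidth;	/* driver-private global state */
};

static struct drm_atomic_state *foo_atomic_state_alloc(struct drm_device *dev)
{
	struct foo_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
		kfree(state);
		return NULL;
	}
	return &state->base;
}

static void foo_atomic_state_clear(struct drm_atomic_state *s)
{
	struct foo_atomic_state *state = container_of(s,
					struct foo_atomic_state, base);

	/* Clear the shared per-object state first, then invalidate ours. */
	drm_atomic_state_default_clear(&state->base);
	state->reserved_bandwidth = 0;
}

static void foo_atomic_state_free(struct drm_atomic_state *s)
{
	drm_atomic_state_default_free(s);
	kfree(container_of(s, struct foo_atomic_state, base));
}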
@@ -1251,13 +2245,15 @@ static inline bool drm_encoder_crtc_ok(struct drm_encoder *encoder, return !!(encoder->possible_crtcs & drm_crtc_mask(crtc)); } -extern int drm_universal_plane_init(struct drm_device *dev, +extern __printf(8, 9) +int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane, unsigned long possible_crtcs, const struct drm_plane_funcs *funcs, const uint32_t *formats, unsigned int format_count, - enum drm_plane_type type); + enum drm_plane_type type, + const char *name, ...); extern int drm_plane_init(struct drm_device *dev, struct drm_plane *plane, unsigned long possible_crtcs, @@ -1543,7 +2539,7 @@ static inline struct drm_property *drm_property_find(struct drm_device *dev, /* Plane list iterator for legacy (overlay only) planes. */ #define drm_for_each_legacy_plane(plane, dev) \ list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \ - if (plane->type == DRM_PLANE_TYPE_OVERLAY) + for_each_if (plane->type == DRM_PLANE_TYPE_OVERLAY) #define drm_for_each_plane(plane, dev) \ list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) diff --git a/drivers/include/drm/drm_crtc_helper.h b/drivers/include/drm/drm_crtc_helper.h index 3002d31538..4928aa1ef7 100644 --- a/drivers/include/drm/drm_crtc_helper.h +++ b/drivers/include/drm/drm_crtc_helper.h @@ -40,148 +40,7 @@ #include #include - -enum mode_set_atomic { - LEAVE_ATOMIC_MODE_SET, - ENTER_ATOMIC_MODE_SET, -}; - -/** - * struct drm_crtc_helper_funcs - helper operations for CRTCs - * @dpms: set power state - * @prepare: prepare the CRTC, called before @mode_set - * @commit: commit changes to CRTC, called after @mode_set - * @mode_fixup: try to fixup proposed mode for this CRTC - * @mode_set: set this mode - * @mode_set_nofb: set mode only (no scanout buffer attached) - * @mode_set_base: update the scanout buffer - * @mode_set_base_atomic: non-blocking mode set (used for kgdb support) - * @load_lut: load color palette - * @disable: disable CRTC when no longer in use - * @enable: enable CRTC - * @atomic_check: check for validity of an atomic state - * @atomic_begin: begin atomic update - * @atomic_flush: flush atomic update - * - * The helper operations are called by the mid-layer CRTC helper. - * - * Note that with atomic helpers @dpms, @prepare and @commit hooks are - * deprecated. Used @enable and @disable instead exclusively. - * - * With legacy crtc helpers there's a big semantic difference between @disable - * and the other hooks: @disable also needs to release any resources acquired in - * @mode_set (like shared PLLs). - */ -struct drm_crtc_helper_funcs { - /* - * Control power levels on the CRTC. If the mode passed in is - * unsupported, the provider must use the next lowest power level. 
- */ - void (*dpms)(struct drm_crtc *crtc, int mode); - void (*prepare)(struct drm_crtc *crtc); - void (*commit)(struct drm_crtc *crtc); - - /* Provider can fixup or change mode timings before modeset occurs */ - bool (*mode_fixup)(struct drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); - /* Actually set the mode */ - int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode, int x, int y, - struct drm_framebuffer *old_fb); - /* Actually set the mode for atomic helpers, optional */ - void (*mode_set_nofb)(struct drm_crtc *crtc); - - /* Move the crtc on the current fb to the given position *optional* */ - int (*mode_set_base)(struct drm_crtc *crtc, int x, int y, - struct drm_framebuffer *old_fb); - int (*mode_set_base_atomic)(struct drm_crtc *crtc, - struct drm_framebuffer *fb, int x, int y, - enum mode_set_atomic); - - /* reload the current crtc LUT */ - void (*load_lut)(struct drm_crtc *crtc); - - void (*disable)(struct drm_crtc *crtc); - void (*enable)(struct drm_crtc *crtc); - - /* atomic helpers */ - int (*atomic_check)(struct drm_crtc *crtc, - struct drm_crtc_state *state); - void (*atomic_begin)(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state); - void (*atomic_flush)(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state); -}; - -/** - * struct drm_encoder_helper_funcs - helper operations for encoders - * @dpms: set power state - * @save: save connector state - * @restore: restore connector state - * @mode_fixup: try to fixup proposed mode for this connector - * @prepare: part of the disable sequence, called before the CRTC modeset - * @commit: called after the CRTC modeset - * @mode_set: set this mode, optional for atomic helpers - * @get_crtc: return CRTC that the encoder is currently attached to - * @detect: connection status detection - * @disable: disable encoder when not in use (overrides DPMS off) - * @enable: enable encoder - * @atomic_check: check for validity of an atomic update - * - * The helper operations are called by the mid-layer CRTC helper. - * - * Note that with atomic helpers @dpms, @prepare and @commit hooks are - * deprecated. Used @enable and @disable instead exclusively. - * - * With legacy crtc helpers there's a big semantic difference between @disable - * and the other hooks: @disable also needs to release any resources acquired in - * @mode_set (like shared PLLs). 
- */ -struct drm_encoder_helper_funcs { - void (*dpms)(struct drm_encoder *encoder, int mode); - void (*save)(struct drm_encoder *encoder); - void (*restore)(struct drm_encoder *encoder); - - bool (*mode_fixup)(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); - void (*prepare)(struct drm_encoder *encoder); - void (*commit)(struct drm_encoder *encoder); - void (*mode_set)(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); - struct drm_crtc *(*get_crtc)(struct drm_encoder *encoder); - /* detect for DAC style encoders */ - enum drm_connector_status (*detect)(struct drm_encoder *encoder, - struct drm_connector *connector); - void (*disable)(struct drm_encoder *encoder); - - void (*enable)(struct drm_encoder *encoder); - - /* atomic helpers */ - int (*atomic_check)(struct drm_encoder *encoder, - struct drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state); -}; - -/** - * struct drm_connector_helper_funcs - helper operations for connectors - * @get_modes: get mode list for this connector - * @mode_valid: is this mode valid on the given connector? (optional) - * @best_encoder: return the preferred encoder for this connector - * @atomic_best_encoder: atomic version of @best_encoder - * - * The helper operations are called by the mid-layer CRTC helper. - */ -struct drm_connector_helper_funcs { - int (*get_modes)(struct drm_connector *connector); - enum drm_mode_status (*mode_valid)(struct drm_connector *connector, - struct drm_display_mode *mode); - struct drm_encoder *(*best_encoder)(struct drm_connector *connector); - struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector, - struct drm_connector_state *connector_state); -}; +#include extern void drm_helper_disable_unused_functions(struct drm_device *dev); extern int drm_crtc_helper_set_config(struct drm_mode_set *set); @@ -197,25 +56,7 @@ extern int drm_helper_connector_dpms(struct drm_connector *connector, int mode); extern void drm_helper_move_panel_connectors_to_head(struct drm_device *); extern void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, - struct drm_mode_fb_cmd2 *mode_cmd); - -static inline void drm_crtc_helper_add(struct drm_crtc *crtc, - const struct drm_crtc_helper_funcs *funcs) -{ - crtc->helper_private = funcs; -} - -static inline void drm_encoder_helper_add(struct drm_encoder *encoder, - const struct drm_encoder_helper_funcs *funcs) -{ - encoder->helper_private = funcs; -} - -static inline void drm_connector_helper_add(struct drm_connector *connector, - const struct drm_connector_helper_funcs *funcs) -{ - connector->helper_private = funcs; -} + const struct drm_mode_fb_cmd2 *mode_cmd); extern void drm_helper_resume_force_mode(struct drm_device *dev); @@ -229,10 +70,6 @@ int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, extern int drm_helper_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY); -extern int drm_helper_probe_single_connector_modes_nomerge(struct drm_connector - *connector, - uint32_t maxX, - uint32_t maxY); extern void drm_kms_helper_poll_init(struct drm_device *dev); extern void drm_kms_helper_poll_fini(struct drm_device *dev); extern bool drm_helper_hpd_irq_event(struct drm_device *dev); diff --git a/drivers/include/drm/drm_dp_helper.h b/drivers/include/drm/drm_dp_helper.h index 8e3d5cbccf..db2ba4973a 100644 --- a/drivers/include/drm/drm_dp_helper.h +++ b/drivers/include/drm/drm_dp_helper.h 
@@ -455,16 +455,52 @@ # define DP_EDP_14 0x03 #define DP_EDP_GENERAL_CAP_1 0x701 +# define DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP (1 << 0) +# define DP_EDP_BACKLIGHT_PIN_ENABLE_CAP (1 << 1) +# define DP_EDP_BACKLIGHT_AUX_ENABLE_CAP (1 << 2) +# define DP_EDP_PANEL_SELF_TEST_PIN_ENABLE_CAP (1 << 3) +# define DP_EDP_PANEL_SELF_TEST_AUX_ENABLE_CAP (1 << 4) +# define DP_EDP_FRC_ENABLE_CAP (1 << 5) +# define DP_EDP_COLOR_ENGINE_CAP (1 << 6) +# define DP_EDP_SET_POWER_CAP (1 << 7) #define DP_EDP_BACKLIGHT_ADJUSTMENT_CAP 0x702 +# define DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP (1 << 0) +# define DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP (1 << 1) +# define DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT (1 << 2) +# define DP_EDP_BACKLIGHT_AUX_PWM_PRODUCT_CAP (1 << 3) +# define DP_EDP_BACKLIGHT_FREQ_PWM_PIN_PASSTHRU_CAP (1 << 4) +# define DP_EDP_BACKLIGHT_FREQ_AUX_SET_CAP (1 << 5) +# define DP_EDP_DYNAMIC_BACKLIGHT_CAP (1 << 6) +# define DP_EDP_VBLANK_BACKLIGHT_UPDATE_CAP (1 << 7) #define DP_EDP_GENERAL_CAP_2 0x703 +# define DP_EDP_OVERDRIVE_ENGINE_ENABLED (1 << 0) #define DP_EDP_GENERAL_CAP_3 0x704 /* eDP 1.4 */ +# define DP_EDP_X_REGION_CAP_MASK (0xf << 0) +# define DP_EDP_X_REGION_CAP_SHIFT 0 +# define DP_EDP_Y_REGION_CAP_MASK (0xf << 4) +# define DP_EDP_Y_REGION_CAP_SHIFT 4 #define DP_EDP_DISPLAY_CONTROL_REGISTER 0x720 +# define DP_EDP_BACKLIGHT_ENABLE (1 << 0) +# define DP_EDP_BLACK_VIDEO_ENABLE (1 << 1) +# define DP_EDP_FRC_ENABLE (1 << 2) +# define DP_EDP_COLOR_ENGINE_ENABLE (1 << 3) +# define DP_EDP_VBLANK_BACKLIGHT_UPDATE_ENABLE (1 << 7) #define DP_EDP_BACKLIGHT_MODE_SET_REGISTER 0x721 +# define DP_EDP_BACKLIGHT_CONTROL_MODE_MASK (3 << 0) +# define DP_EDP_BACKLIGHT_CONTROL_MODE_PWM (0 << 0) +# define DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET (1 << 0) +# define DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD (2 << 0) +# define DP_EDP_BACKLIGHT_CONTROL_MODE_PRODUCT (3 << 0) +# define DP_EDP_BACKLIGHT_FREQ_PWM_PIN_PASSTHRU_ENABLE (1 << 2) +# define DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE (1 << 3) +# define DP_EDP_DYNAMIC_BACKLIGHT_ENABLE (1 << 4) +# define DP_EDP_REGIONAL_BACKLIGHT_ENABLE (1 << 5) +# define DP_EDP_UPDATE_REGION_BRIGHTNESS (1 << 6) /* eDP 1.4 */ #define DP_EDP_BACKLIGHT_BRIGHTNESS_MSB 0x722 #define DP_EDP_BACKLIGHT_BRIGHTNESS_LSB 0x723 diff --git a/drivers/include/drm/drm_dp_mst_helper.h b/drivers/include/drm/drm_dp_mst_helper.h index f356f97164..fdb47051d5 100644 --- a/drivers/include/drm/drm_dp_mst_helper.h +++ b/drivers/include/drm/drm_dp_mst_helper.h @@ -88,6 +88,7 @@ struct drm_dp_mst_port { struct drm_dp_mst_topology_mgr *mgr; struct edid *cached_edid; /* for DP logical ports - make tiling work */ + bool has_audio; }; /** @@ -214,13 +215,13 @@ struct drm_dp_sideband_msg_rx { struct drm_dp_sideband_msg_hdr initial_hdr; }; - +#define DRM_DP_MAX_SDP_STREAMS 16 struct drm_dp_allocate_payload { u8 port_number; u8 number_sdp_streams; u8 vcpi; u16 pbn; - u8 sdp_stream_sink[8]; + u8 sdp_stream_sink[DRM_DP_MAX_SDP_STREAMS]; }; struct drm_dp_allocate_payload_ack_reply { @@ -417,7 +418,7 @@ struct drm_dp_payload { struct drm_dp_mst_topology_mgr { struct device *dev; - struct drm_dp_mst_topology_cbs *cbs; + const struct drm_dp_mst_topology_cbs *cbs; int max_dpcd_transaction_bytes; struct drm_dp_aux *aux; /* auxch for this topology mgr to use */ int max_payloads; @@ -477,6 +478,8 @@ int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handl enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); +bool 
drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port); struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); diff --git a/drivers/include/drm/drm_fb_helper.h b/drivers/include/drm/drm_fb_helper.h index f42ca2aa86..a4dffbed3a 100644 --- a/drivers/include/drm/drm_fb_helper.h +++ b/drivers/include/drm/drm_fb_helper.h @@ -34,6 +34,11 @@ struct drm_fb_helper; #include +enum mode_set_atomic { + LEAVE_ATOMIC_MODE_SET, + ENTER_ATOMIC_MODE_SET, +}; + struct drm_fb_offset { int x, y; }; @@ -74,25 +79,76 @@ struct drm_fb_helper_surface_size { /** * struct drm_fb_helper_funcs - driver callbacks for the fbdev emulation library - * @gamma_set: Set the given gamma lut register on the given crtc. - * @gamma_get: Read the given gamma lut register on the given crtc, used to - * save the current lut when force-restoring the fbdev for e.g. - * kdbg. - * @fb_probe: Driver callback to allocate and initialize the fbdev info - * structure. Furthermore it also needs to allocate the drm - * framebuffer used to back the fbdev. - * @initial_config: Setup an initial fbdev display configuration * * Driver callbacks used by the fbdev emulation helper library. */ struct drm_fb_helper_funcs { + /** + * @gamma_set: + * + * Set the given gamma LUT register on the given CRTC. + * + * This callback is optional. + * + * FIXME: + * + * This callback is functionally redundant with the core gamma table + * support and simply exists because the fbdev hasn't yet been + * refactored to use the core gamma table interfaces. + */ void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, int regno); + /** + * @gamma_get: + * + * Read the given gamma LUT register on the given CRTC, used to save the + * current LUT when force-restoring the fbdev for e.g. kdbg. + * + * This callback is optional. + * + * FIXME: + * + * This callback is functionally redundant with the core gamma table + * support and simply exists because the fbdev hasn't yet been + * refactored to use the core gamma table interfaces. + */ void (*gamma_get)(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, int regno); + /** + * @fb_probe: + * + * Driver callback to allocate and initialize the fbdev info structure. + * Furthermore it also needs to allocate the DRM framebuffer used to + * back the fbdev. + * + * This callback is mandatory. + * + * RETURNS: + * + * The driver should return 0 on success and a negative error code on + * failure. + */ int (*fb_probe)(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes); + + /** + * @initial_config: + * + * Driver callback to setup an initial fbdev display configuration. + * Drivers can use this callback to tell the fbdev emulation what the + * preferred initial configuration is. This is useful to implement + * smooth booting where the fbdev (and subsequently all userspace) never + * changes the mode, but always inherits the existing configuration. + * + * This callback is optional. + * + * RETURNS: + * + * The driver should return true if a suitable initial configuration has + * been filled out and false when the fbdev helper should fall back to + * the default probing logic. 
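A sketch (not part of the patch) of how the new eDP backlight DPCD bits above could be used with the existing drm_dp_dpcd_readb()/drm_dp_dpcd_writeb() AUX helpers to switch a panel's TCON to AUX-based brightness control; error handling is reduced to the bare minimum:

        static int example_edp_enable_aux_backlight(struct drm_dp_aux *aux)
        {
                u8 cap, mode;

                /* does the TCON support brightness control over AUX at all? */
                if (drm_dp_dpcd_readb(aux, DP_EDP_BACKLIGHT_ADJUSTMENT_CAP, &cap) != 1)
                        return -EIO;
                if (!(cap & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP))
                        return -ENODEV;

                /* select DPCD (AUX) brightness control in the mode register */
                if (drm_dp_dpcd_readb(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &mode) != 1)
                        return -EIO;
                mode &= ~DP_EDP_BACKLIGHT_CONTROL_MODE_MASK;
                mode |= DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD;
                if (drm_dp_dpcd_writeb(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, mode) != 1)
                        return -EIO;

                return 0;
        }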
+ */ bool (*initial_config)(struct drm_fb_helper *fb_helper, struct drm_fb_helper_crtc **crtcs, struct drm_display_mode **modes, @@ -105,18 +161,22 @@ struct drm_fb_helper_connector { }; /** - * struct drm_fb_helper - helper to emulate fbdev on top of kms + * struct drm_fb_helper - main structure to emulate fbdev on top of KMS * @fb: Scanout framebuffer object * @dev: DRM device * @crtc_count: number of possible CRTCs * @crtc_info: per-CRTC helper state (mode, x/y offset, etc) * @connector_count: number of connected connectors * @connector_info_alloc_count: size of connector_info + * @connector_info: array of per-connector information * @funcs: driver callbacks for fb helper * @fbdev: emulated fbdev device info struct * @pseudo_palette: fake palette of 16 colors - * @kernel_fb_list: list_head in kernel_fb_helper_list - * @delayed_hotplug: was there a hotplug while kms master active? + * + * This is the main structure used by the fbdev helpers. Drivers supporting + * fbdev emulation should embedded this into their overall driver structure. + * Drivers must also fill out a struct &drm_fb_helper_funcs with a few + * operations. */ struct drm_fb_helper { struct drm_framebuffer *fb; @@ -129,10 +189,21 @@ struct drm_fb_helper { const struct drm_fb_helper_funcs *funcs; struct fb_info *fbdev; u32 pseudo_palette[17]; + + /** + * @kernel_fb_list: + * + * Entry on the global kernel_fb_helper_list, used for kgdb entry/exit. + */ struct list_head kernel_fb_list; - /* we got a hotplug but fbdev wasn't running the console - delay until next set_par */ + /** + * @delayed_hotplug: + * + * A hotplug was received while fbdev wasn't in control of the DRM + * device, i.e. another KMS master was active. The output configuration + * needs to be reprobe when fbdev is in control again. + */ bool delayed_hotplug; /** diff --git a/drivers/include/drm/drm_gem.h b/drivers/include/drm/drm_gem.h index 15e7f00738..0b3e11ab87 100644 --- a/drivers/include/drm/drm_gem.h +++ b/drivers/include/drm/drm_gem.h @@ -35,76 +35,129 @@ */ /** - * This structure defines the drm_mm memory object, which will be used by the - * DRM for its buffer objects. + * struct drm_gem_object - GEM buffer object + * + * This structure defines the generic parts for GEM buffer objects, which are + * mostly around handling mmap and userspace handles. + * + * Buffer objects are often abbreviated to BO. */ struct drm_gem_object { - /** Reference count of this object */ + /** + * @refcount: + * + * Reference count of this object + * + * Please use drm_gem_object_reference() to acquire and + * drm_gem_object_unreference() or drm_gem_object_unreference_unlocked() + * to release a reference to a GEM buffer object. + */ struct kref refcount; /** - * handle_count - gem file_priv handle count of this object + * @handle_count: + * + * This is the GEM file_priv handle count of this object. * * Each handle also holds a reference. Note that when the handle_count * drops to 0 any global names (e.g. the id in the flink namespace) will * be cleared. * * Protected by dev->object_name_lock. - * */ + */ unsigned handle_count; - /** Related drm device */ + /** + * @dev: DRM dev this object belongs to. + */ struct drm_device *dev; - /** File representing the shmem storage */ + /** + * @filp: + * + * SHMEM file node used as backing storage for swappable buffer objects. + * GEM also supports driver private objects with driver-specific backing + * storage (contiguous CMA memory, special reserved blocks). In this + * case @filp is NULL. 
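Referring back to the drm_fb_helper hunk above, a sketch (not part of the patch) of the recommended embedding pattern: a hypothetical driver wraps struct drm_fb_helper in its own fbdev state and fills out a struct drm_fb_helper_funcs with the mandatory @fb_probe hook:

        struct example_fbdev {
                struct drm_fb_helper helper;    /* use container_of() to get back here */
                struct drm_framebuffer *fb;
        };

        static int example_fb_probe(struct drm_fb_helper *helper,
                                    struct drm_fb_helper_surface_size *sizes);

        static const struct drm_fb_helper_funcs example_fb_helper_funcs = {
                .fb_probe = example_fb_probe,   /* mandatory */
                /* .gamma_set, .gamma_get and .initial_config are optional */
        };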
+ */ struct file *filp; - /* Mapping info for this object */ + /** + * @vma_node: + * + * Mapping info for this object to support mmap. Drivers are supposed to + * allocate the mmap offset using drm_gem_create_mmap_offset(). The + * offset itself can be retrieved using drm_vma_node_offset_addr(). + * + * Memory mapping itself is handled by drm_gem_mmap(), which also checks + * that userspace is allowed to access the object. + */ struct drm_vma_offset_node vma_node; /** + * @size: + * * Size of the object, in bytes. Immutable over the object's * lifetime. */ size_t size; /** + * @name: + * * Global name for this object, starts at 1. 0 means unnamed. - * Access is covered by the object_name_lock in the related drm_device + * Access is covered by dev->object_name_lock. This is used by the GEM_FLINK + * and GEM_OPEN ioctls. */ int name; /** - * Memory domains. These monitor which caches contain read/write data + * @read_domains: + * + * Read memory domains. These monitor which caches contain read/write data * related to the object. When transitioning from one set of domains * to another, the driver is called to ensure that caches are suitably - * flushed and invalidated + * flushed and invalidated. */ uint32_t read_domains; + + /** + * @write_domain: Corresponding unique write memory domain. + */ uint32_t write_domain; /** + * @pending_read_domains: + * * While validating an exec operation, the * new read/write domain values are computed here. * They will be transferred to the above values * at the point that any cache flushing occurs */ uint32_t pending_read_domains; + + /** + * @pending_write_domain: Write domain similar to @pending_read_domains. + */ uint32_t pending_write_domain; /** - * dma_buf - dma buf associated with this GEM object + * @dma_buf: + * + * dma-buf associated with this GEM object. * * Pointer to the dma-buf associated with this gem object (either * through importing or exporting). We break the resulting reference * loop when the last gem handle for this object is released. * - * Protected by obj->object_name_lock + * Protected by obj->object_name_lock. */ struct dma_buf *dma_buf; /** - * import_attach - dma buf attachment backing this object + * @import_attach: + * + * dma-buf attachment backing this object. * * Any foreign dma_buf imported as a gem object has this set to the * attachment point for the device. This is invariant over the lifetime @@ -133,12 +186,30 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size, struct vm_area_struct *vma); int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); +/** + * drm_gem_object_reference - acquire a GEM BO reference + * @obj: GEM buffer object + * + * This acquires additional reference to @obj. It is illegal to call this + * without already holding a reference. No locks required. + */ static inline void drm_gem_object_reference(struct drm_gem_object *obj) { kref_get(&obj->refcount); } +/** + * drm_gem_object_unreference - release a GEM BO reference + * @obj: GEM buffer object + * + * This releases a reference to @obj. Callers must hold the dev->struct_mutex + * lock when calling this function, even when the driver doesn't use + * dev->struct_mutex for anything. + * + * For drivers not encumbered with legacy locking use + * drm_gem_object_unreference_unlocked() instead. 
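A sketch (not part of the patch) of the GEM reference counting rules documented above; example_use_bo() is hypothetical:

        static void example_use_bo(struct drm_gem_object *obj)
        {
                /* take a temporary reference; the caller must already hold one */
                drm_gem_object_reference(obj);

                /* ... access obj->size, set up a mapping, etc. ... */

                /* drop it again without needing dev->struct_mutex */
                drm_gem_object_unreference_unlocked(obj);
        }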
+ */ static inline void drm_gem_object_unreference(struct drm_gem_object *obj) { @@ -149,6 +220,13 @@ drm_gem_object_unreference(struct drm_gem_object *obj) } } +/** + * drm_gem_object_unreference_unlocked - release a GEM BO reference + * @obj: GEM buffer object + * + * This releases a reference to @obj. Callers must not hold the + * dev->struct_mutex lock when calling this function. + */ static inline void drm_gem_object_unreference_unlocked(struct drm_gem_object *obj) { diff --git a/drivers/include/drm/drm_mipi_dsi.h b/drivers/include/drm/drm_mipi_dsi.h index f1d8d0dbb4..1b3b1f8c8c 100644 --- a/drivers/include/drm/drm_mipi_dsi.h +++ b/drivers/include/drm/drm_mipi_dsi.h @@ -163,9 +163,36 @@ static inline struct mipi_dsi_device *to_mipi_dsi_device(struct device *dev) return container_of(dev, struct mipi_dsi_device, dev); } +/** + * mipi_dsi_pixel_format_to_bpp - obtain the number of bits per pixel for any + * given pixel format defined by the MIPI DSI + * specification + * @fmt: MIPI DSI pixel format + * + * Returns: The number of bits per pixel of the given pixel format. + */ +static inline int mipi_dsi_pixel_format_to_bpp(enum mipi_dsi_pixel_format fmt) +{ + switch (fmt) { + case MIPI_DSI_FMT_RGB888: + case MIPI_DSI_FMT_RGB666: + return 24; + + case MIPI_DSI_FMT_RGB666_PACKED: + return 18; + + case MIPI_DSI_FMT_RGB565: + return 16; + } + + return -EINVAL; +} + struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np); int mipi_dsi_attach(struct mipi_dsi_device *dsi); int mipi_dsi_detach(struct mipi_dsi_device *dsi); +int mipi_dsi_shutdown_peripheral(struct mipi_dsi_device *dsi); +int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi); int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi, u16 value); diff --git a/drivers/include/drm/drm_mm.h b/drivers/include/drm/drm_mm.h index 62c9546f77..aa18492ce7 100644 --- a/drivers/include/drm/drm_mm.h +++ b/drivers/include/drm/drm_mm.h @@ -148,8 +148,7 @@ static inline u64 drm_mm_hole_node_start(struct drm_mm_node *hole_node) static inline u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node) { - return list_entry(hole_node->node_list.next, - struct drm_mm_node, node_list)->start; + return list_next_entry(hole_node, node_list)->start; } /** @@ -180,6 +179,14 @@ static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node) &(mm)->head_node.node_list, \ node_list) +#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \ + for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \ + &entry->hole_stack != &(mm)->hole_stack ? \ + hole_start = drm_mm_hole_node_start(entry), \ + hole_end = drm_mm_hole_node_end(entry), \ + 1 : 0; \ + entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack)) + /** * drm_mm_for_each_hole - iterator to walk over all holes * @entry: drm_mm_node used internally to track progress @@ -200,20 +207,7 @@ static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node) * going backwards. */ #define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \ - for (entry = list_entry((mm)->hole_stack.next, struct drm_mm_node, hole_stack); \ - &entry->hole_stack != &(mm)->hole_stack ? 
\ - hole_start = drm_mm_hole_node_start(entry), \ - hole_end = drm_mm_hole_node_end(entry), \ - 1 : 0; \ - entry = list_entry(entry->hole_stack.next, struct drm_mm_node, hole_stack)) - -#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \ - for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \ - &entry->hole_stack != &(mm)->hole_stack ? \ - hole_start = drm_mm_hole_node_start(entry), \ - hole_end = drm_mm_hole_node_end(entry), \ - 1 : 0; \ - entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack)) + __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, 0) /* * Basic range manager support (drm_mm.c) diff --git a/drivers/include/drm/drm_modes.h b/drivers/include/drm/drm_modes.h index 08a8cac9e5..625966a906 100644 --- a/drivers/include/drm/drm_modes.h +++ b/drivers/include/drm/drm_modes.h @@ -35,46 +35,91 @@ * structures). */ +/** + * enum drm_mode_status - hardware support status of a mode + * @MODE_OK: Mode OK + * @MODE_HSYNC: hsync out of range + * @MODE_VSYNC: vsync out of range + * @MODE_H_ILLEGAL: mode has illegal horizontal timings + * @MODE_V_ILLEGAL: mode has illegal horizontal timings + * @MODE_BAD_WIDTH: requires an unsupported linepitch + * @MODE_NOMODE: no mode with a matching name + * @MODE_NO_INTERLACE: interlaced mode not supported + * @MODE_NO_DBLESCAN: doublescan mode not supported + * @MODE_NO_VSCAN: multiscan mode not supported + * @MODE_MEM: insufficient video memory + * @MODE_VIRTUAL_X: mode width too large for specified virtual size + * @MODE_VIRTUAL_Y: mode height too large for specified virtual size + * @MODE_MEM_VIRT: insufficient video memory given virtual size + * @MODE_NOCLOCK: no fixed clock available + * @MODE_CLOCK_HIGH: clock required is too high + * @MODE_CLOCK_LOW: clock required is too low + * @MODE_CLOCK_RANGE: clock/mode isn't in a ClockRange + * @MODE_BAD_HVALUE: horizontal timing was out of range + * @MODE_BAD_VVALUE: vertical timing was out of range + * @MODE_BAD_VSCAN: VScan value out of range + * @MODE_HSYNC_NARROW: horizontal sync too narrow + * @MODE_HSYNC_WIDE: horizontal sync too wide + * @MODE_HBLANK_NARROW: horizontal blanking too narrow + * @MODE_HBLANK_WIDE: horizontal blanking too wide + * @MODE_VSYNC_NARROW: vertical sync too narrow + * @MODE_VSYNC_WIDE: vertical sync too wide + * @MODE_VBLANK_NARROW: vertical blanking too narrow + * @MODE_VBLANK_WIDE: vertical blanking too wide + * @MODE_PANEL: exceeds panel dimensions + * @MODE_INTERLACE_WIDTH: width too large for interlaced mode + * @MODE_ONE_WIDTH: only one width is supported + * @MODE_ONE_HEIGHT: only one height is supported + * @MODE_ONE_SIZE: only one resolution is supported + * @MODE_NO_REDUCED: monitor doesn't accept reduced blanking + * @MODE_NO_STEREO: stereo modes not supported + * @MODE_STALE: mode has become stale + * @MODE_BAD: unspecified reason + * @MODE_ERROR: error condition + * + * This enum is used to filter out modes not supported by the driver/hardware + * combination. 
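A sketch (not part of the patch) of the hole iterator from the drm_mm.h hunk above, which is now a thin wrapper around __drm_mm_for_each_hole(); example_total_hole_size() is hypothetical:

        static u64 example_total_hole_size(struct drm_mm *mm)
        {
                struct drm_mm_node *entry;
                u64 hole_start, hole_end;
                u64 total = 0;

                /* walks every hole, forwards; pass 1 as the last argument of
                 * __drm_mm_for_each_hole() for the backwards variant */
                drm_mm_for_each_hole(entry, mm, hole_start, hole_end)
                        total += hole_end - hole_start;

                return total;
        }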
+ */ enum drm_mode_status { - MODE_OK = 0, /* Mode OK */ - MODE_HSYNC, /* hsync out of range */ - MODE_VSYNC, /* vsync out of range */ - MODE_H_ILLEGAL, /* mode has illegal horizontal timings */ - MODE_V_ILLEGAL, /* mode has illegal horizontal timings */ - MODE_BAD_WIDTH, /* requires an unsupported linepitch */ - MODE_NOMODE, /* no mode with a matching name */ - MODE_NO_INTERLACE, /* interlaced mode not supported */ - MODE_NO_DBLESCAN, /* doublescan mode not supported */ - MODE_NO_VSCAN, /* multiscan mode not supported */ - MODE_MEM, /* insufficient video memory */ - MODE_VIRTUAL_X, /* mode width too large for specified virtual size */ - MODE_VIRTUAL_Y, /* mode height too large for specified virtual size */ - MODE_MEM_VIRT, /* insufficient video memory given virtual size */ - MODE_NOCLOCK, /* no fixed clock available */ - MODE_CLOCK_HIGH, /* clock required is too high */ - MODE_CLOCK_LOW, /* clock required is too low */ - MODE_CLOCK_RANGE, /* clock/mode isn't in a ClockRange */ - MODE_BAD_HVALUE, /* horizontal timing was out of range */ - MODE_BAD_VVALUE, /* vertical timing was out of range */ - MODE_BAD_VSCAN, /* VScan value out of range */ - MODE_HSYNC_NARROW, /* horizontal sync too narrow */ - MODE_HSYNC_WIDE, /* horizontal sync too wide */ - MODE_HBLANK_NARROW, /* horizontal blanking too narrow */ - MODE_HBLANK_WIDE, /* horizontal blanking too wide */ - MODE_VSYNC_NARROW, /* vertical sync too narrow */ - MODE_VSYNC_WIDE, /* vertical sync too wide */ - MODE_VBLANK_NARROW, /* vertical blanking too narrow */ - MODE_VBLANK_WIDE, /* vertical blanking too wide */ - MODE_PANEL, /* exceeds panel dimensions */ - MODE_INTERLACE_WIDTH, /* width too large for interlaced mode */ - MODE_ONE_WIDTH, /* only one width is supported */ - MODE_ONE_HEIGHT, /* only one height is supported */ - MODE_ONE_SIZE, /* only one resolution is supported */ - MODE_NO_REDUCED, /* monitor doesn't accept reduced blanking */ - MODE_NO_STEREO, /* stereo modes not supported */ - MODE_UNVERIFIED = -3, /* mode needs to reverified */ - MODE_BAD = -2, /* unspecified reason */ - MODE_ERROR = -1 /* error condition */ + MODE_OK = 0, + MODE_HSYNC, + MODE_VSYNC, + MODE_H_ILLEGAL, + MODE_V_ILLEGAL, + MODE_BAD_WIDTH, + MODE_NOMODE, + MODE_NO_INTERLACE, + MODE_NO_DBLESCAN, + MODE_NO_VSCAN, + MODE_MEM, + MODE_VIRTUAL_X, + MODE_VIRTUAL_Y, + MODE_MEM_VIRT, + MODE_NOCLOCK, + MODE_CLOCK_HIGH, + MODE_CLOCK_LOW, + MODE_CLOCK_RANGE, + MODE_BAD_HVALUE, + MODE_BAD_VVALUE, + MODE_BAD_VSCAN, + MODE_HSYNC_NARROW, + MODE_HSYNC_WIDE, + MODE_HBLANK_NARROW, + MODE_HBLANK_WIDE, + MODE_VSYNC_NARROW, + MODE_VSYNC_WIDE, + MODE_VBLANK_NARROW, + MODE_VBLANK_WIDE, + MODE_PANEL, + MODE_INTERLACE_WIDTH, + MODE_ONE_WIDTH, + MODE_ONE_HEIGHT, + MODE_ONE_SIZE, + MODE_NO_REDUCED, + MODE_NO_STEREO, + MODE_STALE = -3, + MODE_BAD = -2, + MODE_ERROR = -1 }; #define DRM_MODE_TYPE_CLOCK_CRTC_C (DRM_MODE_TYPE_CLOCK_C | \ @@ -96,17 +141,125 @@ enum drm_mode_status { #define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF +/** + * struct drm_display_mode - DRM kernel-internal display mode structure + * @hdisplay: horizontal display size + * @hsync_start: horizontal sync start + * @hsync_end: horizontal sync end + * @htotal: horizontal total size + * @hskew: horizontal skew?! + * @vdisplay: vertical display size + * @vsync_start: vertical sync start + * @vsync_end: vertical sync end + * @vtotal: vertical total size + * @vscan: vertical scan?! 
+ * @crtc_hdisplay: hardware mode horizontal display size + * @crtc_hblank_start: hardware mode horizontal blank start + * @crtc_hblank_end: hardware mode horizontal blank end + * @crtc_hsync_start: hardware mode horizontal sync start + * @crtc_hsync_end: hardware mode horizontal sync end + * @crtc_htotal: hardware mode horizontal total size + * @crtc_hskew: hardware mode horizontal skew?! + * @crtc_vdisplay: hardware mode vertical display size + * @crtc_vblank_start: hardware mode vertical blank start + * @crtc_vblank_end: hardware mode vertical blank end + * @crtc_vsync_start: hardware mode vertical sync start + * @crtc_vsync_end: hardware mode vertical sync end + * @crtc_vtotal: hardware mode vertical total size + * + * The horizontal and vertical timings are defined per the following diagram. + * + * + * Active Front Sync Back + * Region Porch Porch + * <-----------------------><----------------><-------------><--------------> + * //////////////////////| + * ////////////////////// | + * ////////////////////// |.................. ................ + * _______________ + * <----- [hv]display -----> + * <------------- [hv]sync_start ------------> + * <--------------------- [hv]sync_end ---------------------> + * <-------------------------------- [hv]total ----------------------------->* + * + * This structure contains two copies of timings. First are the plain timings, + * which specify the logical mode, as it would be for a progressive 1:1 scanout + * at the refresh rate userspace can observe through vblank timestamps. Then + * there's the hardware timings, which are corrected for interlacing, + * double-clocking and similar things. They are provided as a convenience, and + * can be appropriately computed using drm_mode_set_crtcinfo(). + */ struct drm_display_mode { - /* Header */ + /** + * @head: + * + * struct list_head for mode lists. + */ struct list_head head; + + /** + * @base: + * + * A display mode is a normal modeset object, possibly including public + * userspace id. + * + * FIXME: + * + * This can probably be removed since the entire concept of userspace + * managing modes explicitly has never landed in upstream kernel mode + * setting support. + */ struct drm_mode_object base; + /** + * @name: + * + * Human-readable name of the mode, filled out with drm_mode_set_name(). + */ char name[DRM_DISPLAY_MODE_LEN]; + /** + * @status: + * + * Status of the mode, used to filter out modes not supported by the + * hardware. See enum &drm_mode_status. + */ enum drm_mode_status status; + + /** + * @type: + * + * A bitmask of flags, mostly about the source of a mode. Possible flags + * are: + * + * - DRM_MODE_TYPE_BUILTIN: Meant for hard-coded modes, effectively + * unused. + * - DRM_MODE_TYPE_PREFERRED: Preferred mode, usually the native + * resolution of an LCD panel. There should only be one preferred + * mode per connector at any given time. + * - DRM_MODE_TYPE_DRIVER: Mode created by the driver, which is all of + * them really. Drivers must set this bit for all modes they create + * and expose to userspace. + * + * Plus a big list of flags which shouldn't be used at all, but are + * still around since these flags are also used in the userspace ABI: + * + * - DRM_MODE_TYPE_DEFAULT: Again a leftover, use + * DRM_MODE_TYPE_PREFERRED instead. + * - DRM_MODE_TYPE_CLOCK_C and DRM_MODE_TYPE_CRTC_C: Define leftovers + * which are stuck around for hysterical raisins only. No one has an + * idea what they were meant for. Don't use. 
+ * - DRM_MODE_TYPE_USERDEF: Mode defined by userspace, again a vestige + * from older kms designs where userspace had to first add a custom + * mode to the kernel's mode list before it could use it. Don't use. + */ unsigned int type; - /* Proposed mode values */ + /** + * @clock: + * + * Pixel clock in kHz. + */ int clock; /* in kHz */ int hdisplay; int hsync_start; @@ -118,14 +271,74 @@ struct drm_display_mode { int vsync_end; int vtotal; int vscan; + /** + * @flags: + * + * Sync and timing flags: + * + * - DRM_MODE_FLAG_PHSYNC: horizontal sync is active high. + * - DRM_MODE_FLAG_NHSYNC: horizontal sync is active low. + * - DRM_MODE_FLAG_PVSYNC: vertical sync is active high. + * - DRM_MODE_FLAG_NVSYNC: vertical sync is active low. + * - DRM_MODE_FLAG_INTERLACE: mode is interlaced. + * - DRM_MODE_FLAG_DBLSCAN: mode uses doublescan. + * - DRM_MODE_FLAG_CSYNC: mode uses composite sync. + * - DRM_MODE_FLAG_PCSYNC: composite sync is active high. + * - DRM_MODE_FLAG_NCSYNC: composite sync is active low. + * - DRM_MODE_FLAG_HSKEW: hskew provided (not used?). + * - DRM_MODE_FLAG_BCAST: not used? + * - DRM_MODE_FLAG_PIXMUX: not used? + * - DRM_MODE_FLAG_DBLCLK: double-clocked mode. + * - DRM_MODE_FLAG_CLKDIV2: half-clocked mode. + * + * Additionally there's flags to specify how 3D modes are packed: + * + * - DRM_MODE_FLAG_3D_NONE: normal, non-3D mode. + * - DRM_MODE_FLAG_3D_FRAME_PACKING: 2 full frames for left and right. + * - DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE: interleaved like fields. + * - DRM_MODE_FLAG_3D_LINE_ALTERNATIVE: interleaved lines. + * - DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL: side-by-side full frames. + * - DRM_MODE_FLAG_3D_L_DEPTH: ? + * - DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH: ? + * - DRM_MODE_FLAG_3D_TOP_AND_BOTTOM: frame split into top and bottom + * parts. + * - DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF: frame split into left and + * right parts. + */ unsigned int flags; - /* Addressable image size (may be 0 for projectors, etc.) */ + /** + * @width_mm: + * + * Addressable size of the output in mm, projectors should set this to + * 0. + */ int width_mm; + + /** + * @height_mm: + * + * Addressable size of the output in mm, projectors should set this to + * 0. + */ int height_mm; - /* Actual mode we give to hw */ - int crtc_clock; /* in KHz */ + /** + * @crtc_clock: + * + * Actual pixel or dot clock in the hardware. This differs from the + * logical @clock when e.g. using interlacing, double-clocking, stereo + * modes or other fancy stuff that changes the timings and signals + * actually sent over the wire. + * + * This is again in kHz. + * + * Note that with digital outputs like HDMI or DP there's usually a + * massive confusion between the dot clock and the signal clock at the + * bit encoding level. Especially when a 8b/10b encoding is used and the + * difference is exactly a factor of 10. + */ + int crtc_clock; int crtc_hdisplay; int crtc_hblank_start; int crtc_hblank_end; @@ -140,12 +353,48 @@ struct drm_display_mode { int crtc_vsync_end; int crtc_vtotal; - /* Driver private mode info */ + /** + * @private: + * + * Pointer for driver private data. This can only be used for mode + * objects passed to drivers in modeset operations. It shouldn't be used + * by atomic drivers since they can store any additional data by + * subclassing state structures. + */ int *private; + + /** + * @private_flags: + * + * Similar to @private, but just an integer. 
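To make the units above concrete (a sketch, not part of the patch): for a progressive 1920x1080@60 mode with htotal = 2200 and vtotal = 1125, @clock is 2200 * 1125 * 60 / 1000 = 148500 kHz, and the refresh rate can be recovered as follows (ignoring interlace, doublescan and vscan, which drm_mode_vrefresh() in the core also accounts for):

        static int example_mode_vrefresh(const struct drm_display_mode *mode)
        {
                /* mode->clock is in kHz, hence the factor of 1000 */
                return DIV_ROUND_CLOSEST(mode->clock * 1000,
                                         mode->htotal * mode->vtotal);
        }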
+ */ int private_flags; - int vrefresh; /* in Hz */ - int hsync; /* in kHz */ + /** + * @vrefresh: + * + * Vertical refresh rate, for debug output in human readable form. Not + * used in a functional way. + * + * This value is in Hz. + */ + int vrefresh; + + /** + * @hsync: + * + * Horizontal refresh rate, for debug output in human readable form. Not + * used in a functional way. + * + * This value is in kHz. + */ + int hsync; + + /** + * @picture_aspect_ratio: + * + * Field for setting the HDMI picture aspect ratio of a mode. + */ enum hdmi_picture_aspect picture_aspect_ratio; }; @@ -222,6 +471,8 @@ struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev, const struct drm_display_mode *mode); bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2); +bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, + const struct drm_display_mode *mode2); bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2); @@ -232,7 +483,7 @@ enum drm_mode_status drm_mode_validate_size(const struct drm_display_mode *mode, void drm_mode_prune_invalid(struct drm_device *dev, struct list_head *mode_list, bool verbose); void drm_mode_sort(struct list_head *mode_list); -void drm_mode_connector_list_update(struct drm_connector *connector, bool merge_type_bits); +void drm_mode_connector_list_update(struct drm_connector *connector); /* parsing cmdline modes */ bool diff --git a/drivers/include/drm/drm_modeset_helper_vtables.h b/drivers/include/drm/drm_modeset_helper_vtables.h new file mode 100644 index 0000000000..a126a0d7ae --- /dev/null +++ b/drivers/include/drm/drm_modeset_helper_vtables.h @@ -0,0 +1,928 @@ +/* + * Copyright © 2006 Keith Packard + * Copyright © 2007-2008 Dave Airlie + * Copyright © 2007-2008 Intel Corporation + * Jesse Barnes + * Copyright © 2011-2013 Intel Corporation + * Copyright © 2015 Intel Corporation + * Daniel Vetter + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef __DRM_MODESET_HELPER_VTABLES_H__ +#define __DRM_MODESET_HELPER_VTABLES_H__ + +#include + +/** + * DOC: overview + * + * The DRM mode setting helper functions are common code for drivers to use if + * they wish. Drivers are not forced to use this code in their + * implementations but it would be useful if the code they do use at least + * provides a consistent interface and operation to userspace. 
Therefore it is + * highly recommended to use the provided helpers as much as possible. + * + * Because there is only one pointer per modeset object to hold a vfunc table + * for helper libraries they are by necessity shared among the different + * helpers. + * + * To make this clear all the helper vtables are pulled together in this location here. + */ + +enum mode_set_atomic; + +/** + * struct drm_crtc_helper_funcs - helper operations for CRTCs + * + * These hooks are used by the legacy CRTC helpers, the transitional plane + * helpers and the new atomic modesetting helpers. + */ +struct drm_crtc_helper_funcs { + /** + * @dpms: + * + * Callback to control power levels on the CRTC. If the mode passed in + * is unsupported, the provider must use the next lowest power level. + * This is used by the legacy CRTC helpers to implement DPMS + * functionality in drm_helper_connector_dpms(). + * + * This callback is also used to disable a CRTC by calling it with + * DRM_MODE_DPMS_OFF if the @disable hook isn't used. + * + * This callback is used by the legacy CRTC helpers. Atomic helpers + * also support using this hook for enabling and disabling a CRTC to + * facilitate transitions to atomic, but it is deprecated. Instead + * @enable and @disable should be used. + */ + void (*dpms)(struct drm_crtc *crtc, int mode); + + /** + * @prepare: + * + * This callback should prepare the CRTC for a subsequent modeset, which + * in practice means the driver should disable the CRTC if it is + * running. Most drivers ended up implementing this by calling their + * @dpms hook with DRM_MODE_DPMS_OFF. + * + * This callback is used by the legacy CRTC helpers. Atomic helpers + * also support using this hook for disabling a CRTC to facilitate + * transitions to atomic, but it is deprecated. Instead @disable should + * be used. + */ + void (*prepare)(struct drm_crtc *crtc); + + /** + * @commit: + * + * This callback should commit the new mode on the CRTC after a modeset, + * which in practice means the driver should enable the CRTC. Most + * drivers ended up implementing this by calling their @dpms hook with + * DRM_MODE_DPMS_ON. + * + * This callback is used by the legacy CRTC helpers. Atomic helpers + * also support using this hook for enabling a CRTC to facilitate + * transitions to atomic, but it is deprecated. Instead @enable should + * be used. + */ + void (*commit)(struct drm_crtc *crtc); + + /** + * @mode_fixup: + * + * This callback is used to validate a mode. The parameter mode is the + * display mode that userspace requested, adjusted_mode is the mode the + * encoders need to be fed with. Note that this is the inverse semantics + * of the meaning for the &drm_encoder and &drm_bridge + * ->mode_fixup() functions. If the CRTC cannot support the requested + * conversion from mode to adjusted_mode it should reject the modeset. + * + * This function is used by both legacy CRTC helpers and atomic helpers. + * With atomic helpers it is optional. + * + * NOTE: + * + * This function is called in the check phase of atomic modesets, which + * can be aborted for any reason (including on userspace's request to + * just check whether a configuration would be possible). Atomic drivers + * MUST NOT touch any persistent state (hardware or software) or data + * structures except the passed in adjusted_mode parameter. + * + * This is in contrast to the legacy CRTC helpers where this was + * allowed. + * + * Atomic drivers which need to inspect and adjust more state should + * instead use the @atomic_check callback. 
+ * + * Also beware that neither core nor helpers filter modes before + * passing them to the driver: While the list of modes that is + * advertised to userspace is filtered using the connector's + * ->mode_valid() callback, neither the core nor the helpers do any + * filtering on modes passed in from userspace when setting a mode. It + * is therefore possible for userspace to pass in a mode that was + * previously filtered out using ->mode_valid() or add a custom mode + * that wasn't probed from EDID or similar to begin with. Even though + * this is an advanced feature and rarely used nowadays, some users rely + * on being able to specify modes manually so drivers must be prepared + * to deal with it. Specifically this means that all drivers need not + * only validate modes in ->mode_valid() but also in ->mode_fixup() to + * make sure invalid modes passed in from userspace are rejected. + * + * RETURNS: + * + * True if an acceptable configuration is possible, false if the modeset + * operation should be rejected. + */ + bool (*mode_fixup)(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + + /** + * @mode_set: + * + * This callback is used by the legacy CRTC helpers to set a new mode, + * position and framebuffer. Since it ties the primary plane to every + * mode change it is incompatible with universal plane support. And + * since it can't update other planes it's incompatible with atomic + * modeset support. + * + * This callback is only used by CRTC helpers and deprecated. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, int x, int y, + struct drm_framebuffer *old_fb); + + /** + * @mode_set_nofb: + * + * This callback is used to update the display mode of a CRTC without + * changing anything of the primary plane configuration. This fits the + * requirement of atomic and hence is used by the atomic helpers. It is + * also used by the transitional plane helpers to implement a + * @mode_set hook in drm_helper_crtc_mode_set(). + * + * Note that the display pipe is completely off when this function is + * called. Atomic drivers which need hardware to be running before they + * program the new display mode (e.g. because they implement runtime PM) + * should not use this hook. This is because the helper library calls + * this hook only once per mode change and not every time the display + * pipeline is suspended using either DPMS or the new "ACTIVE" property. + * Which means register values set in this callback might get reset when + * the CRTC is suspended, but not restored. Such drivers should instead + * move all their CRTC setup into the @enable callback. + * + * This callback is optional. + */ + void (*mode_set_nofb)(struct drm_crtc *crtc); + + /** + * @mode_set_base: + * + * This callback is used by the legacy CRTC helpers to set a new + * framebuffer and scanout position. It is optional and used as an + * optimized fast-path instead of a full mode set operation with all the + * resulting flickering. If it is not present + * drm_crtc_helper_set_config() will fall back to a full modeset, using + * the ->mode_set() callback. Since it can't update other planes it's + * incompatible with atomic modeset support. + * + * This callback is only used by the CRTC helpers and deprecated. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. 
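A sketch (not part of the patch) of a ->mode_fixup() implementation following the rules above: it only inspects the passed-in modes and rejects dot clocks the (hypothetical) hardware cannot drive; EXAMPLE_MAX_DOTCLOCK_KHZ and example_crtc_mode_fixup() are placeholders:

        #define EXAMPLE_MAX_DOTCLOCK_KHZ 300000

        static bool example_crtc_mode_fixup(struct drm_crtc *crtc,
                                            const struct drm_display_mode *mode,
                                            struct drm_display_mode *adjusted_mode)
        {
                /* reject what the hardware cannot scan out ... */
                if (mode->clock > EXAMPLE_MAX_DOTCLOCK_KHZ)
                        return false;

                /* ... and leave adjusted_mode untouched otherwise */
                return true;
        }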
+ */ + int (*mode_set_base)(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb); + + /** + * @mode_set_base_atomic: + * + * This callback is used by the fbdev helpers to set a new framebuffer + * and scanout without sleeping, i.e. from an atomic calling context. It + * is only used to implement kgdb support. + * + * This callback is optional and only needed for kgdb support in the fbdev + * helpers. + * + * RETURNS: + * + * 0 on success or a negative error code on failure. + */ + int (*mode_set_base_atomic)(struct drm_crtc *crtc, + struct drm_framebuffer *fb, int x, int y, + enum mode_set_atomic); + + /** + * @load_lut: + * + * Load a LUT prepared with the @gamma_set functions from + * &drm_fb_helper_funcs. + * + * This callback is optional and is only used by the fbdev emulation + * helpers. + * + * FIXME: + * + * This callback is functionally redundant with the core gamma table + * support and simply exists because the fbdev hasn't yet been + * refactored to use the core gamma table interfaces. + */ + void (*load_lut)(struct drm_crtc *crtc); + + /** + * @disable: + * + * This callback should be used to disable the CRTC. With the atomic + * drivers it is called after all encoders connected to this CRTC have + * been shut off already using their own ->disable hook. If that + * sequence is too simple drivers can just add their own hooks and call + * it from this CRTC callback here by looping over all encoders + * connected to it using for_each_encoder_on_crtc(). + * + * This hook is used both by legacy CRTC helpers and atomic helpers. + * Atomic drivers don't need to implement it if there's no need to + * disable anything at the CRTC level. To ensure that runtime PM + * handling (using either DPMS or the new "ACTIVE" property) works + * @disable must be the inverse of @enable for atomic drivers. + * + * NOTE: + * + * With legacy CRTC helpers there's a big semantic difference between + * @disable and other hooks (like @prepare or @dpms) used to shut down a + * CRTC: @disable is only called when also logically disabling the + * display pipeline and needs to release any resources acquired in + * @mode_set (like shared PLLs, or again release pinned framebuffers). + * + * Therefore @disable must be the inverse of @mode_set plus @commit for + * drivers still using legacy CRTC helpers, which is different from the + * rules under atomic. + */ + void (*disable)(struct drm_crtc *crtc); + + /** + * @enable: + * + * This callback should be used to enable the CRTC. With the atomic + * drivers it is called before all encoders connected to this CRTC are + * enabled through the encoder's own ->enable hook. If that sequence is + * too simple drivers can just add their own hooks and call it from this + * CRTC callback here by looping over all encoders connected to it using + * for_each_encoder_on_crtc(). + * + * This hook is used only by atomic helpers, for symmetry with @disable. + * Atomic drivers don't need to implement it if there's no need to + * enable anything at the CRTC level. To ensure that runtime PM handling + * (using either DPMS or the new "ACTIVE" property) works + * @enable must be the inverse of @disable for atomic drivers. + */ + void (*enable)(struct drm_crtc *crtc); + + /** + * @atomic_check: + * + * Drivers should check plane-update related CRTC constraints in this + * hook. 
They can also check mode related limitations but need to be + * aware of the calling order, since this hook is used by + * drm_atomic_helper_check_planes() whereas the preparations needed to + * check output routing and the display mode is done in + * drm_atomic_helper_check_modeset(). Therefore drivers that want to + * check output routing and display mode constraints in this callback + * must ensure that drm_atomic_helper_check_modeset() has been called + * beforehand. This is calling order used by the default helper + * implementation in drm_atomic_helper_check(). + * + * When using drm_atomic_helper_check_planes() CRTCs' ->atomic_check() + * hooks are called after the ones for planes, which allows drivers to + * assign shared resources requested by planes in the CRTC callback + * here. For more complicated dependencies the driver can call the provided + * check helpers multiple times until the computed state has a final + * configuration and everything has been checked. + * + * This function is also allowed to inspect any other object's state and + * can add more state objects to the atomic commit if needed. Care must + * be taken though to ensure that state check&compute functions for + * these added states are all called, and derived state in other objects + * all updated. Again the recommendation is to just call check helpers + * until a maximal configuration is reached. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + * + * NOTE: + * + * This function is called in the check phase of an atomic update. The + * driver is not allowed to change anything outside of the free-standing + * state objects passed-in or assembled in the overall &drm_atomic_state + * update tracking structure. + * + * RETURNS: + * + * 0 on success, -EINVAL if the state or the transition can't be + * supported, -ENOMEM on memory allocation failure and -EDEADLK if an + * attempt to obtain another state object ran into a &drm_modeset_lock + * deadlock. + */ + int (*atomic_check)(struct drm_crtc *crtc, + struct drm_crtc_state *state); + + /** + * @atomic_begin: + * + * Drivers should prepare for an atomic update of multiple planes on + * a CRTC in this hook. Depending upon hardware this might be vblank + * evasion, blocking updates by setting bits or doing preparatory work + * for e.g. manual update display. + * + * This hook is called before any plane commit functions are called. + * + * Note that the power state of the display pipe when this function is + * called depends upon the exact helpers and calling sequence the driver + * has picked. See drm_atomic_commit_planes() for a discussion of the + * tradeoffs and variants of plane commit helpers. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + */ + void (*atomic_begin)(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state); + /** + * @atomic_flush: + * + * Drivers should finalize an atomic update of multiple planes on + * a CRTC in this hook. Depending upon hardware this might include + * checking that vblank evasion was successful, unblocking updates by + * setting bits or setting the GO bit to flush out all updates. + * + * Simple hardware or hardware with special requirements can commit and + * flush out all updates for all planes from this hook and forgo all the + * other commit hooks for plane updates. + * + * This hook is called after any plane commit functions are called. 
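A sketch (not part of the patch) of a trivial ->atomic_begin()/->atomic_flush() pair for hardware with a single GO bit that latches all plane updates at the next vblank; struct example_crtc, EXAMPLE_REG_GO and the register layout are hypothetical:

        struct example_crtc {
                struct drm_crtc base;
                void __iomem *regs;
        };

        #define EXAMPLE_REG_GO          0x00    /* hypothetical "latch updates" register */
        #define to_example_crtc(c)      container_of(c, struct example_crtc, base)

        static void example_crtc_atomic_begin(struct drm_crtc *crtc,
                                              struct drm_crtc_state *old_crtc_state)
        {
                /* nothing to block here: updates accumulate until GO is set */
        }

        static void example_crtc_atomic_flush(struct drm_crtc *crtc,
                                              struct drm_crtc_state *old_crtc_state)
        {
                struct example_crtc *ecrtc = to_example_crtc(crtc);

                /* latch every plane update programmed since ->atomic_begin() */
                writel(1, ecrtc->regs + EXAMPLE_REG_GO);
        }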
+ * + * Note that the power state of the display pipe when this function is + * called depends upon the exact helpers and calling sequence the driver + * has picked. See drm_atomic_commit_planes() for a discussion of the + * tradeoffs and variants of plane commit helpers. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + */ + void (*atomic_flush)(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state); +}; + +/** + * drm_crtc_helper_add - sets the helper vtable for a crtc + * @crtc: DRM CRTC + * @funcs: helper vtable to set for @crtc + */ +static inline void drm_crtc_helper_add(struct drm_crtc *crtc, + const struct drm_crtc_helper_funcs *funcs) +{ + crtc->helper_private = funcs; +} + +/** + * struct drm_encoder_helper_funcs - helper operations for encoders + * + * These hooks are used by the legacy CRTC helpers, the transitional plane + * helpers and the new atomic modesetting helpers. + */ +struct drm_encoder_helper_funcs { + /** + * @dpms: + * + * Callback to control power levels on the encoder. If the mode passed in + * is unsupported, the provider must use the next lowest power level. + * This is used by the legacy encoder helpers to implement DPMS + * functionality in drm_helper_connector_dpms(). + * + * This callback is also used to disable an encoder by calling it with + * DRM_MODE_DPMS_OFF if the @disable hook isn't used. + * + * This callback is used by the legacy CRTC helpers. Atomic helpers + * also support using this hook for enabling and disabling an encoder to + * facilitate transitions to atomic, but it is deprecated. Instead + * @enable and @disable should be used. + */ + void (*dpms)(struct drm_encoder *encoder, int mode); + + /** + * @mode_fixup: + * + * This callback is used to validate and adjust a mode. The parameter + * mode is the display mode that should be fed to the next element in + * the display chain, either the final &drm_connector or a &drm_bridge. + * The parameter adjusted_mode is the input mode the encoder requires. It + * can be modified by this callback and does not need to match mode. + * + * This function is used by both legacy CRTC helpers and atomic helpers. + * With atomic helpers it is optional. + * + * NOTE: + * + * This function is called in the check phase of atomic modesets, which + * can be aborted for any reason (including on userspace's request to + * just check whether a configuration would be possible). Atomic drivers + * MUST NOT touch any persistent state (hardware or software) or data + * structures except the passed in adjusted_mode parameter. + * + * This is in contrast to the legacy CRTC helpers where this was + * allowed. + * + * Atomic drivers which need to inspect and adjust more state should + * instead use the @atomic_check callback. + * + * Also beware that neither core nor helpers filter modes before + * passing them to the driver: While the list of modes that is + * advertised to userspace is filtered using the connector's + * ->mode_valid() callback, neither the core nor the helpers do any + * filtering on modes passed in from userspace when setting a mode. It + * is therefore possible for userspace to pass in a mode that was + * previously filtered out using ->mode_valid() or add a custom mode + * that wasn't probed from EDID or similar to begin with. Even though + * this is an advanced feature and rarely used nowadays, some users rely + * on being able to specify modes manually so drivers must be prepared + * to deal with it. 
Specifically this means that all drivers need not + * only validate modes in ->mode_valid() but also in ->mode_fixup() to + * make sure invalid modes passed in from userspace are rejected. + * + * RETURNS: + * + * True if an acceptable configuration is possible, false if the modeset + * operation should be rejected. + */ + bool (*mode_fixup)(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + + /** + * @prepare: + * + * This callback should prepare the encoder for a subsequent modeset, + * which in practice means the driver should disable the encoder if it + * is running. Most drivers ended up implementing this by calling their + * @dpms hook with DRM_MODE_DPMS_OFF. + * + * This callback is used by the legacy CRTC helpers. Atomic helpers + * also support using this hook for disabling an encoder to facilitate + * transitions to atomic, but it is deprecated. Instead @disable should + * be used. + */ + void (*prepare)(struct drm_encoder *encoder); + + /** + * @commit: + * + * This callback should commit the new mode on the encoder after a modeset, + * which in practice means the driver should enable the encoder. Most + * drivers ended up implementing this by calling their @dpms hook with + * DRM_MODE_DPMS_ON. + * + * This callback is used by the legacy CRTC helpers. Atomic helpers + * also support using this hook for enabling an encoder to facilitate + * transitions to atomic, but it is deprecated. Instead @enable should + * be used. + */ + void (*commit)(struct drm_encoder *encoder); + + /** + * @mode_set: + * + * This callback is used to update the display mode of an encoder. + * + * Note that the display pipe is completely off when this function is + * called. Drivers which need hardware to be running before they program + * the new display mode (because they implement runtime PM) should not + * use this hook, because the helper library calls it only once and not + * every time the display pipeline is suspend using either DPMS or the + * new "ACTIVE" property. Such drivers should instead move all their + * encoder setup into the ->enable() callback. + * + * This callback is used both by the legacy CRTC helpers and the atomic + * modeset helpers. It is optional in the atomic helpers. + */ + void (*mode_set)(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); + + /** + * @get_crtc: + * + * This callback is used by the legacy CRTC helpers to work around + * deficiencies in its own book-keeping. + * + * Do not use, use atomic helpers instead, which get the book keeping + * right. + * + * FIXME: + * + * Currently only nouveau is using this, and as soon as nouveau is + * atomic we can ditch this hook. + */ + struct drm_crtc *(*get_crtc)(struct drm_encoder *encoder); + + /** + * @detect: + * + * This callback can be used by drivers who want to do detection on the + * encoder object instead of in connector functions. + * + * It is not used by any helper and therefore has purely driver-specific + * semantics. New drivers shouldn't use this and instead just implement + * their own private callbacks. + * + * FIXME: + * + * This should just be converted into a pile of driver vfuncs. + * Currently radeon, amdgpu and nouveau are using it. + */ + enum drm_connector_status (*detect)(struct drm_encoder *encoder, + struct drm_connector *connector); + + /** + * @disable: + * + * This callback should be used to disable the encoder. 
With the atomic + * drivers it is called before this encoder's CRTC has been shut off + * using the CRTC's own ->disable hook. If that sequence is too simple, + * drivers can just add their own driver private encoder hooks and call + * them from the CRTC's callback by looping over all encoders connected to + * it using for_each_encoder_on_crtc(). + * + * This hook is used both by legacy CRTC helpers and atomic helpers. + * Atomic drivers don't need to implement it if there's no need to + * disable anything at the encoder level. To ensure that runtime PM + * handling (using either DPMS or the new "ACTIVE" property) works, + * @disable must be the inverse of @enable for atomic drivers. + * + * NOTE: + * + * With legacy CRTC helpers there's a big semantic difference between + * @disable and other hooks (like @prepare or @dpms) used to shut down an + * encoder: @disable is only called when also logically disabling the + * display pipeline and needs to release any resources acquired in + * @mode_set (like shared PLLs, or again release pinned framebuffers). + * + * Therefore @disable must be the inverse of @mode_set plus @commit for + * drivers still using legacy CRTC helpers, which is different from the + * rules under atomic. + */ + void (*disable)(struct drm_encoder *encoder); + + /** + * @enable: + * + * This callback should be used to enable the encoder. With the atomic + * drivers it is called after this encoder's CRTC has been enabled using + * the CRTC's own ->enable hook. If that sequence is too simple, drivers + * can just add their own driver private encoder hooks and call them + * from the CRTC's callback by looping over all encoders connected to it + * using for_each_encoder_on_crtc(). + * + * This hook is used only by atomic helpers, for symmetry with @disable. + * Atomic drivers don't need to implement it if there's no need to + * enable anything at the encoder level. To ensure that runtime PM handling + * (using either DPMS or the new "ACTIVE" property) works, + * @enable must be the inverse of @disable for atomic drivers. + */ + void (*enable)(struct drm_encoder *encoder); + + /** + * @atomic_check: + * + * This callback is used to validate encoder state for atomic drivers. + * Since the encoder is the object connecting the CRTC and connector it + * gets passed both states, to be able to validate interactions and + * update the CRTC to match what the encoder needs for the requested + * connector. + * + * This function is used by the atomic helpers, but it is optional. + * + * NOTE: + * + * This function is called in the check phase of an atomic update. The + * driver is not allowed to change anything outside of the free-standing + * state objects passed-in or assembled in the overall &drm_atomic_state + * update tracking structure. + * + * RETURNS: + * + * 0 on success, -EINVAL if the state or the transition can't be + * supported, -ENOMEM on memory allocation failure and -EDEADLK if an + * attempt to obtain another state object ran into a &drm_modeset_lock + * deadlock.
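/*
 * A minimal sketch (not taken from this patch) of how a driver might populate
 * the encoder vtable documented above with atomic-style hooks. The foo_*
 * functions and the 300000 kHz limit are hypothetical placeholders; only the
 * hooks and state members documented in this header are relied upon.
 */
static void foo_encoder_enable(struct drm_encoder *encoder)
{
	/* hypothetical: power up the encoder and start scanout */
}

static void foo_encoder_disable(struct drm_encoder *encoder)
{
	/* hypothetical: exact inverse of foo_encoder_enable() */
}

static int foo_encoder_atomic_check(struct drm_encoder *encoder,
				    struct drm_crtc_state *crtc_state,
				    struct drm_connector_state *conn_state)
{
	/* reject modes the (made-up) hardware cannot drive */
	if (crtc_state->adjusted_mode.clock > 300000)
		return -EINVAL;

	return 0;
}

static const struct drm_encoder_helper_funcs foo_encoder_helper_funcs = {
	.enable       = foo_encoder_enable,
	.disable      = foo_encoder_disable,
	.atomic_check = foo_encoder_atomic_check,
};

/* registered once at init time: drm_encoder_helper_add(encoder, &foo_encoder_helper_funcs); */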
+ */ + int (*atomic_check)(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state); +}; + +/** + * drm_encoder_helper_add - sets the helper vtable for an encoder + * @encoder: DRM encoder + * @funcs: helper vtable to set for @encoder + */ +static inline void drm_encoder_helper_add(struct drm_encoder *encoder, + const struct drm_encoder_helper_funcs *funcs) +{ + encoder->helper_private = funcs; +} + +/** + * struct drm_connector_helper_funcs - helper operations for connectors + * + * These functions are used by the atomic and legacy modeset helpers and by the + * probe helpers. + */ +struct drm_connector_helper_funcs { + /** + * @get_modes: + * + * This function should fill in all modes currently valid for the sink + * into the connector->probed_modes list. It should also update the + * EDID property by calling drm_mode_connector_update_edid_property(). + * + * The usual way to implement this is to cache the EDID retrieved in the + * probe callback somewhere in the driver-private connector structure. + * In this function drivers then parse the modes in the EDID and add + * them by calling drm_add_edid_modes(). But connectors that drive a + * fixed panel can also manually add specific modes using + * drm_mode_probed_add(). Drivers which manually add modes should also + * make sure that the @display_info, @width_mm and @height_mm fields of the + * struct #drm_connector are filled in. + * + * Virtual drivers that just want some standard VESA mode with a given + * resolution can call drm_add_modes_noedid(), and mark the preferred + * one using drm_set_preferred_mode(). + * + * Finally drivers that support audio probably want to update the ELD + * data, too, using drm_edid_to_eld(). + * + * This function is only called after the ->detect() hook has indicated + * that a sink is connected and when the EDID isn't overridden through + * sysfs or the kernel commandline. + * + * This callback is used by the probe helpers in e.g. + * drm_helper_probe_single_connector_modes(). + * + * RETURNS: + * + * The number of modes added by calling drm_mode_probed_add(). + */ + int (*get_modes)(struct drm_connector *connector); + + /** + * @mode_valid: + * + * Callback to validate a mode for a connector, irrespective of the + * specific display configuration. + * + * This callback is used by the probe helpers to filter the mode list + * (which is usually derived from the EDID data block from the sink). + * See e.g. drm_helper_probe_single_connector_modes(). + * + * NOTE: + * + * This only filters the mode list supplied to userspace in the + * GETCONNECTOR IOCTL. Userspace is free to create modes of its own and + * ask the kernel to use them. In this case the atomic helpers or legacy + * CRTC helpers will not call this function. Drivers therefore must + * still fully validate any mode passed in with a modeset request. + * + * RETURNS: + * + * Either MODE_OK or one of the failure reasons in enum + * &drm_mode_status. + */ + enum drm_mode_status (*mode_valid)(struct drm_connector *connector, + struct drm_display_mode *mode); + /** + * @best_encoder: + * + * This function should select the best encoder for the given connector. + * + * This function is used by both the atomic helpers (in the + * drm_atomic_helper_check_modeset() function) and by the legacy CRTC + * helpers. + * + * NOTE: + * + * In atomic drivers this function is called in the check phase of an + * atomic update.
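/*
 * A minimal sketch of a connector implementation following the @get_modes and
 * @mode_valid documentation above. The foo_connector structure, the
 * to_foo_connector() cast and the 165000 kHz panel limit are hypothetical; the
 * EDID is assumed to have been cached by the driver's ->detect() hook.
 */
static int foo_connector_get_modes(struct drm_connector *connector)
{
	struct foo_connector *foo = to_foo_connector(connector);

	drm_mode_connector_update_edid_property(connector, foo->edid);

	return drm_add_edid_modes(connector, foo->edid);
}

static enum drm_mode_status foo_connector_mode_valid(struct drm_connector *connector,
						     struct drm_display_mode *mode)
{
	/* hypothetical fixed-panel pixel clock limit */
	if (mode->clock > 165000)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}

static const struct drm_connector_helper_funcs foo_connector_helper_funcs = {
	.get_modes  = foo_connector_get_modes,
	.mode_valid = foo_connector_mode_valid,
};

/* registered once at init time: drm_connector_helper_add(connector, &foo_connector_helper_funcs); */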
The driver is not allowed to change or inspect + * anything outside of arguments passed-in. Atomic drivers which need to + * inspect dynamic configuration state should instead use + * @atomic_best_encoder. + * + * RETURNS: + * + * Encoder that should be used for the given connector and connector + * state, or NULL if no suitable encoder exists. Note that the helpers + * will ensure that encoders aren't used twice; drivers should not check + * for this. + */ + struct drm_encoder *(*best_encoder)(struct drm_connector *connector); + + /** + * @atomic_best_encoder: + * + * This is the atomic version of @best_encoder for atomic drivers which + * need to select the best encoder depending upon the desired + * configuration and can't select it statically. + * + * This function is used by drm_atomic_helper_check_modeset() and either + * this or @best_encoder is required. + * + * NOTE: + * + * This function is called in the check phase of an atomic update. The + * driver is not allowed to change anything outside of the free-standing + * state objects passed-in or assembled in the overall &drm_atomic_state + * update tracking structure. + * + * RETURNS: + * + * Encoder that should be used for the given connector and connector + * state, or NULL if no suitable encoder exists. Note that the helpers + * will ensure that encoders aren't used twice; drivers should not check + * for this. + */ + struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector, + struct drm_connector_state *connector_state); +}; + +/** + * drm_connector_helper_add - sets the helper vtable for a connector + * @connector: DRM connector + * @funcs: helper vtable to set for @connector + */ +static inline void drm_connector_helper_add(struct drm_connector *connector, + const struct drm_connector_helper_funcs *funcs) +{ + connector->helper_private = funcs; +} + +/** + * struct drm_plane_helper_funcs - helper operations for planes + * + * These functions are used by the atomic helpers and by the transitional plane + * helpers. + */ +struct drm_plane_helper_funcs { + /** + * @prepare_fb: + * + * This hook is to prepare a framebuffer for scanout by e.g. pinning + * its backing storage or relocating it into a contiguous block of + * VRAM. Other possible preparatory work includes flushing caches. + * + * This function must not block for outstanding rendering, since it is + * called in the context of the atomic IOCTL even for async commits to + * be able to return any errors to userspace. Instead the recommended + * way is to fill out the fence member of the passed-in + * &drm_plane_state. If the driver doesn't support native fences then + * equivalent functionality should be implemented through private + * members in the plane structure. + * + * The helpers will call @cleanup_fb with matching arguments for every + * successful call to this hook. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + * + * RETURNS: + * + * 0 on success or one of the following negative error codes allowed by + * the atomic_commit hook in &drm_mode_config_funcs. When using helpers + * this callback is the only one which can fail an atomic commit, + * everything else must complete successfully. + */ + int (*prepare_fb)(struct drm_plane *plane, + const struct drm_plane_state *new_state); + /** + * @cleanup_fb: + * + * This hook is called to clean up any resources allocated for the given + * framebuffer and plane configuration in @prepare_fb.
+ * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + */ + void (*cleanup_fb)(struct drm_plane *plane, + const struct drm_plane_state *old_state); + + /** + * @atomic_check: + * + * Drivers should check plane specific constraints in this hook. + * + * When using drm_atomic_helper_check_planes(), the planes' ->atomic_check() + * hooks are called before the ones for CRTCs, which allows drivers to + * request shared resources that the CRTC controls here. For more + * complicated dependencies the driver can call the provided check helpers + * multiple times until the computed state has a final configuration and + * everything has been checked. + * + * This function is also allowed to inspect any other object's state and + * can add more state objects to the atomic commit if needed. Care must + * be taken though to ensure that state check and compute functions for + * these added states are all called, and derived state in other objects + * all updated. Again the recommendation is to just call check helpers + * until a maximal configuration is reached. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + * + * NOTE: + * + * This function is called in the check phase of an atomic update. The + * driver is not allowed to change anything outside of the free-standing + * state objects passed-in or assembled in the overall &drm_atomic_state + * update tracking structure. + * + * RETURNS: + * + * 0 on success, -EINVAL if the state or the transition can't be + * supported, -ENOMEM on memory allocation failure and -EDEADLK if an + * attempt to obtain another state object ran into a &drm_modeset_lock + * deadlock. + */ + int (*atomic_check)(struct drm_plane *plane, + struct drm_plane_state *state); + + /** + * @atomic_update: + * + * Drivers should use this function to update the plane state. This + * hook is called in-between the ->atomic_begin() and + * ->atomic_flush() of &drm_crtc_helper_funcs. + * + * Note that the power state of the display pipe when this function is + * called depends upon the exact helpers and calling sequence the driver + * has picked. See drm_atomic_helper_commit_planes() for a discussion of the + * tradeoffs and variants of plane commit helpers. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional. + */ + void (*atomic_update)(struct drm_plane *plane, + struct drm_plane_state *old_state); + /** + * @atomic_disable: + * + * Drivers should use this function to unconditionally disable a plane. + * This hook is called in-between the ->atomic_begin() and + * ->atomic_flush() of &drm_crtc_helper_funcs. It is an alternative to + * @atomic_update, which will be called for disabling planes, too, if + * the @atomic_disable hook isn't implemented. + * + * This hook is also useful to disable planes in preparation for a modeset, + * by calling drm_atomic_helper_disable_planes_on_crtc() from the + * ->disable() hook in &drm_crtc_helper_funcs. + * + * Note that the power state of the display pipe when this function is + * called depends upon the exact helpers and calling sequence the driver + * has picked. See drm_atomic_helper_commit_planes() for a discussion of the + * tradeoffs and variants of plane commit helpers. + * + * This callback is used by the atomic modeset helpers and by the + * transitional plane helpers, but it is optional.
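/*
 * A minimal sketch of a plane implementation built from the hooks documented
 * above. The foo_* functions are hypothetical; the check simply rejects
 * scaling, since the made-up hardware can only scan out 1:1.
 */
static int foo_plane_atomic_check(struct drm_plane *plane,
				  struct drm_plane_state *state)
{
	/* disabling the plane (no CRTC) always passes */
	if (!state->crtc)
		return 0;

	/* src_w/src_h are 16.16 fixed point, crtc_w/crtc_h are integers */
	if (state->crtc_w != state->src_w >> 16 ||
	    state->crtc_h != state->src_h >> 16)
		return -EINVAL;

	return 0;
}

static void foo_plane_atomic_update(struct drm_plane *plane,
				    struct drm_plane_state *old_state)
{
	/* hypothetical: program plane->state->fb and the new coordinates */
}

static void foo_plane_atomic_disable(struct drm_plane *plane,
				     struct drm_plane_state *old_state)
{
	/* hypothetical: turn the plane off */
}

static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
	.atomic_check   = foo_plane_atomic_check,
	.atomic_update  = foo_plane_atomic_update,
	.atomic_disable = foo_plane_atomic_disable,
};

/* registered once at init time: drm_plane_helper_add(plane, &foo_plane_helper_funcs); */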
+ */ + void (*atomic_disable)(struct drm_plane *plane, + struct drm_plane_state *old_state); +}; + +/** + * drm_plane_helper_add - sets the helper vtable for a plane + * @plane: DRM plane + * @funcs: helper vtable to set for @plane + */ +static inline void drm_plane_helper_add(struct drm_plane *plane, + const struct drm_plane_helper_funcs *funcs) +{ + plane->helper_private = funcs; +} + +#endif diff --git a/drivers/include/drm/drm_modeset_lock.h b/drivers/include/drm/drm_modeset_lock.h index 94938d8934..1e70a67773 100644 --- a/drivers/include/drm/drm_modeset_lock.h +++ b/drivers/include/drm/drm_modeset_lock.h @@ -138,7 +138,7 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev); struct drm_modeset_acquire_ctx * drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc); -int drm_modeset_lock_all_crtcs(struct drm_device *dev, +int drm_modeset_lock_all_ctx(struct drm_device *dev, struct drm_modeset_acquire_ctx *ctx); #endif /* DRM_MODESET_LOCK_H_ */ diff --git a/drivers/include/drm/drm_plane_helper.h b/drivers/include/drm/drm_plane_helper.h index 5a7f9d4efb..4421f3f4ca 100644 --- a/drivers/include/drm/drm_plane_helper.h +++ b/drivers/include/drm/drm_plane_helper.h @@ -26,6 +26,7 @@ #include #include +#include /* * Drivers that don't allow primary plane scaling may pass this macro in place @@ -36,46 +37,9 @@ */ #define DRM_PLANE_HELPER_NO_SCALING (1<<16) -/** - * DOC: plane helpers - * - * Helper functions to assist with creation and handling of CRTC primary - * planes. - */ - int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, const struct drm_crtc_funcs *funcs); -/** - * drm_plane_helper_funcs - helper operations for CRTCs - * @prepare_fb: prepare a framebuffer for use by the plane - * @cleanup_fb: cleanup a framebuffer when it's no longer used by the plane - * @atomic_check: check that a given atomic state is valid and can be applied - * @atomic_update: apply an atomic state to the plane (mandatory) - * @atomic_disable: disable the plane - * - * The helper operations are called by the mid-layer CRTC helper. 
- */ -struct drm_plane_helper_funcs { - int (*prepare_fb)(struct drm_plane *plane, - const struct drm_plane_state *new_state); - void (*cleanup_fb)(struct drm_plane *plane, - const struct drm_plane_state *old_state); - - int (*atomic_check)(struct drm_plane *plane, - struct drm_plane_state *state); - void (*atomic_update)(struct drm_plane *plane, - struct drm_plane_state *old_state); - void (*atomic_disable)(struct drm_plane *plane, - struct drm_plane_state *old_state); -}; - -static inline void drm_plane_helper_add(struct drm_plane *plane, - const struct drm_plane_helper_funcs *funcs) -{ - plane->helper_private = funcs; -} - int drm_plane_helper_check_update(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, diff --git a/drivers/include/drm/drm_rect.h b/drivers/include/drm/drm_rect.h index 26bb55e9e8..83bb156d43 100644 --- a/drivers/include/drm/drm_rect.h +++ b/drivers/include/drm/drm_rect.h @@ -162,7 +162,8 @@ int drm_rect_calc_hscale_relaxed(struct drm_rect *src, int drm_rect_calc_vscale_relaxed(struct drm_rect *src, struct drm_rect *dst, int min_vscale, int max_vscale); -void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point); +void drm_rect_debug_print(const char *prefix, + const struct drm_rect *r, bool fixed_point); void drm_rect_rotate(struct drm_rect *r, int width, int height, unsigned int rotation); diff --git a/drivers/include/drm/i915_component.h b/drivers/include/drm/i915_component.h index 30d89e0da2..b46fa0ef30 100644 --- a/drivers/include/drm/i915_component.h +++ b/drivers/include/drm/i915_component.h @@ -31,47 +31,94 @@ #define MAX_PORTS 5 /** - * struct i915_audio_component_ops - callbacks defined in gfx driver - * @owner: the module owner - * @get_power: get the POWER_DOMAIN_AUDIO power well - * @put_power: put the POWER_DOMAIN_AUDIO power well - * @codec_wake_override: Enable/Disable generating the codec wake signal - * @get_cdclk_freq: get the Core Display Clock in KHz - * @sync_audio_rate: set n/cts based on the sample rate + * struct i915_audio_component_ops - Ops implemented by i915 driver, called by hda driver */ struct i915_audio_component_ops { + /** + * @owner: i915 module + */ struct module *owner; + /** + * @get_power: get the POWER_DOMAIN_AUDIO power well + * + * Request the power well to be turned on. + */ void (*get_power)(struct device *); + /** + * @put_power: put the POWER_DOMAIN_AUDIO power well + * + * Allow the power well to be turned off. + */ void (*put_power)(struct device *); + /** + * @codec_wake_override: Enable/disable codec wake signal + */ void (*codec_wake_override)(struct device *, bool enable); + /** + * @get_cdclk_freq: Get the Core Display Clock in kHz + */ int (*get_cdclk_freq)(struct device *); + /** + * @sync_audio_rate: set n/cts based on the sample rate + * + * Called from audio driver. After audio driver sets the + * sample rate, it will call this function to set n/cts + */ int (*sync_audio_rate)(struct device *, int port, int rate); + /** + * @get_eld: fill the audio state and ELD bytes for the given port + * + * Called from audio driver to get the HDMI/DP audio state of the given + * digital port, and also fetch ELD bytes to the given pointer. + * + * It returns the byte size of the original ELD (not the actually + * copied size), zero for an invalid ELD, or a negative error code. + * + * Note that the returned size may be over @max_bytes. Then it + * implies that only a part of ELD has been copied to the buffer. 
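/*
 * A sketch of how the audio (HDA) side might consume the ops above through the
 * shared component. The foo_hda_read_eld() wrapper, the buffer and the port
 * number are hypothetical; only the callbacks documented in this structure are
 * used, and the power well is held across the ELD read as the docs require.
 */
static int foo_hda_read_eld(struct i915_audio_component *acomp, int port,
			    unsigned char *eld, int max_bytes)
{
	bool enabled = false;
	int size;

	if (!acomp->ops || !acomp->ops->get_eld)
		return -ENODEV;

	acomp->ops->get_power(acomp->dev);
	size = acomp->ops->get_eld(acomp->dev, port, &enabled, eld, max_bytes);
	acomp->ops->put_power(acomp->dev);

	if (size < 0)
		return size;		/* error reported by the gfx driver */
	if (!size || !enabled)
		return -ENOENT;		/* no valid ELD, or the pipe is not active */

	/* if size > max_bytes only the first max_bytes bytes were copied */
	return size;
}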
+ */ + int (*get_eld)(struct device *, int port, bool *enabled, + unsigned char *buf, int max_bytes); }; +/** + * struct i915_audio_component_audio_ops - Ops implemented by hda driver, called by i915 driver + */ struct i915_audio_component_audio_ops { + /** + * @audio_ptr: Pointer to be used in call to pin_eld_notify + */ void *audio_ptr; /** - * Call from i915 driver, notifying the HDA driver that - * pin sense and/or ELD information has changed. - * @audio_ptr: HDA driver object - * @port: Which port has changed (PORTA / PORTB / PORTC etc) + * @pin_eld_notify: Notify the HDA driver that pin sense and/or ELD information has changed + * + * Called when the i915 driver has set up audio pipeline or has just + * begun to tear it down. This allows the HDA driver to update its + * status accordingly (even when the HDA controller is in power save + * mode). */ void (*pin_eld_notify)(void *audio_ptr, int port); }; /** - * struct i915_audio_component - used for audio video interaction - * @dev: the device from gfx driver - * @aud_sample_rate: the array of audio sample rate per port - * @ops: callback for audio driver calling - * @audio_ops: Call from i915 driver + * struct i915_audio_component - Used for direct communication between i915 and hda drivers */ struct i915_audio_component { + /** + * @dev: i915 device, used as parameter for ops + */ struct device *dev; + /** + * @aud_sample_rate: the array of audio sample rate per port + */ int aud_sample_rate[MAX_PORTS]; - + /** + * @ops: Ops implemented by i915 driver, called by hda driver + */ const struct i915_audio_component_ops *ops; - + /** + * @audio_ops: Ops implemented by hda driver, called by i915 driver + */ const struct i915_audio_component_audio_ops *audio_ops; }; diff --git a/drivers/include/drm/i915_pciids.h b/drivers/include/drm/i915_pciids.h index 2cdc723d75..6d05dd5030 100644 --- a/drivers/include/drm/i915_pciids.h +++ b/drivers/include/drm/i915_pciids.h @@ -277,22 +277,61 @@ INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */ #define INTEL_SKL_GT3_IDS(info) \ - INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \ INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \ - INTEL_VGA_DEVICE(0x1927, info), /* ULT GT3 */ \ INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \ - INTEL_VGA_DEVICE(0x192A, info) /* SRV GT3 */ \ + INTEL_VGA_DEVICE(0x192A, info) /* SRV GT3 */ + +#define INTEL_SKL_GT4_IDS(info) \ + INTEL_VGA_DEVICE(0x1932, info), /* DT GT4 */ \ + INTEL_VGA_DEVICE(0x193B, info), /* Halo GT4 */ \ + INTEL_VGA_DEVICE(0x193D, info), /* WKS GT4 */ \ + INTEL_VGA_DEVICE(0x193A, info) /* SRV GT4 */ #define INTEL_SKL_IDS(info) \ INTEL_SKL_GT1_IDS(info), \ INTEL_SKL_GT2_IDS(info), \ - INTEL_SKL_GT3_IDS(info) + INTEL_SKL_GT3_IDS(info), \ + INTEL_SKL_GT4_IDS(info) #define INTEL_BXT_IDS(info) \ INTEL_VGA_DEVICE(0x0A84, info), \ INTEL_VGA_DEVICE(0x1A84, info), \ - INTEL_VGA_DEVICE(0x1A85, info), \ - INTEL_VGA_DEVICE(0x5A84, info), /* APL HD Graphics 505 */ \ - INTEL_VGA_DEVICE(0x5A85, info) /* APL HD Graphics 500 */ + INTEL_VGA_DEVICE(0x5A84, info) + +#define INTEL_KBL_GT1_IDS(info) \ + INTEL_VGA_DEVICE(0x5913, info), /* ULT GT1.5 */ \ + INTEL_VGA_DEVICE(0x5915, info), /* ULX GT1.5 */ \ + INTEL_VGA_DEVICE(0x5917, info), /* DT GT1.5 */ \ + INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \ + INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \ + INTEL_VGA_DEVICE(0x5902, info), /* DT GT1 */ \ + INTEL_VGA_DEVICE(0x590B, info), /* Halo GT1 */ \ + INTEL_VGA_DEVICE(0x590A, info) /* SRV GT1 */ + +#define INTEL_KBL_GT2_IDS(info) \ + INTEL_VGA_DEVICE(0x5916, info), /* ULT GT2 */ \ + 
INTEL_VGA_DEVICE(0x5921, info), /* ULT GT2F */ \ + INTEL_VGA_DEVICE(0x591E, info), /* ULX GT2 */ \ + INTEL_VGA_DEVICE(0x5912, info), /* DT GT2 */ \ + INTEL_VGA_DEVICE(0x591B, info), /* Halo GT2 */ \ + INTEL_VGA_DEVICE(0x591A, info), /* SRV GT2 */ \ + INTEL_VGA_DEVICE(0x591D, info) /* WKS GT2 */ + +#define INTEL_KBL_GT3_IDS(info) \ + INTEL_VGA_DEVICE(0x5926, info), /* ULT GT3 */ \ + INTEL_VGA_DEVICE(0x592B, info), /* Halo GT3 */ \ + INTEL_VGA_DEVICE(0x592A, info) /* SRV GT3 */ + +#define INTEL_KBL_GT4_IDS(info) \ + INTEL_VGA_DEVICE(0x5932, info), /* DT GT4 */ \ + INTEL_VGA_DEVICE(0x593B, info), /* Halo GT4 */ \ + INTEL_VGA_DEVICE(0x593A, info), /* SRV GT4 */ \ + INTEL_VGA_DEVICE(0x593D, info) /* WKS GT4 */ + +#define INTEL_KBL_IDS(info) \ + INTEL_KBL_GT1_IDS(info), \ + INTEL_KBL_GT2_IDS(info), \ + INTEL_KBL_GT3_IDS(info), \ + INTEL_KBL_GT4_IDS(info) #endif /* _I915_PCIIDS_H */ diff --git a/drivers/include/drm/ttm/ttm_bo_api.h b/drivers/include/drm/ttm/ttm_bo_api.h index b7bfa513e6..afae2316bd 100644 --- a/drivers/include/drm/ttm/ttm_bo_api.h +++ b/drivers/include/drm/ttm/ttm_bo_api.h @@ -316,20 +316,6 @@ ttm_bo_reference(struct ttm_buffer_object *bo) */ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy, bool interruptible, bool no_wait); - -/** - * ttm_bo_mem_compat - Check if proposed placement is compatible with a bo - * - * @placement: Return immediately if buffer is busy. - * @mem: The struct ttm_mem_reg indicating the region where the bo resides - * @new_flags: Describes compatible placement found - * - * Returns true if the placement is compatible - */ -extern bool ttm_bo_mem_compat(struct ttm_placement *placement, - struct ttm_mem_reg *mem, - uint32_t *new_flags); - /** * ttm_bo_validate * @@ -397,6 +383,16 @@ extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); */ extern int ttm_bo_del_from_lru(struct ttm_buffer_object *bo); +/** + * ttm_bo_move_to_lru_tail + * + * @bo: The buffer object. + * + * Move this BO to the tail of all lru lists used to lookup and reserve an + * object. This function must be called with struct ttm_bo_global::lru_lock + * held, and is used to make a BO less likely to be considered for eviction. + */ +extern void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo); /** * ttm_bo_lock_delayed_workqueue diff --git a/drivers/include/drm/ttm/ttm_bo_driver.h b/drivers/include/drm/ttm/ttm_bo_driver.h index 3542415934..4395d6bc42 100644 --- a/drivers/include/drm/ttm/ttm_bo_driver.h +++ b/drivers/include/drm/ttm/ttm_bo_driver.h @@ -826,10 +826,10 @@ static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo, * reserved, the validation sequence is checked against the validation * sequence of the process currently reserving the buffer, * and if the current validation sequence is greater than that of the process - * holding the reservation, the function returns -EAGAIN. Otherwise it sleeps + * holding the reservation, the function returns -EDEADLK. Otherwise it sleeps * waiting for the buffer to become unreserved, after which it retries * reserving. - * The caller should, when receiving an -EAGAIN error + * The caller should, when receiving an -EDEADLK error * release all its buffer reservations, wait for @bo to become unreserved, and * then rerun the validation with the same validation sequence. 
This procedure * will always guarantee that the process with the lowest validation sequence diff --git a/drivers/include/linux/acpi.h b/drivers/include/linux/acpi.h index aa459587c0..64fbbbe8c6 100644 --- a/drivers/include/linux/acpi.h +++ b/drivers/include/linux/acpi.h @@ -37,6 +37,8 @@ #include #include #include +#include +#include #include #include @@ -318,6 +320,7 @@ bool acpi_dev_resource_address_space(struct acpi_resource *ares, bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares, struct resource_win *win); unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable); +unsigned int acpi_dev_get_irq_type(int triggering, int polarity); bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, struct resource *res); @@ -920,7 +923,7 @@ static inline struct fwnode_handle *acpi_get_next_subnode(struct device *dev, return NULL; } -#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \ +#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, validate, data, fn) \ static const void * __acpi_table_##name[] \ __attribute__((unused)) \ = { (void *) table_id, \ diff --git a/drivers/include/linux/backlight.h b/drivers/include/linux/backlight.h index b4938224fb..c6724881c6 100644 --- a/drivers/include/linux/backlight.h +++ b/drivers/include/linux/backlight.h @@ -11,6 +11,8 @@ #include #include #include +#include + /* Notes on locking: * * backlight_device->ops_lock is an internal backlight lock protecting the @@ -43,4 +45,6 @@ enum backlight_notification { BACKLIGHT_UNREGISTERED, }; +struct backlight_device; +struct fb_info; #endif diff --git a/drivers/include/linux/bug.h b/drivers/include/linux/bug.h index 5aea07226e..53bae04605 100644 --- a/drivers/include/linux/bug.h +++ b/drivers/include/linux/bug.h @@ -1,56 +1,26 @@ -#ifndef _ASM_GENERIC_BUG_H -#define _ASM_GENERIC_BUG_H +#ifndef _LINUX_BUG_H +#define _LINUX_BUG_H +#include #include -int printf(const char *fmt, ...); +enum bug_trap_type { + BUG_TRAP_TYPE_NONE = 0, + BUG_TRAP_TYPE_WARN = 1, + BUG_TRAP_TYPE_BUG = 2, +}; -#define __WARN() printf("\nWARNING: at %s:%d\n", __FILE__, __LINE__) -//#define __WARN_printf(arg...) printf("\nWARNING: at %s:%d\n", __FILE__, __LINE__) -#define __WARN_printf(arg...) do { printf(arg); __WARN(); } while (0) +struct pt_regs; -#define WARN(condition, format...) ({ \ - int __ret_warn_on = !!(condition); \ - if (unlikely(__ret_warn_on)) \ - __WARN_printf(format); \ - unlikely(__ret_warn_on); \ -}) - - -#define WARN_ON(condition) ({ \ - int __ret_warn_on = !!(condition); \ - if (unlikely(__ret_warn_on)) \ - __WARN(); \ - unlikely(__ret_warn_on); \ -}) - - -#define WARN_ONCE(condition, format...) 
({ \ - static bool __warned; \ - int __ret_warn_once = !!(condition); \ - \ - if (unlikely(__ret_warn_once)) \ - if (WARN(!__warned, format)) \ - __warned = true; \ - unlikely(__ret_warn_once); \ -}) - - -#define WARN_ON_ONCE(condition) ({ \ - static bool __warned; \ - int __ret_warn_once = !!(condition); \ - \ - if (unlikely(__ret_warn_once)) \ - if (WARN_ON(!__warned)) \ - __warned = true; \ - unlikely(__ret_warn_once); \ -}) - -#define BUG() do { \ - printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __FUNCTION__); \ - } while (0) - -#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0) +#ifdef __CHECKER__ +#define BUILD_BUG_ON_NOT_POWER_OF_2(n) (0) +#define BUILD_BUG_ON_ZERO(e) (0) +#define BUILD_BUG_ON_NULL(e) ((void*)0) +#define BUILD_BUG_ON_INVALID(e) (0) +#define BUILD_BUG_ON_MSG(cond, msg) (0) +#define BUILD_BUG_ON(condition) (0) +#define BUILD_BUG() (0) +#else /* __CHECKER__ */ /* Force a compilation error if a constant expression is not a power of 2 */ #define BUILD_BUG_ON_NOT_POWER_OF_2(n) \ @@ -113,10 +83,30 @@ int printf(const char *fmt, ...); */ #define BUILD_BUG() BUILD_BUG_ON_MSG(1, "BUILD_BUG failed") +#endif /* __CHECKER__ */ +#ifdef CONFIG_GENERIC_BUG +#include +static inline int is_warning_bug(const struct bug_entry *bug) +{ + return bug->flags & BUGFLAG_WARNING; +} -#define pr_warn_once(fmt, ...) \ - printk_once(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) +const struct bug_entry *find_bug(unsigned long bugaddr); -#endif +enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs); + +/* These are defined by the architecture */ +int is_valid_bugaddr(unsigned long addr); + +#else /* !CONFIG_GENERIC_BUG */ + +static inline enum bug_trap_type report_bug(unsigned long bug_addr, + struct pt_regs *regs) +{ + return BUG_TRAP_TYPE_BUG; +} + +#endif /* CONFIG_GENERIC_BUG */ +#endif /* _LINUX_BUG_H */ diff --git a/drivers/include/linux/byteorder/little_endian.h b/drivers/include/linux/byteorder/little_endian.h index 27eb335679..d5c957d16d 100644 --- a/drivers/include/linux/byteorder/little_endian.h +++ b/drivers/include/linux/byteorder/little_endian.h @@ -1,108 +1,7 @@ #ifndef _LINUX_BYTEORDER_LITTLE_ENDIAN_H #define _LINUX_BYTEORDER_LITTLE_ENDIAN_H -#ifndef __LITTLE_ENDIAN -#define __LITTLE_ENDIAN 1234 -#endif -#ifndef __LITTLE_ENDIAN_BITFIELD -#define __LITTLE_ENDIAN_BITFIELD -#endif +#include -#include -#include - -#define __constant_htonl(x) ((__force __be32)___constant_swab32((x))) -#define __constant_ntohl(x) ___constant_swab32((__force __be32)(x)) -#define __constant_htons(x) ((__force __be16)___constant_swab16((x))) -#define __constant_ntohs(x) ___constant_swab16((__force __be16)(x)) -#define __constant_cpu_to_le64(x) ((__force __le64)(__u64)(x)) -#define __constant_le64_to_cpu(x) ((__force __u64)(__le64)(x)) -#define __constant_cpu_to_le32(x) ((__force __le32)(__u32)(x)) -#define __constant_le32_to_cpu(x) ((__force __u32)(__le32)(x)) -#define __constant_cpu_to_le16(x) ((__force __le16)(__u16)(x)) -#define __constant_le16_to_cpu(x) ((__force __u16)(__le16)(x)) -#define __constant_cpu_to_be64(x) ((__force __be64)___constant_swab64((x))) -#define __constant_be64_to_cpu(x) ___constant_swab64((__force __u64)(__be64)(x)) -#define __constant_cpu_to_be32(x) ((__force __be32)___constant_swab32((x))) -#define __constant_be32_to_cpu(x) ___constant_swab32((__force __u32)(__be32)(x)) -#define __constant_cpu_to_be16(x) ((__force __be16)___constant_swab16((x))) -#define __constant_be16_to_cpu(x) ___constant_swab16((__force __u16)(__be16)(x)) 
-#define __cpu_to_le64(x) ((__force __le64)(__u64)(x)) -#define __le64_to_cpu(x) ((__force __u64)(__le64)(x)) -#define __cpu_to_le32(x) ((__force __le32)(__u32)(x)) -#define __le32_to_cpu(x) ((__force __u32)(__le32)(x)) -#define __cpu_to_le16(x) ((__force __le16)(__u16)(x)) -#define __le16_to_cpu(x) ((__force __u16)(__le16)(x)) -#define __cpu_to_be64(x) ((__force __be64)__swab64((x))) -#define __be64_to_cpu(x) __swab64((__force __u64)(__be64)(x)) -#define __cpu_to_be32(x) ((__force __be32)__swab32((x))) -#define __be32_to_cpu(x) __swab32((__force __u32)(__be32)(x)) -#define __cpu_to_be16(x) ((__force __be16)__swab16((x))) -#define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x)) - -static inline __le64 __cpu_to_le64p(const __u64 *p) -{ - return (__force __le64)*p; -} -static inline __u64 __le64_to_cpup(const __le64 *p) -{ - return (__force __u64)*p; -} -static inline __le32 __cpu_to_le32p(const __u32 *p) -{ - return (__force __le32)*p; -} -static inline __u32 __le32_to_cpup(const __le32 *p) -{ - return (__force __u32)*p; -} -static inline __le16 __cpu_to_le16p(const __u16 *p) -{ - return (__force __le16)*p; -} -static inline __u16 __le16_to_cpup(const __le16 *p) -{ - return (__force __u16)*p; -} -static inline __be64 __cpu_to_be64p(const __u64 *p) -{ - return (__force __be64)__swab64p(p); -} -static inline __u64 __be64_to_cpup(const __be64 *p) -{ - return __swab64p((__u64 *)p); -} -static inline __be32 __cpu_to_be32p(const __u32 *p) -{ - return (__force __be32)__swab32p(p); -} -static inline __u32 __be32_to_cpup(const __be32 *p) -{ - return __swab32p((__u32 *)p); -} -static inline __be16 __cpu_to_be16p(const __u16 *p) -{ - return (__force __be16)__swab16p(p); -} -static inline __u16 __be16_to_cpup(const __be16 *p) -{ - return __swab16p((__u16 *)p); -} -#define __cpu_to_le64s(x) do { (void)(x); } while (0) -#define __le64_to_cpus(x) do { (void)(x); } while (0) -#define __cpu_to_le32s(x) do { (void)(x); } while (0) -#define __le32_to_cpus(x) do { (void)(x); } while (0) -#define __cpu_to_le16s(x) do { (void)(x); } while (0) -#define __le16_to_cpus(x) do { (void)(x); } while (0) -#define __cpu_to_be64s(x) __swab64s((x)) -#define __be64_to_cpus(x) __swab64s((x)) -#define __cpu_to_be32s(x) __swab32s((x)) -#define __be32_to_cpus(x) __swab32s((x)) -#define __cpu_to_be16s(x) __swab16s((x)) -#define __be16_to_cpus(x) __swab16s((x)) - -#ifdef __KERNEL__ #include -#endif - #endif /* _LINUX_BYTEORDER_LITTLE_ENDIAN_H */ diff --git a/drivers/include/linux/clocksource.h b/drivers/include/linux/clocksource.h index 7784b597e9..6013021a3b 100644 --- a/drivers/include/linux/clocksource.h +++ b/drivers/include/linux/clocksource.h @@ -62,12 +62,18 @@ struct module; * @suspend: suspend function for the clocksource, if necessary * @resume: resume function for the clocksource, if necessary * @owner: module reference, must be set by clocksource in modules + * + * Note: This struct is not used in hotpathes of the timekeeping code + * because the timekeeper caches the hot path fields in its own data + * structure, so no line cache alignment is required, + * + * The pointer to the clocksource itself is handed to the read + * callback. If you need extra information there you can wrap struct + * clocksource into your own struct. Depending on the amount of + * information you need you should consider to cache line align that + * structure. */ struct clocksource { - /* - * Hotpath data, fits in a single cache line when the - * clocksource itself is cacheline aligned. 
- */ cycle_t (*read)(struct clocksource *cs); cycle_t mask; u32 mult; @@ -95,7 +101,7 @@ struct clocksource { cycle_t wd_last; #endif struct module *owner; -} ____cacheline_aligned; +}; /* * Clock source flags bits:: diff --git a/drivers/include/linux/compiler-gcc.h b/drivers/include/linux/compiler-gcc.h index 7d153abfdb..4af9b47b97 100644 --- a/drivers/include/linux/compiler-gcc.h +++ b/drivers/include/linux/compiler-gcc.h @@ -251,9 +251,7 @@ #endif #endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */ -#if GCC_VERSION >= 70000 -#define KASAN_ABI_VERSION 5 -#elif GCC_VERSION >= 50000 +#if GCC_VERSION >= 50000 #define KASAN_ABI_VERSION 4 #elif GCC_VERSION >= 40902 #define KASAN_ABI_VERSION 3 diff --git a/drivers/include/linux/compiler.h b/drivers/include/linux/compiler.h index f6b454f7a3..9b7c062c1b 100644 --- a/drivers/include/linux/compiler.h +++ b/drivers/include/linux/compiler.h @@ -299,6 +299,23 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s __u.__val; \ }) +/** + * smp_cond_acquire() - Spin wait for cond with ACQUIRE ordering + * @cond: boolean expression to wait for + * + * Equivalent to using smp_load_acquire() on the condition variable but employs + * the control dependency of the wait to reduce the barrier on many platforms. + * + * The control dependency provides a LOAD->STORE order, the additional RMB + * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order, + * aka. ACQUIRE. + */ +#define smp_cond_acquire(cond) do { \ + while (!(cond)) \ + cpu_relax(); \ + smp_rmb(); /* ctrl + rmb := acquire */ \ +} while (0) + #endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ diff --git a/drivers/include/linux/component.h b/drivers/include/linux/component.h index c00dcc3026..a559eebc0e 100644 --- a/drivers/include/linux/component.h +++ b/drivers/include/linux/component.h @@ -1,39 +1,48 @@ #ifndef COMPONENT_H #define COMPONENT_H +#include + struct device; struct component_ops { - int (*bind)(struct device *, struct device *, void *); - void (*unbind)(struct device *, struct device *, void *); + int (*bind)(struct device *comp, struct device *master, + void *master_data); + void (*unbind)(struct device *comp, struct device *master, + void *master_data); }; int component_add(struct device *, const struct component_ops *); void component_del(struct device *, const struct component_ops *); -int component_bind_all(struct device *, void *); -void component_unbind_all(struct device *, void *); +int component_bind_all(struct device *master, void *master_data); +void component_unbind_all(struct device *master, void *master_data); struct master; struct component_master_ops { - int (*add_components)(struct device *, struct master *); - int (*bind)(struct device *); - void (*unbind)(struct device *); + int (*bind)(struct device *master); + void (*unbind)(struct device *master); }; -int component_master_add(struct device *, const struct component_master_ops *); void component_master_del(struct device *, const struct component_master_ops *); -int component_master_add_child(struct master *master, - int (*compare)(struct device *, void *), void *compare_data); - struct component_match; int component_master_add_with_match(struct device *, const struct component_master_ops *, struct component_match *); -void component_match_add(struct device *, struct component_match **, +void component_match_add_release(struct device *master, + struct component_match **matchptr, + void (*release)(struct device *, void *), int (*compare)(struct device *, void *), void 
*compare_data); +static inline void component_match_add(struct device *master, + struct component_match **matchptr, + int (*compare)(struct device *, void *), void *compare_data) +{ + component_match_add_release(master, matchptr, NULL, compare, + compare_data); +} + #endif diff --git a/drivers/include/linux/cpumask.h b/drivers/include/linux/cpumask.h index a91b3b75da..fc14275ff3 100644 --- a/drivers/include/linux/cpumask.h +++ b/drivers/include/linux/cpumask.h @@ -85,10 +85,14 @@ extern int nr_cpu_ids; * only one CPU. */ -extern const struct cpumask *const cpu_possible_mask; -extern const struct cpumask *const cpu_online_mask; -extern const struct cpumask *const cpu_present_mask; -extern const struct cpumask *const cpu_active_mask; +extern struct cpumask __cpu_possible_mask; +extern struct cpumask __cpu_online_mask; +extern struct cpumask __cpu_present_mask; +extern struct cpumask __cpu_active_mask; +#define cpu_possible_mask ((const struct cpumask *)&__cpu_possible_mask) +#define cpu_online_mask ((const struct cpumask *)&__cpu_online_mask) +#define cpu_present_mask ((const struct cpumask *)&__cpu_present_mask) +#define cpu_active_mask ((const struct cpumask *)&__cpu_active_mask) #if NR_CPUS > 1 #define num_online_cpus() cpumask_weight(cpu_online_mask) @@ -556,7 +560,7 @@ static inline void cpumask_copy(struct cpumask *dstp, static inline int cpumask_parse_user(const char __user *buf, int len, struct cpumask *dstp) { - return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits); + return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpu_ids); } /** @@ -571,7 +575,7 @@ static inline int cpumask_parselist_user(const char __user *buf, int len, struct cpumask *dstp) { return bitmap_parselist_user(buf, len, cpumask_bits(dstp), - nr_cpumask_bits); + nr_cpu_ids); } /** @@ -586,7 +590,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp) char *nl = strchr(buf, '\n'); unsigned int len = nl ? 
(unsigned int)(nl - buf) : strlen(buf); - return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits); + return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpu_ids); } /** @@ -598,7 +602,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp) */ static inline int cpulist_parse(const char *buf, struct cpumask *dstp) { - return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits); + return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpu_ids); } /** @@ -716,14 +720,49 @@ extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS); #define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask) /* Wrappers for arch boot code to manipulate normally-constant masks */ -void set_cpu_possible(unsigned int cpu, bool possible); -void set_cpu_present(unsigned int cpu, bool present); -void set_cpu_online(unsigned int cpu, bool online); -void set_cpu_active(unsigned int cpu, bool active); void init_cpu_present(const struct cpumask *src); void init_cpu_possible(const struct cpumask *src); void init_cpu_online(const struct cpumask *src); +static inline void +set_cpu_possible(unsigned int cpu, bool possible) +{ + if (possible) + cpumask_set_cpu(cpu, &__cpu_possible_mask); + else + cpumask_clear_cpu(cpu, &__cpu_possible_mask); +} + +static inline void +set_cpu_present(unsigned int cpu, bool present) +{ + if (present) + cpumask_set_cpu(cpu, &__cpu_present_mask); + else + cpumask_clear_cpu(cpu, &__cpu_present_mask); +} + +static inline void +set_cpu_online(unsigned int cpu, bool online) +{ + if (online) { + cpumask_set_cpu(cpu, &__cpu_online_mask); + cpumask_set_cpu(cpu, &__cpu_active_mask); + } else { + cpumask_clear_cpu(cpu, &__cpu_online_mask); + } +} + +static inline void +set_cpu_active(unsigned int cpu, bool active) +{ + if (active) + cpumask_set_cpu(cpu, &__cpu_active_mask); + else + cpumask_clear_cpu(cpu, &__cpu_active_mask); +} + + /** * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask * * @bitmap: the bitmap diff --git a/drivers/include/linux/dma-attrs.h b/drivers/include/linux/dma-attrs.h index c8e1831d75..99c0be00b4 100644 --- a/drivers/include/linux/dma-attrs.h +++ b/drivers/include/linux/dma-attrs.h @@ -41,7 +41,6 @@ static inline void init_dma_attrs(struct dma_attrs *attrs) bitmap_zero(attrs->flags, __DMA_ATTRS_LONGS); } -#ifdef CONFIG_HAVE_DMA_ATTRS /** * dma_set_attr - set a specific attribute * @attr: attribute to set @@ -67,14 +66,5 @@ static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs) BUG_ON(attr >= DMA_ATTR_MAX); return test_bit(attr, attrs->flags); } -#else /* !CONFIG_HAVE_DMA_ATTRS */ -static inline void dma_set_attr(enum dma_attr attr, struct dma_attrs *attrs) -{ -} -static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs) -{ - return 0; -} -#endif /* CONFIG_HAVE_DMA_ATTRS */ #endif /* _DMA_ATTR_H */ diff --git a/drivers/include/linux/dma-mapping.h b/drivers/include/linux/dma-mapping.h index a1ba2b146f..28c35bd5cd 100644 --- a/drivers/include/linux/dma-mapping.h +++ b/drivers/include/linux/dma-mapping.h @@ -8,6 +8,7 @@ #include #include #include +#include extern void * dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, diff --git a/drivers/include/linux/dmi.h b/drivers/include/linux/dmi.h index 5055ac3414..5e9c74cf88 100644 --- a/drivers/include/linux/dmi.h +++ b/drivers/include/linux/dmi.h @@ -22,6 +22,7 @@ enum dmi_device_type { DMI_DEV_TYPE_IPMI = -1, DMI_DEV_TYPE_OEM_STRING = -2, DMI_DEV_TYPE_DEV_ONBOARD = -3, + DMI_DEV_TYPE_DEV_SLOT = -4, }; enum dmi_entry_type { diff 
--git a/drivers/include/linux/err.h b/drivers/include/linux/err.h index a729120644..56762ab417 100644 --- a/drivers/include/linux/err.h +++ b/drivers/include/linux/err.h @@ -37,7 +37,7 @@ static inline bool __must_check IS_ERR(__force const void *ptr) static inline bool __must_check IS_ERR_OR_NULL(__force const void *ptr) { - return !ptr || IS_ERR_VALUE((unsigned long)ptr); + return unlikely(!ptr) || IS_ERR_VALUE((unsigned long)ptr); } /** diff --git a/drivers/include/linux/fb.h b/drivers/include/linux/fb.h index b17601e735..13e5dc99b2 100644 --- a/drivers/include/linux/fb.h +++ b/drivers/include/linux/fb.h @@ -1,413 +1,19 @@ #ifndef _LINUX_FB_H #define _LINUX_FB_H -#include -#include +#include +#include -struct dentry; - -/* Definitions of frame buffers */ - -#define FB_MAX 32 /* sufficient for now */ - -/* ioctls - 0x46 is 'F' */ -#define FBIOGET_VSCREENINFO 0x4600 -#define FBIOPUT_VSCREENINFO 0x4601 -#define FBIOGET_FSCREENINFO 0x4602 -#define FBIOGETCMAP 0x4604 -#define FBIOPUTCMAP 0x4605 -#define FBIOPAN_DISPLAY 0x4606 -#ifdef __KERNEL__ #define FBIO_CURSOR _IOWR('F', 0x08, struct fb_cursor_user) -#else -#define FBIO_CURSOR _IOWR('F', 0x08, struct fb_cursor) -#endif -/* 0x4607-0x460B are defined below */ -/* #define FBIOGET_MONITORSPEC 0x460C */ -/* #define FBIOPUT_MONITORSPEC 0x460D */ -/* #define FBIOSWITCH_MONIBIT 0x460E */ -#define FBIOGET_CON2FBMAP 0x460F -#define FBIOPUT_CON2FBMAP 0x4610 -#define FBIOBLANK 0x4611 /* arg: 0 or vesa level + 1 */ -#define FBIOGET_VBLANK _IOR('F', 0x12, struct fb_vblank) -#define FBIO_ALLOC 0x4613 -#define FBIO_FREE 0x4614 -#define FBIOGET_GLYPH 0x4615 -#define FBIOGET_HWCINFO 0x4616 -#define FBIOPUT_MODEINFO 0x4617 -#define FBIOGET_DISPINFO 0x4618 -#define FBIO_WAITFORVSYNC _IOW('F', 0x20, __u32) -#define FB_TYPE_PACKED_PIXELS 0 /* Packed Pixels */ -#define FB_TYPE_PLANES 1 /* Non interleaved planes */ -#define FB_TYPE_INTERLEAVED_PLANES 2 /* Interleaved planes */ -#define FB_TYPE_TEXT 3 /* Text/attributes */ -#define FB_TYPE_VGA_PLANES 4 /* EGA/VGA planes */ - -#define FB_AUX_TEXT_MDA 0 /* Monochrome text */ -#define FB_AUX_TEXT_CGA 1 /* CGA/EGA/VGA Color text */ -#define FB_AUX_TEXT_S3_MMIO 2 /* S3 MMIO fasttext */ -#define FB_AUX_TEXT_MGA_STEP16 3 /* MGA Millenium I: text, attr, 14 reserved bytes */ -#define FB_AUX_TEXT_MGA_STEP8 4 /* other MGAs: text, attr, 6 reserved bytes */ -#define FB_AUX_TEXT_SVGA_GROUP 8 /* 8-15: SVGA tileblit compatible modes */ -#define FB_AUX_TEXT_SVGA_MASK 7 /* lower three bits says step */ -#define FB_AUX_TEXT_SVGA_STEP2 8 /* SVGA text mode: text, attr */ -#define FB_AUX_TEXT_SVGA_STEP4 9 /* SVGA text mode: text, attr, 2 reserved bytes */ -#define FB_AUX_TEXT_SVGA_STEP8 10 /* SVGA text mode: text, attr, 6 reserved bytes */ -#define FB_AUX_TEXT_SVGA_STEP16 11 /* SVGA text mode: text, attr, 14 reserved bytes */ -#define FB_AUX_TEXT_SVGA_LAST 15 /* reserved up to 15 */ - -#define FB_AUX_VGA_PLANES_VGA4 0 /* 16 color planes (EGA/VGA) */ -#define FB_AUX_VGA_PLANES_CFB4 1 /* CFB4 in planes (VGA) */ -#define FB_AUX_VGA_PLANES_CFB8 2 /* CFB8 in planes (VGA) */ - -#define FB_VISUAL_MONO01 0 /* Monochr. 1=Black 0=White */ -#define FB_VISUAL_MONO10 1 /* Monochr. 
1=White 0=Black */ -#define FB_VISUAL_TRUECOLOR 2 /* True color */ -#define FB_VISUAL_PSEUDOCOLOR 3 /* Pseudo color (like atari) */ -#define FB_VISUAL_DIRECTCOLOR 4 /* Direct color */ -#define FB_VISUAL_STATIC_PSEUDOCOLOR 5 /* Pseudo color readonly */ - -#define FB_ACCEL_NONE 0 /* no hardware accelerator */ -#define FB_ACCEL_ATARIBLITT 1 /* Atari Blitter */ -#define FB_ACCEL_AMIGABLITT 2 /* Amiga Blitter */ -#define FB_ACCEL_S3_TRIO64 3 /* Cybervision64 (S3 Trio64) */ -#define FB_ACCEL_NCR_77C32BLT 4 /* RetinaZ3 (NCR 77C32BLT) */ -#define FB_ACCEL_S3_VIRGE 5 /* Cybervision64/3D (S3 ViRGE) */ -#define FB_ACCEL_ATI_MACH64GX 6 /* ATI Mach 64GX family */ -#define FB_ACCEL_DEC_TGA 7 /* DEC 21030 TGA */ -#define FB_ACCEL_ATI_MACH64CT 8 /* ATI Mach 64CT family */ -#define FB_ACCEL_ATI_MACH64VT 9 /* ATI Mach 64CT family VT class */ -#define FB_ACCEL_ATI_MACH64GT 10 /* ATI Mach 64CT family GT class */ -#define FB_ACCEL_SUN_CREATOR 11 /* Sun Creator/Creator3D */ -#define FB_ACCEL_SUN_CGSIX 12 /* Sun cg6 */ -#define FB_ACCEL_SUN_LEO 13 /* Sun leo/zx */ -#define FB_ACCEL_IMS_TWINTURBO 14 /* IMS Twin Turbo */ -#define FB_ACCEL_3DLABS_PERMEDIA2 15 /* 3Dlabs Permedia 2 */ -#define FB_ACCEL_MATROX_MGA2064W 16 /* Matrox MGA2064W (Millenium) */ -#define FB_ACCEL_MATROX_MGA1064SG 17 /* Matrox MGA1064SG (Mystique) */ -#define FB_ACCEL_MATROX_MGA2164W 18 /* Matrox MGA2164W (Millenium II) */ -#define FB_ACCEL_MATROX_MGA2164W_AGP 19 /* Matrox MGA2164W (Millenium II) */ -#define FB_ACCEL_MATROX_MGAG100 20 /* Matrox G100 (Productiva G100) */ -#define FB_ACCEL_MATROX_MGAG200 21 /* Matrox G200 (Myst, Mill, ...) */ -#define FB_ACCEL_SUN_CG14 22 /* Sun cgfourteen */ -#define FB_ACCEL_SUN_BWTWO 23 /* Sun bwtwo */ -#define FB_ACCEL_SUN_CGTHREE 24 /* Sun cgthree */ -#define FB_ACCEL_SUN_TCX 25 /* Sun tcx */ -#define FB_ACCEL_MATROX_MGAG400 26 /* Matrox G400 */ -#define FB_ACCEL_NV3 27 /* nVidia RIVA 128 */ -#define FB_ACCEL_NV4 28 /* nVidia RIVA TNT */ -#define FB_ACCEL_NV5 29 /* nVidia RIVA TNT2 */ -#define FB_ACCEL_CT_6555x 30 /* C&T 6555x */ -#define FB_ACCEL_3DFX_BANSHEE 31 /* 3Dfx Banshee */ -#define FB_ACCEL_ATI_RAGE128 32 /* ATI Rage128 family */ -#define FB_ACCEL_IGS_CYBER2000 33 /* CyberPro 2000 */ -#define FB_ACCEL_IGS_CYBER2010 34 /* CyberPro 2010 */ -#define FB_ACCEL_IGS_CYBER5000 35 /* CyberPro 5000 */ -#define FB_ACCEL_SIS_GLAMOUR 36 /* SiS 300/630/540 */ -#define FB_ACCEL_3DLABS_PERMEDIA3 37 /* 3Dlabs Permedia 3 */ -#define FB_ACCEL_ATI_RADEON 38 /* ATI Radeon family */ -#define FB_ACCEL_I810 39 /* Intel 810/815 */ -#define FB_ACCEL_SIS_GLAMOUR_2 40 /* SiS 315, 650, 740 */ -#define FB_ACCEL_SIS_XABRE 41 /* SiS 330 ("Xabre") */ -#define FB_ACCEL_I830 42 /* Intel 830M/845G/85x/865G */ -#define FB_ACCEL_NV_10 43 /* nVidia Arch 10 */ -#define FB_ACCEL_NV_20 44 /* nVidia Arch 20 */ -#define FB_ACCEL_NV_30 45 /* nVidia Arch 30 */ -#define FB_ACCEL_NV_40 46 /* nVidia Arch 40 */ -#define FB_ACCEL_XGI_VOLARI_V 47 /* XGI Volari V3XT, V5, V8 */ -#define FB_ACCEL_XGI_VOLARI_Z 48 /* XGI Volari Z7 */ -#define FB_ACCEL_OMAP1610 49 /* TI OMAP16xx */ -#define FB_ACCEL_TRIDENT_TGUI 50 /* Trident TGUI */ -#define FB_ACCEL_TRIDENT_3DIMAGE 51 /* Trident 3DImage */ -#define FB_ACCEL_TRIDENT_BLADE3D 52 /* Trident Blade3D */ -#define FB_ACCEL_TRIDENT_BLADEXP 53 /* Trident BladeXP */ -#define FB_ACCEL_CIRRUS_ALPINE 53 /* Cirrus Logic 543x/544x/5480 */ -#define FB_ACCEL_NEOMAGIC_NM2070 90 /* NeoMagic NM2070 */ -#define FB_ACCEL_NEOMAGIC_NM2090 91 /* NeoMagic NM2090 */ -#define FB_ACCEL_NEOMAGIC_NM2093 92 /* NeoMagic NM2093 */ 
-#define FB_ACCEL_NEOMAGIC_NM2097 93 /* NeoMagic NM2097 */ -#define FB_ACCEL_NEOMAGIC_NM2160 94 /* NeoMagic NM2160 */ -#define FB_ACCEL_NEOMAGIC_NM2200 95 /* NeoMagic NM2200 */ -#define FB_ACCEL_NEOMAGIC_NM2230 96 /* NeoMagic NM2230 */ -#define FB_ACCEL_NEOMAGIC_NM2360 97 /* NeoMagic NM2360 */ -#define FB_ACCEL_NEOMAGIC_NM2380 98 /* NeoMagic NM2380 */ -#define FB_ACCEL_PXA3XX 99 /* PXA3xx */ - -#define FB_ACCEL_SAVAGE4 0x80 /* S3 Savage4 */ -#define FB_ACCEL_SAVAGE3D 0x81 /* S3 Savage3D */ -#define FB_ACCEL_SAVAGE3D_MV 0x82 /* S3 Savage3D-MV */ -#define FB_ACCEL_SAVAGE2000 0x83 /* S3 Savage2000 */ -#define FB_ACCEL_SAVAGE_MX_MV 0x84 /* S3 Savage/MX-MV */ -#define FB_ACCEL_SAVAGE_MX 0x85 /* S3 Savage/MX */ -#define FB_ACCEL_SAVAGE_IX_MV 0x86 /* S3 Savage/IX-MV */ -#define FB_ACCEL_SAVAGE_IX 0x87 /* S3 Savage/IX */ -#define FB_ACCEL_PROSAVAGE_PM 0x88 /* S3 ProSavage PM133 */ -#define FB_ACCEL_PROSAVAGE_KM 0x89 /* S3 ProSavage KM133 */ -#define FB_ACCEL_S3TWISTER_P 0x8a /* S3 Twister */ -#define FB_ACCEL_S3TWISTER_K 0x8b /* S3 TwisterK */ -#define FB_ACCEL_SUPERSAVAGE 0x8c /* S3 Supersavage */ -#define FB_ACCEL_PROSAVAGE_DDR 0x8d /* S3 ProSavage DDR */ -#define FB_ACCEL_PROSAVAGE_DDRK 0x8e /* S3 ProSavage DDR-K */ - -#define FB_ACCEL_PUV3_UNIGFX 0xa0 /* PKUnity-v3 Unigfx */ - -struct fb_fix_screeninfo { - char id[16]; /* identification string eg "TT Builtin" */ - unsigned long smem_start; /* Start of frame buffer mem */ - /* (physical address) */ - __u32 smem_len; /* Length of frame buffer mem */ - __u32 type; /* see FB_TYPE_* */ - __u32 type_aux; /* Interleave for interleaved Planes */ - __u32 visual; /* see FB_VISUAL_* */ - __u16 xpanstep; /* zero if no hardware panning */ - __u16 ypanstep; /* zero if no hardware panning */ - __u16 ywrapstep; /* zero if no hardware ywrap */ - __u32 line_length; /* length of a line in bytes */ - unsigned long mmio_start; /* Start of Memory Mapped I/O */ - /* (physical address) */ - __u32 mmio_len; /* Length of Memory Mapped I/O */ - __u32 accel; /* Indicate to driver which */ - /* specific chip/card we have */ - __u16 reserved[3]; /* Reserved for future compatibility */ -}; - -/* Interpretation of offset for color fields: All offsets are from the right, - * inside a "pixel" value, which is exactly 'bits_per_pixel' wide (means: you - * can use the offset as right argument to <<). A pixel afterwards is a bit - * stream and is written to video memory as that unmodified. - * - * For pseudocolor: offset and length should be the same for all color - * components. Offset specifies the position of the least significant bit - * of the pallette index in a pixel value. Length indicates the number - * of available palette entries (i.e. # of entries = 1 << length). 
- */ -struct fb_bitfield { - __u32 offset; /* beginning of bitfield */ - __u32 length; /* length of bitfield */ - __u32 msb_right; /* != 0 : Most significant bit is */ - /* right */ -}; - -#define FB_NONSTD_HAM 1 /* Hold-And-Modify (HAM) */ -#define FB_NONSTD_REV_PIX_IN_B 2 /* order of pixels in each byte is reversed */ - -#define FB_ACTIVATE_NOW 0 /* set values immediately (or vbl)*/ -#define FB_ACTIVATE_NXTOPEN 1 /* activate on next open */ -#define FB_ACTIVATE_TEST 2 /* don't set, round up impossible */ -#define FB_ACTIVATE_MASK 15 - /* values */ -#define FB_ACTIVATE_VBL 16 /* activate values on next vbl */ -#define FB_CHANGE_CMAP_VBL 32 /* change colormap on vbl */ -#define FB_ACTIVATE_ALL 64 /* change all VCs on this fb */ -#define FB_ACTIVATE_FORCE 128 /* force apply even when no change*/ -#define FB_ACTIVATE_INV_MODE 256 /* invalidate videomode */ - -#define FB_ACCELF_TEXT 1 /* (OBSOLETE) see fb_info.flags and vc_mode */ - -#define FB_SYNC_HOR_HIGH_ACT 1 /* horizontal sync high active */ -#define FB_SYNC_VERT_HIGH_ACT 2 /* vertical sync high active */ -#define FB_SYNC_EXT 4 /* external sync */ -#define FB_SYNC_COMP_HIGH_ACT 8 /* composite sync high active */ -#define FB_SYNC_BROADCAST 16 /* broadcast video timings */ - /* vtotal = 144d/288n/576i => PAL */ - /* vtotal = 121d/242n/484i => NTSC */ -#define FB_SYNC_ON_GREEN 32 /* sync on green */ - -#define FB_VMODE_NONINTERLACED 0 /* non interlaced */ -#define FB_VMODE_INTERLACED 1 /* interlaced */ -#define FB_VMODE_DOUBLE 2 /* double scan */ -#define FB_VMODE_ODD_FLD_FIRST 4 /* interlaced: top line first */ -#define FB_VMODE_MASK 255 - -#define FB_VMODE_YWRAP 256 /* ywrap instead of panning */ -#define FB_VMODE_SMOOTH_XPAN 512 /* smooth xpan possible (internally used) */ -#define FB_VMODE_CONUPDATE 512 /* don't update x/yoffset */ - -/* - * Display rotation support - */ -#define FB_ROTATE_UR 0 -#define FB_ROTATE_CW 1 -#define FB_ROTATE_UD 2 -#define FB_ROTATE_CCW 3 - -#define PICOS2KHZ(a) (1000000000UL/(a)) -#define KHZ2PICOS(a) (1000000000UL/(a)) - -struct fb_var_screeninfo { - __u32 xres; /* visible resolution */ - __u32 yres; - __u32 xres_virtual; /* virtual resolution */ - __u32 yres_virtual; - __u32 xoffset; /* offset from virtual to visible */ - __u32 yoffset; /* resolution */ - - __u32 bits_per_pixel; /* guess what */ - __u32 grayscale; /* != 0 Graylevels instead of colors */ - - struct fb_bitfield red; /* bitfield in fb mem if true color, */ - struct fb_bitfield green; /* else only length is significant */ - struct fb_bitfield blue; - struct fb_bitfield transp; /* transparency */ - - __u32 nonstd; /* != 0 Non standard pixel format */ - - __u32 activate; /* see FB_ACTIVATE_* */ - - __u32 height; /* height of picture in mm */ - __u32 width; /* width of picture in mm */ - - __u32 accel_flags; /* (OBSOLETE) see fb_info.flags */ - - /* Timing: All values in pixclocks, except pixclock (of course) */ - __u32 pixclock; /* pixel clock in ps (pico seconds) */ - __u32 left_margin; /* time from sync to picture */ - __u32 right_margin; /* time from picture to sync */ - __u32 upper_margin; /* time from sync to picture */ - __u32 lower_margin; - __u32 hsync_len; /* length of horizontal sync */ - __u32 vsync_len; /* length of vertical sync */ - __u32 sync; /* see FB_SYNC_* */ - __u32 vmode; /* see FB_VMODE_* */ - __u32 rotate; /* angle we rotate counter clockwise */ - __u32 reserved[5]; /* Reserved for future compatibility */ -}; - -struct fb_cmap { - __u32 start; /* First entry */ - __u32 len; /* Number of entries */ - __u16 *red; /* Red 
values */ - __u16 *green; - __u16 *blue; - __u16 *transp; /* transparency, can be NULL */ -}; - -struct fb_con2fbmap { - __u32 console; - __u32 framebuffer; -}; - -/* VESA Blanking Levels */ -#define VESA_NO_BLANKING 0 -#define VESA_VSYNC_SUSPEND 1 -#define VESA_HSYNC_SUSPEND 2 -#define VESA_POWERDOWN 3 - - -enum { - /* screen: unblanked, hsync: on, vsync: on */ - FB_BLANK_UNBLANK = VESA_NO_BLANKING, - - /* screen: blanked, hsync: on, vsync: on */ - FB_BLANK_NORMAL = VESA_NO_BLANKING + 1, - - /* screen: blanked, hsync: on, vsync: off */ - FB_BLANK_VSYNC_SUSPEND = VESA_VSYNC_SUSPEND + 1, - - /* screen: blanked, hsync: off, vsync: on */ - FB_BLANK_HSYNC_SUSPEND = VESA_HSYNC_SUSPEND + 1, - - /* screen: blanked, hsync: off, vsync: off */ - FB_BLANK_POWERDOWN = VESA_POWERDOWN + 1 -}; - -#define FB_VBLANK_VBLANKING 0x001 /* currently in a vertical blank */ -#define FB_VBLANK_HBLANKING 0x002 /* currently in a horizontal blank */ -#define FB_VBLANK_HAVE_VBLANK 0x004 /* vertical blanks can be detected */ -#define FB_VBLANK_HAVE_HBLANK 0x008 /* horizontal blanks can be detected */ -#define FB_VBLANK_HAVE_COUNT 0x010 /* global retrace counter is available */ -#define FB_VBLANK_HAVE_VCOUNT 0x020 /* the vcount field is valid */ -#define FB_VBLANK_HAVE_HCOUNT 0x040 /* the hcount field is valid */ -#define FB_VBLANK_VSYNCING 0x080 /* currently in a vsync */ -#define FB_VBLANK_HAVE_VSYNC 0x100 /* verical syncs can be detected */ - -struct fb_vblank { - __u32 flags; /* FB_VBLANK flags */ - __u32 count; /* counter of retraces since boot */ - __u32 vcount; /* current scanline position */ - __u32 hcount; /* current scandot position */ - __u32 reserved[4]; /* reserved for future compatibility */ -}; - -/* Internal HW accel */ -#define ROP_COPY 0 -#define ROP_XOR 1 - -struct fb_copyarea { - __u32 dx; - __u32 dy; - __u32 width; - __u32 height; - __u32 sx; - __u32 sy; -}; - -struct fb_fillrect { - __u32 dx; /* screen-relative */ - __u32 dy; - __u32 width; - __u32 height; - __u32 color; - __u32 rop; -}; - -struct fb_image { - __u32 dx; /* Where to place image */ - __u32 dy; - __u32 width; /* Size of image */ - __u32 height; - __u32 fg_color; /* Only used when a mono bitmap */ - __u32 bg_color; - __u8 depth; /* Depth of the image */ - const char *data; /* Pointer to image data */ - struct fb_cmap cmap; /* color map info */ -}; - -/* - * hardware cursor control - */ - -#define FB_CUR_SETIMAGE 0x01 -#define FB_CUR_SETPOS 0x02 -#define FB_CUR_SETHOT 0x04 -#define FB_CUR_SETCMAP 0x08 -#define FB_CUR_SETSHAPE 0x10 -#define FB_CUR_SETSIZE 0x20 -#define FB_CUR_SETALL 0xFF - -struct fbcurpos { - __u16 x, y; -}; - -struct fb_cursor { - __u16 set; /* what to set */ - __u16 enable; /* cursor on/off */ - __u16 rop; /* bitop operation */ - const char *mask; /* cursor mask bits */ - struct fbcurpos hot; /* cursor hot spot */ - struct fb_image image; /* Cursor image */ -}; - -#ifdef CONFIG_FB_BACKLIGHT -/* Settings for the generic backlight code */ -#define FB_BACKLIGHT_LEVELS 128 -#define FB_BACKLIGHT_MAX 0xFF -#endif - -//#ifdef __KERNEL__ - -//#include -//#include -//#include -//#include -//#include +#include +#include +#include +#include #include -#include -//#include +#include #include -//#include +#include struct vm_area_struct; struct fb_info; @@ -569,7 +175,27 @@ struct fb_blit_caps { u32 flags; }; +#ifdef CONFIG_FB_NOTIFY +extern int fb_register_client(struct notifier_block *nb); +extern int fb_unregister_client(struct notifier_block *nb); extern int fb_notifier_call_chain(unsigned long val, void *v); +#else +static 
inline int fb_register_client(struct notifier_block *nb) +{ + return 0; +}; + +static inline int fb_unregister_client(struct notifier_block *nb) +{ + return 0; +}; + +static inline int fb_notifier_call_chain(unsigned long val, void *v) +{ + return 0; +}; +#endif + /* * Pixmap structure definition * @@ -1050,6 +676,13 @@ static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch, } /* drivers/video/fb_defio.c */ +extern void fb_deferred_io_init(struct fb_info *info); +extern void fb_deferred_io_open(struct fb_info *info, + struct inode *inode, + struct file *file); +extern void fb_deferred_io_cleanup(struct fb_info *info); +extern int fb_deferred_io_fsync(struct file *file, loff_t start, + loff_t end, int datasync); static inline bool fb_be_math(struct fb_info *info) { diff --git a/drivers/include/linux/fs.h b/drivers/include/linux/fs.h index 3bde2bc893..93003909b8 100644 --- a/drivers/include/linux/fs.h +++ b/drivers/include/linux/fs.h @@ -94,4 +94,5 @@ struct file { #define FL_UNLOCK_PENDING 512 /* Lease is being broken */ #define FL_OFDLCK 1024 /* lock is "owned" by struct file */ #define FL_LAYOUT 2048 /* outstanding pNFS layout */ +struct inode; #endif /* _LINUX_FS_H */ diff --git a/drivers/include/linux/gfp.h b/drivers/include/linux/gfp.h index 4e7f19ec0f..c33d532993 100644 --- a/drivers/include/linux/gfp.h +++ b/drivers/include/linux/gfp.h @@ -2,7 +2,7 @@ #define __LINUX_GFP_H #include -#include +#include #include #include @@ -29,7 +29,7 @@ struct vm_area_struct; #define ___GFP_HARDWALL 0x20000u #define ___GFP_THISNODE 0x40000u #define ___GFP_ATOMIC 0x80000u -#define ___GFP_NOACCOUNT 0x100000u +#define ___GFP_ACCOUNT 0x100000u #define ___GFP_NOTRACK 0x200000u #define ___GFP_DIRECT_RECLAIM 0x400000u #define ___GFP_OTHER_NODE 0x800000u @@ -72,11 +72,15 @@ struct vm_area_struct; * * __GFP_THISNODE forces the allocation to be satisified from the requested * node with no fallbacks or placement policy enforcements. + * + * __GFP_ACCOUNT causes the allocation to be accounted to kmemcg (only relevant + * to kmem allocations). */ #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) #define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) #define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE) +#define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT) /* * Watermark modifiers -- controls access to emergency reserves @@ -103,7 +107,6 @@ struct vm_area_struct; #define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) #define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC) #define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) -#define __GFP_NOACCOUNT ((__force gfp_t)___GFP_NOACCOUNT) /* * Reclaim modifiers @@ -196,6 +199,9 @@ struct vm_area_struct; * GFP_KERNEL is typical for kernel-internal allocations. The caller requires * ZONE_NORMAL or a lower zone for direct access but can direct reclaim. * + * GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is + * accounted to kmemcg. + * * GFP_NOWAIT is for kernel allocations that should not stall for direct * reclaim, start physical IO or use any filesystem callback. 
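A usage sketch for the __GFP_ACCOUNT / GFP_KERNEL_ACCOUNT flags documented above (illustrative only; 'ctx' stands for some long-lived, userspace-triggered kernel object):

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL_ACCOUNT);
	if (!ctx)
		return -ENOMEM;
	/* the allocation is now charged to the caller's memory cgroup */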
* @@ -235,6 +241,7 @@ struct vm_area_struct; */ #define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM) #define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS) +#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT) #define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM) #define GFP_NOIO (__GFP_RECLAIM) #define GFP_NOFS (__GFP_RECLAIM | __GFP_IO) @@ -249,16 +256,9 @@ struct vm_area_struct; __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN) & \ ~__GFP_KSWAPD_RECLAIM) -/* Convert GFP flags to their corresponding migrate type */ -#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE) -#define GFP_MOVABLE_SHIFT 3 - -#undef GFP_MOVABLE_MASK -#undef GFP_MOVABLE_SHIFT - static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags) { - return (bool __force)(gfp_flags & __GFP_DIRECT_RECLAIM); + return !!(gfp_flags & __GFP_DIRECT_RECLAIM); } #ifdef CONFIG_HIGHMEM diff --git a/drivers/include/linux/hashtable.h b/drivers/include/linux/hashtable.h index 519b6e2d76..661e5c2a8e 100644 --- a/drivers/include/linux/hashtable.h +++ b/drivers/include/linux/hashtable.h @@ -16,6 +16,10 @@ struct hlist_head name[1 << (bits)] = \ { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } +#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] __read_mostly = \ + { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } + #define DECLARE_HASHTABLE(name, bits) \ struct hlist_head name[1 << (bits)] diff --git a/drivers/include/linux/i2c.h b/drivers/include/linux/i2c.h index 0dc397fbb8..f91837479d 100644 --- a/drivers/include/linux/i2c.h +++ b/drivers/include/linux/i2c.h @@ -30,6 +30,7 @@ #include /* for struct device */ #include /* for completion */ #include +#include /* for swab16 */ #include extern struct bus_type i2c_bus_type; diff --git a/drivers/include/linux/idr.h b/drivers/include/linux/idr.h index 94c671d43b..447b659a6b 100644 --- a/drivers/include/linux/idr.h +++ b/drivers/include/linux/idr.h @@ -135,6 +135,20 @@ static inline void *idr_find(struct idr *idr, int id) #define idr_for_each_entry(idp, entry, id) \ for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id) +/** + * idr_for_each_entry - continue iteration over an idr's elements of a given type + * @idp: idr handle + * @entry: the type * to use as cursor + * @id: id entry's key + * + * Continue to iterate over list of given type, continuing after + * the current position. + */ +#define idr_for_each_entry_continue(idp, entry, id) \ + for ((entry) = idr_get_next((idp), &(id)); \ + entry; \ + ++id, (entry) = idr_get_next((idp), &(id))) + /* * IDA - IDR based id allocator, use when translation from id to * pointer isn't necessary. diff --git a/drivers/include/linux/interrupt.h b/drivers/include/linux/interrupt.h index 12ddc7d532..57a1b0b334 100644 --- a/drivers/include/linux/interrupt.h +++ b/drivers/include/linux/interrupt.h @@ -65,6 +65,17 @@ #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) +/* + * These values can be returned by request_any_context_irq() and + * describe the context the interrupt will be run in. 
+ * + * IRQC_IS_HARDIRQ - interrupt runs in hardirq context + * IRQC_IS_NESTED - interrupt runs in a nested threaded context + */ +enum { + IRQC_IS_HARDIRQ = 0, + IRQC_IS_NESTED, +}; extern int early_irq_init(void); extern int arch_probe_nr_irqs(void); extern int arch_early_irq_init(void); diff --git a/drivers/include/linux/io.h b/drivers/include/linux/io.h index f571c8433d..0de6ca0e4e 100644 --- a/drivers/include/linux/io.h +++ b/drivers/include/linux/io.h @@ -22,6 +22,14 @@ #include #include #include +#include struct device; struct resource; + +__visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count); +void __ioread32_copy(void *to, const void __iomem *from, size_t count); +void __iowrite64_copy(void __iomem *to, const void *from, size_t count); +void *memremap(resource_size_t offset, size_t size, unsigned long flags); +void memunmap(void *addr); + #endif /* _LINUX_IO_H */ diff --git a/drivers/include/linux/ioport.h b/drivers/include/linux/ioport.h index aa812ad6c4..f62a3e4e7b 100644 --- a/drivers/include/linux/ioport.h +++ b/drivers/include/linux/ioport.h @@ -181,5 +181,13 @@ static inline bool resource_contains(struct resource *r1, struct resource *r2) } +/* Convenience shorthand with allocation */ +#define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), 0) +#define request_muxed_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), IORESOURCE_MUXED) +#define __request_mem_region(start,n,name, excl) __request_region(&iomem_resource, (start), (n), (name), excl) +#define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name), 0) +#define request_mem_region_exclusive(start,n,name) \ + __request_region(&iomem_resource, (start), (n), (name), IORESOURCE_EXCLUSIVE) +#define rename_region(region, newname) do { (region)->name = (newname); } while (0) #endif /* __ASSEMBLY__ */ #endif /* _LINUX_IOPORT_H */ diff --git a/drivers/include/linux/jiffies.h b/drivers/include/linux/jiffies.h index 400090a14b..bfa5919c1b 100644 --- a/drivers/include/linux/jiffies.h +++ b/drivers/include/linux/jiffies.h @@ -5,12 +5,11 @@ #include #include #include -//#include +#include //#include /* for HZ */ #define HZ 100 -#define CLOCK_TICK_RATE 1193182ul /* * The following defines establish the engineering parameters of the PLL diff --git a/drivers/include/linux/kernel.h b/drivers/include/linux/kernel.h index a39a709441..3d1966388e 100644 --- a/drivers/include/linux/kernel.h +++ b/drivers/include/linux/kernel.h @@ -714,32 +714,6 @@ int del_timer(struct timer_list *timer); # define del_timer_sync(t) del_timer(t) -#define build_mmio_read(name, size, type, reg, barrier) \ -static inline type name(const volatile void __iomem *addr) \ -{ type ret; asm volatile("mov" size " %1,%0":reg (ret) \ -:"m" (*(volatile type __force *)addr) barrier); return ret; } - -#define build_mmio_write(name, size, type, reg, barrier) \ -static inline void name(type val, volatile void __iomem *addr) \ -{ asm volatile("mov" size " %0,%1": :reg (val), \ -"m" (*(volatile type __force *)addr) barrier); } - -build_mmio_read(readb, "b", unsigned char, "=q", :"memory") -build_mmio_read(readw, "w", unsigned short, "=r", :"memory") -build_mmio_read(readl, "l", unsigned int, "=r", :"memory") - -build_mmio_read(__readb, "b", unsigned char, "=q", ) -build_mmio_read(__readw, "w", unsigned short, "=r", ) -build_mmio_read(__readl, "l", unsigned int, "=r", ) - -build_mmio_write(writeb, "b", unsigned char, "q", :"memory") 
-build_mmio_write(writew, "w", unsigned short, "r", :"memory") -build_mmio_write(writel, "l", unsigned int, "r", :"memory") - -build_mmio_write(__writeb, "b", unsigned char, "q", ) -build_mmio_write(__writew, "w", unsigned short, "r", ) -build_mmio_write(__writel, "l", unsigned int, "r", ) - #define readb_relaxed(a) __readb(a) #define readw_relaxed(a) __readw(a) #define readl_relaxed(a) __readl(a) @@ -872,6 +846,14 @@ static inline long copy_to_user(void __user *to, return __copy_to_user(to, from, n); } +#define CAP_SYS_ADMIN 21 + +static inline bool capable(int cap) +{ + return true; +} + + void *kmap(struct page *page); void *kmap_atomic(struct page *page); void kunmap(struct page *page); @@ -879,10 +861,14 @@ void kunmap_atomic(void *vaddr); typedef u64 async_cookie_t; -#define iowrite32(v, addr) writel((v), (addr)) +//#define iowrite32(v, addr) writel((v), (addr)) #define __init #define CONFIG_PAGE_OFFSET 0 +typedef long long __kernel_long_t; +typedef unsigned long long __kernel_ulong_t; +#define __kernel_long_t __kernel_long_t + #endif diff --git a/drivers/include/linux/list.h b/drivers/include/linux/list.h index ecd961ad3f..bccb960e59 100644 --- a/drivers/include/linux/list.h +++ b/drivers/include/linux/list.h @@ -24,7 +24,7 @@ static inline void INIT_LIST_HEAD(struct list_head *list) { - list->next = list; + WRITE_ONCE(list->next, list); list->prev = list; } @@ -42,7 +42,7 @@ static inline void __list_add(struct list_head *new, next->prev = new; new->next = next; new->prev = prev; - prev->next = new; + WRITE_ONCE(prev->next, new); } #else extern void __list_add(struct list_head *new, @@ -186,7 +186,7 @@ static inline int list_is_last(const struct list_head *list, */ static inline int list_empty(const struct list_head *head) { - return head->next == head; + return READ_ONCE(head->next) == head; } /** @@ -608,7 +608,7 @@ static inline int hlist_unhashed(const struct hlist_node *h) static inline int hlist_empty(const struct hlist_head *h) { - return !h->first; + return !READ_ONCE(h->first); } static inline void __hlist_del(struct hlist_node *n) @@ -642,7 +642,7 @@ static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) n->next = first; if (first) first->pprev = &n->next; - h->first = n; + WRITE_ONCE(h->first, n); n->pprev = &h->first; } @@ -653,14 +653,14 @@ static inline void hlist_add_before(struct hlist_node *n, n->pprev = next->pprev; n->next = next; next->pprev = &n->next; - *(n->pprev) = n; + WRITE_ONCE(*(n->pprev), n); } static inline void hlist_add_behind(struct hlist_node *n, struct hlist_node *prev) { n->next = prev->next; - prev->next = n; + WRITE_ONCE(prev->next, n); n->pprev = &prev->next; if (n->next) diff --git a/drivers/include/linux/lockdep.h b/drivers/include/linux/lockdep.h index 4b5a15b331..2f9b7a649f 100644 --- a/drivers/include/linux/lockdep.h +++ b/drivers/include/linux/lockdep.h @@ -66,7 +66,7 @@ struct lock_class { /* * class-hash: */ - struct list_head hash_entry; + struct hlist_node hash_entry; /* * global list of all lock-classes: @@ -199,7 +199,7 @@ struct lock_chain { u8 irq_context; u8 depth; u16 base; - struct list_head entry; + struct hlist_node entry; u64 chain_key; }; diff --git a/drivers/include/linux/log2.h b/drivers/include/linux/log2.h index c373295f35..fd7ff3d91e 100644 --- a/drivers/include/linux/log2.h +++ b/drivers/include/linux/log2.h @@ -15,6 +15,12 @@ #include #include +/* + * deal with unrepresentable constant logarithms + */ +extern __attribute__((const, noreturn)) +int ____ilog2_NaN(void); + /* * non-constant log of 
base 2 calculators * - the arch may override these in asm/bitops.h if they can be implemented @@ -79,7 +85,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) #define ilog2(n) \ ( \ __builtin_constant_p(n) ? ( \ - (n) < 2 ? 0 : \ + (n) < 1 ? ____ilog2_NaN() : \ (n) & (1ULL << 63) ? 63 : \ (n) & (1ULL << 62) ? 62 : \ (n) & (1ULL << 61) ? 61 : \ @@ -142,7 +148,10 @@ unsigned long __rounddown_pow_of_two(unsigned long n) (n) & (1ULL << 4) ? 4 : \ (n) & (1ULL << 3) ? 3 : \ (n) & (1ULL << 2) ? 2 : \ - 1 ) : \ + (n) & (1ULL << 1) ? 1 : \ + (n) & (1ULL << 0) ? 0 : \ + ____ilog2_NaN() \ + ) : \ (sizeof(n) <= 4) ? \ __ilog2_u32(n) : \ __ilog2_u64(n) \ @@ -194,17 +203,6 @@ unsigned long __rounddown_pow_of_two(unsigned long n) * ... and so on. */ -static inline __attribute_const__ -int __order_base_2(unsigned long n) -{ - return n > 1 ? ilog2(n - 1) + 1 : 0; -} +#define order_base_2(n) ilog2(roundup_pow_of_two(n)) -#define order_base_2(n) \ -( \ - __builtin_constant_p(n) ? ( \ - ((n) == 0 || (n) == 1) ? 0 : \ - ilog2((n) - 1) + 1) : \ - __order_base_2(n) \ -) #endif /* _LINUX_LOG2_H */ diff --git a/drivers/include/linux/mmdebug.h b/drivers/include/linux/mmdebug.h index 772362adf4..053824b0a4 100644 --- a/drivers/include/linux/mmdebug.h +++ b/drivers/include/linux/mmdebug.h @@ -56,4 +56,10 @@ void dump_mm(const struct mm_struct *mm); #define VIRTUAL_BUG_ON(cond) do { } while (0) #endif +#ifdef CONFIG_DEBUG_VM_PGFLAGS +#define VM_BUG_ON_PGFLAGS(cond, page) VM_BUG_ON_PAGE(cond, page) +#else +#define VM_BUG_ON_PGFLAGS(cond, page) BUILD_BUG_ON_INVALID(cond) +#endif + #endif diff --git a/drivers/include/linux/mmzone.h b/drivers/include/linux/mmzone.h new file mode 100644 index 0000000000..4f9daf7145 --- /dev/null +++ b/drivers/include/linux/mmzone.h @@ -0,0 +1,54 @@ +#ifndef _LINUX_MMZONE_H +#define _LINUX_MMZONE_H + +#include +#include +#include +#include +#include + +/* Free memory management - zoned buddy allocator. */ +#ifndef CONFIG_FORCE_MAX_ZONEORDER +#define MAX_ORDER 11 +#else +#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER +#endif +#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1)) + +/* + * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed + * costly to service. That is between allocation orders which should + * coalesce naturally under reasonable reclaim pressure and those which + * will not. + */ +#define PAGE_ALLOC_COSTLY_ORDER 3 + +enum { + MIGRATE_UNMOVABLE, + MIGRATE_MOVABLE, + MIGRATE_RECLAIMABLE, + MIGRATE_PCPTYPES, /* the number of types on the pcp lists */ + MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES, +#ifdef CONFIG_CMA + /* + * MIGRATE_CMA migration type is designed to mimic the way + * ZONE_MOVABLE works. Only movable pages can be allocated + * from MIGRATE_CMA pageblocks and page allocator never + * implicitly change migration type of MIGRATE_CMA pageblock. + * + * The way to use it is to change migratetype of a range of + * pageblocks to MIGRATE_CMA which can be done by + * __free_pageblock_cma() function. What is important though + * is that a range of pageblocks must be aligned to + * MAX_ORDER_NR_PAGES should biggest page be bigger then + * a single pageblock. 
+ */ + MIGRATE_CMA, +#endif +#ifdef CONFIG_MEMORY_ISOLATION + MIGRATE_ISOLATE, /* can't allocate from here */ +#endif + MIGRATE_TYPES +}; + +#endif /* _LINUX_MMZONE_H */ diff --git a/drivers/include/linux/mod_devicetable.h b/drivers/include/linux/mod_devicetable.h index 82dc8537e9..2c4d376618 100644 --- a/drivers/include/linux/mod_devicetable.h +++ b/drivers/include/linux/mod_devicetable.h @@ -404,7 +404,7 @@ struct virtio_device_id { * For Hyper-V devices we use the device guid as the id. */ struct hv_vmbus_device_id { - __u8 guid[16]; + uuid_le guid; kernel_ulong_t driver_data; /* Data private to the driver */ }; diff --git a/drivers/include/linux/notifier.h b/drivers/include/linux/notifier.h new file mode 100644 index 0000000000..1d4cfe3214 --- /dev/null +++ b/drivers/include/linux/notifier.h @@ -0,0 +1,69 @@ +/* + * Routines to manage notifier chains for passing status changes to any + * interested routines. We need this instead of hard coded call lists so + * that modules can poke their nose into the innards. The network devices + * needed them so here they are for the rest of you. + * + * Alan Cox + */ + +#ifndef _LINUX_NOTIFIER_H +#define _LINUX_NOTIFIER_H +#include +#include +#include +/* + * Notifier chains are of four types: + * + * Atomic notifier chains: Chain callbacks run in interrupt/atomic + * context. Callouts are not allowed to block. + * Blocking notifier chains: Chain callbacks run in process context. + * Callouts are allowed to block. + * Raw notifier chains: There are no restrictions on callbacks, + * registration, or unregistration. All locking and protection + * must be provided by the caller. + * SRCU notifier chains: A variant of blocking notifier chains, with + * the same restrictions. + * + * atomic_notifier_chain_register() may be called from an atomic context, + * but blocking_notifier_chain_register() and srcu_notifier_chain_register() + * must be called from a process context. Ditto for the corresponding + * _unregister() routines. + * + * atomic_notifier_chain_unregister(), blocking_notifier_chain_unregister(), + * and srcu_notifier_chain_unregister() _must not_ be called from within + * the call chain. + * + * SRCU notifier chains are an alternative form of blocking notifier chains. + * They use SRCU (Sleepable Read-Copy Update) instead of rw-semaphores for + * protection of the chain links. This means there is _very_ low overhead + * in srcu_notifier_call_chain(): no cache bounces and no memory barriers. + * As compensation, srcu_notifier_chain_unregister() is rather expensive. + * SRCU notifier chains should be used when the chain will be called very + * often but notifier_blocks will seldom be removed. Also, SRCU notifier + * chains are slightly more difficult to use because they require special + * runtime initialization. + */ + +struct notifier_block; + +typedef int (*notifier_fn_t)(struct notifier_block *nb, + unsigned long action, void *data); + +struct notifier_block { + notifier_fn_t notifier_call; + struct notifier_block __rcu *next; + int priority; +}; + +/* Console keyboard events. + * Note: KBD_KEYCODE is always sent before KBD_UNBOUND_KEYCODE, KBD_UNICODE and + * KBD_KEYSYM. 
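A minimal sketch of hooking a callback into the notifier_block type declared above, using the fb_register_client() entry point added earlier in this patch (the usual NOTIFY_* return codes are not part of this excerpt, so a plain 0 is returned):

	static int my_fb_event(struct notifier_block *nb,
			       unsigned long action, void *data)
	{
		/* react to 'action', then let the chain continue */
		return 0;	/* NOTIFY_DONE in the full notifier.h */
	}

	static struct notifier_block my_fb_nb = {
		.notifier_call = my_fb_event,
	};

	/* ... */
	fb_register_client(&my_fb_nb);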
*/ +#define KBD_KEYCODE 0x0001 /* Keyboard keycode, called before any other */ +#define KBD_UNBOUND_KEYCODE 0x0002 /* Keyboard keycode which is not bound to any other */ +#define KBD_UNICODE 0x0003 /* Keyboard unicode */ +#define KBD_KEYSYM 0x0004 /* Keyboard keysym */ +#define KBD_POST_KEYSYM 0x0005 /* Called after keyboard keysym interpretation */ + + +#endif /* _LINUX_NOTIFIER_H */ diff --git a/drivers/include/linux/pci.h b/drivers/include/linux/pci.h index b2470faa20..7d62567f58 100644 --- a/drivers/include/linux/pci.h +++ b/drivers/include/linux/pci.h @@ -990,23 +990,6 @@ static inline int pci_is_managed(struct pci_dev *pdev) return pdev->is_managed; } -static inline void pci_set_managed_irq(struct pci_dev *pdev, unsigned int irq) -{ - pdev->irq = irq; - pdev->irq_managed = 1; -} - -static inline void pci_reset_managed_irq(struct pci_dev *pdev) -{ - pdev->irq = 0; - pdev->irq_managed = 0; -} - -static inline bool pci_has_managed_irq(struct pci_dev *pdev) -{ - return pdev->irq_managed && pdev->irq > 0; -} - void pci_disable_device(struct pci_dev *dev); extern unsigned int pcibios_max_latency; @@ -1267,8 +1250,6 @@ struct msix_entry { u16 entry; /* driver uses to specify entry, OS writes */ }; -void pci_msi_setup_pci_dev(struct pci_dev *dev); - #ifdef CONFIG_PCI_MSI int pci_msi_vec_count(struct pci_dev *dev); void pci_msi_shutdown(struct pci_dev *dev); @@ -1956,6 +1937,16 @@ static inline struct irq_domain * pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; } #endif /* CONFIG_OF */ +#ifdef CONFIG_ACPI +struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus); + +void +pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *)); +#else +static inline struct irq_domain * +pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; } +#endif + #ifdef CONFIG_EEH static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev) { @@ -2003,4 +1994,6 @@ int enum_pci_devices(void); const struct pci_device_id* find_pci_device(pci_dev_t* pdev, const struct pci_device_id *idlist); +struct pci_dev * _pci_get_bus_and_slot(unsigned int bus, unsigned int devfn); + #endif /* LINUX_PCI_H */ diff --git a/drivers/include/linux/pm.h b/drivers/include/linux/pm.h index 528be67877..6a5d654f44 100644 --- a/drivers/include/linux/pm.h +++ b/drivers/include/linux/pm.h @@ -573,6 +573,7 @@ struct dev_pm_info { struct wakeup_source *wakeup; bool wakeup_path:1; bool syscore:1; + bool no_pm_callbacks:1; /* Owned by the PM core */ #else unsigned int should_wakeup:1; #endif diff --git a/drivers/include/linux/pm_runtime.h b/drivers/include/linux/pm_runtime.h index 2d66664b08..dc99fd4dd5 100644 --- a/drivers/include/linux/pm_runtime.h +++ b/drivers/include/linux/pm_runtime.h @@ -10,6 +10,7 @@ #define _LINUX_PM_RUNTIME_H #include +#include #include #include @@ -38,6 +39,7 @@ extern int pm_runtime_force_resume(struct device *dev); extern int __pm_runtime_idle(struct device *dev, int rpmflags); extern int __pm_runtime_suspend(struct device *dev, int rpmflags); extern int __pm_runtime_resume(struct device *dev, int rpmflags); +extern int pm_runtime_get_if_in_use(struct device *dev); extern int pm_schedule_suspend(struct device *dev, unsigned int delay); extern int __pm_runtime_set_status(struct device *dev, unsigned int status); extern int pm_runtime_barrier(struct device *dev); @@ -142,6 +144,10 @@ static inline int pm_schedule_suspend(struct device *dev, unsigned int delay) { return -ENOSYS; } +static inline int pm_runtime_get_if_in_use(struct device *dev) +{ + 
return -EINVAL; +} static inline int __pm_runtime_set_status(struct device *dev, unsigned int status) { return 0; } static inline int pm_runtime_barrier(struct device *dev) { return 0; } diff --git a/drivers/include/linux/poison.h b/drivers/include/linux/poison.h index ba6e2eb69e..82ba7fdb3a 100644 --- a/drivers/include/linux/poison.h +++ b/drivers/include/linux/poison.h @@ -27,11 +27,15 @@ * Magic number "tsta" to indicate a static timer initializer * for the object debugging code. */ -#define TIMER_ENTRY_STATIC ((void *) 0x74737461) +#define TIMER_ENTRY_STATIC ((void *) 0x300 + POISON_POINTER_DELTA) /********** mm/debug-pagealloc.c **********/ #define PAGE_POISON 0xaa +/********** mm/page_alloc.c ************/ + +#define TAIL_MAPPING ((void *) 0x400 + POISON_POINTER_DELTA) + /********** mm/slab.c **********/ /* * Magic nums for obj red zoning. diff --git a/drivers/include/linux/printk.h b/drivers/include/linux/printk.h index b565f693fa..d1d94fe01e 100644 --- a/drivers/include/linux/printk.h +++ b/drivers/include/linux/printk.h @@ -68,13 +68,15 @@ struct va_format { /* * Dummy printk for disabled debugging statements to use whilst maintaining - * gcc's format and side-effect checking. + * gcc's format checking. */ -static inline __printf(1, 2) -int no_printk(const char *fmt, ...) -{ - return 0; -} +#define no_printk(fmt, ...) \ +do { \ + if (0) \ + printk(fmt, ##__VA_ARGS__); \ +} while (0) + + __printf(1, 2) int dbgprintf(const char *fmt, ...); diff --git a/drivers/include/linux/property.h b/drivers/include/linux/property.h index 0a3705a7c9..6682ca1f3f 100644 --- a/drivers/include/linux/property.h +++ b/drivers/include/linux/property.h @@ -144,14 +144,18 @@ static inline int fwnode_property_read_u64(struct fwnode_handle *fwnode, /** * struct property_entry - "Built-in" device property representation. * @name: Name of the property. - * @type: Type of the property. - * @nval: Number of items of type @type making up the value. - * @value: Value of the property (an array of @nval items of type @type). + * @length: Length of data making up the value. + * @is_array: True when the property is an array. + * @is_string: True when property is a string. + * @pointer: Pointer to the property (an array of items of the given type). + * @value: Value of the property (when it is a single item of the given type). */ struct property_entry { const char *name; - enum dev_prop_type type; - size_t nval; + size_t length; + bool is_array; + bool is_string; + union { union { void *raw_data; u8 *u8_data; @@ -159,8 +163,80 @@ struct property_entry { u32 *u32_data; u64 *u64_data; const char **str; + } pointer; + union { + unsigned long long raw_data; + u8 u8_data; + u16 u16_data; + u32 u32_data; + u64 u64_data; + const char *str; } value; }; +}; + +/* + * Note: the below four initializers for the anonymous union are carefully + * crafted to avoid gcc-4.4.4's problems with initialization of anon unions + * and structs. 
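A usage sketch for the property_entry initializers defined just below (made-up property names and values; the array is terminated by an empty entry):

	static u32 channels[] = { 0, 1, 2 };
	static struct property_entry demo_props[] = {
		PROPERTY_ENTRY_U32("chan-count", 3),
		PROPERTY_ENTRY_U32_ARRAY("channels", channels),
		PROPERTY_ENTRY_STRING("label", "demo"),
		PROPERTY_ENTRY_BOOL("enabled"),
		{ },				/* terminator */
	};

	static const struct property_set demo_pset = {
		.properties = demo_props,
	};

	/* ... */
	device_add_property_set(dev, &demo_pset);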
+ */ + +#define PROPERTY_ENTRY_INTEGER_ARRAY(_name_, _type_, _val_) \ +{ \ + .name = _name_, \ + .length = ARRAY_SIZE(_val_) * sizeof(_type_), \ + .is_array = true, \ + .is_string = false, \ + { .pointer = { _type_##_data = _val_ } }, \ +} + +#define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u8, _val_) +#define PROPERTY_ENTRY_U16_ARRAY(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u16, _val_) +#define PROPERTY_ENTRY_U32_ARRAY(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u32, _val_) +#define PROPERTY_ENTRY_U64_ARRAY(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u64, _val_) + +#define PROPERTY_ENTRY_STRING_ARRAY(_name_, _val_) \ +{ \ + .name = _name_, \ + .length = ARRAY_SIZE(_val_) * sizeof(const char *), \ + .is_array = true, \ + .is_string = true, \ + { .pointer = { .str = _val_ } }, \ +} + +#define PROPERTY_ENTRY_INTEGER(_name_, _type_, _val_) \ +{ \ + .name = _name_, \ + .length = sizeof(_type_), \ + .is_string = false, \ + { .value = { ._type_##_data = _val_ } }, \ +} + +#define PROPERTY_ENTRY_U8(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER(_name_, u8, _val_) +#define PROPERTY_ENTRY_U16(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER(_name_, u16, _val_) +#define PROPERTY_ENTRY_U32(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER(_name_, u32, _val_) +#define PROPERTY_ENTRY_U64(_name_, _val_) \ + PROPERTY_ENTRY_INTEGER(_name_, u64, _val_) + +#define PROPERTY_ENTRY_STRING(_name_, _val_) \ +{ \ + .name = _name_, \ + .length = sizeof(_val_), \ + .is_string = true, \ + { .value = { .str = _val_ } }, \ +} + +#define PROPERTY_ENTRY_BOOL(_name_) \ +{ \ + .name = _name_, \ +} /** * struct property_set - Collection of "built-in" device properties. @@ -172,7 +248,8 @@ struct property_set { struct property_entry *properties; }; -void device_add_property_set(struct device *dev, struct property_set *pset); +int device_add_property_set(struct device *dev, const struct property_set *pset); +void device_remove_property_set(struct device *dev); bool device_dma_supported(struct device *dev); diff --git a/drivers/include/linux/pwm.h b/drivers/include/linux/pwm.h index 7de60f0b8f..4dd50e6612 100644 --- a/drivers/include/linux/pwm.h +++ b/drivers/include/linux/pwm.h @@ -179,6 +179,8 @@ struct pwm_device *of_pwm_get(struct device_node *np, const char *con_id); void pwm_put(struct pwm_device *pwm); struct pwm_device *devm_pwm_get(struct device *dev, const char *con_id); +struct pwm_device *devm_of_pwm_get(struct device *dev, struct device_node *np, + const char *con_id); void devm_pwm_put(struct device *dev, struct pwm_device *pwm); bool pwm_can_sleep(struct pwm_device *pwm); @@ -192,11 +194,36 @@ static inline void *pwm_get_chip_data(struct pwm_device *pwm) { return NULL; } + +static inline int pwmchip_add(struct pwm_chip *chip) +{ + return -EINVAL; +} + +static inline int pwmchip_add_inversed(struct pwm_chip *chip) +{ + return -EINVAL; +} + +static inline int pwmchip_remove(struct pwm_chip *chip) +{ + return -EINVAL; +} + +static inline struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip, + unsigned int index, + const char *label) +{ + return ERR_PTR(-ENODEV); +} + static inline struct pwm_device *pwm_get(struct device *dev, const char *consumer) { return ERR_PTR(-ENODEV); } + + static inline void pwm_put(struct pwm_device *pwm) { } @@ -206,6 +233,8 @@ static inline struct pwm_device *devm_pwm_get(struct device *dev, { return ERR_PTR(-ENODEV); } + + static inline void devm_pwm_put(struct device *dev, struct pwm_device *pwm) { } diff --git 
a/drivers/include/linux/rbtree.h b/drivers/include/linux/rbtree.h index a5aa7ae671..b6900099ea 100644 --- a/drivers/include/linux/rbtree.h +++ b/drivers/include/linux/rbtree.h @@ -50,7 +50,7 @@ struct rb_root { #define RB_ROOT (struct rb_root) { NULL, } #define rb_entry(ptr, type, member) container_of(ptr, type, member) -#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL) +#define RB_EMPTY_ROOT(root) (READ_ONCE((root)->rb_node) == NULL) /* 'empty' nodes are nodes that are known not to be inserted in an rbtree */ #define RB_EMPTY_NODE(node) \ diff --git a/drivers/include/linux/rculist.h b/drivers/include/linux/rculist.h index 5ed5409860..85a7454b0b 100644 --- a/drivers/include/linux/rculist.h +++ b/drivers/include/linux/rculist.h @@ -179,32 +179,31 @@ static inline void list_replace_rcu(struct list_head *old, } /** - * list_splice_init_rcu - splice an RCU-protected list into an existing list. + * __list_splice_init_rcu - join an RCU-protected list into an existing list. * @list: the RCU-protected list to splice - * @head: the place in the list to splice the first list into + * @prev: points to the last element of the existing list + * @next: points to the first element of the existing list * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ... * - * @head can be RCU-read traversed concurrently with this function. + * The list pointed to by @prev and @next can be RCU-read traversed + * concurrently with this function. * * Note that this function blocks. * - * Important note: the caller must take whatever action is necessary to - * prevent any other updates to @head. In principle, it is possible - * to modify the list as soon as sync() begins execution. - * If this sort of thing becomes necessary, an alternative version - * based on call_rcu() could be created. But only if -really- - * needed -- there is no shortage of RCU API members. + * Important note: the caller must take whatever action is necessary to prevent + * any other updates to the existing list. In principle, it is possible to + * modify the list as soon as sync() begins execution. If this sort of thing + * becomes necessary, an alternative version based on call_rcu() could be + * created. But only if -really- needed -- there is no shortage of RCU API + * members. */ -static inline void list_splice_init_rcu(struct list_head *list, - struct list_head *head, +static inline void __list_splice_init_rcu(struct list_head *list, + struct list_head *prev, + struct list_head *next, void (*sync)(void)) { struct list_head *first = list->next; struct list_head *last = list->prev; - struct list_head *at = head->next; - - if (list_empty(list)) - return; /* * "first" and "last" tracking list, so initialize it. RCU readers @@ -231,10 +230,40 @@ static inline void list_splice_init_rcu(struct list_head *list, * this function. */ - last->next = at; - rcu_assign_pointer(list_next_rcu(head), first); - first->prev = head; - at->prev = last; + last->next = next; + rcu_assign_pointer(list_next_rcu(prev), first); + first->prev = prev; + next->prev = last; +} + +/** + * list_splice_init_rcu - splice an RCU-protected list into an existing list, + * designed for stacks. + * @list: the RCU-protected list to splice + * @head: the place in the existing list to splice the first list into + * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ... 
+ */ +static inline void list_splice_init_rcu(struct list_head *list, + struct list_head *head, + void (*sync)(void)) +{ + if (!list_empty(list)) + __list_splice_init_rcu(list, head, head->next, sync); +} + +/** + * list_splice_tail_init_rcu - splice an RCU-protected list into an existing + * list, designed for queues. + * @list: the RCU-protected list to splice + * @head: the place in the existing list to splice the first list into + * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ... + */ +static inline void list_splice_tail_init_rcu(struct list_head *list, + struct list_head *head, + void (*sync)(void)) +{ + if (!list_empty(list)) + __list_splice_init_rcu(list, head->prev, head, sync); } /** @@ -304,6 +333,42 @@ static inline void list_splice_init_rcu(struct list_head *list, &pos->member != (head); \ pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) +/** + * list_entry_lockless - get the struct for this entry + * @ptr: the &struct list_head pointer. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_head within the struct. + * + * This primitive may safely run concurrently with the _rcu list-mutation + * primitives such as list_add_rcu(), but requires some implicit RCU + * read-side guarding. One example is running within a special + * exception-time environment where preemption is disabled and where + * lockdep cannot be invoked (in which case updaters must use RCU-sched, + * as in synchronize_sched(), call_rcu_sched(), and friends). Another + * example is when items are added to the list, but never deleted. + */ +#define list_entry_lockless(ptr, type, member) \ + container_of((typeof(ptr))lockless_dereference(ptr), type, member) + +/** + * list_for_each_entry_lockless - iterate over rcu list of given type + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * This primitive may safely run concurrently with the _rcu list-mutation + * primitives such as list_add_rcu(), but requires some implicit RCU + * read-side guarding. One example is running within a special + * exception-time environment where preemption is disabled and where + * lockdep cannot be invoked (in which case updaters must use RCU-sched, + * as in synchronize_sched(), call_rcu_sched(), and friends). Another + * example is when items are added to the list, but never deleted. + */ +#define list_for_each_entry_lockless(pos, head, member) \ + for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = list_entry_lockless(pos->member.next, typeof(*pos), member)) + /** * list_for_each_entry_continue_rcu - continue iteration over list of given type * @pos: the type * to use as a loop cursor. diff --git a/drivers/include/linux/rcupdate.h b/drivers/include/linux/rcupdate.h index cbcc49e369..2abf974ef6 100644 --- a/drivers/include/linux/rcupdate.h +++ b/drivers/include/linux/rcupdate.h @@ -48,10 +48,17 @@ #include +#ifndef CONFIG_TINY_RCU extern int rcu_expedited; /* for sysctl */ +extern int rcu_normal; /* also for sysctl */ +#endif /* #ifndef CONFIG_TINY_RCU */ #ifdef CONFIG_TINY_RCU /* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */ +static inline bool rcu_gp_is_normal(void) /* Internal RCU use. */ +{ + return true; +} static inline bool rcu_gp_is_expedited(void) /* Internal RCU use. 
*/ { return false; @@ -65,6 +72,7 @@ static inline void rcu_unexpedite_gp(void) { } #else /* #ifdef CONFIG_TINY_RCU */ +bool rcu_gp_is_normal(void); /* Internal RCU use. */ bool rcu_gp_is_expedited(void); /* Internal RCU use. */ void rcu_expedite_gp(void); void rcu_unexpedite_gp(void); @@ -283,7 +291,6 @@ static inline int rcu_preempt_depth(void) /* Internal to kernel */ void rcu_init(void); -void rcu_end_inkernel_boot(void); void rcu_sched_qs(void); void rcu_bh_qs(void); void rcu_check_callbacks(int user); @@ -291,6 +298,12 @@ struct notifier_block; int rcu_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu); +#ifndef CONFIG_TINY_RCU +void rcu_end_inkernel_boot(void); +#else /* #ifndef CONFIG_TINY_RCU */ +static inline void rcu_end_inkernel_boot(void) { } +#endif /* #ifndef CONFIG_TINY_RCU */ + #ifdef CONFIG_RCU_STALL_COMMON void rcu_sysrq_start(void); void rcu_sysrq_end(void); @@ -341,9 +354,9 @@ static inline void rcu_init_nohz(void) */ #define RCU_NONIDLE(a) \ do { \ - rcu_irq_enter(); \ + rcu_irq_enter_irqson(); \ do { a; } while (0); \ - rcu_irq_exit(); \ + rcu_irq_exit_irqson(); \ } while (0) /* @@ -703,7 +716,7 @@ static inline void rcu_preempt_sleep_check(void) * The tracing infrastructure traces RCU (we want that), but unfortunately * some of the RCU checks causes tracing to lock up the system. * - * The tracing version of rcu_dereference_raw() must not call + * The no-tracing version of rcu_dereference_raw() must not call * rcu_read_lock_held(). */ #define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu) @@ -753,6 +766,28 @@ static inline void rcu_preempt_sleep_check(void) */ #define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0) +/** + * rcu_pointer_handoff() - Hand off a pointer from RCU to other mechanism + * @p: The pointer to hand off + * + * This is simply an identity function, but it documents where a pointer + * is handed off from RCU to some other synchronization mechanism, for + * example, reference counting or locking. In C11, it would map to + * kill_dependency(). 
It could be used as follows: + * + * rcu_read_lock(); + * p = rcu_dereference(gp); + * long_lived = is_long_lived(p); + * if (long_lived) { + * if (!atomic_inc_not_zero(p->refcnt)) + * long_lived = false; + * else + * p = rcu_pointer_handoff(p); + * } + * rcu_read_unlock(); + */ +#define rcu_pointer_handoff(p) (p) + /** * rcu_read_lock() - mark the beginning of an RCU read-side critical section * @@ -985,7 +1020,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) #define RCU_INIT_POINTER(p, v) \ do { \ rcu_dereference_sparse(p, __rcu); \ - p = RCU_INITIALIZER(v); \ + WRITE_ONCE(p, RCU_INITIALIZER(v)); \ } while (0) /** diff --git a/drivers/include/linux/rcutiny.h b/drivers/include/linux/rcutiny.h index 2d75a23779..4b6e8b301d 100644 --- a/drivers/include/linux/rcutiny.h +++ b/drivers/include/linux/rcutiny.h @@ -175,6 +175,14 @@ static inline void rcu_irq_enter(void) { } +static inline void rcu_irq_exit_irqson(void) +{ +} + +static inline void rcu_irq_enter_irqson(void) +{ +} + static inline void rcu_irq_exit(void) { } diff --git a/drivers/include/linux/seq_file.h b/drivers/include/linux/seq_file.h index d9ad7937f9..4228bdad58 100644 --- a/drivers/include/linux/seq_file.h +++ b/drivers/include/linux/seq_file.h @@ -5,6 +5,10 @@ #include #include #include +struct file; +struct path; +struct inode; +struct dentry; struct seq_file { char *buf; diff --git a/drivers/include/linux/seqlock.h b/drivers/include/linux/seqlock.h index e623c6f971..e0582106ef 100644 --- a/drivers/include/linux/seqlock.h +++ b/drivers/include/linux/seqlock.h @@ -234,7 +234,53 @@ static inline void raw_write_seqcount_end(seqcount_t *s) s->sequence++; } -/* +/** + * raw_write_seqcount_barrier - do a seq write barrier + * @s: pointer to seqcount_t + * + * This can be used to provide an ordering guarantee instead of the + * usual consistency guarantee. It is one wmb cheaper, because we can + * collapse the two back-to-back wmb()s. 
+ * + * seqcount_t seq; + * bool X = true, Y = false; + * + * void read(void) + * { + * bool x, y; + * + * do { + * int s = read_seqcount_begin(&seq); + * + * x = X; y = Y; + * + * } while (read_seqcount_retry(&seq, s)); + * + * BUG_ON(!x && !y); + * } + * + * void write(void) + * { + * Y = true; + * + * raw_write_seqcount_barrier(seq); + * + * X = false; + * } + */ +static inline void raw_write_seqcount_barrier(seqcount_t *s) +{ + s->sequence++; + smp_wmb(); + s->sequence++; +} + +static inline int raw_read_seqcount_latch(seqcount_t *s) +{ + return lockless_dereference(s->sequence); +} + +/** * raw_write_seqcount_latch - redirect readers to even/odd copy * @s: pointer to seqcount_t * diff --git a/drivers/include/linux/slab.h b/drivers/include/linux/slab.h index bb19b0eda2..df57f6510f 100644 --- a/drivers/include/linux/slab.h +++ b/drivers/include/linux/slab.h @@ -86,6 +86,11 @@ #else # define SLAB_FAILSLAB 0x00000000UL #endif +#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) +# define SLAB_ACCOUNT 0x04000000UL /* Account to memcg */ +#else +# define SLAB_ACCOUNT 0x00000000UL +#endif /* The following flags affect the page allocator grouping pages by mobility */ #define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */ @@ -113,14 +118,14 @@ void kmem_cache_destroy(struct kmem_cache *); int kmem_cache_shrink(struct kmem_cache *); void kmem_cache_free(struct kmem_cache *, void *); -static inline void *krealloc(void *p, size_t new_size, gfp_t flags) +static inline void *krealloc(const void *p, size_t new_size, gfp_t flags) { - return __builtin_realloc(p, new_size); + return __builtin_realloc((void*)p, new_size); } -static inline void kfree(void *p) +static inline void kfree(const void *p) { - __builtin_free(p); + __builtin_free((void*)p); } static __always_inline void *kmalloc(size_t size, gfp_t flags) { diff --git a/drivers/include/linux/string.h b/drivers/include/linux/string.h index 2398aaea3e..0cec16e63d 100644 --- a/drivers/include/linux/string.h +++ b/drivers/include/linux/string.h @@ -10,6 +10,7 @@ extern char *strndup_user(const char __user *, long); extern void *memdup_user(const void __user *, size_t); +extern void *memdup_user_nul(const void __user *, size_t); /* * Include machine specific inline routines @@ -127,11 +128,7 @@ extern char **argv_split(gfp_t gfp, const char *str, int *argcp); extern void argv_free(char **argv); extern bool sysfs_streq(const char *s1, const char *s2); -extern int kstrtobool(const char *s, bool *res); -static inline int strtobool(const char *s, bool *res) -{ - return kstrtobool(s, res); -} +extern int strtobool(const char *s, bool *res); #ifdef CONFIG_BINARY_PRINTF int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args); diff --git a/drivers/include/linux/sysfs.h b/drivers/include/linux/sysfs.h index 9992e1a126..4159344eb7 100644 --- a/drivers/include/linux/sysfs.h +++ b/drivers/include/linux/sysfs.h @@ -31,6 +31,15 @@ struct attribute { struct lock_class_key skey; #endif }; +struct attribute_group { + const char *name; + umode_t (*is_visible)(struct kobject *, + struct attribute *, int); + umode_t (*is_bin_visible)(struct kobject *, + struct bin_attribute *, int); + struct attribute **attrs; + struct bin_attribute **bin_attrs; +}; #ifdef CONFIG_SYSFS int __must_check sysfs_create_dir_ns(struct kobject *kobj, const void *ns); @@ -216,6 +225,33 @@ static inline void sysfs_delete_link(struct kobject *k, struct kobject *t, { } +static inline int sysfs_create_group(struct kobject *kobj, + const struct attribute_group *grp) 
+{ + return 0; +} + +static inline int sysfs_create_groups(struct kobject *kobj, + const struct attribute_group **groups) +{ + return 0; +} + +static inline int sysfs_update_group(struct kobject *kobj, + const struct attribute_group *grp) +{ + return 0; +} + +static inline void sysfs_remove_group(struct kobject *kobj, + const struct attribute_group *grp) +{ +} + +static inline void sysfs_remove_groups(struct kobject *kobj, + const struct attribute_group **groups) +{ +} static inline int sysfs_add_file_to_group(struct kobject *kobj, const struct attribute *attr, const char *group) @@ -228,6 +264,17 @@ static inline void sysfs_remove_file_from_group(struct kobject *kobj, { } +static inline int sysfs_merge_group(struct kobject *kobj, + const struct attribute_group *grp) +{ + return 0; +} + +static inline void sysfs_unmerge_group(struct kobject *kobj, + const struct attribute_group *grp) +{ +} + static inline int sysfs_add_link_to_group(struct kobject *kobj, const char *group_name, struct kobject *target, const char *link_name) diff --git a/drivers/include/linux/sysrq.h b/drivers/include/linux/sysrq.h index 760b7b0ea1..a8a3060df1 100644 --- a/drivers/include/linux/sysrq.h +++ b/drivers/include/linux/sysrq.h @@ -1,2 +1,14 @@ +/* -*- linux-c -*- + * + * $Id: sysrq.h,v 1.3 1997/07/17 11:54:33 mj Exp $ + * + * Linux Magic System Request Key Hacks + * + * (c) 1997 Martin Mares + * + * (c) 2000 Crutcher Dunnavant + * overhauled to use key registration + * based upon discusions in irc://irc.openprojects.net/#kernelnewbies + */ // stub diff --git a/drivers/include/linux/timer.h b/drivers/include/linux/timer.h index 03ce2630f5..039ce47ae0 100644 --- a/drivers/include/linux/timer.h +++ b/drivers/include/linux/timer.h @@ -2,6 +2,11 @@ #define _LINUX_TIMER_H #include +#include +#include +#include + +struct tvec_base; unsigned long __round_jiffies(unsigned long j, int cpu); unsigned long __round_jiffies_relative(unsigned long j, int cpu); diff --git a/drivers/include/linux/vmalloc.h b/drivers/include/linux/vmalloc.h index b33ee40cb4..21786c19ea 100644 --- a/drivers/include/linux/vmalloc.h +++ b/drivers/include/linux/vmalloc.h @@ -13,7 +13,6 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */ #define VM_ALLOC 0x00000002 /* vmalloc() */ #define VM_MAP 0x00000004 /* vmap()ed pages */ #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */ -#define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */ #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */ #define VM_NO_GUARD 0x00000040 /* don't add guard page */ #define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */ diff --git a/drivers/include/uapi/drm/drm.h b/drivers/include/uapi/drm/drm.h index ad4871f4ae..24d62c8e6f 100644 --- a/drivers/include/uapi/drm/drm.h +++ b/drivers/include/uapi/drm/drm.h @@ -54,6 +54,7 @@ typedef int32_t __s32; typedef uint32_t __u32; typedef int64_t __s64; typedef uint64_t __u64; +typedef size_t __kernel_size_t; typedef unsigned long drm_handle_t; #endif @@ -129,11 +130,11 @@ struct drm_version { int version_major; /**< Major version */ int version_minor; /**< Minor version */ int version_patchlevel; /**< Patch level */ - size_t name_len; /**< Length of name buffer */ + __kernel_size_t name_len; /**< Length of name buffer */ char __user *name; /**< Name of driver */ - size_t date_len; /**< Length of date buffer */ + __kernel_size_t date_len; /**< Length of date buffer */ char __user *date; /**< User-space buffer to hold date */ - size_t desc_len; /**< Length 
of desc buffer */ + __kernel_size_t desc_len; /**< Length of desc buffer */ char __user *desc; /**< User-space buffer to hold desc */ }; @@ -143,7 +144,7 @@ struct drm_version { * \sa drmGetBusid() and drmSetBusId(). */ struct drm_unique { - size_t unique_len; /**< Length of unique */ + __kernel_size_t unique_len; /**< Length of unique */ char __user *unique; /**< Unique name for driver instantiation */ }; diff --git a/drivers/include/uapi/drm/drm_fourcc.h b/drivers/include/uapi/drm/drm_fourcc.h index 0b69a77535..4d8da699a6 100644 --- a/drivers/include/uapi/drm/drm_fourcc.h +++ b/drivers/include/uapi/drm/drm_fourcc.h @@ -24,7 +24,7 @@ #ifndef DRM_FOURCC_H #define DRM_FOURCC_H -#include +#include "drm.h" #define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \ ((__u32)(c) << 16) | ((__u32)(d) << 24)) @@ -225,7 +225,7 @@ * - multiple of 128 pixels for the width * - multiple of 32 pixels for the height * - * For more information: see http://linuxtv.org/downloads/v4l-dvb-apis/re32.html + * For more information: see https://linuxtv.org/downloads/v4l-dvb-apis/re32.html */ #define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE fourcc_mod_code(SAMSUNG, 1) diff --git a/drivers/include/uapi/drm/drm_mode.h b/drivers/include/uapi/drm/drm_mode.h index 6c11ca401d..50adb46204 100644 --- a/drivers/include/uapi/drm/drm_mode.h +++ b/drivers/include/uapi/drm/drm_mode.h @@ -27,7 +27,7 @@ #ifndef _DRM_MODE_H #define _DRM_MODE_H -#include +#include "drm.h" #define DRM_DISPLAY_INFO_LEN 32 #define DRM_CONNECTOR_NAME_LEN 32 @@ -526,14 +526,14 @@ struct drm_mode_crtc_page_flip { /* create a dumb scanout buffer */ struct drm_mode_create_dumb { - uint32_t height; - uint32_t width; - uint32_t bpp; - uint32_t flags; + __u32 height; + __u32 width; + __u32 bpp; + __u32 flags; /* handle, pitch, size will be returned */ - uint32_t handle; - uint32_t pitch; - uint64_t size; + __u32 handle; + __u32 pitch; + __u64 size; }; /* set up for mmap of a dumb scanout buffer */ @@ -550,7 +550,7 @@ struct drm_mode_map_dumb { }; struct drm_mode_destroy_dumb { - uint32_t handle; + __u32 handle; }; /* page-flip flags are valid, plus: */ diff --git a/drivers/include/uapi/drm/drm_sarea.h b/drivers/include/uapi/drm/drm_sarea.h index 413a5642d4..1d1a858a20 100644 --- a/drivers/include/uapi/drm/drm_sarea.h +++ b/drivers/include/uapi/drm/drm_sarea.h @@ -32,7 +32,7 @@ #ifndef _DRM_SAREA_H_ #define _DRM_SAREA_H_ -#include +#include "drm.h" /* SAREA area needs to be at least a page */ #if defined(__alpha__) diff --git a/drivers/include/uapi/drm/i915_drm.h b/drivers/include/uapi/drm/i915_drm.h index c20aad19bc..fad171ad2f 100644 --- a/drivers/include/uapi/drm/i915_drm.h +++ b/drivers/include/uapi/drm/i915_drm.h @@ -27,7 +27,7 @@ #ifndef _UAPI_I915_DRM_H_ #define _UAPI_I915_DRM_H_ -#include +#include "drm.h" /* Please note that modifications to all structs defined here are * subject to backwards-compatibility constraints. @@ -356,6 +356,7 @@ typedef struct drm_i915_irq_wait { #define I915_PARAM_EU_TOTAL 34 #define I915_PARAM_HAS_GPU_RESET 35 #define I915_PARAM_HAS_RESOURCE_STREAMER 36 +#define I915_PARAM_HAS_EXEC_SOFTPIN 37 typedef struct drm_i915_getparam { __s32 param; @@ -682,8 +683,12 @@ struct drm_i915_gem_exec_object2 { __u64 alignment; /** - * Returned value of the updated offset of the object, for future - * presumed_offset writes. + * When the EXEC_OBJECT_PINNED flag is specified this is populated by + * the user with the GTT offset at which this object will be pinned. 
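For the softpin behaviour described above, a userspace sketch (illustrative names; bo_handle and chosen_gtt_offset are assumed to come from the caller) of pinning an object at a known GTT address:

	struct drm_i915_gem_exec_object2 obj = {
		.handle = bo_handle,
		.offset = chosen_gtt_offset,	/* where the object must end up */
		.flags  = EXEC_OBJECT_PINNED,
	};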
+ * When the I915_EXEC_NO_RELOC flag is specified this must contain the + * presumed_offset of the object. + * During execbuffer2 the kernel populates it with the value of the + * current GTT offset of the object, for future presumed_offset writes. */ __u64 offset; @@ -691,7 +696,8 @@ struct drm_i915_gem_exec_object2 { #define EXEC_OBJECT_NEEDS_GTT (1<<1) #define EXEC_OBJECT_WRITE (1<<2) #define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3) -#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_SUPPORTS_48B_ADDRESS<<1) +#define EXEC_OBJECT_PINNED (1<<4) +#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_PINNED<<1) __u64 flags; __u64 rsvd1; @@ -1079,6 +1085,12 @@ struct drm_i915_gem_context_destroy { }; struct drm_i915_reg_read { + /* + * Register offset. + * For 64bit wide registers where the upper 32bits don't immediately + * follow the lower 32bits, the offset of the lower 32bits must + * be specified + */ __u64 offset; __u64 val; /* Return value */ }; @@ -1127,6 +1139,7 @@ struct drm_i915_gem_context_param { __u64 param; #define I915_CONTEXT_PARAM_BAN_PERIOD 0x1 #define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2 +#define I915_CONTEXT_PARAM_GTT_SIZE 0x3 __u64 value; }; diff --git a/drivers/include/uapi/drm/radeon_drm.h b/drivers/include/uapi/drm/radeon_drm.h index 01aa2a8e3f..ccb9bcd826 100644 --- a/drivers/include/uapi/drm/radeon_drm.h +++ b/drivers/include/uapi/drm/radeon_drm.h @@ -793,9 +793,9 @@ typedef struct drm_radeon_surface_free { #define RADEON_GEM_DOMAIN_VRAM 0x4 struct drm_radeon_gem_info { - uint64_t gart_size; - uint64_t vram_size; - uint64_t vram_visible; + __u64 gart_size; + __u64 vram_size; + __u64 vram_visible; }; #define RADEON_GEM_NO_BACKING_STORE (1 << 0) @@ -807,11 +807,11 @@ struct drm_radeon_gem_info { #define RADEON_GEM_NO_CPU_ACCESS (1 << 4) struct drm_radeon_gem_create { - uint64_t size; - uint64_t alignment; - uint32_t handle; - uint32_t initial_domain; - uint32_t flags; + __u64 size; + __u64 alignment; + __u32 handle; + __u32 initial_domain; + __u32 flags; }; /* @@ -825,10 +825,10 @@ struct drm_radeon_gem_create { #define RADEON_GEM_USERPTR_REGISTER (1 << 3) struct drm_radeon_gem_userptr { - uint64_t addr; - uint64_t size; - uint32_t flags; - uint32_t handle; + __u64 addr; + __u64 size; + __u32 flags; + __u32 handle; }; #define RADEON_TILING_MACRO 0x1 @@ -850,72 +850,72 @@ struct drm_radeon_gem_userptr { #define RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK 0xf struct drm_radeon_gem_set_tiling { - uint32_t handle; - uint32_t tiling_flags; - uint32_t pitch; + __u32 handle; + __u32 tiling_flags; + __u32 pitch; }; struct drm_radeon_gem_get_tiling { - uint32_t handle; - uint32_t tiling_flags; - uint32_t pitch; + __u32 handle; + __u32 tiling_flags; + __u32 pitch; }; struct drm_radeon_gem_mmap { - uint32_t handle; - uint32_t pad; - uint64_t offset; - uint64_t size; - uint64_t addr_ptr; + __u32 handle; + __u32 pad; + __u64 offset; + __u64 size; + __u64 addr_ptr; }; struct drm_radeon_gem_set_domain { - uint32_t handle; - uint32_t read_domains; - uint32_t write_domain; + __u32 handle; + __u32 read_domains; + __u32 write_domain; }; struct drm_radeon_gem_wait_idle { - uint32_t handle; - uint32_t pad; + __u32 handle; + __u32 pad; }; struct drm_radeon_gem_busy { - uint32_t handle; - uint32_t domain; + __u32 handle; + __u32 domain; }; struct drm_radeon_gem_pread { /** Handle for the object being read. 
*/ - uint32_t handle; - uint32_t pad; + __u32 handle; + __u32 pad; /** Offset into the object to read from */ - uint64_t offset; + __u64 offset; /** Length of data to read */ - uint64_t size; + __u64 size; /** Pointer to write the data into. */ /* void *, but pointers are not 32/64 compatible */ - uint64_t data_ptr; + __u64 data_ptr; }; struct drm_radeon_gem_pwrite { /** Handle for the object being written to. */ - uint32_t handle; - uint32_t pad; + __u32 handle; + __u32 pad; /** Offset into the object to write to */ - uint64_t offset; + __u64 offset; /** Length of data to write */ - uint64_t size; + __u64 size; /** Pointer to read the data from. */ /* void *, but pointers are not 32/64 compatible */ - uint64_t data_ptr; + __u64 data_ptr; }; /* Sets or returns a value associated with a buffer. */ struct drm_radeon_gem_op { - uint32_t handle; /* buffer */ - uint32_t op; /* RADEON_GEM_OP_* */ - uint64_t value; /* input or return value */ + __u32 handle; /* buffer */ + __u32 op; /* RADEON_GEM_OP_* */ + __u64 value; /* input or return value */ }; #define RADEON_GEM_OP_GET_INITIAL_DOMAIN 0 @@ -935,11 +935,11 @@ struct drm_radeon_gem_op { #define RADEON_VM_PAGE_SNOOPED (1 << 4) struct drm_radeon_gem_va { - uint32_t handle; - uint32_t operation; - uint32_t vm_id; - uint32_t flags; - uint64_t offset; + __u32 handle; + __u32 operation; + __u32 vm_id; + __u32 flags; + __u64 offset; }; #define RADEON_CHUNK_ID_RELOCS 0x01 @@ -961,29 +961,29 @@ struct drm_radeon_gem_va { /* 0 = normal, + = higher priority, - = lower priority */ struct drm_radeon_cs_chunk { - uint32_t chunk_id; - uint32_t length_dw; - uint64_t chunk_data; + __u32 chunk_id; + __u32 length_dw; + __u64 chunk_data; }; /* drm_radeon_cs_reloc.flags */ #define RADEON_RELOC_PRIO_MASK (0xf << 0) struct drm_radeon_cs_reloc { - uint32_t handle; - uint32_t read_domains; - uint32_t write_domain; - uint32_t flags; + __u32 handle; + __u32 read_domains; + __u32 write_domain; + __u32 flags; }; struct drm_radeon_cs { - uint32_t num_chunks; - uint32_t cs_id; - /* this points to uint64_t * which point to cs chunks */ - uint64_t chunks; + __u32 num_chunks; + __u32 cs_id; + /* this points to __u64 * which point to cs chunks */ + __u64 chunks; /* updates to the limits after this CS ioctl */ - uint64_t gart_limit; - uint64_t vram_limit; + __u64 gart_limit; + __u64 vram_limit; }; #define RADEON_INFO_DEVICE_ID 0x00 @@ -1042,9 +1042,9 @@ struct drm_radeon_cs { #define RADEON_INFO_GPU_RESET_COUNTER 0x26 struct drm_radeon_info { - uint32_t request; - uint32_t pad; - uint64_t value; + __u32 request; + __u32 pad; + __u64 value; }; /* Those correspond to the tile index to use, this is to explicitly state diff --git a/drivers/include/uapi/drm/vmwgfx_drm.h b/drivers/include/uapi/drm/vmwgfx_drm.h index 05b204954d..5b68b4d108 100644 --- a/drivers/include/uapi/drm/vmwgfx_drm.h +++ b/drivers/include/uapi/drm/vmwgfx_drm.h @@ -28,9 +28,7 @@ #ifndef __VMWGFX_DRM_H__ #define __VMWGFX_DRM_H__ -#ifndef __KERNEL__ -#include -#endif +#include "drm.h" #define DRM_VMW_MAX_SURFACE_FACES 6 #define DRM_VMW_MAX_MIP_LEVELS 24 @@ -111,9 +109,9 @@ enum drm_vmw_handle_type { */ struct drm_vmw_getparam_arg { - uint64_t value; - uint32_t param; - uint32_t pad64; + __u64 value; + __u32 param; + __u32 pad64; }; /*************************************************************************/ @@ -134,8 +132,8 @@ struct drm_vmw_getparam_arg { */ struct drm_vmw_context_arg { - int32_t cid; - uint32_t pad64; + __s32 cid; + __u32 pad64; }; 
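/*
 * Editorial sketch of the pointer-as-__u64 convention used throughout these
 * structs: user pointers travel in 64-bit fields so one ioctl layout serves
 * 32-bit and 64-bit user space alike.  Field names follow
 * drm_radeon_gem_pwrite above; the include path is an assumption and the
 * ioctl call itself is omitted.
 */
#include <stdint.h>
#include "radeon_drm.h"

static void fill_gem_pwrite(struct drm_radeon_gem_pwrite *req, __u32 handle,
			    __u64 offset, const void *data, __u64 size)
{
	req->handle   = handle;
	req->pad      = 0;
	req->offset   = offset;
	req->size     = size;
	/* Cast through uintptr_t rather than straight to __u64. */
	req->data_ptr = (__u64)(uintptr_t)data;
}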
/*************************************************************************/ @@ -165,7 +163,7 @@ struct drm_vmw_context_arg { * @mip_levels: Number of mip levels for each face. * An unused face should have 0 encoded. * @size_addr: Address of a user-space array of sruct drm_vmw_size - * cast to an uint64_t for 32-64 bit compatibility. + * cast to an __u64 for 32-64 bit compatibility. * The size of the array should equal the total number of mipmap levels. * @shareable: Boolean whether other clients (as identified by file descriptors) * may reference this surface. @@ -177,12 +175,12 @@ struct drm_vmw_context_arg { */ struct drm_vmw_surface_create_req { - uint32_t flags; - uint32_t format; - uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES]; - uint64_t size_addr; - int32_t shareable; - int32_t scanout; + __u32 flags; + __u32 format; + __u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES]; + __u64 size_addr; + __s32 shareable; + __s32 scanout; }; /** @@ -197,7 +195,7 @@ struct drm_vmw_surface_create_req { */ struct drm_vmw_surface_arg { - int32_t sid; + __s32 sid; enum drm_vmw_handle_type handle_type; }; @@ -213,10 +211,10 @@ struct drm_vmw_surface_arg { */ struct drm_vmw_size { - uint32_t width; - uint32_t height; - uint32_t depth; - uint32_t pad64; + __u32 width; + __u32 height; + __u32 depth; + __u32 pad64; }; /** @@ -284,13 +282,13 @@ union drm_vmw_surface_reference_arg { /** * struct drm_vmw_execbuf_arg * - * @commands: User-space address of a command buffer cast to an uint64_t. + * @commands: User-space address of a command buffer cast to an __u64. * @command-size: Size in bytes of the command buffer. * @throttle-us: Sleep until software is less than @throttle_us * microseconds ahead of hardware. The driver may round this value * to the nearest kernel tick. * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an - * uint64_t. + * __u64. * @version: Allows expanding the execbuf ioctl parameters without breaking * backwards compatibility, since user-space will always tell the kernel * which version it uses. 
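/*
 * Editorial sketch of filling struct drm_vmw_execbuf_arg with the types used
 * after this patch.  The command buffer pointer is illustrative; fence_rep is
 * left zero, meaning no fence is requested, and the version field carries
 * DRM_VMW_EXECBUF_VERSION so the kernel knows which parameter layout the
 * client was built against.  The include path is an assumption.
 */
#include <stdint.h>
#include <string.h>
#include "vmwgfx_drm.h"

static void fill_execbuf(struct drm_vmw_execbuf_arg *arg,
			 const void *cmds, __u32 cmd_size)
{
	memset(arg, 0, sizeof(*arg));
	arg->commands     = (__u64)(uintptr_t)cmds;
	arg->command_size = cmd_size;
	arg->throttle_us  = 0;                      /* no software throttling */
	arg->fence_rep    = 0;                      /* no drm_vmw_fence_rep wanted */
	arg->version      = DRM_VMW_EXECBUF_VERSION;
}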
@@ -302,14 +300,14 @@ union drm_vmw_surface_reference_arg { #define DRM_VMW_EXECBUF_VERSION 2 struct drm_vmw_execbuf_arg { - uint64_t commands; - uint32_t command_size; - uint32_t throttle_us; - uint64_t fence_rep; - uint32_t version; - uint32_t flags; - uint32_t context_handle; - uint32_t pad64; + __u64 commands; + __u32 command_size; + __u32 throttle_us; + __u64 fence_rep; + __u32 version; + __u32 flags; + __u32 context_handle; + __u32 pad64; }; /** @@ -338,12 +336,12 @@ struct drm_vmw_execbuf_arg { */ struct drm_vmw_fence_rep { - uint32_t handle; - uint32_t mask; - uint32_t seqno; - uint32_t passed_seqno; - uint32_t pad64; - int32_t error; + __u32 handle; + __u32 mask; + __u32 seqno; + __u32 passed_seqno; + __u32 pad64; + __s32 error; }; /*************************************************************************/ @@ -373,8 +371,8 @@ struct drm_vmw_fence_rep { */ struct drm_vmw_alloc_dmabuf_req { - uint32_t size; - uint32_t pad64; + __u32 size; + __u32 pad64; }; /** @@ -391,11 +389,11 @@ struct drm_vmw_alloc_dmabuf_req { */ struct drm_vmw_dmabuf_rep { - uint64_t map_handle; - uint32_t handle; - uint32_t cur_gmr_id; - uint32_t cur_gmr_offset; - uint32_t pad64; + __u64 map_handle; + __u32 handle; + __u32 cur_gmr_id; + __u32 cur_gmr_offset; + __u32 pad64; }; /** @@ -428,8 +426,8 @@ union drm_vmw_alloc_dmabuf_arg { */ struct drm_vmw_unref_dmabuf_arg { - uint32_t handle; - uint32_t pad64; + __u32 handle; + __u32 pad64; }; /*************************************************************************/ @@ -452,10 +450,10 @@ struct drm_vmw_unref_dmabuf_arg { */ struct drm_vmw_rect { - int32_t x; - int32_t y; - uint32_t w; - uint32_t h; + __s32 x; + __s32 y; + __u32 w; + __u32 h; }; /** @@ -477,21 +475,21 @@ struct drm_vmw_rect { */ struct drm_vmw_control_stream_arg { - uint32_t stream_id; - uint32_t enabled; + __u32 stream_id; + __u32 enabled; - uint32_t flags; - uint32_t color_key; + __u32 flags; + __u32 color_key; - uint32_t handle; - uint32_t offset; - int32_t format; - uint32_t size; - uint32_t width; - uint32_t height; - uint32_t pitch[3]; + __u32 handle; + __u32 offset; + __s32 format; + __u32 size; + __u32 width; + __u32 height; + __u32 pitch[3]; - uint32_t pad64; + __u32 pad64; struct drm_vmw_rect src; struct drm_vmw_rect dst; }; @@ -519,12 +517,12 @@ struct drm_vmw_control_stream_arg { */ struct drm_vmw_cursor_bypass_arg { - uint32_t flags; - uint32_t crtc_id; - int32_t xpos; - int32_t ypos; - int32_t xhot; - int32_t yhot; + __u32 flags; + __u32 crtc_id; + __s32 xpos; + __s32 ypos; + __s32 xhot; + __s32 yhot; }; /*************************************************************************/ @@ -542,8 +540,8 @@ struct drm_vmw_cursor_bypass_arg { */ struct drm_vmw_stream_arg { - uint32_t stream_id; - uint32_t pad64; + __u32 stream_id; + __u32 pad64; }; /*************************************************************************/ @@ -565,7 +563,7 @@ struct drm_vmw_stream_arg { /** * struct drm_vmw_get_3d_cap_arg * - * @buffer: Pointer to a buffer for capability data, cast to an uint64_t + * @buffer: Pointer to a buffer for capability data, cast to an __u64 * @size: Max size to copy * * Input argument to the DRM_VMW_GET_3D_CAP_IOCTL @@ -573,9 +571,9 @@ struct drm_vmw_stream_arg { */ struct drm_vmw_get_3d_cap_arg { - uint64_t buffer; - uint32_t max_size; - uint32_t pad64; + __u64 buffer; + __u32 max_size; + __u32 pad64; }; /*************************************************************************/ @@ -624,14 +622,14 @@ struct drm_vmw_get_3d_cap_arg { */ struct drm_vmw_fence_wait_arg { - uint32_t 
handle; - int32_t cookie_valid; - uint64_t kernel_cookie; - uint64_t timeout_us; - int32_t lazy; - int32_t flags; - int32_t wait_options; - int32_t pad64; + __u32 handle; + __s32 cookie_valid; + __u64 kernel_cookie; + __u64 timeout_us; + __s32 lazy; + __s32 flags; + __s32 wait_options; + __s32 pad64; }; /*************************************************************************/ @@ -655,12 +653,12 @@ struct drm_vmw_fence_wait_arg { */ struct drm_vmw_fence_signaled_arg { - uint32_t handle; - uint32_t flags; - int32_t signaled; - uint32_t passed_seqno; - uint32_t signaled_flags; - uint32_t pad64; + __u32 handle; + __u32 flags; + __s32 signaled; + __u32 passed_seqno; + __u32 signaled_flags; + __u32 pad64; }; /*************************************************************************/ @@ -681,8 +679,8 @@ struct drm_vmw_fence_signaled_arg { */ struct drm_vmw_fence_arg { - uint32_t handle; - uint32_t pad64; + __u32 handle; + __u32 pad64; }; @@ -703,9 +701,9 @@ struct drm_vmw_fence_arg { struct drm_vmw_event_fence { struct drm_event base; - uint64_t user_data; - uint32_t tv_sec; - uint32_t tv_usec; + __u64 user_data; + __u32 tv_sec; + __u32 tv_usec; }; /* @@ -717,17 +715,17 @@ struct drm_vmw_event_fence { /** * struct drm_vmw_fence_event_arg * - * @fence_rep: Pointer to fence_rep structure cast to uint64_t or 0 if + * @fence_rep: Pointer to fence_rep structure cast to __u64 or 0 if * the fence is not supposed to be referenced by user-space. * @user_info: Info to be delivered with the event. * @handle: Attach the event to this fence only. * @flags: A set of flags as defined above. */ struct drm_vmw_fence_event_arg { - uint64_t fence_rep; - uint64_t user_data; - uint32_t handle; - uint32_t flags; + __u64 fence_rep; + __u64 user_data; + __u32 handle; + __u32 flags; }; @@ -747,7 +745,7 @@ struct drm_vmw_fence_event_arg { * @sid: Surface id to present from. * @dest_x: X placement coordinate for surface. * @dest_y: Y placement coordinate for surface. - * @clips_ptr: Pointer to an array of clip rects cast to an uint64_t. + * @clips_ptr: Pointer to an array of clip rects cast to an __u64. * @num_clips: Number of cliprects given relative to the framebuffer origin, * in the same coordinate space as the frame buffer. * @pad64: Unused 64-bit padding. @@ -756,13 +754,13 @@ struct drm_vmw_fence_event_arg { */ struct drm_vmw_present_arg { - uint32_t fb_id; - uint32_t sid; - int32_t dest_x; - int32_t dest_y; - uint64_t clips_ptr; - uint32_t num_clips; - uint32_t pad64; + __u32 fb_id; + __u32 sid; + __s32 dest_x; + __s32 dest_y; + __u64 clips_ptr; + __u32 num_clips; + __u32 pad64; }; @@ -780,16 +778,16 @@ struct drm_vmw_present_arg { * struct drm_vmw_present_arg * @fb_id: fb_id to present / read back from. * @num_clips: Number of cliprects. - * @clips_ptr: Pointer to an array of clip rects cast to an uint64_t. - * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an uint64_t. + * @clips_ptr: Pointer to an array of clip rects cast to an __u64. + * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an __u64. * If this member is NULL, then the ioctl should not return a fence. 
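/*
 * Editorial sketch only: draining fence events delivered on the DRM file
 * descriptor.  It assumes the drm_vmw_event_fence layout shown above and the
 * generic struct drm_event header (type/length) provided by drm.h, which
 * vmwgfx_drm.h now includes; matching base.type against the driver's
 * fence-signaled event type is left out because that constant is not part of
 * this hunk.
 */
#include <string.h>
#include <unistd.h>
#include "vmwgfx_drm.h"

static void drain_fence_events(int drm_fd)
{
	char buf[4096];
	ssize_t len = read(drm_fd, buf, sizeof(buf));
	ssize_t off = 0;

	while (off + (ssize_t)sizeof(struct drm_event) <= len) {
		struct drm_vmw_event_fence ev;
		struct drm_event hdr;

		memcpy(&hdr, buf + off, sizeof(hdr));
		if (hdr.length < sizeof(hdr) || off + (ssize_t)hdr.length > len)
			break;                  /* malformed or truncated event */
		if (hdr.length >= sizeof(ev)) {
			memcpy(&ev, buf + off, sizeof(ev));
			/* ev.user_data, ev.tv_sec, ev.tv_usec describe the fence. */
		}
		off += hdr.length;
	}
}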
*/ struct drm_vmw_present_readback_arg { - uint32_t fb_id; - uint32_t num_clips; - uint64_t clips_ptr; - uint64_t fence_rep; + __u32 fb_id; + __u32 num_clips; + __u64 clips_ptr; + __u64 fence_rep; }; /*************************************************************************/ @@ -805,14 +803,14 @@ struct drm_vmw_present_readback_arg { * struct drm_vmw_update_layout_arg * * @num_outputs: number of active connectors - * @rects: pointer to array of drm_vmw_rect cast to an uint64_t + * @rects: pointer to array of drm_vmw_rect cast to an __u64 * * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl. */ struct drm_vmw_update_layout_arg { - uint32_t num_outputs; - uint32_t pad64; - uint64_t rects; + __u32 num_outputs; + __u32 pad64; + __u64 rects; }; @@ -849,10 +847,10 @@ enum drm_vmw_shader_type { */ struct drm_vmw_shader_create_arg { enum drm_vmw_shader_type shader_type; - uint32_t size; - uint32_t buffer_handle; - uint32_t shader_handle; - uint64_t offset; + __u32 size; + __u32 buffer_handle; + __u32 shader_handle; + __u64 offset; }; /*************************************************************************/ @@ -871,8 +869,8 @@ struct drm_vmw_shader_create_arg { * Input argument to the DRM_VMW_UNREF_SHADER ioctl. */ struct drm_vmw_shader_arg { - uint32_t handle; - uint32_t pad64; + __u32 handle; + __u32 pad64; }; /*************************************************************************/ @@ -918,14 +916,14 @@ enum drm_vmw_surface_flags { * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl. */ struct drm_vmw_gb_surface_create_req { - uint32_t svga3d_flags; - uint32_t format; - uint32_t mip_levels; + __u32 svga3d_flags; + __u32 format; + __u32 mip_levels; enum drm_vmw_surface_flags drm_surface_flags; - uint32_t multisample_count; - uint32_t autogen_filter; - uint32_t buffer_handle; - uint32_t array_size; + __u32 multisample_count; + __u32 autogen_filter; + __u32 buffer_handle; + __u32 array_size; struct drm_vmw_size base_size; }; @@ -944,11 +942,11 @@ struct drm_vmw_gb_surface_create_req { * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl. 
*/ struct drm_vmw_gb_surface_create_rep { - uint32_t handle; - uint32_t backup_size; - uint32_t buffer_handle; - uint32_t buffer_size; - uint64_t buffer_map_handle; + __u32 handle; + __u32 backup_size; + __u32 buffer_handle; + __u32 buffer_size; + __u64 buffer_map_handle; }; /** @@ -1061,8 +1059,8 @@ enum drm_vmw_synccpu_op { struct drm_vmw_synccpu_arg { enum drm_vmw_synccpu_op op; enum drm_vmw_synccpu_flags flags; - uint32_t handle; - uint32_t pad64; + __u32 handle; + __u32 pad64; }; /*************************************************************************/ diff --git a/drivers/include/uapi/linux/byteorder/little_endian.h b/drivers/include/uapi/linux/byteorder/little_endian.h new file mode 100644 index 0000000000..d876736a00 --- /dev/null +++ b/drivers/include/uapi/linux/byteorder/little_endian.h @@ -0,0 +1,105 @@ +#ifndef _UAPI_LINUX_BYTEORDER_LITTLE_ENDIAN_H +#define _UAPI_LINUX_BYTEORDER_LITTLE_ENDIAN_H + +#ifndef __LITTLE_ENDIAN +#define __LITTLE_ENDIAN 1234 +#endif +#ifndef __LITTLE_ENDIAN_BITFIELD +#define __LITTLE_ENDIAN_BITFIELD +#endif + +#include +#include + +#define __constant_htonl(x) ((__force __be32)___constant_swab32((x))) +#define __constant_ntohl(x) ___constant_swab32((__force __be32)(x)) +#define __constant_htons(x) ((__force __be16)___constant_swab16((x))) +#define __constant_ntohs(x) ___constant_swab16((__force __be16)(x)) +#define __constant_cpu_to_le64(x) ((__force __le64)(__u64)(x)) +#define __constant_le64_to_cpu(x) ((__force __u64)(__le64)(x)) +#define __constant_cpu_to_le32(x) ((__force __le32)(__u32)(x)) +#define __constant_le32_to_cpu(x) ((__force __u32)(__le32)(x)) +#define __constant_cpu_to_le16(x) ((__force __le16)(__u16)(x)) +#define __constant_le16_to_cpu(x) ((__force __u16)(__le16)(x)) +#define __constant_cpu_to_be64(x) ((__force __be64)___constant_swab64((x))) +#define __constant_be64_to_cpu(x) ___constant_swab64((__force __u64)(__be64)(x)) +#define __constant_cpu_to_be32(x) ((__force __be32)___constant_swab32((x))) +#define __constant_be32_to_cpu(x) ___constant_swab32((__force __u32)(__be32)(x)) +#define __constant_cpu_to_be16(x) ((__force __be16)___constant_swab16((x))) +#define __constant_be16_to_cpu(x) ___constant_swab16((__force __u16)(__be16)(x)) +#define __cpu_to_le64(x) ((__force __le64)(__u64)(x)) +#define __le64_to_cpu(x) ((__force __u64)(__le64)(x)) +#define __cpu_to_le32(x) ((__force __le32)(__u32)(x)) +#define __le32_to_cpu(x) ((__force __u32)(__le32)(x)) +#define __cpu_to_le16(x) ((__force __le16)(__u16)(x)) +#define __le16_to_cpu(x) ((__force __u16)(__le16)(x)) +#define __cpu_to_be64(x) ((__force __be64)__swab64((x))) +#define __be64_to_cpu(x) __swab64((__force __u64)(__be64)(x)) +#define __cpu_to_be32(x) ((__force __be32)__swab32((x))) +#define __be32_to_cpu(x) __swab32((__force __u32)(__be32)(x)) +#define __cpu_to_be16(x) ((__force __be16)__swab16((x))) +#define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x)) + +static inline __le64 __cpu_to_le64p(const __u64 *p) +{ + return (__force __le64)*p; +} +static inline __u64 __le64_to_cpup(const __le64 *p) +{ + return (__force __u64)*p; +} +static inline __le32 __cpu_to_le32p(const __u32 *p) +{ + return (__force __le32)*p; +} +static inline __u32 __le32_to_cpup(const __le32 *p) +{ + return (__force __u32)*p; +} +static inline __le16 __cpu_to_le16p(const __u16 *p) +{ + return (__force __le16)*p; +} +static inline __u16 __le16_to_cpup(const __le16 *p) +{ + return (__force __u16)*p; +} +static inline __be64 __cpu_to_be64p(const __u64 *p) +{ + return (__force __be64)__swab64p(p); +} +static 
inline __u64 __be64_to_cpup(const __be64 *p) +{ + return __swab64p((__u64 *)p); +} +static inline __be32 __cpu_to_be32p(const __u32 *p) +{ + return (__force __be32)__swab32p(p); +} +static inline __u32 __be32_to_cpup(const __be32 *p) +{ + return __swab32p((__u32 *)p); +} +static inline __be16 __cpu_to_be16p(const __u16 *p) +{ + return (__force __be16)__swab16p(p); +} +static inline __u16 __be16_to_cpup(const __be16 *p) +{ + return __swab16p((__u16 *)p); +} +#define __cpu_to_le64s(x) do { (void)(x); } while (0) +#define __le64_to_cpus(x) do { (void)(x); } while (0) +#define __cpu_to_le32s(x) do { (void)(x); } while (0) +#define __le32_to_cpus(x) do { (void)(x); } while (0) +#define __cpu_to_le16s(x) do { (void)(x); } while (0) +#define __le16_to_cpus(x) do { (void)(x); } while (0) +#define __cpu_to_be64s(x) __swab64s((x)) +#define __be64_to_cpus(x) __swab64s((x)) +#define __cpu_to_be32s(x) __swab32s((x)) +#define __be32_to_cpus(x) __swab32s((x)) +#define __cpu_to_be16s(x) __swab16s((x)) +#define __be16_to_cpus(x) __swab16s((x)) + + +#endif /* _UAPI_LINUX_BYTEORDER_LITTLE_ENDIAN_H */ diff --git a/drivers/include/uapi/linux/fb.h b/drivers/include/uapi/linux/fb.h new file mode 100644 index 0000000000..fb795c3b3c --- /dev/null +++ b/drivers/include/uapi/linux/fb.h @@ -0,0 +1,402 @@ +#ifndef _UAPI_LINUX_FB_H +#define _UAPI_LINUX_FB_H + +#include +#include + +/* Definitions of frame buffers */ + +#define FB_MAX 32 /* sufficient for now */ + +/* ioctls + 0x46 is 'F' */ +#define FBIOGET_VSCREENINFO 0x4600 +#define FBIOPUT_VSCREENINFO 0x4601 +#define FBIOGET_FSCREENINFO 0x4602 +#define FBIOGETCMAP 0x4604 +#define FBIOPUTCMAP 0x4605 +#define FBIOPAN_DISPLAY 0x4606 +#ifndef __KERNEL__ +#define FBIO_CURSOR _IOWR('F', 0x08, struct fb_cursor) +#endif +/* 0x4607-0x460B are defined below */ +/* #define FBIOGET_MONITORSPEC 0x460C */ +/* #define FBIOPUT_MONITORSPEC 0x460D */ +/* #define FBIOSWITCH_MONIBIT 0x460E */ +#define FBIOGET_CON2FBMAP 0x460F +#define FBIOPUT_CON2FBMAP 0x4610 +#define FBIOBLANK 0x4611 /* arg: 0 or vesa level + 1 */ +#define FBIOGET_VBLANK _IOR('F', 0x12, struct fb_vblank) +#define FBIO_ALLOC 0x4613 +#define FBIO_FREE 0x4614 +#define FBIOGET_GLYPH 0x4615 +#define FBIOGET_HWCINFO 0x4616 +#define FBIOPUT_MODEINFO 0x4617 +#define FBIOGET_DISPINFO 0x4618 +#define FBIO_WAITFORVSYNC _IOW('F', 0x20, __u32) + +#define FB_TYPE_PACKED_PIXELS 0 /* Packed Pixels */ +#define FB_TYPE_PLANES 1 /* Non interleaved planes */ +#define FB_TYPE_INTERLEAVED_PLANES 2 /* Interleaved planes */ +#define FB_TYPE_TEXT 3 /* Text/attributes */ +#define FB_TYPE_VGA_PLANES 4 /* EGA/VGA planes */ +#define FB_TYPE_FOURCC 5 /* Type identified by a V4L2 FOURCC */ + +#define FB_AUX_TEXT_MDA 0 /* Monochrome text */ +#define FB_AUX_TEXT_CGA 1 /* CGA/EGA/VGA Color text */ +#define FB_AUX_TEXT_S3_MMIO 2 /* S3 MMIO fasttext */ +#define FB_AUX_TEXT_MGA_STEP16 3 /* MGA Millenium I: text, attr, 14 reserved bytes */ +#define FB_AUX_TEXT_MGA_STEP8 4 /* other MGAs: text, attr, 6 reserved bytes */ +#define FB_AUX_TEXT_SVGA_GROUP 8 /* 8-15: SVGA tileblit compatible modes */ +#define FB_AUX_TEXT_SVGA_MASK 7 /* lower three bits says step */ +#define FB_AUX_TEXT_SVGA_STEP2 8 /* SVGA text mode: text, attr */ +#define FB_AUX_TEXT_SVGA_STEP4 9 /* SVGA text mode: text, attr, 2 reserved bytes */ +#define FB_AUX_TEXT_SVGA_STEP8 10 /* SVGA text mode: text, attr, 6 reserved bytes */ +#define FB_AUX_TEXT_SVGA_STEP16 11 /* SVGA text mode: text, attr, 14 reserved bytes */ +#define FB_AUX_TEXT_SVGA_LAST 15 /* reserved up to 15 */ + +#define 
FB_AUX_VGA_PLANES_VGA4 0 /* 16 color planes (EGA/VGA) */ +#define FB_AUX_VGA_PLANES_CFB4 1 /* CFB4 in planes (VGA) */ +#define FB_AUX_VGA_PLANES_CFB8 2 /* CFB8 in planes (VGA) */ + +#define FB_VISUAL_MONO01 0 /* Monochr. 1=Black 0=White */ +#define FB_VISUAL_MONO10 1 /* Monochr. 1=White 0=Black */ +#define FB_VISUAL_TRUECOLOR 2 /* True color */ +#define FB_VISUAL_PSEUDOCOLOR 3 /* Pseudo color (like atari) */ +#define FB_VISUAL_DIRECTCOLOR 4 /* Direct color */ +#define FB_VISUAL_STATIC_PSEUDOCOLOR 5 /* Pseudo color readonly */ +#define FB_VISUAL_FOURCC 6 /* Visual identified by a V4L2 FOURCC */ + +#define FB_ACCEL_NONE 0 /* no hardware accelerator */ +#define FB_ACCEL_ATARIBLITT 1 /* Atari Blitter */ +#define FB_ACCEL_AMIGABLITT 2 /* Amiga Blitter */ +#define FB_ACCEL_S3_TRIO64 3 /* Cybervision64 (S3 Trio64) */ +#define FB_ACCEL_NCR_77C32BLT 4 /* RetinaZ3 (NCR 77C32BLT) */ +#define FB_ACCEL_S3_VIRGE 5 /* Cybervision64/3D (S3 ViRGE) */ +#define FB_ACCEL_ATI_MACH64GX 6 /* ATI Mach 64GX family */ +#define FB_ACCEL_DEC_TGA 7 /* DEC 21030 TGA */ +#define FB_ACCEL_ATI_MACH64CT 8 /* ATI Mach 64CT family */ +#define FB_ACCEL_ATI_MACH64VT 9 /* ATI Mach 64CT family VT class */ +#define FB_ACCEL_ATI_MACH64GT 10 /* ATI Mach 64CT family GT class */ +#define FB_ACCEL_SUN_CREATOR 11 /* Sun Creator/Creator3D */ +#define FB_ACCEL_SUN_CGSIX 12 /* Sun cg6 */ +#define FB_ACCEL_SUN_LEO 13 /* Sun leo/zx */ +#define FB_ACCEL_IMS_TWINTURBO 14 /* IMS Twin Turbo */ +#define FB_ACCEL_3DLABS_PERMEDIA2 15 /* 3Dlabs Permedia 2 */ +#define FB_ACCEL_MATROX_MGA2064W 16 /* Matrox MGA2064W (Millenium) */ +#define FB_ACCEL_MATROX_MGA1064SG 17 /* Matrox MGA1064SG (Mystique) */ +#define FB_ACCEL_MATROX_MGA2164W 18 /* Matrox MGA2164W (Millenium II) */ +#define FB_ACCEL_MATROX_MGA2164W_AGP 19 /* Matrox MGA2164W (Millenium II) */ +#define FB_ACCEL_MATROX_MGAG100 20 /* Matrox G100 (Productiva G100) */ +#define FB_ACCEL_MATROX_MGAG200 21 /* Matrox G200 (Myst, Mill, ...) 
*/ +#define FB_ACCEL_SUN_CG14 22 /* Sun cgfourteen */ +#define FB_ACCEL_SUN_BWTWO 23 /* Sun bwtwo */ +#define FB_ACCEL_SUN_CGTHREE 24 /* Sun cgthree */ +#define FB_ACCEL_SUN_TCX 25 /* Sun tcx */ +#define FB_ACCEL_MATROX_MGAG400 26 /* Matrox G400 */ +#define FB_ACCEL_NV3 27 /* nVidia RIVA 128 */ +#define FB_ACCEL_NV4 28 /* nVidia RIVA TNT */ +#define FB_ACCEL_NV5 29 /* nVidia RIVA TNT2 */ +#define FB_ACCEL_CT_6555x 30 /* C&T 6555x */ +#define FB_ACCEL_3DFX_BANSHEE 31 /* 3Dfx Banshee */ +#define FB_ACCEL_ATI_RAGE128 32 /* ATI Rage128 family */ +#define FB_ACCEL_IGS_CYBER2000 33 /* CyberPro 2000 */ +#define FB_ACCEL_IGS_CYBER2010 34 /* CyberPro 2010 */ +#define FB_ACCEL_IGS_CYBER5000 35 /* CyberPro 5000 */ +#define FB_ACCEL_SIS_GLAMOUR 36 /* SiS 300/630/540 */ +#define FB_ACCEL_3DLABS_PERMEDIA3 37 /* 3Dlabs Permedia 3 */ +#define FB_ACCEL_ATI_RADEON 38 /* ATI Radeon family */ +#define FB_ACCEL_I810 39 /* Intel 810/815 */ +#define FB_ACCEL_SIS_GLAMOUR_2 40 /* SiS 315, 650, 740 */ +#define FB_ACCEL_SIS_XABRE 41 /* SiS 330 ("Xabre") */ +#define FB_ACCEL_I830 42 /* Intel 830M/845G/85x/865G */ +#define FB_ACCEL_NV_10 43 /* nVidia Arch 10 */ +#define FB_ACCEL_NV_20 44 /* nVidia Arch 20 */ +#define FB_ACCEL_NV_30 45 /* nVidia Arch 30 */ +#define FB_ACCEL_NV_40 46 /* nVidia Arch 40 */ +#define FB_ACCEL_XGI_VOLARI_V 47 /* XGI Volari V3XT, V5, V8 */ +#define FB_ACCEL_XGI_VOLARI_Z 48 /* XGI Volari Z7 */ +#define FB_ACCEL_OMAP1610 49 /* TI OMAP16xx */ +#define FB_ACCEL_TRIDENT_TGUI 50 /* Trident TGUI */ +#define FB_ACCEL_TRIDENT_3DIMAGE 51 /* Trident 3DImage */ +#define FB_ACCEL_TRIDENT_BLADE3D 52 /* Trident Blade3D */ +#define FB_ACCEL_TRIDENT_BLADEXP 53 /* Trident BladeXP */ +#define FB_ACCEL_CIRRUS_ALPINE 53 /* Cirrus Logic 543x/544x/5480 */ +#define FB_ACCEL_NEOMAGIC_NM2070 90 /* NeoMagic NM2070 */ +#define FB_ACCEL_NEOMAGIC_NM2090 91 /* NeoMagic NM2090 */ +#define FB_ACCEL_NEOMAGIC_NM2093 92 /* NeoMagic NM2093 */ +#define FB_ACCEL_NEOMAGIC_NM2097 93 /* NeoMagic NM2097 */ +#define FB_ACCEL_NEOMAGIC_NM2160 94 /* NeoMagic NM2160 */ +#define FB_ACCEL_NEOMAGIC_NM2200 95 /* NeoMagic NM2200 */ +#define FB_ACCEL_NEOMAGIC_NM2230 96 /* NeoMagic NM2230 */ +#define FB_ACCEL_NEOMAGIC_NM2360 97 /* NeoMagic NM2360 */ +#define FB_ACCEL_NEOMAGIC_NM2380 98 /* NeoMagic NM2380 */ +#define FB_ACCEL_PXA3XX 99 /* PXA3xx */ + +#define FB_ACCEL_SAVAGE4 0x80 /* S3 Savage4 */ +#define FB_ACCEL_SAVAGE3D 0x81 /* S3 Savage3D */ +#define FB_ACCEL_SAVAGE3D_MV 0x82 /* S3 Savage3D-MV */ +#define FB_ACCEL_SAVAGE2000 0x83 /* S3 Savage2000 */ +#define FB_ACCEL_SAVAGE_MX_MV 0x84 /* S3 Savage/MX-MV */ +#define FB_ACCEL_SAVAGE_MX 0x85 /* S3 Savage/MX */ +#define FB_ACCEL_SAVAGE_IX_MV 0x86 /* S3 Savage/IX-MV */ +#define FB_ACCEL_SAVAGE_IX 0x87 /* S3 Savage/IX */ +#define FB_ACCEL_PROSAVAGE_PM 0x88 /* S3 ProSavage PM133 */ +#define FB_ACCEL_PROSAVAGE_KM 0x89 /* S3 ProSavage KM133 */ +#define FB_ACCEL_S3TWISTER_P 0x8a /* S3 Twister */ +#define FB_ACCEL_S3TWISTER_K 0x8b /* S3 TwisterK */ +#define FB_ACCEL_SUPERSAVAGE 0x8c /* S3 Supersavage */ +#define FB_ACCEL_PROSAVAGE_DDR 0x8d /* S3 ProSavage DDR */ +#define FB_ACCEL_PROSAVAGE_DDRK 0x8e /* S3 ProSavage DDR-K */ + +#define FB_ACCEL_PUV3_UNIGFX 0xa0 /* PKUnity-v3 Unigfx */ + +#define FB_CAP_FOURCC 1 /* Device supports FOURCC-based formats */ + +struct fb_fix_screeninfo { + char id[16]; /* identification string eg "TT Builtin" */ + unsigned long smem_start; /* Start of frame buffer mem */ + /* (physical address) */ + __u32 smem_len; /* Length of frame buffer mem */ + __u32 type; /* see 
FB_TYPE_* */ + __u32 type_aux; /* Interleave for interleaved Planes */ + __u32 visual; /* see FB_VISUAL_* */ + __u16 xpanstep; /* zero if no hardware panning */ + __u16 ypanstep; /* zero if no hardware panning */ + __u16 ywrapstep; /* zero if no hardware ywrap */ + __u32 line_length; /* length of a line in bytes */ + unsigned long mmio_start; /* Start of Memory Mapped I/O */ + /* (physical address) */ + __u32 mmio_len; /* Length of Memory Mapped I/O */ + __u32 accel; /* Indicate to driver which */ + /* specific chip/card we have */ + __u16 capabilities; /* see FB_CAP_* */ + __u16 reserved[2]; /* Reserved for future compatibility */ +}; + +/* Interpretation of offset for color fields: All offsets are from the right, + * inside a "pixel" value, which is exactly 'bits_per_pixel' wide (means: you + * can use the offset as right argument to <<). A pixel afterwards is a bit + * stream and is written to video memory as that unmodified. + * + * For pseudocolor: offset and length should be the same for all color + * components. Offset specifies the position of the least significant bit + * of the pallette index in a pixel value. Length indicates the number + * of available palette entries (i.e. # of entries = 1 << length). + */ +struct fb_bitfield { + __u32 offset; /* beginning of bitfield */ + __u32 length; /* length of bitfield */ + __u32 msb_right; /* != 0 : Most significant bit is */ + /* right */ +}; + +#define FB_NONSTD_HAM 1 /* Hold-And-Modify (HAM) */ +#define FB_NONSTD_REV_PIX_IN_B 2 /* order of pixels in each byte is reversed */ + +#define FB_ACTIVATE_NOW 0 /* set values immediately (or vbl)*/ +#define FB_ACTIVATE_NXTOPEN 1 /* activate on next open */ +#define FB_ACTIVATE_TEST 2 /* don't set, round up impossible */ +#define FB_ACTIVATE_MASK 15 + /* values */ +#define FB_ACTIVATE_VBL 16 /* activate values on next vbl */ +#define FB_CHANGE_CMAP_VBL 32 /* change colormap on vbl */ +#define FB_ACTIVATE_ALL 64 /* change all VCs on this fb */ +#define FB_ACTIVATE_FORCE 128 /* force apply even when no change*/ +#define FB_ACTIVATE_INV_MODE 256 /* invalidate videomode */ + +#define FB_ACCELF_TEXT 1 /* (OBSOLETE) see fb_info.flags and vc_mode */ + +#define FB_SYNC_HOR_HIGH_ACT 1 /* horizontal sync high active */ +#define FB_SYNC_VERT_HIGH_ACT 2 /* vertical sync high active */ +#define FB_SYNC_EXT 4 /* external sync */ +#define FB_SYNC_COMP_HIGH_ACT 8 /* composite sync high active */ +#define FB_SYNC_BROADCAST 16 /* broadcast video timings */ + /* vtotal = 144d/288n/576i => PAL */ + /* vtotal = 121d/242n/484i => NTSC */ +#define FB_SYNC_ON_GREEN 32 /* sync on green */ + +#define FB_VMODE_NONINTERLACED 0 /* non interlaced */ +#define FB_VMODE_INTERLACED 1 /* interlaced */ +#define FB_VMODE_DOUBLE 2 /* double scan */ +#define FB_VMODE_ODD_FLD_FIRST 4 /* interlaced: top line first */ +#define FB_VMODE_MASK 255 + +#define FB_VMODE_YWRAP 256 /* ywrap instead of panning */ +#define FB_VMODE_SMOOTH_XPAN 512 /* smooth xpan possible (internally used) */ +#define FB_VMODE_CONUPDATE 512 /* don't update x/yoffset */ + +/* + * Display rotation support + */ +#define FB_ROTATE_UR 0 +#define FB_ROTATE_CW 1 +#define FB_ROTATE_UD 2 +#define FB_ROTATE_CCW 3 + +#define PICOS2KHZ(a) (1000000000UL/(a)) +#define KHZ2PICOS(a) (1000000000UL/(a)) + +struct fb_var_screeninfo { + __u32 xres; /* visible resolution */ + __u32 yres; + __u32 xres_virtual; /* virtual resolution */ + __u32 yres_virtual; + __u32 xoffset; /* offset from virtual to visible */ + __u32 yoffset; /* resolution */ + + __u32 bits_per_pixel; /* guess what */ 
+ __u32 grayscale; /* 0 = color, 1 = grayscale, */ + /* >1 = FOURCC */ + struct fb_bitfield red; /* bitfield in fb mem if true color, */ + struct fb_bitfield green; /* else only length is significant */ + struct fb_bitfield blue; + struct fb_bitfield transp; /* transparency */ + + __u32 nonstd; /* != 0 Non standard pixel format */ + + __u32 activate; /* see FB_ACTIVATE_* */ + + __u32 height; /* height of picture in mm */ + __u32 width; /* width of picture in mm */ + + __u32 accel_flags; /* (OBSOLETE) see fb_info.flags */ + + /* Timing: All values in pixclocks, except pixclock (of course) */ + __u32 pixclock; /* pixel clock in ps (pico seconds) */ + __u32 left_margin; /* time from sync to picture */ + __u32 right_margin; /* time from picture to sync */ + __u32 upper_margin; /* time from sync to picture */ + __u32 lower_margin; + __u32 hsync_len; /* length of horizontal sync */ + __u32 vsync_len; /* length of vertical sync */ + __u32 sync; /* see FB_SYNC_* */ + __u32 vmode; /* see FB_VMODE_* */ + __u32 rotate; /* angle we rotate counter clockwise */ + __u32 colorspace; /* colorspace for FOURCC-based modes */ + __u32 reserved[4]; /* Reserved for future compatibility */ +}; + +struct fb_cmap { + __u32 start; /* First entry */ + __u32 len; /* Number of entries */ + __u16 *red; /* Red values */ + __u16 *green; + __u16 *blue; + __u16 *transp; /* transparency, can be NULL */ +}; + +struct fb_con2fbmap { + __u32 console; + __u32 framebuffer; +}; + +/* VESA Blanking Levels */ +#define VESA_NO_BLANKING 0 +#define VESA_VSYNC_SUSPEND 1 +#define VESA_HSYNC_SUSPEND 2 +#define VESA_POWERDOWN 3 + + +enum { + /* screen: unblanked, hsync: on, vsync: on */ + FB_BLANK_UNBLANK = VESA_NO_BLANKING, + + /* screen: blanked, hsync: on, vsync: on */ + FB_BLANK_NORMAL = VESA_NO_BLANKING + 1, + + /* screen: blanked, hsync: on, vsync: off */ + FB_BLANK_VSYNC_SUSPEND = VESA_VSYNC_SUSPEND + 1, + + /* screen: blanked, hsync: off, vsync: on */ + FB_BLANK_HSYNC_SUSPEND = VESA_HSYNC_SUSPEND + 1, + + /* screen: blanked, hsync: off, vsync: off */ + FB_BLANK_POWERDOWN = VESA_POWERDOWN + 1 +}; + +#define FB_VBLANK_VBLANKING 0x001 /* currently in a vertical blank */ +#define FB_VBLANK_HBLANKING 0x002 /* currently in a horizontal blank */ +#define FB_VBLANK_HAVE_VBLANK 0x004 /* vertical blanks can be detected */ +#define FB_VBLANK_HAVE_HBLANK 0x008 /* horizontal blanks can be detected */ +#define FB_VBLANK_HAVE_COUNT 0x010 /* global retrace counter is available */ +#define FB_VBLANK_HAVE_VCOUNT 0x020 /* the vcount field is valid */ +#define FB_VBLANK_HAVE_HCOUNT 0x040 /* the hcount field is valid */ +#define FB_VBLANK_VSYNCING 0x080 /* currently in a vsync */ +#define FB_VBLANK_HAVE_VSYNC 0x100 /* verical syncs can be detected */ + +struct fb_vblank { + __u32 flags; /* FB_VBLANK flags */ + __u32 count; /* counter of retraces since boot */ + __u32 vcount; /* current scanline position */ + __u32 hcount; /* current scandot position */ + __u32 reserved[4]; /* reserved for future compatibility */ +}; + +/* Internal HW accel */ +#define ROP_COPY 0 +#define ROP_XOR 1 + +struct fb_copyarea { + __u32 dx; + __u32 dy; + __u32 width; + __u32 height; + __u32 sx; + __u32 sy; +}; + +struct fb_fillrect { + __u32 dx; /* screen-relative */ + __u32 dy; + __u32 width; + __u32 height; + __u32 color; + __u32 rop; +}; + +struct fb_image { + __u32 dx; /* Where to place image */ + __u32 dy; + __u32 width; /* Size of image */ + __u32 height; + __u32 fg_color; /* Only used when a mono bitmap */ + __u32 bg_color; + __u8 depth; /* Depth of the image */ + const 
char *data; /* Pointer to image data */ + struct fb_cmap cmap; /* color map info */ +}; + +/* + * hardware cursor control + */ + +#define FB_CUR_SETIMAGE 0x01 +#define FB_CUR_SETPOS 0x02 +#define FB_CUR_SETHOT 0x04 +#define FB_CUR_SETCMAP 0x08 +#define FB_CUR_SETSHAPE 0x10 +#define FB_CUR_SETSIZE 0x20 +#define FB_CUR_SETALL 0xFF + +struct fbcurpos { + __u16 x, y; +}; + +struct fb_cursor { + __u16 set; /* what to set */ + __u16 enable; /* cursor on/off */ + __u16 rop; /* bitop operation */ + const char *mask; /* cursor mask bits */ + struct fbcurpos hot; /* cursor hot spot */ + struct fb_image image; /* Cursor image */ +}; + +#ifdef CONFIG_FB_BACKLIGHT +/* Settings for the generic backlight code */ +#define FB_BACKLIGHT_LEVELS 128 +#define FB_BACKLIGHT_MAX 0xFF +#endif + + +#endif /* _UAPI_LINUX_FB_H */
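/*
 * Editorial sketch of the color-field interpretation documented for struct
 * fb_bitfield above: for a truecolor visual, each component is reduced to its
 * field length and shifted left by its offset.  It assumes 8-bit input
 * components, field lengths of at most 8 bits, and msb_right == 0.
 */
#include "fb.h"

static __u32 pack_pixel(const struct fb_var_screeninfo *var,
			unsigned int r, unsigned int g, unsigned int b)
{
	/* Drop the low bits that do not fit, then place the field. */
	__u32 pr = (__u32)(r >> (8 - var->red.length))   << var->red.offset;
	__u32 pg = (__u32)(g >> (8 - var->green.length)) << var->green.offset;
	__u32 pb = (__u32)(b >> (8 - var->blue.length))  << var->blue.offset;

	return pr | pg | pb;
}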
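/*
 * Follow-up sketch: querying the current mode with FBIOGET_VSCREENINFO and
 * deriving the vertical refresh rate from the timing fields, using the units
 * documented above (pixclock in picoseconds, margins and sync lengths in
 * pixclocks).  The device node path is a hypothetical example, and the
 * interlaced/doublescan vmode flags are ignored for brevity.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "fb.h"

int main(void)
{
	struct fb_var_screeninfo var;
	int fd = open("/dev/fb0", O_RDWR);      /* hypothetical device node */

	if (fd < 0 || ioctl(fd, FBIOGET_VSCREENINFO, &var) < 0)
		return 1;

	unsigned long htotal = var.left_margin + var.xres +
			       var.right_margin + var.hsync_len;
	unsigned long vtotal = var.upper_margin + var.yres +
			       var.lower_margin + var.vsync_len;
	unsigned long dotclk_khz = var.pixclock ? PICOS2KHZ(var.pixclock) : 0;
	unsigned long refresh = (htotal && vtotal) ?
				dotclk_khz * 1000UL / (htotal * vtotal) : 0;

	printf("%ux%u @ ~%lu Hz\n", var.xres, var.yres, refresh);
	close(fd);
	return 0;
}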