i915 RC 10

git-svn-id: svn://kolibrios.org@3243 a494cfbc-eb01-0410-851d-a64ba20cac60
Sergey Semyonov (Serge) 2013-02-13 08:23:54 +00:00
parent f18f91c208
commit 082fd6ba1d
51 changed files with 7014 additions and 4796 deletions

View File

@@ -60,6 +60,7 @@
 //#include <linux/file.h>
 #include <linux/pci.h>
 #include <linux/jiffies.h>
+#include <linux/irqreturn.h>
 //#include <linux/smp_lock.h>    /* For (un)lock_kernel */
 //#include <linux/dma-mapping.h>
 //#include <linux/mm.h>
@@ -170,7 +171,7 @@ int drm_err(const char *func, const char *format, ...);
 /** \name Begin the DRM... */
 /*@{*/
-#define DRM_DEBUG_CODE 2      /**< Include debugging code if > 1, then
+#define DRM_DEBUG_CODE 0      /**< Include debugging code if > 1, then
                                    also include looping detection. */
 #define DRM_MAGIC_HASH_ORDER  4  /**< Size of key hash table. Must be power of 2. */
@@ -965,6 +966,15 @@ struct drm_driver {
 #endif
+#define DRM_IRQ_ARGS    int irq, void *arg
+
+struct drm_driver {
+    irqreturn_t (*irq_handler) (DRM_IRQ_ARGS);
+    void (*irq_preinstall) (struct drm_device *dev);
+    int (*irq_postinstall) (struct drm_device *dev);
+};
 #define DRM_MINOR_UNASSIGNED 0
 #define DRM_MINOR_LEGACY 1
 #define DRM_MINOR_CONTROL 2
@@ -1172,7 +1182,7 @@ struct drm_device {
 //    struct drm_sigdata sigdata;       /**< For block_all_signals */
 //    sigset_t sigmask;
-//    struct drm_driver *driver;
+    struct drm_driver *driver;
 //    struct drm_local_map *agp_buffer_map;
 //    unsigned int agp_buffer_token;
 //    struct drm_minor *control;        /**< Control node for card */

View File

@@ -0,0 +1,53 @@
+/**************************************************************************
+ *
+ * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#ifndef _DRM_GLOBAL_H_
+#define _DRM_GLOBAL_H_
+
+enum drm_global_types {
+    DRM_GLOBAL_TTM_MEM = 0,
+    DRM_GLOBAL_TTM_BO,
+    DRM_GLOBAL_TTM_OBJECT,
+    DRM_GLOBAL_NUM
+};
+
+struct drm_global_reference {
+    enum drm_global_types global_type;
+    size_t size;
+    void *object;
+    int (*init) (struct drm_global_reference *);
+    void (*release) (struct drm_global_reference *);
+};
+
+extern void drm_global_init(void);
+extern void drm_global_release(void);
+extern int drm_global_item_ref(struct drm_global_reference *ref);
+extern void drm_global_item_unref(struct drm_global_reference *ref);
+
+#endif

View File

@@ -19,6 +19,7 @@ struct intel_gtt {
     unsigned int do_idle_maps : 1;
     /* Share the scratch page dma with ppgtts. */
     dma_addr_t scratch_page_dma;
+    struct page *scratch_page;
     /* for ppgtt PDE access */
     u32 __iomem *gtt;
     /* needed for ioremap in drm/i915 */
@@ -32,7 +33,8 @@ void intel_gmch_remove(void);
 bool intel_enable_gtt(void);
 void intel_gtt_chipset_flush(void);
-void intel_gtt_insert_sg_entries(struct pagelist *st, unsigned int pg_start,
+void intel_gtt_insert_sg_entries(struct sg_table *st,
+                                 unsigned int pg_start,
                                  unsigned int flags);
 void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);

View File

@@ -0,0 +1,34 @@
+#ifndef __ASM_GENERIC_SCATTERLIST_H
+#define __ASM_GENERIC_SCATTERLIST_H
+
+#include <linux/types.h>
+
+struct scatterlist {
+#ifdef CONFIG_DEBUG_SG
+    unsigned long   sg_magic;
+#endif
+    unsigned long   page_link;
+    unsigned int    offset;
+    unsigned int    length;
+    dma_addr_t      dma_address;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+    unsigned int    dma_length;
+#endif
+};
+
+/*
+ * These macros should be used after a dma_map_sg call has been done
+ * to get bus addresses of each of the SG entries and their lengths.
+ * You should only work with the number of sg entries pci_map_sg
+ * returns, or alternatively stop on the first sg_dma_len(sg) which
+ * is 0.
+ */
+#define sg_dma_address(sg)  ((sg)->dma_address)
+
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+#define sg_dma_len(sg)      ((sg)->dma_length)
+#else
+#define sg_dma_len(sg)      ((sg)->length)
+#endif
+
+#endif /* __ASM_GENERIC_SCATTERLIST_H */
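
The comment above prescribes a specific access pattern; a minimal sketch of it follows (hypothetical driver code, not part of this commit; it assumes the for_each_sg() helper from the linux/scatterlist.h added later in this commit):

#include <linux/scatterlist.h>

/* Walk the entries a successful dma_map_sg() produced: visit at most
 * 'mapped_nents' entries and stop early at the first zero sg_dma_len(). */
static void program_dma_segments(struct scatterlist *sgl, int mapped_nents)
{
    struct scatterlist *sg;
    int i;

    for_each_sg(sgl, sg, mapped_nents, i) {
        dma_addr_t bus = sg_dma_address(sg); /* device-visible address */
        unsigned int len = sg_dma_len(sg);   /* segment length in bytes */

        if (len == 0)
            break;
        /* hand bus/len to the (hypothetical) device's DMA engine here */
        (void)bus;
    }
}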

View File

@@ -63,3 +63,13 @@
 #define __compiletime_warning(message) __attribute__((warning(message)))
 #define __compiletime_error(message) __attribute__((error(message)))
 #endif
+
+#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+#if __GNUC_MINOR__ >= 4
+#define __HAVE_BUILTIN_BSWAP32__
+#define __HAVE_BUILTIN_BSWAP64__
+#endif
+#if __GNUC_MINOR__ >= 8 || (defined(__powerpc__) && __GNUC_MINOR__ >= 6)
+#define __HAVE_BUILTIN_BSWAP16__
+#endif
+#endif

View File

@@ -10,6 +10,7 @@
 # define __force    __attribute__((force))
 # define __nocast   __attribute__((nocast))
 # define __iomem    __attribute__((noderef, address_space(2)))
+# define __must_hold(x) __attribute__((context(x,1,1)))
 # define __acquires(x)  __attribute__((context(x,0,1)))
 # define __releases(x)  __attribute__((context(x,1,0)))
 # define __acquire(x)   __context__(x,1)
@@ -33,6 +34,7 @@ extern void __chk_io_ptr(const volatile void __iomem *);
 # define __chk_user_ptr(x) (void)0
 # define __chk_io_ptr(x) (void)0
 # define __builtin_warning(x, y...) (1)
+# define __must_hold(x)
 # define __acquires(x)
 # define __releases(x)
 # define __acquire(x) (void)0
@@ -42,6 +44,10 @@ extern void __chk_io_ptr(const volatile void __iomem *);
 # define __rcu
 #endif

+/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
+#define ___PASTE(a,b) a##b
+#define __PASTE(a,b) ___PASTE(a,b)
+
 #ifdef __KERNEL__

 #ifdef __GNUC__
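
The two-level paste added above matters because macro arguments that touch ## are not expanded first; the indirection forces expansion. A small illustration (MAKE_UNIQUE is a hypothetical name, not from this commit):

/* A single-level paste would glue the literal token __LINE__, producing
 * tmp___LINE__; going through __PASTE lets __LINE__ expand first. */
#define MAKE_UNIQUE(prefix) __PASTE(prefix, __LINE__)

static int MAKE_UNIQUE(tmp_);  /* on line 42 this declares tmp_42 */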

View File

@ -0,0 +1,65 @@
+#ifndef _LINUX_ERR_H
+#define _LINUX_ERR_H
+
+#include <linux/compiler.h>
+
+#include <errno.h>
+
+/*
+ * Kernel pointers have redundant information, so we can use a
+ * scheme where we can return either an error code or a dentry
+ * pointer with the same return value.
+ *
+ * This should be a per-architecture thing, to allow different
+ * error and pointer decisions.
+ */
+#define MAX_ERRNO   4095
+
+#ifndef __ASSEMBLY__
+
+#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
+
+static inline void * __must_check ERR_PTR(long error)
+{
+    return (void *) error;
+}
+
+static inline long __must_check PTR_ERR(const void *ptr)
+{
+    return (long) ptr;
+}
+
+static inline long __must_check IS_ERR(const void *ptr)
+{
+    return IS_ERR_VALUE((unsigned long)ptr);
+}
+
+static inline long __must_check IS_ERR_OR_NULL(const void *ptr)
+{
+    return !ptr || IS_ERR_VALUE((unsigned long)ptr);
+}
+
+/**
+ * ERR_CAST - Explicitly cast an error-valued pointer to another pointer type
+ * @ptr: The pointer to cast.
+ *
+ * Explicitly cast an error-valued pointer to another pointer type in such a
+ * way as to make it clear that's what's going on.
+ */
+static inline void * __must_check ERR_CAST(const void *ptr)
+{
+    /* cast away the const */
+    return (void *) ptr;
+}
+
+static inline int __must_check PTR_RET(const void *ptr)
+{
+    if (IS_ERR(ptr))
+        return PTR_ERR(ptr);
+    else
+        return 0;
+}
+
+#endif
+
+#endif /* _LINUX_ERR_H */
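
A minimal usage sketch for the helpers above (widget_create() and struct widget are hypothetical): a constructor encodes an errno into the returned pointer with ERR_PTR(), and the caller decodes it with IS_ERR()/PTR_ERR(). This works because the top MAX_ERRNO values of the address space are never valid kernel pointers.

static struct widget *widget_create(void)
{
    struct widget *w = kzalloc(sizeof(*w), GFP_KERNEL);

    if (!w)
        return ERR_PTR(-ENOMEM);  /* encode -ENOMEM in the pointer value */
    return w;
}

static int widget_setup(void)
{
    struct widget *w = widget_create();

    if (IS_ERR(w))
        return PTR_ERR(w);        /* recover the negative errno */
    /* ... use w ... */
    return 0;
}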

View File

@@ -172,6 +172,7 @@ extern struct i2c_client *i2c_verify_client(struct device *dev);
  * @platform_data: stored in i2c_client.dev.platform_data
  * @archdata: copied into i2c_client.dev.archdata
  * @of_node: pointer to OpenFirmware device node
+ * @acpi_node: ACPI device node
  * @irq: stored in i2c_client.irq
  *
  * I2C doesn't actually support hardware probing, although controllers and

View File

@@ -331,39 +331,6 @@ static inline void writeq(__u64 val, volatile void __iomem *addr)
 #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
-
-struct scatterlist {
-    unsigned long   page_link;
-    unsigned int    offset;
-    unsigned int    length;
-    dma_addr_t      dma_address;
-    unsigned int    dma_length;
-};
-
-struct sg_table {
-    struct scatterlist *sgl;    /* the list */
-    unsigned int nents;         /* number of mapped entries */
-    unsigned int orig_nents;    /* original size of list */
-};
-
-#define SG_MAX_SINGLE_ALLOC    (4096 / sizeof(struct scatterlist))
-
-struct scatterlist *sg_next(struct scatterlist *sg);
-
-#define sg_dma_address(sg)  ((sg)->dma_address)
-#define sg_dma_len(sg)      ((sg)->length)
-
-#define sg_is_chain(sg)     ((sg)->page_link & 0x01)
-#define sg_is_last(sg)      ((sg)->page_link & 0x02)
-#define sg_chain_ptr(sg)    \
-    ((struct scatterlist *) ((sg)->page_link & ~0x03))
-
-static inline addr_t sg_page(struct scatterlist *sg)
-{
-    return (addr_t)((sg)->page_link & ~0x3);
-}
-
-#define for_each_sg(sglist, sg, nr, __i)    \
-    for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
@@ -372,6 +339,7 @@ struct page
     unsigned int addr;
 };
+#define page_to_phys(page)    ((dma_addr_t)(page))

 struct vm_fault {
     unsigned int flags;     /* FAULT_FLAG_xxx flags */
@@ -390,5 +358,9 @@ struct pagelist {
     unsigned int nents;
 };
+
+#define page_cache_release(page)    FreePage((addr_t)(page))
+
+#define alloc_page(gfp_mask)        (struct page*)AllocPage()
+
 #endif

View File

@@ -498,14 +498,17 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
 #  define rwsem_acquire(l, s, t, i)        lock_acquire(l, s, t, 0, 2, NULL, i)
+#  define rwsem_acquire_nest(l, s, t, n, i)    lock_acquire(l, s, t, 0, 2, n, i)
 #  define rwsem_acquire_read(l, s, t, i)   lock_acquire(l, s, t, 1, 2, NULL, i)
 # else
 #  define rwsem_acquire(l, s, t, i)        lock_acquire(l, s, t, 0, 1, NULL, i)
+#  define rwsem_acquire_nest(l, s, t, n, i)    lock_acquire(l, s, t, 0, 1, n, i)
 #  define rwsem_acquire_read(l, s, t, i)   lock_acquire(l, s, t, 1, 1, NULL, i)
 # endif
 # define rwsem_release(l, n, i)            lock_release(l, n, i)
 #else
 # define rwsem_acquire(l, s, t, i)         do { } while (0)
+# define rwsem_acquire_nest(l, s, t, n, i) do { } while (0)
 # define rwsem_acquire_read(l, s, t, i)    do { } while (0)
 # define rwsem_release(l, n, i)            do { } while (0)
 #endif

View File

@@ -0,0 +1 @@

View File

@@ -9,6 +9,7 @@
 #include <linux/list.h>
 #include <linux/compiler.h>
 #include <linux/kernel.h>
+#include <linux/export.h>

View File

@@ -0,0 +1,84 @@
+/*
+  Red Black Trees
+  (C) 1999  Andrea Arcangeli <andrea@suse.de>
+
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 2 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+  linux/include/linux/rbtree.h
+
+  To use rbtrees you'll have to implement your own insert and search cores.
+  This will avoid us to use callbacks and to drop drammatically performances.
+  I know it's not the cleaner way,  but in C (not in C++) to get
+  performances and genericity...
+
+  See Documentation/rbtree.txt for documentation and samples.
+*/
+
+#ifndef _LINUX_RBTREE_H
+#define _LINUX_RBTREE_H
+
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+
+struct rb_node {
+    unsigned long  __rb_parent_color;
+    struct rb_node *rb_right;
+    struct rb_node *rb_left;
+} __attribute__((aligned(sizeof(long))));
+/* The alignment might seem pointless, but allegedly CRIS needs it */
+
+struct rb_root {
+    struct rb_node *rb_node;
+};
+
+#define rb_parent(r)   ((struct rb_node *)((r)->__rb_parent_color & ~3))
+
+#define RB_ROOT (struct rb_root) { NULL, }
+#define rb_entry(ptr, type, member) container_of(ptr, type, member)
+
+#define RB_EMPTY_ROOT(root)  ((root)->rb_node == NULL)
+
+/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */
+#define RB_EMPTY_NODE(node)  \
+    ((node)->__rb_parent_color == (unsigned long)(node))
+#define RB_CLEAR_NODE(node)  \
+    ((node)->__rb_parent_color = (unsigned long)(node))
+
+extern void rb_insert_color(struct rb_node *, struct rb_root *);
+extern void rb_erase(struct rb_node *, struct rb_root *);
+
+/* Find logical next and previous nodes in a tree */
+extern struct rb_node *rb_next(const struct rb_node *);
+extern struct rb_node *rb_prev(const struct rb_node *);
+extern struct rb_node *rb_first(const struct rb_root *);
+extern struct rb_node *rb_last(const struct rb_root *);
+
+/* Fast replacement of a single node without remove/rebalance/add/rebalance */
+extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
+                            struct rb_root *root);
+
+static inline void rb_link_node(struct rb_node * node, struct rb_node * parent,
+                                struct rb_node ** rb_link)
+{
+    node->__rb_parent_color = (unsigned long)parent;
+    node->rb_left = node->rb_right = NULL;
+
+    *rb_link = node;
+}
+
+#endif /* _LINUX_RBTREE_H */
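
The header's comment says callers must supply their own insert core; a sketch adapted from Documentation/rbtree.txt (struct mytype and its int key are placeholders):

struct mytype {
    struct rb_node node;
    int key;
};

static int my_insert(struct rb_root *root, struct mytype *data)
{
    struct rb_node **new = &(root->rb_node), *parent = NULL;

    /* Figure out where to put the new node */
    while (*new) {
        struct mytype *this = rb_entry(*new, struct mytype, node);

        parent = *new;
        if (data->key < this->key)
            new = &((*new)->rb_left);
        else if (data->key > this->key)
            new = &((*new)->rb_right);
        else
            return 0;  /* key already present */
    }

    /* Add new node and rebalance the tree */
    rb_link_node(&data->node, parent, new);
    rb_insert_color(&data->node, root);
    return 1;
}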

View File

@@ -0,0 +1,125 @@
+#ifndef __LINUX_RWLOCK_H
+#define __LINUX_RWLOCK_H
+
+#ifndef __LINUX_SPINLOCK_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * rwlock related methods
+ *
+ * split out from spinlock.h
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ extern void __rwlock_init(rwlock_t *lock, const char *name,
+                           struct lock_class_key *key);
+# define rwlock_init(lock)                      \
+do {                                            \
+    static struct lock_class_key __key;         \
+                                                \
+    __rwlock_init((lock), #lock, &__key);       \
+} while (0)
+#else
+# define rwlock_init(lock)                      \
+    do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
+#define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)
+ extern int do_raw_read_trylock(rwlock_t *lock);
+ extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
+ extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
+#define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock)
+ extern int do_raw_write_trylock(rwlock_t *lock);
+ extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock);
+#else
+# define do_raw_read_lock(rwlock)   do {__acquire(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0)
+# define do_raw_read_lock_flags(lock, flags) \
+        do {__acquire(lock); arch_read_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
+# define do_raw_read_trylock(rwlock)    arch_read_trylock(&(rwlock)->raw_lock)
+# define do_raw_read_unlock(rwlock) do {arch_read_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
+# define do_raw_write_lock(rwlock)  do {__acquire(lock); arch_write_lock(&(rwlock)->raw_lock); } while (0)
+# define do_raw_write_lock_flags(lock, flags) \
+        do {__acquire(lock); arch_write_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
+# define do_raw_write_trylock(rwlock)   arch_write_trylock(&(rwlock)->raw_lock)
+# define do_raw_write_unlock(rwlock)    do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
+#endif
+
+#define read_can_lock(rwlock)       arch_read_can_lock(&(rwlock)->raw_lock)
+#define write_can_lock(rwlock)      arch_write_can_lock(&(rwlock)->raw_lock)
+
+/*
+ * Define the various rw_lock methods.  Note we define these
+ * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
+ * methods are defined as nops in the case they are not required.
+ */
+#define read_trylock(lock)  __cond_lock(lock, _raw_read_trylock(lock))
+#define write_trylock(lock) __cond_lock(lock, _raw_write_trylock(lock))
+
+#define write_lock(lock)    _raw_write_lock(lock)
+#define read_lock(lock)     _raw_read_lock(lock)
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+
+#define read_lock_irqsave(lock, flags)              \
+    do {                                            \
+        typecheck(unsigned long, flags);            \
+        flags = _raw_read_lock_irqsave(lock);       \
+    } while (0)
+#define write_lock_irqsave(lock, flags)             \
+    do {                                            \
+        typecheck(unsigned long, flags);            \
+        flags = _raw_write_lock_irqsave(lock);      \
+    } while (0)
+
+#else
+
+#define read_lock_irqsave(lock, flags)              \
+    do {                                            \
+        typecheck(unsigned long, flags);            \
+        _raw_read_lock_irqsave(lock, flags);        \
+    } while (0)
+#define write_lock_irqsave(lock, flags)             \
+    do {                                            \
+        typecheck(unsigned long, flags);            \
+        _raw_write_lock_irqsave(lock, flags);       \
+    } while (0)
+
+#endif
+
+#define read_lock_irq(lock)     _raw_read_lock_irq(lock)
+#define read_lock_bh(lock)      _raw_read_lock_bh(lock)
+#define write_lock_irq(lock)    _raw_write_lock_irq(lock)
+#define write_lock_bh(lock)     _raw_write_lock_bh(lock)
+#define read_unlock(lock)       _raw_read_unlock(lock)
+#define write_unlock(lock)      _raw_write_unlock(lock)
+#define read_unlock_irq(lock)   _raw_read_unlock_irq(lock)
+#define write_unlock_irq(lock)  _raw_write_unlock_irq(lock)
+
+#define read_unlock_irqrestore(lock, flags)         \
+    do {                                            \
+        typecheck(unsigned long, flags);            \
+        _raw_read_unlock_irqrestore(lock, flags);   \
+    } while (0)
+#define read_unlock_bh(lock)    _raw_read_unlock_bh(lock)
+
+#define write_unlock_irqrestore(lock, flags)        \
+    do {                                            \
+        typecheck(unsigned long, flags);            \
+        _raw_write_unlock_irqrestore(lock, flags);  \
+    } while (0)
+#define write_unlock_bh(lock)   _raw_write_unlock_bh(lock)
+
+#define write_trylock_irqsave(lock, flags) \
+({ \
+    local_irq_save(flags); \
+    write_trylock(lock) ? \
+    1 : ({ local_irq_restore(flags); 0; }); \
+})
+
+#endif /* __LINUX_RWLOCK_H */
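
A minimal usage sketch (assumes DEFINE_RWLOCK() from the companion rwlock_types.h): readers run concurrently, writers are exclusive, and the IRQ-saving variants are used where interrupt context may take the lock.

static DEFINE_RWLOCK(conf_lock);
static int conf_value;

static int conf_read(void)
{
    int v;

    read_lock(&conf_lock);          /* shared with other readers */
    v = conf_value;
    read_unlock(&conf_lock);
    return v;
}

static void conf_write(int v)
{
    unsigned long flags;

    write_lock_irqsave(&conf_lock, flags);  /* exclusive, IRQ-safe */
    conf_value = v;
    write_unlock_irqrestore(&conf_lock, flags);
}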

View File

@@ -0,0 +1,274 @@
+#ifndef _LINUX_SCATTERLIST_H
+#define _LINUX_SCATTERLIST_H
+
+#include <linux/string.h>
+#include <linux/bug.h>
+#include <linux/mm.h>
+
+#include <asm/types.h>
+#include <asm/scatterlist.h>
+//#include <asm/io.h>
+
+struct sg_table {
+    struct scatterlist *sgl;    /* the list */
+    unsigned int nents;         /* number of mapped entries */
+    unsigned int orig_nents;    /* original size of list */
+};
+
+/*
+ * Notes on SG table design.
+ *
+ * Architectures must provide an unsigned long page_link field in the
+ * scatterlist struct. We use that to place the page pointer AND encode
+ * information about the sg table as well. The two lower bits are reserved
+ * for this information.
+ *
+ * If bit 0 is set, then the page_link contains a pointer to the next sg
+ * table list. Otherwise the next entry is at sg + 1.
+ *
+ * If bit 1 is set, then this sg entry is the last element in a list.
+ *
+ * See sg_next().
+ *
+ */
+
+#define SG_MAGIC    0x87654321
+
+/*
+ * We overload the LSB of the page pointer to indicate whether it's
+ * a valid sg entry, or whether it points to the start of a new scatterlist.
+ * Those low bits are there for everyone! (thanks mason :-)
+ */
+#define sg_is_chain(sg)     ((sg)->page_link & 0x01)
+#define sg_is_last(sg)      ((sg)->page_link & 0x02)
+#define sg_chain_ptr(sg)    \
+    ((struct scatterlist *) ((sg)->page_link & ~0x03))
+
+/**
+ * sg_assign_page - Assign a given page to an SG entry
+ * @sg:     SG entry
+ * @page:   The page
+ *
+ * Description:
+ *   Assign page to sg entry. Also see sg_set_page(), the most commonly used
+ *   variant.
+ *
+ **/
+static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
+{
+    unsigned long page_link = sg->page_link & 0x3;
+
+    /*
+     * In order for the low bit stealing approach to work, pages
+     * must be aligned at a 32-bit boundary as a minimum.
+     */
+    BUG_ON((unsigned long) page & 0x03);
+#ifdef CONFIG_DEBUG_SG
+    BUG_ON(sg->sg_magic != SG_MAGIC);
+    BUG_ON(sg_is_chain(sg));
+#endif
+    sg->page_link = page_link | (unsigned long) page;
+}
+
+/**
+ * sg_set_page - Set sg entry to point at given page
+ * @sg:      SG entry
+ * @page:    The page
+ * @len:     Length of data
+ * @offset:  Offset into page
+ *
+ * Description:
+ *   Use this function to set an sg entry pointing at a page, never assign
+ *   the page directly. We encode sg table information in the lower bits
+ *   of the page pointer. See sg_page() for looking up the page belonging
+ *   to an sg entry.
+ *
+ **/
+static inline void sg_set_page(struct scatterlist *sg, struct page *page,
+                               unsigned int len, unsigned int offset)
+{
+    sg_assign_page(sg, page);
+    sg->offset = offset;
+    sg->length = len;
+}
+
+static inline struct page *sg_page(struct scatterlist *sg)
+{
+#ifdef CONFIG_DEBUG_SG
+    BUG_ON(sg->sg_magic != SG_MAGIC);
+    BUG_ON(sg_is_chain(sg));
+#endif
+    return (struct page *)((sg)->page_link & ~0x3);
+}
+
+/**
+ * sg_set_buf - Set sg entry to point at given data
+ * @sg:      SG entry
+ * @buf:     Data
+ * @buflen:  Data length
+ *
+ **/
+//static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
+//                  unsigned int buflen)
+//{
+//  sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
+//}
+
+/*
+ * Loop over each sg element, following the pointer to a new list if necessary
+ */
+#define for_each_sg(sglist, sg, nr, __i)    \
+    for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
+
+/**
+ * sg_chain - Chain two sglists together
+ * @prv:        First scatterlist
+ * @prv_nents:  Number of entries in prv
+ * @sgl:        Second scatterlist
+ *
+ * Description:
+ *   Links @prv@ and @sgl@ together, to form a longer scatterlist.
+ *
+ **/
+static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
+                            struct scatterlist *sgl)
+{
+#ifndef ARCH_HAS_SG_CHAIN
+    BUG();
+#endif
+
+    /*
+     * offset and length are unused for chain entry.  Clear them.
+     */
+    prv[prv_nents - 1].offset = 0;
+    prv[prv_nents - 1].length = 0;
+
+    /*
+     * Set lowest bit to indicate a link pointer, and make sure to clear
+     * the termination bit if it happens to be set.
+     */
+    prv[prv_nents - 1].page_link = ((unsigned long) sgl | 0x01) & ~0x02;
+}
+
+/**
+ * sg_mark_end - Mark the end of the scatterlist
+ * @sg:      SG entry
+ *
+ * Description:
+ *   Marks the passed in sg entry as the termination point for the sg
+ *   table. A call to sg_next() on this entry will return NULL.
+ *
+ **/
+static inline void sg_mark_end(struct scatterlist *sg)
+{
+#ifdef CONFIG_DEBUG_SG
+    BUG_ON(sg->sg_magic != SG_MAGIC);
+#endif
+    /*
+     * Set termination bit, clear potential chain bit
+     */
+    sg->page_link |= 0x02;
+    sg->page_link &= ~0x01;
+}
+
+/**
+ * sg_phys - Return physical address of an sg entry
+ * @sg:      SG entry
+ *
+ * Description:
+ *   This calls page_to_phys() on the page in this sg entry, and adds the
+ *   sg offset. The caller must know that it is legal to call page_to_phys()
+ *   on the sg page.
+ *
+ **/
+static inline dma_addr_t sg_phys(struct scatterlist *sg)
+{
+    return page_to_phys(sg_page(sg)) + sg->offset;
+}
+
+/**
+ * sg_virt - Return virtual address of an sg entry
+ * @sg:      SG entry
+ *
+ * Description:
+ *   This calls page_address() on the page in this sg entry, and adds the
+ *   sg offset. The caller must know that the sg page has a valid virtual
+ *   mapping.
+ *
+ **/
+//static inline void *sg_virt(struct scatterlist *sg)
+//{
+//  return page_address(sg_page(sg)) + sg->offset;
+//}
+
+int sg_nents(struct scatterlist *sg);
+struct scatterlist *sg_next(struct scatterlist *);
+struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
+void sg_init_table(struct scatterlist *, unsigned int);
+void sg_init_one(struct scatterlist *, const void *, unsigned int);
+
+typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
+typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
+
+void __sg_free_table(struct sg_table *, unsigned int, sg_free_fn *);
+void sg_free_table(struct sg_table *);
+int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, gfp_t,
+                     sg_alloc_fn *);
+int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
+int sg_alloc_table_from_pages(struct sg_table *sgt,
+                              struct page **pages, unsigned int n_pages,
+                              unsigned long offset, unsigned long size,
+                              gfp_t gfp_mask);
+
+size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
+                           void *buf, size_t buflen);
+size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
+                         void *buf, size_t buflen);
+
+/*
+ * Maximum number of entries that will be allocated in one piece, if
+ * a list larger than this is required then chaining will be utilized.
+ */
+#define SG_MAX_SINGLE_ALLOC     (PAGE_SIZE / sizeof(struct scatterlist))
+
+/*
+ * Mapping sg iterator
+ *
+ * Iterates over sg entries mapping page-by-page.  On each successful
+ * iteration, @miter->page points to the mapped page and
+ * @miter->length bytes of data can be accessed at @miter->addr.  As
+ * long as an iteration is enclosed between start and stop, the user
+ * is free to choose control structure and when to stop.
+ *
+ * @miter->consumed is set to @miter->length on each iteration.  It
+ * can be adjusted if the user can't consume all the bytes in one go.
+ * Also, a stopped iteration can be resumed by calling next on it.
+ * This is useful when iteration needs to release all resources and
+ * continue later (e.g. at the next interrupt).
+ */
+
+#define SG_MITER_ATOMIC     (1 << 0)     /* use kmap_atomic */
+#define SG_MITER_TO_SG      (1 << 1)    /* flush back to phys on unmap */
+#define SG_MITER_FROM_SG    (1 << 2)    /* nop */
+
+struct sg_mapping_iter {
+    /* the following three fields can be accessed directly */
+    struct page     *page;      /* currently mapped page */
+    void            *addr;      /* pointer to the mapped area */
+    size_t          length;     /* length of the mapped area */
+    size_t          consumed;   /* number of consumed bytes */
+
+    /* these are internal states, keep away */
+    struct scatterlist  *__sg;      /* current entry */
+    unsigned int        __nents;    /* nr of remaining entries */
+    unsigned int        __offset;   /* offset within sg */
+    unsigned int        __flags;
+};
+
+void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
+                    unsigned int nents, unsigned int flags);
+bool sg_miter_next(struct sg_mapping_iter *miter);
+void sg_miter_stop(struct sg_mapping_iter *miter);
+
+#endif /* _LINUX_SCATTERLIST_H */
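
Two short caller-side sketches for the API above (hypothetical code, not part of this commit). First, building a table from an array of pages and walking it with for_each_sg():

static int fill_table(struct page **pages, unsigned int n)
{
    struct sg_table st;
    struct scatterlist *sg;
    unsigned int i;
    int ret;

    ret = sg_alloc_table(&st, n, GFP_KERNEL);
    if (ret)
        return ret;

    for_each_sg(st.sgl, sg, st.nents, i)
        sg_set_page(sg, pages[i], PAGE_SIZE, 0);  /* one full page each */

    /* ... hand &st to dma_map_sg() or intel_gtt_insert_sg_entries() ... */

    sg_free_table(&st);
    return 0;
}

Second, the start/next/stop protocol of the mapping iterator described above, copying a flat buffer into an sg list page by page:

static size_t copy_into_sg(struct scatterlist *sgl, unsigned int nents,
                           const void *buf, size_t buflen)
{
    struct sg_mapping_iter miter;
    size_t off = 0;

    sg_miter_start(&miter, sgl, nents, SG_MITER_TO_SG);
    while (off < buflen && sg_miter_next(&miter)) {
        size_t n = min(miter.length, buflen - off);

        memcpy(miter.addr, (const char *)buf + off, n);
        miter.consumed = n;   /* we may use less than miter.length */
        off += n;
    }
    sg_miter_stop(&miter);    /* flush and release the mapping */
    return off;
}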

View File

@@ -1,2 +1,3 @@
+#include <errno.h>
 // stub

View File

@@ -17,7 +17,7 @@
 #include <linux/lockdep.h>

-typedef struct {
+typedef struct spinlock {
     raw_spinlock_t raw_lock;
 #ifdef CONFIG_GENERIC_LOCKBREAK
     unsigned int break_lock;

View File

@@ -516,11 +516,11 @@ static inline int power_supply_is_system_supplied(void) { return -1; }
 #define RWSEM_ACTIVE_WRITE_BIAS     (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

-static void init_rwsem(struct rw_semaphore *sem)
-{
-    sem->count = RWSEM_UNLOCKED_VALUE;
-    spin_lock_init(&sem->wait_lock);
-    INIT_LIST_HEAD(&sem->wait_list);
-}
+//static void init_rwsem(struct rw_semaphore *sem)
+//{
+//    sem->count = RWSEM_UNLOCKED_VALUE;
+//    spin_lock_init(&sem->wait_lock);
+//    INIT_LIST_HEAD(&sem->wait_list);
+//}
 #endif

View File

@@ -135,8 +135,10 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
         if (connector->funcs->force)
             connector->funcs->force(connector);
     } else {
+//        dbgprintf("call detect funcs %p ", connector->funcs);
+//        dbgprintf("detect %p\n", connector->funcs->detect);
         connector->status = connector->funcs->detect(connector, true);
-//        drm_kms_helper_poll_enable(dev);
+//        dbgprintf("status %x\n", connector->status);
     }

     if (connector->status == connector_status_disconnected) {
@@ -296,7 +298,6 @@ void drm_helper_disable_unused_functions(struct drm_device *dev)
             crtc->fb = NULL;
         }
     }
 }
 EXPORT_SYMBOL(drm_helper_disable_unused_functions);

View File

@@ -61,8 +61,6 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
     struct drm_connector *connector;
     int i;

-    ENTER();
-
     list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
         struct drm_fb_helper_connector *fb_helper_connector;
@@ -73,7 +71,6 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
         fb_helper_connector->connector = connector;
         fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
     }
-    LEAVE();
     return 0;
 fail:
     for (i = 0; i < fb_helper->connector_count; i++) {
@@ -81,7 +78,6 @@ fail:
         fb_helper->connector_info[i] = NULL;
     }
     fb_helper->connector_count = 0;
-    FAIL();
     return -ENOMEM;
 }
 EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
@@ -194,26 +190,18 @@ int drm_fb_helper_init(struct drm_device *dev,
     struct drm_crtc *crtc;
     int i;

-    ENTER();
-    dbgprintf("crtc_count %d max_conn_count %d\n", crtc_count, max_conn_count);
-
     fb_helper->dev = dev;

     INIT_LIST_HEAD(&fb_helper->kernel_fb_list);

     fb_helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
     if (!fb_helper->crtc_info)
-    {
-        FAIL();
         return -ENOMEM;
-    };
     fb_helper->crtc_count = crtc_count;
     fb_helper->connector_info = kcalloc(dev->mode_config.num_connector, sizeof(struct drm_fb_helper_connector *), GFP_KERNEL);
     if (!fb_helper->connector_info) {
         kfree(fb_helper->crtc_info);
-        FAIL();
         return -ENOMEM;
     }
     fb_helper->connector_count = 0;
@@ -235,11 +223,9 @@ int drm_fb_helper_init(struct drm_device *dev,
         i++;
     }
-    LEAVE();
     return 0;
 out_free:
     drm_fb_helper_crtc_free(fb_helper);
-    FAIL();
     return -ENOMEM;
 }
 EXPORT_SYMBOL(drm_fb_helper_init);
@@ -599,8 +585,8 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,

     if (new_fb) {
         info->var.pixclock = 0;
-        printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
-               info->fix.id);
+        dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
+                 info->node, info->fix.id);
     } else {
         drm_fb_helper_set_par(info);
@@ -996,9 +982,6 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
 {
     struct drm_device *dev = fb_helper->dev;
     int count = 0;
-    bool ret;
-
-    ENTER();

     /* disable all the possible outputs/crtcs before entering KMS mode */
     drm_helper_disable_unused_functions(fb_helper->dev);
@@ -1016,8 +999,7 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
     drm_setup_crtcs(fb_helper);

-    ret = drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
-    LEAVE();
+    return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
 }
 EXPORT_SYMBOL(drm_fb_helper_initial_config);

View File

@@ -62,12 +62,6 @@
 #define I810_PTE_LOCAL          0x00000002
 #define I810_PTE_VALID          0x00000001
 #define I830_PTE_SYSTEM_CACHED  0x00000006
-/* GT PTE cache control fields */
-#define GEN6_PTE_UNCACHED       0x00000002
-#define HSW_PTE_UNCACHED        0x00000000
-#define GEN6_PTE_LLC            0x00000004
-#define GEN6_PTE_LLC_MLC        0x00000006
-#define GEN6_PTE_GFDT           0x00000008

 #define I810_SMRAM_MISCC        0x70
 #define I810_GFX_MEM_WIN_SIZE   0x00010000
@@ -97,7 +91,6 @@
 #define G4x_GMCH_SIZE_VT_2M     (G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN)

 #define GFX_FLSH_CNTL           0x2170 /* 915+ */
-#define GFX_FLSH_CNTL_VLV       0x101008

 #define I810_DRAM_CTL           0x3000
 #define I810_DRAM_ROW_0         0x00000001
@@ -148,29 +141,6 @@
 #define INTEL_I7505_AGPCTRL     0x70
 #define INTEL_I7505_MCHCFG      0x50

-#define SNB_GMCH_CTRL                   0x50
-#define SNB_GMCH_GMS_STOLEN_MASK        0xF8
-#define SNB_GMCH_GMS_STOLEN_32M         (1 << 3)
-#define SNB_GMCH_GMS_STOLEN_64M         (2 << 3)
-#define SNB_GMCH_GMS_STOLEN_96M         (3 << 3)
-#define SNB_GMCH_GMS_STOLEN_128M        (4 << 3)
-#define SNB_GMCH_GMS_STOLEN_160M        (5 << 3)
-#define SNB_GMCH_GMS_STOLEN_192M        (6 << 3)
-#define SNB_GMCH_GMS_STOLEN_224M        (7 << 3)
-#define SNB_GMCH_GMS_STOLEN_256M        (8 << 3)
-#define SNB_GMCH_GMS_STOLEN_288M        (9 << 3)
-#define SNB_GMCH_GMS_STOLEN_320M        (0xa << 3)
-#define SNB_GMCH_GMS_STOLEN_352M        (0xb << 3)
-#define SNB_GMCH_GMS_STOLEN_384M        (0xc << 3)
-#define SNB_GMCH_GMS_STOLEN_416M        (0xd << 3)
-#define SNB_GMCH_GMS_STOLEN_448M        (0xe << 3)
-#define SNB_GMCH_GMS_STOLEN_480M        (0xf << 3)
-#define SNB_GMCH_GMS_STOLEN_512M        (0x10 << 3)
-#define SNB_GTT_SIZE_0M                 (0 << 8)
-#define SNB_GTT_SIZE_1M                 (1 << 8)
-#define SNB_GTT_SIZE_2M                 (2 << 8)
-#define SNB_GTT_SIZE_MASK               (3 << 8)

 /* pci devices ids */
 #define PCI_DEVICE_ID_INTEL_E7221_HB    0x2588
 #define PCI_DEVICE_ID_INTEL_E7221_IG    0x258a
@@ -219,66 +189,5 @@
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB      0x0062
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB     0x006a
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG       0x0046
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB              0x0100  /* Desktop */
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG          0x0102
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG          0x0112
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG     0x0122
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB            0x0104  /* Mobile */
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG        0x0106
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG        0x0116
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG   0x0126
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB            0x0108  /* Server */
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG            0x010A
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB                0x0150  /* Desktop */
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG            0x0152
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG            0x0162
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB              0x0154  /* Mobile */
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG          0x0156
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG          0x0166
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB              0x0158  /* Server */
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG          0x015A
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG          0x016A
-#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB               0x0F00  /* VLV1 */
-#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG               0x0F30
-#define PCI_DEVICE_ID_INTEL_HASWELL_HB                  0x0400  /* Desktop */
-#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG            0x0402
-#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG            0x0412
-#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG       0x0422
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB                0x0404  /* Mobile */
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG            0x0406
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG            0x0416
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG       0x0426
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB                0x0408  /* Server */
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG            0x040a
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG            0x041a
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG       0x042a
-#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB                0x0c04
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG        0x0C02
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG        0x0C12
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG   0x0C22
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG        0x0C06
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG        0x0C16
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG   0x0C26
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG        0x0C0A
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG        0x0C1A
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG   0x0C2A
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG        0x0A02
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG        0x0A12
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG   0x0A22
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG        0x0A06
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG        0x0A16
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG   0x0A26
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG        0x0A0A
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG        0x0A1A
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG   0x0A2A
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG        0x0D12
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG        0x0D22
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG   0x0D32
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG        0x0D16
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG        0x0D26
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG   0x0D36
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG        0x0D1A
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG        0x0D2A
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG   0x0D3A

 #endif

View File

@@ -20,13 +20,15 @@
 #include <linux/pci.h>
 #include <linux/kernel.h>
 #include <linux/export.h>
+#include <linux/scatterlist.h>
 //#include <linux/pagemap.h>
 //#include <linux/agp_backend.h>
 //#include <asm/smp.h>
 #include <linux/spinlock.h>
 #include "agp.h"
 #include "intel-agp.h"
-#include "intel-gtt.h"
+#include <drm/intel-gtt.h>

 #include <syscall.h>
@@ -110,14 +112,15 @@ static struct _intel_private {

 static int intel_gtt_setup_scratch_page(void)
 {
+    struct page *page;
     dma_addr_t dma_addr;

-    dma_addr = AllocPage();
-    if (dma_addr == 0)
+    page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+    if (page == NULL)
         return -ENOMEM;
-
-    intel_private.base.scratch_page_dma = dma_addr;
-    intel_private.scratch_page = NULL;
+    intel_private.base.scratch_page_dma = page_to_phys(page);
+    intel_private.scratch_page = page;

     return 0;
 }
@@ -158,62 +161,6 @@ static unsigned int intel_gtt_stolen_size(void)
             stolen_size = 0;
             break;
         }
-    } else if (INTEL_GTT_GEN == 6) {
-        /*
-         * SandyBridge has new memory control reg at 0x50.w
-         */
-        u16 snb_gmch_ctl;
-        pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
-        switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
-        case SNB_GMCH_GMS_STOLEN_32M:
-            stolen_size = MB(32);
-            break;
-        case SNB_GMCH_GMS_STOLEN_64M:
-            stolen_size = MB(64);
-            break;
-        case SNB_GMCH_GMS_STOLEN_96M:
-            stolen_size = MB(96);
-            break;
-        case SNB_GMCH_GMS_STOLEN_128M:
-            stolen_size = MB(128);
-            break;
-        case SNB_GMCH_GMS_STOLEN_160M:
-            stolen_size = MB(160);
-            break;
-        case SNB_GMCH_GMS_STOLEN_192M:
-            stolen_size = MB(192);
-            break;
-        case SNB_GMCH_GMS_STOLEN_224M:
-            stolen_size = MB(224);
-            break;
-        case SNB_GMCH_GMS_STOLEN_256M:
-            stolen_size = MB(256);
-            break;
-        case SNB_GMCH_GMS_STOLEN_288M:
-            stolen_size = MB(288);
-            break;
-        case SNB_GMCH_GMS_STOLEN_320M:
-            stolen_size = MB(320);
-            break;
-        case SNB_GMCH_GMS_STOLEN_352M:
-            stolen_size = MB(352);
-            break;
-        case SNB_GMCH_GMS_STOLEN_384M:
-            stolen_size = MB(384);
-            break;
-        case SNB_GMCH_GMS_STOLEN_416M:
-            stolen_size = MB(416);
-            break;
-        case SNB_GMCH_GMS_STOLEN_448M:
-            stolen_size = MB(448);
-            break;
-        case SNB_GMCH_GMS_STOLEN_480M:
-            stolen_size = MB(480);
-            break;
-        case SNB_GMCH_GMS_STOLEN_512M:
-            stolen_size = MB(512);
-            break;
-        }
     } else {
         switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
         case I855_GMCH_GMS_STOLEN_1M:
@@ -347,29 +294,9 @@ static unsigned int i965_gtt_total_entries(void)

 static unsigned int intel_gtt_total_entries(void)
 {
-    int size;
-
     if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
         return i965_gtt_total_entries();
-    else if (INTEL_GTT_GEN == 6) {
-        u16 snb_gmch_ctl;
-
-        pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
-        switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
-        default:
-        case SNB_GTT_SIZE_0M:
-            printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
-            size = MB(0);
-            break;
-        case SNB_GTT_SIZE_1M:
-            size = MB(1);
-            break;
-        case SNB_GTT_SIZE_2M:
-            size = MB(2);
-            break;
-        }
-        return size/4;
-    } else {
+    else {
         /* On previous hardware, the GTT size was just what was
          * required to map the aperture.
          */
@@ -433,10 +360,7 @@ static int intel_gtt_init(void)

     ret = intel_private.driver->setup();
     if (ret != 0)
-    {
         return ret;
-    };

     intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
     intel_private.base.gtt_total_entries = intel_gtt_total_entries();
@@ -457,9 +381,6 @@ static int intel_gtt_init(void)
     gtt_map_size = intel_private.base.gtt_total_entries * 4;

     intel_private.gtt = NULL;
-//    if (INTEL_GTT_GEN < 6 && INTEL_GTT_GEN > 2)
-//        intel_private.gtt = ioremap_wc(intel_private.gtt_bus_addr,
-//                                       gtt_map_size);
     if (intel_private.gtt == NULL)
         intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
                                     gtt_map_size);
@@ -509,9 +430,6 @@ bool intel_enable_gtt(void)
 {
     u8 __iomem *reg;

-    if (INTEL_GTT_GEN >= 6)
-        return true;
-
     if (INTEL_GTT_GEN == 2) {
         u16 gmch_ctrl;
@@ -565,33 +483,39 @@ static bool i830_check_flags(unsigned int flags)

     return false;
 }

-void intel_gtt_insert_sg_entries(struct pagelist *st,
+void intel_gtt_insert_sg_entries(struct sg_table *st,
                                  unsigned int pg_start,
                                  unsigned int flags)
 {
+    struct scatterlist *sg;
+    unsigned int len, m;
     int i, j;

     j = pg_start;

-    for(i = 0; i < st->nents; i++)
-    {
-        dma_addr_t addr = st->page[i];
-        intel_private.driver->write_entry(addr, j, flags);
-        j++;
-    };
+    /* sg may merge pages, but we have to separate
+     * per-page addr for GTT */
+    for_each_sg(st->sgl, sg, st->nents, i) {
+        len = sg_dma_len(sg) >> PAGE_SHIFT;
+        for (m = 0; m < len; m++) {
+            dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+            intel_private.driver->write_entry(addr, j, flags);
+            j++;
+        }
+    }
     readl(intel_private.gtt+j-1);
 }
+EXPORT_SYMBOL(intel_gtt_insert_sg_entries);

 static void intel_gtt_insert_pages(unsigned int first_entry,
                                    unsigned int num_entries,
-                                   dma_addr_t *pages,
+                                   struct page **pages,
                                    unsigned int flags)
 {
     int i, j;

     for (i = 0, j = first_entry; i < num_entries; i++, j++) {
-        dma_addr_t addr = pages[i];
+        dma_addr_t addr = page_to_phys(pages[i]);
         intel_private.driver->write_entry(addr,
                                           j, flags);
     }
@@ -670,85 +594,6 @@ static void i965_write_entry(dma_addr_t addr,
     writel(addr | pte_flags, intel_private.gtt + entry);
 }

-static bool gen6_check_flags(unsigned int flags)
-{
-    return true;
-}
-
-static void haswell_write_entry(dma_addr_t addr, unsigned int entry,
-                                unsigned int flags)
-{
-    unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
-    unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
-    u32 pte_flags;
-
-    if (type_mask == AGP_USER_MEMORY)
-        pte_flags = HSW_PTE_UNCACHED | I810_PTE_VALID;
-    else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
-        pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
-        if (gfdt)
-            pte_flags |= GEN6_PTE_GFDT;
-    } else { /* set 'normal'/'cached' to LLC by default */
-        pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
-        if (gfdt)
-            pte_flags |= GEN6_PTE_GFDT;
-    }
-
-    /* gen6 has bit11-4 for physical addr bit39-32 */
-    addr |= (addr >> 28) & 0xff0;
-    writel(addr | pte_flags, intel_private.gtt + entry);
-}
-
-static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
-                             unsigned int flags)
-{
-    unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
-    unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
-    u32 pte_flags;
-
-    if (type_mask == AGP_USER_MEMORY)
-        pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
-    else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
-        pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
-        if (gfdt)
-            pte_flags |= GEN6_PTE_GFDT;
-    } else { /* set 'normal'/'cached' to LLC by default */
-        pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
-        if (gfdt)
-            pte_flags |= GEN6_PTE_GFDT;
-    }
-
-    /* gen6 has bit11-4 for physical addr bit39-32 */
-    addr |= (addr >> 28) & 0xff0;
-    writel(addr | pte_flags, intel_private.gtt + entry);
-}
-
-static void valleyview_write_entry(dma_addr_t addr, unsigned int entry,
-                                   unsigned int flags)
-{
-    unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
-    unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
-    u32 pte_flags;
-
-    if (type_mask == AGP_USER_MEMORY)
-        pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
-    else {
-        pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
-        if (gfdt)
-            pte_flags |= GEN6_PTE_GFDT;
-    }
-
-    /* gen6 has bit11-4 for physical addr bit39-32 */
-    addr |= (addr >> 28) & 0xff0;
-    writel(addr | pte_flags, intel_private.gtt + entry);
-
-    writel(1, intel_private.registers + GFX_FLSH_CNTL_VLV);
-}
-
-static void gen6_cleanup(void)
-{
-}
-
 /* Certain Gen5 chipsets require require idling the GPU before
  * unmapping anything from the GTT when VT-d is enabled.
  */
@@ -770,42 +615,30 @@ static inline int needs_idle_maps(void)
 static int i9xx_setup(void)
 {
-    u32 reg_addr;
+    u32 reg_addr, gtt_addr;
     int size = KB(512);

     pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);

     reg_addr &= 0xfff80000;

-    if (INTEL_GTT_GEN >= 7)
-        size = MB(2);
-
     intel_private.registers = ioremap(reg_addr, size);
     if (!intel_private.registers)
         return -ENOMEM;

-    if (INTEL_GTT_GEN == 3) {
-        u32 gtt_addr;
-
+    switch (INTEL_GTT_GEN) {
+    case 3:
         pci_read_config_dword(intel_private.pcidev,
                               I915_PTEADDR, &gtt_addr);
         intel_private.gtt_bus_addr = gtt_addr;
-    } else {
-        u32 gtt_offset;
-
-        switch (INTEL_GTT_GEN) {
-        case 5:
-        case 6:
-        case 7:
-            gtt_offset = MB(2);
-            break;
-        case 4:
-        default:
-            gtt_offset = KB(512);
-            break;
-        }
-        intel_private.gtt_bus_addr = reg_addr + gtt_offset;
-    }
+        break;
+    case 5:
+        intel_private.gtt_bus_addr = reg_addr + MB(2);
+        break;
+    default:
+        intel_private.gtt_bus_addr = reg_addr + KB(512);
+        break;
+    }

     if (needs_idle_maps())
         intel_private.base.do_idle_maps = 1;
@@ -875,32 +708,6 @@ static const struct intel_gtt_driver ironlake_gtt_driver = {
     .check_flags = i830_check_flags,
     .chipset_flush = i9xx_chipset_flush,
 };
-static const struct intel_gtt_driver sandybridge_gtt_driver = {
-    .gen = 6,
-    .setup = i9xx_setup,
-    .cleanup = gen6_cleanup,
-    .write_entry = gen6_write_entry,
-    .dma_mask_size = 40,
-    .check_flags = gen6_check_flags,
-    .chipset_flush = i9xx_chipset_flush,
-};
-static const struct intel_gtt_driver haswell_gtt_driver = {
-    .gen = 6,
-    .setup = i9xx_setup,
-    .cleanup = gen6_cleanup,
-    .write_entry = haswell_write_entry,
-    .dma_mask_size = 40,
-    .check_flags = gen6_check_flags,
-    .chipset_flush = i9xx_chipset_flush,
-};
-static const struct intel_gtt_driver valleyview_gtt_driver = {
-    .gen = 7,
-    .setup = i9xx_setup,
-    .cleanup = gen6_cleanup,
-    .write_entry = valleyview_write_entry,
-    .dma_mask_size = 40,
-    .check_flags = gen6_check_flags,
-};

 /* Table to describe Intel GMCH and AGP/PCIE GART drivers.  At least one of
  * driver and gmch_driver must be non-null, and find_gmch will determine
@@ -963,106 +770,6 @@ static const struct intel_gtt_driver_description {
         "HD Graphics", &ironlake_gtt_driver },
     { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
         "HD Graphics", &ironlake_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
-        "Sandybridge", &sandybridge_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
-        "Sandybridge", &sandybridge_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
-        "Sandybridge", &sandybridge_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
-        "Sandybridge", &sandybridge_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
-        "Sandybridge", &sandybridge_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
-        "Sandybridge", &sandybridge_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
-        "Sandybridge", &sandybridge_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG,
-        "Ivybridge", &sandybridge_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG,
-        "Ivybridge", &sandybridge_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG,
-        "Ivybridge", &sandybridge_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG,
-        "Ivybridge", &sandybridge_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG,
-        "Ivybridge", &sandybridge_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG,
-        "Ivybridge", &sandybridge_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG,
-        "ValleyView", &valleyview_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG,
-        "Haswell", &haswell_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG,
-        "Haswell", &haswell_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG,
-        "Haswell", &haswell_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG,
-        "Haswell", &haswell_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG,
-        "Haswell", &haswell_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG,
-        "Haswell", &haswell_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG,
-        "Haswell", &haswell_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG,
-        "Haswell", &haswell_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG,
-        "Haswell", &haswell_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG,
-        "Haswell", &haswell_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG,
-        "Haswell", &haswell_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG,
-        "Haswell", &haswell_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG,
-        "Haswell", &haswell_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG,
-        "Haswell", &haswell_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG,
-        "Haswell", &haswell_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG,
-        "Haswell", &haswell_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG,
-        "Haswell", &haswell_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG,
-        "Haswell", &haswell_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG,
-        "Haswell", &haswell_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG,
-        "Haswell", &haswell_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG,
-        "Haswell", &haswell_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG,
-        "Haswell", &haswell_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG,
-        "Haswell", &haswell_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG,
-        "Haswell", &haswell_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG,
-        "Haswell", &haswell_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG,
-        "Haswell", &haswell_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG,
-        "Haswell", &haswell_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG,
-        "Haswell", &haswell_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG,
-        "Haswell", &haswell_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG,
-        "Haswell", &haswell_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG,
-        "Haswell", &haswell_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG,
-        "Haswell", &haswell_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG,
-        "Haswell", &haswell_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG,
-        "Haswell", &haswell_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG,
-        "Haswell", &haswell_gtt_driver },
-    { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG,
-        "Haswell", &haswell_gtt_driver },
     { 0, NULL, NULL }
 };
@@ -1107,7 +814,7 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,

     intel_private.bridge_dev = bridge_pdev;

-    dbgprintf("Intel %s Chipset\n", intel_gtt_chipsets[i].name);
+    dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);

     mask = intel_private.driver->dma_mask_size;
 //    if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
@@ -1127,7 +834,7 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
 }
 EXPORT_SYMBOL(intel_gmch_probe);

-const struct intel_gtt *intel_gtt_get(void)
+struct intel_gtt *intel_gtt_get(void)
 {
     return &intel_private.base;
 }
@@ -1141,7 +848,5 @@ void intel_gtt_chipset_flush(void)
 EXPORT_SYMBOL(intel_gtt_chipset_flush);

-//phys_addr_t get_bus_addr(void)
-//{
-//    return intel_private.gma_bus_addr;
-//};
+MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_LICENSE("GPL and additional rights");


@@ -6,7 +6,9 @@
#include "hmm.h"
#include "bitmap.h"
- #define DRIVER_CAPS_0 HW_BIT_BLIT;
+ //#define DRIVER_CAPS_0 HW_BIT_BLIT;
+ #define DRIVER_CAPS_0 0
#define DRIVER_CAPS_1 0
struct context *context_map[256];
@@ -231,7 +233,7 @@ int create_surface(struct drm_device *dev, struct io_call_10 *pbitmap)
if (ret)
goto err5;
- obj->mapped = uaddr ;
bitmap->handle = handle;
bitmap->uaddr = uaddr;
@@ -394,7 +396,7 @@ int resize_surface(struct io_call_14 *pbitmap)
FreePage(pages[i]);
pages[i] = 0;
};
DRM_DEBUG("%s release %d pages\n", __FUNCTION__,
bitmap->page_count - page_count);

File diff suppressed because it is too large

@@ -105,32 +105,6 @@ static void i915_write_hws_pga(struct drm_device *dev)
I915_WRITE(HWS_PGA, addr);
}
/**
* Sets up the hardware status page for devices that need a physical address
* in the register.
*/
static int i915_init_phys_hws(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
/* Program Hardware Status Page */
dev_priv->status_page_dmah =
drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
if (!dev_priv->status_page_dmah) {
DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM;
}
memset((void __force __iomem *)dev_priv->status_page_dmah->vaddr,
0, PAGE_SIZE);
i915_write_hws_pga(dev);
DRM_DEBUG_DRIVER("Enabled hardware status page\n");
return 0;
}
/**
* Frees the hardware status page, whether it's a physical address or a virtual
* address set up by the X Server.
@@ -171,7 +145,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
- ring->space = ring->head - (ring->tail + 8);
+ ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
if (ring->space < 0)
ring->space += ring->size;
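The ring-space arithmetic above is easy to get wrong at the wrap point; a small self-contained sketch of the same computation (plain C with made-up sizes, using 64 as the slack value on the assumption that it matches I915_RING_FREE_SPACE in the driver headers):

    /* Worked example of the ring free-space computation above (a sketch,
     * not driver code). The slack keeps head and tail from ever becoming
     * equal on a completely full ring. */
    #include <assert.h>

    #define I915_RING_FREE_SPACE 64 /* assumed driver value */

    static int ring_space(int head, int tail, int size)
    {
        int space = head - (tail + I915_RING_FREE_SPACE);
        if (space < 0)
            space += size; /* tail has wrapped past head */
        return space;
    }

    int main(void)
    {
        /* head behind tail: free space wraps around the buffer end */
        assert(ring_space(256, 4096 - 128, 4096) == 320);
        /* head ahead of tail: plain difference minus the slack */
        assert(ring_space(2048, 1024, 4096) == 960);
        return 0;
    }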
@@ -455,16 +429,16 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
- dev_priv->counter++;
+ dev_priv->dri1.counter++;
- if (dev_priv->counter > 0x7FFFFFFFUL)
+ if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
- dev_priv->counter = 0;
+ dev_priv->dri1.counter = 0;
if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_enqueue = dev_priv->counter;
+ master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
+ OUT_RING(dev_priv->dri1.counter);
OUT_RING(0);
ADVANCE_LP_RING();
}
@@ -606,12 +580,12 @@ static int i915_dispatch_flip(struct drm_device * dev)
ADVANCE_LP_RING();
- master_priv->sarea_priv->last_enqueue = dev_priv->counter++;
+ master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
+ OUT_RING(dev_priv->dri1.counter);
OUT_RING(0);
ADVANCE_LP_RING();
}
@@ -622,10 +596,8 @@ static int i915_dispatch_flip(struct drm_device * dev)
static int i915_quiescent(struct drm_device *dev)
{
- struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
i915_kernel_lost_context(dev);
- return intel_wait_ring_idle(ring);
+ return intel_ring_idle(LP_RING(dev->dev_private));
}
static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -779,21 +751,21 @@ static int i915_emit_irq(struct drm_device * dev)
DRM_DEBUG_DRIVER("\n");
- dev_priv->counter++;
+ dev_priv->dri1.counter++;
- if (dev_priv->counter > 0x7FFFFFFFUL)
+ if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
- dev_priv->counter = 1;
+ dev_priv->dri1.counter = 1;
if (master_priv->sarea_priv)
- master_priv->sarea_priv->last_enqueue = dev_priv->counter;
+ master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
+ OUT_RING(dev_priv->dri1.counter);
OUT_RING(MI_USER_INTERRUPT);
ADVANCE_LP_RING();
}
- return dev_priv->counter;
+ return dev_priv->dri1.counter;
}
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
@@ -824,7 +796,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
if (ret == -EBUSY) {
DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
- READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
+ READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
}
return ret;
@@ -1018,6 +990,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
value = 1;
break;
+ case I915_PARAM_HAS_SECURE_BATCHES:
+ value = capable(CAP_SYS_ADMIN);
+ break;
+ case I915_PARAM_HAS_PINNED_BATCHES:
+ value = 1;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -1074,7 +1052,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_hws_addr_t *hws = data;
- struct intel_ring_buffer *ring = LP_RING(dev_priv);
+ struct intel_ring_buffer *ring;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
@@ -1094,6 +1072,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
+ ring = LP_RING(dev_priv);
ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
dev_priv->dri1.gfx_hws_cpu_addr =
@@ -1299,18 +1278,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
info = (struct intel_device_info *) flags;
#if 0
/* Refuse to load on gen6+ without kms enabled. */
if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
/* i915 has 4 more counters */
dev->counters += 4;
dev->types[6] = _DRM_STAT_IRQ;
dev->types[7] = _DRM_STAT_PRIMARY;
dev->types[8] = _DRM_STAT_SECONDARY;
dev->types[9] = _DRM_STAT_DMA;
#endif
dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
if (dev_priv == NULL)
@@ -1327,26 +1294,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto free_priv;
}
- ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
- if (!ret) {
- DRM_ERROR("failed to set up gmch\n");
- ret = -EIO;
+ ret = i915_gem_gtt_init(dev);
+ if (ret)
goto put_bridge;
- }
- dev_priv->mm.gtt = intel_gtt_get();
- if (!dev_priv->mm.gtt) {
- DRM_ERROR("Failed to initialize GTT\n");
- ret = -ENODEV;
- goto put_gmch;
- }
pci_set_master(dev->pdev);
/* overlay on gen2 is broken and can't address above 1G */
- // if (IS_GEN2(dev))
- // dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
/* 965GM sometimes incorrectly writes to hardware status page (HWS)
* using 32bit addressing, overwriting memory if HWS is located
@@ -1356,8 +1311,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
* behaviour if any general state is accessed within a page above 4GB,
* which also needs to be handled carefully.
*/
- // if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
- // dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
mmio_bar = IS_GEN2(dev) ? 1 : 0;
/* Before gen4, the registers and the GTT are behind different BARs.
@@ -1382,11 +1335,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
dev_priv->mm.gtt_base_addr = dev_priv->mm.gtt->gma_bus_addr;
- DRM_INFO("gtt_base_addr %x aperture_size %d\n",
- dev_priv->mm.gtt_base_addr, aperture_size );
- // i915_mtrr_setup(dev_priv, dev_priv->mm.gtt_base_addr,
- // aperture_size);
/* The i915 workqueue is primarily used for batched retirement of
* requests (and thus managing bo) once the task has been completed
@@ -1419,18 +1368,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_setup_gmbus(dev);
intel_opregion_setup(dev);
- /* Make sure the bios did its job and set up vital registers */
intel_setup_bios(dev);
i915_gem_load(dev);
- /* Init HWS */
- if (!I915_NEED_GFX_HWS(dev)) {
- ret = i915_init_phys_hws(dev);
- if (ret)
- goto out_gem_unload;
- }
/* On the 945G/GM, the chipset reports the MSI capability on the
* integrated graphics even though the support isn't actually there
* according to the published specs. It doesn't appear to function
@@ -1448,6 +1389,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->rps.lock);
spin_lock_init(&dev_priv->dpio_lock);
+ mutex_init(&dev_priv->rps.hw_lock);
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
dev_priv->num_pipe = 3;
else if (IS_MOBILE(dev) || !IS_GEN2(dev))
@@ -1469,11 +1412,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
}
/* Must be done after probing outputs */
- // intel_opregion_init(dev);
- // acpi_video_register();
- // setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
- // (unsigned long) dev);
if (IS_GEN5(dev))
@@ -1547,6 +1485,7 @@ int i915_driver_unload(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_fbdev_fini(dev);
intel_modeset_cleanup(dev);
+ cancel_work_sync(&dev_priv->console_resume_work);
/*
* free the memory space allocated for the child device


@@ -33,7 +33,6 @@
#include "i915_drv.h"
#include "intel_drv.h"
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
@@ -384,26 +383,36 @@ void intel_detect_pch(struct drm_device *dev)
pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
if (pch) {
if (pch->vendor == PCI_VENDOR_ID_INTEL) {
- int id;
+ unsigned short id;
id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+ dev_priv->pch_id = id;
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_IBX;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
+ WARN_ON(!IS_GEN5(dev));
} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_CPT;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found CougarPoint PCH\n");
+ WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
/* PantherPoint is CPT compatible */
dev_priv->pch_type = PCH_CPT;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found PatherPoint PCH\n");
+ WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
dev_priv->num_pch_pll = 0;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
+ WARN_ON(!IS_HASWELL(dev));
+ } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_LPT;
+ dev_priv->num_pch_pll = 0;
+ DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
+ WARN_ON(!IS_HASWELL(dev));
}
BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
}
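The classification above is a plain mask-and-compare on the ISA bridge's PCI device ID (the masks appear in the i915_drv.h hunk further down). A tiny standalone illustration with a hypothetical device ID, not real probing:

    /* Standalone sketch of the PCH classification above: the high byte of
     * the ISA bridge's PCI device ID selects the PCH generation. 0x1e44 is
     * a made-up example value. */
    #include <stdio.h>

    #define INTEL_PCH_DEVICE_ID_MASK     0xff00
    #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00

    int main(void)
    {
        unsigned short device = 0x1e44; /* hypothetical bridge ID */
        unsigned short id = device & INTEL_PCH_DEVICE_ID_MASK;

        if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE)
            printf("PantherPoint PCH (id 0x%04x)\n", id);
        return 0;
    }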
@@ -449,7 +458,7 @@ int i915_init(void)
struct intel_device_info *intel_info =
(struct intel_device_info *) ent->driver_data;
- if (intel_info->is_haswell || intel_info->is_valleyview)
+ if (intel_info->is_valleyview)
if(!i915_preliminary_hw_support) {
DRM_ERROR("Preliminary hardware support disabled\n");
return -ENODEV;
@@ -473,6 +482,8 @@ int i915_init(void)
int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct drm_device *dev;
+ static struct drm_driver driver;
int ret;
dev = kzalloc(sizeof(*dev), 0);
@@ -503,6 +514,8 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex);
+ dev->driver = &driver;
ret = i915_driver_load(dev, ent->driver_data );
if (ret)
@@ -609,12 +622,40 @@ static bool IS_DISPLAYREG(u32 reg)
if (reg == GEN6_GDRST)
return false;
switch (reg) {
case _3D_CHICKEN3:
case IVB_CHICKEN3:
case GEN7_COMMON_SLICE_CHICKEN1:
case GEN7_L3CNTLREG1:
case GEN7_L3_CHICKEN_MODE_REGISTER:
case GEN7_ROW_CHICKEN2:
case GEN7_L3SQCREG4:
case GEN7_SQ_CHICKEN_MBCUNIT_CONFIG:
case GEN7_HALF_SLICE_CHICKEN1:
case GEN6_MBCTL:
case GEN6_UCGCTL2:
return false;
default:
break;
}
return true;
}
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
/* WaIssueDummyWriteToWakeupFromRC6: Issue a dummy write to wake up the
* chip from rc6 before touching it for real. MI_MODE is masked, hence
* harmless to write 0 into. */
I915_WRITE_NOTRACE(MI_MODE, 0);
}
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
u##x val = 0; \
+ if (IS_GEN5(dev_priv->dev)) \
+ ilk_dummy_write(dev_priv); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
unsigned long irqflags; \
spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
@@ -645,6 +686,12 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
+ if (IS_GEN5(dev_priv->dev)) \
+ ilk_dummy_write(dev_priv); \
+ if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
+ DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \
+ I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
+ } \
if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
write##y(val, dev_priv->regs + reg + 0x180000); \
} else { \
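For readers unpicking the token pasting: roughly what __i915_write(32, l) now generates, hand-expanded as a sketch (tracing and the fifo bookkeeping after the write are elided; only calls visible in this hunk are used):

    /* Approximate hand-expansion of __i915_write(32, l) after this change
     * (orientation only; the real macro also clears the fifo flag and
     * emits tracepoints). */
    void i915_write32(struct drm_i915_private *dev_priv, u32 reg, u32 val)
    {
        u32 __fifo_ret = 0;

        if (NEEDS_FORCE_WAKE(dev_priv, reg))
            __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv);

        if (IS_GEN5(dev_priv->dev))
            ilk_dummy_write(dev_priv); /* wake Ironlake from RC6 first */

        if (IS_HASWELL(dev_priv->dev) &&
            (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) {
            DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg);
            I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED);
        }

        if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg))
            writel(val, dev_priv->regs + reg + 0x180000);
        else
            writel(val, dev_priv->regs + reg);
    }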


@@ -33,6 +33,7 @@
#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
+ #include <linux/scatterlist.h>
//#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
@@ -40,6 +41,7 @@
//#include <linux/backlight.h>
#include <linux/spinlock.h>
+ #include <linux/err.h>
/* General customization:
@@ -69,6 +71,14 @@ enum pipe {
};
#define pipe_name(p) ((p) + 'A')
enum transcoder {
TRANSCODER_A = 0,
TRANSCODER_B,
TRANSCODER_C,
TRANSCODER_EDP = 0xF,
};
#define transcoder_name(t) ((t) + 'A')
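One wrinkle worth noting: transcoder_name() is a plain offset from 'A', so it only yields a meaningful letter for the physical transcoders; TRANSCODER_EDP (0xF) happens to map to 'P', which reads like "eDP's P" but is pure coincidence of the encoding. A standalone check:

    /* Standalone check of the transcoder_name() encoding above. */
    #include <assert.h>

    enum transcoder {
        TRANSCODER_A = 0,
        TRANSCODER_B,
        TRANSCODER_C,
        TRANSCODER_EDP = 0xF,
    };
    #define transcoder_name(t) ((t) + 'A')

    int main(void)
    {
        assert(transcoder_name(TRANSCODER_B) == 'B');
        assert(transcoder_name(TRANSCODER_EDP) == 'P'); /* 'A' + 0xF */
        return 0;
    }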
enum plane {
PLANE_A = 0,
PLANE_B,
@@ -104,6 +114,12 @@ struct intel_pch_pll {
};
#define I915_NUM_PLLS 2
struct intel_ddi_plls {
int spll_refcount;
int wrpll1_refcount;
int wrpll2_refcount;
};
/* Interface history:
*
* 1.1: Original.
@@ -127,13 +143,7 @@ struct intel_pch_pll {
#define I915_GEM_PHYS_OVERLAY_REGS 3
#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
struct mem_block {
struct mem_block *next;
struct mem_block *prev;
int start;
int size;
struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
};
struct opregion_header;
struct opregion_acpi;
@@ -181,19 +191,24 @@ struct sdvo_device_mapping {
struct intel_display_error_state;
struct drm_i915_error_state {
+ struct kref ref;
u32 eir;
u32 pgtbl_er;
u32 ier;
u32 ccid;
+ u32 derrmr;
+ u32 forcewake;
bool waiting[I915_NUM_RINGS];
u32 pipestat[I915_MAX_PIPES];
u32 tail[I915_NUM_RINGS];
u32 head[I915_NUM_RINGS];
+ u32 ctl[I915_NUM_RINGS];
u32 ipeir[I915_NUM_RINGS];
u32 ipehr[I915_NUM_RINGS];
u32 instdone[I915_NUM_RINGS];
u32 acthd[I915_NUM_RINGS];
u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
+ u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
/* our own tracking of ring head and tail */
u32 cpu_ring_head[I915_NUM_RINGS];
@@ -254,6 +269,7 @@ struct drm_i915_display_funcs {
uint32_t sprite_width, int pixel_size);
void (*update_linetime_wm)(struct drm_device *dev, int pipe,
struct drm_display_mode *mode);
+ void (*modeset_global_resources)(struct drm_device *dev);
int (*crtc_mode_set)(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -266,7 +282,6 @@ struct drm_i915_display_funcs {
struct drm_crtc *crtc);
void (*fdi_link_train)(struct drm_crtc *crtc);
void (*init_clock_gating)(struct drm_device *dev);
- void (*init_pch_clock_gating)(struct drm_device *dev);
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj);
@@ -341,8 +356,9 @@ struct intel_device_info {
#define I915_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES 1024
struct i915_hw_ppgtt {
+ struct drm_device *dev;
unsigned num_pd_entries;
- dma_addr_t *pt_pages;
+ struct page **pt_pages;
uint32_t pd_offset;
dma_addr_t *pt_dma_addr;
dma_addr_t scratch_page_dma_addr;
@@ -377,6 +393,11 @@ enum intel_pch {
PCH_LPT, /* Lynxpoint PCH */
};
+ enum intel_sbi_destination {
+ SBI_ICLK,
+ SBI_MPHY,
+ };
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
@@ -386,154 +407,18 @@ struct intel_fbc_work;
struct intel_gmbus {
struct i2c_adapter adapter;
- bool force_bit;
+ u32 force_bit;
u32 reg0;
u32 gpio_reg;
struct i2c_algo_bit_data bit_algo;
struct drm_i915_private *dev_priv;
};
- typedef struct drm_i915_private {
+ struct i915_suspend_saved_registers {
struct drm_device *dev;
const struct intel_device_info *info;
int relative_constants_mode;
void __iomem *regs;
struct drm_i915_gt_funcs gt;
/** gt_fifo_count and the subsequent register write are synchronized
* with dev->struct_mutex. */
unsigned gt_fifo_count;
/** forcewake_count is protected by gt_lock */
unsigned forcewake_count;
/** gt_lock is also taken in irq contexts. */
spinlock_t gt_lock;
struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
/** gmbus_mutex protects against concurrent usage of the single hw gmbus
* controller on different i2c buses. */
struct mutex gmbus_mutex;
/**
* Base address of the gmbus and gpio block.
*/
uint32_t gpio_mmio_base;
struct pci_dev *bridge_dev;
struct intel_ring_buffer ring[I915_NUM_RINGS];
uint32_t next_seqno;
drm_dma_handle_t *status_page_dmah;
uint32_t counter;
struct drm_i915_gem_object *pwrctx;
struct drm_i915_gem_object *renderctx;
// struct resource mch_res;
atomic_t irq_received;
/* protects the irq masks */
spinlock_t irq_lock;
/* DPIO indirect register protection */
spinlock_t dpio_lock;
/** Cached value of IMR to avoid reads in updating the bitfield */
u32 pipestat[2];
u32 irq_mask;
u32 gt_irq_mask;
u32 pch_irq_mask;
u32 hotplug_supported_mask;
struct work_struct hotplug_work;
int num_pipe;
int num_pch_pll;
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
struct timer_list hangcheck_timer;
int hangcheck_count;
uint32_t last_acthd[I915_NUM_RINGS];
uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
unsigned int stop_rings;
unsigned long cfb_size;
unsigned int cfb_fb;
enum plane cfb_plane;
int cfb_y;
// struct intel_fbc_work *fbc_work;
struct intel_opregion opregion;
/* overlay */
// struct intel_overlay *overlay;
bool sprite_scaling_enabled;
/* LVDS info */
int backlight_level; /* restore backlight to this value */
bool backlight_enabled;
struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
/* Feature bits from the VBIOS */
unsigned int int_tv_support:1;
unsigned int lvds_dither:1;
unsigned int lvds_vbt:1;
unsigned int int_crt_support:1;
unsigned int lvds_use_ssc:1;
unsigned int display_clock_mode:1;
int lvds_ssc_freq;
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
unsigned int lvds_val; /* used for checking LVDS channel mode */
struct {
int rate;
int lanes;
int preemphasis;
int vswing;
bool initialized;
bool support;
int bpp;
struct edp_power_seq pps;
} edp;
bool no_aux_handshake;
// struct notifier_block lid_notifier;
int crt_ddc_pin;
struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
int num_fence_regs; /* 8 on pre-965, 16 otherwise */
unsigned int fsb_freq, mem_freq, is_ddr3;
spinlock_t error_lock;
/* Protected by dev->error_lock. */
struct drm_i915_error_state *first_error;
struct work_struct error_work;
struct completion error_completion;
struct workqueue_struct *wq;
/* Display functions */
struct drm_i915_display_funcs display;
/* PCH chipset type */
enum intel_pch pch_type;
unsigned long quirks;
/* Register state */
bool modeset_on_lid;
u8 saveLBB;
u32 saveDSPACNTR;
u32 saveDSPBCNTR;
u32 saveDSPARB;
- u32 saveHWS;
u32 savePIPEACONF;
u32 savePIPEBCONF;
u32 savePIPEASRC;
@@ -679,10 +564,206 @@ typedef struct drm_i915_private {
u32 savePIPEB_LINK_N1;
u32 saveMCHBAR_RENDER_STANDBY;
u32 savePCH_PORT_HOTPLUG;
};
struct intel_gen6_power_mgmt {
struct work_struct work;
u32 pm_iir;
/* lock - irqsave spinlock that protectects the work_struct and
* pm_iir. */
spinlock_t lock;
/* The below variables an all the rps hw state are protected by
* dev->struct mutext. */
u8 cur_delay;
u8 min_delay;
u8 max_delay;
struct delayed_work delayed_resume_work;
/*
* Protects RPS/RC6 register access and PCU communication.
* Must be taken after struct_mutex if nested.
*/
struct mutex hw_lock;
};
struct intel_ilk_power_mgmt {
u8 cur_delay;
u8 min_delay;
u8 max_delay;
u8 fmax;
u8 fstart;
u64 last_count1;
unsigned long last_time1;
unsigned long chipset_power;
u64 last_count2;
struct timespec last_time2;
unsigned long gfx_power;
u8 corr;
int c_m;
int r_t;
struct drm_i915_gem_object *pwrctx;
struct drm_i915_gem_object *renderctx;
};
struct i915_dri1_state {
unsigned allow_batchbuffer : 1;
u32 __iomem *gfx_hws_cpu_addr;
unsigned int cpp;
int back_offset;
int front_offset;
int current_page;
int page_flipping;
uint32_t counter;
};
struct intel_l3_parity {
u32 *remap_info;
struct work_struct error_work;
};
typedef struct drm_i915_private {
struct drm_device *dev;
const struct intel_device_info *info;
int relative_constants_mode;
void __iomem *regs;
struct drm_i915_gt_funcs gt;
/** gt_fifo_count and the subsequent register write are synchronized
* with dev->struct_mutex. */
unsigned gt_fifo_count;
/** forcewake_count is protected by gt_lock */
unsigned forcewake_count;
/** gt_lock is also taken in irq contexts. */
struct spinlock gt_lock;
struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
/** gmbus_mutex protects against concurrent usage of the single hw gmbus
* controller on different i2c buses. */
struct mutex gmbus_mutex;
/**
* Base address of the gmbus and gpio block.
*/
uint32_t gpio_mmio_base;
struct pci_dev *bridge_dev;
struct intel_ring_buffer ring[I915_NUM_RINGS];
uint32_t next_seqno;
drm_dma_handle_t *status_page_dmah;
struct resource mch_res;
atomic_t irq_received;
/* protects the irq masks */
spinlock_t irq_lock;
/* DPIO indirect register protection */
spinlock_t dpio_lock;
/** Cached value of IMR to avoid reads in updating the bitfield */
u32 pipestat[2];
u32 irq_mask;
u32 gt_irq_mask;
u32 pch_irq_mask;
u32 hotplug_supported_mask;
struct work_struct hotplug_work;
int num_pipe;
int num_pch_pll;
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
struct timer_list hangcheck_timer;
int hangcheck_count;
uint32_t last_acthd[I915_NUM_RINGS];
uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
unsigned int stop_rings;
unsigned long cfb_size;
unsigned int cfb_fb;
enum plane cfb_plane;
int cfb_y;
struct intel_fbc_work *fbc_work;
struct intel_opregion opregion;
/* overlay */
struct intel_overlay *overlay;
bool sprite_scaling_enabled;
/* LVDS info */
int backlight_level; /* restore backlight to this value */
bool backlight_enabled;
struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
/* Feature bits from the VBIOS */
unsigned int int_tv_support:1;
unsigned int lvds_dither:1;
unsigned int lvds_vbt:1;
unsigned int int_crt_support:1;
unsigned int lvds_use_ssc:1;
unsigned int display_clock_mode:1;
int lvds_ssc_freq;
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
unsigned int lvds_val; /* used for checking LVDS channel mode */
struct {
int rate;
int lanes;
int preemphasis;
int vswing;
bool initialized;
bool support;
int bpp;
struct edp_power_seq pps;
} edp;
bool no_aux_handshake;
int crt_ddc_pin;
struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
int num_fence_regs; /* 8 on pre-965, 16 otherwise */
unsigned int fsb_freq, mem_freq, is_ddr3;
spinlock_t error_lock;
/* Protected by dev->error_lock. */
struct drm_i915_error_state *first_error;
struct work_struct error_work;
struct completion error_completion;
struct workqueue_struct *wq;
/* Display functions */
struct drm_i915_display_funcs display;
/* PCH chipset type */
enum intel_pch pch_type;
unsigned short pch_id;
unsigned long quirks;
/* Register state */
bool modeset_on_lid;
struct {
/** Bridge to intel-gtt-ko */
- const struct intel_gtt *gtt;
+ struct intel_gtt *gtt;
/** Memory allocator for GTT stolen memory */
struct drm_mm stolen;
/** Memory allocator for GTT */
@@ -709,9 +790,8 @@ typedef struct drm_i915_private {
/** PPGTT used for aliasing the PPGTT with the GTT */
struct i915_hw_ppgtt *aliasing_ppgtt;
- u32 *l3_remap_info;
// struct shrinker inactive_shrinker;
+ bool shrinker_no_lock_stealing;
/**
* List of objects currently involved in rendering.
@@ -788,19 +868,6 @@ typedef struct drm_i915_private {
u32 object_count;
} mm;
/* Old dri1 support infrastructure, beware the dragons ya fools entering
* here! */
struct {
unsigned allow_batchbuffer : 1;
u32 __iomem *gfx_hws_cpu_addr;
unsigned int cpp;
int back_offset;
int front_offset;
int current_page;
int page_flipping;
} dri1;
/* Kernel Modesetting */
struct sdvo_device_mapping sdvo_mappings[2];
@@ -814,6 +881,7 @@ typedef struct drm_i915_private {
wait_queue_head_t pending_flip_queue;
struct intel_pch_pll pch_plls[I915_NUM_PLLS];
+ struct intel_ddi_plls ddi_plls;
/* Reclocking support */
bool render_reclock_avail;
@@ -823,46 +891,17 @@ typedef struct drm_i915_private {
u16 orig_clock;
int child_dev_num;
struct child_device_config *child_dev;
- struct drm_connector *int_lvds_connector;
- struct drm_connector *int_edp_connector;
bool mchbar_need_disable;
+ struct intel_l3_parity l3_parity;
/* gen6+ rps state */
- struct {
- struct work_struct work;
- u32 pm_iir;
- /* lock - irqsave spinlock that protectects the work_struct and
- * pm_iir. */
- spinlock_t lock;
- /* The below variables an all the rps hw state are protected by
- * dev->struct mutext. */
- u8 cur_delay;
- u8 min_delay;
- u8 max_delay;
- } rps;
+ struct intel_gen6_power_mgmt rps;
/* ilk-only ips/rps state. Everything in here is protected by the global
* mchdev_lock in intel_pm.c */
- struct {
- u8 cur_delay;
- u8 min_delay;
- u8 max_delay;
- u8 fmax;
- u8 fstart;
- u64 last_count1;
- unsigned long last_time1;
- unsigned long chipset_power;
- u64 last_count2;
- struct timespec last_time2;
- unsigned long gfx_power;
- u8 corr;
- int c_m;
- int r_t;
- } ips;
+ struct intel_ilk_power_mgmt ips;
enum no_fbc_reason no_fbc_reason;
@@ -874,6 +913,12 @@ typedef struct drm_i915_private {
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
+ /*
+ * The console may be contended at resume, but we don't
+ * want it to block on it.
+ */
+ struct work_struct console_resume_work;
// struct backlight_device *backlight;
struct drm_property *broadcast_rgb_property;
@@ -881,6 +926,14 @@ typedef struct drm_i915_private {
bool hw_contexts_disabled;
uint32_t hw_context_size;
+ bool fdi_rx_polarity_reversed;
+ struct i915_suspend_saved_registers regfile;
+ /* Old dri1 support infrastructure, beware the dragons ya fools entering
+ * here! */
+ struct i915_dri1_state dri1;
} drm_i915_private_t;
/* Iterate over initialised rings */
@@ -924,7 +977,7 @@ struct drm_i915_gem_object {
const struct drm_i915_gem_object_ops *ops;
- void *mapped;
+ // void *mapped;
/** Current space allocated to this object in the GTT, if any. */
struct drm_mm_node *gtt_space;
@@ -1012,8 +1065,8 @@ struct drm_i915_gem_object {
unsigned int has_global_gtt_mapping:1;
unsigned int has_dma_mapping:1;
- dma_addr_t *allocated_pages;
+ // dma_addr_t *allocated_pages;
- struct pagelist pages;
+ struct sg_table *pages;
int pages_pin_count;
/* prime dma-buf support */
@@ -1062,6 +1115,7 @@ struct drm_i915_gem_object {
*/
atomic_t pending_flip;
};
+ #define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
@@ -1098,7 +1152,7 @@ struct drm_i915_gem_request {
struct drm_i915_file_private {
struct {
- spinlock_t lock;
+ struct spinlock lock;
struct list_head request_list;
} mm;
struct idr context_idr;
@@ -1125,9 +1179,17 @@ struct drm_i915_file_private {
#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
+ #define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \
+ (dev)->pci_device == 0x0152 || \
+ (dev)->pci_device == 0x015a)
+ #define IS_SNB_GT1(dev) ((dev)->pci_device == 0x0102 || \
+ (dev)->pci_device == 0x0106 || \
+ (dev)->pci_device == 0x010A)
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
+ #define IS_ULT(dev) (IS_HASWELL(dev) && \
+ ((dev)->pci_device & 0xFF00) == 0x0A00)
/*
* The genX designation typically refers to the render engine, so render
@@ -1153,6 +1215,9 @@ struct drm_i915_file_private {
#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
+ /* Early gen2 have a totally busted CS tlb and require pinned batches. */
+ #define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
*/
@@ -1173,6 +1238,13 @@ struct drm_i915_file_private {
#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
+ #define INTEL_PCH_DEVICE_ID_MASK 0xff00
+ #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
+ #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
+ #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
+ #define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
+ #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
@@ -1258,6 +1330,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged);
extern void intel_irq_init(struct drm_device *dev);
extern void intel_gt_init(struct drm_device *dev);
+ extern void intel_gt_reset(struct drm_device *dev);
void i915_error_state_free(struct kref *error_ref);
@@ -1340,15 +1413,23 @@ void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
- static inline dma_addr_t i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
- {
- return obj->pages.page[n];
- };
+ static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
+ {
+ struct scatterlist *sg = obj->pages->sgl;
+ int nents = obj->pages->nents;
+ while (nents > SG_MAX_SINGLE_ALLOC) {
+ if (n < SG_MAX_SINGLE_ALLOC - 1)
+ break;
+ sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1);
+ n -= SG_MAX_SINGLE_ALLOC - 1;
+ nents -= SG_MAX_SINGLE_ALLOC - 1;
+ }
+ return sg_page(sg+n);
+ }
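The chain walk above exists because scatterlist tables larger than one allocation are chained: the last slot of each SG_MAX_SINGLE_ALLOC-sized chunk is a link to the next chunk, not a payload entry, so indexing must skip one slot per chunk crossed. A toy model of the same indexing (made-up chunk size standing in for SG_MAX_SINGLE_ALLOC, not kernel code):

    /* Toy model of chained-scatterlist indexing: each chunk holds CHUNK
     * entries, the last of which points at the next chunk. */
    #include <assert.h>
    #include <stddef.h>

    #define CHUNK 4 /* stand-in for SG_MAX_SINGLE_ALLOC */

    struct ent { int page; struct ent *chain; };

    static int lookup(struct ent *sg, int nents, int n)
    {
        while (nents > CHUNK) {
            if (n < CHUNK - 1)
                break;
            sg = sg[CHUNK - 1].chain; /* follow the chain slot */
            n -= CHUNK - 1;
            nents -= CHUNK - 1;
        }
        return sg[n].page;
    }

    int main(void)
    {
        struct ent b[CHUNK] = { {30}, {40}, {50}, {0} };
        struct ent a[CHUNK] = { {0}, {10}, {20}, {.chain = b} };
        assert(lookup(a, 6, 1) == 10); /* stays in the first chunk */
        assert(lookup(a, 6, 4) == 40); /* crosses into the second chunk */
        return 0;
    }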
static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
- BUG_ON(obj->pages.page == NULL);
+ BUG_ON(obj->pages == NULL);
obj->pages_pin_count++;
}
static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
@@ -1361,8 +1442,7 @@ int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring,
- u32 seqno);
+ struct intel_ring_buffer *ring);
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
@@ -1380,7 +1460,7 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
return (int32_t)(seq1 - seq2) >= 0;
}
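i915_seqno_passed() relies on modular 32-bit arithmetic so the ordering test stays correct across the counter wrap; a quick standalone check:

    /* Standalone check of the wrap-safe seqno comparison above. */
    #include <assert.h>
    #include <stdint.h>

    static int seqno_passed(uint32_t seq1, uint32_t seq2)
    {
        return (int32_t)(seq1 - seq2) >= 0;
    }

    int main(void)
    {
        assert(seqno_passed(100, 50));   /* plainly later */
        assert(!seqno_passed(50, 100));  /* plainly earlier */
        /* 2 is "after" 0xFFFFFFFF once the counter wraps: the unsigned
         * difference 3 is a small positive int32. */
        assert(seqno_passed(2, 0xFFFFFFFFu));
        return 0;
    }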
- u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);
+ extern int i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
@@ -1490,6 +1570,14 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
unsigned long start,
unsigned long mappable_end,
unsigned long end);
+ int i915_gem_gtt_init(struct drm_device *dev);
+ void i915_gem_gtt_fini(struct drm_device *dev);
+ static inline void i915_gem_chipset_flush(struct drm_device *dev)
+ {
+ if (INTEL_INFO(dev)->gen < 6)
+ intel_gtt_chipset_flush();
+ }
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
@@ -1586,11 +1674,12 @@ extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
- extern void intel_modeset_setup_hw_state(struct drm_device *dev);
+ extern void intel_modeset_setup_hw_state(struct drm_device *dev,
+ bool force_restore);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
- extern void ironlake_init_pch_refclk(struct drm_device *dev);
+ extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
@@ -1619,6 +1708,9 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
+ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
+ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);


@@ -30,7 +30,6 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
- //#include <linux/shmem_fs.h>
#include <linux/slab.h>
//#include <linux/swap.h>
#include <linux/pci.h>
@@ -53,21 +52,6 @@ static inline void clflush(volatile void *__p)
#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
static inline long IS_ERR(const void *ptr)
{
return IS_ERR_VALUE((unsigned long)ptr);
}
static inline void *ERR_PTR(long error)
{
return (void *) error;
}
static inline long PTR_ERR(const void *ptr)
{
return (long) ptr;
}
void
drm_gem_object_free(struct kref *kref)
{
@@ -921,12 +905,12 @@ out:
* domain anymore. */
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
i915_gem_clflush_object(obj);
- intel_gtt_chipset_flush();
+ i915_gem_chipset_flush(dev);
}
}
if (needs_clflush_after)
- intel_gtt_chipset_flush();
+ i915_gem_chipset_flush(dev);
return ret;
}
@@ -1389,6 +1373,8 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
+ int page_count = obj->base.size / PAGE_SIZE;
+ struct scatterlist *sg;
int ret, i;
BUG_ON(obj->madv == __I915_MADV_PURGED);
@@ -1406,12 +1392,18 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
if (obj->madv == I915_MADV_DONTNEED)
obj->dirty = 0;
- for (i = 0; i < obj->pages.nents; i++)
- FreePage(obj->pages.page[i]);
- DRM_DEBUG_KMS("%s release %d pages\n", __FUNCTION__, obj->pages.nents);
+ for_each_sg(obj->pages->sgl, sg, page_count, i) {
+ struct page *page = sg_page(sg);
+ page_cache_release(page);
+ }
+ //DRM_DEBUG_KMS("%s release %d pages\n", __FUNCTION__, page_count);
obj->dirty = 0;
- kfree(obj->pages.page);
+ sg_free_table(obj->pages);
+ kfree(obj->pages);
}
static int
@@ -1419,10 +1411,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
const struct drm_i915_gem_object_ops *ops = obj->ops;
- // printf("page %x pin count %d\n",
- // obj->pages.page, obj->pages_pin_count );
- if (obj->pages.page == NULL)
+ if (obj->pages == NULL)
return 0;
BUG_ON(obj->gtt_space);
@@ -1430,10 +1419,14 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
if (obj->pages_pin_count)
return -EBUSY;
- ops->put_pages(obj);
- obj->pages.page = NULL;
+ /* ->put_pages might need to allocate memory for the bit17 swizzle
+ * array, hence protect them from being reaped by removing them from gtt
+ * lists early. */
list_del(&obj->gtt_list);
+ ops->put_pages(obj);
+ obj->pages = NULL;
if (i915_gem_object_is_purgeable(obj))
i915_gem_object_truncate(obj);
@@ -1450,43 +1443,55 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
- dma_addr_t page;
- int page_count, i;
+ int page_count, i;
+ struct sg_table *st;
+ struct scatterlist *sg;
+ struct page *page;
+ gfp_t gfp;
+ /* Assert that the object is not currently in any GPU domain. As it
+ * wasn't in the GTT, there shouldn't be any way it could have been in
+ * a GPU cache
+ */
+ BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
+ BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (st == NULL)
+ return -ENOMEM;
+ page_count = obj->base.size / PAGE_SIZE;
+ if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
+ sg_free_table(st);
+ kfree(st);
+ return -ENOMEM;
+ }
/* Get the list of pages out of our struct file. They'll be pinned
* at this point until we release them.
+ *
+ * Fail silently without starting the shrinker
*/
- page_count = obj->base.size / PAGE_SIZE;
- BUG_ON(obj->pages.page != NULL);
- obj->pages.page = malloc(page_count * sizeof(dma_addr_t));
- if (obj->pages.page == NULL)
- return -ENOMEM;
- for (i = 0; i < page_count; i++) {
+ for_each_sg(st->sgl, sg, page_count, i) {
page = AllocPage(); // oh-oh
if ( page == 0 )
goto err_pages;
- obj->pages.page[i] = page;
- };
- DRM_DEBUG_KMS("%s alloc %d pages\n", __FUNCTION__, page_count);
- obj->pages.nents = page_count;
+ sg_set_page(sg, page, PAGE_SIZE, 0);
+ }
+ obj->pages = st;
- // if (obj->tiling_mode != I915_TILING_NONE)
- // i915_gem_object_do_bit_17_swizzle(obj);
+ // DRM_DEBUG_KMS("%s alloc %d pages\n", __FUNCTION__, page_count);
return 0;
err_pages:
- while (i--)
- FreePage(obj->pages.page[i]);
+ for_each_sg(st->sgl, sg, i, page_count)
+ page_cache_release(sg_page(sg));
+ sg_free_table(st);
- free(obj->pages.page);
- obj->pages.page = NULL;
- obj->pages.nents = 0;
- return -ENOMEM;
+ kfree(st);
+ return PTR_ERR(page);
}
/* Ensure that the associated pages are gathered from the backing storage
@@ -1503,7 +1508,7 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
const struct drm_i915_gem_object_ops *ops = obj->ops;
int ret;
- if (obj->pages.page)
+ if (obj->pages)
return 0;
BUG_ON(obj->pages_pin_count);
@@ -1513,16 +1518,16 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
return ret;
list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
return 0;
}
void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *ring,
- u32 seqno)
+ struct intel_ring_buffer *ring)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 seqno = intel_ring_get_seqno(ring);
BUG_ON(ring == NULL);
obj->ring = ring;
@@ -1583,26 +1588,54 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
WARN_ON(i915_verify_lists(dev));
}
- static u32
- i915_gem_get_seqno(struct drm_device *dev)
- {
- drm_i915_private_t *dev_priv = dev->dev_private;
- u32 seqno = dev_priv->next_seqno;
- /* reserve 0 for non-seqno */
- if (++dev_priv->next_seqno == 0)
- dev_priv->next_seqno = 1;
- return seqno;
- }
+ static int
+ i915_gem_handle_seqno_wrap(struct drm_device *dev)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ int ret, i, j;
+ /* The hardware uses various monotonic 32-bit counters, if we
+ * detect that they will wraparound we need to idle the GPU
+ * and reset those counters.
+ */
+ ret = 0;
+ for_each_ring(ring, dev_priv, i) {
+ for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
+ ret |= ring->sync_seqno[j] != 0;
+ }
+ if (ret == 0)
+ return ret;
+ ret = i915_gpu_idle(dev);
+ if (ret)
+ return ret;
+ i915_gem_retire_requests(dev);
+ for_each_ring(ring, dev_priv, i) {
+ for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
+ ring->sync_seqno[j] = 0;
+ }
+ return 0;
+ }
- u32
- i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
- {
- if (ring->outstanding_lazy_request == 0)
- ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
- return ring->outstanding_lazy_request;
- }
+ int
+ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ /* reserve 0 for non-seqno */
+ if (dev_priv->next_seqno == 0) {
+ int ret = i915_gem_handle_seqno_wrap(dev);
+ if (ret)
+ return ret;
+ dev_priv->next_seqno = 1;
+ }
+ *seqno = dev_priv->next_seqno++;
+ return 0;
+ }
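With the new contract, callers fetch the seqno through an out parameter and must check for failure, since the wrap handler can fail if idling the GPU fails. A hedged sketch of the call pattern (illustrative only; the real caller is i915_add_request() below, which instead reads the seqno back via intel_ring_get_seqno()):

    /* Sketch of the new i915_gem_get_seqno() calling convention. */
    static int example_alloc_seqno(struct drm_device *dev)
    {
        u32 seqno;
        int ret;

        ret = i915_gem_get_seqno(dev, &seqno);
        if (ret)
            return ret; /* GPU idle/retire on wraparound failed */

        /* seqno is now a non-zero, monotonically increasing value */
        return 0;
    }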
int
@@ -1613,7 +1646,6 @@ i915_add_request(struct intel_ring_buffer *ring,
drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_request *request;
u32 request_ring_position;
- u32 seqno;
int was_empty;
int ret;
@@ -1632,7 +1664,6 @@ i915_add_request(struct intel_ring_buffer *ring,
if (request == NULL)
return -ENOMEM;
- seqno = i915_gem_next_request_seqno(ring);
/* Record the position of the start of the request so that
* should we detect the updated seqno part-way through the
@@ -1641,15 +1672,13 @@ i915_add_request(struct intel_ring_buffer *ring,
*/
request_ring_position = intel_ring_get_tail(ring);
- ret = ring->add_request(ring, &seqno);
+ ret = ring->add_request(ring);
if (ret) {
kfree(request);
return ret;
}
- trace_i915_gem_request_add(ring, seqno);
- request->seqno = seqno;
+ request->seqno = intel_ring_get_seqno(ring);
request->ring = ring;
request->tail = request_ring_position;
request->emitted_jiffies = GetTimerTicks();
@@ -1674,7 +1703,7 @@ i915_add_request(struct intel_ring_buffer *ring,
}
if (out_seqno)
- *out_seqno = seqno;
+ *out_seqno = request->seqno;
return 0;
}
@@ -1759,7 +1788,6 @@ void
i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
{
uint32_t seqno;
- int i;
if (list_empty(&ring->request_list))
return;
@@ -1768,10 +1796,6 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
seqno = ring->get_seqno(ring, true);
- for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
- if (seqno >= ring->sync_seqno[i])
- ring->sync_seqno[i] = 0;
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
@@ -1846,7 +1870,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
/* Come back later if the device is busy... */
if (!mutex_trylock(&dev->struct_mutex)) {
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
return;
}
i915_gem_retire_requests(dev);
@@ -1891,6 +1915,33 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
return 0;
}
/**
* i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
* @DRM_IOCTL_ARGS: standard ioctl arguments
*
* Returns 0 if successful, else an error is returned with the remaining time in
* the timeout parameter.
* -ETIME: object is still busy after timeout
* -ERESTARTSYS: signal interrupted the wait
* -ENOENT: object doesn't exist
* Also possible, but rare:
* -EAGAIN: GPU wedged
* -ENOMEM: damn
* -ENODEV: Internal IRQ fail
* -E?: The add request failed
*
* The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
* non-zero timeout parameter the wait ioctl will wait for the given number of
* nanoseconds on an object becoming unbusy. Since the wait itself does so
* without holding struct_mutex the object may become re-busied before this
* function completes. A similar but shorter race condition exists in the busy
* ioctl.
*/
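For reference, a minimal userspace sketch of calling this ioctl through libdrm (fd and handle values are assumed; header paths may vary by distro; drmIoctl() restarts the call on EINTR/EAGAIN):

    #include <stdint.h>
    #include <xf86drm.h>
    #include <drm/i915_drm.h>

    /* timeout_ns == 0 behaves like the busy ioctl; with a positive value the
     * ioctl waits, and on timeout it fails with ETIME and writes the
     * remaining time back into timeout_ns. */
    static int wait_bo(int fd, uint32_t handle, int64_t timeout_ns)
    {
            struct drm_i915_gem_wait wait = {
                    .bo_handle  = handle,
                    .timeout_ns = timeout_ns,
            };

            return drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
    }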
@ -1938,7 +1989,11 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
ret = to->sync_to(to, from, seqno); ret = to->sync_to(to, from, seqno);
if (!ret) if (!ret)
from->sync_seqno[idx] = seqno; /* We use last_read_seqno because sync_to()
* might have just caused seqno wrap under
* the radar.
*/
from->sync_seqno[idx] = obj->last_read_seqno;
return ret; return ret;
} }
@ -1982,7 +2037,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
if (obj->pin_count) if (obj->pin_count)
return -EBUSY; return -EBUSY;
BUG_ON(obj->pages.page == NULL); BUG_ON(obj->pages == NULL);
ret = i915_gem_object_finish_gpu(obj); ret = i915_gem_object_finish_gpu(obj);
if (ret) if (ret)
@ -2002,7 +2057,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
trace_i915_gem_object_unbind(obj); trace_i915_gem_object_unbind(obj);
if (obj->has_global_gtt_mapping) if (obj->has_global_gtt_mapping)
i915_gem_gtt_unbind_object(obj); i915_gem_gtt_unbind_object(obj);
if (obj->has_aliasing_ppgtt_mapping) { if (obj->has_aliasing_ppgtt_mapping) {
i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj); i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
obj->has_aliasing_ppgtt_mapping = 0; obj->has_aliasing_ppgtt_mapping = 0;
@ -2021,14 +2076,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
return 0; return 0;
} }
static int i915_ring_idle(struct intel_ring_buffer *ring)
{
if (list_empty(&ring->active_list))
return 0;
return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
}
int i915_gpu_idle(struct drm_device *dev) int i915_gpu_idle(struct drm_device *dev)
{ {
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
@ -2041,7 +2088,7 @@ int i915_gpu_idle(struct drm_device *dev)
if (ret) if (ret)
return ret; return ret;
ret = i915_ring_idle(ring); ret = intel_ring_idle(ring);
if (ret) if (ret)
return ret; return ret;
} }
@ -2431,13 +2478,13 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
{ {
struct drm_device *dev = obj->base.dev; struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_mm_node *free_space; struct drm_mm_node *node;
u32 size, fence_size, fence_alignment, unfenced_alignment; u32 size, fence_size, fence_alignment, unfenced_alignment;
bool mappable, fenceable; bool mappable, fenceable;
int ret; int ret;
if (obj->madv != I915_MADV_WILLNEED) { if (obj->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to bind a purgeable object\n"); DRM_ERROR("Attempting to bind a purgeable object\n");
return -EINVAL; return -EINVAL;
} }
@ -2475,72 +2522,57 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
if (ret) if (ret)
return ret; return ret;
i915_gem_object_pin_pages(obj);
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (node == NULL) {
i915_gem_object_unpin_pages(obj);
return -ENOMEM;
}
search_free: search_free:
if (map_and_fenceable) if (map_and_fenceable)
free_space = ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
size, alignment, obj->cache_level, size, alignment, obj->cache_level,
0, dev_priv->mm.gtt_mappable_end, 0, dev_priv->mm.gtt_mappable_end);
false);
else else
free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space, ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
size, alignment, obj->cache_level, size, alignment, obj->cache_level);
false); if (ret) {
if (free_space != NULL) { i915_gem_object_unpin_pages(obj);
if (map_and_fenceable) kfree(node);
obj->gtt_space =
drm_mm_get_block_range_generic(free_space,
size, alignment, obj->cache_level,
0, dev_priv->mm.gtt_mappable_end,
false);
else
obj->gtt_space =
drm_mm_get_block_generic(free_space,
size, alignment, obj->cache_level,
false);
}
if (obj->gtt_space == NULL) {
ret = 1; //i915_gem_evict_something(dev, size, alignment,
// map_and_fenceable);
if (ret)
return ret; return ret;
goto search_free;
} }
if (WARN_ON(!i915_gem_valid_gtt_space(dev, if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
obj->gtt_space, i915_gem_object_unpin_pages(obj);
obj->cache_level))) { drm_mm_put_block(node);
drm_mm_put_block(obj->gtt_space);
obj->gtt_space = NULL;
return -EINVAL; return -EINVAL;
} }
ret = i915_gem_gtt_prepare_object(obj); ret = i915_gem_gtt_prepare_object(obj);
if (ret) { if (ret) {
drm_mm_put_block(obj->gtt_space); i915_gem_object_unpin_pages(obj);
obj->gtt_space = NULL; drm_mm_put_block(node);
return ret; return ret;
} }
if (!dev_priv->mm.aliasing_ppgtt)
i915_gem_gtt_bind_object(obj, obj->cache_level);
list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list); list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
obj->gtt_offset = obj->gtt_space->start; obj->gtt_space = node;
obj->gtt_offset = node->start;
fenceable = fenceable =
obj->gtt_space->size == fence_size && node->size == fence_size &&
(obj->gtt_space->start & (fence_alignment - 1)) == 0; (node->start & (fence_alignment - 1)) == 0;
mappable = mappable =
obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end; obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
obj->map_and_fenceable = mappable && fenceable; obj->map_and_fenceable = mappable && fenceable;
i915_gem_object_unpin_pages(obj);
trace_i915_gem_object_bind(obj, map_and_fenceable); trace_i915_gem_object_bind(obj, map_and_fenceable);
i915_gem_verify_gtt(dev); i915_gem_verify_gtt(dev);
return 0; return 0;
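The bind path above replaces the old two-step drm_mm usage (search for free space, then grab a block) with a caller-allocated node that drm_mm finds space for and claims in one call. The skeleton of the new pattern, stripped of the driver specifics and of the eviction retry loop:

    /* Sketch of the insert-node pattern; mm, size, alignment and color are
     * whatever the caller tracks (here the color is the cache level). */
    struct drm_mm_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
    if (node == NULL)
            return -ENOMEM;

    ret = drm_mm_insert_node_generic(mm, node, size, alignment, color);
    if (ret) {
            kfree(node);    /* nothing was claimed on failure */
            return ret;
    }
    /* on success, [node->start, node->start + node->size) now belongs to us */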
@ -2553,7 +2585,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
* to GPU, and we can ignore the cache flush because it'll happen * to GPU, and we can ignore the cache flush because it'll happen
* again at bind time. * again at bind time.
*/ */
if (obj->pages.page == NULL) if (obj->pages == NULL)
return; return;
/* If the GPU is snooping the contents of the CPU cache, /* If the GPU is snooping the contents of the CPU cache,
@ -2566,7 +2598,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
*/ */
if (obj->cache_level != I915_CACHE_NONE) if (obj->cache_level != I915_CACHE_NONE)
return; return;
#if 0
if(obj->mapped != NULL) if(obj->mapped != NULL)
{ {
uint8_t *page_virtual; uint8_t *page_virtual;
@ -2613,6 +2645,8 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
"mfence"); "mfence");
} }
} }
#endif
} }
/** Flushes the GTT write domain for the object if it's dirty. */ /** Flushes the GTT write domain for the object if it's dirty. */
@ -2652,7 +2686,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
return; return;
i915_gem_clflush_object(obj); i915_gem_clflush_object(obj);
intel_gtt_chipset_flush(); i915_gem_chipset_flush(obj->base.dev);
old_write_domain = obj->base.write_domain; old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0; obj->base.write_domain = 0;
@ -2854,8 +2888,8 @@ i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
return 0; return 0;
ret = i915_gem_object_wait_rendering(obj, false); ret = i915_gem_object_wait_rendering(obj, false);
if (ret) if (ret)
return ret; return ret;
/* Ensure that we invalidate the GPU's caches and TLBs. */ /* Ensure that we invalidate the GPU's caches and TLBs. */
obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS; obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
@ -2989,11 +3023,16 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
#endif #endif
if (obj->gtt_space == NULL) { if (obj->gtt_space == NULL) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
ret = i915_gem_object_bind_to_gtt(obj, alignment, ret = i915_gem_object_bind_to_gtt(obj, alignment,
map_and_fenceable, map_and_fenceable,
nonblocking); nonblocking);
if (ret) if (ret)
return ret; return ret;
if (!dev_priv->mm.aliasing_ppgtt)
i915_gem_gtt_bind_object(obj, obj->cache_level);
} }
if (!obj->has_global_gtt_mapping && map_and_fenceable) if (!obj->has_global_gtt_mapping && map_and_fenceable)
@ -3047,14 +3086,15 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
goto out; goto out;
} }
obj->user_pin_count++; if (obj->user_pin_count == 0) {
obj->pin_filp = file;
if (obj->user_pin_count == 1) {
ret = i915_gem_object_pin(obj, args->alignment, true, false); ret = i915_gem_object_pin(obj, args->alignment, true, false);
if (ret) if (ret)
goto out; goto out;
} }
obj->user_pin_count++;
obj->pin_filp = file;
/* XXX - flush the CPU caches for pinned objects /* XXX - flush the CPU caches for pinned objects
* as the X server doesn't manage domains yet * as the X server doesn't manage domains yet
*/ */
@ -3295,7 +3335,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
i915_gem_object_put_pages(obj); i915_gem_object_put_pages(obj);
// i915_gem_object_free_mmap_offset(obj); // i915_gem_object_free_mmap_offset(obj);
BUG_ON(obj->pages.page); BUG_ON(obj->pages);
// if (obj->base.import_attach) // if (obj->base.import_attach)
// drm_prime_gem_destroy(&obj->base, NULL); // drm_prime_gem_destroy(&obj->base, NULL);
@ -3358,7 +3398,7 @@ void i915_gem_l3_remap(struct drm_device *dev)
if (!IS_IVYBRIDGE(dev)) if (!IS_IVYBRIDGE(dev))
return; return;
if (!dev_priv->mm.l3_remap_info) if (!dev_priv->l3_parity.remap_info)
return; return;
misccpctl = I915_READ(GEN7_MISCCPCTL); misccpctl = I915_READ(GEN7_MISCCPCTL);
@ -3367,12 +3407,12 @@ void i915_gem_l3_remap(struct drm_device *dev)
for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) { for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
u32 remap = I915_READ(GEN7_L3LOG_BASE + i); u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
if (remap && remap != dev_priv->mm.l3_remap_info[i/4]) if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
DRM_DEBUG("0x%x was already programmed to %x\n", DRM_DEBUG("0x%x was already programmed to %x\n",
GEN7_L3LOG_BASE + i, remap); GEN7_L3LOG_BASE + i, remap);
if (remap && !dev_priv->mm.l3_remap_info[i/4]) if (remap && !dev_priv->l3_parity.remap_info[i/4])
DRM_DEBUG_DRIVER("Clearing remapped register\n"); DRM_DEBUG_DRIVER("Clearing remapped register\n");
I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->mm.l3_remap_info[i/4]); I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
} }
/* Make sure all the writes land before disabling dop clock gating */ /* Make sure all the writes land before disabling dop clock gating */
@ -3402,68 +3442,6 @@ void i915_gem_init_swizzling(struct drm_device *dev)
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB)); I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
} }
void i915_gem_init_ppgtt(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t pd_offset;
struct intel_ring_buffer *ring;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
uint32_t __iomem *pd_addr;
uint32_t pd_entry;
int i;
if (!dev_priv->mm.aliasing_ppgtt)
return;
pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
for (i = 0; i < ppgtt->num_pd_entries; i++) {
dma_addr_t pt_addr;
if (dev_priv->mm.gtt->needs_dmar)
pt_addr = ppgtt->pt_dma_addr[i];
else
pt_addr = ppgtt->pt_pages[i];
pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
pd_entry |= GEN6_PDE_VALID;
writel(pd_entry, pd_addr + i);
}
readl(pd_addr);
pd_offset = ppgtt->pd_offset;
pd_offset /= 64; /* in cachelines, */
pd_offset <<= 16;
if (INTEL_INFO(dev)->gen == 6) {
uint32_t ecochk, gab_ctl, ecobits;
ecobits = I915_READ(GAC_ECO_BITS);
I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
gab_ctl = I915_READ(GAB_CTL);
I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
ecochk = I915_READ(GAM_ECOCHK);
I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
ECOCHK_PPGTT_CACHE64B);
I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
} else if (INTEL_INFO(dev)->gen >= 7) {
I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
/* GFX_MODE is per-ring on gen7+ */
}
for_each_ring(ring, dev_priv, i) {
if (INTEL_INFO(dev)->gen >= 7)
I915_WRITE(RING_MODE_GEN7(ring),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
}
}
static bool static bool
intel_enable_blt(struct drm_device *dev) intel_enable_blt(struct drm_device *dev)
{ {
@ -3486,7 +3464,7 @@ i915_gem_init_hw(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
int ret; int ret;
if (!intel_enable_gtt()) if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
return -EIO; return -EIO;
if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1)) if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))

View File

@ -148,7 +148,7 @@ create_hw_context(struct drm_device *dev,
struct i915_hw_context *ctx; struct i915_hw_context *ctx;
int ret, id; int ret, id;
ctx = kzalloc(sizeof(struct drm_i915_file_private), GFP_KERNEL); ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (ctx == NULL) if (ctx == NULL)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
@ -420,9 +420,8 @@ static int do_switch(struct i915_hw_context *to)
* MI_SET_CONTEXT instead of when the next seqno has completed. * MI_SET_CONTEXT instead of when the next seqno has completed.
*/ */
if (from_obj != NULL) { if (from_obj != NULL) {
u32 seqno = i915_gem_next_request_seqno(ring);
from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
i915_gem_object_move_to_active(from_obj, ring, seqno); i915_gem_object_move_to_active(from_obj, ring);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the * whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be * object dirty. The only exception is that the context must be

View File

@ -22,6 +22,8 @@
* *
*/ */
#define iowrite32(v, addr) writel((v), (addr))
#include <drm/drmP.h> #include <drm/drmP.h>
#include <drm/i915_drm.h> #include <drm/i915_drm.h>
#include "i915_drv.h" #include "i915_drv.h"
@ -32,19 +34,67 @@
#define AGP_USER_MEMORY (AGP_USER_TYPES) #define AGP_USER_MEMORY (AGP_USER_TYPES)
#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1) #define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
typedef uint32_t gtt_pte_t;
/* PPGTT stuff */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PDE_VALID (1 << 0)
/* gen6+ has bits 11:4 for physical addr bits 39:32 */
#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PTE_VALID (1 << 0)
#define GEN6_PTE_UNCACHED (1 << 1)
#define HSW_PTE_UNCACHED (0)
#define GEN6_PTE_CACHE_LLC (2 << 1)
#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
static inline gtt_pte_t pte_encode(struct drm_device *dev,
dma_addr_t addr,
enum i915_cache_level level)
{
gtt_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) {
case I915_CACHE_LLC_MLC:
/* Haswell doesn't set L3 this way */
if (IS_HASWELL(dev))
pte |= GEN6_PTE_CACHE_LLC;
else
pte |= GEN6_PTE_CACHE_LLC_MLC;
break;
case I915_CACHE_LLC:
pte |= GEN6_PTE_CACHE_LLC;
break;
case I915_CACHE_NONE:
if (IS_HASWELL(dev))
pte |= HSW_PTE_UNCACHED;
else
pte |= GEN6_PTE_UNCACHED;
break;
default:
BUG();
}
return pte;
}
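As a worked example (the address is chosen for illustration), encoding the page at physical address 0x2_3456_7000 with I915_CACHE_LLC on a non-Haswell gen6 part folds address bits 39:32 into PTE bits 11:4:

    #include <assert.h>
    #include <stdint.h>

    #define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))

    int main(void)
    {
            uint64_t addr = 0x234567000ull;     /* assumed address, bit 33 set */
            uint32_t pte  = (uint32_t)GEN6_GTT_ADDR_ENCODE(addr);

            pte |= (1 << 0);                    /* GEN6_PTE_VALID */
            pte |= (2 << 1);                    /* GEN6_PTE_CACHE_LLC */

            /* addr bits 39:32 (here 0x2) land in PTE bits 11:4 (0x020) */
            assert(pte == 0x34567025);
            return 0;
    }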
/* PPGTT support for Sandybridge/Gen6 and later */ /* PPGTT support for Sandybridge/Gen6 and later */
static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt, static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
unsigned first_entry, unsigned first_entry,
unsigned num_entries) unsigned num_entries)
{ {
uint32_t *pt_vaddr; gtt_pte_t *pt_vaddr;
uint32_t scratch_pte; gtt_pte_t scratch_pte;
unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES; unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned last_pte, i; unsigned last_pte, i;
scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr); scratch_pte = pte_encode(ppgtt->dev, ppgtt->scratch_page_dma_addr,
scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC; I915_CACHE_LLC);
pt_vaddr = AllocKernelSpace(4096); pt_vaddr = AllocKernelSpace(4096);
@ -56,7 +106,7 @@ static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
if (last_pte > I915_PPGTT_PT_ENTRIES) if (last_pte > I915_PPGTT_PT_ENTRIES)
last_pte = I915_PPGTT_PT_ENTRIES; last_pte = I915_PPGTT_PT_ENTRIES;
MapPage(pt_vaddr,ppgtt->pt_pages[act_pd], 3); MapPage(pt_vaddr,(addr_t)(ppgtt->pt_pages[act_pd]), 3);
for (i = first_pte; i < last_pte; i++) for (i = first_pte; i < last_pte; i++)
pt_vaddr[i] = scratch_pte; pt_vaddr[i] = scratch_pte;
@ -87,13 +137,13 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
return ret; return ret;
ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES; ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
ppgtt->pt_pages = kzalloc(sizeof(dma_addr_t)*ppgtt->num_pd_entries, ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
GFP_KERNEL); GFP_KERNEL);
if (!ppgtt->pt_pages) if (!ppgtt->pt_pages)
goto err_ppgtt; goto err_ppgtt;
for (i = 0; i < ppgtt->num_pd_entries; i++) { for (i = 0; i < ppgtt->num_pd_entries; i++) {
ppgtt->pt_pages[i] = AllocPage(); ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
if (!ppgtt->pt_pages[i]) if (!ppgtt->pt_pages[i])
goto err_pt_alloc; goto err_pt_alloc;
} }
@ -128,7 +178,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
i915_ppgtt_clear_range(ppgtt, 0, i915_ppgtt_clear_range(ppgtt, 0,
ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES); ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(uint32_t); ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t);
dev_priv->mm.aliasing_ppgtt = ppgtt; dev_priv->mm.aliasing_ppgtt = ppgtt;
@ -144,7 +194,7 @@ err_pt_alloc:
// kfree(ppgtt->pt_dma_addr); // kfree(ppgtt->pt_dma_addr);
for (i = 0; i < ppgtt->num_pd_entries; i++) { for (i = 0; i < ppgtt->num_pd_entries; i++) {
if (ppgtt->pt_pages[i]) if (ppgtt->pt_pages[i])
FreePage(ppgtt->pt_pages[i]); FreePage((addr_t)(ppgtt->pt_pages[i]));
} }
kfree(ppgtt->pt_pages); kfree(ppgtt->pt_pages);
err_ppgtt: err_ppgtt:
@ -170,72 +220,67 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
// kfree(ppgtt->pt_dma_addr); // kfree(ppgtt->pt_dma_addr);
for (i = 0; i < ppgtt->num_pd_entries; i++) for (i = 0; i < ppgtt->num_pd_entries; i++)
FreePage(ppgtt->pt_pages[i]); FreePage((addr_t)(ppgtt->pt_pages[i]));
kfree(ppgtt->pt_pages); kfree(ppgtt->pt_pages);
kfree(ppgtt); kfree(ppgtt);
} }
static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt, static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
const struct pagelist *pages, const struct sg_table *pages,
unsigned first_entry, unsigned first_entry,
uint32_t pte_flags) enum i915_cache_level cache_level)
{ {
uint32_t *pt_vaddr, pte; gtt_pte_t *pt_vaddr;
unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES; unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned i, j; unsigned i, j, m, segment_len;
dma_addr_t page_addr; dma_addr_t page_addr;
struct scatterlist *sg;
/* init sg walking */
sg = pages->sgl;
i = 0; i = 0;
segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
m = 0;
pt_vaddr = AllocKernelSpace(4096); pt_vaddr = AllocKernelSpace(4096);
if( pt_vaddr == NULL)
return;
if( pt_vaddr != NULL) while (i < pages->nents) {
{ MapPage(pt_vaddr,(addr_t)ppgtt->pt_pages[act_pd], 3);
while (i < pages->nents)
{
MapPage(pt_vaddr, ppgtt->pt_pages[act_pd], 3);
for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++, i++) { for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
page_addr = pages->page[i]; page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
pte = GEN6_PTE_ADDR_ENCODE(page_addr); pt_vaddr[j] = pte_encode(ppgtt->dev, page_addr,
pt_vaddr[j] = pte | pte_flags; cache_level);
}
first_pte = 0; /* grab the next page */
act_pd++; if (++m == segment_len) {
if (++i == pages->nents)
break;
sg = sg_next(sg);
segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
m = 0;
}
}
first_pte = 0;
act_pd++;
} }
FreeKernelSpace(pt_vaddr); FreeKernelSpace(pt_vaddr);
};
} }
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj, struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level) enum i915_cache_level cache_level)
{ {
uint32_t pte_flags = GEN6_PTE_VALID;
switch (cache_level) {
case I915_CACHE_LLC_MLC:
pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
break;
case I915_CACHE_LLC:
pte_flags |= GEN6_PTE_CACHE_LLC;
break;
case I915_CACHE_NONE:
if (IS_HASWELL(obj->base.dev))
pte_flags |= HSW_PTE_UNCACHED;
else
pte_flags |= GEN6_PTE_UNCACHED;
break;
default:
BUG();
}
i915_ppgtt_insert_sg_entries(ppgtt, i915_ppgtt_insert_sg_entries(ppgtt,
&obj->pages, obj->pages,
obj->gtt_space->start >> PAGE_SHIFT, obj->gtt_space->start >> PAGE_SHIFT,
pte_flags); cache_level);
} }
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
@ -246,23 +291,65 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
obj->base.size >> PAGE_SHIFT); obj->base.size >> PAGE_SHIFT);
} }
/* XXX kill agp_type! */ void i915_gem_init_ppgtt(struct drm_device *dev)
static unsigned int cache_level_to_agp_type(struct drm_device *dev,
enum i915_cache_level cache_level)
{ {
switch (cache_level) { drm_i915_private_t *dev_priv = dev->dev_private;
case I915_CACHE_LLC_MLC: uint32_t pd_offset;
if (INTEL_INFO(dev)->gen >= 6) struct intel_ring_buffer *ring;
return AGP_USER_CACHED_MEMORY_LLC_MLC; struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
/* Older chipsets do not have this extra level of CPU uint32_t __iomem *pd_addr;
* cacheing, so fallthrough and request the PTE simply uint32_t pd_entry;
* as cached. int i;
*/
case I915_CACHE_LLC: if (!dev_priv->mm.aliasing_ppgtt)
return AGP_USER_CACHED_MEMORY; return;
default:
case I915_CACHE_NONE:
return AGP_USER_MEMORY; pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
for (i = 0; i < ppgtt->num_pd_entries; i++) {
dma_addr_t pt_addr;
if (dev_priv->mm.gtt->needs_dmar)
pt_addr = ppgtt->pt_dma_addr[i];
else
pt_addr = page_to_phys(ppgtt->pt_pages[i]);
pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
pd_entry |= GEN6_PDE_VALID;
writel(pd_entry, pd_addr + i);
}
readl(pd_addr);
pd_offset = ppgtt->pd_offset;
pd_offset /= 64; /* in cachelines, */
pd_offset <<= 16;
if (INTEL_INFO(dev)->gen == 6) {
uint32_t ecochk, gab_ctl, ecobits;
ecobits = I915_READ(GAC_ECO_BITS);
I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
gab_ctl = I915_READ(GAB_CTL);
I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
ecochk = I915_READ(GAM_ECOCHK);
I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
ECOCHK_PPGTT_CACHE64B);
I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
} else if (INTEL_INFO(dev)->gen >= 7) {
I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
/* GFX_MODE is per-ring on gen7+ */
}
for_each_ring(ring, dev_priv, i) {
if (INTEL_INFO(dev)->gen >= 7)
I915_WRITE(RING_MODE_GEN7(ring),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
} }
} }
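The page-directory base register takes a cacheline index in its upper half, hence the divide-by-64 and shift-by-16 above. For an assumed pd_offset of 0x1FF000 bytes:

    /* 0x1FF000 bytes / 64 bytes per cacheline = 0x7FC0 cachelines,
     * shifted into bits 31:16 -> RING_PP_DIR_BASE value 0x7FC00000. */
    uint32_t pd_offset = 0x1FF000;
    pd_offset /= 64;
    pd_offset <<= 16;
    assert(pd_offset == 0x7FC00000);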
@ -288,6 +375,34 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
dev_priv->mm.interruptible = interruptible; dev_priv->mm.interruptible = interruptible;
} }
static void i915_ggtt_clear_range(struct drm_device *dev,
unsigned first_entry,
unsigned num_entries)
{
struct drm_i915_private *dev_priv = dev->dev_private;
gtt_pte_t scratch_pte;
gtt_pte_t __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry;
const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
int i;
if (INTEL_INFO(dev)->gen < 6) {
intel_gtt_clear_range(first_entry, num_entries);
return;
}
if (WARN(num_entries > max_entries,
"First entry = %d; Num entries = %d (max=%d)\n",
first_entry, num_entries, max_entries))
num_entries = max_entries;
scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC);
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
readl(gtt_base);
}
#if 0 #if 0
void i915_gem_restore_gtt_mappings(struct drm_device *dev) void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{ {
@ -295,7 +410,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
/* First fill our portion of the GTT with scratch pages */ /* First fill our portion of the GTT with scratch pages */
intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE, i915_ggtt_clear_range(dev, dev_priv->mm.gtt_start / PAGE_SIZE,
(dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE); (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
@ -303,30 +418,105 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
i915_gem_gtt_bind_object(obj, obj->cache_level); i915_gem_gtt_bind_object(obj, obj->cache_level);
} }
intel_gtt_chipset_flush(); i915_gem_chipset_flush(dev);
} }
#endif #endif
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{ {
struct scatterlist *sg, *s;
unsigned int nents;
int i;
if (obj->has_dma_mapping)
return 0;
sg = obj->pages->sgl;
nents = obj->pages->nents;
WARN_ON(nents == 0 || sg[0].length == 0);
for_each_sg(sg, s, nents, i) {
BUG_ON(!sg_page(s));
s->dma_address = sg_phys(s);
}
/* full memory barrier: a locked add is a cheap x86 mfence substitute,
 * making the dma_address stores visible before the PTEs are written */
asm volatile("lock; addl $0,0(%%esp)": : :"memory");
return 0; return 0;
} }
/*
* Binds an object into the global gtt with the specified cache level. The object
* will be accessible to the GPU via commands whose operands reference offsets
* within the global GTT as well as accessible by the GPU through the GMADR
* mapped BAR (dev_priv->mm.gtt->gtt).
*/
static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level level)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct sg_table *st = obj->pages;
struct scatterlist *sg = st->sgl;
const int first_entry = obj->gtt_space->start >> PAGE_SHIFT;
const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry;
int unused, i = 0;
unsigned int len, m = 0;
dma_addr_t addr;
for_each_sg(st->sgl, sg, st->nents, unused) {
len = sg_dma_len(sg) >> PAGE_SHIFT;
for (m = 0; m < len; m++) {
addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
iowrite32(pte_encode(dev, addr, level), &gtt_entries[i]);
i++;
}
}
BUG_ON(i > max_entries);
BUG_ON(i != obj->base.size / PAGE_SIZE);
/* XXX: This serves as a posting read to make sure that the PTE has
* actually been updated. There is some concern that even though
* registers and PTEs are within the same BAR, they are potentially
* subject to NUMA access patterns. Therefore, even with the way we assume
* hardware should work, we must keep this posting read for paranoia.
*/
if (i != 0)
WARN_ON(readl(&gtt_entries[i-1]) != pte_encode(dev, addr, level));
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
* have finished.
*/
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level) enum i915_cache_level cache_level)
{ {
struct drm_device *dev = obj->base.dev; struct drm_device *dev = obj->base.dev;
unsigned int agp_type = cache_level_to_agp_type(dev, cache_level); if (INTEL_INFO(dev)->gen < 6) {
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
intel_gtt_insert_sg_entries(obj->pages,
obj->gtt_space->start >> PAGE_SHIFT,
flags);
} else {
gen6_ggtt_bind_object(obj, cache_level);
}
intel_gtt_insert_sg_entries(&obj->pages,
obj->gtt_space->start >> PAGE_SHIFT,
agp_type);
obj->has_global_gtt_mapping = 1; obj->has_global_gtt_mapping = 1;
} }
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{ {
intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT, i915_ggtt_clear_range(obj->base.dev,
obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT); obj->base.size >> PAGE_SHIFT);
obj->has_global_gtt_mapping = 0; obj->has_global_gtt_mapping = 0;
@ -384,5 +574,276 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start; dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
/* ... but ensure that we clear the entire range. */ /* ... but ensure that we clear the entire range. */
intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE); i915_ggtt_clear_range(dev, start / PAGE_SIZE, (end-start) / PAGE_SIZE);
} }
static int setup_scratch_page(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct page *page;
dma_addr_t dma_addr;
page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
if (page == NULL)
return -ENOMEM;
#ifdef CONFIG_INTEL_IOMMU
dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(dev->pdev, dma_addr))
return -EINVAL;
#else
dma_addr = page_to_phys(page);
#endif
dev_priv->mm.gtt->scratch_page = page;
dev_priv->mm.gtt->scratch_page_dma = dma_addr;
return 0;
}
static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
return snb_gmch_ctl << 20;
}
static inline unsigned int gen6_get_stolen_size(u16 snb_gmch_ctl)
{
snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
return snb_gmch_ctl << 25; /* 32 MB units */
}
static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl)
{
static const int stolen_decoder[] = {
0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT;
snb_gmch_ctl &= IVB_GMCH_GMS_MASK;
return stolen_decoder[snb_gmch_ctl] << 20;
}
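To make the decoders concrete, take an assumed SNB_GMCH_CTRL value of 0x220: GGMS (bits 9:8) reads 2 and GMS (bits 7:3) reads 4, so:

    uint16_t snb_gmch_ctl = 0x220;                              /* assumed */

    unsigned gtt_size    = ((snb_gmch_ctl >> 8) & 0x3) << 20;   /* 2 MB of PTEs */
    unsigned stolen_size = ((snb_gmch_ctl >> 3) & 0x1f) << 25;  /* 4 * 32 MB */

    /* 2 MB of PTEs at 4 bytes each maps 512K pages = 2 GB of GTT space;
     * stolen memory is 128 MB. */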
int i915_gem_gtt_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
phys_addr_t gtt_bus_addr;
u16 snb_gmch_ctl;
int ret;
/* On modern platforms we need not worry ourselves with the legacy
* hostbridge query stuff. Skip it entirely
*/
if (INTEL_INFO(dev)->gen < 6) {
ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
if (!ret) {
DRM_ERROR("failed to set up gmch\n");
return -EIO;
}
dev_priv->mm.gtt = intel_gtt_get();
if (!dev_priv->mm.gtt) {
DRM_ERROR("Failed to initialize GTT\n");
return -ENODEV;
}
return 0;
}
dev_priv->mm.gtt = kzalloc(sizeof(*dev_priv->mm.gtt), GFP_KERNEL);
if (!dev_priv->mm.gtt)
return -ENOMEM;
#ifdef CONFIG_INTEL_IOMMU
dev_priv->mm.gtt->needs_dmar = 1;
#endif
/* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20);
dev_priv->mm.gtt->gma_bus_addr = pci_resource_start(dev->pdev, 2);
/* i9xx_setup */
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
dev_priv->mm.gtt->gtt_total_entries =
gen6_get_total_gtt_size(snb_gmch_ctl) / sizeof(gtt_pte_t);
if (INTEL_INFO(dev)->gen < 7)
dev_priv->mm.gtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
else
dev_priv->mm.gtt->stolen_size = gen7_get_stolen_size(snb_gmch_ctl);
dev_priv->mm.gtt->gtt_mappable_entries = pci_resource_len(dev->pdev, 2) >> PAGE_SHIFT;
/* 64/512MB is the current min/max we actually know of, but this is just a
* coarse sanity check.
*/
if ((dev_priv->mm.gtt->gtt_mappable_entries >> 8) < 64 ||
dev_priv->mm.gtt->gtt_mappable_entries > dev_priv->mm.gtt->gtt_total_entries) {
DRM_ERROR("Unknown GMADR entries (%d)\n",
dev_priv->mm.gtt->gtt_mappable_entries);
ret = -ENXIO;
goto err_out;
}
ret = setup_scratch_page(dev);
if (ret) {
DRM_ERROR("Scratch setup failed\n");
goto err_out;
}
dev_priv->mm.gtt->gtt = ioremap(gtt_bus_addr,
dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t));
if (!dev_priv->mm.gtt->gtt) {
DRM_ERROR("Failed to map the gtt page table\n");
ret = -ENOMEM;
goto err_out;
}
/* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. */
DRM_INFO("Memory usable by graphics device = %dM\n", dev_priv->mm.gtt->gtt_total_entries >> 8);
DRM_DEBUG_DRIVER("GMADR size = %dM\n", dev_priv->mm.gtt->gtt_mappable_entries >> 8);
DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20);
return 0;
err_out:
kfree(dev_priv->mm.gtt);
return ret;
}
struct scatterlist *sg_next(struct scatterlist *sg)
{
if (sg_is_last(sg))
return NULL;
sg++;
if (unlikely(sg_is_chain(sg)))
sg = sg_chain_ptr(sg);
return sg;
}
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
sg_free_fn *free_fn)
{
struct scatterlist *sgl, *next;
if (unlikely(!table->sgl))
return;
sgl = table->sgl;
while (table->orig_nents) {
unsigned int alloc_size = table->orig_nents;
unsigned int sg_size;
/*
* If we have more than max_ents segments left,
* then assign 'next' to the sg table after the current one.
* sg_size is then one less than alloc size, since the last
* element is the chain pointer.
*/
if (alloc_size > max_ents) {
next = sg_chain_ptr(&sgl[max_ents - 1]);
alloc_size = max_ents;
sg_size = alloc_size - 1;
} else {
sg_size = alloc_size;
next = NULL;
}
table->orig_nents -= sg_size;
kfree(sgl);
sgl = next;
}
table->sgl = NULL;
}
void sg_free_table(struct sg_table *table)
{
__sg_free_table(table, SG_MAX_SINGLE_ALLOC, NULL);
}
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
struct scatterlist *sg, *prv;
unsigned int left;
unsigned int max_ents = SG_MAX_SINGLE_ALLOC;
#ifndef ARCH_HAS_SG_CHAIN
BUG_ON(nents > max_ents);
#endif
memset(table, 0, sizeof(*table));
left = nents;
prv = NULL;
do {
unsigned int sg_size, alloc_size = left;
if (alloc_size > max_ents) {
alloc_size = max_ents;
sg_size = alloc_size - 1;
} else
sg_size = alloc_size;
left -= sg_size;
sg = kmalloc(alloc_size * sizeof(struct scatterlist), gfp_mask);
if (unlikely(!sg)) {
/*
* Adjust entry count to reflect that the last
* entry of the previous table won't be used for
* linkage. Without this, sg_kfree() may get
* confused.
*/
if (prv)
table->nents = ++table->orig_nents;
goto err;
}
sg_init_table(sg, alloc_size);
table->nents = table->orig_nents += sg_size;
/*
* If this is the first mapping, assign the sg table header.
* If this is not the first mapping, chain previous part.
*/
if (prv)
sg_chain(prv, max_ents, sg);
else
table->sgl = sg;
/*
* If no more entries after this one, mark the end
*/
if (!left)
sg_mark_end(&sg[sg_size - 1]);
prv = sg;
} while (left);
return 0;
err:
__sg_free_table(table, SG_MAX_SINGLE_ALLOC, NULL);
return -ENOMEM;
}
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
memset(sgl, 0, sizeof(*sgl) * nents);
#ifdef CONFIG_DEBUG_SG
{
unsigned int i;
for (i = 0; i < nents; i++)
sgl[i].sg_magic = SG_MAGIC;
}
#endif
sg_mark_end(&sgl[nents - 1]);
}
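A short usage sketch of the helpers above (npages and pages[] are assumed inputs; for_each_sg() and sg_set_page() are the standard scatterlist accessors, which this port may or may not provide):

    struct sg_table st;
    struct scatterlist *sg;
    int i, ret;

    ret = sg_alloc_table(&st, npages, GFP_KERNEL);
    if (ret)
            return ret;

    for_each_sg(st.sgl, sg, st.nents, i)
            sg_set_page(sg, pages[i], PAGE_SIZE, 0);  /* one page per entry */

    /* walking with sg_next() mirrors i915_ppgtt_insert_sg_entries() */
    for (sg = st.sgl; sg != NULL; sg = sg_next(sg))
            ;   /* sg_dma_address()/sg_dma_len() are valid once mapped */

    sg_free_table(&st);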

View File

@ -28,7 +28,6 @@
#define pr_fmt(fmt) ": " fmt #define pr_fmt(fmt) ": " fmt
#include <linux/irqreturn.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <drm/drmP.h> #include <drm/drmP.h>
#include <drm/i915_drm.h> #include <drm/i915_drm.h>
@ -40,15 +39,6 @@
#define pr_err(fmt, ...) \ #define pr_err(fmt, ...) \
printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
#define DRM_IRQ_ARGS void *arg
static struct drm_driver {
irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
void (*irq_preinstall) (struct drm_device *dev);
int (*irq_postinstall) (struct drm_device *dev);
}drm_driver;
static struct drm_driver *driver = &drm_driver;
#define DRM_WAKEUP( queue ) wake_up( queue ) #define DRM_WAKEUP( queue ) wake_up( queue )
#define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue ) #define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )
@ -170,7 +160,10 @@ static int
i915_pipe_enabled(struct drm_device *dev, int pipe) i915_pipe_enabled(struct drm_device *dev, int pipe)
{ {
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE; enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
} }
/* Called from drm generic code, passed a 'crtc', which /* Called from drm generic code, passed a 'crtc', which
@ -260,7 +253,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0) if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
return; return;
mutex_lock(&dev_priv->dev->struct_mutex); mutex_lock(&dev_priv->rps.hw_lock);
if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
new_delay = dev_priv->rps.cur_delay + 1; new_delay = dev_priv->rps.cur_delay + 1;
@ -275,7 +268,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
gen6_set_rps(dev_priv->dev, new_delay); gen6_set_rps(dev_priv->dev, new_delay);
} }
mutex_unlock(&dev_priv->dev->struct_mutex); mutex_unlock(&dev_priv->rps.hw_lock);
} }
@ -291,7 +284,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
static void ivybridge_parity_work(struct work_struct *work) static void ivybridge_parity_work(struct work_struct *work)
{ {
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
parity_error_work); l3_parity.error_work);
u32 error_status, row, bank, subbank; u32 error_status, row, bank, subbank;
char *parity_event[5]; char *parity_event[5];
uint32_t misccpctl; uint32_t misccpctl;
@ -355,7 +348,7 @@ static void ivybridge_handle_parity_error(struct drm_device *dev)
I915_WRITE(GTIMR, dev_priv->gt_irq_mask); I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags); spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
queue_work(dev_priv->wq, &dev_priv->parity_error_work); queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
} }
#endif #endif
@ -364,6 +357,7 @@ static void snb_gt_irq_handler(struct drm_device *dev,
struct drm_i915_private *dev_priv, struct drm_i915_private *dev_priv,
u32 gt_iir) u32 gt_iir)
{ {
printf("%s\n", __FUNCTION__);
if (gt_iir & (GEN6_RENDER_USER_INTERRUPT | if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT)) GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
@ -405,10 +399,10 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
POSTING_READ(GEN6_PMIMR); POSTING_READ(GEN6_PMIMR);
spin_unlock_irqrestore(&dev_priv->rps.lock, flags); spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
queue_work(dev_priv->wq, &dev_priv->rps.work); // queue_work(dev_priv->wq, &dev_priv->rps.work);
} }
static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS) static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{ {
struct drm_device *dev = (struct drm_device *) arg; struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@ -419,6 +413,8 @@ static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
u32 pipe_stats[I915_MAX_PIPES]; u32 pipe_stats[I915_MAX_PIPES];
bool blc_event; bool blc_event;
printf("%s\n", __FUNCTION__);
atomic_inc(&dev_priv->irq_received); atomic_inc(&dev_priv->irq_received);
while (true) { while (true) {
@ -479,8 +475,8 @@ static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true; blc_event = true;
// if (pm_iir & GEN6_PM_DEFERRED_EVENTS) if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
// gen6_queue_rps_work(dev_priv, pm_iir); gen6_queue_rps_work(dev_priv, pm_iir);
I915_WRITE(GTIIR, gt_iir); I915_WRITE(GTIIR, gt_iir);
I915_WRITE(GEN6_PMIIR, pm_iir); I915_WRITE(GEN6_PMIIR, pm_iir);
@ -496,6 +492,8 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe; int pipe;
printf("%s\n", __FUNCTION__);
if (pch_iir & SDE_AUDIO_POWER_MASK) if (pch_iir & SDE_AUDIO_POWER_MASK)
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
(pch_iir & SDE_AUDIO_POWER_MASK) >> (pch_iir & SDE_AUDIO_POWER_MASK) >>
@ -560,7 +558,7 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
I915_READ(FDI_RX_IIR(pipe))); I915_READ(FDI_RX_IIR(pipe)));
} }
static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{ {
struct drm_device *dev = (struct drm_device *) arg; struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@ -568,6 +566,8 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
irqreturn_t ret = IRQ_NONE; irqreturn_t ret = IRQ_NONE;
int i; int i;
printf("%s\n", __FUNCTION__);
atomic_inc(&dev_priv->irq_received); atomic_inc(&dev_priv->irq_received);
/* disable master interrupt before clearing iir */ /* disable master interrupt before clearing iir */
@ -636,13 +636,14 @@ static void ilk_gt_irq_handler(struct drm_device *dev,
notify_ring(dev, &dev_priv->ring[VCS]); notify_ring(dev, &dev_priv->ring[VCS]);
} }
static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS) static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{ {
struct drm_device *dev = (struct drm_device *) arg; struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE; int ret = IRQ_NONE;
u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
u32 hotplug_mask;
printf("%s\n", __FUNCTION__);
atomic_inc(&dev_priv->irq_received); atomic_inc(&dev_priv->irq_received);
@ -660,11 +661,6 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
(!IS_GEN6(dev) || pm_iir == 0)) (!IS_GEN6(dev) || pm_iir == 0))
goto done; goto done;
if (HAS_PCH_CPT(dev))
hotplug_mask = SDE_HOTPLUG_MASK_CPT;
else
hotplug_mask = SDE_HOTPLUG_MASK;
ret = IRQ_HANDLED; ret = IRQ_HANDLED;
if (IS_GEN5(dev)) if (IS_GEN5(dev))
@ -986,6 +982,8 @@ static void i915_record_ring_state(struct drm_device *dev,
= I915_READ(RING_SYNC_0(ring->mmio_base)); = I915_READ(RING_SYNC_0(ring->mmio_base));
error->semaphore_mboxes[ring->id][1] error->semaphore_mboxes[ring->id][1]
= I915_READ(RING_SYNC_1(ring->mmio_base)); = I915_READ(RING_SYNC_1(ring->mmio_base));
error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
} }
if (INTEL_INFO(dev)->gen >= 4) { if (INTEL_INFO(dev)->gen >= 4) {
@ -1009,6 +1007,7 @@ static void i915_record_ring_state(struct drm_device *dev,
error->acthd[ring->id] = intel_ring_get_active_head(ring); error->acthd[ring->id] = intel_ring_get_active_head(ring);
error->head[ring->id] = I915_READ_HEAD(ring); error->head[ring->id] = I915_READ_HEAD(ring);
error->tail[ring->id] = I915_READ_TAIL(ring); error->tail[ring->id] = I915_READ_TAIL(ring);
error->ctl[ring->id] = I915_READ_CTL(ring);
error->cpu_ring_head[ring->id] = ring->head; error->cpu_ring_head[ring->id] = ring->head;
error->cpu_ring_tail[ring->id] = ring->tail; error->cpu_ring_tail[ring->id] = ring->tail;
@ -1103,6 +1102,16 @@ static void i915_capture_error_state(struct drm_device *dev)
else else
error->ier = I915_READ(IER); error->ier = I915_READ(IER);
if (INTEL_INFO(dev)->gen >= 6)
error->derrmr = I915_READ(DERRMR);
if (IS_VALLEYVIEW(dev))
error->forcewake = I915_READ(FORCEWAKE_VLV);
else if (INTEL_INFO(dev)->gen >= 7)
error->forcewake = I915_READ(FORCEWAKE_MT);
else if (INTEL_INFO(dev)->gen == 6)
error->forcewake = I915_READ(FORCEWAKE);
for_each_pipe(pipe) for_each_pipe(pipe)
error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
@ -1333,7 +1342,9 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
spin_lock_irqsave(&dev->event_lock, flags); spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work; work = intel_crtc->unpin_work;
if (work == NULL || work->pending || !work->enable_stall_check) { if (work == NULL ||
atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
!work->enable_stall_check) {
/* Either the pending flip IRQ arrived, or we're too early. Don't check */ /* Either the pending flip IRQ arrived, or we're too early. Don't check */
spin_unlock_irqrestore(&dev->event_lock, flags); spin_unlock_irqrestore(&dev->event_lock, flags);
return; return;
@ -1648,7 +1659,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
/* Clear & enable PCU event interrupts */ /* Clear & enable PCU event interrupts */
I915_WRITE(DEIIR, DE_PCU_EVENT); I915_WRITE(DEIIR, DE_PCU_EVENT);
I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT); I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); // ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
} }
return 0; return 0;
@ -1710,6 +1721,7 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
u32 enable_mask; u32 enable_mask;
u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
u32 render_irqs;
u16 msid; u16 msid;
enable_mask = I915_DISPLAY_PORT_INTERRUPT; enable_mask = I915_DISPLAY_PORT_INTERRUPT;
@ -1730,11 +1742,11 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
dev_priv->pipestat[1] = 0; dev_priv->pipestat[1] = 0;
/* Hack for broken MSIs on VLV */ /* Hack for broken MSIs on VLV */
pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000); // pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
pci_read_config_word(dev->pdev, 0x98, &msid); // pci_read_config_word(dev->pdev, 0x98, &msid);
msid &= 0xff; /* mask out delivery bits */ // msid &= 0xff; /* mask out delivery bits */
msid |= (1<<14); // msid |= (1<<14);
pci_write_config_word(dev_priv->dev->pdev, 0x98, msid); // pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
I915_WRITE(VLV_IMR, dev_priv->irq_mask); I915_WRITE(VLV_IMR, dev_priv->irq_mask);
I915_WRITE(VLV_IER, enable_mask); I915_WRITE(VLV_IER, enable_mask);
@ -1749,21 +1761,12 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
I915_WRITE(VLV_IIR, 0xffffffff); I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(VLV_IIR, 0xffffffff); I915_WRITE(VLV_IIR, 0xffffffff);
dev_priv->gt_irq_mask = ~0;
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIIR, I915_READ(GTIIR)); I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIMR, dev_priv->gt_irq_mask); I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
I915_WRITE(GTIER, GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
GT_GEN6_BLT_CS_ERROR_INTERRUPT | render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
GT_GEN6_BLT_USER_INTERRUPT | GEN6_BLITTER_USER_INTERRUPT;
GT_GEN6_BSD_USER_INTERRUPT | I915_WRITE(GTIER, render_irqs);
GT_GEN6_BSD_CS_ERROR_INTERRUPT |
GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
GT_PIPE_NOTIFY |
GT_RENDER_CS_ERROR_INTERRUPT |
GT_SYNC_STATUS |
GT_USER_INTERRUPT);
POSTING_READ(GTIER); POSTING_READ(GTIER);
/* ack & enable invalid PTE error interrupts */ /* ack & enable invalid PTE error interrupts */
@ -1781,9 +1784,9 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
hotplug_en |= HDMIC_HOTPLUG_INT_EN; hotplug_en |= HDMIC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
hotplug_en |= HDMID_HOTPLUG_INT_EN; hotplug_en |= HDMID_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
hotplug_en |= SDVOC_HOTPLUG_INT_EN; hotplug_en |= SDVOC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
hotplug_en |= SDVOB_HOTPLUG_INT_EN; hotplug_en |= SDVOB_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
hotplug_en |= CRT_HOTPLUG_INT_EN; hotplug_en |= CRT_HOTPLUG_INT_EN;
@ -1796,7 +1799,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
return 0; return 0;
} }
static void valleyview_irq_uninstall(struct drm_device *dev) static void valleyview_irq_uninstall(struct drm_device *dev)
{ {
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@ -1886,8 +1888,7 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
return 0; return 0;
} }
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
{ {
struct drm_device *dev = (struct drm_device *) arg; struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@ -2068,7 +2069,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
return 0; return 0;
} }
static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS) static irqreturn_t i915_irq_handler(int irq, void *arg)
{ {
struct drm_device *dev = (struct drm_device *) arg; struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@ -2307,7 +2308,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
return 0; return 0;
} }
static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS) static irqreturn_t i965_irq_handler(int irq, void *arg)
{ {
struct drm_device *dev = (struct drm_device *) arg; struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@ -2451,37 +2452,48 @@ void intel_irq_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
if (IS_VALLEYVIEW(dev)) { if (IS_VALLEYVIEW(dev)) {
driver->irq_handler = valleyview_irq_handler; dev->driver->irq_handler = valleyview_irq_handler;
driver->irq_preinstall = valleyview_irq_preinstall; dev->driver->irq_preinstall = valleyview_irq_preinstall;
driver->irq_postinstall = valleyview_irq_postinstall; dev->driver->irq_postinstall = valleyview_irq_postinstall;
} else if (IS_IVYBRIDGE(dev)) { } else if (IS_IVYBRIDGE(dev)) {
/* Share pre & uninstall handlers with ILK/SNB */ /* Share pre & uninstall handlers with ILK/SNB */
driver->irq_handler = ivybridge_irq_handler; dev->driver->irq_handler = ivybridge_irq_handler;
driver->irq_preinstall = ironlake_irq_preinstall; dev->driver->irq_preinstall = ironlake_irq_preinstall;
driver->irq_postinstall = ivybridge_irq_postinstall; dev->driver->irq_postinstall = ivybridge_irq_postinstall;
} else if (IS_HASWELL(dev)) { } else if (IS_HASWELL(dev)) {
/* Share interrupts handling with IVB */ /* Share interrupts handling with IVB */
driver->irq_handler = ivybridge_irq_handler; dev->driver->irq_handler = ivybridge_irq_handler;
driver->irq_preinstall = ironlake_irq_preinstall; dev->driver->irq_preinstall = ironlake_irq_preinstall;
driver->irq_postinstall = ivybridge_irq_postinstall; dev->driver->irq_postinstall = ivybridge_irq_postinstall;
} else if (HAS_PCH_SPLIT(dev)) { } else if (HAS_PCH_SPLIT(dev)) {
driver->irq_handler = ironlake_irq_handler; dev->driver->irq_handler = ironlake_irq_handler;
driver->irq_preinstall = ironlake_irq_preinstall; dev->driver->irq_preinstall = ironlake_irq_preinstall;
driver->irq_postinstall = ironlake_irq_postinstall; dev->driver->irq_postinstall = ironlake_irq_postinstall;
} else { } else {
if (INTEL_INFO(dev)->gen == 2) { if (INTEL_INFO(dev)->gen == 2) {
} else if (INTEL_INFO(dev)->gen == 3) { } else if (INTEL_INFO(dev)->gen == 3) {
driver->irq_handler = i915_irq_handler; dev->driver->irq_preinstall = i915_irq_preinstall;
driver->irq_preinstall = i915_irq_preinstall; dev->driver->irq_postinstall = i915_irq_postinstall;
driver->irq_postinstall = i915_irq_postinstall; dev->driver->irq_handler = i915_irq_handler;
} else { } else {
driver->irq_handler = i965_irq_handler; dev->driver->irq_preinstall = i965_irq_preinstall;
driver->irq_preinstall = i965_irq_preinstall; dev->driver->irq_postinstall = i965_irq_postinstall;
driver->irq_postinstall = i965_irq_postinstall; dev->driver->irq_handler = i965_irq_handler;
} }
} }
printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ;
} }
irqreturn_t intel_irq_handler(struct drm_device *dev)
{
printf("i915 irq\n");
// printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ;
return dev->driver->irq_handler(0, dev);
}
int drm_irq_install(struct drm_device *dev) int drm_irq_install(struct drm_device *dev)
{ {
@ -2495,13 +2507,13 @@ int drm_irq_install(struct drm_device *dev)
/* Driver must have been initialized */ /* Driver must have been initialized */
if (!dev->dev_private) { if (!dev->dev_private) {
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
return -EINVAL; return -EINVAL;
} }
if (dev->irq_enabled) { if (dev->irq_enabled) {
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
return -EBUSY; return -EBUSY;
} }
dev->irq_enabled = 1; dev->irq_enabled = 1;
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
@ -2511,14 +2523,14 @@ int drm_irq_install(struct drm_device *dev)
DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev)); DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
/* Before installing handler */ /* Before installing handler */
if (driver->irq_preinstall) if (dev->driver->irq_preinstall)
driver->irq_preinstall(dev); dev->driver->irq_preinstall(dev);
ret = AttachIntHandler(irq_line, driver->irq_handler, (u32)dev); ret = AttachIntHandler(irq_line, intel_irq_handler, (u32)dev);
/* After installing handler */ /* After installing handler */
if (driver->irq_postinstall) if (dev->driver->irq_postinstall)
ret = driver->irq_postinstall(dev); ret = dev->driver->irq_postinstall(dev);
if (ret < 0) { if (ret < 0) {
DRM_ERROR(__FUNCTION__); DRM_ERROR(__FUNCTION__);

View File

@ -26,6 +26,7 @@
#define _I915_REG_H_ #define _I915_REG_H_
#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
#define _PORT(port, a, b) ((a) + (port)*((b)-(a))) #define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
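These macros linearly interpolate between the instance-A and instance-B register offsets, so index 2 extrapolates a third instance at the same stride. A quick sanity check with the pipe A/B config offsets assumed from i915_reg.h (0x70008 and 0x71008):

    assert(_TRANSCODER(0, 0x70008, 0x71008) == 0x70008);  /* transcoder A */
    assert(_TRANSCODER(1, 0x70008, 0x71008) == 0x71008);  /* transcoder B */
    assert(_TRANSCODER(2, 0x70008, 0x71008) == 0x72008);  /* extrapolated C */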
@ -40,6 +41,14 @@
*/ */
#define INTEL_GMCH_CTRL 0x52 #define INTEL_GMCH_CTRL 0x52
#define INTEL_GMCH_VGA_DISABLE (1 << 1) #define INTEL_GMCH_VGA_DISABLE (1 << 1)
#define SNB_GMCH_CTRL 0x50
#define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */
#define SNB_GMCH_GGMS_MASK 0x3
#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */
#define SNB_GMCH_GMS_MASK 0x1f
#define IVB_GMCH_GMS_SHIFT 4
#define IVB_GMCH_GMS_MASK 0xf
/* PCI config space */ /* PCI config space */
@@ -105,23 +114,6 @@
 #define GEN6_GRDOM_MEDIA (1 << 2)
 #define GEN6_GRDOM_BLT (1 << 3)

-/* PPGTT stuff */
-#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
-#define GEN6_PDE_VALID (1 << 0)
-#define GEN6_PDE_LARGE_PAGE (2 << 0) /* use 32kb pages */
-/* gen6+ has bit 11-4 for physical addr bit 39-32 */
-#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
-#define GEN6_PTE_VALID (1 << 0)
-#define GEN6_PTE_UNCACHED (1 << 1)
-#define HSW_PTE_UNCACHED (0)
-#define GEN6_PTE_CACHE_LLC (2 << 1)
-#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
-#define GEN6_PTE_CACHE_BITS (3 << 1)
-#define GEN6_PTE_GFDT (1 << 3)
-#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)

 #define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228)
 #define RING_PP_DIR_BASE_READ(ring) ((ring)->mmio_base+0x518)
 #define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220)
@@ -241,11 +233,18 @@
  */
 #define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
 #define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
+#define MI_FLUSH_DW_STORE_INDEX (1<<21)
 #define MI_INVALIDATE_TLB (1<<18)
+#define MI_FLUSH_DW_OP_STOREDW (1<<14)
 #define MI_INVALIDATE_BSD (1<<7)
+#define MI_FLUSH_DW_USE_GTT (1<<2)
+#define MI_FLUSH_DW_USE_PPGTT (0<<2)
 #define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
 #define MI_BATCH_NON_SECURE (1)
+/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
 #define MI_BATCH_NON_SECURE_I965 (1<<8)
+#define MI_BATCH_PPGTT_HSW (1<<8)
+#define MI_BATCH_NON_SECURE_HSW (1<<13)
 #define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
 #define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
 #define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
@@ -369,6 +368,7 @@
 #define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */
 #define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */
 #define DPIO_PLL_REFCLK_SEL_SHIFT 16 /* 2 bits */
+#define DPIO_PLL_REFCLK_SEL_MASK 3
 #define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */
 #define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */
 #define _DPIO_REFSFR_B 0x8034
@@ -384,6 +384,9 @@
 #define DPIO_FASTCLK_DISABLE 0x8100
+#define DPIO_DATA_CHANNEL1 0x8220
+#define DPIO_DATA_CHANNEL2 0x8420

 /*
  * Fence registers
  */
@@ -509,11 +512,14 @@
 #define GEN7_ERR_INT 0x44040
 #define ERR_INT_MMIO_UNCLAIMED (1<<13)
+#define DERRMR 0x44050

 /* GM45+ chicken bits -- debug workaround bits that may be required
  * for various sorts of correct behavior. The top 16 bits of each are
  * the enables for writing to the corresponding low bit.
  */
 #define _3D_CHICKEN 0x02084
+#define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10)
 #define _3D_CHICKEN2 0x0208c
 /* Disables pipelining of read flushes past the SF-WIZ interface.
  * Required on all Ironlake steppings according to the B-Spec, but the
@@ -521,14 +527,17 @@
  */
 # define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
 #define _3D_CHICKEN3 0x02090
+#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
 #define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)

 #define MI_MODE 0x0209c
 # define VS_TIMER_DISPATCH (1 << 6)
 # define MI_FLUSH_ENABLE (1 << 12)
+# define ASYNC_FLIP_PERF_DISABLE (1 << 14)

 #define GEN6_GT_MODE 0x20d0
 #define GEN6_GT_MODE_HI (1 << 9)
+#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5)

 #define GFX_MODE 0x02520
 #define GFX_MODE_GEN7 0x0229c
@@ -547,6 +556,8 @@
 #define IIR 0x020a4
 #define IMR 0x020a8
 #define ISR 0x020ac
+#define VLV_GUNIT_CLOCK_GATE 0x182060
+#define GCFG_DIS (1<<8)
 #define VLV_IIR_RW 0x182084
 #define VLV_IER 0x1820a0
 #define VLV_IIR 0x1820a4
@@ -661,6 +672,7 @@
 #define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */

 #define CACHE_MODE_0 0x02120 /* 915+ only */
+#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8)
 #define CM0_IZ_OPT_DISABLE (1<<6)
 #define CM0_ZR_OPT_DISABLE (1<<5)
 #define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
@@ -670,6 +682,8 @@
 #define CM0_RC_OP_FLUSH_DISABLE (1<<0)
 #define BB_ADDR 0x02140 /* 8 bytes */
 #define GFX_FLSH_CNTL 0x02170 /* 915+ only */
+#define GFX_FLSH_CNTL_GEN6 0x101008
+#define GFX_FLSH_CNTL_EN (1<<0)
 #define ECOSKPD 0x021d0
 #define ECO_GATING_CX_ONLY (1<<3)
 #define ECO_FLIP_DONE (1<<0)
@@ -1559,14 +1573,14 @@
 #define _VSYNCSHIFT_B 0x61028

-#define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B)
-#define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B)
-#define HSYNC(pipe) _PIPE(pipe, _HSYNC_A, _HSYNC_B)
-#define VTOTAL(pipe) _PIPE(pipe, _VTOTAL_A, _VTOTAL_B)
-#define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B)
-#define VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B)
+#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B)
+#define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B)
+#define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B)
+#define VTOTAL(trans) _TRANSCODER(trans, _VTOTAL_A, _VTOTAL_B)
+#define VBLANK(trans) _TRANSCODER(trans, _VBLANK_A, _VBLANK_B)
+#define VSYNC(trans) _TRANSCODER(trans, _VSYNC_A, _VSYNC_B)
 #define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
-#define VSYNCSHIFT(pipe) _PIPE(pipe, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
+#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)

 /* VGA port control */
 #define ADPA 0x61100
@@ -2641,6 +2655,7 @@
 #define PIPECONF_GAMMA (1<<24)
 #define PIPECONF_FORCE_BORDER (1<<25)
 #define PIPECONF_INTERLACE_MASK (7 << 21)
+#define PIPECONF_INTERLACE_MASK_HSW (3 << 21)
 /* Note that pre-gen3 does not support interlaced display directly. Panel
  * fitting must be disabled on pre-ilk for interlaced. */
 #define PIPECONF_PROGRESSIVE (0 << 21)
@@ -2711,7 +2726,7 @@
 #define PIPE_12BPC (3 << 5)

 #define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC)
-#define PIPECONF(pipe) _PIPE(pipe, _PIPEACONF, _PIPEBCONF)
+#define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF)
 #define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL)
 #define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH)
 #define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
@@ -2998,12 +3013,19 @@
 #define DISPPLANE_GAMMA_ENABLE (1<<30)
 #define DISPPLANE_GAMMA_DISABLE 0
 #define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
+#define DISPPLANE_YUV422 (0x0<<26)
 #define DISPPLANE_8BPP (0x2<<26)
-#define DISPPLANE_15_16BPP (0x4<<26)
-#define DISPPLANE_16BPP (0x5<<26)
-#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
-#define DISPPLANE_32BPP (0x7<<26)
-#define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26)
+#define DISPPLANE_BGRA555 (0x3<<26)
+#define DISPPLANE_BGRX555 (0x4<<26)
+#define DISPPLANE_BGRX565 (0x5<<26)
+#define DISPPLANE_BGRX888 (0x6<<26)
+#define DISPPLANE_BGRA888 (0x7<<26)
+#define DISPPLANE_RGBX101010 (0x8<<26)
+#define DISPPLANE_RGBA101010 (0x9<<26)
+#define DISPPLANE_BGRX101010 (0xa<<26)
+#define DISPPLANE_RGBX161616 (0xc<<26)
+#define DISPPLANE_RGBX888 (0xe<<26)
+#define DISPPLANE_RGBA888 (0xf<<26)
 #define DISPPLANE_STEREO_ENABLE (1<<25)
 #define DISPPLANE_STEREO_DISABLE 0
 #define DISPPLANE_SEL_PIPE_SHIFT 24
@@ -3024,6 +3046,8 @@
 #define _DSPASIZE 0x70190
 #define _DSPASURF 0x7019C /* 965+ only */
 #define _DSPATILEOFF 0x701A4 /* 965+ only */
+#define _DSPAOFFSET 0x701A4 /* HSW */
+#define _DSPASURFLIVE 0x701AC

 #define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR)
 #define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR)
@@ -3033,6 +3057,8 @@
 #define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
 #define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
 #define DSPLINOFF(plane) DSPADDR(plane)
+#define DSPOFFSET(plane) _PIPE(plane, _DSPAOFFSET, _DSPBOFFSET)
+#define DSPSURFLIVE(plane) _PIPE(plane, _DSPASURFLIVE, _DSPBSURFLIVE)

 /* Display/Sprite base address macros */
 #define DISP_BASEADDR_MASK (0xfffff000)
@@ -3078,6 +3104,8 @@
 #define _DSPBSIZE 0x71190
 #define _DSPBSURF 0x7119C
 #define _DSPBTILEOFF 0x711A4
+#define _DSPBOFFSET 0x711A4
+#define _DSPBSURFLIVE 0x711AC

 /* Sprite A control */
 #define _DVSACNTR 0x72180
@@ -3143,6 +3171,7 @@
 #define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
 #define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
 #define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
+#define DVSSURFLIVE(pipe) _PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)

 #define _SPRA_CTL 0x70280
 #define SPRITE_ENABLE (1<<31)
@@ -3177,6 +3206,8 @@
 #define _SPRA_SURF 0x7029c
 #define _SPRA_KEYMAX 0x702a0
 #define _SPRA_TILEOFF 0x702a4
+#define _SPRA_OFFSET 0x702a4
+#define _SPRA_SURFLIVE 0x702ac
 #define _SPRA_SCALE 0x70304
 #define SPRITE_SCALE_ENABLE (1<<31)
 #define SPRITE_FILTER_MASK (3<<29)
@@ -3197,6 +3228,8 @@
 #define _SPRB_SURF 0x7129c
 #define _SPRB_KEYMAX 0x712a0
 #define _SPRB_TILEOFF 0x712a4
+#define _SPRB_OFFSET 0x712a4
+#define _SPRB_SURFLIVE 0x712ac
 #define _SPRB_SCALE 0x71304
 #define _SPRB_GAMC 0x71400
@@ -3210,8 +3243,10 @@
 #define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
 #define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
 #define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
+#define SPROFFSET(pipe) _PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET)
 #define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
 #define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
+#define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)

 /* VBIOS regs */
 #define VGACNTRL 0x71400
@@ -3246,12 +3281,6 @@
 #define DISPLAY_PORT_PLL_BIOS_1 0x46010
 #define DISPLAY_PORT_PLL_BIOS_2 0x46014

-#define PCH_DSPCLK_GATE_D 0x42020
-# define DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
-# define DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
-# define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7)
-# define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5)

 #define PCH_3DCGDIS0 0x46020
 # define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18)
 # define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1)
@@ -3301,20 +3330,22 @@
 #define _PIPEB_LINK_M2 0x61048
 #define _PIPEB_LINK_N2 0x6104c

-#define PIPE_DATA_M1(pipe) _PIPE(pipe, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
-#define PIPE_DATA_N1(pipe) _PIPE(pipe, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
-#define PIPE_DATA_M2(pipe) _PIPE(pipe, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
-#define PIPE_DATA_N2(pipe) _PIPE(pipe, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
-#define PIPE_LINK_M1(pipe) _PIPE(pipe, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
-#define PIPE_LINK_N1(pipe) _PIPE(pipe, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
-#define PIPE_LINK_M2(pipe) _PIPE(pipe, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
-#define PIPE_LINK_N2(pipe) _PIPE(pipe, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
+#define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
+#define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
+#define PIPE_DATA_M2(tran) _TRANSCODER(tran, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
+#define PIPE_DATA_N2(tran) _TRANSCODER(tran, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
+#define PIPE_LINK_M1(tran) _TRANSCODER(tran, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
+#define PIPE_LINK_N1(tran) _TRANSCODER(tran, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
+#define PIPE_LINK_M2(tran) _TRANSCODER(tran, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
+#define PIPE_LINK_N2(tran) _TRANSCODER(tran, _PIPEA_LINK_N2, _PIPEB_LINK_N2)

 /* CPU panel fitter */
 /* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
 #define _PFA_CTL_1 0x68080
 #define _PFB_CTL_1 0x68880
 #define PF_ENABLE (1<<31)
+#define PF_PIPE_SEL_MASK_IVB (3<<29)
+#define PF_PIPE_SEL_IVB(pipe) ((pipe)<<29)
 #define PF_FILTER_MASK (3<<23)
 #define PF_FILTER_PROGRAMMED (0<<23)
 #define PF_FILTER_MED_3x3 (1<<23)
@@ -3423,15 +3454,13 @@
 #define ILK_HDCP_DISABLE (1<<25)
 #define ILK_eDP_A_DISABLE (1<<24)
 #define ILK_DESKTOP (1<<23)

-#define ILK_DSPCLK_GATE 0x42020
-#define IVB_VRHUNIT_CLK_GATE (1<<28)
-#define ILK_DPARB_CLK_GATE (1<<5)
-#define ILK_DPFD_CLK_GATE (1<<7)
-/* According to spec this bit 7/8/9 of 0x42020 should be set to enable FBC */
-#define ILK_CLK_FBC (1<<7)
-#define ILK_DPFC_DIS1 (1<<8)
-#define ILK_DPFC_DIS2 (1<<9)
+#define ILK_DSPCLK_GATE_D 0x42020
+#define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28)
+#define ILK_DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
+#define ILK_DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
+#define ILK_DPFDUNIT_CLOCK_GATE_ENABLE (1 << 7)
+#define ILK_DPARBUNIT_CLOCK_GATE_ENABLE (1 << 5)

 #define IVB_CHICKEN3 0x4200c
 # define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5)
@@ -3447,14 +3476,21 @@
 #define GEN7_L3CNTLREG1 0xB01C
 #define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C
+#define GEN7_L3AGDIS (1<<19)

 #define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030
 #define GEN7_WA_L3_CHICKEN_MODE 0x20000000

+#define GEN7_L3SQCREG4 0xb034
+#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27)
+
 /* WaCatErrorRejectionIssue */
 #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
 #define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)

+#define HSW_FUSE_STRAP 0x42014
+#define HSW_CDCLK_LIMIT (1 << 24)
+
 /* PCH */

 /* south display engine interrupt: IBX */
@@ -3686,7 +3722,7 @@
 #define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
 #define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)

-#define VLV_VIDEO_DIP_CTL_A 0x60220
+#define VLV_VIDEO_DIP_CTL_A 0x60200
 #define VLV_VIDEO_DIP_DATA_A 0x60208
 #define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210
@@ -3795,17 +3831,25 @@
 #define TRANS_6BPC (2<<5)
 #define TRANS_12BPC (3<<5)

+#define _TRANSA_CHICKEN1 0xf0060
+#define _TRANSB_CHICKEN1 0xf1060
+#define TRANS_CHICKEN1(pipe) _PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
+#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1<<4)
 #define _TRANSA_CHICKEN2 0xf0064
 #define _TRANSB_CHICKEN2 0xf1064
 #define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
-#define TRANS_AUTOTRAIN_GEN_STALL_DIS (1<<31)
+#define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31)

 #define SOUTH_CHICKEN1 0xc2000
 #define FDIA_PHASE_SYNC_SHIFT_OVR 19
 #define FDIA_PHASE_SYNC_SHIFT_EN 18
 #define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
 #define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
+#define FDI_BC_BIFURCATION_SELECT (1 << 12)
 #define SOUTH_CHICKEN2 0xc2004
+#define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13)
+#define FDI_MPHY_IOSFSB_RESET_CTL (1<<12)
 #define DPLS_EDP_PPS_FIX_DIS (1<<0)

 #define _FDI_RXA_CHICKEN 0xc200c
@@ -3816,6 +3860,7 @@
 #define SOUTH_DSPCLK_GATE_D 0xc2020
 #define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
+#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12)

 /* CPU: FDI_TX */
 #define _FDI_TXA_CTL 0x60100
@@ -3877,6 +3922,7 @@
 #define FDI_FS_ERRC_ENABLE (1<<27)
 #define FDI_FE_ERRC_ENABLE (1<<26)
 #define FDI_DP_PORT_WIDTH_X8 (7<<19)
+#define FDI_RX_POLARITY_REVERSED_LPT (1<<16)
 #define FDI_8BPC (0<<16)
 #define FDI_10BPC (1<<16)
 #define FDI_6BPC (2<<16)
@@ -3903,14 +3949,19 @@
 #define _FDI_RXA_MISC 0xf0010
 #define _FDI_RXB_MISC 0xf1010
+#define FDI_RX_PWRDN_LANE1_MASK (3<<26)
+#define FDI_RX_PWRDN_LANE1_VAL(x) ((x)<<26)
+#define FDI_RX_PWRDN_LANE0_MASK (3<<24)
+#define FDI_RX_PWRDN_LANE0_VAL(x) ((x)<<24)
+#define FDI_RX_TP1_TO_TP2_48 (2<<20)
+#define FDI_RX_TP1_TO_TP2_64 (3<<20)
+#define FDI_RX_FDI_DELAY_90 (0x90<<0)
+#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
+
 #define _FDI_RXA_TUSIZE1 0xf0030
 #define _FDI_RXA_TUSIZE2 0xf0038
 #define _FDI_RXB_TUSIZE1 0xf1030
 #define _FDI_RXB_TUSIZE2 0xf1038
-#define FDI_RX_TP1_TO_TP2_48 (2<<20)
-#define FDI_RX_TP1_TO_TP2_64 (3<<20)
-#define FDI_RX_FDI_DELAY_90 (0x90<<0)
-#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
 #define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1)
 #define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
@@ -4003,6 +4054,11 @@
 #define PANEL_LIGHT_ON_DELAY_SHIFT 0

 #define PCH_PP_OFF_DELAYS 0xc720c
+#define PANEL_POWER_PORT_SELECT_MASK (0x3 << 30)
+#define PANEL_POWER_PORT_LVDS (0 << 30)
+#define PANEL_POWER_PORT_DP_A (1 << 30)
+#define PANEL_POWER_PORT_DP_C (2 << 30)
+#define PANEL_POWER_PORT_DP_D (3 << 30)
 #define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000)
 #define PANEL_POWER_DOWN_DELAY_SHIFT 16
 #define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff)
@@ -4050,7 +4106,7 @@
 #define TRANS_DP_CTL_A 0xe0300
 #define TRANS_DP_CTL_B 0xe1300
 #define TRANS_DP_CTL_C 0xe2300
-#define TRANS_DP_CTL(pipe) (TRANS_DP_CTL_A + (pipe) * 0x01000)
+#define TRANS_DP_CTL(pipe) _PIPE(pipe, TRANS_DP_CTL_A, TRANS_DP_CTL_B)
 #define TRANS_DP_OUTPUT_ENABLE (1<<31)
 #define TRANS_DP_PORT_SEL_B (0<<29)
 #define TRANS_DP_PORT_SEL_C (1<<29)
@@ -4108,6 +4164,8 @@
 #define FORCEWAKE_ACK_HSW 0x130044
 #define FORCEWAKE_ACK 0x130090
 #define FORCEWAKE_MT 0xa188 /* multi-threaded */
+#define FORCEWAKE_KERNEL 0x1
+#define FORCEWAKE_USER 0x2
 #define FORCEWAKE_MT_ACK 0x130040
 #define ECOBUS 0xa180
 #define FORCEWAKE_MT_ENABLE (1<<5)
@@ -4220,6 +4278,10 @@
 #define GEN6_READ_OC_PARAMS 0xc
 #define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8
 #define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
+#define GEN6_PCODE_WRITE_RC6VIDS 0x4
+#define GEN6_PCODE_READ_RC6VIDS 0x5
+#define GEN6_ENCODE_RC6_VID(mv) (((mv) / 5) - 245) < 0 ?: 0
+#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) > 0 ? ((vids) * 5) + 245 : 0)
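The RC6 VID codes map linearly to millivolts: decoding is vids × 5 + 245 mV, with 0 reserved as "no voltage". A worked check of the decode macro above (note, as a caution, that the encode macro as committed leans on GCC's `?:` extension and is not cleanly parenthesized; later upstream headers rework the pair, so treat the arithmetic, not the encode macro text, as the intent):

#include <assert.h>

#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) > 0 ? ((vids) * 5) + 245 : 0)

int main(void)
{
    assert(GEN6_DECODE_RC6_VID(0) == 0);    /* 0 decodes to "no voltage" */
    assert(GEN6_DECODE_RC6_VID(1) == 250);  /* 1 * 5 + 245 = 250 mV      */
    assert(GEN6_DECODE_RC6_VID(31) == 400); /* 31 * 5 + 245 = 400 mV     */
    return 0;
}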
 #define GEN6_PCODE_DATA 0x138128
 #define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
@@ -4251,6 +4313,15 @@
 #define GEN7_L3LOG_BASE 0xB070
 #define GEN7_L3LOG_SIZE 0x80

+#define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */
+#define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100
+#define GEN7_MAX_PS_THREAD_DEP (8<<12)
+#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
+
+#define GEN7_ROW_CHICKEN2 0xe4f4
+#define GEN7_ROW_CHICKEN2_GT2 0xf4f4
+#define DOP_CLOCK_GATING_DISABLE (1<<0)
+
 #define G4X_AUD_VID_DID 0x62020
 #define INTEL_AUDIO_DEVCL 0x808629FB
 #define INTEL_AUDIO_DEVBLC 0x80862801
@@ -4380,33 +4451,39 @@
 #define HSW_PWR_WELL_CTL6 0x45414

 /* Per-pipe DDI Function Control */
-#define PIPE_DDI_FUNC_CTL_A 0x60400
-#define PIPE_DDI_FUNC_CTL_B 0x61400
-#define PIPE_DDI_FUNC_CTL_C 0x62400
-#define PIPE_DDI_FUNC_CTL_EDP 0x6F400
-#define DDI_FUNC_CTL(pipe) _PIPE(pipe, PIPE_DDI_FUNC_CTL_A, \
-                                 PIPE_DDI_FUNC_CTL_B)
-#define PIPE_DDI_FUNC_ENABLE (1<<31)
+#define TRANS_DDI_FUNC_CTL_A 0x60400
+#define TRANS_DDI_FUNC_CTL_B 0x61400
+#define TRANS_DDI_FUNC_CTL_C 0x62400
+#define TRANS_DDI_FUNC_CTL_EDP 0x6F400
+#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER(tran, TRANS_DDI_FUNC_CTL_A, \
+                                             TRANS_DDI_FUNC_CTL_B)
+#define TRANS_DDI_FUNC_ENABLE (1<<31)
 /* Those bits are ignored by pipe EDP since it can only connect to DDI A */
-#define PIPE_DDI_PORT_MASK (7<<28)
-#define PIPE_DDI_SELECT_PORT(x) ((x)<<28)
-#define PIPE_DDI_MODE_SELECT_MASK (7<<24)
-#define PIPE_DDI_MODE_SELECT_HDMI (0<<24)
-#define PIPE_DDI_MODE_SELECT_DVI (1<<24)
-#define PIPE_DDI_MODE_SELECT_DP_SST (2<<24)
-#define PIPE_DDI_MODE_SELECT_DP_MST (3<<24)
-#define PIPE_DDI_MODE_SELECT_FDI (4<<24)
-#define PIPE_DDI_BPC_MASK (7<<20)
-#define PIPE_DDI_BPC_8 (0<<20)
-#define PIPE_DDI_BPC_10 (1<<20)
-#define PIPE_DDI_BPC_6 (2<<20)
-#define PIPE_DDI_BPC_12 (3<<20)
-#define PIPE_DDI_PVSYNC (1<<17)
-#define PIPE_DDI_PHSYNC (1<<16)
-#define PIPE_DDI_BFI_ENABLE (1<<4)
-#define PIPE_DDI_PORT_WIDTH_X1 (0<<1)
-#define PIPE_DDI_PORT_WIDTH_X2 (1<<1)
-#define PIPE_DDI_PORT_WIDTH_X4 (3<<1)
+#define TRANS_DDI_PORT_MASK (7<<28)
+#define TRANS_DDI_SELECT_PORT(x) ((x)<<28)
+#define TRANS_DDI_PORT_NONE (0<<28)
+#define TRANS_DDI_MODE_SELECT_MASK (7<<24)
+#define TRANS_DDI_MODE_SELECT_HDMI (0<<24)
+#define TRANS_DDI_MODE_SELECT_DVI (1<<24)
+#define TRANS_DDI_MODE_SELECT_DP_SST (2<<24)
+#define TRANS_DDI_MODE_SELECT_DP_MST (3<<24)
+#define TRANS_DDI_MODE_SELECT_FDI (4<<24)
+#define TRANS_DDI_BPC_MASK (7<<20)
+#define TRANS_DDI_BPC_8 (0<<20)
+#define TRANS_DDI_BPC_10 (1<<20)
+#define TRANS_DDI_BPC_6 (2<<20)
+#define TRANS_DDI_BPC_12 (3<<20)
+#define TRANS_DDI_PVSYNC (1<<17)
+#define TRANS_DDI_PHSYNC (1<<16)
+#define TRANS_DDI_EDP_INPUT_MASK (7<<12)
+#define TRANS_DDI_EDP_INPUT_A_ON (0<<12)
+#define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12)
+#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12)
+#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12)
+#define TRANS_DDI_BFI_ENABLE (1<<4)
+#define TRANS_DDI_PORT_WIDTH_X1 (0<<1)
+#define TRANS_DDI_PORT_WIDTH_X2 (1<<1)
+#define TRANS_DDI_PORT_WIDTH_X4 (3<<1)

 /* DisplayPort Transport Control */
 #define DP_TP_CTL_A 0x64040
@@ -4420,12 +4497,16 @@
 #define DP_TP_CTL_LINK_TRAIN_MASK (7<<8)
 #define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8)
 #define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8)
+#define DP_TP_CTL_LINK_TRAIN_PAT3 (4<<8)
+#define DP_TP_CTL_LINK_TRAIN_IDLE (2<<8)
 #define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8)
+#define DP_TP_CTL_SCRAMBLE_DISABLE (1<<7)

 /* DisplayPort Transport Status */
 #define DP_TP_STATUS_A 0x64044
 #define DP_TP_STATUS_B 0x64144
 #define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
+#define DP_TP_STATUS_IDLE_DONE (1<<25)
 #define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)

 /* DDI Buffer Control */
@@ -4444,6 +4525,7 @@
 #define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
 #define DDI_BUF_EMP_MASK (0xf<<24)
 #define DDI_BUF_IS_IDLE (1<<7)
+#define DDI_A_4_LANES (1<<4)
 #define DDI_PORT_WIDTH_X1 (0<<1)
 #define DDI_PORT_WIDTH_X2 (1<<1)
 #define DDI_PORT_WIDTH_X4 (3<<1)
@@ -4460,6 +4542,10 @@
 #define SBI_ADDR 0xC6000
 #define SBI_DATA 0xC6004
 #define SBI_CTL_STAT 0xC6008
+#define SBI_CTL_DEST_ICLK (0x0<<16)
+#define SBI_CTL_DEST_MPHY (0x1<<16)
+#define SBI_CTL_OP_IORD (0x2<<8)
+#define SBI_CTL_OP_IOWR (0x3<<8)
 #define SBI_CTL_OP_CRRD (0x6<<8)
 #define SBI_CTL_OP_CRWR (0x7<<8)
 #define SBI_RESPONSE_FAIL (0x1<<1)
@@ -4477,10 +4563,12 @@
 #define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0)
 #define SBI_SSCCTL 0x020c
 #define SBI_SSCCTL6 0x060C
+#define SBI_SSCCTL_PATHALT (1<<3)
 #define SBI_SSCCTL_DISABLE (1<<0)
 #define SBI_SSCAUXDIV6 0x0610
 #define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
 #define SBI_DBUFF0 0x2a00
+#define SBI_DBUFF0_ENABLE (1<<0)

 /* LPT PIXCLK_GATE */
 #define PIXCLK_GATE 0xC6020
@@ -4490,8 +4578,8 @@
 /* SPLL */
 #define SPLL_CTL 0x46020
 #define SPLL_PLL_ENABLE (1<<31)
-#define SPLL_PLL_SCC (1<<28)
-#define SPLL_PLL_NON_SCC (2<<28)
+#define SPLL_PLL_SSC (1<<28)
+#define SPLL_PLL_NON_SSC (2<<28)
 #define SPLL_PLL_FREQ_810MHz (0<<26)
 #define SPLL_PLL_FREQ_1350MHz (1<<26)
@@ -4500,7 +4588,7 @@
 #define WRPLL_CTL2 0x46060
 #define WRPLL_PLL_ENABLE (1<<31)
 #define WRPLL_PLL_SELECT_SSC (0x01<<28)
-#define WRPLL_PLL_SELECT_NON_SCC (0x02<<28)
+#define WRPLL_PLL_SELECT_NON_SSC (0x02<<28)
 #define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
 /* WRPLL divider programming */
 #define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
@@ -4517,21 +4605,36 @@
 #define PORT_CLK_SEL_SPLL (3<<29)
 #define PORT_CLK_SEL_WRPLL1 (4<<29)
 #define PORT_CLK_SEL_WRPLL2 (5<<29)
+#define PORT_CLK_SEL_NONE (7<<29)

-/* Pipe clock selection */
-#define PIPE_CLK_SEL_A 0x46140
-#define PIPE_CLK_SEL_B 0x46144
-#define PIPE_CLK_SEL(pipe) _PIPE(pipe, PIPE_CLK_SEL_A, PIPE_CLK_SEL_B)
-/* For each pipe, we need to select the corresponding port clock */
-#define PIPE_CLK_SEL_DISABLED (0x0<<29)
-#define PIPE_CLK_SEL_PORT(x) ((x+1)<<29)
+/* Transcoder clock selection */
+#define TRANS_CLK_SEL_A 0x46140
+#define TRANS_CLK_SEL_B 0x46144
+#define TRANS_CLK_SEL(tran) _TRANSCODER(tran, TRANS_CLK_SEL_A, TRANS_CLK_SEL_B)
+/* For each transcoder, we need to select the corresponding port clock */
+#define TRANS_CLK_SEL_DISABLED (0x0<<29)
+#define TRANS_CLK_SEL_PORT(x) ((x+1)<<29)
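The port-clock field keeps 0 for "disabled", so the macro biases the port index by one before shifting it into the top bits. A quick arithmetic check (port numbering 0 = DDI A is assumed for the example; unsigned literals avoid a signed-shift overflow):

#include <assert.h>

#define TRANS_CLK_SEL_DISABLED (0x0<<29)
#define TRANS_CLK_SEL_PORT(x)  ((x+1)<<29)

int main(void)
{
    assert(TRANS_CLK_SEL_PORT(0u) == (1u << 29)); /* DDI A -> field value 1 */
    assert(TRANS_CLK_SEL_PORT(4u) == (5u << 29)); /* DDI E -> field value 5 */
    assert(TRANS_CLK_SEL_DISABLED == 0);          /* 0 keeps the clock off  */
    return 0;
}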
+#define _TRANSA_MSA_MISC 0x60410
+#define _TRANSB_MSA_MISC 0x61410
+#define TRANS_MSA_MISC(tran) _TRANSCODER(tran, _TRANSA_MSA_MISC, \
+                                         _TRANSB_MSA_MISC)
+#define TRANS_MSA_SYNC_CLK (1<<0)
+#define TRANS_MSA_6_BPC (0<<5)
+#define TRANS_MSA_8_BPC (1<<5)
+#define TRANS_MSA_10_BPC (2<<5)
+#define TRANS_MSA_12_BPC (3<<5)
+#define TRANS_MSA_16_BPC (4<<5)

 /* LCPLL Control */
 #define LCPLL_CTL 0x130040
 #define LCPLL_PLL_DISABLE (1<<31)
 #define LCPLL_PLL_LOCK (1<<30)
+#define LCPLL_CLK_FREQ_MASK (3<<26)
+#define LCPLL_CLK_FREQ_450 (0<<26)
 #define LCPLL_CD_CLOCK_DISABLE (1<<25)
 #define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
+#define LCPLL_CD_SOURCE_FCLK (1<<21)

 /* Pipe WM_LINETIME - watermark line time */
 #define PIPE_WM_LINETIME_A 0x45270
@@ -20,5 +20,7 @@
 #define trace_i915_gem_request_wait_end(a, b)
 #define trace_i915_gem_request_complete(a, b)
 #define trace_intel_gpu_freq_change(a)
+#define trace_i915_reg_rw(a, b, c, d)
+#define trace_i915_ring_wait_begin(a)

 #endif
@@ -735,7 +735,8 @@ void intel_setup_bios(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;

 	/* Set the Panel Power On/Off timings if uninitialized. */
-	if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) {
+	if (!HAS_PCH_SPLIT(dev) &&
+	    I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) {
 		/* Set T2 to 40ms and T5 to 200ms */
 		I915_WRITE(PP_ON_DELAYS, 0x019007d0);
@@ -197,6 +197,11 @@ static int intel_crt_mode_valid(struct drm_connector *connector,
 	if (mode->clock > max_clock)
 		return MODE_CLOCK_HIGH;

+	/* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
+	if (HAS_PCH_LPT(dev) &&
+	    (ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
+		return MODE_CLOCK_HIGH;
+
 	return MODE_OK;
 }
@@ -220,14 +225,20 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 adpa;

-	adpa = ADPA_HOTPLUG_BITS;
+	if (HAS_PCH_SPLIT(dev))
+		adpa = ADPA_HOTPLUG_BITS;
+	else
+		adpa = 0;
+
 	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
 		adpa |= ADPA_HSYNC_ACTIVE_HIGH;
 	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
 		adpa |= ADPA_VSYNC_ACTIVE_HIGH;

 	/* For CPT allow 3 pipe config, for others just use A or B */
-	if (HAS_PCH_CPT(dev))
+	if (HAS_PCH_LPT(dev))
+		; /* Those bits don't exist here */
+	else if (HAS_PCH_CPT(dev))
 		adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
 	else if (intel_crtc->pipe == 0)
 		adpa |= ADPA_PIPE_A_SELECT;
@@ -400,12 +411,16 @@ static int intel_crt_ddc_get_modes(struct drm_connector *connector,
 				struct i2c_adapter *adapter)
 {
 	struct edid *edid;
+	int ret;

 	edid = intel_crt_get_edid(connector, adapter);
 	if (!edid)
 		return 0;

-	return intel_connector_update_modes(connector, edid);
+	ret = intel_connector_update_modes(connector, edid);
+	kfree(edid);
+
+	return ret;
 }

 static bool intel_crt_detect_ddc(struct drm_connector *connector)
@@ -643,10 +658,22 @@ static int intel_crt_set_property(struct drm_connector *connector,
 static void intel_crt_reset(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crt *crt = intel_attached_crt(connector);

-	if (HAS_PCH_SPLIT(dev))
+	if (HAS_PCH_SPLIT(dev)) {
+		u32 adpa;
+
+		adpa = I915_READ(PCH_ADPA);
+		adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+		adpa |= ADPA_HOTPLUG_BITS;
+		I915_WRITE(PCH_ADPA, adpa);
+		POSTING_READ(PCH_ADPA);
+
+		DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
 		crt->force_hotplug_required = 1;
+	}
 }

 /*
@@ -706,7 +733,7 @@ void intel_crt_init(struct drm_device *dev)
 	crt->base.type = INTEL_OUTPUT_ANALOG;
 	crt->base.cloneable = true;
-	if (IS_HASWELL(dev) || IS_I830(dev))
+	if (IS_I830(dev))
 		crt->base.crtc_mask = (1 << 0);
 	else
 		crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
@@ -726,6 +753,9 @@ void intel_crt_init(struct drm_device *dev)
 	crt->base.disable = intel_disable_crt;
 	crt->base.enable = intel_enable_crt;
+	if (IS_HASWELL(dev))
+		crt->base.get_hw_state = intel_ddi_get_hw_state;
+	else
 		crt->base.get_hw_state = intel_crt_get_hw_state;
 	intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -743,18 +773,14 @@ void intel_crt_init(struct drm_device *dev)
 	 * Configure the automatic hotplug detection stuff
 	 */
 	crt->force_hotplug_required = 0;
-	if (HAS_PCH_SPLIT(dev)) {
-		u32 adpa;
-
-		adpa = I915_READ(PCH_ADPA);
-		adpa &= ~ADPA_CRT_HOTPLUG_MASK;
-		adpa |= ADPA_HOTPLUG_BITS;
-		I915_WRITE(PCH_ADPA, adpa);
-		POSTING_READ(PCH_ADPA);
-
-		DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
-		crt->force_hotplug_required = 1;
-	}

 	dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
+
+	/*
+	 * TODO: find a proper way to discover whether we need to set the
+	 * polarity reversal bit or not, instead of relying on the BIOS.
+	 */
+	if (HAS_PCH_LPT(dev))
+		dev_priv->fdi_rx_polarity_reversed =
+		     !!(I915_READ(_FDI_RXA_CTL) & FDI_RX_POLARITY_REVERSED_LPT);
 }
(3 file diffs suppressed because they are too large)
@@ -103,25 +103,7 @@
 #define INTEL_OUTPUT_HDMI 6
 #define INTEL_OUTPUT_DISPLAYPORT 7
 #define INTEL_OUTPUT_EDP 8
+#define INTEL_OUTPUT_UNKNOWN 9

-/* Intel Pipe Clone Bit */
-#define INTEL_HDMIB_CLONE_BIT 1
-#define INTEL_HDMIC_CLONE_BIT 2
-#define INTEL_HDMID_CLONE_BIT 3
-#define INTEL_HDMIE_CLONE_BIT 4
-#define INTEL_HDMIF_CLONE_BIT 5
-#define INTEL_SDVO_NON_TV_CLONE_BIT 6
-#define INTEL_SDVO_TV_CLONE_BIT 7
-#define INTEL_SDVO_LVDS_CLONE_BIT 8
-#define INTEL_ANALOG_CLONE_BIT 9
-#define INTEL_TV_CLONE_BIT 10
-#define INTEL_DP_B_CLONE_BIT 11
-#define INTEL_DP_C_CLONE_BIT 12
-#define INTEL_DP_D_CLONE_BIT 13
-#define INTEL_LVDS_CLONE_BIT 14
-#define INTEL_DVO_TMDS_CLONE_BIT 15
-#define INTEL_DVO_LVDS_CLONE_BIT 16
-#define INTEL_EDP_CLONE_BIT 17

 #define INTEL_DVO_CHIP_NONE 0
 #define INTEL_DVO_CHIP_LVDS 1
@@ -191,6 +173,11 @@ struct intel_encoder {
 	int crtc_mask;
 };

+struct intel_panel {
+	struct drm_display_mode *fixed_mode;
+	int fitting_mode;
+};
+
 struct intel_connector {
 	struct drm_connector base;
 	/*
@@ -207,12 +194,19 @@ struct intel_connector {
 	/* Reads out the current hw, returning true if the connector is enabled
 	 * and active (i.e. dpms ON state). */
 	bool (*get_hw_state)(struct intel_connector *);
+
+	/* Panel info for eDP and LVDS */
+	struct intel_panel panel;
+
+	/* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
+	struct edid *edid;
 };

 struct intel_crtc {
 	struct drm_crtc base;
 	enum pipe pipe;
 	enum plane plane;
+	enum transcoder cpu_transcoder;
 	u8 lut_r[256], lut_g[256], lut_b[256];
 	/*
 	 * Whether the crtc and the connected output pipeline is active. Implies
@@ -226,6 +220,8 @@ struct intel_crtc {
 	struct intel_unpin_work *unpin_work;
 	int fdi_lanes;

+	atomic_t unpin_work_count;
+
 	/* Display surface base address adjustement for pageflips. Note that on
 	 * gen4+ this only adjusts up to a tile, offsets within a tile are
 	 * handled in the hw itself (with the TILEOFF register). */
@@ -240,12 +236,14 @@ struct intel_crtc {

 	/* We can share PLLs across outputs if the timings match */
 	struct intel_pch_pll *pch_pll;
+	uint32_t ddi_pll_sel;
 };

 struct intel_plane {
 	struct drm_plane base;
 	enum pipe pipe;
 	struct drm_i915_gem_object *obj;
+	bool can_scale;
 	int max_downscale;
 	u32 lut_r[1024], lut_g[1024], lut_b[1024];
 	void (*update_plane)(struct drm_plane *plane,
@@ -345,10 +343,8 @@ struct dip_infoframe {
 } __attribute__((packed));

 struct intel_hdmi {
-	struct intel_encoder base;
 	u32 sdvox_reg;
 	int ddc_bus;
-	int ddi_port;
 	uint32_t color_range;
 	bool has_hdmi_sink;
 	bool has_audio;
@@ -359,18 +355,15 @@ struct intel_hdmi {
 			       struct drm_display_mode *adjusted_mode);
 };

-#define DP_RECEIVER_CAP_SIZE 0xf
 #define DP_MAX_DOWNSTREAM_PORTS 0x10
 #define DP_LINK_CONFIGURATION_SIZE 9

 struct intel_dp {
-	struct intel_encoder base;
 	uint32_t output_reg;
 	uint32_t DP;
 	uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
 	bool has_audio;
 	enum hdmi_force_audio force_audio;
-	enum port port;
 	uint32_t color_range;
 	uint8_t link_bw;
 	uint8_t lane_count;
@@ -385,11 +378,16 @@ struct intel_dp {
 	int panel_power_cycle_delay;
 	int backlight_on_delay;
 	int backlight_off_delay;
-	struct drm_display_mode *panel_fixed_mode; /* for eDP */
 	struct delayed_work panel_vdd_work;
 	bool want_panel_vdd;
-	struct edid *edid; /* cached EDID for eDP */
-	int edid_mode_count;
+	struct intel_connector *attached_connector;
+};
+
+struct intel_digital_port {
+	struct intel_encoder base;
+	enum port port;
+	struct intel_dp dp;
+	struct intel_hdmi hdmi;
 };

 static inline struct drm_crtc *
@@ -408,11 +406,14 @@ intel_get_crtc_for_plane(struct drm_device *dev, int plane)

 struct intel_unpin_work {
 	struct work_struct work;
-	struct drm_device *dev;
+	struct drm_crtc *crtc;
 	struct drm_i915_gem_object *old_fb_obj;
 	struct drm_i915_gem_object *pending_flip_obj;
 	struct drm_pending_vblank_event *event;
-	int pending;
+	atomic_t pending;
+#define INTEL_FLIP_INACTIVE 0
+#define INTEL_FLIP_PENDING 1
+#define INTEL_FLIP_COMPLETE 2
 	bool enable_stall_check;
 };
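The flip bookkeeping moves from a plain `int pending` to an atomic tri-state, so the interrupt path and the ioctl path can observe and advance the flip state without holding a common lock. A sketch of the transition pattern (C11 atomics stand in for the kernel's `atomic_t` API; the state names are the ones defined above):

#include <stdatomic.h>
#include <assert.h>

#define INTEL_FLIP_INACTIVE 0
#define INTEL_FLIP_PENDING  1
#define INTEL_FLIP_COMPLETE 2

int main(void)
{
    atomic_int pending = INTEL_FLIP_INACTIVE;

    /* Ioctl path: queue a flip. */
    atomic_store(&pending, INTEL_FLIP_PENDING);

    /* IRQ path: only complete a flip that is actually pending. */
    int expected = INTEL_FLIP_PENDING;
    if (atomic_compare_exchange_strong(&pending, &expected,
                                       INTEL_FLIP_COMPLETE))
        assert(atomic_load(&pending) == INTEL_FLIP_COMPLETE);
    return 0;
}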
@@ -423,6 +424,8 @@ struct intel_fbc_work {
 	int interval;
 };

+int intel_pch_rawclk(struct drm_device *dev);
+
 int intel_connector_update_modes(struct drm_connector *connector,
 				struct edid *edid);
 int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
@@ -433,7 +436,12 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector)
 extern void intel_crt_init(struct drm_device *dev);
 extern void intel_hdmi_init(struct drm_device *dev,
 			    int sdvox_reg, enum port port);
+extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+				      struct intel_connector *intel_connector);
 extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
+extern bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
+				  const struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode);
 extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
 extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
 			    bool is_sdvob);
@@ -446,10 +454,27 @@ extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj);
 extern bool intel_lvds_init(struct drm_device *dev);
 extern void intel_dp_init(struct drm_device *dev, int output_reg,
 			  enum port port);
+extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+				    struct intel_connector *intel_connector);
 void
 intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
 		 struct drm_display_mode *adjusted_mode);
+extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
+extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
+extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
+extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
+extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
+extern bool intel_dp_mode_fixup(struct drm_encoder *encoder,
+				const struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode);
 extern bool intel_dpd_is_edp(struct drm_device *dev);
+extern void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
+extern void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_on(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_off(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
+extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
 extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
 extern int intel_edp_target_clock(struct intel_encoder *,
 				  struct drm_display_mode *mode);
@@ -459,6 +484,10 @@ extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
 				      enum plane plane);

 /* intel_panel.c */
+extern int intel_panel_init(struct intel_panel *panel,
+			    struct drm_display_mode *fixed_mode);
+extern void intel_panel_fini(struct intel_panel *panel);
+
 extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
 				   struct drm_display_mode *adjusted_mode);
 extern void intel_pch_panel_fitting(struct drm_device *dev,
@@ -467,7 +496,7 @@ extern void intel_pch_panel_fitting(struct drm_device *dev,
 				    struct drm_display_mode *adjusted_mode);
 extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
 extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
-extern int intel_panel_setup_backlight(struct drm_device *dev);
+extern int intel_panel_setup_backlight(struct drm_connector *connector);
 extern void intel_panel_enable_backlight(struct drm_device *dev,
 					 enum pipe pipe);
 extern void intel_panel_disable_backlight(struct drm_device *dev);
@@ -501,6 +530,31 @@ static inline struct intel_encoder *intel_attached_encoder(struct drm_connector
 	return to_intel_connector(connector)->encoder;
 }

+static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
+{
+	struct intel_digital_port *intel_dig_port =
+		container_of(encoder, struct intel_digital_port, base.base);
+	return &intel_dig_port->dp;
+}
+
+static inline struct intel_digital_port *
+enc_to_dig_port(struct drm_encoder *encoder)
+{
+	return container_of(encoder, struct intel_digital_port, base.base);
+}
+
+static inline struct intel_digital_port *
+dp_to_dig_port(struct intel_dp *intel_dp)
+{
+	return container_of(intel_dp, struct intel_digital_port, dp);
+}
+
+static inline struct intel_digital_port *
+hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
+{
+	return container_of(intel_hdmi, struct intel_digital_port, hdmi);
+}
+
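The pattern behind these helpers: `struct intel_digital_port` embeds the encoder plus both a DP and an HDMI state block by value, and `container_of` walks from a pointer to any embedded member back to the enclosing port. A self-contained sketch of the mechanism (structs simplified to placeholders; only the layout trick matters):

#include <stddef.h>
#include <assert.h>

/* container_of as used in the kernel: member pointer -> enclosing struct. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct intel_dp   { int output_reg; };
struct intel_hdmi { int sdvox_reg; };

struct intel_digital_port {
    int               port;   /* stands in for enum port */
    struct intel_dp   dp;     /* embedded by value       */
    struct intel_hdmi hdmi;   /* embedded by value       */
};

int main(void)
{
    struct intel_digital_port dig = { .port = 1 };
    struct intel_dp *dp = &dig.dp;

    /* Recover the enclosing port from the embedded member. */
    struct intel_digital_port *back =
        container_of(dp, struct intel_digital_port, dp);
    assert(back == &dig && back->port == 1);
    return 0;
}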
 extern void intel_connector_attach_encoder(struct intel_connector *connector,
 					   struct intel_encoder *encoder);
 extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
@@ -509,8 +563,12 @@ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
 						    struct drm_crtc *crtc);
 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
+extern enum transcoder
+intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
+			     enum pipe pipe);
 extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
 extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
+extern int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);

 struct intel_load_detect_pipe {
 	struct drm_framebuffer *release_fb;
@@ -578,6 +636,10 @@ extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
 extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
 			 struct drm_display_mode *mode);

+extern unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y,
+						      unsigned int bpp,
+						      unsigned int pitch);
+
 extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
 				     struct drm_file *file_priv);
 extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
@@ -601,12 +663,22 @@ extern void intel_disable_gt_powersave(struct drm_device *dev);
 extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
 extern void ironlake_teardown_rc6(struct drm_device *dev);

+extern void intel_enable_ddi(struct intel_encoder *encoder);
+extern void intel_disable_ddi(struct intel_encoder *encoder);
 extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
 				   enum pipe *pipe);
-extern void intel_ddi_mode_set(struct drm_encoder *encoder,
-			       struct drm_display_mode *mode,
-			       struct drm_display_mode *adjusted_mode);
+extern int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
+extern void intel_ddi_pll_init(struct drm_device *dev);
+extern void intel_ddi_enable_pipe_func(struct drm_crtc *crtc);
+extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
+					      enum transcoder cpu_transcoder);
+extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
+extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
+extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
+extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock);
+extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
+extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
+extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
+extern bool
+intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
+extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);

 #endif /* __INTEL_DRV_H__ */
@@ -36,10 +36,15 @@
 #include <drm/i915_drm.h>
 #include "i915_drv.h"

+static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi)
+{
+	return hdmi_to_dig_port(intel_hdmi)->base.base.dev;
+}
+
 static void
 assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
 {
-	struct drm_device *dev = intel_hdmi->base.base.dev;
+	struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t enabled_bits;
@@ -51,13 +56,14 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)

 struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
 {
-	return container_of(encoder, struct intel_hdmi, base.base);
+	struct intel_digital_port *intel_dig_port =
+		container_of(encoder, struct intel_digital_port, base.base);
+	return &intel_dig_port->hdmi;
 }

 static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
 {
-	return container_of(intel_attached_encoder(connector),
-			    struct intel_hdmi, base);
+	return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base);
 }

 void intel_dip_infoframe_csum(struct dip_infoframe *frame)
@@ -334,6 +340,8 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
 	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
 		avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;

+	avi_if.body.avi.VIC = drm_mode_cea_vic(adjusted_mode);
+
 	intel_set_infoframe(encoder, &avi_if);
 }
@@ -754,7 +762,7 @@ static int intel_hdmi_mode_valid(struct drm_connector *connector,
 	return MODE_OK;
 }

-static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
+bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
 			   const struct drm_display_mode *mode,
 			   struct drm_display_mode *adjusted_mode)
 {
@ -763,7 +771,7 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi) static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi)
{ {
struct drm_device *dev = intel_hdmi->base.base.dev; struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t bit; uint32_t bit;
@ -786,6 +794,9 @@ static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector, bool force) intel_hdmi_detect(struct drm_connector *connector, bool force)
{ {
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct intel_digital_port *intel_dig_port =
hdmi_to_dig_port(intel_hdmi);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_i915_private *dev_priv = connector->dev->dev_private; struct drm_i915_private *dev_priv = connector->dev->dev_private;
struct edid *edid; struct edid *edid;
enum drm_connector_status status = connector_status_disconnected; enum drm_connector_status status = connector_status_disconnected;
@ -814,6 +825,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO) if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO)
intel_hdmi->has_audio = intel_hdmi->has_audio =
(intel_hdmi->force_audio == HDMI_AUDIO_ON); (intel_hdmi->force_audio == HDMI_AUDIO_ON);
intel_encoder->type = INTEL_OUTPUT_HDMI;
} }
return status; return status;
@@ -859,10 +871,12 @@ intel_hdmi_set_property(struct drm_connector *connector,
               uint64_t val)
 {
     struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+    struct intel_digital_port *intel_dig_port =
+        hdmi_to_dig_port(intel_hdmi);
     struct drm_i915_private *dev_priv = connector->dev->dev_private;
     int ret;

-    ret = drm_connector_property_set_value(connector, property, val);
+    ret = drm_object_property_set_value(&connector->base, property, val);
     if (ret)
         return ret;
 #if 0
@@ -899,8 +913,8 @@ intel_hdmi_set_property(struct drm_connector *connector,
     return -EINVAL;

 done:
-    if (intel_hdmi->base.base.crtc) {
-        struct drm_crtc *crtc = intel_hdmi->base.base.crtc;
+    if (intel_dig_port->base.base.crtc) {
+        struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
         intel_set_mode(crtc, &crtc->mode,
                    crtc->x, crtc->y, crtc->fb);
     }
@@ -915,12 +929,6 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
     kfree(connector);
 }

-static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs_hsw = {
-    .mode_fixup = intel_hdmi_mode_fixup,
-    .mode_set = intel_ddi_mode_set,
-    .disable = intel_encoder_noop,
-};
-
 static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
     .mode_fixup = intel_hdmi_mode_fixup,
     .mode_set = intel_hdmi_mode_set,
@@ -952,43 +960,24 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
     intel_attach_broadcast_rgb_property(connector);
 }

-void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
+void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+                   struct intel_connector *intel_connector)
 {
+    struct drm_connector *connector = &intel_connector->base;
+    struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
+    struct intel_encoder *intel_encoder = &intel_dig_port->base;
+    struct drm_device *dev = intel_encoder->base.dev;
     struct drm_i915_private *dev_priv = dev->dev_private;
-    struct drm_connector *connector;
-    struct intel_encoder *intel_encoder;
-    struct intel_connector *intel_connector;
-    struct intel_hdmi *intel_hdmi;
-
-    intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL);
-    if (!intel_hdmi)
-        return;
-
-    intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
-    if (!intel_connector) {
-        kfree(intel_hdmi);
-        return;
-    }
-
-    intel_encoder = &intel_hdmi->base;
-    drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
-             DRM_MODE_ENCODER_TMDS);
-
-    connector = &intel_connector->base;
+    enum port port = intel_dig_port->port;
+
     drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
                DRM_MODE_CONNECTOR_HDMIA);
     drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);

-    intel_encoder->type = INTEL_OUTPUT_HDMI;
-
     connector->polled = DRM_CONNECTOR_POLL_HPD;
     connector->interlace_allowed = 1;
     connector->doublescan_allowed = 0;
-    intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
-    intel_encoder->cloneable = false;
-
-    intel_hdmi->ddi_port = port;
+
     switch (port) {
     case PORT_B:
         intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
@@ -1008,8 +997,6 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
         BUG();
     }

-    intel_hdmi->sdvox_reg = sdvox_reg;
-
     if (!HAS_PCH_SPLIT(dev)) {
         intel_hdmi->write_infoframe = g4x_write_infoframe;
         intel_hdmi->set_infoframes = g4x_set_infoframes;
@@ -1027,22 +1014,11 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
         intel_hdmi->set_infoframes = cpt_set_infoframes;
     }

-    if (IS_HASWELL(dev)) {
-        intel_encoder->enable = intel_enable_ddi;
-        intel_encoder->disable = intel_disable_ddi;
-        intel_encoder->get_hw_state = intel_ddi_get_hw_state;
-        drm_encoder_helper_add(&intel_encoder->base,
-                       &intel_hdmi_helper_funcs_hsw);
-    } else {
-        intel_encoder->enable = intel_enable_hdmi;
-        intel_encoder->disable = intel_disable_hdmi;
-        intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
-        drm_encoder_helper_add(&intel_encoder->base,
-                       &intel_hdmi_helper_funcs);
-    }
-
-    intel_connector->get_hw_state = intel_connector_get_hw_state;
+    if (IS_HASWELL(dev))
+        intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
+    else
+        intel_connector->get_hw_state = intel_connector_get_hw_state;

     intel_hdmi_add_properties(intel_hdmi, connector);

     intel_connector_attach_encoder(intel_connector, intel_encoder);
@@ -1057,3 +1033,42 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
         I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
     }
 }
+
+void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
+{
+    struct intel_digital_port *intel_dig_port;
+    struct intel_encoder *intel_encoder;
+    struct drm_encoder *encoder;
+    struct intel_connector *intel_connector;
+
+    intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+    if (!intel_dig_port)
+        return;
+
+    intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+    if (!intel_connector) {
+        kfree(intel_dig_port);
+        return;
+    }
+
+    intel_encoder = &intel_dig_port->base;
+    encoder = &intel_encoder->base;
+
+    drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
+             DRM_MODE_ENCODER_TMDS);
+    drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
+
+    intel_encoder->enable = intel_enable_hdmi;
+    intel_encoder->disable = intel_disable_hdmi;
+    intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
+
+    intel_encoder->type = INTEL_OUTPUT_HDMI;
+    intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+    intel_encoder->cloneable = false;
+
+    intel_dig_port->port = port;
+    intel_dig_port->hdmi.sdvox_reg = sdvox_reg;
+    intel_dig_port->dp.output_reg = 0;
+
+    intel_hdmi_init_connector(intel_dig_port, intel_connector);
+}

View File: intel_i2c.c

@@ -1,4 +1,4 @@
 /*
  * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
  * Copyright © 2006-2008,2010 Intel Corporation
  *   Jesse Barnes <jesse.barnes@intel.com>

@@ -210,15 +210,15 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
     u16 len = msg->len;
     u8 *buf = msg->buf;

     I915_WRITE(GMBUS1 + reg_offset,
            gmbus1_index |
            GMBUS_CYCLE_WAIT |
            (len << GMBUS_BYTE_COUNT_SHIFT) |
            (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
            GMBUS_SLAVE_READ | GMBUS_SW_RDY);
     while (len) {
         int ret;
         u32 val, loop = 0;
         u32 gmbus2;

         ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
@@ -229,11 +229,11 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
         if (gmbus2 & GMBUS_SATOER)
             return -ENXIO;

         val = I915_READ(GMBUS3 + reg_offset);
         do {
             *buf++ = val & 0xff;
             val >>= 8;
         } while (--len && ++loop < 4);
     }

     return 0;
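GMBUS3 is a 32-bit data FIFO, so the read path above peels up to four bytes out of each register read. A standalone sketch of just that unpacking step, with the register reads simulated (names here are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Take up to four little-endian bytes from one 32-bit FIFO word,
 * mirroring the do/while loop in gmbus_xfer_read(). */
static int unpack_fifo_word(uint32_t val, uint8_t *buf, int len)
{
    int n;
    for (n = 0; n < 4 && n < len; n++) {
        buf[n] = val & 0xff;
        val >>= 8;
    }
    return n;   /* bytes consumed from this word */
}

int main(void)
{
    uint8_t out[6];
    int total = 0;
    /* Simulated GMBUS3 reads for a 6-byte transfer. */
    uint32_t words[] = { 0x44332211, 0x00006655 };
    for (int i = 0; total < 6 && i < 2; i++)
        total += unpack_fifo_word(words[i], out + total, 6 - total);
    for (int i = 0; i < total; i++)
        printf("%02x ", out[i]);
    printf("\n");   /* prints: 11 22 33 44 55 66 */
    return 0;
}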
@@ -245,30 +245,30 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
     int reg_offset = dev_priv->gpio_mmio_base;
     u16 len = msg->len;
     u8 *buf = msg->buf;
     u32 val, loop;

     val = loop = 0;
     while (len && loop < 4) {
         val |= *buf++ << (8 * loop++);
         len -= 1;
     }

     I915_WRITE(GMBUS3 + reg_offset, val);
     I915_WRITE(GMBUS1 + reg_offset,
            GMBUS_CYCLE_WAIT |
            (msg->len << GMBUS_BYTE_COUNT_SHIFT) |
            (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
            GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
     while (len) {
         int ret;
         u32 gmbus2;

         val = loop = 0;
         do {
             val |= *buf++ << (8 * loop);
         } while (--len && ++loop < 4);

         I915_WRITE(GMBUS3 + reg_offset, val);

         ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
                (GMBUS_SATOER | GMBUS_HW_RDY),
@@ -338,7 +338,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
     if (bus->force_bit) {
         ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
         goto out;
     }

     reg_offset = dev_priv->gpio_mmio_base;
@@ -432,7 +432,7 @@ timeout:
     I915_WRITE(GMBUS0 + reg_offset, 0);

     /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
-    bus->force_bit = true;
+    bus->force_bit = 1;
     ret = i2c_bit_algo.master_xfer(adapter, msgs, num);

 out:
@@ -491,10 +491,13 @@ int intel_setup_gmbus(struct drm_device *dev)

         /* gmbus seems to be broken on i830 */
         if (IS_I830(dev))
-            bus->force_bit = true;
+            bus->force_bit = 1;

         intel_gpio_setup(bus, port);
+
+        ret = i2c_add_adapter(&bus->adapter);
+        if (ret)
+            goto err;
     }

     intel_i2c_reset(dev_priv->dev);
@@ -502,10 +505,10 @@ int intel_setup_gmbus(struct drm_device *dev)
     return 0;

 err:
-//    while (--i) {
-//        struct intel_gmbus *bus = &dev_priv->gmbus[i];
-//        i2c_del_adapter(&bus->adapter);
-//    }
+    while (--i) {
+        struct intel_gmbus *bus = &dev_priv->gmbus[i];
+        i2c_del_adapter(&bus->adapter);
+    }
     return ret;
 }

@@ -529,7 +532,10 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
 {
     struct intel_gmbus *bus = to_intel_gmbus(adapter);

-    bus->force_bit = force_bit;
+    bus->force_bit += force_bit ? 1 : -1;
+    DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n",
+              force_bit ? "en" : "dis", adapter->name,
+              bus->force_bit);
 }
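Note that force_bit also changes meaning here: it was a plain bool and becomes a counter, so independent users can nest enable/disable calls and bit-banging stays selected while any user still needs it. A toy model of the new semantics:

#include <stdio.h>

static int force_bit;   /* 0 = use GMBUS, >0 = use GPIO bit-banging */

static void gmbus_force_bit(int enable)
{
    force_bit += enable ? 1 : -1;
    printf("%sabling bit-banging, count now %d\n",
           enable ? "en" : "dis", force_bit);
}

int main(void)
{
    gmbus_force_bit(1);   /* user A: count 1 */
    gmbus_force_bit(1);   /* user B: count 2 */
    gmbus_force_bit(0);   /* user A done: count 1, still forced */
    gmbus_force_bit(0);   /* user B done: count 0, GMBUS again */
    return 0;
}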
 void intel_teardown_gmbus(struct drm_device *dev)

@@ -539,6 +545,6 @@ void intel_teardown_gmbus(struct drm_device *dev)
     for (i = 0; i < GMBUS_NUM_PORTS; i++) {
         struct intel_gmbus *bus = &dev_priv->gmbus[i];
-//        i2c_del_adapter(&bus->adapter);
+        i2c_del_adapter(&bus->adapter);
     }
 }

View File: intel_lvds.c

@@ -40,28 +40,30 @@
 //#include <linux/acpi.h>

 /* Private structure for the integrated LVDS support */
-struct intel_lvds {
+struct intel_lvds_connector {
+    struct intel_connector base;
+
+//    struct notifier_block lid_notifier;
+};
+
+struct intel_lvds_encoder {
     struct intel_encoder base;
-    struct edid *edid;
-    int fitting_mode;
+
     u32 pfit_control;
     u32 pfit_pgm_ratios;
     bool pfit_dirty;
-    struct drm_display_mode *fixed_mode;
+
+    struct intel_lvds_connector *attached_connector;
 };

-static struct intel_lvds *to_intel_lvds(struct drm_encoder *encoder)
+static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder)
 {
-    return container_of(encoder, struct intel_lvds, base.base);
+    return container_of(encoder, struct intel_lvds_encoder, base.base);
 }

-static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
+static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *connector)
 {
-    return container_of(intel_attached_encoder(connector),
-                struct intel_lvds, base);
+    return container_of(connector, struct intel_lvds_connector, base.base);
 }
 static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,

@@ -96,7 +98,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
 static void intel_enable_lvds(struct intel_encoder *encoder)
 {
     struct drm_device *dev = encoder->base.dev;
-    struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base);
+    struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
     struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
     struct drm_i915_private *dev_priv = dev->dev_private;
     u32 ctl_reg, lvds_reg, stat_reg;
@@ -113,7 +115,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)

     I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);

-    if (intel_lvds->pfit_dirty) {
+    if (lvds_encoder->pfit_dirty) {
         /*
          * Enable automatic panel scaling so that non-native modes
          * fill the screen. The panel fitter should only be
@@ -121,12 +123,12 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
          * register description and PRM.
          */
         DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
-                  intel_lvds->pfit_control,
-                  intel_lvds->pfit_pgm_ratios);
+                  lvds_encoder->pfit_control,
+                  lvds_encoder->pfit_pgm_ratios);

-        I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
-        I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
-        intel_lvds->pfit_dirty = false;
+        I915_WRITE(PFIT_PGM_RATIOS, lvds_encoder->pfit_pgm_ratios);
+        I915_WRITE(PFIT_CONTROL, lvds_encoder->pfit_control);
+        lvds_encoder->pfit_dirty = false;
     }

     I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
@@ -140,7 +142,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
 static void intel_disable_lvds(struct intel_encoder *encoder)
 {
     struct drm_device *dev = encoder->base.dev;
-    struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base);
+    struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
     struct drm_i915_private *dev_priv = dev->dev_private;
     u32 ctl_reg, lvds_reg, stat_reg;

@@ -160,9 +162,9 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
     if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
         DRM_ERROR("timed out waiting for panel to power off\n");

-    if (intel_lvds->pfit_control) {
+    if (lvds_encoder->pfit_control) {
         I915_WRITE(PFIT_CONTROL, 0);
-        intel_lvds->pfit_dirty = true;
+        lvds_encoder->pfit_dirty = true;
     }

     I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
@@ -172,8 +174,8 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
 static int intel_lvds_mode_valid(struct drm_connector *connector,
                  struct drm_display_mode *mode)
 {
-    struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
-    struct drm_display_mode *fixed_mode = intel_lvds->fixed_mode;
+    struct intel_connector *intel_connector = to_intel_connector(connector);
+    struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;

     if (mode->hdisplay > fixed_mode->hdisplay)
         return MODE_PANEL;
@@ -249,8 +251,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 {
     struct drm_device *dev = encoder->dev;
     struct drm_i915_private *dev_priv = dev->dev_private;
-    struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
-    struct intel_crtc *intel_crtc = intel_lvds->base.new_crtc;
+    struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
+    struct intel_connector *intel_connector =
+        &lvds_encoder->attached_connector->base;
+    struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc;
     u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
     int pipe;

@@ -260,7 +264,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
         return false;
     }

-    if (intel_encoder_check_is_cloned(&intel_lvds->base))
+    if (intel_encoder_check_is_cloned(&lvds_encoder->base))
         return false;

     /*
@@ -269,10 +273,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
      * with the panel scaling set up to source from the H/VDisplay
      * of the original mode.
      */
-    intel_fixed_panel_mode(intel_lvds->fixed_mode, adjusted_mode);
+    intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
+                   adjusted_mode);

     if (HAS_PCH_SPLIT(dev)) {
-        intel_pch_panel_fitting(dev, intel_lvds->fitting_mode,
+        intel_pch_panel_fitting(dev,
+                    intel_connector->panel.fitting_mode,
                     mode, adjusted_mode);
         return true;
     }
@@ -298,7 +304,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
     drm_mode_set_crtcinfo(adjusted_mode, 0);

-    switch (intel_lvds->fitting_mode) {
+    switch (intel_connector->panel.fitting_mode) {
     case DRM_MODE_SCALE_CENTER:
         /*
          * For centered modes, we have to calculate border widths &
@@ -396,11 +402,11 @@ out:
     if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither)
         pfit_control |= PANEL_8TO6_DITHER_ENABLE;

-    if (pfit_control != intel_lvds->pfit_control ||
-        pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
-        intel_lvds->pfit_control = pfit_control;
-        intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
-        intel_lvds->pfit_dirty = true;
+    if (pfit_control != lvds_encoder->pfit_control ||
+        pfit_pgm_ratios != lvds_encoder->pfit_pgm_ratios) {
+        lvds_encoder->pfit_control = pfit_control;
+        lvds_encoder->pfit_pgm_ratios = pfit_pgm_ratios;
+        lvds_encoder->pfit_dirty = true;
     }

     dev_priv->lvds_border_bits = border;
@@ -449,14 +455,15 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
  */
 static int intel_lvds_get_modes(struct drm_connector *connector)
 {
-    struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
+    struct intel_lvds_connector *lvds_connector = to_lvds_connector(connector);
     struct drm_device *dev = connector->dev;
     struct drm_display_mode *mode;

-    if (intel_lvds->edid)
-        return drm_add_edid_modes(connector, intel_lvds->edid);
+    /* use cached edid if we have one */
+    if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
+        return drm_add_edid_modes(connector, lvds_connector->base.edid);

-    mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
+    mode = drm_mode_duplicate(dev, lvds_connector->base.panel.fixed_mode);
     if (mode == NULL)
         return 0;
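The IS_ERR_OR_NULL() test above works because the cached edid field is now a tri-state: a real pointer (cached EDID), NULL (never probed), or an errno encoded as a pointer (probed and failed, so don't re-probe). A self-contained sketch of the idiom, with simplified stand-ins for the kernel's err-pointer macros:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define ERR_PTR(err)        ((void *)(long)(err))
#define IS_ERR(ptr)         ((unsigned long)(ptr) >= (unsigned long)-4095)
#define IS_ERR_OR_NULL(ptr) (!(ptr) || IS_ERR(ptr))

static void *cached;    /* NULL: never probed; ERR_PTR: probed and failed */

static void *get_blob(int probe_ok)
{
    if (!IS_ERR_OR_NULL(cached))
        return cached;                /* cache hit */
    if (IS_ERR(cached))
        return NULL;                  /* earlier failure cached: no retry */
    cached = probe_ok ? malloc(16) : ERR_PTR(-ENOENT);
    return IS_ERR(cached) ? NULL : cached;
}

int main(void)
{
    printf("first:  %p\n", get_blob(0));  /* fails, caches -ENOENT */
    printf("second: %p\n", get_blob(1));  /* still NULL: failure cached */
    return 0;
}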
@@ -497,10 +504,11 @@ static const struct dmi_system_id intel_no_modeset_on_lid[] = {
 static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
                 void *unused)
 {
-    struct drm_i915_private *dev_priv =
-        container_of(nb, struct drm_i915_private, lid_notifier);
-    struct drm_device *dev = dev_priv->dev;
-    struct drm_connector *connector = dev_priv->int_lvds_connector;
+    struct intel_lvds_connector *lvds_connector =
+        container_of(nb, struct intel_lvds_connector, lid_notifier);
+    struct drm_connector *connector = &lvds_connector->base.base;
+    struct drm_device *dev = connector->dev;
+    struct drm_i915_private *dev_priv = dev->dev_private;

     if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
         return NOTIFY_OK;
@@ -509,9 +517,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
      * check and update the status of LVDS connector after receiving
      * the LID nofication event.
      */
-    if (connector)
-        connector->status = connector->funcs->detect(connector,
-                                 false);
+    connector->status = connector->funcs->detect(connector, false);

     /* Don't force modeset on machines where it causes a GPU lockup */
     if (dmi_check_system(intel_no_modeset_on_lid))
@@ -527,7 +533,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
     dev_priv->modeset_on_lid = 0;

     mutex_lock(&dev->mode_config.mutex);
-    intel_modeset_check_state(dev);
+    intel_modeset_setup_hw_state(dev, true);
     mutex_unlock(&dev->mode_config.mutex);

     return NOTIFY_OK;
@@ -543,13 +549,16 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
  */
 static void intel_lvds_destroy(struct drm_connector *connector)
 {
-    struct drm_device *dev = connector->dev;
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct intel_lvds_connector *lvds_connector =
+        to_lvds_connector(connector);

-    intel_panel_destroy_backlight(dev);
+    if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
+        kfree(lvds_connector->base.edid);

-//    if (dev_priv->lid_notifier.notifier_call)
-//        acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
+    intel_panel_destroy_backlight(connector->dev);
+    intel_panel_fini(&lvds_connector->base.panel);

     drm_sysfs_connector_remove(connector);
     drm_connector_cleanup(connector);
     kfree(connector);
@@ -559,22 +568,24 @@ static int intel_lvds_set_property(struct drm_connector *connector,
                    struct drm_property *property,
                    uint64_t value)
 {
-    struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
+    struct intel_connector *intel_connector = to_intel_connector(connector);
     struct drm_device *dev = connector->dev;

     if (property == dev->mode_config.scaling_mode_property) {
-        struct drm_crtc *crtc = intel_lvds->base.base.crtc;
+        struct drm_crtc *crtc;

         if (value == DRM_MODE_SCALE_NONE) {
             DRM_DEBUG_KMS("no scaling not supported\n");
             return -EINVAL;
         }

-        if (intel_lvds->fitting_mode == value) {
+        if (intel_connector->panel.fitting_mode == value) {
             /* the LVDS scaling property is not changed */
             return 0;
         }
-        intel_lvds->fitting_mode = value;
+        intel_connector->panel.fitting_mode = value;
+
+        crtc = intel_attached_encoder(connector)->base.crtc;
         if (crtc && crtc->enabled) {
             /*
              * If the CRTC is enabled, the display will be changed
@@ -763,14 +774,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
             DMI_MATCH(DMI_BOARD_NAME, "MS-7469"),
         },
     },
-    {
-        .callback = intel_no_lvds_dmi_callback,
-        .ident = "ZOTAC ZBOXSD-ID12/ID13",
-        .matches = {
-            DMI_MATCH(DMI_BOARD_VENDOR, "ZOTAC"),
-            DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"),
-        },
-    },
     {
         .callback = intel_no_lvds_dmi_callback,
         .ident = "Gigabyte GA-D525TUD",
@@ -914,12 +917,15 @@ static bool intel_lvds_supported(struct drm_device *dev)
 bool intel_lvds_init(struct drm_device *dev)
 {
     struct drm_i915_private *dev_priv = dev->dev_private;
-    struct intel_lvds *intel_lvds;
+    struct intel_lvds_encoder *lvds_encoder;
     struct intel_encoder *intel_encoder;
+    struct intel_lvds_connector *lvds_connector;
     struct intel_connector *intel_connector;
     struct drm_connector *connector;
     struct drm_encoder *encoder;
     struct drm_display_mode *scan; /* *modes, *bios_mode; */
+    struct drm_display_mode *fixed_mode = NULL;
+    struct edid *edid;
     struct drm_crtc *crtc;
     u32 lvds;
     int pipe;
@@ -947,23 +953,25 @@ bool intel_lvds_init(struct drm_device *dev)
         }
     }

-    intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL);
-    if (!intel_lvds) {
+    lvds_encoder = kzalloc(sizeof(struct intel_lvds_encoder), GFP_KERNEL);
+    if (!lvds_encoder)
+        return false;
+
+    lvds_connector = kzalloc(sizeof(struct intel_lvds_connector), GFP_KERNEL);
+    if (!lvds_connector) {
+        kfree(lvds_encoder);
         return false;
     }

-    intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
-    if (!intel_connector) {
-        kfree(intel_lvds);
-        return false;
-    }
+    lvds_encoder->attached_connector = lvds_connector;

     if (!HAS_PCH_SPLIT(dev)) {
-        intel_lvds->pfit_control = I915_READ(PFIT_CONTROL);
+        lvds_encoder->pfit_control = I915_READ(PFIT_CONTROL);
     }

-    intel_encoder = &intel_lvds->base;
+    intel_encoder = &lvds_encoder->base;
     encoder = &intel_encoder->base;
+    intel_connector = &lvds_connector->base;
     connector = &intel_connector->base;
     drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
                DRM_MODE_CONNECTOR_LVDS);
@@ -995,14 +1003,10 @@ bool intel_lvds_init(struct drm_device *dev)

     /* create the scaling mode property */
     drm_mode_create_scaling_mode_property(dev);
-    /*
-     * the initial panel fitting mode will be FULL_SCREEN.
-     */
-    drm_connector_attach_property(&intel_connector->base,
+    drm_object_attach_property(&connector->base,
                       dev->mode_config.scaling_mode_property,
                       DRM_MODE_SCALE_ASPECT);
-    intel_lvds->fitting_mode = DRM_MODE_SCALE_ASPECT;
+    intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
     /*
      * LVDS discovery:
      * 1) check for EDID on DDC
@@ -1017,20 +1021,21 @@ bool intel_lvds_init(struct drm_device *dev)
      * Attempt to get the fixed panel mode from DDC. Assume that the
      * preferred mode is the right one.
      */
-    intel_lvds->edid = drm_get_edid(connector,
-                    intel_gmbus_get_adapter(dev_priv,
-                                pin));
-    if (intel_lvds->edid) {
-        if (drm_add_edid_modes(connector,
-                       intel_lvds->edid)) {
+    edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, pin));
+    if (edid) {
+        if (drm_add_edid_modes(connector, edid)) {
             drm_mode_connector_update_edid_property(connector,
-                            intel_lvds->edid);
+                                edid);
         } else {
-            kfree(intel_lvds->edid);
-            intel_lvds->edid = NULL;
+            kfree(edid);
+            edid = ERR_PTR(-EINVAL);
         }
+    } else {
+        edid = ERR_PTR(-ENOENT);
     }
-    if (!intel_lvds->edid) {
+    lvds_connector->base.edid = edid;
+
+    if (IS_ERR_OR_NULL(edid)) {
         /* Didn't get an EDID, so
          * Set wide sync ranges so we get all modes
          * handed to valid_mode for checking
@@ -1043,22 +1048,26 @@ bool intel_lvds_init(struct drm_device *dev)
     list_for_each_entry(scan, &connector->probed_modes, head) {
         if (scan->type & DRM_MODE_TYPE_PREFERRED) {
-            intel_lvds->fixed_mode =
-                drm_mode_duplicate(dev, scan);
-            intel_find_lvds_downclock(dev,
-                          intel_lvds->fixed_mode,
-                          connector);
-            goto out;
+            DRM_DEBUG_KMS("using preferred mode from EDID: ");
+            drm_mode_debug_printmodeline(scan);
+
+            fixed_mode = drm_mode_duplicate(dev, scan);
+            if (fixed_mode) {
+                intel_find_lvds_downclock(dev, fixed_mode,
+                              connector);
+                goto out;
+            }
         }
     }

     /* Failed to get EDID, what about VBT? */
     if (dev_priv->lfp_lvds_vbt_mode) {
-        intel_lvds->fixed_mode =
-            drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
-        if (intel_lvds->fixed_mode) {
-            intel_lvds->fixed_mode->type |=
-                DRM_MODE_TYPE_PREFERRED;
+        DRM_DEBUG_KMS("using mode from VBT: ");
+        drm_mode_debug_printmodeline(dev_priv->lfp_lvds_vbt_mode);
+
+        fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+        if (fixed_mode) {
+            fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
             goto out;
         }
     }
@@ -1078,16 +1087,17 @@ bool intel_lvds_init(struct drm_device *dev)
     crtc = intel_get_crtc_for_pipe(dev, pipe);

     if (crtc && (lvds & LVDS_PORT_EN)) {
-        intel_lvds->fixed_mode = intel_crtc_mode_get(dev, crtc);
-        if (intel_lvds->fixed_mode) {
-            intel_lvds->fixed_mode->type |=
-                DRM_MODE_TYPE_PREFERRED;
+        fixed_mode = intel_crtc_mode_get(dev, crtc);
+        if (fixed_mode) {
+            DRM_DEBUG_KMS("using current (BIOS) mode: ");
+            drm_mode_debug_printmodeline(fixed_mode);
+            fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
             goto out;
         }
     }

     /* If we still don't have a mode after all that, give up. */
-    if (!intel_lvds->fixed_mode)
+    if (!fixed_mode)
         goto failed;
 out:
@@ -1107,11 +1117,10 @@ out:
 //        DRM_DEBUG_KMS("lid notifier registration failed\n");
 //        dev_priv->lid_notifier.notifier_call = NULL;
 //    }
-    /* keep the LVDS connector */
-    dev_priv->int_lvds_connector = connector;
     drm_sysfs_connector_add(connector);

-    intel_panel_setup_backlight(dev);
+    intel_panel_init(&intel_connector->panel, fixed_mode);
+    intel_panel_setup_backlight(connector);

     return true;

@@ -1119,7 +1128,9 @@ failed:
     DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
     drm_connector_cleanup(connector);
     drm_encoder_cleanup(encoder);
-    kfree(intel_lvds);
-    kfree(intel_connector);
+    if (fixed_mode)
+        drm_mode_destroy(dev, fixed_mode);
+    kfree(lvds_encoder);
+    kfree(lvds_connector);
     return false;
 }

View File: intel_modes.c

@@ -45,7 +45,6 @@ int intel_connector_update_modes(struct drm_connector *connector,
     drm_mode_connector_update_edid_property(connector, edid);
     ret = drm_add_edid_modes(connector, edid);
     drm_edid_to_eld(connector, edid);
-    kfree(edid);

     return ret;
 }
@@ -61,12 +60,16 @@ int intel_ddc_get_modes(struct drm_connector *connector,
             struct i2c_adapter *adapter)
 {
     struct edid *edid;
+    int ret;

     edid = drm_get_edid(connector, adapter);
     if (!edid)
         return 0;

-    return intel_connector_update_modes(connector, edid);
+    ret = intel_connector_update_modes(connector, edid);
+    kfree(edid);
+
+    return ret;
 }

 static const struct drm_prop_enum_list force_audio_names[] = {
@@ -94,7 +97,7 @@ intel_attach_force_audio_property(struct drm_connector *connector)
         dev_priv->force_audio_property = prop;
     }
-    drm_connector_attach_property(connector, prop, 0);
+    drm_object_attach_property(&connector->base, prop, 0);
 #endif
 }
@@ -122,6 +125,6 @@ intel_attach_broadcast_rgb_property(struct drm_connector *connector)
         dev_priv->broadcast_rgb_property = prop;
     }
-    drm_connector_attach_property(connector, prop, 0);
+    drm_object_attach_property(&connector->base, prop, 0);
 #endif
 }

View File: intel_panel.c

@@ -130,32 +130,34 @@ static int is_backlight_combination_mode(struct drm_device *dev)
     return 0;
 }

-static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
+static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
 {
+    struct drm_i915_private *dev_priv = dev->dev_private;
     u32 val;

     /* Restore the CTL value if it lost, e.g. GPU reset */

     if (HAS_PCH_SPLIT(dev_priv->dev)) {
         val = I915_READ(BLC_PWM_PCH_CTL2);
-        if (dev_priv->saveBLC_PWM_CTL2 == 0) {
-            dev_priv->saveBLC_PWM_CTL2 = val;
+        if (dev_priv->regfile.saveBLC_PWM_CTL2 == 0) {
+            dev_priv->regfile.saveBLC_PWM_CTL2 = val;
         } else if (val == 0) {
-            I915_WRITE(BLC_PWM_PCH_CTL2,
-                   dev_priv->saveBLC_PWM_CTL2);
-            val = dev_priv->saveBLC_PWM_CTL2;
+            val = dev_priv->regfile.saveBLC_PWM_CTL2;
+            I915_WRITE(BLC_PWM_PCH_CTL2, val);
         }
     } else {
         val = I915_READ(BLC_PWM_CTL);
-        if (dev_priv->saveBLC_PWM_CTL == 0) {
-            dev_priv->saveBLC_PWM_CTL = val;
-            dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+        if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
+            dev_priv->regfile.saveBLC_PWM_CTL = val;
+            if (INTEL_INFO(dev)->gen >= 4)
+                dev_priv->regfile.saveBLC_PWM_CTL2 =
+                    I915_READ(BLC_PWM_CTL2);
         } else if (val == 0) {
-            I915_WRITE(BLC_PWM_CTL,
-                   dev_priv->saveBLC_PWM_CTL);
-            I915_WRITE(BLC_PWM_CTL2,
-                   dev_priv->saveBLC_PWM_CTL2);
-            val = dev_priv->saveBLC_PWM_CTL;
+            val = dev_priv->regfile.saveBLC_PWM_CTL;
+            I915_WRITE(BLC_PWM_CTL, val);
+            if (INTEL_INFO(dev)->gen >= 4)
+                I915_WRITE(BLC_PWM_CTL2,
+                       dev_priv->regfile.saveBLC_PWM_CTL2);
         }
     }

@@ -164,10 +166,9 @@ static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)

 static u32 _intel_panel_get_max_backlight(struct drm_device *dev)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
     u32 max;

-    max = i915_read_blc_pwm_ctl(dev_priv);
+    max = i915_read_blc_pwm_ctl(dev);

     if (HAS_PCH_SPLIT(dev)) {
         max >>= 16;
@@ -374,26 +375,23 @@ static void intel_panel_init_backlight(struct drm_device *dev)
 enum drm_connector_status
 intel_panel_detect(struct drm_device *dev)
 {
-#if 0
     struct drm_i915_private *dev_priv = dev->dev_private;
-#endif
-
-    if (i915_panel_ignore_lid)
-        return i915_panel_ignore_lid > 0 ?
-            connector_status_connected :
-            connector_status_disconnected;
-
-    /* opregion lid state on HP 2540p is wrong at boot up,
-     * appears to be either the BIOS or Linux ACPI fault */
-#if 0
     /* Assume that the BIOS does not lie through the OpRegion... */
-    if (dev_priv->opregion.lid_state)
+    if (!i915_panel_ignore_lid && dev_priv->opregion.lid_state) {
         return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
             connector_status_connected :
             connector_status_disconnected;
-#endif
+    }
+
+    switch (i915_panel_ignore_lid) {
+    case -2:
+        return connector_status_connected;
+    case -1:
+        return connector_status_disconnected;
+    default:
         return connector_status_unknown;
+    }
 }
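The rewritten detect path gives i915_panel_ignore_lid three roles: 0 trusts the firmware-reported (OpRegion) lid state, -1 forces a disconnected report, and -2 forces connected. A compact model of that decision order (status values here are placeholders):

#include <stdio.h>

enum status { CONNECTED, DISCONNECTED, UNKNOWN };

/* Mirrors the order above: the module parameter, when nonzero,
 * overrides the firmware-reported lid state. */
static enum status panel_detect(int ignore_lid, int have_lid, int lid_open)
{
    if (!ignore_lid && have_lid)
        return lid_open ? CONNECTED : DISCONNECTED;

    switch (ignore_lid) {
    case -2: return CONNECTED;
    case -1: return DISCONNECTED;
    default: return UNKNOWN;
    }
}

int main(void)
{
    printf("%d\n", panel_detect(0, 1, 1));   /* trusts lid: CONNECTED */
    printf("%d\n", panel_detect(-1, 1, 1));  /* forced: DISCONNECTED */
    return 0;
}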
 #ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
@@ -416,21 +414,14 @@ static const struct backlight_ops intel_panel_bl_ops = {
     .get_brightness = intel_panel_get_brightness,
 };

-int intel_panel_setup_backlight(struct drm_device *dev)
+int intel_panel_setup_backlight(struct drm_connector *connector)
 {
+    struct drm_device *dev = connector->dev;
     struct drm_i915_private *dev_priv = dev->dev_private;
     struct backlight_properties props;
-    struct drm_connector *connector;

     intel_panel_init_backlight(dev);

-    if (dev_priv->int_lvds_connector)
-        connector = dev_priv->int_lvds_connector;
-    else if (dev_priv->int_edp_connector)
-        connector = dev_priv->int_edp_connector;
-    else
-        return -ENODEV;
-
     memset(&props, 0, sizeof(props));
     props.type = BACKLIGHT_RAW;
     props.max_brightness = _intel_panel_get_max_backlight(dev);
@@ -460,9 +451,9 @@ void intel_panel_destroy_backlight(struct drm_device *dev)
         backlight_device_unregister(dev_priv->backlight);
 }
 #else
-int intel_panel_setup_backlight(struct drm_device *dev)
+int intel_panel_setup_backlight(struct drm_connector *connector)
 {
-    intel_panel_init_backlight(dev);
+    intel_panel_init_backlight(connector->dev);
     return 0;
 }

@@ -471,3 +462,20 @@ void intel_panel_destroy_backlight(struct drm_device *dev)
     return;
 }
 #endif
+
+int intel_panel_init(struct intel_panel *panel,
+             struct drm_display_mode *fixed_mode)
+{
+    panel->fixed_mode = fixed_mode;
+
+    return 0;
+}
+
+void intel_panel_fini(struct intel_panel *panel)
+{
+    struct intel_connector *intel_connector =
+        container_of(panel, struct intel_connector, panel);
+
+    if (panel->fixed_mode)
+        drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
+}
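intel_panel_init()/intel_panel_fini() establish a clear ownership rule: the panel keeps the fixed mode handed to it at init time and frees it exactly once at fini. A minimal sketch of the same hand-off, with illustrative types:

#include <stdlib.h>

struct mode  { int w, h; };
struct panel { struct mode *fixed_mode; };

/* The panel takes ownership of 'fixed' here... */
static int panel_init(struct panel *p, struct mode *fixed)
{
    p->fixed_mode = fixed;
    return 0;
}

/* ...and is the only place that releases it. */
static void panel_fini(struct panel *p)
{
    free(p->fixed_mode);
    p->fixed_mode = NULL;
}

int main(void)
{
    struct panel p;
    struct mode *m = malloc(sizeof(*m));
    if (!m)
        return 1;
    panel_init(&p, m);   /* the caller must not free m afterwards */
    panel_fini(&p);
    return 0;
}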

File diff suppressed because it is too large.

View File: intel_ringbuffer.c

@@ -47,7 +47,7 @@ struct pipe_control {
 static inline int ring_space(struct intel_ring_buffer *ring)
 {
-    int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
+    int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
     if (space < 0)
         space += ring->size;
     return space;
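ring_space() is the standard circular-buffer free-space computation with a twist: a reserved gap (a hard-coded 8 bytes before this patch, the named I915_RING_FREE_SPACE constant after it) keeps the tail from ever touching the head, so an empty ring and a completely full one stay distinguishable. A standalone version with example values (sizes chosen for illustration):

#include <stdio.h>

#define RING_SIZE  4096
#define FREE_SPACE 64     /* reserved gap between tail and head */

/* Free bytes available for writing between tail and head. */
static int ring_space(int head, int tail)
{
    int space = head - (tail + FREE_SPACE);
    if (space < 0)
        space += RING_SIZE;
    return space;
}

int main(void)
{
    printf("%d\n", ring_space(1024, 0));   /* 960 */
    printf("%d\n", ring_space(0, 1024));   /* 4096 - 1024 - 64 = 3008 */
    return 0;
}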
@@ -247,7 +247,7 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
         /*
          * TLB invalidate requires a post-sync write.
          */
-        flags |= PIPE_CONTROL_QW_WRITE;
+        flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
     }

     ret = intel_ring_begin(ring, 4);
@@ -461,7 +461,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
         goto err_unref;

     pc->gtt_offset = obj->gtt_offset;
-    pc->cpu_page = (void*)MapIoMem((addr_t)obj->pages.page[0], 4096, PG_SW);
+    pc->cpu_page = (void*)MapIoMem((addr_t)sg_page(obj->pages->sgl),4096, PG_SW);
     if (pc->cpu_page == NULL)
         goto err_unpin;

@@ -502,13 +502,25 @@ static int init_render_ring(struct intel_ring_buffer *ring)
     struct drm_i915_private *dev_priv = dev->dev_private;
     int ret = init_ring_common(ring);

-    if (INTEL_INFO(dev)->gen > 3) {
+    if (INTEL_INFO(dev)->gen > 3)
         I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
+
+    /* We need to disable the AsyncFlip performance optimisations in order
+     * to use MI_WAIT_FOR_EVENT within the CS. It should already be
+     * programmed to '1' on all products.
+     */
+    if (INTEL_INFO(dev)->gen >= 6)
+        I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
+
+    /* Required for the hardware to program scanline values for waiting */
+    if (INTEL_INFO(dev)->gen == 6)
+        I915_WRITE(GFX_MODE,
+               _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));
+
     if (IS_GEN7(dev))
         I915_WRITE(GFX_MODE_GEN7,
                _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
                _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
-    }

     if (INTEL_INFO(dev)->gen >= 5) {
         ret = init_pipe_control(ring);
@@ -552,15 +564,11 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
 static void
 update_mboxes(struct intel_ring_buffer *ring,
-          u32 seqno,
           u32 mmio_offset)
 {
-    intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
-                  MI_SEMAPHORE_GLOBAL_GTT |
-                  MI_SEMAPHORE_REGISTER |
-                  MI_SEMAPHORE_UPDATE);
-    intel_ring_emit(ring, seqno);
+    intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
     intel_ring_emit(ring, mmio_offset);
+    intel_ring_emit(ring, ring->outstanding_lazy_request);
 }

 /**
@@ -573,8 +581,7 @@ update_mboxes(struct intel_ring_buffer *ring,
  * This acts like a signal in the canonical semaphore.
  */
 static int
-gen6_add_request(struct intel_ring_buffer *ring,
-         u32 *seqno)
+gen6_add_request(struct intel_ring_buffer *ring)
 {
     u32 mbox1_reg;
     u32 mbox2_reg;
@@ -587,13 +594,11 @@ gen6_add_request(struct intel_ring_buffer *ring,
     mbox1_reg = ring->signal_mbox[0];
     mbox2_reg = ring->signal_mbox[1];

-    *seqno = i915_gem_next_request_seqno(ring);
-
-    update_mboxes(ring, *seqno, mbox1_reg);
-    update_mboxes(ring, *seqno, mbox2_reg);
+    update_mboxes(ring, mbox1_reg);
+    update_mboxes(ring, mbox2_reg);
     intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
     intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-    intel_ring_emit(ring, *seqno);
+    intel_ring_emit(ring, ring->outstanding_lazy_request);
     intel_ring_emit(ring, MI_USER_INTERRUPT);
     intel_ring_advance(ring);
@@ -650,10 +655,8 @@ do { \
 } while (0)

 static int
-pc_render_add_request(struct intel_ring_buffer *ring,
-              u32 *result)
+pc_render_add_request(struct intel_ring_buffer *ring)
 {
-    u32 seqno = i915_gem_next_request_seqno(ring);
     struct pipe_control *pc = ring->private;
     u32 scratch_addr = pc->gtt_offset + 128;
     int ret;
@@ -674,7 +677,7 @@ pc_render_add_request(struct intel_ring_buffer *ring,
             PIPE_CONTROL_WRITE_FLUSH |
             PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
     intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-    intel_ring_emit(ring, seqno);
+    intel_ring_emit(ring, ring->outstanding_lazy_request);
     intel_ring_emit(ring, 0);
     PIPE_CONTROL_FLUSH(ring, scratch_addr);
     scratch_addr += 128; /* write to separate cachelines */
@@ -693,11 +696,10 @@ pc_render_add_request(struct intel_ring_buffer *ring,
             PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
             PIPE_CONTROL_NOTIFY);
     intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-    intel_ring_emit(ring, seqno);
+    intel_ring_emit(ring, ring->outstanding_lazy_request);
     intel_ring_emit(ring, 0);
     intel_ring_advance(ring);

-    *result = seqno;
     return 0;
 }

@@ -885,25 +887,20 @@ bsd_ring_flush(struct intel_ring_buffer *ring,
 }

 static int
-i9xx_add_request(struct intel_ring_buffer *ring,
-         u32 *result)
+i9xx_add_request(struct intel_ring_buffer *ring)
 {
-    u32 seqno;
     int ret;

     ret = intel_ring_begin(ring, 4);
     if (ret)
         return ret;

-    seqno = i915_gem_next_request_seqno(ring);
-
     intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
     intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-    intel_ring_emit(ring, seqno);
+    intel_ring_emit(ring, ring->outstanding_lazy_request);
     intel_ring_emit(ring, MI_USER_INTERRUPT);
     intel_ring_advance(ring);

-    *result = seqno;
     return 0;
 }
@@ -961,7 +958,9 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
 }

 static int
-i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
+i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
+             u32 offset, u32 length,
+             unsigned flags)
 {
     int ret;

@@ -972,35 +971,71 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
     intel_ring_emit(ring,
             MI_BATCH_BUFFER_START |
             MI_BATCH_GTT |
-            MI_BATCH_NON_SECURE_I965);
+            (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
     intel_ring_emit(ring, offset);
     intel_ring_advance(ring);

     return 0;
 }
+/* Just userspace ABI convention to limit the wa batch bo to a resonable size */
+#define I830_BATCH_LIMIT (256*1024)
+
 static int
 i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
-             u32 offset, u32 len)
+                u32 offset, u32 len,
+                unsigned flags)
 {
     int ret;

+    if (flags & I915_DISPATCH_PINNED) {
     ret = intel_ring_begin(ring, 4);
     if (ret)
         return ret;

     intel_ring_emit(ring, MI_BATCH_BUFFER);
-    intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+        intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
     intel_ring_emit(ring, offset + len - 8);
-    intel_ring_emit(ring, 0);
+        intel_ring_emit(ring, MI_NOOP);
+        intel_ring_advance(ring);
+    } else {
+        struct drm_i915_gem_object *obj = ring->private;
+        u32 cs_offset = obj->gtt_offset;
+
+        if (len > I830_BATCH_LIMIT)
+            return -ENOSPC;
+
+        ret = intel_ring_begin(ring, 9+3);
+        if (ret)
+            return ret;
+        /* Blit the batch (which has now all relocs applied) to the stable batch
+         * scratch bo area (so that the CS never stumbles over its tlb
+         * invalidation bug) ... */
+        intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
+                XY_SRC_COPY_BLT_WRITE_ALPHA |
+                XY_SRC_COPY_BLT_WRITE_RGB);
+        intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
+        intel_ring_emit(ring, 0);
+        intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
+        intel_ring_emit(ring, cs_offset);
+        intel_ring_emit(ring, 0);
+        intel_ring_emit(ring, 4096);
+        intel_ring_emit(ring, offset);
+        intel_ring_emit(ring, MI_FLUSH);
+
+        /* ... and execute it. */
+        intel_ring_emit(ring, MI_BATCH_BUFFER);
+        intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+        intel_ring_emit(ring, cs_offset + len - 8);
     intel_ring_advance(ring);
+    }

     return 0;
 }
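In the workaround branch above, the blit dimension dword packs height into the high half and width into the low half: DIV_ROUND_UP(len, 4096) rows of 1024 pixels at 32 bpp, i.e. one 4096-byte page of the batch per row. The rounding macro, checked on a small example:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    int len = 10000;                          /* batch length in bytes */
    int rows = DIV_ROUND_UP(len, 4096);       /* 3 pages, not 2 */
    unsigned int dim = (rows << 16) | 1024;   /* height | width */
    printf("rows=%d dim=0x%08x\n", rows, dim);  /* rows=3 dim=0x00030400 */
    return 0;
}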
 static int
 i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
-             u32 offset, u32 len)
+             u32 offset, u32 len,
+             unsigned flags)
 {
     int ret;

@@ -1009,7 +1044,7 @@ i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
         return ret;

     intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-    intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+    intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
     intel_ring_advance(ring);

     return 0;

@@ -1050,7 +1085,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
     }

     ring->status_page.gfx_addr = obj->gtt_offset;
-    ring->status_page.page_addr = (void*)MapIoMem(obj->pages.page[0],4096,PG_SW);
+    ring->status_page.page_addr = (void*)MapIoMem((addr_t)sg_page(obj->pages->sgl),4096,PG_SW);
     if (ring->status_page.page_addr == NULL) {
         ret = -ENOMEM;
         goto err_unpin;
@@ -1072,6 +1107,29 @@ err:
     return ret;
 }

+static int init_phys_hws_pga(struct intel_ring_buffer *ring)
+{
+    struct drm_i915_private *dev_priv = ring->dev->dev_private;
+    u32 addr;
+
+    if (!dev_priv->status_page_dmah) {
+        dev_priv->status_page_dmah =
+            drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
+        if (!dev_priv->status_page_dmah)
+            return -ENOMEM;
+    }
+
+    addr = dev_priv->status_page_dmah->busaddr;
+    if (INTEL_INFO(ring->dev)->gen >= 4)
+        addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
+    I915_WRITE(HWS_PGA, addr);
+
+    ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
+    memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+
+    return 0;
+}
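The only subtle line in init_phys_hws_pga() is the address fold: shifting the bus address right by 28 and masking with 0xf0 drops bits 35:32 of a wide physical address into bits 7:4 of the register value, which is where gen4+ parts take the high address bits (that is my reading of the code above, not a Bspec quote). A quick check of the arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Fold bits [35:32] of a 36-bit bus address into bits [7:4] of the
 * register value, as the gen4+ branch above does. */
static uint32_t hws_pga_encode(uint64_t busaddr)
{
    uint32_t addr = (uint32_t)busaddr;           /* page-aligned low bits */
    addr |= (uint32_t)((busaddr >> 28) & 0xf0);  /* bits 35:32 -> 7:4 */
    return addr;
}

int main(void)
{
    uint64_t bus = 0x3ffff0000ULL;   /* a 36-bit, 4 KiB-aligned address */
    printf("HWS_PGA = 0x%08x\n", hws_pga_encode(bus));  /* 0xffff0030 */
    return 0;
}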
 static int intel_init_ring_buffer(struct drm_device *dev,
                   struct intel_ring_buffer *ring)
 {
@@ -1083,6 +1141,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
     INIT_LIST_HEAD(&ring->active_list);
     INIT_LIST_HEAD(&ring->request_list);
     ring->size = 32 * PAGE_SIZE;
+    memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));

     init_waitqueue_head(&ring->irq_queue);

@@ -1090,6 +1149,11 @@ static int intel_init_ring_buffer(struct drm_device *dev,
         ret = init_status_page(ring);
         if (ret)
             return ret;
+    } else {
+        BUG_ON(ring->id != RCS);
+        ret = init_phys_hws_pga(ring);
+        if (ret)
+            return ret;
     }

     obj = i915_gem_alloc_object(dev, ring->size);
@@ -1154,7 +1218,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
     /* Disable the ring buffer. The ring must be idle at this point */
     dev_priv = ring->dev->dev_private;
-    ret = intel_wait_ring_idle(ring);
+    ret = intel_ring_idle(ring);
     if (ret)
         DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
               ring->name, ret);
@@ -1173,28 +1237,6 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)

 //   cleanup_status_page(ring);
 }
-static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
-{
-    uint32_t __iomem *virt;
-    int rem = ring->size - ring->tail;
-
-    if (ring->space < rem) {
-        int ret = intel_wait_ring_buffer(ring, rem);
-        if (ret)
-            return ret;
-    }
-
-    virt = ring->virtual_start + ring->tail;
-    rem /= 4;
-    while (rem--)
-        iowrite32(MI_NOOP, virt++);
-
-    ring->tail = 0;
-    ring->space = ring_space(ring);
-
-    return 0;
-}
-
 static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
 {
     int ret;
@@ -1228,7 +1270,7 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
         if (request->tail == -1)
             continue;

-        space = request->tail - (ring->tail + 8);
+        space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
         if (space < 0)
             space += ring->size;
         if (space >= n) {
@@ -1263,7 +1305,7 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
     return 0;
 }

-int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
+static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
 {
     struct drm_device *dev = ring->dev;
     struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1274,7 +1316,7 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
     if (ret != -ENOSPC)
         return ret;

-    trace_i915_ring_wait_begin(ring);
     /* With GEM the hangcheck timer should kick us out of the loop,
      * leaving it early runs the risk of corrupting GEM state (due
      * to running on almost untested codepaths). But on resume
@@ -1300,6 +1342,60 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
     return -EBUSY;
 }
+
+static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
+{
+    uint32_t __iomem *virt;
+    int rem = ring->size - ring->tail;
+
+    if (ring->space < rem) {
+        int ret = ring_wait_for_space(ring, rem);
+        if (ret)
+            return ret;
+    }
+
+    virt = ring->virtual_start + ring->tail;
+    rem /= 4;
+    while (rem--)
+        iowrite32(MI_NOOP, virt++);
+
+    ring->tail = 0;
+    ring->space = ring_space(ring);
+
+    return 0;
+}
+
+int intel_ring_idle(struct intel_ring_buffer *ring)
+{
+    u32 seqno;
+    int ret;
+
+    /* We need to add any requests required to flush the objects and ring */
+    if (ring->outstanding_lazy_request) {
+        ret = i915_add_request(ring, NULL, NULL);
+        if (ret)
+            return ret;
+    }
+
+    /* Wait upon the last request to be completed */
+    if (list_empty(&ring->request_list))
+        return 0;
+
+    seqno = list_entry(ring->request_list.prev,
+               struct drm_i915_gem_request,
+               list)->seqno;
+
+    return i915_wait_seqno(ring, seqno);
+}
+
+static int
+intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
+{
+    if (ring->outstanding_lazy_request)
+        return 0;
+
+    return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
+}
int intel_ring_begin(struct intel_ring_buffer *ring, int intel_ring_begin(struct intel_ring_buffer *ring,
int num_dwords) int num_dwords)
{ {
@@ -1311,6 +1407,11 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
if (ret) if (ret)
return ret; return ret;
/* Preallocate the olr before touching the ring */
ret = intel_ring_alloc_seqno(ring);
if (ret)
return ret;
if (unlikely(ring->tail + n > ring->effective_size)) { if (unlikely(ring->tail + n > ring->effective_size)) {
ret = intel_wrap_ring_buffer(ring); ret = intel_wrap_ring_buffer(ring);
if (unlikely(ret)) if (unlikely(ret))
@@ -1318,7 +1419,7 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
} }
if (unlikely(ring->space < n)) { if (unlikely(ring->space < n)) {
ret = intel_wait_ring_buffer(ring, n); ret = ring_wait_for_space(ring, n);
if (unlikely(ret)) if (unlikely(ret))
return ret; return ret;
} }
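The net effect of the intel_ring_begin() hunks above, condensed into one sketch (same names as the diff, error paths as shown): the outstanding lazy request is reserved before the ring is wrapped or waited on, so nothing emitted afterwards can race with the seqno allocation.

        ret = intel_ring_alloc_seqno(ring);     /* reserve the olr first */
        if (ret)
                return ret;

        if (unlikely(ring->tail + n > ring->effective_size)) {
                ret = intel_wrap_ring_buffer(ring);     /* pad with MI_NOOPs */
                if (unlikely(ret))
                        return ret;
        }

        if (unlikely(ring->space < n)) {
                ret = ring_wait_for_space(ring, n);     /* retire requests */
                if (unlikely(ret))
                        return ret;
        }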
@@ -1382,19 +1483,48 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
return ret; return ret;
cmd = MI_FLUSH_DW; cmd = MI_FLUSH_DW;
/*
* Bspec vol 1c.5 - video engine command streamer:
* "If ENABLED, all TLBs will be invalidated once the flush
* operation is complete. This bit is only valid when the
* Post-Sync Operation field is a value of 1h or 3h."
*/
if (invalidate & I915_GEM_GPU_DOMAINS) if (invalidate & I915_GEM_GPU_DOMAINS)
cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD; cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
intel_ring_emit(ring, cmd); intel_ring_emit(ring, cmd);
intel_ring_emit(ring, 0); intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
intel_ring_emit(ring, 0); intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP); intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring); intel_ring_advance(ring);
return 0; return 0;
} }
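The Bspec note above is the reason the flush gains a post-sync operation: the TLB-invalidate bit is ignored unless the Post-Sync Operation field is 1h or 3h. A sketch of the resulting four-dword packet, with the store directed at the scratch slot of the status page so it clobbers nothing the driver cares about:

        u32 cmd = MI_FLUSH_DW;

        if (invalidate & I915_GEM_GPU_DOMAINS)
                cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
                       MI_FLUSH_DW_STORE_INDEX |   /* post-sync op = 1h */
                       MI_FLUSH_DW_OP_STOREDW;     /* makes the TLB bit valid */

        intel_ring_emit(ring, cmd);
        intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
        intel_ring_emit(ring, 0);                  /* dummy dword to store */
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);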
static int
hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 len,
unsigned flags)
{
int ret;
ret = intel_ring_begin(ring, 2);
if (ret)
return ret;
intel_ring_emit(ring,
MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
/* bit0-7 is the length on GEN6+ */
intel_ring_emit(ring, offset);
intel_ring_advance(ring);
return 0;
}
static int static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 len) u32 offset, u32 len,
unsigned flags)
{ {
int ret; int ret;
@@ -1402,7 +1532,9 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
if (ret) if (ret)
return ret; return ret;
intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965); intel_ring_emit(ring,
MI_BATCH_BUFFER_START |
(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
/* bit0-7 is the length on GEN6+ */ /* bit0-7 is the length on GEN6+ */
intel_ring_emit(ring, offset); intel_ring_emit(ring, offset);
intel_ring_advance(ring); intel_ring_advance(ring);
@@ -1423,10 +1555,17 @@ static int blt_ring_flush(struct intel_ring_buffer *ring,
return ret; return ret;
cmd = MI_FLUSH_DW; cmd = MI_FLUSH_DW;
/*
* Bspec vol 1c.3 - blitter engine command streamer:
* "If ENABLED, all TLBs will be invalidated once the flush
* operation is complete. This bit is only valid when the
* Post-Sync Operation field is a value of 1h or 3h."
*/
if (invalidate & I915_GEM_DOMAIN_RENDER) if (invalidate & I915_GEM_DOMAIN_RENDER)
cmd |= MI_INVALIDATE_TLB; cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
MI_FLUSH_DW_OP_STOREDW;
intel_ring_emit(ring, cmd); intel_ring_emit(ring, cmd);
intel_ring_emit(ring, 0); intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
intel_ring_emit(ring, 0); intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP); intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring); intel_ring_advance(ring);
@@ -1481,7 +1620,9 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->irq_enable_mask = I915_USER_INTERRUPT; ring->irq_enable_mask = I915_USER_INTERRUPT;
} }
ring->write_tail = ring_write_tail; ring->write_tail = ring_write_tail;
if (INTEL_INFO(dev)->gen >= 6) if (IS_HASWELL(dev))
ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
else if (INTEL_INFO(dev)->gen >= 6)
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
else if (INTEL_INFO(dev)->gen >= 4) else if (INTEL_INFO(dev)->gen >= 4)
ring->dispatch_execbuffer = i965_dispatch_execbuffer; ring->dispatch_execbuffer = i965_dispatch_execbuffer;
@@ -1492,15 +1633,98 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->init = init_render_ring; ring->init = init_render_ring;
ring->cleanup = render_ring_cleanup; ring->cleanup = render_ring_cleanup;
/* Workaround batchbuffer to combat CS tlb bug. */
if (HAS_BROKEN_CS_TLB(dev)) {
struct drm_i915_gem_object *obj;
int ret;
if (!I915_NEED_GFX_HWS(dev)) { obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; if (obj == NULL) {
memset(ring->status_page.page_addr, 0, PAGE_SIZE); DRM_ERROR("Failed to allocate batch bo\n");
return -ENOMEM;
}
ret = i915_gem_object_pin(obj, 0, true, false);
if (ret != 0) {
drm_gem_object_unreference(&obj->base);
DRM_ERROR("Failed to ping batch bo\n");
return ret;
}
ring->private = obj;
} }
return intel_init_ring_buffer(dev, ring); return intel_init_ring_buffer(dev, ring);
} }
#if 0
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
ring->name = "render ring";
ring->id = RCS;
ring->mmio_base = RENDER_RING_BASE;
if (INTEL_INFO(dev)->gen >= 6) {
/* non-kms not supported on gen6+ */
return -ENODEV;
}
/* Note: gem is not supported on gen5/ilk without kms (the corresponding
* gem_init ioctl returns with -ENODEV). Hence we do not need to set up
* the special gen5 functions. */
ring->add_request = i9xx_add_request;
if (INTEL_INFO(dev)->gen < 4)
ring->flush = gen2_render_ring_flush;
else
ring->flush = gen4_render_ring_flush;
ring->get_seqno = ring_get_seqno;
if (IS_GEN2(dev)) {
ring->irq_get = i8xx_ring_get_irq;
ring->irq_put = i8xx_ring_put_irq;
} else {
ring->irq_get = i9xx_ring_get_irq;
ring->irq_put = i9xx_ring_put_irq;
}
ring->irq_enable_mask = I915_USER_INTERRUPT;
ring->write_tail = ring_write_tail;
if (INTEL_INFO(dev)->gen >= 4)
ring->dispatch_execbuffer = i965_dispatch_execbuffer;
else if (IS_I830(dev) || IS_845G(dev))
ring->dispatch_execbuffer = i830_dispatch_execbuffer;
else
ring->dispatch_execbuffer = i915_dispatch_execbuffer;
ring->init = init_render_ring;
ring->cleanup = render_ring_cleanup;
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
ring->size = size;
ring->effective_size = ring->size;
if (IS_I830(ring->dev) || IS_845G(ring->dev))
ring->effective_size -= 128;
ring->virtual_start = ioremap_wc(start, size);
if (ring->virtual_start == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return -ENOMEM;
}
if (!I915_NEED_GFX_HWS(dev)) {
ret = init_phys_hws_pga(ring);
if (ret)
return ret;
}
return 0;
}
#endif
int intel_init_bsd_ring_buffer(struct drm_device *dev) int intel_init_bsd_ring_buffer(struct drm_device *dev)
{ {
@@ -1547,7 +1771,6 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
} }
ring->init = init_ring_common; ring->init = init_ring_common;
return intel_init_ring_buffer(dev, ring); return intel_init_ring_buffer(dev, ring);
} }


@@ -1,6 +1,17 @@
#ifndef _INTEL_RINGBUFFER_H_ #ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_ #define _INTEL_RINGBUFFER_H_
/*
* Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
* Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
* Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
*
* "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
* cacheline, the Head Pointer must not be greater than the Tail
* Pointer."
*/
#define I915_RING_FREE_SPACE 64
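This 64-byte reserve is what keeps the quoted rule satisfied: free space is always computed so that the tail can never catch up to the head. A minimal sketch of the arithmetic (example_ring_space is a hypothetical helper; compare ring_space() and intel_ring_wait_request() in the hunks above):

static int example_ring_space(u32 head, u32 tail, u32 size)
{
        int space = head - (tail + I915_RING_FREE_SPACE);
        if (space < 0)
                space += size;          /* circular buffer wrap-around */
        return space;
}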
struct intel_hw_status_page { struct intel_hw_status_page {
u32 *page_addr; u32 *page_addr;
unsigned int gfx_addr; unsigned int gfx_addr;
@@ -70,8 +81,7 @@ struct intel_ring_buffer {
int __must_check (*flush)(struct intel_ring_buffer *ring, int __must_check (*flush)(struct intel_ring_buffer *ring,
u32 invalidate_domains, u32 invalidate_domains,
u32 flush_domains); u32 flush_domains);
int (*add_request)(struct intel_ring_buffer *ring, int (*add_request)(struct intel_ring_buffer *ring);
u32 *seqno);
/* Some chipsets are not quite as coherent as advertised and need /* Some chipsets are not quite as coherent as advertised and need
* an expensive kick to force a true read of the up-to-date seqno. * an expensive kick to force a true read of the up-to-date seqno.
* However, the up-to-date seqno is not always required and the last * However, the up-to-date seqno is not always required and the last
@@ -81,7 +91,10 @@ struct intel_ring_buffer {
u32 (*get_seqno)(struct intel_ring_buffer *ring, u32 (*get_seqno)(struct intel_ring_buffer *ring,
bool lazy_coherency); bool lazy_coherency);
int (*dispatch_execbuffer)(struct intel_ring_buffer *ring, int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
u32 offset, u32 length); u32 offset, u32 length,
unsigned flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
void (*cleanup)(struct intel_ring_buffer *ring); void (*cleanup)(struct intel_ring_buffer *ring);
int (*sync_to)(struct intel_ring_buffer *ring, int (*sync_to)(struct intel_ring_buffer *ring,
struct intel_ring_buffer *to, struct intel_ring_buffer *to,
@@ -181,27 +194,21 @@ intel_read_status_page(struct intel_ring_buffer *ring,
* The area from dword 0x20 to 0x3ff is available for driver usage. * The area from dword 0x20 to 0x3ff is available for driver usage.
*/ */
#define I915_GEM_HWS_INDEX 0x20 #define I915_GEM_HWS_INDEX 0x20
#define I915_GEM_HWS_SCRATCH_INDEX 0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
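The scratch slot is an ordinary dword inside the hardware status page, in the 0x20..0x3ff region reserved for driver usage above; the GPU writes it via the MI_FLUSH_DW post-sync operation and the CPU can read it back like any other status-page entry. A sketch, assuming the usual intel_read_status_page() accessor:

        u32 scratch = intel_read_status_page(ring, I915_GEM_HWS_SCRATCH_INDEX);
        (void)scratch;  /* the value is a dummy; only the write matters */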
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
{
return intel_wait_ring_buffer(ring, ring->size - 8);
}
int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n); int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
static inline void intel_ring_emit(struct intel_ring_buffer *ring, static inline void intel_ring_emit(struct intel_ring_buffer *ring,
u32 data) u32 data)
{ {
iowrite32(data, ring->virtual_start + ring->tail); iowrite32(data, ring->virtual_start + ring->tail);
ring->tail += 4; ring->tail += 4;
} }
void intel_ring_advance(struct intel_ring_buffer *ring); void intel_ring_advance(struct intel_ring_buffer *ring);
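Every packet in this patch is built with the same three-step pattern: reserve dwords, write them through intel_ring_emit(), then publish the new tail. A minimal sketch emitting two no-ops:

        ret = intel_ring_begin(ring, 2);        /* reserve 2 dwords */
        if (ret)
                return ret;
        intel_ring_emit(ring, MI_NOOP);         /* iowrite32 + tail += 4 */
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);               /* write tail to hardware */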
int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
int intel_ring_flush_all_caches(struct intel_ring_buffer *ring); int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring); int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
@@ -217,6 +224,12 @@ static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
return ring->tail; return ring->tail;
} }
static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
{
BUG_ON(ring->outstanding_lazy_request == 0);
return ring->outstanding_lazy_request;
}
static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno) static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
{ {
if (ring->trace_irq_seqno == 0 && ring->irq_get(ring)) if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))


@@ -27,7 +27,7 @@
*/ */
#include <linux/i2c.h> #include <linux/i2c.h>
#include <linux/slab.h> #include <linux/slab.h>
//#include <linux/delay.h> #include <linux/delay.h>
#include <linux/export.h> #include <linux/export.h>
#include <drm/drmP.h> #include <drm/drmP.h>
#include <drm/drm_crtc.h> #include <drm/drm_crtc.h>
@@ -518,7 +518,7 @@ out:
static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
void *response, int response_len) void *response, int response_len)
{ {
u8 retry = 5; u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
u8 status; u8 status;
int i; int i;
@@ -531,14 +531,27 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
* command to be complete. * command to be complete.
* *
* Check 5 times in case the hardware failed to read the docs. * Check 5 times in case the hardware failed to read the docs.
*
* Also beware that the first response by many devices is to
* reply PENDING and stall for time. TVs are notorious for
* requiring longer than specified to complete their replies.
* Originally (in the DDX long ago) the delay was only ever 15ms;
* an additional 30ms delay for TVs was added later after many
* experiments. To accommodate both sets of delays, we fall back
* to a sequence of slow checks if the device fails to reply
* within 5*15µs.
*/
if (!intel_sdvo_read_byte(intel_sdvo, if (!intel_sdvo_read_byte(intel_sdvo,
SDVO_I2C_CMD_STATUS, SDVO_I2C_CMD_STATUS,
&status)) &status))
goto log_fail; goto log_fail;
while (status == SDVO_CMD_STATUS_PENDING && retry--) { while (status == SDVO_CMD_STATUS_PENDING && --retry) {
udelay(15); if (retry < 10)
msleep(15);
else
udelay(15);
if (!intel_sdvo_read_byte(intel_sdvo, if (!intel_sdvo_read_byte(intel_sdvo,
SDVO_I2C_CMD_STATUS, SDVO_I2C_CMD_STATUS,
&status)) &status))
@@ -1237,6 +1250,30 @@ static void intel_disable_sdvo(struct intel_encoder *encoder)
temp = I915_READ(intel_sdvo->sdvo_reg); temp = I915_READ(intel_sdvo->sdvo_reg);
if ((temp & SDVO_ENABLE) != 0) { if ((temp & SDVO_ENABLE) != 0) {
/* HW workaround for IBX: we need to move the port to
* transcoder A before disabling it. */
if (HAS_PCH_IBX(encoder->base.dev)) {
struct drm_crtc *crtc = encoder->base.crtc;
int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
if (temp & SDVO_PIPE_B_SELECT) {
temp &= ~SDVO_PIPE_B_SELECT;
I915_WRITE(intel_sdvo->sdvo_reg, temp);
POSTING_READ(intel_sdvo->sdvo_reg);
/* Again we need to write this twice. */
I915_WRITE(intel_sdvo->sdvo_reg, temp);
POSTING_READ(intel_sdvo->sdvo_reg);
/* Transcoder selection bits only update
* effectively on vblank. */
if (crtc)
intel_wait_for_vblank(encoder->base.dev, pipe);
else
msleep(50);
}
}
intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE); intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
} }
} }
@@ -1253,8 +1290,20 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
u8 status; u8 status;
temp = I915_READ(intel_sdvo->sdvo_reg); temp = I915_READ(intel_sdvo->sdvo_reg);
if ((temp & SDVO_ENABLE) == 0) if ((temp & SDVO_ENABLE) == 0) {
/* HW workaround for IBX: the disable path moved the port
* to transcoder A, so restore the transcoder select bit here. */
if (HAS_PCH_IBX(dev)) {
struct drm_crtc *crtc = encoder->base.crtc;
int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
/* Restore the transcoder select bit. */
if (pipe == PIPE_B)
temp |= SDVO_PIPE_B_SELECT;
}
intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE); intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
}
for (i = 0; i < 2; i++) for (i = 0; i < 2; i++)
intel_wait_for_vblank(dev, intel_crtc->pipe); intel_wait_for_vblank(dev, intel_crtc->pipe);
@@ -1508,15 +1557,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
enum drm_connector_status ret; enum drm_connector_status ret;
if (!intel_sdvo_write_cmd(intel_sdvo, if (!intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0)) SDVO_CMD_GET_ATTACHED_DISPLAYS,
return connector_status_unknown; &response, 2))
/* add 30ms delay when the output type might be TV */
if (intel_sdvo->caps.output_flags & SDVO_TV_MASK)
msleep(30);
if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
return connector_status_unknown; return connector_status_unknown;
DRM_DEBUG_KMS("SDVO response %d %d [%x]\n", DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
@@ -1805,7 +1848,7 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
intel_sdvo_destroy_enhance_property(connector); intel_sdvo_destroy_enhance_property(connector);
drm_sysfs_connector_remove(connector); drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector); drm_connector_cleanup(connector);
kfree(connector); kfree(intel_sdvo_connector);
} }
static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector) static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
@@ -1837,7 +1880,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
uint8_t cmd; uint8_t cmd;
int ret; int ret;
ret = drm_connector_property_set_value(connector, property, val); ret = drm_object_property_set_value(&connector->base, property, val);
if (ret) if (ret)
return ret; return ret;
@@ -1894,7 +1937,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
} else if (IS_TV_OR_LVDS(intel_sdvo_connector)) { } else if (IS_TV_OR_LVDS(intel_sdvo_connector)) {
temp_value = val; temp_value = val;
if (intel_sdvo_connector->left == property) { if (intel_sdvo_connector->left == property) {
drm_connector_property_set_value(connector, drm_object_property_set_value(&connector->base,
intel_sdvo_connector->right, val); intel_sdvo_connector->right, val);
if (intel_sdvo_connector->left_margin == temp_value) if (intel_sdvo_connector->left_margin == temp_value)
return 0; return 0;
@@ -1906,7 +1949,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_H; cmd = SDVO_CMD_SET_OVERSCAN_H;
goto set_value; goto set_value;
} else if (intel_sdvo_connector->right == property) { } else if (intel_sdvo_connector->right == property) {
drm_connector_property_set_value(connector, drm_object_property_set_value(&connector->base,
intel_sdvo_connector->left, val); intel_sdvo_connector->left, val);
if (intel_sdvo_connector->right_margin == temp_value) if (intel_sdvo_connector->right_margin == temp_value)
return 0; return 0;
@@ -1918,7 +1961,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_H; cmd = SDVO_CMD_SET_OVERSCAN_H;
goto set_value; goto set_value;
} else if (intel_sdvo_connector->top == property) { } else if (intel_sdvo_connector->top == property) {
drm_connector_property_set_value(connector, drm_object_property_set_value(&connector->base,
intel_sdvo_connector->bottom, val); intel_sdvo_connector->bottom, val);
if (intel_sdvo_connector->top_margin == temp_value) if (intel_sdvo_connector->top_margin == temp_value)
return 0; return 0;
@@ -1930,7 +1973,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
cmd = SDVO_CMD_SET_OVERSCAN_V; cmd = SDVO_CMD_SET_OVERSCAN_V;
goto set_value; goto set_value;
} else if (intel_sdvo_connector->bottom == property) { } else if (intel_sdvo_connector->bottom == property) {
drm_connector_property_set_value(connector, drm_object_property_set_value(&connector->base,
intel_sdvo_connector->top, val); intel_sdvo_connector->top, val);
if (intel_sdvo_connector->bottom_margin == temp_value) if (intel_sdvo_connector->bottom_margin == temp_value)
return 0; return 0;
@@ -2003,7 +2046,7 @@ static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
drm_mode_destroy(encoder->dev, drm_mode_destroy(encoder->dev,
intel_sdvo->sdvo_lvds_fixed_mode); intel_sdvo->sdvo_lvds_fixed_mode);
// i2c_del_adapter(&intel_sdvo->ddc); i2c_del_adapter(&intel_sdvo->ddc);
intel_encoder_destroy(encoder); intel_encoder_destroy(encoder);
} }
@@ -2083,17 +2126,24 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
else else
mapping = &dev_priv->sdvo_mappings[1]; mapping = &dev_priv->sdvo_mappings[1];
pin = GMBUS_PORT_DPB; if (mapping->initialized && intel_gmbus_is_port_valid(mapping->i2c_pin))
if (mapping->initialized)
pin = mapping->i2c_pin; pin = mapping->i2c_pin;
else
pin = GMBUS_PORT_DPB;
if (intel_gmbus_is_port_valid(pin)) {
sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin); sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ);
/* With gmbus we should be able to drive sdvo i2c at 2MHz, but somehow
* our code totally fails once we start using gmbus. Hence fall back to
* bit banging for now. */
intel_gmbus_force_bit(sdvo->i2c, true); intel_gmbus_force_bit(sdvo->i2c, true);
} else { }
sdvo->i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
} /* undo any changes intel_sdvo_select_i2c_bus() did to sdvo->i2c */
static void
intel_sdvo_unselect_i2c_bus(struct intel_sdvo *sdvo)
{
intel_gmbus_force_bit(sdvo->i2c, false);
} }
static bool static bool
@@ -2438,7 +2488,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]); i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0]; intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0];
drm_connector_attach_property(&intel_sdvo_connector->base.base, drm_object_attach_property(&intel_sdvo_connector->base.base.base,
intel_sdvo_connector->tv_format, 0); intel_sdvo_connector->tv_format, 0);
return true; return true;
@@ -2454,7 +2504,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
intel_sdvo_connector->name = \ intel_sdvo_connector->name = \
drm_property_create_range(dev, 0, #name, 0, data_value[0]); \ drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
if (!intel_sdvo_connector->name) return false; \ if (!intel_sdvo_connector->name) return false; \
drm_connector_attach_property(connector, \ drm_object_attach_property(&connector->base, \
intel_sdvo_connector->name, \ intel_sdvo_connector->name, \
intel_sdvo_connector->cur_##name); \ intel_sdvo_connector->cur_##name); \
DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \ DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
@@ -2491,7 +2541,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_connector->left) if (!intel_sdvo_connector->left)
return false; return false;
drm_connector_attach_property(connector, drm_object_attach_property(&connector->base,
intel_sdvo_connector->left, intel_sdvo_connector->left,
intel_sdvo_connector->left_margin); intel_sdvo_connector->left_margin);
@@ -2500,7 +2550,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_connector->right) if (!intel_sdvo_connector->right)
return false; return false;
drm_connector_attach_property(connector, drm_object_attach_property(&connector->base,
intel_sdvo_connector->right, intel_sdvo_connector->right,
intel_sdvo_connector->right_margin); intel_sdvo_connector->right_margin);
DRM_DEBUG_KMS("h_overscan: max %d, " DRM_DEBUG_KMS("h_overscan: max %d, "
@@ -2528,7 +2578,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_connector->top) if (!intel_sdvo_connector->top)
return false; return false;
drm_connector_attach_property(connector, drm_object_attach_property(&connector->base,
intel_sdvo_connector->top, intel_sdvo_connector->top,
intel_sdvo_connector->top_margin); intel_sdvo_connector->top_margin);
@@ -2538,7 +2588,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_connector->bottom) if (!intel_sdvo_connector->bottom)
return false; return false;
drm_connector_attach_property(connector, drm_object_attach_property(&connector->base,
intel_sdvo_connector->bottom, intel_sdvo_connector->bottom,
intel_sdvo_connector->bottom_margin); intel_sdvo_connector->bottom_margin);
DRM_DEBUG_KMS("v_overscan: max %d, " DRM_DEBUG_KMS("v_overscan: max %d, "
@@ -2570,7 +2620,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_connector->dot_crawl) if (!intel_sdvo_connector->dot_crawl)
return false; return false;
drm_connector_attach_property(connector, drm_object_attach_property(&connector->base,
intel_sdvo_connector->dot_crawl, intel_sdvo_connector->dot_crawl,
intel_sdvo_connector->cur_dot_crawl); intel_sdvo_connector->cur_dot_crawl);
DRM_DEBUG_KMS("dot crawl: current %d\n", response); DRM_DEBUG_KMS("dot crawl: current %d\n", response);
@@ -2655,7 +2705,7 @@ intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
sdvo->ddc.algo_data = sdvo; sdvo->ddc.algo_data = sdvo;
sdvo->ddc.algo = &intel_sdvo_ddc_proxy; sdvo->ddc.algo = &intel_sdvo_ddc_proxy;
return 1; //i2c_add_adapter(&sdvo->ddc) == 0; return i2c_add_adapter(&sdvo->ddc) == 0;
} }
bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
@@ -2674,10 +2724,8 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
intel_sdvo->is_sdvob = is_sdvob; intel_sdvo->is_sdvob = is_sdvob;
intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1; intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg); intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) { if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev))
kfree(intel_sdvo); goto err_i2c_bus;
return false;
}
/* encoder type will be decided later */ /* encoder type will be decided later */
intel_encoder = &intel_sdvo->base; intel_encoder = &intel_sdvo->base;
@@ -2775,7 +2823,9 @@ err_output:
err: err:
drm_encoder_cleanup(&intel_encoder->base); drm_encoder_cleanup(&intel_encoder->base);
// i2c_del_adapter(&intel_sdvo->ddc); i2c_del_adapter(&intel_sdvo->ddc);
err_i2c_bus:
intel_sdvo_unselect_i2c_bus(intel_sdvo);
kfree(intel_sdvo); kfree(intel_sdvo);
return false; return false;


@@ -48,7 +48,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
struct intel_plane *intel_plane = to_intel_plane(plane); struct intel_plane *intel_plane = to_intel_plane(plane);
int pipe = intel_plane->pipe; int pipe = intel_plane->pipe;
u32 sprctl, sprscale = 0; u32 sprctl, sprscale = 0;
int pixel_size; unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
sprctl = I915_READ(SPRCTL(pipe)); sprctl = I915_READ(SPRCTL(pipe));
@@ -61,33 +62,24 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
switch (fb->pixel_format) { switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888: case DRM_FORMAT_XBGR8888:
sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX; sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
pixel_size = 4;
break; break;
case DRM_FORMAT_XRGB8888: case DRM_FORMAT_XRGB8888:
sprctl |= SPRITE_FORMAT_RGBX888; sprctl |= SPRITE_FORMAT_RGBX888;
pixel_size = 4;
break; break;
case DRM_FORMAT_YUYV: case DRM_FORMAT_YUYV:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV; sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV;
pixel_size = 2;
break; break;
case DRM_FORMAT_YVYU: case DRM_FORMAT_YVYU:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU; sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU;
pixel_size = 2;
break; break;
case DRM_FORMAT_UYVY: case DRM_FORMAT_UYVY:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY; sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY;
pixel_size = 2;
break; break;
case DRM_FORMAT_VYUY: case DRM_FORMAT_VYUY:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY; sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY;
pixel_size = 2;
break; break;
default: default:
DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n"); BUG();
sprctl |= SPRITE_FORMAT_RGBX888;
pixel_size = 4;
break;
} }
if (obj->tiling_mode != I915_TILING_NONE) if (obj->tiling_mode != I915_TILING_NONE)
@@ -127,18 +119,27 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]); I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x); I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
if (obj->tiling_mode != I915_TILING_NONE) {
I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
} else {
unsigned long offset;
offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); linear_offset = y * fb->pitches[0] + x * pixel_size;
I915_WRITE(SPRLINOFF(pipe), offset); sprsurf_offset =
} intel_gen4_compute_offset_xtiled(&x, &y,
pixel_size, fb->pitches[0]);
linear_offset -= sprsurf_offset;
/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
* register */
if (IS_HASWELL(dev))
I915_WRITE(SPROFFSET(pipe), (y << 16) | x);
else if (obj->tiling_mode != I915_TILING_NONE)
I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
else
I915_WRITE(SPRLINOFF(pipe), linear_offset);
I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w); I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
if (intel_plane->can_scale)
I915_WRITE(SPRSCALE(pipe), sprscale); I915_WRITE(SPRSCALE(pipe), sprscale);
I915_WRITE(SPRCTL(pipe), sprctl); I915_WRITE(SPRCTL(pipe), sprctl);
I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset); I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset);
POSTING_READ(SPRSURF(pipe)); POSTING_READ(SPRSURF(pipe));
} }
@@ -152,6 +153,7 @@ ivb_disable_plane(struct drm_plane *plane)
I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE); I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
/* Can't leave the scaler enabled... */ /* Can't leave the scaler enabled... */
if (intel_plane->can_scale)
I915_WRITE(SPRSCALE(pipe), 0); I915_WRITE(SPRSCALE(pipe), 0);
/* Activate double buffered register update */ /* Activate double buffered register update */
I915_MODIFY_DISPBASE(SPRSURF(pipe), 0); I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
@@ -225,8 +227,10 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
struct drm_device *dev = plane->dev; struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane); struct intel_plane *intel_plane = to_intel_plane(plane);
int pipe = intel_plane->pipe, pixel_size; int pipe = intel_plane->pipe;
unsigned long dvssurf_offset, linear_offset;
u32 dvscntr, dvsscale; u32 dvscntr, dvsscale;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
dvscntr = I915_READ(DVSCNTR(pipe)); dvscntr = I915_READ(DVSCNTR(pipe));
@@ -239,33 +243,24 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
switch (fb->pixel_format) { switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888: case DRM_FORMAT_XBGR8888:
dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR; dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR;
pixel_size = 4;
break; break;
case DRM_FORMAT_XRGB8888: case DRM_FORMAT_XRGB8888:
dvscntr |= DVS_FORMAT_RGBX888; dvscntr |= DVS_FORMAT_RGBX888;
pixel_size = 4;
break; break;
case DRM_FORMAT_YUYV: case DRM_FORMAT_YUYV:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV; dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
pixel_size = 2;
break; break;
case DRM_FORMAT_YVYU: case DRM_FORMAT_YVYU:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU; dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU;
pixel_size = 2;
break; break;
case DRM_FORMAT_UYVY: case DRM_FORMAT_UYVY:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY; dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY;
pixel_size = 2;
break; break;
case DRM_FORMAT_VYUY: case DRM_FORMAT_VYUY:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY; dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
pixel_size = 2;
break; break;
default: default:
DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n"); BUG();
dvscntr |= DVS_FORMAT_RGBX888;
pixel_size = 4;
break;
} }
if (obj->tiling_mode != I915_TILING_NONE) if (obj->tiling_mode != I915_TILING_NONE)
@@ -289,18 +284,22 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]); I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x); I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
if (obj->tiling_mode != I915_TILING_NONE) {
I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
} else {
unsigned long offset;
offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); linear_offset = y * fb->pitches[0] + x * pixel_size;
I915_WRITE(DVSLINOFF(pipe), offset); dvssurf_offset =
} intel_gen4_compute_offset_xtiled(&x, &y,
pixel_size, fb->pitches[0]);
linear_offset -= dvssurf_offset;
if (obj->tiling_mode != I915_TILING_NONE)
I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
else
I915_WRITE(DVSLINOFF(pipe), linear_offset);
I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w); I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
I915_WRITE(DVSSCALE(pipe), dvsscale); I915_WRITE(DVSSCALE(pipe), dvsscale);
I915_WRITE(DVSCNTR(pipe), dvscntr); I915_WRITE(DVSCNTR(pipe), dvscntr);
I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset); I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset);
POSTING_READ(DVSSURF(pipe)); POSTING_READ(DVSSURF(pipe));
} }
@@ -422,6 +421,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct intel_framebuffer *intel_fb; struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj, *old_obj; struct drm_i915_gem_object *obj, *old_obj;
int pipe = intel_plane->pipe; int pipe = intel_plane->pipe;
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
int ret = 0; int ret = 0;
int x = src_x >> 16, y = src_y >> 16; int x = src_x >> 16, y = src_y >> 16;
int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay; int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay;
@@ -436,7 +437,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
src_h = src_h >> 16; src_h = src_h >> 16;
/* Pipe must be running... */ /* Pipe must be running... */
if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE)) if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE))
return -EINVAL; return -EINVAL;
if (crtc_x >= primary_w || crtc_y >= primary_h) if (crtc_x >= primary_w || crtc_y >= primary_h)
@@ -446,6 +447,15 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (intel_plane->pipe != intel_crtc->pipe) if (intel_plane->pipe != intel_crtc->pipe)
return -EINVAL; return -EINVAL;
/* Sprite planes can be linear or x-tiled surfaces */
switch (obj->tiling_mode) {
case I915_TILING_NONE:
case I915_TILING_X:
break;
default:
return -EINVAL;
}
/* /*
* Clamp the width & height into the visible area. Note we don't * Clamp the width & height into the visible area. Note we don't
* try to scale the source if part of the visible region is offscreen. * try to scale the source if part of the visible region is offscreen.
@@ -472,6 +482,12 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (!crtc_w || !crtc_h) /* Again, nothing to display */ if (!crtc_w || !crtc_h) /* Again, nothing to display */
goto out; goto out;
/*
* We may not have a scaler, e.g. HSW does not have one any more.
*/
if (!intel_plane->can_scale && (crtc_w != src_w || crtc_h != src_h))
return -EINVAL;
/* /*
* We can take a larger source and scale it down, but * We can take a larger source and scale it down, but
* only so much... 16x is the max on SNB. * only so much... 16x is the max on SNB.
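Taken together with the can_scale flags set in intel_plane_init() below, the validation amounts to two constraints; a sketch (the second check is one possible encoding of the 16x/2x bound, not a line from this diff):

        if (!intel_plane->can_scale && (crtc_w != src_w || crtc_h != src_h))
                return -EINVAL;         /* no scaler at all on HSW/VLV */

        if (src_w > intel_plane->max_downscale * crtc_w ||
            src_h > intel_plane->max_downscale * crtc_h)
                return -EINVAL;         /* downscale factor exceeded */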
@@ -570,8 +586,6 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct intel_plane *intel_plane; struct intel_plane *intel_plane;
int ret = 0; int ret = 0;
// if (!drm_core_check_feature(dev, DRIVER_MODESET))
// return -ENODEV;
/* Make sure we don't try to enable both src & dest simultaneously */ /* Make sure we don't try to enable both src & dest simultaneously */
if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
@@ -603,8 +617,6 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
struct intel_plane *intel_plane; struct intel_plane *intel_plane;
int ret = 0; int ret = 0;
// if (!drm_core_check_feature(dev, DRIVER_MODESET))
// return -ENODEV;
mutex_lock(&dev->mode_config.mutex); mutex_lock(&dev->mode_config.mutex);
@@ -665,6 +677,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
switch (INTEL_INFO(dev)->gen) { switch (INTEL_INFO(dev)->gen) {
case 5: case 5:
case 6: case 6:
intel_plane->can_scale = true;
intel_plane->max_downscale = 16; intel_plane->max_downscale = 16;
intel_plane->update_plane = ilk_update_plane; intel_plane->update_plane = ilk_update_plane;
intel_plane->disable_plane = ilk_disable_plane; intel_plane->disable_plane = ilk_disable_plane;
@@ -681,6 +694,10 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
break; break;
case 7: case 7:
if (IS_HASWELL(dev) || IS_VALLEYVIEW(dev))
intel_plane->can_scale = false;
else
intel_plane->can_scale = true;
intel_plane->max_downscale = 2; intel_plane->max_downscale = 2;
intel_plane->update_plane = ivb_update_plane; intel_plane->update_plane = ivb_update_plane;
intel_plane->disable_plane = ivb_disable_plane; intel_plane->disable_plane = ivb_disable_plane;


@@ -368,7 +368,9 @@ int init_display_kms(struct drm_device *dev)
main_device = dev; main_device = dev;
#ifdef __HWA__
err = init_bitmaps(); err = init_bitmaps();
#endif
return 0; return 0;
}; };
@@ -624,7 +626,7 @@ cursor_t* __stdcall select_cursor_kms(cursor_t *cursor)
#ifdef __HWA__
extern struct hmm bm_mm; extern struct hmm bm_mm;
@@ -655,9 +657,9 @@ static u32_t get_display_map()
#define XY_COLOR_BLT ((2<<29)|(0x50<<22)|(0x4)) #define XY_COLOR_BLT ((2<<29)|(0x50<<22)|(0x4))
#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6) #define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
#define XY_SRC_COPY_CHROMA_CMD ((2<<29)|(0x73<<22)|8) #define XY_SRC_COPY_CHROMA_CMD ((2<<29)|(0x73<<22)|8)
#define ROP_COPY_SRC 0xCC #define ROP_COPY_SRC 0xCC
#define FORMAT8888 3 #define FORMAT8888 3
#define BLT_WRITE_ALPHA (1<<21) #define BLT_WRITE_ALPHA (1<<21)
#define BLT_WRITE_RGB (1<<20) #define BLT_WRITE_RGB (1<<20)
@@ -697,7 +699,7 @@ int srv_blit_bitmap(u32 hbitmap, int dst_x, int dst_y,
u32_t br13, cmd, slot_mask, *b; u32_t br13, cmd, slot_mask, *b;
u32_t offset; u32_t offset;
u8 slot; u8 slot;
int n=0; int n=0;
int ret; int ret;
if(unlikely(hbitmap==0)) if(unlikely(hbitmap==0))
@@ -1312,7 +1314,7 @@ int blit_tex(u32 hbitmap, int dst_x, int dst_y,
#endif #endif
#endif
@@ -1322,8 +1324,8 @@ void __stdcall run_workqueue(struct workqueue_struct *cwq)
{ {
unsigned long irqflags; unsigned long irqflags;
// dbgprintf("wq: %x head %x, next %x\n", dbgprintf("wq: %x head %x, next %x\n",
// cwq, &cwq->worklist, cwq->worklist.next); cwq, &cwq->worklist, cwq->worklist.next);
spin_lock_irqsave(&cwq->lock, irqflags); spin_lock_irqsave(&cwq->lock, irqflags);
@@ -1333,8 +1335,8 @@ void __stdcall run_workqueue(struct workqueue_struct *cwq)
struct work_struct, entry); struct work_struct, entry);
work_func_t f = work->func; work_func_t f = work->func;
list_del_init(cwq->worklist.next); list_del_init(cwq->worklist.next);
// dbgprintf("head %x, next %x\n", dbgprintf("head %x, next %x\n",
// &cwq->worklist, cwq->worklist.next); &cwq->worklist, cwq->worklist.next);
spin_unlock_irqrestore(&cwq->lock, irqflags); spin_unlock_irqrestore(&cwq->lock, irqflags);
f(work); f(work);
@@ -1351,8 +1353,8 @@ int __queue_work(struct workqueue_struct *wq,
{ {
unsigned long flags; unsigned long flags;
// dbgprintf("wq: %x, work: %x\n", dbgprintf("wq: %x, work: %x\n",
// wq, work ); wq, work );
if(!list_empty(&work->entry)) if(!list_empty(&work->entry))
return 0; return 0;
@@ -1365,8 +1367,8 @@ int __queue_work(struct workqueue_struct *wq,
list_add_tail(&work->entry, &wq->worklist); list_add_tail(&work->entry, &wq->worklist);
spin_unlock_irqrestore(&wq->lock, flags); spin_unlock_irqrestore(&wq->lock, flags);
// dbgprintf("wq: %x head %x, next %x\n", dbgprintf("wq: %x head %x, next %x\n",
// wq, &wq->worklist, wq->worklist.next); wq, &wq->worklist, wq->worklist.next);
return 1; return 1;
}; };
@@ -1376,8 +1378,8 @@ void __stdcall delayed_work_timer_fn(unsigned long __data)
struct delayed_work *dwork = (struct delayed_work *)__data; struct delayed_work *dwork = (struct delayed_work *)__data;
struct workqueue_struct *wq = dwork->work.data; struct workqueue_struct *wq = dwork->work.data;
// dbgprintf("wq: %x, work: %x\n", dbgprintf("wq: %x, work: %x\n",
// wq, &dwork->work ); wq, &dwork->work );
__queue_work(wq, &dwork->work); __queue_work(wq, &dwork->work);
} }
@@ -1398,8 +1400,8 @@ int queue_delayed_work(struct workqueue_struct *wq,
{ {
u32 flags; u32 flags;
// dbgprintf("wq: %x, work: %x\n", dbgprintf("wq: %x, work: %x\n",
// wq, &dwork->work ); wq, &dwork->work );
if (delay == 0) if (delay == 0)
return __queue_work(wq, &dwork->work); return __queue_work(wq, &dwork->work);


@@ -53,8 +53,8 @@ u32_t drvEntry(int action, char *cmdline)
if(!dbg_open(log)) if(!dbg_open(log))
{ {
// strcpy(log, "/tmp1/1/i915.log");
strcpy(log, "/RD/1/DRIVERS/i915.log"); strcpy(log, "/RD/1/DRIVERS/i915.log");
// strcpy(log, "/BD1/2/i915.log");
if(!dbg_open(log)) if(!dbg_open(log))
{ {
@@ -62,7 +62,7 @@ u32_t drvEntry(int action, char *cmdline)
return 0; return 0;
}; };
} }
dbgprintf("i915 preview #08\n cmdline: %s\n", cmdline); dbgprintf("i915 RC 10\n cmdline: %s\n", cmdline);
cpu_detect(); cpu_detect();
dbgprintf("\ncache line size %d\n", x86_clflush_size); dbgprintf("\ncache line size %d\n", x86_clflush_size);
@@ -153,20 +153,20 @@ int _stdcall display_handler(ioctl_t *io)
case SRV_CREATE_SURFACE: case SRV_CREATE_SURFACE:
// check_input(8); // check_input(8);
retval = create_surface(main_device, (struct io_call_10*)inp); // retval = create_surface(main_device, (struct io_call_10*)inp);
break; break;
case SRV_LOCK_SURFACE: case SRV_LOCK_SURFACE:
retval = lock_surface((struct io_call_12*)inp); // retval = lock_surface((struct io_call_12*)inp);
break; break;
case SRV_RESIZE_SURFACE: case SRV_RESIZE_SURFACE:
retval = resize_surface((struct io_call_14*)inp); // retval = resize_surface((struct io_call_14*)inp);
break; break;
case SRV_BLIT_BITMAP: // case SRV_BLIT_BITMAP:
srv_blit_bitmap( inp[0], inp[1], inp[2], // srv_blit_bitmap( inp[0], inp[1], inp[2],
inp[3], inp[4], inp[5], inp[6]); // inp[3], inp[4], inp[5], inp[6]);
// blit_tex( inp[0], inp[1], inp[2], // blit_tex( inp[0], inp[1], inp[2],
// inp[3], inp[4], inp[5], inp[6]); // inp[3], inp[4], inp[5], inp[6]);
@@ -279,3 +279,26 @@ void cpu_detect()
} }
} }
int get_driver_caps(hwcaps_t *caps)
{
int ret = 0;
switch(caps->idx)
{
case 0:
caps->opt[0] = 0;
caps->opt[1] = 0;
break;
case 1:
caps->cap1.max_tex_width = 4096;
caps->cap1.max_tex_height = 4096;
break;
default:
ret = 1;
};
caps->idx = 1;
return ret;
}
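A hypothetical caller sketch for the new capability query (the protocol: the caller selects a page via caps->idx, the driver fills it and returns 0, or returns 1 for an unknown page):

        hwcaps_t caps;

        caps.idx = 1;                   /* request capability page 1 */
        if (get_driver_caps(&caps) == 0)
                dbgprintf("max texture %dx%d\n",
                          caps.cap1.max_tex_width,
                          caps.cap1.max_tex_height);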