i915: DRM & GEM related code

git-svn-id: svn://kolibrios.org@3255 a494cfbc-eb01-0410-851d-a64ba20cac60
This commit is contained in:
Sergey Semyonov (Serge) 2013-02-18 14:33:08 +00:00
parent 013e845fb3
commit 322b8405c7
9 changed files with 373 additions and 46 deletions

View File

@ -427,16 +427,11 @@ struct drm_prime_file_private {
struct list_head head; struct list_head head;
struct mutex lock; struct mutex lock;
}; };
#endif
/** File private data */ /** File private data */
struct drm_file { struct drm_file {
int authenticated;
struct pid *pid;
kuid_t uid;
drm_magic_t magic;
unsigned long ioctl_count;
struct list_head lhead; struct list_head lhead;
struct drm_minor *minor;
unsigned long lock_count; unsigned long lock_count;
/** Mapping of mm object handles to object pointers. */ /** Mapping of mm object handles to object pointers. */
@ -444,21 +439,16 @@ struct drm_file {
/** Lock for synchronization of access to object_idr. */ /** Lock for synchronization of access to object_idr. */
spinlock_t table_lock; spinlock_t table_lock;
struct file *filp;
void *driver_priv; void *driver_priv;
int is_master; /* this file private is a master for a minor */
struct drm_master *master; /* master this node is currently associated with
N.B. not always minor->master */
struct list_head fbs; struct list_head fbs;
wait_queue_head_t event_wait; wait_queue_head_t event_wait;
struct list_head event_list; struct list_head event_list;
int event_space; int event_space;
struct drm_prime_file_private prime;
}; };
#if 0
/** Wait queue */ /** Wait queue */
struct drm_queue { struct drm_queue {
atomic_t use_count; /**< Outstanding uses (+1) */ atomic_t use_count; /**< Outstanding uses (+1) */
@ -972,6 +962,8 @@ struct drm_driver {
irqreturn_t (*irq_handler) (DRM_IRQ_ARGS); irqreturn_t (*irq_handler) (DRM_IRQ_ARGS);
void (*irq_preinstall) (struct drm_device *dev); void (*irq_preinstall) (struct drm_device *dev);
int (*irq_postinstall) (struct drm_device *dev); int (*irq_postinstall) (struct drm_device *dev);
int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
}; };
@ -1601,7 +1593,6 @@ drm_gem_object_unreference(struct drm_gem_object *obj)
kref_put(&obj->refcount, drm_gem_object_free); kref_put(&obj->refcount, drm_gem_object_free);
} }
#if 0
static inline void static inline void
drm_gem_object_unreference_unlocked(struct drm_gem_object *obj) drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
{ {
@ -1682,6 +1673,8 @@ extern void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev);
extern void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev); extern void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev);
extern void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev); extern void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev);
#if 0
static __inline__ struct drm_local_map *drm_core_findmap(struct drm_device *dev, static __inline__ struct drm_local_map *drm_core_findmap(struct drm_device *dev,
unsigned int token) unsigned int token)
{ {

View File

@ -312,6 +312,8 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_SEMAPHORES 20 #define I915_PARAM_HAS_SEMAPHORES 20
#define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21 #define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21
#define I915_PARAM_RSVD_FOR_FUTURE_USE 22 #define I915_PARAM_RSVD_FOR_FUTURE_USE 22
#define I915_PARAM_HAS_SECURE_BATCHES 23
#define I915_PARAM_HAS_PINNED_BATCHES 24
typedef struct drm_i915_getparam { typedef struct drm_i915_getparam {
int param; int param;

View File

@ -51,6 +51,16 @@
#define DBG_NO_HANDLE_LUT 0 #define DBG_NO_HANDLE_LUT 0
#define DBG_DUMP 0 #define DBG_DUMP 0
/* Worst case seems to be 965gm where we cannot write within a cacheline that
* is being simultaneously being read by the GPU, or within the sampler
* prefetch. In general, the chipsets seem to have a requirement that sampler
* offsets be aligned to a cacheline (64 bytes).
*/
#define UPLOAD_ALIGNMENT 128
#define PAGE_ALIGN(x) ALIGN(x, PAGE_SIZE)
#define NUM_PAGES(x) (((x) + PAGE_SIZE-1) / PAGE_SIZE)
#define MAX_GTT_VMA_CACHE 512 #define MAX_GTT_VMA_CACHE 512
#define MAX_CPU_VMA_CACHE INT16_MAX #define MAX_CPU_VMA_CACHE INT16_MAX
#define MAP_PRESERVE_TIME 10 #define MAP_PRESERVE_TIME 10
@ -72,7 +82,123 @@
#define LOCAL_I915_PARAM_HAS_NO_RELOC 25 #define LOCAL_I915_PARAM_HAS_NO_RELOC 25
#define LOCAL_I915_PARAM_HAS_HANDLE_LUT 26 #define LOCAL_I915_PARAM_HAS_HANDLE_LUT 26
static struct kgem_bo *__kgem_freed_bo;
#define bucket(B) (B)->size.pages.bucket
#define num_pages(B) (B)->size.pages.count
#ifdef DEBUG_MEMORY
/* Account one buffer-object allocation of `size` bytes in the kgem
 * debug counters (alloc count + total bytes). Compiled out unless
 * DEBUG_MEMORY is defined. */
static void debug_alloc(struct kgem *kgem, size_t size)
{
kgem->debug_memory.bo_allocs++;
kgem->debug_memory.bo_bytes += size;
}
/* Convenience wrapper: account an allocation using the bo's own size
 * (bytes(bo)). */
static void debug_alloc__bo(struct kgem *kgem, struct kgem_bo *bo)
{
debug_alloc(kgem, bytes(bo));
}
#else
/* No-op stubs when memory debugging is disabled. */
#define debug_alloc(k, b)
#define debug_alloc__bo(k, b)
#endif
/* Allocate a new GEM buffer object of `num_pages` pages via the
 * SRV_I915_GEM_CREATE service call.
 *
 * Returns the kernel GEM handle, or 0 on failure. */
static uint32_t gem_create(int fd, int num_pages)
{
	struct drm_i915_gem_create create;
	ioctl_t call;

	VG_CLEAR(create);
	create.handle = 0;
	create.size = PAGE_SIZE * num_pages;

	call.handle   = fd;
	call.io_code  = SRV_I915_GEM_CREATE;
	call.input    = &create;
	call.inp_size = sizeof(create);
	call.output   = NULL;
	call.out_size = 0;

	/* Non-zero service result means the kernel rejected the request. */
	if (call_service(&call) != 0)
		return 0;

	return create.handle;
}
/* Release a GEM handle previously returned by gem_create() (or any
 * other GEM allocation path) via SRV_DRM_GEM_CLOSE.
 * Best-effort: the service result is intentionally ignored. */
static void gem_close(int fd, uint32_t handle)
{
	struct drm_gem_close close;
	ioctl_t call;

	VG_CLEAR(close);
	close.handle = handle;

	call.handle   = fd;
	call.io_code  = SRV_DRM_GEM_CLOSE;
	call.input    = &close;
	call.inp_size = sizeof(close);
	call.output   = NULL;
	call.out_size = 0;

	call_service(&call);
}
/* Find-last-set: index (0-based) of the most significant set bit of
 * `word`. On x86 this is a single BSR instruction; elsewhere a portable
 * shift loop. NOTE(review): for word == 0 the x86 BSR result is
 * architecturally undefined while the fallback returns 0 — callers are
 * expected never to pass 0. */
constant inline static unsigned long __fls(unsigned long word)
{
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86__) || defined(__x86_64__))
	asm("bsr %1,%0"
	    : "=r" (word)
	    : "rm" (word));
	return word;
#else
	unsigned int msb = 0;

	for (; word > 1; word >>= 1)
		msb++;

	return msb;
#endif
}
/* Map a size in pages to its power-of-two cache bucket index
 * (floor(log2(num_pages))), used to index the kgem bo cache lists. */
constant inline static int cache_bucket(int num_pages)
{
return __fls(num_pages);
}
/* Initialise a freshly-obtained kgem_bo to its default state: a single
 * CPU-domain reference wrapping GEM handle `handle`, `num_pages` pages
 * long, placed in the matching cache bucket and marked reusable.
 * Returns `bo` for call chaining. */
static struct kgem_bo *__kgem_bo_init(struct kgem_bo *bo,
				      int handle, int num_pages)
{
	assert(num_pages);

	memset(bo, 0, sizeof(*bo));

	bo->refcnt = 1;
	bo->handle = handle;
	bo->target_handle = -1;
	bo->reusable = true;
	bo->domain = DOMAIN_CPU;

	num_pages(bo) = num_pages;
	bucket(bo) = cache_bucket(num_pages);

	list_init(&bo->request);
	list_init(&bo->list);
	list_init(&bo->vma);

	return bo;
}
/* Obtain a kgem_bo — preferring the single-slot free list over malloc —
 * and initialise it for GEM handle `handle` of `num_pages` pages.
 * Returns NULL only on allocation failure. */
static struct kgem_bo *__kgem_bo_alloc(int handle, int num_pages)
{
	struct kgem_bo *bo = __kgem_freed_bo;

	if (bo != NULL) {
		/* Pop from the freelist: the first word of a freed bo
		 * stores the next freelist entry. */
		__kgem_freed_bo = *(struct kgem_bo **)bo;
	} else {
		bo = malloc(sizeof(*bo));
		if (bo == NULL)
			return NULL;
	}

	return __kgem_bo_init(bo, handle, num_pages);
}
static int gem_param(struct kgem *kgem, int name) static int gem_param(struct kgem *kgem, int name)
{ {
@ -99,6 +225,11 @@ static int gem_param(struct kgem *kgem, int name)
return v; return v;
} }
struct kgem;

/* Probe for DRM_IOCTL_I915_GEM_EXECBUFFER2 support.
 * In this KolibriOS port the kernel side always provides execbuffer2,
 * so the probe is a constant-true stub; `kgem` is accepted to keep the
 * signature of the upstream sna probe. */
static bool test_has_execbuffer2(struct kgem *kgem)
{
	(void)kgem;	/* unused in the stubbed probe */
	return true;	/* was `return 1` — use the bool literal */
}
static bool test_has_no_reloc(struct kgem *kgem) static bool test_has_no_reloc(struct kgem *kgem)
{ {
if (DBG_NO_FAST_RELOC) if (DBG_NO_FAST_RELOC)
@ -131,6 +262,41 @@ static bool test_has_semaphores_enabled(struct kgem *kgem)
return detected; return detected;
} }
struct kgem;

/* Report whether the GPU is throttled/hung.
 * DRM_IOCTL_I915_GEM_THROTTLE is not wired up in this port, so this
 * always answers "not throttled". Upstream logic kept for reference:
 *
 *	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL) == 0)
 *		return false;
 *	return errno == EIO;
 */
static bool __kgem_throttle(struct kgem *kgem)
{
	return false;
}
/* Decide whether hardware acceleration should be enabled for this
 * chipset/kernel combination. */
static bool is_hw_supported(struct kgem *kgem,
			    struct pci_device *dev)
{
	if (DBG_NO_HW)
		return false;

	if (!test_has_execbuffer2(kgem))
		return false;

	/* Unknown (future) chipset: trust it iff the BLT ring works. */
	if (kgem->gen == (unsigned)-1)
		return kgem->has_blt;

	if (kgem->gen >= 060) {
		/* Pre-production SNB units ship a dysfunctional BLT. */
		if (kgem->gen == 060 && dev->revision < 8)
			return false;
		/* Gen6+: only if the kernel supports the BLT ring. */
		return kgem->has_blt;
	}

	/* Although pre-855gm the GMCH is fubar, it works mostly. Let the
	 * user decide through "NoAccel" whether or not to risk hw
	 * acceleration. */
	return true;
}
static bool test_has_relaxed_fencing(struct kgem *kgem) static bool test_has_relaxed_fencing(struct kgem *kgem)
{ {
@ -223,6 +389,89 @@ static bool test_has_pinned_batches(struct kgem *kgem)
} }
/* Pre-allocate and pin the small batch buffers used to work around the
 * command-streamer errata: two buckets (4 x 1-page, 2 x 4-page) of bos
 * pinned at init time via SRV_I915_GEM_PIN.
 *
 * Returns true on full success (or trivially when already wedged).
 * On any failure every pinned bo is destroyed and, as a fallback, each
 * list is repopulated with a single UNpinned bo; false is returned so
 * the caller can decide whether that is fatal (it is on gen2). */
static bool kgem_init_pinned_batches(struct kgem *kgem)
{
ioctl_t io;
/* count[n] bos of size[n] pages per bucket. */
int count[2] = { 4, 2 };
int size[2] = { 1, 4 };
int n, i;
/* A wedged GPU never executes batches; nothing to reserve. */
if (kgem->wedged)
return true;
for (n = 0; n < ARRAY_SIZE(count); n++) {
for (i = 0; i < count[n]; i++) {
struct drm_i915_gem_pin pin;
struct kgem_bo *bo;
VG_CLEAR(pin);
pin.handle = gem_create(kgem->fd, size[n]);
if (pin.handle == 0)
goto err;
DBG(("%s: new handle=%d, num_pages=%d\n",
__FUNCTION__, pin.handle, size[n]));
bo = __kgem_bo_alloc(pin.handle, size[n]);
if (bo == NULL) {
gem_close(kgem->fd, pin.handle);
goto err;
}
/* Pin the bo in the GTT; the kernel returns its fixed offset. */
pin.alignment = 0;
io.handle = kgem->fd;
io.io_code = SRV_I915_GEM_PIN;
io.input = &pin;
io.inp_size = sizeof(pin);
io.output = NULL;
io.out_size = 0;
if (call_service(&io)!=0){
gem_close(kgem->fd, pin.handle);
goto err;
}
/* Pinned bos keep a stable GTT offset for relocation-free use. */
bo->presumed_offset = pin.offset;
debug_alloc__bo(kgem, bo);
list_add(&bo->list, &kgem->pinned_batches[n]);
}
}
return true;
err:
/* Tear down everything pinned so far before falling back. */
for (n = 0; n < ARRAY_SIZE(kgem->pinned_batches); n++) {
while (!list_is_empty(&kgem->pinned_batches[n])) {
kgem_bo_destroy(kgem,
list_first_entry(&kgem->pinned_batches[n],
struct kgem_bo, list));
}
}
/* For simplicity populate the lists with a single unpinned bo */
for (n = 0; n < ARRAY_SIZE(count); n++) {
struct kgem_bo *bo;
uint32_t handle;
handle = gem_create(kgem->fd, size[n]);
if (handle == 0)
break;
bo = __kgem_bo_alloc(handle, size[n]);
if (bo == NULL) {
gem_close(kgem->fd, handle);
break;
}
debug_alloc__bo(kgem, bo);
list_add(&bo->list, &kgem->pinned_batches[n]);
}
return false;
}
void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, unsigned gen) void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, unsigned gen)
{ {
@ -259,7 +508,6 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, unsigned gen)
for (j = 0; j < ARRAY_SIZE(kgem->vma[i].inactive); j++) for (j = 0; j < ARRAY_SIZE(kgem->vma[i].inactive); j++)
list_init(&kgem->vma[i].inactive[j]); list_init(&kgem->vma[i].inactive[j]);
} }
kgem->vma[MAP_GTT].count = -MAX_GTT_VMA_CACHE; kgem->vma[MAP_GTT].count = -MAX_GTT_VMA_CACHE;
kgem->vma[MAP_CPU].count = -MAX_CPU_VMA_CACHE; kgem->vma[MAP_CPU].count = -MAX_CPU_VMA_CACHE;
@ -272,7 +520,6 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, unsigned gen)
DBG(("%s: has relaxed delta? %d\n", __FUNCTION__, DBG(("%s: has relaxed delta? %d\n", __FUNCTION__,
kgem->has_relaxed_delta)); kgem->has_relaxed_delta));
kgem->has_relaxed_fencing = test_has_relaxed_fencing(kgem); kgem->has_relaxed_fencing = test_has_relaxed_fencing(kgem);
DBG(("%s: has relaxed fencing? %d\n", __FUNCTION__, DBG(("%s: has relaxed fencing? %d\n", __FUNCTION__,
kgem->has_relaxed_fencing)); kgem->has_relaxed_fencing));
@ -315,15 +562,11 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, unsigned gen)
DBG(("%s: can use pinned batchbuffers (to avoid CS w/a)? %d\n", __FUNCTION__, DBG(("%s: can use pinned batchbuffers (to avoid CS w/a)? %d\n", __FUNCTION__,
kgem->has_pinned_batches)); kgem->has_pinned_batches));
#if 0
if (!is_hw_supported(kgem, dev)) { if (!is_hw_supported(kgem, dev)) {
xf86DrvMsg(kgem_get_screen_index(kgem), X_WARNING, printf("Detected unsupported/dysfunctional hardware, disabling acceleration.\n");
"Detected unsupported/dysfunctional hardware, disabling acceleration.\n");
kgem->wedged = 1; kgem->wedged = 1;
} else if (__kgem_throttle(kgem)) { } else if (__kgem_throttle(kgem)) {
xf86DrvMsg(kgem_get_screen_index(kgem), X_WARNING, printf("Detected a hung GPU, disabling acceleration.\n");
"Detected a hung GPU, disabling acceleration.\n");
kgem->wedged = 1; kgem->wedged = 1;
} }
@ -340,8 +583,7 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, unsigned gen)
kgem->batch_size = 4*1024; kgem->batch_size = 4*1024;
if (!kgem_init_pinned_batches(kgem) && gen == 020) { if (!kgem_init_pinned_batches(kgem) && gen == 020) {
xf86DrvMsg(kgem_get_screen_index(kgem), X_WARNING, printf("Unable to reserve memory for GPU, disabling acceleration.\n");
"Unable to reserve memory for GPU, disabling acceleration.\n");
kgem->wedged = 1; kgem->wedged = 1;
} }
@ -352,6 +594,8 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, unsigned gen)
if (gen < 040) if (gen < 040)
kgem->min_alignment = 64; kgem->min_alignment = 64;
#if 0
kgem->half_cpu_cache_pages = cpu_cache_size() >> 13; kgem->half_cpu_cache_pages = cpu_cache_size() >> 13;
DBG(("%s: half cpu cache %d pages\n", __FUNCTION__, DBG(("%s: half cpu cache %d pages\n", __FUNCTION__,
kgem->half_cpu_cache_pages)); kgem->half_cpu_cache_pages));

View File

@ -69,6 +69,9 @@ typedef struct
#define SRV_GET_INFO 20 #define SRV_GET_INFO 20
#define SRV_GET_PARAM 21 #define SRV_GET_PARAM 21
#define SRV_I915_GEM_CREATE 22
#define SRV_DRM_GEM_CLOSE 23
#define SRV_I915_GEM_PIN 24
static int call_service(ioctl_t *io) static int call_service(ioctl_t *io)
{ {

View File

@ -85,7 +85,7 @@ struct kgem_bo *sna_static_stream_fini(struct sna *sna,
bo = kgem_create_linear(&sna->kgem, stream->used, 0); bo = kgem_create_linear(&sna->kgem, stream->used, 0);
if (bo && !kgem_bo_write(&sna->kgem, bo, stream->data, stream->used)) { if (bo && !kgem_bo_write(&sna->kgem, bo, stream->data, stream->used)) {
// kgem_bo_destroy(&sna->kgem, bo); kgem_bo_destroy(&sna->kgem, bo);
return NULL; return NULL;
} }

View File

@ -912,6 +912,7 @@ static int i915_flip_bufs(struct drm_device *dev, void *data,
return ret; return ret;
} }
#endif
static int i915_getparam(struct drm_device *dev, void *data, static int i915_getparam(struct drm_device *dev, void *data,
struct drm_file *file_priv) struct drm_file *file_priv)
@ -991,7 +992,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = 1; value = 1;
break; break;
case I915_PARAM_HAS_SECURE_BATCHES: case I915_PARAM_HAS_SECURE_BATCHES:
value = capable(CAP_SYS_ADMIN); value = 1;
break; break;
case I915_PARAM_HAS_PINNED_BATCHES: case I915_PARAM_HAS_PINNED_BATCHES:
value = 1; value = 1;
@ -1002,14 +1003,17 @@ static int i915_getparam(struct drm_device *dev, void *data,
return -EINVAL; return -EINVAL;
} }
if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) { // if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
DRM_ERROR("DRM_COPY_TO_USER failed\n"); // DRM_ERROR("DRM_COPY_TO_USER failed\n");
return -EFAULT; // return -EFAULT;
} // }
*param->value = value;
return 0; return 0;
} }
#if 0
static int i915_setparam(struct drm_device *dev, void *data, static int i915_setparam(struct drm_device *dev, void *data,
struct drm_file *file_priv) struct drm_file *file_priv)
{ {
@ -1672,3 +1676,9 @@ int i915_driver_device_is_agp(struct drm_device * dev)
return 1; return 1;
} }
#endif #endif
/**
 * gem_getparam - service-layer entry point for GETPARAM requests.
 * @dev:  the DRM device (main_device in this port)
 * @data: pointer to a struct drm_i915_getparam supplied by the caller
 *
 * Thin wrapper so the KolibriOS ioctl dispatcher can reach the static
 * i915_getparam(). No per-client file state is required by the port's
 * i915_getparam, hence the NULL file_priv.
 *
 * Returns 0 on success or a negative errno-style code.
 */
int gem_getparam(struct drm_device *dev, void *data)
{
	return i915_getparam(dev, data, NULL);
}
/* NOTE: removed the stray `;` that followed the function body — an
 * empty file-scope declaration is not valid C before C23. */

View File

@ -49,6 +49,8 @@ int init_display_kms(struct drm_device *dev);
struct drm_device *main_device; struct drm_device *main_device;
struct drm_file *drm_file_handlers[256];
static int i915_modeset __read_mostly = 1; static int i915_modeset __read_mostly = 1;
MODULE_PARM_DESC(modeset, MODULE_PARM_DESC(modeset,
"Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, " "Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
@ -481,14 +483,19 @@ int i915_init(void)
int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent) int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
{ {
struct drm_device *dev;
static struct drm_driver driver; static struct drm_driver driver;
static struct drm_device drm_dev;
static struct drm_file drm_file;
struct drm_device *dev;
struct drm_file *priv;
int ret; int ret;
dev = kzalloc(sizeof(*dev), 0); dev = &drm_dev;
if (!dev) priv = &drm_file;
return -ENOMEM;
drm_file_handlers[0] = priv;
// ret = pci_enable_device(pdev); // ret = pci_enable_device(pdev);
// if (ret) // if (ret)
@ -514,6 +521,15 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
mutex_init(&dev->struct_mutex); mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex); mutex_init(&dev->ctxlist_mutex);
INIT_LIST_HEAD(&priv->lhead);
INIT_LIST_HEAD(&priv->fbs);
INIT_LIST_HEAD(&priv->event_list);
init_waitqueue_head(&priv->event_wait);
priv->event_space = 4096; /* set aside 4k for event buffer */
idr_init(&priv->object_idr);
spin_lock_init(&priv->table_lock);
dev->driver = &driver; dev->driver = &driver;
ret = i915_driver_load(dev, ent->driver_data ); ret = i915_driver_load(dev, ent->driver_data );
@ -529,14 +545,12 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0; return 0;
err_g4: err_g4:
// drm_put_minor(&dev->primary);
//err_g3: //err_g3:
// if (drm_core_check_feature(dev, DRIVER_MODESET)) // if (drm_core_check_feature(dev, DRIVER_MODESET))
// drm_put_minor(&dev->control); // drm_put_minor(&dev->control);
//err_g2: //err_g2:
// pci_disable_device(pdev); // pci_disable_device(pdev);
//err_g1: //err_g1:
free(dev);
return ret; return ret;
} }

View File

@ -138,7 +138,6 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
dev_priv->mm.object_memory -= size; dev_priv->mm.object_memory -= size;
} }
#if 0
static int static int
i915_gem_wait_for_error(struct drm_device *dev) i915_gem_wait_for_error(struct drm_device *dev)
@ -150,7 +149,7 @@ i915_gem_wait_for_error(struct drm_device *dev)
if (!atomic_read(&dev_priv->mm.wedged)) if (!atomic_read(&dev_priv->mm.wedged))
return 0; return 0;
#if 0
/* /*
* Only wait 10 seconds for the gpu reset to complete to avoid hanging * Only wait 10 seconds for the gpu reset to complete to avoid hanging
* userspace. If it takes that long something really bad is going on and * userspace. If it takes that long something really bad is going on and
@ -174,6 +173,8 @@ i915_gem_wait_for_error(struct drm_device *dev)
x->done++; x->done++;
spin_unlock_irqrestore(&x->wait.lock, flags); spin_unlock_irqrestore(&x->wait.lock, flags);
} }
#endif
return 0; return 0;
} }
@ -185,14 +186,11 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
if (ret) if (ret)
return ret; return ret;
ret = mutex_lock_interruptible(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
if (ret)
return ret;
WARN_ON(i915_verify_lists(dev)); WARN_ON(i915_verify_lists(dev));
return 0; return 0;
} }
#endif
static inline bool static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj) i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
@ -251,7 +249,6 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
return 0; return 0;
} }
#if 0
static int static int
i915_gem_create(struct drm_file *file, i915_gem_create(struct drm_file *file,
struct drm_device *dev, struct drm_device *dev,
@ -284,6 +281,7 @@ i915_gem_create(struct drm_file *file,
trace_i915_gem_object_create(obj); trace_i915_gem_object_create(obj);
*handle_p = handle; *handle_p = handle;
return 0; return 0;
} }
@ -319,6 +317,8 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
args->size, &args->handle); args->size, &args->handle);
} }
#if 0
static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{ {
drm_i915_private_t *dev_priv = obj->base.dev->dev_private; drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
@ -1473,7 +1473,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
* Fail silently without starting the shrinker * Fail silently without starting the shrinker
*/ */
for_each_sg(st->sgl, sg, page_count, i) { for_each_sg(st->sgl, sg, page_count, i) {
page = AllocPage(); // oh-oh page = (struct page *)AllocPage(); // oh-oh
if ( page == 0 ) if ( page == 0 )
goto err_pages; goto err_pages;
@ -3054,7 +3054,6 @@ i915_gem_object_unpin(struct drm_i915_gem_object *obj)
obj->pin_mappable = false; obj->pin_mappable = false;
} }
#if 0
int int
i915_gem_pin_ioctl(struct drm_device *dev, void *data, i915_gem_pin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file) struct drm_file *file)
@ -3107,6 +3106,8 @@ unlock:
return ret; return ret;
} }
#if 0
int int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data, i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file) struct drm_file *file)

View File

@ -13,7 +13,21 @@
#include "bitmap.h" #include "bitmap.h"
/* Userspace-visible PCI identification record, filled by get_pci_info()
 * and handed out through the SRV_PCI_INFO ioctl. Field layout mirrors
 * the libpciaccess struct pci_device that sna expects. */
struct pci_device {
uint16_t domain;	/* PCI domain (always 0 in this port) */
uint8_t bus;		/* bus number */
uint8_t dev;		/* device number (devfn >> 3) */
uint8_t func;		/* function number (devfn & 7) */
uint16_t vendor_id;	/* PCI vendor ID */
uint16_t device_id;	/* PCI device ID */
uint16_t subvendor_id;	/* subsystem vendor ID */
uint16_t subdevice_id;	/* subsystem device ID */
uint32_t device_class;	/* PCI class code */
uint8_t revision;	/* revision ID */
};
extern struct drm_device *main_device; extern struct drm_device *main_device;
extern struct drm_file *drm_file_handlers[256];
void cpu_detect(); void cpu_detect();
@ -30,6 +44,10 @@ int blit_textured(u32 hbitmap, int dst_x, int dst_y,
int blit_tex(u32 hbitmap, int dst_x, int dst_y, int blit_tex(u32 hbitmap, int dst_x, int dst_y,
int src_x, int src_y, u32 w, u32 h); int src_x, int src_y, u32 w, u32 h);
void get_pci_info(struct pci_device *dev);
int gem_getparam(struct drm_device *dev, void *data);
static char log[256]; static char log[256];
int x86_clflush_size; int x86_clflush_size;
@ -38,7 +56,6 @@ int i915_modeset = 1;
u32_t drvEntry(int action, char *cmdline) u32_t drvEntry(int action, char *cmdline)
{ {
struct pci_device_id *ent;
int err = 0; int err = 0;
@ -105,6 +122,12 @@ u32_t drvEntry(int action, char *cmdline)
#define SRV_BLIT_TEXTURE 16 #define SRV_BLIT_TEXTURE 16
#define SRV_BLIT_VIDEO 17 #define SRV_BLIT_VIDEO 17
#define SRV_PCI_INFO 20
#define SRV_GET_PARAM 21
#define SRV_I915_GEM_CREATE 22
#define SRV_DRM_GEM_CLOSE 23
#define SRV_I915_GEM_PIN 24
#define check_input(size) \ #define check_input(size) \
if( unlikely((inp==NULL)||(io->inp_size != (size))) ) \ if( unlikely((inp==NULL)||(io->inp_size != (size))) ) \
break; break;
@ -115,6 +138,8 @@ u32_t drvEntry(int action, char *cmdline)
int _stdcall display_handler(ioctl_t *io) int _stdcall display_handler(ioctl_t *io)
{ {
struct drm_file *file;
int retval = -1; int retval = -1;
u32_t *inp; u32_t *inp;
u32_t *outp; u32_t *outp;
@ -122,6 +147,8 @@ int _stdcall display_handler(ioctl_t *io)
inp = io->input; inp = io->input;
outp = io->output; outp = io->output;
file = drm_file_handlers[0];
switch(io->io_code) switch(io->io_code)
{ {
case SRV_GETVERSION: case SRV_GETVERSION:
@ -164,17 +191,35 @@ int _stdcall display_handler(ioctl_t *io)
// retval = resize_surface((struct io_call_14*)inp); // retval = resize_surface((struct io_call_14*)inp);
break; break;
// case SRV_BLIT_BITMAP: case SRV_BLIT_BITMAP:
// srv_blit_bitmap( inp[0], inp[1], inp[2], // srv_blit_bitmap( inp[0], inp[1], inp[2],
// inp[3], inp[4], inp[5], inp[6]); // inp[3], inp[4], inp[5], inp[6]);
// blit_tex( inp[0], inp[1], inp[2], // blit_tex( inp[0], inp[1], inp[2],
// inp[3], inp[4], inp[5], inp[6]); // inp[3], inp[4], inp[5], inp[6]);
break;
case SRV_PCI_INFO:
get_pci_info((struct pci_device *)inp);
retval = 0; retval = 0;
break; break;
case SRV_GET_PARAM:
retval = gem_getparam(main_device, inp);
break;
case SRV_I915_GEM_CREATE:
retval = i915_gem_create_ioctl(main_device, inp, file);
break;
case SRV_DRM_GEM_CLOSE:
retval = drm_gem_close_ioctl(main_device, inp, file);
break;
case SRV_I915_GEM_PIN:
retval = i915_gem_pin_ioctl(main_device, inp, file);
break;
}; };
return retval; return retval;
@ -302,3 +347,18 @@ int get_driver_caps(hwcaps_t *caps)
return ret; return ret;
} }
/**
 * get_pci_info - export the main device's PCI identity to userspace.
 * @dev: caller-provided record to fill (see SRV_PCI_INFO).
 *
 * Copies bus location and vendor/device/revision IDs from the global
 * main_device's pci_dev into @dev.
 */
void get_pci_info(struct pci_device *dev)
{
	struct pci_dev *pdev = main_device->pdev;

	/* BUG FIX: arguments were swapped — memset(dev, sizeof(*dev), 0)
	 * cleared ZERO bytes, leaving subvendor_id/subdevice_id/
	 * device_class as uninitialized garbage. Correct order is
	 * (dest, fill byte, count). */
	memset(dev, 0, sizeof(*dev));

	dev->domain    = 0;
	dev->bus       = pdev->busnr;
	dev->dev       = pdev->devfn >> 3;
	dev->func      = pdev->devfn & 7;
	dev->vendor_id = pdev->vendor;
	dev->device_id = pdev->device;
	dev->revision  = pdev->revision;
	/* subvendor_id, subdevice_id and device_class are now reliably
	 * zero; TODO(review): populate from pdev if callers need them. */
}