i915: drm_pwrite

git-svn-id: svn://kolibrios.org@3260 a494cfbc-eb01-0410-851d-a64ba20cac60
Sergey Semyonov (Serge) 2013-02-21 04:10:35 +00:00
parent d12cb9e707
commit b30a061ac1
8 changed files with 1171 additions and 100 deletions

drivers/video/drm/drm_gem.c

@@ -0,0 +1,646 @@
/*
* Copyright © 2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/shmem_fs.h>
#include <linux/err.h>
#include <drm/drmP.h>
/** @file drm_gem.c
*
* This file provides some of the base ioctls and library routines for
* the graphics memory manager implemented by each device driver.
*
* Because various devices have different requirements in terms of
* synchronization and migration strategies, implementing that is left up to
* the driver, and all that the general API provides should be generic --
* allocating objects, reading/writing data with the cpu, freeing objects.
* Even there, platform-dependent optimizations for reading/writing data with
* the CPU mean we'll likely hook those out to driver-specific calls. However,
* the DRI2 implementation wants to have at least allocate/mmap be generic.
*
* The goal was to have swap-backed object allocation managed through
* struct file. However, file descriptors as handles to a struct file have
* two major failings:
* - Process limits prevent more than 1024 or so being used at a time by
* default.
* - Inability to allocate high fds will aggravate the X Server's select()
* handling, and likely that of many GL client applications as well.
*
* This led to a plan of using our own integer IDs (called handles, following
* DRM terminology) to mimic fds, and implement the fd syscalls we need as
* ioctls. The objects themselves will still include the struct file so
* that we can transition to fds if the required kernel infrastructure shows
* up at a later date, and as our interface with shmfs for memory allocation.
*/
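/*
 * A minimal sketch of the resulting handle lifecycle, assuming a driver-side
 * create ioctl (the helpers named below are the ones defined in this file;
 * error handling is abbreviated):
 *
 *	struct drm_gem_object *obj = drm_gem_object_alloc(dev, size);
 *	u32 handle;
 *	ret = drm_gem_handle_create(file_priv, obj, &handle);
 *	drm_gem_object_unreference_unlocked(obj); // the handle keeps the ref
 *	...
 *	obj = drm_gem_object_lookup(dev, file_priv, handle); // takes a ref
 *	...
 *	drm_gem_handle_delete(file_priv, handle);
 */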
/*
* We make up offsets for buffer objects so we can recognize them at
* mmap time.
*/
/* pgoff in mmap is an unsigned long, so we need to make sure that
* the faked up offset will fit
*/
#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
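/*
 * Worked example for the 32-bit case with 4 KiB pages: 0xFFFFFFF >> 12 is
 * 0xFFFF, so fake offsets start at pgoff 0x10000 (byte offset 256 MiB) and
 * span 0xFFFF * 16 pages, ending near pgoff 0x10FFF0 (about 4.25 GiB as a
 * byte offset) -- still comfortably inside a 32-bit unsigned long pgoff.
 */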
#if 0
/**
* Initialize the GEM device fields
*/
int
drm_gem_init(struct drm_device *dev)
{
struct drm_gem_mm *mm;
spin_lock_init(&dev->object_name_lock);
idr_init(&dev->object_name_idr);
mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
if (!mm) {
DRM_ERROR("out of memory\n");
return -ENOMEM;
}
dev->mm_private = mm;
if (drm_ht_create(&mm->offset_hash, 12)) {
kfree(mm);
return -ENOMEM;
}
if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
DRM_FILE_PAGE_OFFSET_SIZE)) {
drm_ht_remove(&mm->offset_hash);
kfree(mm);
return -ENOMEM;
}
return 0;
}
void
drm_gem_destroy(struct drm_device *dev)
{
struct drm_gem_mm *mm = dev->mm_private;
drm_mm_takedown(&mm->offset_manager);
drm_ht_remove(&mm->offset_hash);
kfree(mm);
dev->mm_private = NULL;
}
#endif
/**
* Initialize an already allocated GEM object of the specified size with
* shmfs backing store.
*/
int drm_gem_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size)
{
BUG_ON((size & (PAGE_SIZE - 1)) != 0);
obj->dev = dev;
obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
if (IS_ERR(obj->filp))
return PTR_ERR(obj->filp);
kref_init(&obj->refcount);
atomic_set(&obj->handle_count, 0);
obj->size = size;
return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
/**
* Initialize an already allocated GEM object of the specified size with
* no GEM provided backing store. Instead the caller is responsible for
* backing the object and handling it.
*/
int drm_gem_private_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size)
{
BUG_ON((size & (PAGE_SIZE - 1)) != 0);
obj->dev = dev;
obj->filp = NULL;
kref_init(&obj->refcount);
atomic_set(&obj->handle_count, 0);
obj->size = size;
return 0;
}
EXPORT_SYMBOL(drm_gem_private_object_init);
/**
* Allocate a GEM object of the specified size with shmfs backing store
*/
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
struct drm_gem_object *obj;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj)
goto free;
if (drm_gem_object_init(dev, obj, size) != 0)
goto free;
if (dev->driver->gem_init_object != NULL &&
dev->driver->gem_init_object(obj) != 0) {
goto fput;
}
return obj;
fput:
/* Object_init mangles the global counters - readjust them. */
free(obj->filp);
free:
kfree(obj);
return NULL;
}
EXPORT_SYMBOL(drm_gem_object_alloc);
/**
* Removes the mapping from handle to filp for this object.
*/
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
struct drm_device *dev;
struct drm_gem_object *obj;
/* This is gross. The idr system doesn't let us try a delete and
* return an error code. It just spews if you fail at deleting.
* So, we have to grab a lock around finding the object and then
* doing the delete on it and dropping the refcount, or the user
* could race us to double-decrement the refcount and cause a
* use-after-free later. Given the frequency of our handle lookups,
* we may want to use ida for number allocation and a hash table
* for the pointers, anyway.
*/
spin_lock(&filp->table_lock);
/* Check if we currently have a reference on the object */
obj = idr_find(&filp->object_idr, handle);
if (obj == NULL) {
spin_unlock(&filp->table_lock);
return -EINVAL;
}
dev = obj->dev;
/* Release reference and decrement refcount. */
idr_remove(&filp->object_idr, handle);
spin_unlock(&filp->table_lock);
// drm_gem_remove_prime_handles(obj, filp);
if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, filp);
drm_gem_object_handle_unreference_unlocked(obj);
return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);
/**
* Create a handle for this object. This adds a handle reference
* to the object, which includes a regular reference count. Callers
* will likely want to dereference the object afterwards.
*/
int
drm_gem_handle_create(struct drm_file *file_priv,
struct drm_gem_object *obj,
u32 *handlep)
{
struct drm_device *dev = obj->dev;
int ret;
/*
* Get the user-visible handle using idr.
*/
again:
/* ensure there is space available to allocate a handle */
if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
return -ENOMEM;
/* do the allocation under our spinlock */
spin_lock(&file_priv->table_lock);
ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
spin_unlock(&file_priv->table_lock);
if (ret == -EAGAIN)
goto again;
else if (ret)
return ret;
drm_gem_object_handle_reference(obj);
if (dev->driver->gem_open_object) {
ret = dev->driver->gem_open_object(obj, file_priv);
if (ret) {
drm_gem_handle_delete(file_priv, *handlep);
return ret;
}
}
return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);
/**
* drm_gem_free_mmap_offset - release a fake mmap offset for an object
* @obj: obj in question
*
* This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
*/
#if 0
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct drm_gem_mm *mm = dev->mm_private;
struct drm_map_list *list = &obj->map_list;
drm_ht_remove_item(&mm->offset_hash, &list->hash);
drm_mm_put_block(list->file_offset_node);
kfree(list->map);
list->map = NULL;
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);
/**
* drm_gem_create_mmap_offset - create a fake mmap offset for an object
* @obj: obj in question
*
* GEM memory mapping works by handing back to userspace a fake mmap offset
* it can use in a subsequent mmap(2) call. The DRM core code then looks
* up the object based on the offset and sets up the various memory mapping
* structures.
*
* This routine allocates and attaches a fake offset for @obj.
*/
int
drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct drm_gem_mm *mm = dev->mm_private;
struct drm_map_list *list;
struct drm_local_map *map;
int ret;
/* Set the object up for mmap'ing */
list = &obj->map_list;
list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
if (!list->map)
return -ENOMEM;
map = list->map;
map->type = _DRM_GEM;
map->size = obj->size;
map->handle = obj;
/* Get a DRM GEM mmap offset allocated... */
list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
obj->size / PAGE_SIZE, 0, false);
if (!list->file_offset_node) {
DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
ret = -ENOSPC;
goto out_free_list;
}
list->file_offset_node = drm_mm_get_block(list->file_offset_node,
obj->size / PAGE_SIZE, 0);
if (!list->file_offset_node) {
ret = -ENOMEM;
goto out_free_list;
}
list->hash.key = list->file_offset_node->start;
ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
if (ret) {
DRM_ERROR("failed to add to map hash\n");
goto out_free_mm;
}
return 0;
out_free_mm:
drm_mm_put_block(list->file_offset_node);
out_free_list:
kfree(list->map);
list->map = NULL;
return ret;
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
#endif
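/*
 * Illustrative only (Linux-style DRM, not this port): userspace never
 * computes the fake offset itself. A driver-specific ioctl reports it, and
 * the subsequent mmap(2) on the DRM fd is resolved back to the GEM object:
 *
 *	uint64_t fake_offset;	// filled in by a driver-specific ioctl
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, drm_fd, fake_offset);
 */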
/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
u32 handle)
{
struct drm_gem_object *obj;
spin_lock(&filp->table_lock);
/* Check if we currently have a reference on the object */
obj = idr_find(&filp->object_idr, handle);
if (obj == NULL) {
spin_unlock(&filp->table_lock);
return NULL;
}
drm_gem_object_reference(obj);
spin_unlock(&filp->table_lock);
return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
/**
* Releases the handle to an mm object.
*/
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_gem_close *args = data;
int ret;
ret = drm_gem_handle_delete(file_priv, args->handle);
return ret;
}
/**
* Create a global name for an object, returning the name.
*
* Note that the name does not hold a reference; when the object
* is freed, the name goes away.
*/
#if 0
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_gem_flink *args = data;
struct drm_gem_object *obj;
int ret;
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
return -ENOENT;
again:
if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
ret = -ENOMEM;
goto err;
}
spin_lock(&dev->object_name_lock);
if (!obj->name) {
ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
&obj->name);
args->name = (uint64_t) obj->name;
spin_unlock(&dev->object_name_lock);
if (ret == -EAGAIN)
goto again;
else if (ret)
goto err;
/* Allocate a reference for the name table. */
drm_gem_object_reference(obj);
} else {
args->name = (uint64_t) obj->name;
spin_unlock(&dev->object_name_lock);
ret = 0;
}
err:
drm_gem_object_unreference_unlocked(obj);
return ret;
}
/**
* Open an object using the global name, returning a handle and the size.
*
* This handle (of course) holds a reference to the object, so the object
* will not go away until the handle is deleted.
*/
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_gem_open *args = data;
struct drm_gem_object *obj;
int ret;
u32 handle;
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
spin_lock(&dev->object_name_lock);
obj = idr_find(&dev->object_name_idr, (int) args->name);
if (obj)
drm_gem_object_reference(obj);
spin_unlock(&dev->object_name_lock);
if (!obj)
return -ENOENT;
ret = drm_gem_handle_create(file_priv, obj, &handle);
drm_gem_object_unreference_unlocked(obj);
if (ret)
return ret;
args->handle = handle;
args->size = obj->size;
return 0;
}
/**
* Called at device open time, sets up the structure for handling refcounting
* of mm objects.
*/
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
idr_init(&file_private->object_idr);
spin_lock_init(&file_private->table_lock);
}
/**
* Called at device close to release the file's
* handle references on objects.
*/
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
struct drm_file *file_priv = data;
struct drm_gem_object *obj = ptr;
struct drm_device *dev = obj->dev;
drm_gem_remove_prime_handles(obj, file_priv);
if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, file_priv);
drm_gem_object_handle_unreference_unlocked(obj);
return 0;
}
/**
* Called at close time when the filp is going away.
*
* Releases any remaining references on objects by this filp.
*/
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
idr_for_each(&file_private->object_idr,
&drm_gem_object_release_handle, file_private);
idr_remove_all(&file_private->object_idr);
idr_destroy(&file_private->object_idr);
}
#endif
void
drm_gem_object_release(struct drm_gem_object *obj)
{
if (obj->filp)
free(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);
/**
* Called after the last reference to the object has been lost.
* Must be called holding struct_mutex
*
* Frees the object
*/
void
drm_gem_object_free(struct kref *kref)
{
struct drm_gem_object *obj = (struct drm_gem_object *) kref;
struct drm_device *dev = obj->dev;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
if (dev->driver->gem_free_object != NULL)
dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
static void drm_gem_object_ref_bug(struct kref *list_kref)
{
BUG();
}
/**
* Called after the last handle to the object has been closed
*
* Removes any name for the object. Note that this must be
* called before drm_gem_object_free or we'll be touching
* freed memory
*/
void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
/* Remove any name for this object */
spin_lock(&dev->object_name_lock);
if (obj->name) {
idr_remove(&dev->object_name_idr, obj->name);
obj->name = 0;
spin_unlock(&dev->object_name_lock);
/*
* The object name held a reference to this object, drop
* that now.
*
* This cannot be the last reference, since the handle holds one too.
*/
kref_put(&obj->refcount, drm_gem_object_ref_bug);
} else
spin_unlock(&dev->object_name_lock);
}
EXPORT_SYMBOL(drm_gem_object_handle_free);
#if 0
void drm_gem_vm_open(struct vm_area_struct *vma)
{
struct drm_gem_object *obj = vma->vm_private_data;
drm_gem_object_reference(obj);
mutex_lock(&obj->dev->struct_mutex);
drm_vm_open_locked(obj->dev, vma);
mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);
void drm_gem_vm_close(struct vm_area_struct *vma)
{
struct drm_gem_object *obj = vma->vm_private_data;
struct drm_device *dev = obj->dev;
mutex_lock(&dev->struct_mutex);
drm_vm_close_locked(obj->dev, vma);
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);
#endif


@@ -0,0 +1,112 @@
/**************************************************************************
*
* Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <drm/drm_global.h>
struct drm_global_item {
struct mutex mutex;
void *object;
int refcount;
};
static struct drm_global_item glob[DRM_GLOBAL_NUM];
void drm_global_init(void)
{
int i;
for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
struct drm_global_item *item = &glob[i];
mutex_init(&item->mutex);
item->object = NULL;
item->refcount = 0;
}
}
void drm_global_release(void)
{
int i;
for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
struct drm_global_item *item = &glob[i];
BUG_ON(item->object != NULL);
BUG_ON(item->refcount != 0);
}
}
int drm_global_item_ref(struct drm_global_reference *ref)
{
int ret;
struct drm_global_item *item = &glob[ref->global_type];
void *object;
mutex_lock(&item->mutex);
if (item->refcount == 0) {
item->object = kzalloc(ref->size, GFP_KERNEL);
if (unlikely(item->object == NULL)) {
ret = -ENOMEM;
goto out_err;
}
ref->object = item->object;
ret = ref->init(ref);
if (unlikely(ret != 0))
goto out_err;
}
++item->refcount;
ref->object = item->object;
object = item->object;
mutex_unlock(&item->mutex);
return 0;
out_err:
mutex_unlock(&item->mutex);
item->object = NULL;
return ret;
}
EXPORT_SYMBOL(drm_global_item_ref);
void drm_global_item_unref(struct drm_global_reference *ref)
{
struct drm_global_item *item = &glob[ref->global_type];
mutex_lock(&item->mutex);
BUG_ON(item->refcount == 0);
BUG_ON(ref->object != item->object);
if (--item->refcount == 0) {
ref->release(ref);
item->object = NULL;
}
mutex_unlock(&item->mutex);
}
EXPORT_SYMBOL(drm_global_item_unref);
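/*
 * Usage sketch, modeled on how TTM consumes this API; the payload struct
 * and callbacks here are illustrative, not part of this commit:
 *
 *	static int my_init(struct drm_global_reference *ref) { return 0; }
 *	static void my_release(struct drm_global_reference *ref) { }
 *
 *	struct drm_global_reference ref = {
 *		.global_type = DRM_GLOBAL_TTM_MEM,
 *		.size = sizeof(struct my_shared_state),
 *		.init = &my_init,
 *		.release = &my_release,
 *	};
 *	if (drm_global_item_ref(&ref) == 0) {
 *		// ref.object is the single shared, refcounted instance
 *		drm_global_item_unref(&ref);
 *	}
 */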


@@ -107,3 +107,23 @@ int drm_order(unsigned long size)
return order;
}
extern int x86_clflush_size;
static inline void clflush(volatile void *__p)
{
asm volatile("clflush %0" : "+m" (*(volatile char*)__p));
}
void
drm_clflush_virt_range(char *addr, unsigned long length)
{
char *end = addr + length;
mb();
for (; addr < end; addr += x86_clflush_size)
clflush(addr);
clflush(end - 1);
mb();
return;
}
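/*
 * Typical use, mirroring the shmem pwrite path later in this commit: after
 * the CPU writes into pages the GPU will read without snooping, flush just
 * the bytes that were touched:
 *
 *	memcpy(vaddr + offset, user_data, length);
 *	drm_clflush_virt_range(vaddr + offset, length);
 */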


@@ -481,9 +481,54 @@ int i915_init(void)
return err;
}
static struct drm_driver driver = {
/* Don't use MTRRs here; the Xserver or userspace app should
* deal with them for Intel hardware.
*/
// .driver_features =
// DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
// DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME,
// .load = i915_driver_load,
// .unload = i915_driver_unload,
// .open = i915_driver_open,
// .lastclose = i915_driver_lastclose,
// .preclose = i915_driver_preclose,
// .postclose = i915_driver_postclose,
/* Used in place of i915_pm_ops for non-DRIVER_MODESET */
// .suspend = i915_suspend,
// .resume = i915_resume,
// .device_is_agp = i915_driver_device_is_agp,
// .master_create = i915_master_create,
// .master_destroy = i915_master_destroy,
.gem_init_object = i915_gem_init_object,
.gem_free_object = i915_gem_free_object,
// .gem_vm_ops = &i915_gem_vm_ops,
// .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
// .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
// .gem_prime_export = i915_gem_prime_export,
// .gem_prime_import = i915_gem_prime_import,
// .dumb_create = i915_gem_dumb_create,
// .dumb_map_offset = i915_gem_mmap_gtt,
// .dumb_destroy = i915_gem_dumb_destroy,
// .ioctls = i915_ioctls,
// .fops = &i915_driver_fops,
// .name = DRIVER_NAME,
// .desc = DRIVER_DESC,
// .date = DRIVER_DATE,
// .major = DRIVER_MAJOR,
// .minor = DRIVER_MINOR,
// .patchlevel = DRIVER_PATCHLEVEL,
};
int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static struct drm_driver driver;
static struct drm_device drm_dev;
static struct drm_file drm_file;


@@ -30,6 +30,7 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
//#include <linux/swap.h>
#include <linux/pci.h>
@@ -52,38 +53,6 @@ static inline void clflush(volatile void *__p)
#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
void
drm_gem_object_free(struct kref *kref)
{
struct drm_gem_object *obj = (struct drm_gem_object *) kref;
struct drm_device *dev = obj->dev;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
i915_gem_free_object(obj);
}
/**
* Initialize an already allocated GEM object of the specified size with
* shmfs backing store.
*/
int drm_gem_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size)
{
BUG_ON((size & (PAGE_SIZE - 1)) != 0);
obj->dev = dev;
kref_init(&obj->refcount);
atomic_set(&obj->handle_count, 0);
obj->size = size;
return 0;
}
void
drm_gem_object_release(struct drm_gem_object *obj)
{ }
#define I915_EXEC_CONSTANTS_MASK (3<<6)
#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
@@ -138,7 +107,6 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
dev_priv->mm.object_memory -= size;
}
static int
i915_gem_wait_for_error(struct drm_device *dev)
{
@@ -281,7 +249,6 @@ i915_gem_create(struct drm_file *file,
trace_i915_gem_object_create(obj);
*handle_p = handle;
return 0;
}
@@ -317,8 +284,6 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
args->size, &args->handle);
}
#if 0
static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
@@ -326,6 +291,7 @@ static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
obj->tiling_mode != I915_TILING_NONE;
}
#if 0
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
@@ -650,7 +616,9 @@ fast_user_write(struct io_mapping *mapping,
io_mapping_unmap_atomic(vaddr_atomic);
return unwritten;
}
#endif
#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
/**
* This is the fast pwrite path, where we copy the data directly from the
* user into the GTT, uncached.
@@ -666,6 +634,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
loff_t offset, page_base;
char __user *user_data;
int page_offset, page_length, ret;
char *vaddr;
ret = i915_gem_object_pin(obj, 0, true, true);
if (ret)
@@ -679,6 +648,13 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
if (ret)
goto out_unpin;
vaddr = AllocKernelSpace(4096);
if(vaddr == NULL)
{
ret = -ENOSPC;
goto out_unpin;
};
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
@@ -697,24 +673,22 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
/* If we get a fault while copying data, then (presumably) our
* source page isn't available. Return the error and we'll
* retry in the slow path.
*/
if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
page_offset, user_data, page_length)) {
ret = -EFAULT;
goto out_unpin;
}
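/* KolibriOS replacement: there is no io_mapping here, so the target GTT
 * page is remapped on the fly into the reserved kernel window (vaddr) as
 * uncached and the user data is copied with a plain memcpy. */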
MapPage(vaddr, page_base, PG_SW|PG_NOCACHE);
memcpy(vaddr+page_offset, user_data, page_length);
remain -= page_length;
user_data += page_length;
offset += page_length;
}
FreeKernelSpace(vaddr);
out_unpin:
i915_gem_object_unpin(obj);
out:
printf("% s ret = %d\n", __FUNCTION__, ret);
return ret;
}
@@ -730,25 +704,26 @@ shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
bool needs_clflush_after)
{
char *vaddr;
int ret;
int ret = 0;
if (unlikely(page_do_bit17_swizzling))
return -EINVAL;
vaddr = kmap_atomic(page);
vaddr = (char *)MapIoMem((addr_t)page, 4096, PG_SW);
if (needs_clflush_before)
drm_clflush_virt_range(vaddr + shmem_page_offset,
page_length);
ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
memcpy(vaddr + shmem_page_offset,
user_data,
page_length);
if (needs_clflush_after)
drm_clflush_virt_range(vaddr + shmem_page_offset,
page_length);
kunmap_atomic(vaddr);
FreeKernelSpace(vaddr);
return ret ? -EFAULT : 0;
}
#if 0
/* Only difference to the fast-path function is that this can handle bit17
* and uses non-atomic copy and kmap functions. */
@@ -783,6 +758,8 @@ shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
return ret ? -EFAULT : 0;
}
#endif
static int
i915_gem_shmem_pwrite(struct drm_device *dev,
@@ -860,7 +837,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
* overcomplicate things and flush the entire page. */
partial_cacheline_write = needs_clflush_before &&
((shmem_page_offset | page_length)
& (boot_cpu_data.x86_clflush_size - 1));
& (x86_clflush_size - 1));
page = sg_page(sg);
page_do_bit17_swizzling = obj_do_bit17_swizzling &&
@@ -875,16 +852,16 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
hit_slowpath = 1;
mutex_unlock(&dev->struct_mutex);
ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
user_data, page_do_bit17_swizzling,
partial_cacheline_write,
needs_clflush_after);
dbgprintf("%s need shmem_pwrite_slow\n",__FUNCTION__);
// ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
// user_data, page_do_bit17_swizzling,
// partial_cacheline_write,
// needs_clflush_after);
mutex_lock(&dev->struct_mutex);
next_page:
set_page_dirty(page);
mark_page_accessed(page);
if (ret)
goto out;
@@ -931,16 +908,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
if (args->size == 0)
return 0;
if (!access_ok(VERIFY_READ,
(char __user *)(uintptr_t)args->data_ptr,
args->size))
return -EFAULT;
ret = fault_in_multipages_readable((char __user *)(uintptr_t)args->data_ptr,
args->size);
if (ret)
return -EFAULT;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
@@ -975,10 +942,10 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
* pread/pwrite currently are reading and writing from the CPU
* perspective, requiring manual detiling by the client.
*/
if (obj->phys_obj) {
ret = i915_gem_phys_pwrite(dev, obj, args, file);
goto out;
}
// if (obj->phys_obj) {
// ret = i915_gem_phys_pwrite(dev, obj, args, file);
// goto out;
// }
if (obj->cache_level == I915_CACHE_NONE &&
obj->tiling_mode == I915_TILING_NONE &&
@@ -999,8 +966,6 @@ unlock:
return ret;
}
#endif
int
i915_gem_check_wedge(struct drm_i915_private *dev_priv,
bool interruptible)
@@ -1123,6 +1088,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
WARN_ON(end < 0); /* We're not aware of other errors */
return 0;
}
#endif
#define EXIT_COND \
@@ -1195,35 +1161,163 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
return 0;
}
/* A nonblocking variant of the above wait. This is a highly dangerous routine
* as the object state may change during this call.
*/
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
bool readonly)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = obj->ring;
u32 seqno;
int ret;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
BUG_ON(!dev_priv->mm.interruptible);
seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
if (seqno == 0)
return 0;
ret = i915_gem_check_wedge(dev_priv, true);
if (ret)
return ret;
ret = i915_gem_check_olr(ring, seqno);
if (ret)
return ret;
mutex_unlock(&dev->struct_mutex);
ret = __wait_seqno(ring, seqno, true, NULL);
mutex_lock(&dev->struct_mutex);
i915_gem_retire_requests_ring(ring);
/* Manually manage the write flush as we may have not yet
* retired the buffer.
*/
if (obj->last_write_seqno &&
i915_seqno_passed(seqno, obj->last_write_seqno)) {
obj->last_write_seqno = 0;
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
}
return ret;
}
/**
* Called when user space prepares to use an object with the CPU, either
* through the mmap ioctl's mapping or a GTT mapping.
*/
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_set_domain *args = data;
struct drm_i915_gem_object *obj;
uint32_t read_domains = args->read_domains;
uint32_t write_domain = args->write_domain;
int ret;
/* Only handle setting domains to types used by the CPU. */
if (write_domain & I915_GEM_GPU_DOMAINS)
return -EINVAL;
if (read_domains & I915_GEM_GPU_DOMAINS)
return -EINVAL;
/* Having something in the write domain implies it's in the read
* domain, and only that read domain. Enforce that in the request.
*/
if (write_domain != 0 && read_domains != write_domain)
return -EINVAL;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
/* Try to flush the object off the GPU without holding the lock.
* We will repeat the flush holding the lock in the normal manner
* to catch cases where we are gazumped.
*/
ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
if (ret)
goto unref;
if (read_domains & I915_GEM_DOMAIN_GTT) {
ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
/* Silently promote "you're not bound, there was nothing to do"
* to success, since the client was just asking us to
* make sure everything was done.
*/
if (ret == -EINVAL)
ret = 0;
} else {
ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
}
unref:
drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
/**
* Maps the contents of an object, returning the address it is mapped
* into.
*
* While the mapping holds a reference on the contents of the object, it doesn't
* imply a ref on the object itself.
*/
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_mmap *args = data;
struct drm_gem_object *obj;
unsigned long addr = 0;
obj = drm_gem_object_lookup(dev, file, args->handle);
if (obj == NULL)
return -ENOENT;
dbgprintf("%s offset %lld size %lld not supported\n",
args->offset, args->size);
/* prime objects have no backing filp to GEM mmap
* pages from.
*/
if (!obj->filp) {
drm_gem_object_unreference_unlocked(obj);
return -EINVAL;
}
// addr = vm_mmap(obj->filp, 0, args->size,
// PROT_READ | PROT_WRITE, MAP_SHARED,
// args->offset);
drm_gem_object_unreference_unlocked(obj);
// if (IS_ERR((void *)addr))
// return addr;
args->addr_ptr = (uint64_t) addr;
return -EINVAL;
// return 0;
}
@@ -1443,7 +1537,9 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
int page_count, i;
struct address_space *mapping;
struct sg_table *st;
struct scatterlist *sg;
struct page *page;
@@ -1473,16 +1569,18 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
* Fail silently without starting the shrinker
*/
for_each_sg(st->sgl, sg, page_count, i) {
page = (struct page *)AllocPage(); // oh-oh
if ( page == 0 )
page = shmem_read_mapping_page_gfp(obj->base.filp, i, gfp);
if (IS_ERR(page)) {
dbgprintf("%s invalid page %p\n", __FUNCTION__, page);
goto err_pages;
}
sg_set_page(sg, page, PAGE_SIZE, 0);
}
obj->pages = st;
// DRM_DEBUG_KMS("%s alloc %d pages\n", __FUNCTION__, page_count);
DRM_DEBUG_KMS("%s alloc %d pages\n", __FUNCTION__, page_count);
return 0;
@@ -1949,8 +2047,6 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
/**
* i915_gem_object_sync - sync an object to a ring.
*
@@ -2821,6 +2917,68 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
return 0;
}
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_caching *args = data;
struct drm_i915_gem_object *obj;
int ret;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
args->caching = obj->cache_level != I915_CACHE_NONE;
drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_caching *args = data;
struct drm_i915_gem_object *obj;
enum i915_cache_level level;
int ret;
switch (args->caching) {
case I915_CACHING_NONE:
level = I915_CACHE_NONE;
break;
case I915_CACHING_CACHED:
level = I915_CACHE_LLC;
break;
default:
return -EINVAL;
}
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
ret = i915_gem_object_set_cache_level(obj, level);
drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
/*
* Prepare buffer for display plane (scanout, cursors, etc).
* Can be called from an uninterruptible phase (modesetting) and allows
@@ -3145,6 +3303,8 @@ unlock:
return ret;
}
#endif
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
@@ -3182,6 +3342,7 @@ unlock:
return ret;
}
#if 0
int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)


@@ -22,5 +22,6 @@
#define trace_intel_gpu_freq_change(a)
#define trace_i915_reg_rw(a, b, c, d)
#define trace_i915_ring_wait_begin(a)
#define trace_i915_gem_object_pwrite(a, b, c)
#endif


@@ -122,11 +122,17 @@ u32_t drvEntry(int action, char *cmdline)
#define SRV_BLIT_TEXTURE 16
#define SRV_BLIT_VIDEO 17
#define SRV_PCI_INFO 20
#define SRV_GET_PCI_INFO 20
#define SRV_GET_PARAM 21
#define SRV_I915_GEM_CREATE 22
#define SRV_DRM_GEM_CLOSE 23
#define SRV_I915_GEM_PIN 24
#define SRV_I915_GEM_SET_CACHEING 25
#define SRV_I915_GEM_GET_APERTURE 26
#define SRV_I915_GEM_PWRITE 27
#define SRV_I915_GEM_BUSY 28
#define SRV_I915_GEM_SET_DOMAIN 29
#define check_input(size) \
if( unlikely((inp==NULL)||(io->inp_size != (size))) ) \
@@ -200,7 +206,7 @@ int _stdcall display_handler(ioctl_t *io)
break;
case SRV_PCI_INFO:
case SRV_GET_PCI_INFO:
get_pci_info((struct pci_device *)inp);
retval = 0;
break;
@@ -220,6 +226,27 @@ int _stdcall display_handler(ioctl_t *io)
case SRV_I915_GEM_PIN:
retval = i915_gem_pin_ioctl(main_device, inp, file);
break;
case SRV_I915_GEM_SET_CACHEING:
retval = i915_gem_set_caching_ioctl(main_device, inp, file);
break;
case SRV_I915_GEM_GET_APERTURE:
retval = i915_gem_get_aperture_ioctl(main_device, inp, file);
break;
case SRV_I915_GEM_PWRITE:
retval = i915_gem_pwrite_ioctl(main_device, inp, file);
break;
case SRV_I915_GEM_BUSY:
retval = i915_gem_busy_ioctl(main_device, inp, file);
break;
case SRV_I915_GEM_SET_DOMAIN:
retval = i915_gem_set_domain_ioctl(main_device, inp, file);
break;
};
return retval;
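/*
 * Hedged client-side sketch of driving the new entry points; the ioctl_t
 * field names and the get_service()/call_service() wrappers follow the
 * usual KolibriOS conventions and are assumptions, not part of this diff:
 *
 *	struct drm_i915_gem_pwrite args = {
 *		.handle = bo_handle,
 *		.offset = dst_offset,
 *		.size = len,
 *		.data_ptr = (__u64)(uintptr_t)src,
 *	};
 *	ioctl_t io = {
 *		.handle = service,	// from get_service("DISPLAY")
 *		.io_code = SRV_I915_GEM_PWRITE,
 *		.input = &args,
 *		.inp_size = sizeof(args),
 *	};
 *	err = call_service(&io);
 */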


@@ -0,0 +1,59 @@
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_drv.h"
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
struct file *filep;
int count;
filep = malloc(sizeof(*filep));
if(unlikely(filep == NULL))
return ERR_PTR(-ENOMEM);
count = size / PAGE_SIZE;
filep->pages = kzalloc(sizeof(struct page *) * count, 0);
if(unlikely(filep->pages == NULL))
{
kfree(filep);
return ERR_PTR(-ENOMEM);
};
filep->count = count;
filep->allocated = 0;
filep->vma = NULL;
// dbgprintf("%s file %p pages %p count %d\n",
// __FUNCTION__,filep, filep->pages, filep->pages);
return filep;
}
struct page *shmem_read_mapping_page_gfp(struct file *filep,
pgoff_t index, gfp_t gfp)
{
struct page *page;
// dbgprintf("%s, file %p index %d\n", __FUNCTION__, filep, index);
if(unlikely(index >= filep->count))
return ERR_PTR(-EINVAL);
page = filep->pages[index];
if(unlikely(page == NULL))
{
page = (struct page *)AllocPage();
if(unlikely(page == NULL))
return ERR_PTR(-ENOMEM);
filep->pages[index] = page;
};
return page;
};
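/*
 * Design note: this is just enough of Linux shmem for GEM's needs. Pages
 * are allocated lazily on first lookup and stay resident for the life of
 * the file -- with no swap on KolibriOS there is nothing to page out to.
 */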