ddk: update 3.14-rc1 includes
git-svn-id: svn://kolibrios.org@4568 a494cfbc-eb01-0410-851d-a64ba20cac60
parent ef154816ce
commit 76c50442a0
@@ -172,6 +172,7 @@ struct ttm_tt;
 * @offset: The current GPU offset, which can have different meanings
 * depending on the memory type. For SYSTEM type memory, it should be 0.
 * @cur_placement: Hint of current placement.
 * @wu_mutex: Wait unreserved mutex.
 *
 * Base class for TTM buffer object, that deals with data placement and CPU
 * mappings. GPU mappings are really up to the driver, but for simpler GPUs
@@ -253,6 +254,7 @@ struct ttm_buffer_object {

	struct reservation_object *resv;
	struct reservation_object ttm_resv;
	struct mutex wu_mutex;
};

/**
@@ -705,5 +707,5 @@ extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
			 size_t count, loff_t *f_pos, bool write);

extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);

extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo);
#endif
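The new wu_mutex and ttm_bo_wait_unreserved() let a thread sleep until a
buffer object becomes unreserved, which the CPU fault path needs when a BO
is temporarily reserved by another thread. A minimal sketch of how a driver
fault handler might use it (hypothetical function; the real TTM fault path
handles more cases):

	/* Sketch only: retry a CPU fault while the BO is reserved elsewhere. */
	static int my_bo_fault_sketch(struct ttm_buffer_object *bo)
	{
		/* Non-blocking reserve, 3.14-era signature. */
		int ret = ttm_bo_reserve(bo, true, true, false, NULL);

		if (ret == -EBUSY) {
			/* Someone else holds the reservation: block on
			 * wu_mutex via the new helper, then ask for a retry. */
			ttm_bo_wait_unreserved(bo);
			return VM_FAULT_RETRY;
		}
		if (ret)
			return VM_FAULT_NOPAGE;

		/* ... set up the page mapping here ... */
		ttm_bo_unreserve(bo);
		return 0;
	}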
@@ -38,7 +38,7 @@
#include <drm/drm_global.h>
#include <drm/drm_vma_manager.h>
//#include <linux/workqueue.h>
//#include <linux/fs.h>
#include <linux/fs.h>
#include <linux/spinlock.h>

struct ww_acquire_ctx;
@@ -682,6 +682,15 @@ extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
extern int ttm_tt_swapout(struct ttm_tt *ttm,
			  struct file *persistent_swap_storage);

/**
 * ttm_tt_unpopulate - free pages from a ttm
 *
 * @ttm: Pointer to the ttm_tt structure
 *
 * Calls the driver method to free all pages from a ttm
 */
extern void ttm_tt_unpopulate(struct ttm_tt *ttm);

/*
 * ttm_bo.c
 */
@@ -70,7 +70,8 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
/**
 * function ttm_eu_reserve_buffers
 *
 * @ticket: [out] ww_acquire_ctx returned by call.
 * @ticket: [out] ww_acquire_ctx filled in by call, or NULL if only
 * non-blocking reserves should be tried.
 * @list: thread private list of ttm_validate_buffer structs.
 *
 * Tries to reserve bos pointed to by the list entries for validation.
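The doc change reflects that @ticket may now be NULL when only non-blocking
reserves should be tried. A hedged sketch of the usual execbuf pattern
around these helpers (assuming the 3.14 two-argument
ttm_eu_reserve_buffers(); sync_obj and the validation step are placeholders):

	static int my_reserve_list_sketch(struct list_head *list,
					  struct ttm_validate_buffer *val_buf,
					  struct ttm_buffer_object *bo,
					  void *sync_obj)
	{
		struct ww_acquire_ctx ticket;
		int ret;

		INIT_LIST_HEAD(list);
		val_buf->bo = bo;
		list_add_tail(&val_buf->head, list);

		/* Passing NULL instead of &ticket would only try
		 * non-blocking reserves, per the updated comment. */
		ret = ttm_eu_reserve_buffers(&ticket, list);
		if (ret)
			return ret;

		/* ... validate buffers and submit the command stream ... */

		/* On success, fence and unreserve; on a submit error,
		 * call ttm_eu_backoff_reservation() instead. */
		ttm_eu_fence_buffer_objects(&ticket, list, sync_obj);
		return 0;
	}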
@@ -41,6 +41,7 @@
#include <drm/drm_hashtab.h>
#include <linux/kref.h>
//#include <linux/rcupdate.h>
#include <linux/dma-buf.h>
#include <ttm/ttm_memory.h>

/**
@@ -77,6 +78,7 @@ enum ttm_object_type {
	ttm_fence_type,
	ttm_buffer_type,
	ttm_lock_type,
	ttm_prime_type,
	ttm_driver_type0 = 256,
	ttm_driver_type1,
	ttm_driver_type2,
@@ -132,6 +134,30 @@ struct ttm_base_object {
				enum ttm_ref_type ref_type);
};


/**
 * struct ttm_prime_object - Modified base object that is prime-aware
 *
 * @base: struct ttm_base_object that we derive from
 * @mutex: Mutex protecting the @dma_buf member.
 * @size: Size of the dma_buf associated with this object
 * @real_type: Type of the underlying object. Needed since we're setting
 * the value of @base::object_type to ttm_prime_type
 * @dma_buf: Non ref-counted pointer to a struct dma_buf created from this
 * object.
 * @refcount_release: The underlying object's release method. Needed since
 * we set @base::refcount_release to our own release method.
 */

struct ttm_prime_object {
	struct ttm_base_object base;
	struct mutex mutex;
	size_t size;
	enum ttm_object_type real_type;
	struct dma_buf *dma_buf;
	void (*refcount_release) (struct ttm_base_object **);
};
/**
 * ttm_base_object_init
 *
@@ -164,13 +190,25 @@ extern int ttm_base_object_init(struct ttm_object_file *tfile,
 * @key: Hash key
 *
 * Looks up a struct ttm_base_object with the key @key.
 * Also verifies that the object is visible to the application, by
 * comparing the @tfile argument and checking the object shareable flag.
 */

extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
						      *tfile, uint32_t key);

/**
 * ttm_base_object_lookup_for_ref
 *
 * @tdev: Pointer to a struct ttm_object_device.
 * @key: Hash key
 *
 * Looks up a struct ttm_base_object with the key @key.
 * This function should only be used when the struct tfile associated with the
 * caller doesn't yet have a reference to the base object.
 */

extern struct ttm_base_object *
ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key);

/**
 * ttm_base_object_unref
 *
@@ -192,6 +230,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
 * @existed: Upon completion, indicates that an identical reference object
 * already existed, and the refcount was upped on that object instead.
 *
 * Checks that the base object is shareable and adds a ref object to it.
 *
 * Adding a ref object to a base object is basically like referencing the
 * base object, but a user-space application holds the reference. When the
 * file corresponding to @tfile is closed, all its reference objects are
@@ -248,14 +288,18 @@ extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
/**
 * ttm_object_device_init - initialize a struct ttm_object_device
 *
 * @mem_glob: struct ttm_mem_global for memory accounting.
 * @hash_order: Order of hash table used to hash the base objects.
 * @ops: DMA buf ops for prime objects of this device.
 *
 * This function is typically called on device initialization to prepare
 * data structures needed for ttm base and ref objects.
 */

extern struct ttm_object_device *ttm_object_device_init
(struct ttm_mem_global *mem_glob, unsigned int hash_order);
extern struct ttm_object_device *
ttm_object_device_init(struct ttm_mem_global *mem_glob,
		       unsigned int hash_order,
		       const struct dma_buf_ops *ops);

/**
 * ttm_object_device_release - release data held by a ttm_object_device
 *
@@ -272,4 +316,31 @@ extern void ttm_object_device_release(struct ttm_object_device **p_tdev);

#define ttm_base_object_kfree(__object, __base)\
	kfree_rcu(__object, __base.rhead)

extern int ttm_prime_object_init(struct ttm_object_file *tfile,
				 size_t size,
				 struct ttm_prime_object *prime,
				 bool shareable,
				 enum ttm_object_type type,
				 void (*refcount_release)
				 (struct ttm_base_object **),
				 void (*ref_obj_release)
				 (struct ttm_base_object *,
				  enum ttm_ref_type ref_type));

static inline enum ttm_object_type
ttm_base_object_type(struct ttm_base_object *base)
{
	return (base->object_type == ttm_prime_type) ?
		container_of(base, struct ttm_prime_object, base)->real_type :
		base->object_type;
}
extern int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
				  int fd, u32 *handle);
extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
				  uint32_t handle, uint32_t flags,
				  int *prime_fd);

//#define ttm_prime_object_kfree(__obj, __prime) \
//	kfree_rcu(__obj, __prime.base.rhead)
#endif
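These additions wire TTM objects to PRIME: ttm_prime_object_init() embeds a
base object that can be exported as a dma-buf, ttm_base_object_type()
reports the underlying type for prime objects, and the fd/handle helpers
convert between dma-buf file descriptors and TTM handles. A hedged sketch
of driver export/import wrappers built on these (hypothetical names; the
real vmwgfx ioctl code differs):

	static int my_prime_export_sketch(struct ttm_object_file *tfile,
					  uint32_t handle, uint32_t flags,
					  int *prime_fd)
	{
		/* flags are fd flags for the new dma-buf fd (e.g. close-
		 * on-exec), passed straight through. */
		return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
	}

	static int my_prime_import_sketch(struct ttm_object_file *tfile,
					  int fd, u32 *handle)
	{
		/* Turns an imported dma-buf fd back into a TTM handle. */
		return ttm_prime_fd_to_handle(tfile, fd, handle);
	}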
@@ -62,7 +62,7 @@ extern void ttm_pool_unpopulate(struct ttm_tt *ttm);
extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data);


#ifdef CONFIG_SWIOTLB
#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
/**
 * Initialize pool allocator.
 */
@@ -94,6 +94,15 @@ static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
{
	return 0;
}
static inline int ttm_dma_populate(struct ttm_dma_tt *ttm_dma,
				   struct device *dev)
{
	return -ENOMEM;
}
static inline void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma,
				      struct device *dev)
{
}
#endif

#endif
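The #if change makes the DMA page pool available when the Intel IOMMU is
enabled, not only with SWIOTLB. Drivers typically route their ttm_tt
populate callback to one pool or the other; a hedged sketch (hypothetical
driver, loosely mirroring the radeon/nouveau-style pattern):

	static int my_tt_populate_sketch(struct ttm_tt *ttm, struct device *dev)
	{
	#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
		if (swiotlb_nr_tbl()) {
			/* Coherent DMA pool; assumes ttm is embedded in a
			 * ttm_dma_tt, as the dma pool requires. */
			struct ttm_dma_tt *dma_tt =
				container_of(ttm, struct ttm_dma_tt, ttm);
			return ttm_dma_populate(dma_tt, dev);
		}
	#endif
		return ttm_pool_populate(ttm);	/* plain page pool */
	}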
drivers/include/linux/dma-buf.h (new file, 197 lines)
@@ -0,0 +1,197 @@
/*
 * Header file for dma buffer sharing framework.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and specially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __DMA_BUF_H__
#define __DMA_BUF_H__

#include <linux/file.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>

struct device;
struct dma_buf;
struct dma_buf_attachment;

/**
 * struct dma_buf_ops - operations possible on struct dma_buf
 * @attach: [optional] allows different devices to 'attach' themselves to the
 * given buffer. It might return -EBUSY to signal that backing storage
 * is already allocated and incompatible with the requirements
 * of requesting device.
 * @detach: [optional] detach a given device from this buffer.
 * @map_dma_buf: returns list of scatter pages allocated, increases usecount
 * of the buffer. Requires at least one attach to be called
 * before. Returned sg list should already be mapped into
 * _device_ address space. This call may sleep. May also return
 * -EINTR. Should return -EINVAL if attach hasn't been called yet.
 * @unmap_dma_buf: decreases usecount of buffer, might deallocate scatter
 * pages.
 * @release: release this buffer; to be called after the last dma_buf_put.
 * @begin_cpu_access: [optional] called before cpu access to invalidate cpu
 * caches and allocate backing storage (if not yet done),
 * respectively pin the object into memory.
 * @end_cpu_access: [optional] called after cpu access to flush caches.
 * @kmap_atomic: maps a page from the buffer into kernel address
 * space; users may not block until the subsequent unmap call.
 * This callback must not sleep.
 * @kunmap_atomic: [optional] unmaps an atomically mapped page from the buffer.
 * This callback must not sleep.
 * @kmap: maps a page from the buffer into kernel address space.
 * @kunmap: [optional] unmaps a page from the buffer.
 * @mmap: used to expose the backing storage to userspace. Note that the
 * mapping needs to be coherent - if the exporter doesn't directly
 * support this, it needs to fake coherency by shooting down any ptes
 * when transitioning away from the cpu domain.
 * @vmap: [optional] creates a virtual mapping for the buffer into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @vunmap: [optional] unmaps a vmap from the buffer
 */
struct dma_buf_ops {
	int (*attach)(struct dma_buf *, struct device *,
		      struct dma_buf_attachment *);

	void (*detach)(struct dma_buf *, struct dma_buf_attachment *);

	/* For {map,unmap}_dma_buf below, any specific buffer attributes
	 * required should get added to device_dma_parameters accessible
	 * via dev->dma_params.
	 */
	struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
					 enum dma_data_direction);
	void (*unmap_dma_buf)(struct dma_buf_attachment *,
			      struct sg_table *,
			      enum dma_data_direction);
	/* TODO: Add try_map_dma_buf version, to return immed with -EBUSY
	 * if the call would block.
	 */

	/* after final dma_buf_put() */
	void (*release)(struct dma_buf *);

	int (*begin_cpu_access)(struct dma_buf *, size_t, size_t,
				enum dma_data_direction);
	void (*end_cpu_access)(struct dma_buf *, size_t, size_t,
			       enum dma_data_direction);
	void *(*kmap_atomic)(struct dma_buf *, unsigned long);
	void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
	void *(*kmap)(struct dma_buf *, unsigned long);
	void (*kunmap)(struct dma_buf *, unsigned long, void *);

	int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);

	void *(*vmap)(struct dma_buf *);
	void (*vunmap)(struct dma_buf *, void *vaddr);
};
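An exporter fills a dma_buf_ops and hands it to dma_buf_export(). A hedged
minimal skeleton (hypothetical exporter that already holds a device-mapped
sg_table per buffer; struct my_buffer is an assumption, and a real exporter
must also supply the non-optional .kmap, .kmap_atomic and .mmap callbacks):

	static struct sg_table *my_map_dma_buf(struct dma_buf_attachment *attach,
					       enum dma_data_direction dir)
	{
		struct my_buffer *buf = attach->dmabuf->priv; /* hypothetical */

		/* Hand out the already device-mapped scatter list. */
		return buf->sgt;
	}

	static void my_unmap_dma_buf(struct dma_buf_attachment *attach,
				     struct sg_table *sgt,
				     enum dma_data_direction dir)
	{
		/* Nothing to undo in this sketch; real exporters unmap here. */
	}

	static void my_release(struct dma_buf *dmabuf)
	{
		/* Drop the exporter's reference to its backing storage. */
	}

	static const struct dma_buf_ops my_dma_buf_ops = {
		.map_dma_buf	= my_map_dma_buf,
		.unmap_dma_buf	= my_unmap_dma_buf,
		.release	= my_release,
		/* .kmap, .kmap_atomic, .mmap omitted for brevity. */
	};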

/**
 * struct dma_buf - shared buffer object
 * @size: size of the buffer
 * @file: file pointer used for sharing buffers across, and for refcounting.
 * @attachments: list of dma_buf_attachment that denotes all devices attached.
 * @ops: dma_buf_ops associated with this buffer object.
 * @exp_name: name of the exporter; useful for debugging.
 * @list_node: node for dma_buf accounting and debugging.
 * @priv: exporter specific private data for this buffer object.
 */
struct dma_buf {
	size_t size;
	struct file *file;
	struct list_head attachments;
	const struct dma_buf_ops *ops;
	/* mutex to serialize list manipulation, attach/detach and vmap/unmap */
	struct mutex lock;
	unsigned vmapping_counter;
	void *vmap_ptr;
	const char *exp_name;
	struct list_head list_node;
	void *priv;
};

/**
 * struct dma_buf_attachment - holds device-buffer attachment data
 * @dmabuf: buffer for this attachment.
 * @dev: device attached to the buffer.
 * @node: list of dma_buf_attachment.
 * @priv: exporter specific attachment data.
 *
 * This structure holds the attachment information between the dma_buf buffer
 * and its user device(s). The list contains one attachment struct per device
 * attached to the buffer.
 */
struct dma_buf_attachment {
	struct dma_buf *dmabuf;
	struct device *dev;
	struct list_head node;
	void *priv;
};

/**
 * get_dma_buf - convenience wrapper for get_file.
 * @dmabuf: [in] pointer to dma_buf
 *
 * Increments the reference count on the dma-buf, needed by drivers
 * that create additional references to the dmabuf on the kernel side.
 * For example, an exporter that needs to keep a dmabuf ptr
 * so that subsequent exports don't create a new dmabuf.
 */
static inline void get_dma_buf(struct dma_buf *dmabuf)
{
	get_file(dmabuf->file);
}

struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev);
void dma_buf_detach(struct dma_buf *dmabuf,
		    struct dma_buf_attachment *dmabuf_attach);

struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
				     size_t size, int flags, const char *);

#define dma_buf_export(priv, ops, size, flags)	\
	dma_buf_export_named(priv, ops, size, flags, __FILE__)

int dma_buf_fd(struct dma_buf *dmabuf, int flags);
struct dma_buf *dma_buf_get(int fd);
void dma_buf_put(struct dma_buf *dmabuf);

struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
					enum dma_data_direction);
void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
			      enum dma_data_direction);
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
			     enum dma_data_direction dir);
void dma_buf_end_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
			    enum dma_data_direction dir);
void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
void *dma_buf_kmap(struct dma_buf *, unsigned long);
void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);

int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
		 unsigned long);
void *dma_buf_vmap(struct dma_buf *);
void dma_buf_vunmap(struct dma_buf *, void *vaddr);
#endif /* __DMA_BUF_H__ */
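On the import side the flow is: get the dma_buf from an fd, attach the
importing device, then map the attachment to obtain a device-mapped
sg_table. A hedged sketch using only the declarations above (error
handling trimmed to the essentials):

	static struct sg_table *my_import_sketch(int fd, struct device *dev,
						 struct dma_buf_attachment **out)
	{
		struct dma_buf *dmabuf = dma_buf_get(fd); /* takes a file ref */
		struct dma_buf_attachment *attach;
		struct sg_table *sgt;

		if (IS_ERR(dmabuf))
			return ERR_CAST(dmabuf);

		attach = dma_buf_attach(dmabuf, dev);
		if (IS_ERR(attach)) {
			dma_buf_put(dmabuf);
			return ERR_CAST(attach);
		}

		/* May sleep; returns a scatter list already mapped into
		 * the device's address space, per the ops doc above. */
		sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
		*out = attach;
		return sgt;
	}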
drivers/include/linux/dma-direction.h (new file, 13 lines)
@@ -0,0 +1,13 @@
#ifndef _LINUX_DMA_DIRECTION_H
#define _LINUX_DMA_DIRECTION_H
/*
 * These definitions mirror those in pci.h, so they can be used
 * interchangeably with their PCI_ counterparts.
 */
enum dma_data_direction {
	DMA_BIDIRECTIONAL = 0,
	DMA_TO_DEVICE = 1,
	DMA_FROM_DEVICE = 2,
	DMA_NONE = 3,
};
#endif
drivers/include/linux/dma-mapping.h (new file, 8 lines)
@@ -0,0 +1,8 @@
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/dma-direction.h>
#include <linux/scatterlist.h>


#endif
drivers/include/linux/dma_remapping.h (new file, 48 lines)
@@ -0,0 +1,48 @@
#ifndef _DMA_REMAPPING_H
#define _DMA_REMAPPING_H

/*
 * VT-d hardware uses 4KiB page size regardless of host page size.
 */
#define VTD_PAGE_SHIFT		(12)
#define VTD_PAGE_SIZE		(1UL << VTD_PAGE_SHIFT)
#define VTD_PAGE_MASK		(((u64)-1) << VTD_PAGE_SHIFT)
#define VTD_PAGE_ALIGN(addr)	(((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)

#define VTD_STRIDE_SHIFT	(9)
#define VTD_STRIDE_MASK		(((u64)-1) << VTD_STRIDE_SHIFT)

#define DMA_PTE_READ		(1)
#define DMA_PTE_WRITE		(2)
#define DMA_PTE_LARGE_PAGE	(1 << 7)
#define DMA_PTE_SNP		(1 << 11)

#define CONTEXT_TT_MULTI_LEVEL	0
#define CONTEXT_TT_DEV_IOTLB	1
#define CONTEXT_TT_PASS_THROUGH	2

struct intel_iommu;
struct dmar_domain;
struct root_entry;


#ifdef CONFIG_INTEL_IOMMU
extern int iommu_calculate_agaw(struct intel_iommu *iommu);
extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
extern int dmar_disabled;
extern int intel_iommu_enabled;
#else
static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return 0;
}
static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return 0;
}
#define dmar_disabled		(1)
#define intel_iommu_enabled	(0)
#endif


#endif
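The paging macros follow the usual mask/align pattern: with
VTD_PAGE_SIZE == 4096, VTD_PAGE_ALIGN(0x1234) yields 0x2000 (round up)
while (0x1234 & VTD_PAGE_MASK) yields 0x1000 (round down). For instance:

	/* Round a guest-physical range to VT-d page granularity (sketch). */
	u64 start_aligned = addr & VTD_PAGE_MASK;	   /* round down */
	u64 end_aligned   = VTD_PAGE_ALIGN(addr + size);   /* round up   */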
@@ -337,6 +337,7 @@ struct sg_mapping_iter {

void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags);
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset);
bool sg_miter_next(struct sg_mapping_iter *miter);
void sg_miter_stop(struct sg_mapping_iter *miter);
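The new sg_miter_skip() slots into the usual mapping-iterator loop, which
maps one chunk of the sg list at a time. A hedged sketch of copying out of
a scatterlist starting at a byte offset:

	static size_t my_sg_copy_sketch(struct scatterlist *sgl,
					unsigned int nents,
					void *dst, size_t len, off_t skip)
	{
		struct sg_mapping_iter miter;
		size_t copied = 0;

		sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG);
		if (!sg_miter_skip(&miter, skip))	/* new in this update */
			return 0;

		while (copied < len && sg_miter_next(&miter)) {
			size_t n = min(miter.length, len - copied);

			memcpy((char *)dst + copied, miter.addr, n);
			copied += n;
		}
		sg_miter_stop(&miter);
		return copied;
	}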
@@ -28,6 +28,10 @@
#ifndef __VMWGFX_DRM_H__
#define __VMWGFX_DRM_H__

#ifndef __KERNEL__
#include <drm.h>
#endif

#define DRM_VMW_MAX_SURFACE_FACES	6
#define DRM_VMW_MAX_MIP_LEVELS		24
@@ -55,6 +59,11 @@
#define DRM_VMW_PRESENT			18
#define DRM_VMW_PRESENT_READBACK	19
#define DRM_VMW_UPDATE_LAYOUT		20
#define DRM_VMW_CREATE_SHADER		21
#define DRM_VMW_UNREF_SHADER		22
#define DRM_VMW_GB_SURFACE_CREATE	23
#define DRM_VMW_GB_SURFACE_REF		24
#define DRM_VMW_SYNCCPU			25

/*************************************************************************/
/**
@@ -75,6 +84,9 @@
#define DRM_VMW_PARAM_FIFO_CAPS		4
#define DRM_VMW_PARAM_MAX_FB_SIZE	5
#define DRM_VMW_PARAM_FIFO_HW_VERSION	6
#define DRM_VMW_PARAM_MAX_SURF_MEMORY	7
#define DRM_VMW_PARAM_3D_CAPS_SIZE	8
#define DRM_VMW_PARAM_MAX_MOB_MEMORY	9
/**
 * struct drm_vmw_getparam_arg
 *
@@ -787,4 +799,253 @@ struct drm_vmw_update_layout_arg {
	uint64_t rects;
};


/*************************************************************************/
/**
 * DRM_VMW_CREATE_SHADER - Create shader
 *
 * Creates a shader and optionally binds it to a dma buffer containing
 * the shader byte-code.
 */

/**
 * enum drm_vmw_shader_type - Shader types
 */
enum drm_vmw_shader_type {
	drm_vmw_shader_type_vs = 0,
	drm_vmw_shader_type_ps,
	drm_vmw_shader_type_gs
};


/**
 * struct drm_vmw_shader_create_arg
 *
 * @shader_type: Shader type of the shader to create.
 * @size: Size of the byte-code in bytes.
 * @buffer_handle: Buffer handle identifying the buffer containing the
 * shader byte-code
 * @shader_handle: On successful completion contains a handle that
 * can be used to subsequently identify the shader.
 * @offset: Offset in bytes into the buffer given by @buffer_handle,
 * where the shader byte-code starts
 *
 * Input / Output argument to the DRM_VMW_CREATE_SHADER Ioctl.
 */
struct drm_vmw_shader_create_arg {
	enum drm_vmw_shader_type shader_type;
	uint32_t size;
	uint32_t buffer_handle;
	uint32_t shader_handle;
	uint64_t offset;
};
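From userspace, the new shader ioctl follows the usual DRM pattern: fill
the argument struct and issue it with libdrm's drmCommandWriteRead(). A
hedged sketch (assumes an already-created dma buffer holding the byte-code
at offset 0; variable names are hypothetical):

	static int create_vs_shader_sketch(int fd, uint32_t bo_handle,
					   uint32_t bytecode_size,
					   uint32_t *shader_handle)
	{
		struct drm_vmw_shader_create_arg arg;
		int ret;

		memset(&arg, 0, sizeof(arg));
		arg.shader_type = drm_vmw_shader_type_vs;
		arg.size = bytecode_size;
		arg.buffer_handle = bo_handle;	/* buffer with the byte-code */
		arg.offset = 0;			/* byte-code at buffer start */

		ret = drmCommandWriteRead(fd, DRM_VMW_CREATE_SHADER,
					  &arg, sizeof(arg));
		if (ret == 0)
			*shader_handle = arg.shader_handle;
		return ret;
	}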
/*************************************************************************/
/**
 * DRM_VMW_UNREF_SHADER - Unreferences a shader
 *
 * Destroys a user-space reference to a shader, optionally destroying
 * it.
 */

/**
 * struct drm_vmw_shader_arg
 *
 * @handle: Handle identifying the shader to destroy.
 *
 * Input argument to the DRM_VMW_UNREF_SHADER ioctl.
 */
struct drm_vmw_shader_arg {
	uint32_t handle;
	uint32_t pad64;
};
/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_CREATE - Create a host guest-backed surface.
 *
 * Allocates a surface handle and queues a create surface command
 * for the host on the first use of the surface. The surface ID can
 * be used as the surface ID in commands referencing the surface.
 */

/**
 * enum drm_vmw_surface_flags
 *
 * @drm_vmw_surface_flag_shareable: Whether the surface is shareable
 * @drm_vmw_surface_flag_scanout: Whether the surface is a scanout
 * surface.
 * @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is
 * given.
 */
enum drm_vmw_surface_flags {
	drm_vmw_surface_flag_shareable = (1 << 0),
	drm_vmw_surface_flag_scanout = (1 << 1),
	drm_vmw_surface_flag_create_buffer = (1 << 2)
};

/**
 * struct drm_vmw_gb_surface_create_req
 *
 * @svga3d_flags: SVGA3d surface flags for the device.
 * @format: SVGA3d format.
 * @mip_levels: Number of mip levels for all faces.
 * @drm_surface_flags: Flags as described above.
 * @multisample_count: Future use. Set to 0.
 * @autogen_filter: Future use. Set to 0.
 * @buffer_handle: Buffer handle of backup buffer. SVGA3D_INVALID_ID
 * if none.
 * @base_size: Size of the base mip level for all faces.
 *
 * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
struct drm_vmw_gb_surface_create_req {
	uint32_t svga3d_flags;
	uint32_t format;
	uint32_t mip_levels;
	enum drm_vmw_surface_flags drm_surface_flags;
	uint32_t multisample_count;
	uint32_t autogen_filter;
	uint32_t buffer_handle;
	uint32_t pad64;
	struct drm_vmw_size base_size;
};
/**
 * struct drm_vmw_gb_surface_create_rep
 *
 * @handle: Surface handle.
 * @backup_size: Size of backup buffers for this surface.
 * @buffer_handle: Handle of backup buffer. SVGA3D_INVALID_ID if none.
 * @buffer_size: Actual size of the buffer identified by
 * @buffer_handle
 * @buffer_map_handle: Offset into device address space for the buffer
 * identified by @buffer_handle.
 *
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF ioctl.
 * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
struct drm_vmw_gb_surface_create_rep {
	uint32_t handle;
	uint32_t backup_size;
	uint32_t buffer_handle;
	uint32_t buffer_size;
	uint64_t buffer_map_handle;
};

/**
 * union drm_vmw_gb_surface_create_arg
 *
 * @req: Input argument as described above.
 * @rep: Output argument as described above.
 *
 * Argument to the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
union drm_vmw_gb_surface_create_arg {
	struct drm_vmw_gb_surface_create_rep rep;
	struct drm_vmw_gb_surface_create_req req;
};
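The union overlays input and output: userspace fills .req, and on return
the kernel has overwritten the same memory with .rep. A hedged sketch of
creating a guest-backed surface with a kernel-allocated backup buffer
(SVGA3D_INVALID_ID is the invalid-ID constant from the SVGA headers,
as mentioned in the comments above; format value is hypothetical):

	static int create_gb_surface_sketch(int fd, uint32_t format,
					    struct drm_vmw_size size,
					    union drm_vmw_gb_surface_create_arg *arg)
	{
		memset(arg, 0, sizeof(*arg));
		arg->req.format = format;
		arg->req.mip_levels = 1;
		arg->req.drm_surface_flags = drm_vmw_surface_flag_create_buffer;
		arg->req.buffer_handle = SVGA3D_INVALID_ID; /* kernel allocates */
		arg->req.base_size = size;

		/* On success, arg->rep.handle and arg->rep.buffer_handle
		 * identify the new surface and its backup buffer. */
		return drmCommandWriteRead(fd, DRM_VMW_GB_SURFACE_CREATE,
					   arg, sizeof(*arg));
	}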
/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_REF - Reference a host surface.
 *
 * Puts a reference on a host surface with a given handle, as previously
 * returned by the DRM_VMW_GB_SURFACE_CREATE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface handle in
 * the command stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * to and returned from the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */

/**
 * struct drm_vmw_gb_surface_ref_rep
 *
 * @creq: The data used as input when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_req"
 * @crep: Additional data output when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_rep"
 *
 * Output argument to the DRM_VMW_GB_SURFACE_REF ioctl.
 */
struct drm_vmw_gb_surface_ref_rep {
	struct drm_vmw_gb_surface_create_req creq;
	struct drm_vmw_gb_surface_create_rep crep;
};

/**
 * union drm_vmw_gb_surface_reference_arg
 *
 * @req: Input data as described above at "struct drm_vmw_surface_arg"
 * @rep: Output data as described above at "struct drm_vmw_gb_surface_ref_rep"
 *
 * Argument to the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
union drm_vmw_gb_surface_reference_arg {
	struct drm_vmw_gb_surface_ref_rep rep;
	struct drm_vmw_surface_arg req;
};
/*************************************************************************/
/**
 * DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access.
 *
 * Idles any previously submitted GPU operations on the buffer and
 * by default blocks command submissions that reference the buffer.
 * If the file descriptor used to grab a blocking CPU sync is closed, the
 * cpu sync is released.
 * The flags argument indicates how the grab / release operation should be
 * performed:
 */

/**
 * enum drm_vmw_synccpu_flags - Synccpu flags:
 *
 * @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a
 * hint to the kernel to allow command submissions that reference the buffer
 * for read-only.
 * @drm_vmw_synccpu_write: Sync for write. Block all command submissions
 * referencing this buffer.
 * @drm_vmw_synccpu_dontblock: Don't wait for GPU idle, but rather return
 * -EBUSY should the buffer be busy.
 * @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer
 * while the buffer is synced for CPU. This is similar to the GEM bo idle
 * behavior.
 */
enum drm_vmw_synccpu_flags {
	drm_vmw_synccpu_read = (1 << 0),
	drm_vmw_synccpu_write = (1 << 1),
	drm_vmw_synccpu_dontblock = (1 << 2),
	drm_vmw_synccpu_allow_cs = (1 << 3)
};

/**
 * enum drm_vmw_synccpu_op - Synccpu operations:
 *
 * @drm_vmw_synccpu_grab: Grab the buffer for CPU operations
 * @drm_vmw_synccpu_release: Release a previous grab.
 */
enum drm_vmw_synccpu_op {
	drm_vmw_synccpu_grab,
	drm_vmw_synccpu_release
};

/**
 * struct drm_vmw_synccpu_arg
 *
 * @op: The synccpu operation as described above.
 * @handle: Handle identifying the buffer object.
 * @flags: Flags as described above.
 */
struct drm_vmw_synccpu_arg {
	enum drm_vmw_synccpu_op op;
	enum drm_vmw_synccpu_flags flags;
	uint32_t handle;
	uint32_t pad64;
};

#endif
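A grab/release pair brackets CPU access to a buffer: grab idles pending GPU
work on the buffer and (by default) blocks new submissions that reference
it; release re-enables them. A hedged userspace sketch using libdrm's
drmCommandWrite() (handle is hypothetical):

	static int cpu_access_sketch(int fd, uint32_t bo_handle)
	{
		struct drm_vmw_synccpu_arg arg;
		int ret;

		memset(&arg, 0, sizeof(arg));
		arg.op = drm_vmw_synccpu_grab;
		arg.handle = bo_handle;
		arg.flags = drm_vmw_synccpu_read | drm_vmw_synccpu_write;
		ret = drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
		if (ret)
			return ret;

		/* ... CPU reads/writes through the buffer mapping ... */

		arg.op = drm_vmw_synccpu_release;
		return drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
	}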