sdk: update libdrm

git-svn-id: svn://kolibrios.org@5068 a494cfbc-eb01-0410-851d-a64ba20cac60
Sergey Semyonov (Serge) 2014-08-29 15:38:30 +00:00
parent c85b73c2b8
commit 57c86a8cde
12 changed files with 157 additions and 71 deletions

View File

@ -427,7 +427,7 @@ struct drm_draw {
* DRM_IOCTL_UPDATE_DRAW ioctl argument type.
*/
typedef enum {
DRM_DRAWABLE_CLIPRECTS,
DRM_DRAWABLE_CLIPRECTS
} drm_drawable_info_type_t;
struct drm_update_draw {
@ -459,15 +459,12 @@ struct drm_irq_busid {
enum drm_vblank_seq_type {
_DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
_DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
/* bits 1-6 are reserved for high crtcs */
_DRM_VBLANK_HIGH_CRTC_MASK = 0x0000003e,
_DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */
_DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
_DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
_DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
_DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking, unsupported */
};
#define _DRM_VBLANK_HIGH_CRTC_SHIFT 1
#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \
@ -622,6 +619,14 @@ struct drm_get_cap {
*/
#define DRM_CLIENT_CAP_STEREO_3D 1
/**
* DRM_CLIENT_CAP_UNIVERSAL_PLANES
*
* if set to 1, the DRM core will expose the full universal plane list
* (including primary and cursor planes).
*/
#define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2
/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
__u64 capability;

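The new client capability is opt-in per file descriptor. A minimal sketch of enabling it through the usual drmSetClientCap() wrapper, which fills the struct drm_set_client_cap above and issues DRM_IOCTL_SET_CLIENT_CAP (assuming this libdrm build ships that wrapper):

#include "xf86drm.h"
#include "drm.h"

/* Ask the DRM core to list primary and cursor planes alongside overlays;
 * typically done right after opening the device, before enumerating planes. */
static int enable_universal_planes(int fd)
{
	return drmSetClientCap(fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1);
}
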
View File

@ -106,11 +106,6 @@
#define DRM_FORMAT_NV21 fourcc_code('N', 'V', '2', '1') /* 2x2 subsampled Cb:Cr plane */
#define DRM_FORMAT_NV16 fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */
#define DRM_FORMAT_NV61 fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */
#define DRM_FORMAT_NV24 fourcc_code('N', 'V', '2', '4') /* non-subsampled Cr:Cb plane */
#define DRM_FORMAT_NV42 fourcc_code('N', 'V', '4', '2') /* non-subsampled Cb:Cr plane */
/* special NV12 tiled format */
#define DRM_FORMAT_NV12MT fourcc_code('T', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane 64x32 macroblocks */
/*
* 3 plane YCbCr

View File

@ -173,9 +173,6 @@ struct drm_mode_get_plane_res {
#define DRM_MODE_ENCODER_TMDS 2
#define DRM_MODE_ENCODER_LVDS 3
#define DRM_MODE_ENCODER_TVDAC 4
#define DRM_MODE_ENCODER_VIRTUAL 5
#define DRM_MODE_ENCODER_DSI 6
#define DRM_MODE_ENCODER_DPMST 7
struct drm_mode_get_encoder {
__u32 encoder_id;
@ -213,8 +210,6 @@ struct drm_mode_get_encoder {
#define DRM_MODE_CONNECTOR_HDMIB 12
#define DRM_MODE_CONNECTOR_TV 13
#define DRM_MODE_CONNECTOR_eDP 14
#define DRM_MODE_CONNECTOR_VIRTUAL 15
#define DRM_MODE_CONNECTOR_DSI 16
struct drm_mode_get_connector {
@ -235,8 +230,6 @@ struct drm_mode_get_connector {
__u32 connection;
__u32 mm_width, mm_height; /**< HxW in millimeters */
__u32 subpixel;
__u32 pad;
};
#define DRM_MODE_PROP_PENDING (1<<0)
@ -246,21 +239,6 @@ struct drm_mode_get_connector {
#define DRM_MODE_PROP_BLOB (1<<4)
#define DRM_MODE_PROP_BITMASK (1<<5) /* bitmask of enumerated types */
/* non-extended types: legacy bitmask, one bit per type: */
#define DRM_MODE_PROP_LEGACY_TYPE ( \
DRM_MODE_PROP_RANGE | \
DRM_MODE_PROP_ENUM | \
DRM_MODE_PROP_BLOB | \
DRM_MODE_PROP_BITMASK)
/* extended-types: rather than continue to consume a bit per type,
* grab a chunk of the bits to use as integer type id.
*/
#define DRM_MODE_PROP_EXTENDED_TYPE 0x0000ffc0
#define DRM_MODE_PROP_TYPE(n) ((n) << 6)
#define DRM_MODE_PROP_OBJECT DRM_MODE_PROP_TYPE(1)
#define DRM_MODE_PROP_SIGNED_RANGE DRM_MODE_PROP_TYPE(2)
struct drm_mode_property_enum {
__u64 value;
char name[DRM_PROP_NAME_LEN];
@ -284,6 +262,15 @@ struct drm_mode_connector_set_property {
__u32 connector_id;
};
#define DRM_MODE_OBJECT_CRTC 0xcccccccc
#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0
#define DRM_MODE_OBJECT_ENCODER 0xe0e0e0e0
#define DRM_MODE_OBJECT_MODE 0xdededede
#define DRM_MODE_OBJECT_PROPERTY 0xb0b0b0b0
#define DRM_MODE_OBJECT_FB 0xfbfbfbfb
#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
struct drm_mode_obj_get_properties {
__u64 props_ptr;
__u64 prop_values_ptr;
@ -346,8 +333,6 @@ struct drm_mode_fb_cmd2 {
#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
#define DRM_MODE_FB_DIRTY_FLAGS 0x03
#define DRM_MODE_FB_DIRTY_MAX_CLIPS 256
/*
* Mark a region of a framebuffer as dirty.
*
@ -388,21 +373,20 @@ struct drm_mode_mode_cmd {
struct drm_mode_modeinfo mode;
};
#define DRM_MODE_CURSOR_BO 0x01
#define DRM_MODE_CURSOR_MOVE 0x02
#define DRM_MODE_CURSOR_FLAGS 0x03
#define DRM_MODE_CURSOR_BO (1<<0)
#define DRM_MODE_CURSOR_MOVE (1<<1)
/*
* depending on the value in flags different members are used.
* depending on the value in flags diffrent members are used.
*
* CURSOR_BO uses
* crtc_id
* crtc
* width
* height
* handle - if 0 turns the cursor off
* handle - if 0 turns the cursor of
*
* CURSOR_MOVE uses
* crtc_id
* crtc
* x
* y
*/

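The two cursor flags map onto the usual mode-setting wrappers. A hedged sketch, assuming the stock drmModeSetCursor()/drmModeMoveCursor() helpers are present (each fills struct drm_mode_cursor and issues DRM_IOCTL_MODE_CURSOR):

#include <stdint.h>
#include "xf86drmMode.h"

/* Show a 64x64 cursor from a GEM handle, then move it. */
static void show_cursor(int fd, uint32_t crtc_id, uint32_t bo_handle,
			int x, int y)
{
	/* DRM_MODE_CURSOR_BO path: a bo_handle of 0 would turn the cursor off. */
	drmModeSetCursor(fd, crtc_id, bo_handle, 64, 64);

	/* DRM_MODE_CURSOR_MOVE path: only the crtc and x/y members are used. */
	drmModeMoveCursor(fd, crtc_id, x, y);
}
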
View File

@ -223,6 +223,7 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_GEM_GET_CACHING 0x30
#define DRM_I915_REG_READ 0x31
#define DRM_I915_GET_RESET_STATS 0x32
#define DRM_I915_GEM_USERPTR 0x33
#define DRM_IOCTL_I915_INIT
#define DRM_IOCTL_I915_FLUSH
@ -1050,6 +1051,20 @@ struct drm_i915_reset_stats {
__u32 pad;
};
struct drm_i915_gem_userptr {
__u64 user_ptr;
__u64 user_size;
__u32 flags;
#define I915_USERPTR_READ_ONLY 0x1
#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
/**
* Returned handle for the object.
*
* Object handles are nonzero.
*/
__u32 handle;
};
struct drm_i915_mask {
__u32 handle;
__u32 width;

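A hedged sketch of the new userptr interface: wrapping an existing, page-aligned allocation in a GEM handle. The DRM_IOCTL_I915_GEM_USERPTR request macro is not visible in this hunk and is assumed to be defined as in upstream i915_drm.h:

#include <stdint.h>
#include <string.h>
#include "xf86drm.h"
#include "i915_drm.h"

static uint32_t wrap_user_pages(int fd, void *ptr, uint64_t size)
{
	struct drm_i915_gem_userptr arg;

	memset(&arg, 0, sizeof(arg));
	arg.user_ptr  = (uintptr_t)ptr;	/* must be page aligned */
	arg.user_size = size;		/* must be a multiple of the page size */
	arg.flags     = 0;		/* or I915_USERPTR_READ_ONLY */

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg) != 0)
		return 0;		/* handles are nonzero, so 0 signals failure */
	return arg.handle;
}
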
View File

@ -52,7 +52,6 @@ drm_intel_bo *drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
return bufmgr->bo_alloc(bufmgr, name, size, alignment);
}
#if 0
drm_intel_bo *drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned long size,
@ -60,7 +59,6 @@ drm_intel_bo *drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
{
return bufmgr->bo_alloc_for_render(bufmgr, name, size, alignment);
}
#endif
drm_intel_bo *
drm_intel_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,

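With the #if 0/#endif guard dropped, drm_intel_bo_alloc_for_render() is callable again and simply forwards to the bo_alloc_for_render vtable entry that is re-enabled later in this commit. A minimal usage sketch (the buffer name and alignment are illustrative):

#include "intel_bufmgr.h"

/* Allocate a page-aligned BO intended to be rendered to. */
static drm_intel_bo *alloc_render_target(drm_intel_bufmgr *bufmgr,
					 unsigned long size)
{
	return drm_intel_bo_alloc_for_render(bufmgr, "render target",
					     size, 4096);
}
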
View File

@ -61,9 +61,8 @@ struct _drm_intel_bo {
unsigned long align;
/**
* Last seen card virtual address (offset from the beginning of the
* aperture) for the object. This should be used to fill relocation
* entries when calling drm_intel_bo_emit_reloc()
* Deprecated field containing (possibly the low 32-bits of) the last
* seen virtual card address. Use offset64 instead.
*/
unsigned long offset;
@ -84,6 +83,13 @@ struct _drm_intel_bo {
* MM-specific handle for accessing object
*/
int handle;
/**
* Last seen card virtual address (offset from the beginning of the
* aperture) for the object. This should be used to fill relocation
* entries when calling drm_intel_bo_emit_reloc()
*/
uint64_t offset64;
};
enum aub_dump_bmp_format {

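In short, the existing offset field stays for compatibility but may hold only the low 32 bits of the address, while the new offset64 carries the full presumed GPU address that relocation code should use. A hedged sketch of the intended reading pattern once execbuffer has assigned addresses:

#include <stdint.h>
#include "intel_bufmgr.h"

/* Prefer the 64-bit field; the legacy 'offset' may be truncated on
 * platforms whose GTT addresses no longer fit in an unsigned long. */
static uint64_t bo_presumed_address(const drm_intel_bo *bo)
{
	return bo->offset64;
}
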
View File

@ -212,6 +212,15 @@ struct _drm_intel_bo_gem {
*/
bool reusable;
/**
* Boolean of whether the GPU is definitely not accessing the buffer.
*
* This is only valid when reusable, since non-reusable
* buffers are those that have been shared with other
* processes, so we don't know their state.
*/
bool idle;
/**
* Size in bytes of this buffer and its relocation descendents.
*
@ -383,7 +392,7 @@ drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
(unsigned long long)bo_gem->relocs[j].offset,
target_gem->gem_handle,
target_gem->name,
target_bo->offset,
target_bo->offset64,
bo_gem->relocs[j].delta);
}
}
@ -568,11 +577,19 @@ drm_intel_gem_bo_busy(drm_intel_bo *bo)
struct drm_i915_gem_busy busy;
int ret;
if (bo_gem->reusable && bo_gem->idle)
return false;
VG_CLEAR(busy);
busy.handle = bo_gem->gem_handle;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
if (ret == 0) {
bo_gem->idle = !busy.busy;
return busy.busy;
} else {
return false;
}
return (ret == 0 && busy.busy);
}
@ -865,10 +882,6 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
}
}
bo_gem = calloc(1, sizeof(*bo_gem));
if (!bo_gem)
return NULL;
VG_CLEAR(open_arg);
open_arg.name = handle;
ret = drmIoctl(bufmgr_gem->fd,
@ -877,11 +890,29 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
if (ret != 0) {
DBG("Couldn't reference %s handle 0x%08x: %s\n",
name, handle, strerror(errno));
free(bo_gem);
return NULL;
}
/* Now see if someone has used a prime handle to get this
* object from the kernel before by looking through the list
* again for a matching gem_handle
*/
for (list = bufmgr_gem->named.next;
list != &bufmgr_gem->named;
list = list->next) {
bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
if (bo_gem->gem_handle == open_arg.handle) {
drm_intel_gem_bo_reference(&bo_gem->bo);
return &bo_gem->bo;
}
}
bo_gem = calloc(1, sizeof(*bo_gem));
if (!bo_gem)
return NULL;
bo_gem->bo.size = open_arg.size;
bo_gem->bo.offset = 0;
bo_gem->bo.offset64 = 0;
bo_gem->bo.virtual = NULL;
bo_gem->bo.bufmgr = bufmgr;
bo_gem->name = name;
@ -1322,6 +1353,9 @@ int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
int drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
#ifdef HAVE_VALGRIND
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
#endif
int ret;
/* If the CPU cache isn't coherent with the GTT, then use a
@ -1662,7 +1696,7 @@ do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
target_bo_gem->gem_handle;
bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset64;
bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
if (target_bo != bo)
@ -1813,11 +1847,12 @@ drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
/* Update the buffer offset */
if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
if (bufmgr_gem->exec_objects[i].offset != bo->offset64) {
DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
bo_gem->gem_handle, bo_gem->name, bo->offset,
bo_gem->gem_handle, bo_gem->name, bo->offset64,
(unsigned long long)bufmgr_gem->exec_objects[i].
offset);
bo->offset64 = bufmgr_gem->exec_objects[i].offset;
bo->offset = bufmgr_gem->exec_objects[i].offset;
}
}
@ -1833,10 +1868,11 @@ drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
/* Update the buffer offset */
if (bufmgr_gem->exec2_objects[i].offset != bo->offset) {
if (bufmgr_gem->exec2_objects[i].offset != bo->offset64) {
DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
bo_gem->gem_handle, bo_gem->name, bo->offset,
bo_gem->gem_handle, bo_gem->name, bo->offset64,
(unsigned long long)bufmgr_gem->exec2_objects[i].offset);
bo->offset64 = bufmgr_gem->exec2_objects[i].offset;
bo->offset = bufmgr_gem->exec2_objects[i].offset;
}
}
@ -2221,6 +2257,8 @@ skip_execution:
drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
bo_gem->idle = false;
/* Disconnect the buffer from the validate list */
bo_gem->validate_index = -1;
bufmgr_gem->exec_bos[i] = NULL;
@ -2274,6 +2312,7 @@ drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
if (ret != 0)
return -errno;
bo->offset64 = pin.offset;
bo->offset = pin.offset;
return 0;
}
@ -2488,7 +2527,8 @@ drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
bo_gem->global_name = flink.name;
bo_gem->reusable = false;
DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
if (DRMLISTEMPTY(&bo_gem->name_list))
DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
}
*name = bo_gem->global_name;
@ -2876,7 +2916,7 @@ drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
aub_out(bufmgr_gem, 0); /* comment len */
/* Set up the GTT. The max we can handle is 256M */
aub_out(bufmgr_gem, CMD_AUB_TRACE_HEADER_BLOCK | (5 - 2));
aub_out(bufmgr_gem, CMD_AUB_TRACE_HEADER_BLOCK | ((bufmgr_gem->gen >= 8 ? 6 : 5) - 2));
aub_out(bufmgr_gem, AUB_TRACE_MEMTYPE_NONLOCAL | 0 | AUB_TRACE_OP_DATA_WRITE);
aub_out(bufmgr_gem, 0); /* subtype */
aub_out(bufmgr_gem, 0); /* offset */
@ -2894,15 +2934,19 @@ drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
drm_intel_context *context = NULL;
int ret;
context = calloc(1, sizeof(*context));
if (!context)
return NULL;
VG_CLEAR(create);
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
if (ret != 0) {
DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
strerror(errno));
free(context);
return NULL;
}
context = calloc(1, sizeof(*context));
context->ctx_id = create.ctx_id;
context->bufmgr = bufmgr;
@ -3138,8 +3182,8 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
// bufmgr_gem->bufmgr.bo_alloc_for_render =
// drm_intel_gem_bo_alloc_for_render;
bufmgr_gem->bufmgr.bo_alloc_for_render =
drm_intel_gem_bo_alloc_for_render;
bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;

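The idle flag added above makes drm_intel_gem_bo_busy() a cheap check once the kernel has reported a buffer idle; exec clears the flag again for every BO on the validate list. A hedged sketch of the polling pattern this shortcut helps (real code would sleep or map instead of spinning):

#include "intel_bufmgr.h"

/* Wait until the GPU is done with 'bo'.  After the first ioctl reports it
 * idle, later calls return immediately via the cached flag (valid only for
 * reusable, never-shared BOs). */
static void wait_until_idle(drm_intel_bo *bo)
{
	while (drm_intel_bo_busy(bo))
		;	/* illustrative busy-wait */
}
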
View File

@ -56,10 +56,10 @@ struct _drm_intel_bufmgr {
*
* This is otherwise the same as bo_alloc.
*/
// drm_intel_bo *(*bo_alloc_for_render) (drm_intel_bufmgr *bufmgr,
// const char *name,
// unsigned long size,
// unsigned int alignment);
drm_intel_bo *(*bo_alloc_for_render) (drm_intel_bufmgr *bufmgr,
const char *name,
unsigned long size,
unsigned int alignment);
/**
* Allocate a tiled buffer object.

View File

@ -160,6 +160,11 @@
#define PCI_CHIP_VALLEYVIEW_2 0x0f32
#define PCI_CHIP_VALLEYVIEW_3 0x0f33
#define PCI_CHIP_CHERRYVIEW_0 0x22b0
#define PCI_CHIP_CHERRYVIEW_1 0x22b1
#define PCI_CHIP_CHERRYVIEW_2 0x22b2
#define PCI_CHIP_CHERRYVIEW_3 0x22b3
#define IS_MOBILE(devid) ((devid) == PCI_CHIP_I855_GM || \
(devid) == PCI_CHIP_I915_GM || \
(devid) == PCI_CHIP_I945_GM || \
@ -311,8 +316,13 @@
((devid & 0x000f) == BDW_WORKSTATION) ? 1 : \
((devid & 0x000f) == BDW_ULX) ? 1 : 0)
#define IS_CHERRYVIEW(devid) ((devid) == PCI_CHIP_CHERRYVIEW_0 || \
(devid) == PCI_CHIP_CHERRYVIEW_1 || \
(devid) == PCI_CHIP_CHERRYVIEW_2 || \
(devid) == PCI_CHIP_CHERRYVIEW_3)
#define IS_GEN8(devid) IS_BROADWELL(devid)
#define IS_GEN8(devid) (IS_BROADWELL(devid) || \
IS_CHERRYVIEW(devid))
#define IS_9XX(dev) (IS_GEN3(dev) || \
IS_GEN4(dev) || \

View File

@ -44,6 +44,11 @@
#include <time.h>
#include <stdarg.h>
/* Not all systems have MAP_FAILED defined */
#ifndef MAP_FAILED
#define MAP_FAILED ((void *)-1)
#endif
#include "xf86drm.h"
#include <kos32sys.h>

View File

@ -79,8 +79,14 @@ extern "C" {
typedef unsigned int drmSize, *drmSizePtr; /**< For mapped regions */
typedef void *drmAddress, **drmAddressPtr; /**< For mapped regions */
#if (__GNUC__ >= 3)
#define DRM_PRINTFLIKE(f, a) __attribute__ ((format(__printf__, f, a)))
#else
#define DRM_PRINTFLIKE(f, a)
#endif
typedef struct _drmServerInfo {
int (*debug_print)(const char *format, va_list ap);
int (*debug_print)(const char *format, va_list ap) DRM_PRINTFLIKE(1,0);
int (*load_module)(const char *name);
} drmServerInfo, *drmServerInfoPtr;
@ -684,7 +690,7 @@ extern int drmSLLookupNeighbors(void *l, unsigned long key,
extern int drmOpenOnce(void *unused, const char *BusID, int *newlyopened);
extern void drmCloseOnce(int fd);
extern void drmMsg(const char *format, ...);
extern void drmMsg(const char *format, ...) DRM_PRINTFLIKE(1, 2);
extern int drmSetMaster(int fd);
extern int drmDropMaster(int fd);

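The DRM_PRINTFLIKE annotation only affects diagnostics: with GCC 3 or newer the compiler can now cross-check format strings against their arguments at the annotated call sites. For example:

#include "xf86drm.h"

static void log_open(const char *busid, int fd)
{
	drmMsg("opened %s as fd %d\n", busid, fd);	/* checked, OK */
	/* drmMsg("opened %s as fd %d\n", fd, busid);	   would now trigger -Wformat */
}
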
View File

@ -441,10 +441,28 @@ struct blit_call
int stride;
};
void Blit(void *bitmap, int dst_x, int dst_y,
static inline void Blit(void *bitmap, int dst_x, int dst_y,
int src_x, int src_y, int w, int h,
int src_w, int src_h, int stride);
int src_w, int src_h, int stride)
{
volatile struct blit_call bc;
bc.dstx = dst_x;
bc.dsty = dst_y;
bc.w = w;
bc.h = h;
bc.srcx = src_x;
bc.srcy = src_y;
bc.srcw = src_w;
bc.srch = src_h;
bc.stride = stride;
bc.bitmap = bitmap;
__asm__ __volatile__(
"int $0x40"
::"a"(73),"b"(0),"c"(&bc.dstx));
};
#endif
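Blit() is now a header-only inline wrapper around KolibriOS system call 73 with sub-function 0 in ebx, which copies a user buffer into the current window. A usage sketch, assuming a 32-bpp source buffer and taking the field meanings from the struct blit_call layout and the parameter names above (the header defining Blit must be included):

/* Copy a w x h 32-bpp image to the window origin. */
static void present(void *pixels, int w, int h)
{
	Blit(pixels, 0, 0,	/* destination x/y inside the window */
	     0, 0, w, h,	/* source origin and blit size */
	     w, h,		/* full source dimensions */
	     w * 4);		/* source stride in bytes (32 bpp) */
}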