diff --git a/contrib/sdk/sources/libdrm/include/drm/drm.h b/contrib/sdk/sources/libdrm/include/drm/drm.h
index 36888d3243..9c343c09d2 100644
--- a/contrib/sdk/sources/libdrm/include/drm/drm.h
+++ b/contrib/sdk/sources/libdrm/include/drm/drm.h
@@ -38,6 +38,7 @@
 #include
+#include
 typedef int8_t  __s8;
 typedef uint8_t __u8;
 typedef int16_t __s16;
@@ -627,6 +628,13 @@ struct drm_get_cap {
  */
 #define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2
 
+/**
+ * DRM_CLIENT_CAP_ATOMIC
+ *
+ * If set to 1, the DRM core will allow atomic modesetting requests.
+ */
+#define DRM_CLIENT_CAP_ATOMIC 3
+
 /** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
 struct drm_set_client_cap {
     __u64 capability;
@@ -673,12 +681,13 @@ struct drm_prime_handle {
 #define SRV_MASK_UPDATE         45
 #define SRV_MASK_UPDATE_EX      46
-
+#define SRV_I915_GEM_PREAD      47
+#define SRV_I915_GEM_EXECBUFFER 48
 
 #include "drm_mode.h"
 
 #define DRM_IOCTL_BASE 'd'
-#define DRM_IO(nr)        _IO(DRM_IOCTL_BASE,nr)
+#define DRM_IO(nr)          _IO(DRM_IOCTL_BASE,nr)
 #define DRM_IOR(nr,type)  _IOR(DRM_IOCTL_BASE,nr,type)
 #define DRM_IOW(nr,type)  _IOW(DRM_IOCTL_BASE,nr,type)
 #define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type)
@@ -829,6 +838,7 @@ struct drm_event_vblank {
 #define DRM_CAP_PRIME 0x5
 #define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
 #define DRM_CAP_ASYNC_PAGE_FLIP 0x7
+#define DRM_CAP_ADDFB2_MODIFIERS 0x10
 
 #define DRM_PRIME_CAP_IMPORT 0x1
 #define DRM_PRIME_CAP_EXPORT 0x2
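Usage sketch (editor's note, not part of the patch): the new DRM_CLIENT_CAP_ATOMIC is opted into per client through the existing SET_CLIENT_CAP ioctl; fd is assumed to be an open DRM device node.

    /* Ask the DRM core to accept atomic modesetting requests. */
    struct drm_set_client_cap cap = {
        .capability = DRM_CLIENT_CAP_ATOMIC,
        .value      = 1,
    };

    if (drmIoctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cap) != 0)
        /* core too old for atomic: fall back to legacy modesetting */;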
diff --git a/contrib/sdk/sources/libdrm/include/drm/drm_fourcc.h b/contrib/sdk/sources/libdrm/include/drm/drm_fourcc.h
index 7296679d9f..624b882093 100644
--- a/contrib/sdk/sources/libdrm/include/drm/drm_fourcc.h
+++ b/contrib/sdk/sources/libdrm/include/drm/drm_fourcc.h
@@ -127,4 +127,97 @@
 #define DRM_FORMAT_YUV444 fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */
 #define DRM_FORMAT_YVU444 fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */
+
+/*
+ * Format Modifiers:
+ *
+ * Format modifiers describe, typically, a re-ordering or modification
+ * of the data in a plane of an FB. This can be used to express tiled/
+ * swizzled formats, or compression, or a combination of the two.
+ *
+ * The upper 8 bits of the format modifier are a vendor-id as assigned
+ * below. The lower 56 bits are assigned as the vendor sees fit.
+ */
+
+/* Vendor Ids: */
+#define DRM_FORMAT_MOD_NONE           0
+#define DRM_FORMAT_MOD_VENDOR_INTEL   0x01
+#define DRM_FORMAT_MOD_VENDOR_AMD     0x02
+#define DRM_FORMAT_MOD_VENDOR_NV      0x03
+#define DRM_FORMAT_MOD_VENDOR_SAMSUNG 0x04
+#define DRM_FORMAT_MOD_VENDOR_QCOM    0x05
+/* add more to the end as needed */
+
+#define fourcc_mod_code(vendor, val) \
+    ((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | (val & 0x00ffffffffffffffULL))
+
+/*
+ * Format Modifier tokens:
+ *
+ * When adding a new token please document the layout with a code comment,
+ * similar to the fourcc codes above. drm_fourcc.h is considered the
+ * authoritative source for all of these.
+ */
+
+/* Intel framebuffer modifiers */
+
+/*
+ * Intel X-tiling layout
+ *
+ * This is a tiled layout using 4Kb tiles (except on gen2 where the tiles are
+ * 2Kb) in row-major layout. Within the tile bytes are laid out row-major, with
+ * a platform-dependent stride. On top of that the memory can apply
+ * platform-dependent swizzling of some higher address bits into bit6.
+ *
+ * This format is highly platform specific and not useful for cross-driver
+ * sharing. It exists since on a given platform it does uniquely identify the
+ * layout in a simple way for i915-specific userspace.
+ */
+#define I915_FORMAT_MOD_X_TILED fourcc_mod_code(INTEL, 1)
+
+/*
+ * Intel Y-tiling layout
+ *
+ * This is a tiled layout using 4Kb tiles (except on gen2 where the tiles are
+ * 2Kb) in row-major layout. Within the tile bytes are laid out in OWORD
+ * (16 byte) chunks column-major, with a platform-dependent height. On top of
+ * that the memory can apply platform-dependent swizzling of some higher
+ * address bits into bit6.
+ *
+ * This format is highly platform specific and not useful for cross-driver
+ * sharing. It exists since on a given platform it does uniquely identify the
+ * layout in a simple way for i915-specific userspace.
+ */
+#define I915_FORMAT_MOD_Y_TILED fourcc_mod_code(INTEL, 2)
+
+/*
+ * Intel Yf-tiling layout
+ *
+ * This is a tiled layout using 4Kb tiles in row-major layout.
+ * Within the tile pixels are laid out in 16 256-byte units / sub-tiles which
+ * are arranged in four groups (two wide, two high) with column-major layout.
+ * Each group therefore consists of four 256-byte units, which are also laid
+ * out as 2x2 column-major.
+ * 256-byte units are made out of four 64-byte blocks of pixels, producing
+ * either a square block or a 2:1 unit.
+ * 64-byte blocks of pixels contain four pixel rows of 16 bytes, where the
+ * width in pixels depends on the pixel depth.
+ */
+#define I915_FORMAT_MOD_Yf_TILED fourcc_mod_code(INTEL, 3)
+
+/*
+ * Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
+ *
+ * Macroblocks are laid in a Z-shape, and the pixel data follows the
+ * standard NV12 style.
+ * As for NV12, an image is the result of two frame buffers: one for Y,
+ * one for the interleaved Cb/Cr components (1/2 the height of the Y buffer).
+ * Alignment requirements are (for each buffer):
+ * - multiple of 128 pixels for the width
+ * - multiple of 32 pixels for the height
+ *
+ * For more information: see http://linuxtv.org/downloads/v4l-dvb-apis/re32.html
+ */
+#define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE fourcc_mod_code(SAMSUNG, 1)
+
 #endif /* DRM_FOURCC_H */
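Because the vendor id occupies the top 8 bits, a modifier can be split without any vendor-specific knowledge. A minimal sketch; the two macros are illustrative, not part of the header:

    #define MOD_VENDOR(m)  ((__u8)((__u64)(m) >> 56))           /* upper 8 bits */
    #define MOD_PAYLOAD(m) ((__u64)(m) & 0x00ffffffffffffffULL) /* lower 56 bits */

    /* I915_FORMAT_MOD_X_TILED == fourcc_mod_code(INTEL, 1), so
     * MOD_VENDOR(I915_FORMAT_MOD_X_TILED)  -> 0x01 (DRM_FORMAT_MOD_VENDOR_INTEL)
     * MOD_PAYLOAD(I915_FORMAT_MOD_X_TILED) -> 1 */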
diff --git a/contrib/sdk/sources/libdrm/include/drm/drm_mode.h b/contrib/sdk/sources/libdrm/include/drm/drm_mode.h
index 6b07546f85..d4b4b999af 100644
--- a/contrib/sdk/sources/libdrm/include/drm/drm_mode.h
+++ b/contrib/sdk/sources/libdrm/include/drm/drm_mode.h
@@ -42,20 +42,20 @@
 
 /* Video mode flags */
 /* bit compatible with the xorg definitions. */
-#define DRM_MODE_FLAG_PHSYNC (1<<0)
-#define DRM_MODE_FLAG_NHSYNC (1<<1)
-#define DRM_MODE_FLAG_PVSYNC (1<<2)
-#define DRM_MODE_FLAG_NVSYNC (1<<3)
-#define DRM_MODE_FLAG_INTERLACE (1<<4)
-#define DRM_MODE_FLAG_DBLSCAN (1<<5)
-#define DRM_MODE_FLAG_CSYNC (1<<6)
-#define DRM_MODE_FLAG_PCSYNC (1<<7)
-#define DRM_MODE_FLAG_NCSYNC (1<<8)
-#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */
-#define DRM_MODE_FLAG_BCAST (1<<10)
-#define DRM_MODE_FLAG_PIXMUX (1<<11)
-#define DRM_MODE_FLAG_DBLCLK (1<<12)
-#define DRM_MODE_FLAG_CLKDIV2 (1<<13)
+#define DRM_MODE_FLAG_PHSYNC     (1<<0)
+#define DRM_MODE_FLAG_NHSYNC     (1<<1)
+#define DRM_MODE_FLAG_PVSYNC     (1<<2)
+#define DRM_MODE_FLAG_NVSYNC     (1<<3)
+#define DRM_MODE_FLAG_INTERLACE  (1<<4)
+#define DRM_MODE_FLAG_DBLSCAN    (1<<5)
+#define DRM_MODE_FLAG_CSYNC      (1<<6)
+#define DRM_MODE_FLAG_PCSYNC     (1<<7)
+#define DRM_MODE_FLAG_NCSYNC     (1<<8)
+#define DRM_MODE_FLAG_HSKEW      (1<<9) /* hskew provided */
+#define DRM_MODE_FLAG_BCAST      (1<<10)
+#define DRM_MODE_FLAG_PIXMUX     (1<<11)
+#define DRM_MODE_FLAG_DBLCLK     (1<<12)
+#define DRM_MODE_FLAG_CLKDIV2    (1<<13)
 #define DRM_MODE_FLAG_3D_MASK    (0x1f<<14)
 #define DRM_MODE_FLAG_3D_NONE    (0<<14)
 #define DRM_MODE_FLAG_3D_FRAME_PACKING (1<<14)
@@ -173,6 +173,9 @@ struct drm_mode_get_plane_res {
 #define DRM_MODE_ENCODER_TMDS  2
 #define DRM_MODE_ENCODER_LVDS  3
 #define DRM_MODE_ENCODER_TVDAC 4
+#define DRM_MODE_ENCODER_VIRTUAL 5
+#define DRM_MODE_ENCODER_DSI   6
+#define DRM_MODE_ENCODER_DPMST 7
 
 struct drm_mode_get_encoder {
     __u32 encoder_id;
@@ -210,6 +213,8 @@ struct drm_mode_get_encoder {
 #define DRM_MODE_CONNECTOR_HDMIB    12
 #define DRM_MODE_CONNECTOR_TV       13
 #define DRM_MODE_CONNECTOR_eDP      14
+#define DRM_MODE_CONNECTOR_VIRTUAL  15
+#define DRM_MODE_CONNECTOR_DSI      16
 
 struct drm_mode_get_connector {
@@ -239,6 +244,21 @@ struct drm_mode_get_connector {
 #define DRM_MODE_PROP_BLOB    (1<<4)
 #define DRM_MODE_PROP_BITMASK (1<<5) /* bitmask of enumerated types */
 
+/* non-extended types: legacy bitmask, one bit per type: */
+#define DRM_MODE_PROP_LEGACY_TYPE  ( \
+        DRM_MODE_PROP_RANGE | \
+        DRM_MODE_PROP_ENUM | \
+        DRM_MODE_PROP_BLOB | \
+        DRM_MODE_PROP_BITMASK)
+
+/* extended-types: rather than continue to consume a bit per type,
+ * grab a chunk of the bits to use as integer type id.
+ */
+#define DRM_MODE_PROP_EXTENDED_TYPE 0x0000ffc0
+#define DRM_MODE_PROP_TYPE(n)       ((n) << 6)
+#define DRM_MODE_PROP_OBJECT        DRM_MODE_PROP_TYPE(1)
+#define DRM_MODE_PROP_SIGNED_RANGE  DRM_MODE_PROP_TYPE(2)
+
 struct drm_mode_property_enum {
     __u64 value;
     char name[DRM_PROP_NAME_LEN];
@@ -302,7 +322,8 @@ struct drm_mode_fb_cmd {
     __u32 handle;
 };
 
-#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
+#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
+#define DRM_MODE_FB_MODIFIERS  (1<<1) /* enables ->modifier[] */
 
 struct drm_mode_fb_cmd2 {
     __u32 fb_id;
@@ -323,10 +344,18 @@ struct drm_mode_fb_cmd2 {
      * So it would consist of Y as offset[0] and UV as
      * offset[1]. Note that offset[0] will generally
      * be 0.
+     *
+     * To accommodate tiled, compressed, etc. formats, a per-plane
+     * modifier can be specified. The default value of zero
+     * indicates "native" format as specified by the fourcc.
+     * A vendor-specific modifier token allows, for example,
+     * different tiling/swizzling patterns on different planes.
+     * See the discussion above of DRM_FORMAT_MOD_xxx.
      */
     __u32 handles[4];
     __u32 pitches[4];  /* pitch for each plane */
     __u32 offsets[4];  /* offset of each plane */
+    __u64 modifier[4]; /* ie, tiling, compressed (per plane) */
 };
 
 #define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
@@ -487,4 +516,41 @@ struct drm_mode_destroy_dumb {
     __u32 handle;
 };
 
+/* page-flip flags are valid, plus: */
+#define DRM_MODE_ATOMIC_TEST_ONLY     0x0100
+#define DRM_MODE_ATOMIC_NONBLOCK      0x0200
+#define DRM_MODE_ATOMIC_ALLOW_MODESET 0x0400
+
+struct drm_mode_atomic {
+    __u32 flags;
+    __u32 count_objs;
+    __u64 objs_ptr;
+    __u64 count_props_ptr;
+    __u64 props_ptr;
+    __u64 prop_values_ptr;
+    __u64 reserved;
+    __u64 user_data;
+};
+
+/**
+ * Create a new 'blob' data property, copying length bytes from data pointer,
+ * and returning new blob ID.
+ */
+struct drm_mode_create_blob {
+    /** Pointer to data to copy. */
+    __u64 data;
+    /** Length of data to copy. */
+    __u32 length;
+    /** Return: new property ID. */
+    __u32 blob_id;
+};
+
+/**
+ * Destroy a user-created blob property.
+ */
+struct drm_mode_destroy_blob {
+    __u32 blob_id;
+};
+
 #endif
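Sketch of the blob lifecycle these two structs describe, e.g. uploading a mode for use as an atomic MODE_ID property value; mode is assumed to be a struct drm_mode_modeinfo, and the CREATEPROPBLOB/DESTROYPROPBLOB ioctl numbers come from the core header:

    struct drm_mode_create_blob create = {0};

    create.data   = (__u64)(uintptr_t)&mode; /* userspace pointer as u64 */
    create.length = sizeof(mode);

    if (drmIoctl(fd, DRM_IOCTL_MODE_CREATEPROPBLOB, &create) == 0) {
        /* create.blob_id is now usable as a property value; later: */
        struct drm_mode_destroy_blob destroy = { .blob_id = create.blob_id };
        drmIoctl(fd, DRM_IOCTL_MODE_DESTROYPROPBLOB, &destroy);
    }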
diff --git a/contrib/sdk/sources/libdrm/include/drm/i915_drm.h b/contrib/sdk/sources/libdrm/include/drm/i915_drm.h
index 24f4b24be7..de9cd0a789 100644
--- a/contrib/sdk/sources/libdrm/include/drm/i915_drm.h
+++ b/contrib/sdk/sources/libdrm/include/drm/i915_drm.h
@@ -165,58 +165,62 @@ typedef struct _drm_i915_sarea {
 
 /* Flags for perf_boxes */
-#define I915_BOX_RING_EMPTY 0x1
-#define I915_BOX_FLIP 0x2
-#define I915_BOX_WAIT 0x4
-#define I915_BOX_TEXTURE_LOAD 0x8
-#define I915_BOX_LOST_CONTEXT 0x10
+#define I915_BOX_RING_EMPTY   0x1
+#define I915_BOX_FLIP         0x2
+#define I915_BOX_WAIT         0x4
+#define I915_BOX_TEXTURE_LOAD 0x8
+#define I915_BOX_LOST_CONTEXT 0x10
 
-/* I915 specific ioctls
- * The device specific ioctl range is 0x40 to 0x79.
+/*
+ * i915 specific ioctls.
+ *
+ * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END) ie
+ * [0x40, 0xa0) (a0 is excluded). The numbers below are defined as offsets
+ * against DRM_COMMAND_BASE and should be between [0x0, 0x60).
  */
-#define DRM_I915_INIT 0x00
-#define DRM_I915_FLUSH 0x01
-#define DRM_I915_FLIP 0x02
-#define DRM_I915_BATCHBUFFER 0x03
-#define DRM_I915_IRQ_EMIT 0x04
-#define DRM_I915_IRQ_WAIT 0x05
-#define DRM_I915_GETPARAM 0x06
-#define DRM_I915_SETPARAM 0x07
-#define DRM_I915_ALLOC 0x08
-#define DRM_I915_FREE 0x09
-#define DRM_I915_INIT_HEAP 0x0a
-#define DRM_I915_CMDBUFFER 0x0b
-#define DRM_I915_DESTROY_HEAP 0x0c
+#define DRM_I915_INIT         0x00
+#define DRM_I915_FLUSH        0x01
+#define DRM_I915_FLIP         0x02
+#define DRM_I915_BATCHBUFFER  0x03
+#define DRM_I915_IRQ_EMIT     0x04
+#define DRM_I915_IRQ_WAIT     0x05
+#define DRM_I915_GETPARAM     0x06
+#define DRM_I915_SETPARAM     0x07
+#define DRM_I915_ALLOC        0x08
+#define DRM_I915_FREE         0x09
+#define DRM_I915_INIT_HEAP    0x0a
+#define DRM_I915_CMDBUFFER    0x0b
+#define DRM_I915_DESTROY_HEAP 0x0c
 #define DRM_I915_SET_VBLANK_PIPE 0x0d
 #define DRM_I915_GET_VBLANK_PIPE 0x0e
-#define DRM_I915_VBLANK_SWAP 0x0f
-#define DRM_I915_HWS_ADDR 0x11
-#define DRM_I915_GEM_INIT 0x13
-#define DRM_I915_GEM_EXECBUFFER 0x14
-#define DRM_I915_GEM_PIN 0x15
-#define DRM_I915_GEM_UNPIN 0x16
-#define DRM_I915_GEM_BUSY 0x17
-#define DRM_I915_GEM_THROTTLE 0x18
-#define DRM_I915_GEM_ENTERVT 0x19
-#define DRM_I915_GEM_LEAVEVT 0x1a
-#define DRM_I915_GEM_CREATE 0x1b
-#define DRM_I915_GEM_PREAD 0x1c
-#define DRM_I915_GEM_PWRITE 0x1d
-#define DRM_I915_GEM_MMAP 0x1e
-#define DRM_I915_GEM_SET_DOMAIN 0x1f
-#define DRM_I915_GEM_SW_FINISH 0x20
-#define DRM_I915_GEM_SET_TILING 0x21
-#define DRM_I915_GEM_GET_TILING 0x22
-#define DRM_I915_GEM_GET_APERTURE 0x23
-#define DRM_I915_GEM_MMAP_GTT 0x24
+#define DRM_I915_VBLANK_SWAP      0x0f
+#define DRM_I915_HWS_ADDR         0x11
+#define DRM_I915_GEM_INIT         0x13
+#define DRM_I915_GEM_EXECBUFFER   0x14
+#define DRM_I915_GEM_PIN          0x15
+#define DRM_I915_GEM_UNPIN        0x16
+#define DRM_I915_GEM_BUSY         0x17
+#define DRM_I915_GEM_THROTTLE     0x18
+#define DRM_I915_GEM_ENTERVT      0x19
+#define DRM_I915_GEM_LEAVEVT      0x1a
+#define DRM_I915_GEM_CREATE       0x1b
+#define DRM_I915_GEM_PREAD        0x1c
+#define DRM_I915_GEM_PWRITE       0x1d
+#define DRM_I915_GEM_MMAP         0x1e
+#define DRM_I915_GEM_SET_DOMAIN   0x1f
+#define DRM_I915_GEM_SW_FINISH    0x20
+#define DRM_I915_GEM_SET_TILING   0x21
+#define DRM_I915_GEM_GET_TILING   0x22
+#define DRM_I915_GEM_GET_APERTURE 0x23
+#define DRM_I915_GEM_MMAP_GTT     0x24
 #define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25
-#define DRM_I915_GEM_MADVISE 0x26
+#define DRM_I915_GEM_MADVISE      0x26
 #define DRM_I915_OVERLAY_PUT_IMAGE 0x27
-#define DRM_I915_OVERLAY_ATTRS 0x28
+#define DRM_I915_OVERLAY_ATTRS    0x28
 #define DRM_I915_GEM_EXECBUFFER2  0x29
 #define DRM_I915_GET_SPRITE_COLORKEY 0x2a
 #define DRM_I915_SET_SPRITE_COLORKEY 0x2b
-#define DRM_I915_GEM_WAIT 0x2c
+#define DRM_I915_GEM_WAIT         0x2c
 #define DRM_I915_GEM_CONTEXT_CREATE  0x2d
 #define DRM_I915_GEM_CONTEXT_DESTROY 0x2e
 #define DRM_I915_GEM_SET_CACHING  0x2f
@@ -243,7 +247,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_VBLANK_SWAP
 #define DRM_IOCTL_I915_HWS_ADDR
 #define DRM_IOCTL_I915_GEM_INIT
-#define DRM_IOCTL_I915_GEM_EXECBUFFER
+#define DRM_IOCTL_I915_GEM_EXECBUFFER   SRV_I915_GEM_EXECBUFFER
 #define DRM_IOCTL_I915_GEM_EXECBUFFER2  SRV_I915_GEM_EXECBUFFER2
 #define DRM_IOCTL_I915_GEM_PIN          SRV_I915_GEM_PIN
 #define DRM_IOCTL_I915_GEM_UNPIN        SRV_I915_GEM_UNPIN
@@ -254,7 +258,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_GEM_ENTERVT
 #define DRM_IOCTL_I915_GEM_LEAVEVT
 #define DRM_IOCTL_I915_GEM_CREATE       SRV_I915_GEM_CREATE
-#define DRM_IOCTL_I915_GEM_PREAD
+#define DRM_IOCTL_I915_GEM_PREAD        SRV_I915_GEM_PREAD
 #define DRM_IOCTL_I915_GEM_PWRITE       SRV_I915_GEM_PWRITE
 #define DRM_IOCTL_I915_GEM_MMAP         SRV_I915_GEM_MMAP
 #define DRM_IOCTL_I915_GEM_MMAP_GTT     SRV_I915_GEM_MMAP_GTT
@@ -332,7 +336,7 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_WAIT_TIMEOUT      19
 #define I915_PARAM_HAS_SEMAPHORES        20
 #define I915_PARAM_HAS_PRIME_VMAP_FLUSH  21
-#define I915_PARAM_HAS_VEBOX 22
+#define I915_PARAM_HAS_VEBOX             22
 #define I915_PARAM_HAS_SECURE_BATCHES    23
 #define I915_PARAM_HAS_PINNED_BATCHES    24
 #define I915_PARAM_HAS_EXEC_NO_RELOC     25
@@ -340,9 +344,21 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_WT                27
 #define I915_PARAM_CMD_PARSER_VERSION    28
 #define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
+#define I915_PARAM_MMAP_VERSION          30
+#define I915_PARAM_HAS_BSD2              31
+#define I915_PARAM_REVISION              32
+#define I915_PARAM_SUBSLICE_TOTAL        33
+#define I915_PARAM_EU_TOTAL              34
+#define I915_PARAM_HAS_GPU_RESET         35
+#define I915_PARAM_HAS_RESOURCE_STREAMER 36
+#define I915_PARAM_HAS_EXEC_SOFTPIN      37
 
 typedef struct drm_i915_getparam {
     int param;
+    /*
+     * WARNING: Using pointers instead of fixed-size u64 means we need to write
+     * compat32 code. Don't repeat this mistake.
+     */
     int *value;
 } drm_i915_getparam_t;
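Query sketch for one of the new parameters, using the pointer-based interface the WARNING above complains about; fd is an open i915 device:

    int eu_total = 0;
    drm_i915_getparam_t gp;

    memset(&gp, 0, sizeof(gp));
    gp.param = I915_PARAM_EU_TOTAL;
    gp.value = &eu_total;            /* a pointer, hence the compat32 pain */

    if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
        printf("EUs: %d\n", eu_total);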
@@ -487,6 +503,14 @@ struct drm_i915_gem_mmap {
      * This is a fixed-size type for 32/64 compatibility.
      */
     __u64 addr_ptr;
+
+    /**
+     * Flags for extended behaviour.
+     *
+     * Added in version 2.
+     */
+    __u64 flags;
+#define I915_MMAP_WC 0x1
 };
 
 struct drm_i915_gem_mmap_gtt {
@@ -654,15 +678,21 @@ struct drm_i915_gem_exec_object2 {
     __u64 alignment;
 
     /**
-     * Returned value of the updated offset of the object, for future
-     * presumed_offset writes.
+     * When the EXEC_OBJECT_PINNED flag is specified this is populated by
+     * the user with the GTT offset at which this object will be pinned.
+     * When the I915_EXEC_NO_RELOC flag is specified this must contain the
+     * presumed_offset of the object.
+     * During execbuffer2 the kernel populates it with the value of the
+     * current GTT offset of the object, for future presumed_offset writes.
      */
     __u64 offset;
 
 #define EXEC_OBJECT_NEEDS_FENCE (1<<0)
 #define EXEC_OBJECT_NEEDS_GTT   (1<<1)
 #define EXEC_OBJECT_WRITE       (1<<2)
-#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_WRITE<<1)
+#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
+#define EXEC_OBJECT_PINNED      (1<<4)
+#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_PINNED<<1)
     __u64 flags;
 
     __u64 rsvd1;
@@ -736,7 +766,18 @@ struct drm_i915_gem_execbuffer2 {
  */
 #define I915_EXEC_HANDLE_LUT (1<<12)
 
-#define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_HANDLE_LUT<<1)
+/** Used for switching BSD rings on the platforms with two BSD rings */
+#define I915_EXEC_BSD_MASK    (3<<13)
+#define I915_EXEC_BSD_DEFAULT (0<<13) /* default ping-pong mode */
+#define I915_EXEC_BSD_RING1   (1<<13)
+#define I915_EXEC_BSD_RING2   (2<<13)
+
+/** Tell the kernel that the batchbuffer is processed by
+ *  the resource streamer.
+ */
+#define I915_EXEC_RESOURCE_STREAMER (1<<15)
+
+#define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_RESOURCE_STREAMER<<1)
 
 #define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
 #define i915_execbuffer2_set_context_id(eb2, context) \
@@ -972,6 +1013,7 @@ struct drm_intel_overlay_put_image {
 /* flags */
 #define I915_OVERLAY_UPDATE_ATTRS (1<<0)
 #define I915_OVERLAY_UPDATE_GAMMA (1<<1)
+#define I915_OVERLAY_DISABLE_DEST_COLORKEY (1<<2)
 struct drm_intel_overlay_attrs {
     __u32 flags;
     __u32 color_key;
@@ -1038,9 +1080,23 @@ struct drm_i915_gem_context_destroy {
 };
 
 struct drm_i915_reg_read {
+    /*
+     * Register offset.
+     * For 64bit wide registers where the upper 32bits don't immediately
+     * follow the lower 32bits, the offset of the lower 32bits must
+     * be specified.
+     */
     __u64 offset;
     __u64 val; /* Return value */
 };
+
+/* Known registers:
+ *
+ * Render engine timestamp - 0x2358 + 64bit - gen7+
+ * - Note this register returns an invalid value if using the default
+ *   single instruction 8byte read; to work around that, use
+ *   offset (0x2358 | 1) instead.
+ *
+ */
 
 struct drm_i915_reset_stats {
     __u32 ctx_id;
@@ -1065,13 +1121,23 @@ struct drm_i915_gem_userptr {
 #define I915_USERPTR_READ_ONLY 0x1
 #define I915_USERPTR_UNSYNCHRONIZED 0x80000000
     /**
-     * Returned handle for the object.
-     *
-     * Object handles are nonzero.
-     */
+     * Returned handle for the object.
+     *
+     * Object handles are nonzero.
+     */
     __u32 handle;
 };
 
+struct drm_i915_gem_context_param {
+    __u32 ctx_id;
+    __u32 size;
+    __u64 param;
+#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
+#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2
+#define I915_CONTEXT_PARAM_GTT_SIZE   0x3
+    __u64 value;
+};
+
 struct drm_i915_mask {
     __u32 handle;
     __u32 width;
@@ -1102,4 +1168,4 @@ struct drm_i915_mask_update {
     __u32 forced;
 };
 
-#endif /* _I915_DRM_H_ */
+#endif /* _I915_DRM_H_ */
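struct drm_i915_gem_context_param travels through the context get/set-param ioctls, which this port does not map to SRV codes; assuming a wrapper named DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM as in upstream libdrm, a query would look like:

    struct drm_i915_gem_context_param p;

    memset(&p, 0, sizeof(p));
    p.ctx_id = 0;                       /* default context */
    p.param  = I915_CONTEXT_PARAM_GTT_SIZE;

    if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p) == 0)
        /* p.value now holds the usable GTT size in bytes */;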
diff --git a/contrib/sdk/sources/libdrm/intel/intel_bufmgr.c b/contrib/sdk/sources/libdrm/intel/intel_bufmgr.c
index a017823973..44d29a37dd 100644
--- a/contrib/sdk/sources/libdrm/intel/intel_bufmgr.c
+++ b/contrib/sdk/sources/libdrm/intel/intel_bufmgr.c
@@ -37,6 +37,7 @@
 #include
 #include
 //#include
+#include "libdrm_macros.h"
 #include "intel_bufmgr.h"
 #include "intel_bufmgr_priv.h"
 #include "xf86drm.h"
@@ -46,20 +47,34 @@
  * Convenience functions for buffer management methods.
  */
 
-drm_intel_bo *drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
-                                 unsigned long size, unsigned int alignment)
+drm_intel_bo *
+drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
+                   unsigned long size, unsigned int alignment)
 {
     return bufmgr->bo_alloc(bufmgr, name, size, alignment);
 }
 
-drm_intel_bo *drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
-                                            const char *name,
-                                            unsigned long size,
-                                            unsigned int alignment)
+drm_intel_bo *
+drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr, const char *name,
+                              unsigned long size, unsigned int alignment)
 {
     return bufmgr->bo_alloc_for_render(bufmgr, name, size, alignment);
 }
 
+drm_intel_bo *
+drm_intel_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
+                           const char *name, void *addr,
+                           uint32_t tiling_mode,
+                           uint32_t stride,
+                           unsigned long size,
+                           unsigned long flags)
+{
+    if (bufmgr->bo_alloc_userptr)
+        return bufmgr->bo_alloc_userptr(bufmgr, name, addr, tiling_mode,
+                                        stride, size, flags);
+    return NULL;
+}
+
 drm_intel_bo *
 drm_intel_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
                          int x, int y, int cpp, uint32_t *tiling_mode,
@@ -69,12 +84,14 @@ drm_intel_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
                                    tiling_mode, pitch, flags);
 }
 
-void drm_intel_bo_reference(drm_intel_bo *bo)
+void
+drm_intel_bo_reference(drm_intel_bo *bo)
 {
     bo->bufmgr->bo_reference(bo);
 }
 
-void drm_intel_bo_unreference(drm_intel_bo *bo)
+void
+drm_intel_bo_unreference(drm_intel_bo *bo)
 {
     if (bo == NULL)
         return;
@@ -82,12 +99,14 @@ void drm_intel_bo_unreference(drm_intel_bo *bo)
     bo->bufmgr->bo_unreference(bo);
 }
 
-int drm_intel_bo_map(drm_intel_bo *buf, int write_enable)
+int
+drm_intel_bo_map(drm_intel_bo *buf, int write_enable)
 {
     return buf->bufmgr->bo_map(buf, write_enable);
 }
 
-int drm_intel_bo_unmap(drm_intel_bo *buf)
+int
+drm_intel_bo_unmap(drm_intel_bo *buf)
 {
     return buf->bufmgr->bo_unmap(buf);
 }
@@ -104,8 +123,8 @@ drm_intel_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
                          unsigned long size, void *data)
 {
     int ret;
-//  if (bo->bufmgr->bo_get_subdata)
-//      return bo->bufmgr->bo_get_subdata(bo, offset, size, data);
+    if (bo->bufmgr->bo_get_subdata)
+        return bo->bufmgr->bo_get_subdata(bo, offset, size, data);
 
     if (size == 0 || data == NULL)
         return 0;
@@ -118,12 +137,14 @@ drm_intel_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
     return 0;
 }
 
-void drm_intel_bo_wait_rendering(drm_intel_bo *bo)
+void
+drm_intel_bo_wait_rendering(drm_intel_bo *bo)
 {
     bo->bufmgr->bo_wait_rendering(bo);
 }
 
-void drm_intel_bufmgr_destroy(drm_intel_bufmgr *bufmgr)
+void
+drm_intel_bufmgr_destroy(drm_intel_bufmgr *bufmgr)
 {
     bufmgr->destroy(bufmgr);
 }
@@ -155,17 +176,20 @@ drm_intel_bo_mrb_exec(drm_intel_bo *bo, int used,
     }
 }
 
-void drm_intel_bufmgr_set_debug(drm_intel_bufmgr *bufmgr, int enable_debug)
+void
+drm_intel_bufmgr_set_debug(drm_intel_bufmgr *bufmgr, int enable_debug)
 {
     bufmgr->debug = enable_debug;
 }
 
-int drm_intel_bufmgr_check_aperture_space(drm_intel_bo ** bo_array, int count)
+int
+drm_intel_bufmgr_check_aperture_space(drm_intel_bo ** bo_array, int count)
 {
     return bo_array[0]->bufmgr->check_aperture_space(bo_array, count);
 }
 
-int drm_intel_bo_flink(drm_intel_bo *bo, uint32_t * name)
+int
+drm_intel_bo_flink(drm_intel_bo *bo, uint32_t * name)
 {
     if (bo->bufmgr->bo_flink)
         return bo->bufmgr->bo_flink(bo, name);
@@ -195,7 +219,8 @@ drm_intel_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
 }
 
 
-int drm_intel_bo_pin(drm_intel_bo *bo, uint32_t alignment)
+int
+drm_intel_bo_pin(drm_intel_bo *bo, uint32_t alignment)
 {
     if (bo->bufmgr->bo_pin)
         return bo->bufmgr->bo_pin(bo, alignment);
 
     return -ENODEV;
 }
 
-int drm_intel_bo_unpin(drm_intel_bo *bo)
+int
+drm_intel_bo_unpin(drm_intel_bo *bo)
 {
     if (bo->bufmgr->bo_unpin)
         return bo->bufmgr->bo_unpin(bo);
 
     return -ENODEV;
 }
 
-int drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
-                            uint32_t stride)
+int
+drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
+                        uint32_t stride)
 {
     if (bo->bufmgr->bo_set_tiling)
         return bo->bufmgr->bo_set_tiling(bo, tiling_mode, stride);
 
     *tiling_mode = I915_TILING_NONE;
     return 0;
 }
 
-int drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
-                            uint32_t * swizzle_mode)
+int
+drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
+                        uint32_t * swizzle_mode)
 {
     if (bo->bufmgr->bo_get_tiling)
         return bo->bufmgr->bo_get_tiling(bo, tiling_mode, swizzle_mode);
@@ -232,35 +260,60 @@ int drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
     return 0;
 }
 
-int drm_intel_bo_disable_reuse(drm_intel_bo *bo)
+int
+drm_intel_bo_set_softpin_offset(drm_intel_bo *bo, uint64_t offset)
+{
+    if (bo->bufmgr->bo_set_softpin_offset)
+        return bo->bufmgr->bo_set_softpin_offset(bo, offset);
+
+    return -ENODEV;
+}
+
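Caller-side sketch of the softpin hook just added: pin a BO at a fixed GTT address so batches can embed its address directly instead of emitting relocations; the address is illustrative, and -ENODEV means the bufmgr has no softpin support:

    uint64_t fixed = 64ull * 1024 * 1024;   /* illustrative address */

    if (drm_intel_bo_set_softpin_offset(bo, fixed) == 0)
        /* bo is placed at 'fixed'; execbuffer reports a clash as an error */;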
+int
+drm_intel_bo_disable_reuse(drm_intel_bo *bo)
 {
     if (bo->bufmgr->bo_disable_reuse)
         return bo->bufmgr->bo_disable_reuse(bo);
     return 0;
 }
 
-int drm_intel_bo_is_reusable(drm_intel_bo *bo)
+int
+drm_intel_bo_is_reusable(drm_intel_bo *bo)
 {
     if (bo->bufmgr->bo_is_reusable)
         return bo->bufmgr->bo_is_reusable(bo);
     return 0;
 }
 
-int drm_intel_bo_busy(drm_intel_bo *bo)
+int
+drm_intel_bo_busy(drm_intel_bo *bo)
 {
     if (bo->bufmgr->bo_busy)
         return bo->bufmgr->bo_busy(bo);
     return 0;
 }
 
-int drm_intel_bo_madvise(drm_intel_bo *bo, int madv)
+int
+drm_intel_bo_madvise(drm_intel_bo *bo, int madv)
 {
     if (bo->bufmgr->bo_madvise)
         return bo->bufmgr->bo_madvise(bo, madv);
     return -1;
 }
 
-int drm_intel_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
+int
+drm_intel_bo_use_48b_address_range(drm_intel_bo *bo, uint32_t enable)
+{
+    if (bo->bufmgr->bo_use_48b_address_range) {
+        bo->bufmgr->bo_use_48b_address_range(bo, enable);
+        return 0;
+    }
+
+    return -ENODEV;
+}
+
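And the matching opt-in for the gen8+ full address space; per the i915_drm.h comment earlier in this patch, objects otherwise stay in a 32-bit range:

    if (drm_intel_bo_use_48b_address_range(bo, 1) == -ENODEV)
        /* bufmgr without 48-bit support: keep 32-bit addressing */;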
+int
+drm_intel_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
 {
     return bo->bufmgr->bo_references(bo, target_bo);
 }
@@ -295,9 +348,8 @@ err:
 }
 #endif
 
-int drm_intel_get_aperture_sizes(int fd,
-                                 size_t *mappable,
-                                 size_t *total)
+int
+drm_intel_get_aperture_sizes(int fd, size_t *mappable, size_t *total)
 {
     struct drm_i915_gem_get_aperture aperture;
diff --git a/contrib/sdk/sources/libdrm/intel/intel_bufmgr.h b/contrib/sdk/sources/libdrm/intel/intel_bufmgr.h
index 50849ac208..9c2d72b255 100644
--- a/contrib/sdk/sources/libdrm/intel/intel_bufmgr.h
+++ b/contrib/sdk/sources/libdrm/intel/intel_bufmgr.h
@@ -38,6 +38,10 @@
 #include
 #include
 
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
 struct drm_clip_rect;
 
 typedef struct _drm_intel_bufmgr drm_intel_bufmgr;
@@ -113,6 +117,11 @@ drm_intel_bo *drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
                                             const char *name,
                                             unsigned long size,
                                             unsigned int alignment);
+drm_intel_bo *drm_intel_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
+                                         const char *name,
+                                         void *addr, uint32_t tiling_mode,
+                                         uint32_t stride, unsigned long size,
+                                         unsigned long flags);
 drm_intel_bo *drm_intel_bo_alloc_tiled(drm_intel_bufmgr *bufmgr,
                                        const char *name,
                                        int x, int y, int cpp,
@@ -155,6 +164,8 @@ int drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
 int drm_intel_bo_flink(drm_intel_bo *bo, uint32_t * name);
 int drm_intel_bo_busy(drm_intel_bo *bo);
 int drm_intel_bo_madvise(drm_intel_bo *bo, int madv);
+int drm_intel_bo_use_48b_address_range(drm_intel_bo *bo, uint32_t enable);
+int drm_intel_bo_set_softpin_offset(drm_intel_bo *bo, uint64_t offset);
 
 int drm_intel_bo_disable_reuse(drm_intel_bo *bo);
 int drm_intel_bo_is_reusable(drm_intel_bo *bo);
@@ -263,6 +274,9 @@ int drm_intel_get_reset_stats(drm_intel_context *ctx,
                               uint32_t *active,
                               uint32_t *pending);
 
+int drm_intel_get_subslice_total(int fd, unsigned int *subslice_total);
+int drm_intel_get_eu_total(int fd, unsigned int *eu_total);
+
 /** @{ Compatibility defines to keep old code building despite the symbol rename
  * from dri_* to drm_intel_*
  */
@@ -304,4 +318,8 @@
 /** @{ */
 
+#if defined(__cplusplus)
+}
+#endif
+
 #endif /* INTEL_BUFMGR_H */
diff --git a/contrib/sdk/sources/libdrm/intel/intel_bufmgr_gem.c b/contrib/sdk/sources/libdrm/intel/intel_bufmgr_gem.c
index 4313ab50da..428809c18e 100644
--- a/contrib/sdk/sources/libdrm/intel/intel_bufmgr_gem.c
+++ b/contrib/sdk/sources/libdrm/intel/intel_bufmgr_gem.c
@@ -47,17 +47,19 @@
 #include
 #include
 //#include
+#include
+#include
 #include
 #include "errno.h"
 #ifndef ETIME
 #define ETIME ETIMEDOUT
 #endif
+#include "libdrm_macros.h"
 #include "libdrm_lists.h"
 #include "intel_bufmgr.h"
 #include "intel_bufmgr_priv.h"
 #include "intel_chipset.h"
-#include "intel_aub.h"
 #include "string.h"
 
 #include "i915_drm.h"
@@ -70,17 +72,35 @@
 #define VG(x)
 #endif
 
-#define VG_CLEAR(s) VG(memset(&s, 0, sizeof(s)))
+#define memclear(s) memset(&s, 0, sizeof(s))
 
 #if 0
 #define DBG(...) do {                   \
-    fprintf(stderr, __VA_ARGS__);       \
+    if (bufmgr_gem->bufmgr.debug)       \
+        fprintf(stderr, __VA_ARGS__);   \
 } while (0)
 #else
 #define DBG(...)
 #endif
 
 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#define MAX2(A, B) ((A) > (B) ? (A) : (B))
+
+/**
+ * upper_32_bits - return bits 32-63 of a number
+ * @n: the number we're accessing
+ *
+ * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress
+ * the "right shift count >= width of type" warning when that quantity is
+ * 32-bits.
+ */
+#define upper_32_bits(n) ((__u32)(((n) >> 16) >> 16))
+
+/**
+ * lower_32_bits - return bits 0-31 of a number
+ * @n: the number we're accessing
+ */
+#define lower_32_bits(n) ((__u32)(n))
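These two helpers feed the split "0x%08x %08x" format used by the DBG() calls later in this file, for example:

    uint64_t offset64 = 0x1ffff0000ull;

    printf("offset 0x%08x %08x\n",
           upper_32_bits(offset64),    /* prints 00000001 */
           lower_32_bits(offset64));   /* prints ffff0000 */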
 
 typedef struct _drm_intel_bo_gem drm_intel_bo_gem;
 
 struct drm_intel_gem_bo_bucket {
@@ -92,6 +112,8 @@ struct drm_intel_gem_bo_bucket {
 typedef struct _drm_intel_bufmgr_gem {
     drm_intel_bufmgr bufmgr;
 
+    atomic_t refcount;
+
     int fd;
 
     int max_relocs;
@@ -109,6 +131,8 @@ typedef struct _drm_intel_bufmgr_gem {
     int num_buckets;
     time_t time;
 
+    drmMMListHead managers;
+
     drmMMListHead named;
     drmMMListHead vma_cache;
     int vma_count, vma_open, vma_max;
@@ -127,9 +151,11 @@ typedef struct _drm_intel_bufmgr_gem {
     unsigned int has_vebox : 1;
     bool fenced_relocs;
 
-    char *aub_filename;
-    FILE *aub_file;
-    uint32_t aub_offset;
+    struct {
+        void *ptr;
+        uint32_t handle;
+    } userptr_active;
+
 } drm_intel_bufmgr_gem;
 
 #define DRM_INTEL_RELOC_FENCE (1<<0)
@@ -177,10 +203,22 @@ struct _drm_intel_bo_gem {
     drm_intel_reloc_target *reloc_target_info;
     /** Number of entries in relocs */
     int reloc_count;
+    /** Array of BOs that are referenced by this buffer and will be softpinned */
+    drm_intel_bo **softpin_target;
+    /** Number of softpinned BOs that are referenced by this buffer */
+    int softpin_target_count;
+    /** Maximum number of softpinned BOs that are referenced by this buffer */
+    int softpin_target_size;
+
     /** Mapped address for the buffer, saved across map/unmap cycles */
     void *mem_virtual;
     /** GTT virtual address for the buffer, saved across map/unmap cycles */
     void *gtt_virtual;
+    /**
+     * Virtual address of the buffer allocated by user, used for userptr
+     * objects only.
+     */
+    void *user_virtual;
     int map_count;
     drmMMListHead vma_list;
@@ -219,6 +257,25 @@ struct _drm_intel_bo_gem {
      */
     bool idle;
 
+    /**
+     * Boolean of whether this buffer was allocated with userptr
+     */
+    bool is_userptr;
+
+    /**
+     * Boolean of whether this buffer can be placed in the full 48-bit
+     * address range on gen8+.
+     *
+     * By default, buffers will be kept in a 32-bit range, unless this
+     * flag is explicitly set.
+     */
+    bool use_48b_address_range;
+
+    /**
+     * Whether this buffer is softpinned at the offset specified by the user
+     */
+    bool is_softpin;
+
     /**
      * Size in bytes of this buffer and its relocation descendants.
      *
@@ -235,11 +292,6 @@ struct _drm_intel_bo_gem {
 
     /** Flags that we may need to do the SW_FINISH ioctl on unmap. */
     bool mapped_cpu_write;
-
-    uint32_t aub_offset;
-
-    drm_intel_aub_annotation *aub_annotations;
-    unsigned aub_annotation_count;
 };
 
 static unsigned int
@@ -264,6 +316,11 @@ static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);
 
 static void drm_intel_gem_bo_free(drm_intel_bo *bo);
 
+static inline drm_intel_bo_gem *to_bo_gem(drm_intel_bo *bo)
+{
+    return (drm_intel_bo_gem *)bo;
+}
+
 static unsigned long
 drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
                            uint32_t *tiling_mode)
@@ -372,8 +429,9 @@ drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
         drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
         drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
 
-        if (bo_gem->relocs == NULL) {
-            DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
+        if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL) {
+            DBG("%2d: %d %s(%s)\n", i, bo_gem->gem_handle,
+                bo_gem->is_softpin ? "*" : "",
                 bo_gem->name);
             continue;
         }
@@ -383,16 +441,36 @@ drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
             drm_intel_bo_gem *target_gem =
                 (drm_intel_bo_gem *) target_bo;
 
-            DBG("%2d: %d (%s)@0x%08llx -> "
-                "%d (%s)@0x%08lx + 0x%08x\n",
+            DBG("%2d: %d %s(%s)@0x%08x %08x -> "
+                "%d (%s)@0x%08x %08x + 0x%08x\n",
                 i,
-                bo_gem->gem_handle, bo_gem->name,
-                (unsigned long long)bo_gem->relocs[j].offset,
+                bo_gem->gem_handle,
+                bo_gem->is_softpin ? "*" : "",
+                bo_gem->name,
+                upper_32_bits(bo_gem->relocs[j].offset),
+                lower_32_bits(bo_gem->relocs[j].offset),
                 target_gem->gem_handle,
                 target_gem->name,
-                target_bo->offset64,
+                upper_32_bits(target_bo->offset64),
+                lower_32_bits(target_bo->offset64),
                 bo_gem->relocs[j].delta);
         }
+
+        for (j = 0; j < bo_gem->softpin_target_count; j++) {
+            drm_intel_bo *target_bo = bo_gem->softpin_target[j];
+            drm_intel_bo_gem *target_gem =
+                (drm_intel_bo_gem *) target_bo;
+            DBG("%2d: %d %s(%s) -> "
+                "%d *(%s)@0x%08x %08x\n",
+                i,
+                bo_gem->gem_handle,
+                bo_gem->is_softpin ? "*" : "",
+                bo_gem->name,
+                target_gem->gem_handle,
+                target_gem->name,
+                upper_32_bits(target_bo->offset64),
+                lower_32_bits(target_bo->offset64));
+        }
     }
 }
@@ -444,7 +522,7 @@ drm_intel_add_validate_buffer(drm_intel_bo *bo)
     bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
     bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
     bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
-    bufmgr_gem->exec_objects[index].alignment = 0;
+    bufmgr_gem->exec_objects[index].alignment = bo->align;
     bufmgr_gem->exec_objects[index].offset = 0;
     bufmgr_gem->exec_bos[index] = bo;
     bufmgr_gem->exec_count++;
@@ -456,11 +534,17 @@ drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
     drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
     drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
     int index;
+    int flags = 0;
+
+    if (need_fence)
+        flags |= EXEC_OBJECT_NEEDS_FENCE;
+    if (bo_gem->use_48b_address_range)
+        flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
+    if (bo_gem->is_softpin)
+        flags |= EXEC_OBJECT_PINNED;
 
     if (bo_gem->validate_index != -1) {
-        if (need_fence)
-            bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |=
-                EXEC_OBJECT_NEEDS_FENCE;
+        bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |= flags;
         return;
     }
@@ -486,16 +570,13 @@ drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
     bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
     bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
     bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
-    bufmgr_gem->exec2_objects[index].alignment = 0;
-    bufmgr_gem->exec2_objects[index].offset = 0;
+    bufmgr_gem->exec2_objects[index].alignment = bo->align;
+    bufmgr_gem->exec2_objects[index].offset = bo_gem->is_softpin ?
+        bo->offset64 : 0;
     bufmgr_gem->exec_bos[index] = bo;
-    bufmgr_gem->exec2_objects[index].flags = 0;
+    bufmgr_gem->exec2_objects[index].flags = flags;
     bufmgr_gem->exec2_objects[index].rsvd1 = 0;
     bufmgr_gem->exec2_objects[index].rsvd2 = 0;
-    if (need_fence) {
-        bufmgr_gem->exec2_objects[index].flags |=
-            EXEC_OBJECT_NEEDS_FENCE;
-    }
     bufmgr_gem->exec_count++;
 }
@@ -504,9 +585,10 @@
 static void
 drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
-                                      drm_intel_bo_gem *bo_gem)
+                                      drm_intel_bo_gem *bo_gem,
+                                      unsigned int alignment)
 {
-    int size;
+    unsigned int size;
 
     assert(!bo_gem->used_as_reloc_target);
@@ -518,7 +600,7 @@ drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
      */
     size = bo_gem->bo.size;
     if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
-        int min_size;
+        unsigned int min_size;
 
         if (bufmgr_gem->has_relaxed_fencing) {
             if (bufmgr_gem->gen == 3)
@@ -532,10 +614,10 @@ drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
             min_size = size;
 
         /* Account for worst-case alignment. */
-        size = 2 * min_size;
+        alignment = MAX2(alignment, min_size);
     }
 
-    bo_gem->reloc_tree_size = size;
+    bo_gem->reloc_tree_size = size + alignment;
 }
 
 static int
@@ -578,7 +660,7 @@ drm_intel_gem_bo_busy(drm_intel_bo *bo)
     if (bo_gem->reusable && bo_gem->idle)
         return false;
 
-    VG_CLEAR(busy);
+    memclear(busy);
     busy.handle = bo_gem->gem_handle;
 
     ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
@@ -597,7 +679,7 @@ drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
 {
     struct drm_i915_gem_madvise madv;
 
-    VG_CLEAR(madv);
+    memclear(madv);
     madv.handle = bo_gem->gem_handle;
     madv.madv = state;
     madv.retained = 1;
@@ -640,7 +722,8 @@ drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
                                 unsigned long size,
                                 unsigned long flags,
                                 uint32_t tiling_mode,
-                                unsigned long stride)
+                                unsigned long stride,
+                                unsigned int alignment)
 {
     drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
     drm_intel_bo_gem *bo_gem;
@@ -682,7 +765,9 @@ retry:
                           bucket->head.prev, head);
             DRMLISTDEL(&bo_gem->head);
             alloc_from_cache = true;
+            bo_gem->bo.align = alignment;
         } else {
+            assert(alignment == 0);
             /* For non-render-target BOs (where we're probably
              * going to map it first thing in order to fill it
              * with data), check if the last BO in the cache is
@@ -726,7 +811,7 @@ retry:
 
         bo_gem->bo.size = bo_size;
 
-        VG_CLEAR(create);
+        memclear(create);
         create.size = bo_size;
 
         ret = drmIoctl(bufmgr_gem->fd,
@@ -739,20 +824,22 @@ retry:
             return NULL;
         }
         bo_gem->bo.bufmgr = bufmgr;
+        bo_gem->bo.align = alignment;
 
         bo_gem->tiling_mode = I915_TILING_NONE;
         bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
         bo_gem->stride = 0;
 
+        /* drm_intel_gem_bo_free calls DRMLISTDEL() for an uninitialized
+           list (vma_list), so better set the list head here */
+        DRMINITLISTHEAD(&bo_gem->name_list);
+        DRMINITLISTHEAD(&bo_gem->vma_list);
         if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
                                                  tiling_mode,
                                                  stride)) {
             drm_intel_gem_bo_free(&bo_gem->bo);
             return NULL;
         }
-
-        DRMINITLISTHEAD(&bo_gem->name_list);
-        DRMINITLISTHEAD(&bo_gem->vma_list);
     }
 
     bo_gem->name = name;
@@ -762,10 +849,9 @@ retry:
     bo_gem->used_as_reloc_target = false;
     bo_gem->has_error = false;
     bo_gem->reusable = true;
-    bo_gem->aub_annotations = NULL;
-    bo_gem->aub_annotation_count = 0;
+    bo_gem->use_48b_address_range = false;
 
-    drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
+    drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, alignment);
 
     DBG("bo_create: buf %d (%s) %ldb\n",
         bo_gem->gem_handle, bo_gem->name, size);
@@ -781,7 +867,8 @@ drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
 {
     return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
                                            BO_ALLOC_FOR_RENDER,
-                                           I915_TILING_NONE, 0);
+                                           I915_TILING_NONE, 0,
+                                           alignment);
 }
 
 static drm_intel_bo *
@@ -791,7 +878,7 @@ drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
                        unsigned int alignment)
 {
     return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
-                                           I915_TILING_NONE, 0);
+                                           I915_TILING_NONE, 0, 0);
 }
 
 static drm_intel_bo *
@@ -843,7 +930,146 @@ drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
         stride = 0;
 
     return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
-                                           tiling, stride);
+                                           tiling, stride, 0);
 }
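Caller-side sketch of the tiled path; the bufmgr may legitimately come back with I915_TILING_NONE when the request cannot be honoured, so callers re-check the mode:

    uint32_t tiling = I915_TILING_X;
    unsigned long pitch = 0;
    drm_intel_bo *scanout =
        drm_intel_bo_alloc_tiled(bufmgr, "scanout", 1920, 1080, 4,
                                 &tiling, &pitch, 0);

    if (scanout && tiling == I915_TILING_NONE)
        /* fell back to linear; pitch is still the one to use */;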
+
+#if 0
+static drm_intel_bo *
+drm_intel_gem_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
+                               const char *name,
+                               void *addr,
+                               uint32_t tiling_mode,
+                               uint32_t stride,
+                               unsigned long size,
+                               unsigned long flags)
+{
+    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
+    drm_intel_bo_gem *bo_gem;
+    int ret;
+    struct drm_i915_gem_userptr userptr;
+
+    /* Tiling with userptr surfaces is not supported
+     * on all hardware so refuse it for time being.
+     */
+    if (tiling_mode != I915_TILING_NONE)
+        return NULL;
+
+    bo_gem = calloc(1, sizeof(*bo_gem));
+    if (!bo_gem)
+        return NULL;
+
+    bo_gem->bo.size = size;
+
+    memclear(userptr);
+    userptr.user_ptr = (__u64)((unsigned long)addr);
+    userptr.user_size = size;
+    userptr.flags = flags;
+
+    ret = drmIoctl(bufmgr_gem->fd,
+                   DRM_IOCTL_I915_GEM_USERPTR,
+                   &userptr);
+    if (ret != 0) {
+        DBG("bo_create_userptr: "
+            "ioctl failed with user ptr %p size 0x%lx, "
+            "user flags 0x%lx\n", addr, size, flags);
+        free(bo_gem);
+        return NULL;
+    }
+
+    bo_gem->gem_handle = userptr.handle;
+    bo_gem->bo.handle = bo_gem->gem_handle;
+    bo_gem->bo.bufmgr = bufmgr;
+    bo_gem->is_userptr = true;
+    bo_gem->bo.virtual = addr;
+    /* Save the address provided by user */
+    bo_gem->user_virtual = addr;
+    bo_gem->tiling_mode = I915_TILING_NONE;
+    bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+    bo_gem->stride = 0;
+
+    DRMINITLISTHEAD(&bo_gem->name_list);
+    DRMINITLISTHEAD(&bo_gem->vma_list);
+
+    bo_gem->name = name;
+    atomic_set(&bo_gem->refcount, 1);
+    bo_gem->validate_index = -1;
+    bo_gem->reloc_tree_fences = 0;
+    bo_gem->used_as_reloc_target = false;
+    bo_gem->has_error = false;
+    bo_gem->reusable = false;
+    bo_gem->use_48b_address_range = false;
+
+    drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
+
+    DBG("bo_create_userptr: "
+        "ptr %p buf %d (%s) size %ldb, stride 0x%x, tile mode %d\n",
+        addr, bo_gem->gem_handle, bo_gem->name,
+        size, stride, tiling_mode);
+
+    return &bo_gem->bo;
+}
+
+static bool
+has_userptr(drm_intel_bufmgr_gem *bufmgr_gem)
+{
+    int ret;
+    void *ptr;
+    long pgsz;
+    struct drm_i915_gem_userptr userptr;
+
+    pgsz = sysconf(_SC_PAGESIZE);
+    assert(pgsz > 0);
+
+    ret = posix_memalign(&ptr, pgsz, pgsz);
+    if (ret) {
+        DBG("Failed to get a page (%ld) for userptr detection!\n",
+            pgsz);
+        return false;
+    }
+
+    memclear(userptr);
+    userptr.user_ptr = (__u64)(unsigned long)ptr;
+    userptr.user_size = pgsz;
+
+retry:
+    ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
+    if (ret) {
+        if (errno == ENODEV && userptr.flags == 0) {
+            userptr.flags = I915_USERPTR_UNSYNCHRONIZED;
+            goto retry;
+        }
+        free(ptr);
+        return false;
+    }
+
+    /* We don't release the userptr bo here as we want to keep the
+     * kernel mm tracking alive for our lifetime. The first time we
+     * create a userptr object the kernel has to install an mmu_notifier
+     * which is a heavyweight operation (e.g. it requires taking all
+     * mm_locks and stop_machine()).
+     */
+
+    bufmgr_gem->userptr_active.ptr = ptr;
+    bufmgr_gem->userptr_active.handle = userptr.handle;
+
+    return true;
+}
+
+#endif
+
+static drm_intel_bo *
+check_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
+                       const char *name,
+                       void *addr,
+                       uint32_t tiling_mode,
+                       uint32_t stride,
+                       unsigned long size,
+                       unsigned long flags)
+{
+    bufmgr->bo_alloc_userptr = NULL;
+
+    return drm_intel_bo_alloc_userptr(bufmgr, name, addr,
+                                      tiling_mode, stride, size, flags);
+}
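Since the stub above clears bo_alloc_userptr and retries, userptr allocation in this port always comes back NULL and callers take a copy path; a sketch of the expected fallback:

    drm_intel_bo *bo = drm_intel_bo_alloc_userptr(bufmgr, "wrap", data,
                                                  I915_TILING_NONE, 0,
                                                  size, 0);
    if (bo == NULL) {
        bo = drm_intel_bo_alloc(bufmgr, "copy", size, 4096);
        drm_intel_bo_subdata(bo, 0, size, data);  /* upload by copy */
    }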
 
 /**
@@ -880,7 +1106,7 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
         }
     }
 
-    VG_CLEAR(open_arg);
+    memclear(open_arg);
     open_arg.name = handle;
     ret = drmIoctl(bufmgr_gem->fd,
                    DRM_IOCTL_GEM_OPEN,
@@ -920,8 +1146,9 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
     bo_gem->bo.handle = open_arg.handle;
     bo_gem->global_name = handle;
     bo_gem->reusable = false;
+    bo_gem->use_48b_address_range = false;
 
-    VG_CLEAR(get_tiling);
+    memclear(get_tiling);
     get_tiling.handle = bo_gem->gem_handle;
     ret = drmIoctl(bufmgr_gem->fd,
                    DRM_IOCTL_I915_GEM_GET_TILING,
@@ -933,7 +1160,7 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
     bo_gem->tiling_mode = get_tiling.tiling_mode;
     bo_gem->swizzle_mode = get_tiling.swizzle_mode;
     /* XXX stride is unknown */
-    drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
+    drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
 
     DRMINITLISTHEAD(&bo_gem->vma_list);
     DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
@@ -960,14 +1187,13 @@ drm_intel_gem_bo_free(drm_intel_bo *bo)
     }
 
     /* Close this object */
-    VG_CLEAR(close);
+    memclear(close);
     close.handle = bo_gem->gem_handle;
     ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
     if (ret != 0) {
         DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
             bo_gem->gem_handle, bo_gem->name, strerror(errno));
     }
-    free(bo_gem->aub_annotations);
     free(bo);
 }
@@ -1092,8 +1318,12 @@ drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
                                        time);
         }
     }
+    for (i = 0; i < bo_gem->softpin_target_count; i++)
+        drm_intel_gem_bo_unreference_locked_timed(bo_gem->softpin_target[i],
+                                                  time);
     bo_gem->reloc_count = 0;
     bo_gem->used_as_reloc_target = false;
+    bo_gem->softpin_target_count = 0;
 
     DBG("bo_unreference final: %d (%s)\n",
         bo_gem->gem_handle, bo_gem->name);
@@ -1107,6 +1337,11 @@ drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
         free(bo_gem->relocs);
         bo_gem->relocs = NULL;
     }
+    if (bo_gem->softpin_target) {
+        free(bo_gem->softpin_target);
+        bo_gem->softpin_target = NULL;
+        bo_gem->softpin_target_size = 0;
+    }
 
     /* Clear any left-over mappings */
     if (bo_gem->map_count) {
@@ -1149,16 +1384,21 @@ static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
     drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
 
     assert(atomic_read(&bo_gem->refcount) > 0);
-    if (atomic_dec_and_test(&bo_gem->refcount)) {
+
+    if (atomic_add_unless(&bo_gem->refcount, -1, 1)) {
         drm_intel_bufmgr_gem *bufmgr_gem =
             (drm_intel_bufmgr_gem *) bo->bufmgr;
-//      struct timespec time;
+        struct timespec time;
 
-//      clock_gettime(CLOCK_MONOTONIC, &time);
+        clock_gettime(CLOCK_MONOTONIC, &time);
 
 //      pthread_mutex_lock(&bufmgr_gem->lock);
-        drm_intel_gem_bo_unreference_final(bo, 0);
-        drm_intel_gem_cleanup_bo_cache(bufmgr_gem, 0);
+
+        if (atomic_dec_and_test(&bo_gem->refcount)) {
+            drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
+            drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
+        }
+
 //      pthread_mutex_unlock(&bufmgr_gem->lock);
     }
 }
@@ -1170,6 +1410,12 @@ static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
     struct drm_i915_gem_set_domain set_domain;
     int ret;
 
+    if (bo_gem->is_userptr) {
+        /* Return the same user ptr */
+        bo->virtual = bo_gem->user_virtual;
+        return 0;
+    }
+
 //  pthread_mutex_lock(&bufmgr_gem->lock);
 
     if (bo_gem->map_count++ == 0)
@@ -1181,9 +1427,8 @@ static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
         DBG("bo_map: %d (%s), map_count=%d\n",
             bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
 
-        VG_CLEAR(mmap_arg);
+        memclear(mmap_arg);
         mmap_arg.handle = bo_gem->gem_handle;
-        mmap_arg.offset = 0;
         mmap_arg.size = bo->size;
         ret = drmIoctl(bufmgr_gem->fd,
                        DRM_IOCTL_I915_GEM_MMAP,
@@ -1205,7 +1450,7 @@ static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
         bo_gem->mem_virtual);
     bo->virtual = bo_gem->mem_virtual;
 
-    VG_CLEAR(set_domain);
+    memclear(set_domain);
     set_domain.handle = bo_gem->gem_handle;
     set_domain.read_domains = I915_GEM_DOMAIN_CPU;
     if (write_enable)
@@ -1238,6 +1483,9 @@ map_gtt(drm_intel_bo *bo)
     drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
     int ret;
 
+    if (bo_gem->is_userptr)
+        return -EINVAL;
+
     if (bo_gem->map_count++ == 0)
         drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
 
@@ -1248,9 +1496,8 @@ map_gtt(drm_intel_bo *bo)
         DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
             bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
 
-        VG_CLEAR(mmap_arg);
+        memclear(mmap_arg);
         mmap_arg.handle = bo_gem->gem_handle;
-        mmap_arg.offset = 0;
 
         /* Get the fake offset back... */
         ret = drmIoctl(bufmgr_gem->fd,
@@ -1268,7 +1515,7 @@ map_gtt(drm_intel_bo *bo)
         }
 
         /* and mmap it */
-        bo_gem->gtt_virtual = mmap_arg.offset;
+        bo_gem->gtt_virtual = (void*)(__u32)mmap_arg.offset;
         if (bo_gem->gtt_virtual == 0) {
             bo_gem->gtt_virtual = NULL;
             ret = -errno;
@@ -1290,7 +1537,8 @@ map_gtt(drm_intel_bo *bo)
     return 0;
 }
 
-int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
+int
+drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
 {
     drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
     drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
@@ -1314,7 +1562,7 @@ int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
      * tell it when we're about to use things if we had done
      * rendering and it still happens to be bound to the GTT.
      */
-    VG_CLEAR(set_domain);
+    memclear(set_domain);
    set_domain.handle = bo_gem->gem_handle;
     set_domain.read_domains = I915_GEM_DOMAIN_GTT;
     set_domain.write_domain = I915_GEM_DOMAIN_GTT;
@@ -1348,7 +1596,8 @@ int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
  * undefined).
  */
 
-int drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
+int
+drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
 {
     drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
 #ifdef HAVE_VALGRIND
@@ -1375,13 +1624,17 @@ int drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
 
 static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
 {
-    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+    drm_intel_bufmgr_gem *bufmgr_gem;
     drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
     int ret = 0;
 
     if (bo == NULL)
         return 0;
 
+    if (bo_gem->is_userptr)
+        return 0;
+
+    bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
 //  pthread_mutex_lock(&bufmgr_gem->lock);
 
     if (bo_gem->map_count <= 0) {
@@ -1419,7 +1672,8 @@ static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
     return ret;
 }
 
-int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
+int
+drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
 {
     return drm_intel_gem_bo_unmap(bo);
 }
@@ -1433,7 +1687,10 @@ drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
     struct drm_i915_gem_pwrite pwrite;
     int ret;
 
-    VG_CLEAR(pwrite);
+    if (bo_gem->is_userptr)
+        return -EINVAL;
+
+    memclear(pwrite);
     pwrite.handle = bo_gem->gem_handle;
     pwrite.offset = offset;
     pwrite.size = size;
@@ -1459,7 +1716,7 @@ drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
     struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
     int ret;
 
-    VG_CLEAR(get_pipe_from_crtc_id);
+    memclear(get_pipe_from_crtc_id);
     get_pipe_from_crtc_id.crtc_id = crtc_id;
     ret = drmIoctl(bufmgr_gem->fd,
                    DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
@@ -1476,6 +1733,7 @@ drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
 
     return get_pipe_from_crtc_id.pipe;
 }
+#endif
 
 static int
 drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
@@ -1486,7 +1744,10 @@ drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
     struct drm_i915_gem_pread pread;
     int ret;
 
-    VG_CLEAR(pread);
+    if (bo_gem->is_userptr)
+        return -EINVAL;
+
+    memclear(pread);
     pread.handle = bo_gem->gem_handle;
     pread.offset = offset;
     pread.size = size;
@@ -1504,8 +1765,6 @@ drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
     return ret;
 }
 
-#endif
-
 /** Waits for all GPU rendering with the object to have completed. */
 static void
 drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
@@ -1536,8 +1795,12 @@ drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
  * not guarantee that the buffer is re-issued via another thread, or a flinked
  * handle. Userspace must make sure this race does not occur if such precision
  * is important.
+ *
+ * Note that some kernels have broken the infinite wait for negative values
+ * promise; upgrade to latest stable kernels if this is the case.
  */
-int drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
+int
+drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
 {
     drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
     drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
@@ -1551,13 +1814,13 @@ int drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
             drm_intel_gem_bo_wait_rendering(bo);
             return 0;
         } else {
-            return drm_intel_gem_bo_busy(bo) ? -1 : 0;
+            return drm_intel_gem_bo_busy(bo) ? -ETIME : 0;
         }
     }
 
+    memclear(wait);
     wait.bo_handle = bo_gem->gem_handle;
     wait.timeout_ns = timeout_ns;
-    wait.flags = 0;
     ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
     if (ret == -1)
         return -errno;
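Caller-side sketch of the timeout semantics documented above:

    int64_t timeout_ns = 500ll * 1000 * 1000;   /* 500 ms */

    switch (drm_intel_gem_bo_wait(bo, timeout_ns)) {
    case 0:      /* idle: safe to map or reuse */          break;
    case -ETIME: /* still busy when the timeout expired */ break;
    default:     /* ioctl error, e.g. invalid handle */    break;
    }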
-ETIME : 0; } } + memclear(wait); wait.bo_handle = bo_gem->gem_handle; wait.timeout_ns = timeout_ns; - wait.flags = 0; ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_WAIT, &wait); if (ret == -1) return -errno; @@ -1580,7 +1843,7 @@ drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable) struct drm_i915_gem_set_domain set_domain; int ret; - VG_CLEAR(set_domain); + memclear(set_domain); set_domain.handle = bo_gem->gem_handle; set_domain.read_domains = I915_GEM_DOMAIN_GTT; set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0; @@ -1599,12 +1862,12 @@ static void drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr) { drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr; - int i; + struct drm_gem_close close_bo; + int i, ret; free(bufmgr_gem->exec2_objects); free(bufmgr_gem->exec_objects); free(bufmgr_gem->exec_bos); - free(bufmgr_gem->aub_filename); // pthread_mutex_destroy(&bufmgr_gem->lock); @@ -1673,6 +1936,14 @@ do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset, assert(offset <= bo->size - 4); assert((write_domain & (write_domain - 1)) == 0); + /* An object needing a fence is a tiled buffer, so it won't have + * relocs to other buffers. + */ + if (need_fence) { + assert(target_bo_gem->reloc_count == 0); + target_bo_gem->reloc_tree_fences = 1; + } + /* Make sure that we're not adding a reloc to something whose size has * already been accounted for. */ @@ -1680,21 +1951,8 @@ do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset, if (target_bo_gem != bo_gem) { target_bo_gem->used_as_reloc_target = true; bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size; + bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences; } - /* An object needing a fence is a tiled buffer, so it won't have - * relocs to other buffers. 
- */ - if (need_fence) - target_bo_gem->reloc_tree_fences = 1; - bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences; - - bo_gem->relocs[bo_gem->reloc_count].offset = offset; - bo_gem->relocs[bo_gem->reloc_count].delta = target_offset; - bo_gem->relocs[bo_gem->reloc_count].target_handle = - target_bo_gem->gem_handle; - bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains; - bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain; - bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset64; bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo; if (target_bo != bo) @@ -1705,21 +1963,77 @@ do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset, else bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0; + bo_gem->relocs[bo_gem->reloc_count].offset = offset; + bo_gem->relocs[bo_gem->reloc_count].delta = target_offset; + bo_gem->relocs[bo_gem->reloc_count].target_handle = + target_bo_gem->gem_handle; + bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains; + bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain; + bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset64; bo_gem->reloc_count++; return 0; } +static void +drm_intel_gem_bo_use_48b_address_range(drm_intel_bo *bo, uint32_t enable) +{ + drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; + bo_gem->use_48b_address_range = enable; +} + +static int +drm_intel_gem_bo_add_softpin_target(drm_intel_bo *bo, drm_intel_bo *target_bo) +{ + drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; + drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; + drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo; + if (bo_gem->has_error) + return -ENOMEM; + + if (target_bo_gem->has_error) { + bo_gem->has_error = true; + return -ENOMEM; + } + + if (!target_bo_gem->is_softpin) + return -EINVAL; + if (target_bo_gem == bo_gem) + return -EINVAL; + + if (bo_gem->softpin_target_count == bo_gem->softpin_target_size) { + int new_size = bo_gem->softpin_target_size * 2; + if (new_size == 0) + new_size = bufmgr_gem->max_relocs; + + bo_gem->softpin_target = realloc(bo_gem->softpin_target, new_size * + sizeof(drm_intel_bo *)); + if (!bo_gem->softpin_target) + return -ENOMEM; + + bo_gem->softpin_target_size = new_size; + } + bo_gem->softpin_target[bo_gem->softpin_target_count] = target_bo; + drm_intel_gem_bo_reference(target_bo); + bo_gem->softpin_target_count++; + + return 0; +} + static int drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset, drm_intel_bo *target_bo, uint32_t target_offset, uint32_t read_domains, uint32_t write_domain) { drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr; + drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *)target_bo; - return do_bo_emit_reloc(bo, offset, target_bo, target_offset, - read_domains, write_domain, - !bufmgr_gem->fenced_relocs); + if (target_bo_gem->is_softpin) + return drm_intel_gem_bo_add_softpin_target(bo, target_bo); + else + return do_bo_emit_reloc(bo, offset, target_bo, target_offset, + read_domains, write_domain, + !bufmgr_gem->fenced_relocs); } static int @@ -1752,27 +2066,37 @@ drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo) * * Any further drm_intel_bufmgr_check_aperture_space() queries * involving this buffer in the tree are undefined after this call. + * + * This also removes all softpinned targets being referenced by the BO. 
@@ -1752,27 +2066,37 @@ drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
  *
  * Any further drm_intel_bufmgr_check_aperture_space() queries
  * involving this buffer in the tree are undefined after this call.
+ *
+ * This also removes all softpinned targets being referenced by the BO.
  */
 void
 drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
 {
+	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
 	int i;
-//	struct timespec time;
+	struct timespec time;
 
-//	clock_gettime(CLOCK_MONOTONIC, &time);
+	clock_gettime(CLOCK_MONOTONIC, &time);
 
 	assert(bo_gem->reloc_count >= start);
 
+	/* Unreference the cleared target buffers */
 	for (i = start; i < bo_gem->reloc_count; i++) {
 		drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->reloc_target_info[i].bo;
 		if (&target_bo_gem->bo != bo) {
 			bo_gem->reloc_tree_fences -= target_bo_gem->reloc_tree_fences;
 			drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo,
-								  0);
+								  time.tv_sec);
 		}
 	}
 	bo_gem->reloc_count = start;
+
+	for (i = 0; i < bo_gem->softpin_target_count; i++) {
+		drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->softpin_target[i];
+		drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo, time.tv_sec);
+	}
+	bo_gem->softpin_target_count = 0;
 }
 
 /**
@@ -1811,7 +2135,7 @@ drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
 	int i;
 
-	if (bo_gem->relocs == NULL)
+	if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL)
 		return;
 
 	for (i = 0; i < bo_gem->reloc_count; i++) {
@@ -1832,6 +2156,17 @@ drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
 		/* Add the target to the validate list */
 		drm_intel_add_validate_buffer2(target_bo, need_fence);
 	}
+
+	for (i = 0; i < bo_gem->softpin_target_count; i++) {
+		drm_intel_bo *target_bo = bo_gem->softpin_target[i];
+
+		if (target_bo == bo)
+			continue;
+
+		drm_intel_gem_bo_mark_mmaps_incoherent(bo);
+		drm_intel_gem_bo_process_reloc2(target_bo);
+		drm_intel_add_validate_buffer2(target_bo, false);
+	}
 }
 
 
@@ -1846,10 +2181,12 @@ drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
 
 		/* Update the buffer offset */
 		if (bufmgr_gem->exec_objects[i].offset != bo->offset64) {
-			DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
-			    bo_gem->gem_handle, bo_gem->name, bo->offset64,
-			    (unsigned long long)bufmgr_gem->exec_objects[i].
-			    offset);
+			DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
+			    bo_gem->gem_handle, bo_gem->name,
+			    upper_32_bits(bo->offset64),
+			    lower_32_bits(bo->offset64),
+			    upper_32_bits(bufmgr_gem->exec_objects[i].offset),
+			    lower_32_bits(bufmgr_gem->exec_objects[i].offset));
 			bo->offset64 = bufmgr_gem->exec_objects[i].offset;
 			bo->offset = bufmgr_gem->exec_objects[i].offset;
 		}
 	}
 }
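Editor's note: the reworked DBG() lines print a 64-bit offset as two 32-bit halves because the old "0x%08lx" format silently truncates offset64 on a 32-bit build such as this port. The helper macros are not part of these hunks; the usual kernel-style definitions are assumed in this self-contained illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed helpers; the double shift avoids undefined behaviour
     * when the operand happens to be only 32 bits wide. */
    #define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))
    #define lower_32_bits(n) ((uint32_t)(n))

    int main(void)
    {
            uint64_t offset64 = 0x123456789abcull;

            /* Prints the halves the way the new DBG format does:
             * 0x00001234 56789abc */
            printf("0x%08x %08x\n",
                   upper_32_bits(offset64), lower_32_bits(offset64));
            return 0;
    }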
@@ -1867,309 +2204,95 @@ drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
 
 		/* Update the buffer offset */
 		if (bufmgr_gem->exec2_objects[i].offset != bo->offset64) {
-			DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
-			    bo_gem->gem_handle, bo_gem->name, bo->offset64,
-			    (unsigned long long)bufmgr_gem->exec2_objects[i].offset);
+			/* If we're seeing a softpinned object here it means that the kernel
+			 * has relocated our object... Indicating a programming error
+			 */
+			assert(!bo_gem->is_softpin);
+			DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
+			    bo_gem->gem_handle, bo_gem->name,
+			    upper_32_bits(bo->offset64),
+			    lower_32_bits(bo->offset64),
+			    upper_32_bits(bufmgr_gem->exec2_objects[i].offset),
+			    lower_32_bits(bufmgr_gem->exec2_objects[i].offset));
 			bo->offset64 = bufmgr_gem->exec2_objects[i].offset;
 			bo->offset = bufmgr_gem->exec2_objects[i].offset;
 		}
 	}
 }
 
-static void
-aub_out(drm_intel_bufmgr_gem *bufmgr_gem, uint32_t data)
-{
-	fwrite(&data, 1, 4, bufmgr_gem->aub_file);
-}
-
-static void
-aub_out_data(drm_intel_bufmgr_gem *bufmgr_gem, void *data, size_t size)
-{
-	fwrite(data, 1, size, bufmgr_gem->aub_file);
-}
-
-static void
-aub_write_bo_data(drm_intel_bo *bo, uint32_t offset, uint32_t size)
-{
-	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
-	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
-	uint32_t *data;
-	unsigned int i;
-
-	data = malloc(bo->size);
-	drm_intel_bo_get_subdata(bo, offset, size, data);
-
-	/* Easy mode: write out bo with no relocations */
-	if (!bo_gem->reloc_count) {
-		aub_out_data(bufmgr_gem, data, size);
-		free(data);
-		return;
-	}
-
-	/* Otherwise, handle the relocations while writing. */
-	for (i = 0; i < size / 4; i++) {
-		int r;
-		for (r = 0; r < bo_gem->reloc_count; r++) {
-			struct drm_i915_gem_relocation_entry *reloc;
-			drm_intel_reloc_target *info;
-
-			reloc = &bo_gem->relocs[r];
-			info = &bo_gem->reloc_target_info[r];
-
-			if (reloc->offset == offset + i * 4) {
-				drm_intel_bo_gem *target_gem;
-				uint32_t val;
-
-				target_gem = (drm_intel_bo_gem *)info->bo;
-
-				val = reloc->delta;
-				val += target_gem->aub_offset;
-
-				aub_out(bufmgr_gem, val);
-				data[i] = val;
-				break;
-			}
-		}
-		if (r == bo_gem->reloc_count) {
-			/* no relocation, just the data */
-			aub_out(bufmgr_gem, data[i]);
-		}
-	}
-
-	free(data);
-}
-
-static void
-aub_bo_get_address(drm_intel_bo *bo)
-{
-	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
-	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
-
-	/* Give the object a graphics address in the AUB file.  We
-	 * don't just use the GEM object address because we do AUB
-	 * dumping before execution -- we want to successfully log
-	 * when the hardware might hang, and we might even want to aub
-	 * capture for a driver trying to execute on a different
-	 * generation of hardware by disabling the actual kernel exec
-	 * call.
-	 */
-	bo_gem->aub_offset = bufmgr_gem->aub_offset;
-	bufmgr_gem->aub_offset += bo->size;
-	/* XXX: Handle aperture overflow. */
-	assert(bufmgr_gem->aub_offset < 256 * 1024 * 1024);
-}
-
-static void
-aub_write_trace_block(drm_intel_bo *bo, uint32_t type, uint32_t subtype,
-		      uint32_t offset, uint32_t size)
-{
-	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
-	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
-
-	aub_out(bufmgr_gem,
-		CMD_AUB_TRACE_HEADER_BLOCK |
-		((bufmgr_gem->gen >= 8 ? 6 : 5) - 2));
-	aub_out(bufmgr_gem,
-		AUB_TRACE_MEMTYPE_GTT | type | AUB_TRACE_OP_DATA_WRITE);
-	aub_out(bufmgr_gem, subtype);
-	aub_out(bufmgr_gem, bo_gem->aub_offset + offset);
-	aub_out(bufmgr_gem, size);
-	if (bufmgr_gem->gen >= 8)
-		aub_out(bufmgr_gem, 0);
-	aub_write_bo_data(bo, offset, size);
-}
-
-/**
- * Break up large objects into multiple writes.  Otherwise a 128kb VBO
- * would overflow the 16 bits of size field in the packet header and
- * everything goes badly after that.
- */
-static void
-aub_write_large_trace_block(drm_intel_bo *bo, uint32_t type, uint32_t subtype,
-			    uint32_t offset, uint32_t size)
-{
-	uint32_t block_size;
-	uint32_t sub_offset;
-
-	for (sub_offset = 0; sub_offset < size; sub_offset += block_size) {
-		block_size = size - sub_offset;
-
-		if (block_size > 8 * 4096)
-			block_size = 8 * 4096;
-
-		aub_write_trace_block(bo, type, subtype, offset + sub_offset,
-				      block_size);
-	}
-}
-
-static void
-aub_write_bo(drm_intel_bo *bo)
-{
-	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
-	uint32_t offset = 0;
-	unsigned i;
-
-	aub_bo_get_address(bo);
-
-	/* Write out each annotated section separately. */
-	for (i = 0; i < bo_gem->aub_annotation_count; ++i) {
-		drm_intel_aub_annotation *annotation =
-			&bo_gem->aub_annotations[i];
-		uint32_t ending_offset = annotation->ending_offset;
-		if (ending_offset > bo->size)
-			ending_offset = bo->size;
-		if (ending_offset > offset) {
-			aub_write_large_trace_block(bo, annotation->type,
-						    annotation->subtype,
-						    offset,
-						    ending_offset - offset);
-			offset = ending_offset;
-		}
-	}
-
-	/* Write out any remaining unannotated data */
-	if (offset < bo->size) {
-		aub_write_large_trace_block(bo, AUB_TRACE_TYPE_NOTYPE, 0,
-					    offset, bo->size - offset);
-	}
-}
-
-/*
- * Make a ringbuffer on fly and dump it
- */
-static void
-aub_build_dump_ringbuffer(drm_intel_bufmgr_gem *bufmgr_gem,
-			  uint32_t batch_buffer, int ring_flag)
-{
-	uint32_t ringbuffer[4096];
-	int ring = AUB_TRACE_TYPE_RING_PRB0; /* The default ring */
-	int ring_count = 0;
-
-	if (ring_flag == I915_EXEC_BSD)
-		ring = AUB_TRACE_TYPE_RING_PRB1;
-	else if (ring_flag == I915_EXEC_BLT)
-		ring = AUB_TRACE_TYPE_RING_PRB2;
-
-	/* Make a ring buffer to execute our batchbuffer. */
-	memset(ringbuffer, 0, sizeof(ringbuffer));
-	if (bufmgr_gem->gen >= 8) {
-		ringbuffer[ring_count++] = AUB_MI_BATCH_BUFFER_START | (3 - 2);
-		ringbuffer[ring_count++] = batch_buffer;
-		ringbuffer[ring_count++] = 0;
-	} else {
-		ringbuffer[ring_count++] = AUB_MI_BATCH_BUFFER_START;
-		ringbuffer[ring_count++] = batch_buffer;
-	}
-
-	/* Write out the ring.  This appears to trigger execution of
-	 * the ring in the simulator.
-	 */
-	aub_out(bufmgr_gem,
-		CMD_AUB_TRACE_HEADER_BLOCK |
-		((bufmgr_gem->gen >= 8 ? 6 : 5) - 2));
-	aub_out(bufmgr_gem,
-		AUB_TRACE_MEMTYPE_GTT | ring | AUB_TRACE_OP_COMMAND_WRITE);
-	aub_out(bufmgr_gem, 0); /* general/surface subtype */
-	aub_out(bufmgr_gem, bufmgr_gem->aub_offset);
-	aub_out(bufmgr_gem, ring_count * 4);
-	if (bufmgr_gem->gen >= 8)
-		aub_out(bufmgr_gem, 0);
-	/* FIXME: Need some flush operations here? */
-	aub_out_data(bufmgr_gem, ringbuffer, ring_count * 4);
-
-	/* Update offset pointer */
-	bufmgr_gem->aub_offset += 4096;
-}
-
 void
 drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
 			      int x1, int y1, int width, int height,
 			      enum aub_dump_bmp_format format,
 			      int pitch, int offset)
 {
-	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
-	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
-	uint32_t cpp;
-
-	switch (format) {
-	case AUB_DUMP_BMP_FORMAT_8BIT:
-		cpp = 1;
-		break;
-	case AUB_DUMP_BMP_FORMAT_ARGB_4444:
-		cpp = 2;
-		break;
-	case AUB_DUMP_BMP_FORMAT_ARGB_0888:
-	case AUB_DUMP_BMP_FORMAT_ARGB_8888:
-		cpp = 4;
-		break;
-	default:
-		printf("Unknown AUB dump format %d\n", format);
-		return;
-	}
-
-	if (!bufmgr_gem->aub_file)
-		return;
-
-	aub_out(bufmgr_gem, CMD_AUB_DUMP_BMP | 4);
-	aub_out(bufmgr_gem, (y1 << 16) | x1);
-	aub_out(bufmgr_gem,
-		(format << 24) |
-		(cpp << 19) |
-		pitch / 4);
-	aub_out(bufmgr_gem, (height << 16) | width);
-	aub_out(bufmgr_gem, bo_gem->aub_offset + offset);
-	aub_out(bufmgr_gem,
-		((bo_gem->tiling_mode != I915_TILING_NONE) ? (1 << 2) : 0) |
-		((bo_gem->tiling_mode == I915_TILING_Y) ? (1 << 3) : 0));
 }
 
-static void
-aub_exec(drm_intel_bo *bo, int ring_flag, int used)
+static int
+drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
+		      drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
 {
 	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
-	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
-	int i;
-	bool batch_buffer_needs_annotations;
+	struct drm_i915_gem_execbuffer execbuf;
+	int ret, i;
 
-	if (!bufmgr_gem->aub_file)
-		return;
+	if (to_bo_gem(bo)->has_error)
+		return -ENOMEM;
 
-	/* If batch buffer is not annotated, annotate it the best we
-	 * can.
+	/* Update indices and set up the validate list. */
+	drm_intel_gem_bo_process_reloc(bo);
+
+	/* Add the batch buffer to the validation list.  There are no
+	 * relocations pointing to it.
 	 */
-	batch_buffer_needs_annotations = bo_gem->aub_annotation_count == 0;
-	if (batch_buffer_needs_annotations) {
-		drm_intel_aub_annotation annotations[2] = {
-			{ AUB_TRACE_TYPE_BATCH, 0, used },
-			{ AUB_TRACE_TYPE_NOTYPE, 0, bo->size }
-		};
-		drm_intel_bufmgr_gem_set_aub_annotations(bo, annotations, 2);
-	}
+	drm_intel_add_validate_buffer(bo);
+
+	memclear(execbuf);
+	execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
+	execbuf.buffer_count = bufmgr_gem->exec_count;
+	execbuf.batch_start_offset = 0;
+	execbuf.batch_len = used;
+	execbuf.cliprects_ptr = (uintptr_t) cliprects;
+	execbuf.num_cliprects = num_cliprects;
+	execbuf.DR1 = 0;
+	execbuf.DR4 = DR4;
+
+	ret = drmIoctl(bufmgr_gem->fd,
+		       DRM_IOCTL_I915_GEM_EXECBUFFER,
+		       &execbuf);
+	if (ret != 0) {
+		ret = -errno;
+		if (errno == ENOSPC) {
+			DBG("Execbuffer fails to pin. "
+			    "Estimate: %u. Actual: %u. Available: %u\n",
+			    drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
+							       bufmgr_gem->
+							       exec_count),
+			    drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
+							      bufmgr_gem->
+							      exec_count),
+			    (unsigned int)bufmgr_gem->gtt_size);
+		}
+	}
+	drm_intel_update_buffer_offsets(bufmgr_gem);
+
+	if (bufmgr_gem->bufmgr.debug)
+		drm_intel_gem_dump_validation_list(bufmgr_gem);
 
 	for (i = 0; i < bufmgr_gem->exec_count; i++) {
-		aub_write_bo(bufmgr_gem->exec_bos[i]);
+		drm_intel_bo_gem *bo_gem = to_bo_gem(bufmgr_gem->exec_bos[i]);
+
+		bo_gem->idle = false;
+
+		/* Disconnect the buffer from the validate list */
+		bo_gem->validate_index = -1;
+		bufmgr_gem->exec_bos[i] = NULL;
 	}
+	bufmgr_gem->exec_count = 0;
 
-	/* Remove any annotations we added */
-	if (batch_buffer_needs_annotations)
-		drm_intel_bufmgr_gem_set_aub_annotations(bo, NULL, 0);
-
-	/* Dump ring buffer */
-	aub_build_dump_ringbuffer(bufmgr_gem, bo_gem->aub_offset, ring_flag);
-
-	fflush(bufmgr_gem->aub_file);
-
-	/*
-	 * One frame has been dumped. So reset the aub_offset for the next frame.
-	 *
-	 * FIXME: Can we do this?
-	 */
-	bufmgr_gem->aub_offset = 0x10000;
+	return ret;
 }
 
-
 static int
 do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
 	 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
@@ -2180,6 +2303,9 @@ do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
 	int ret = 0;
 	int i;
 
+	if (to_bo_gem(bo)->has_error)
+		return -ENOMEM;
+
 	switch (flags & 0x7) {
 	default:
 		return -EINVAL;
@@ -2209,7 +2335,7 @@ do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
 	 */
 	drm_intel_add_validate_buffer2(bo, 0);
 
-	VG_CLEAR(execbuf);
+	memclear(execbuf);
 	execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
 	execbuf.buffer_count = bufmgr_gem->exec_count;
 	execbuf.batch_start_offset = 0;
@@ -2225,8 +2351,6 @@ do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
 		i915_execbuffer2_set_context_id(execbuf, ctx->ctx_id);
 	execbuf.rsvd2 = 0;
 
-	aub_exec(bo, flags, used);
-
 	if (bufmgr_gem->no_exec)
 		goto skip_execution;
 
@@ -2252,8 +2376,7 @@ skip_execution:
 		drm_intel_gem_dump_validation_list(bufmgr_gem);
 
 	for (i = 0; i < bufmgr_gem->exec_count; i++) {
-		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
-		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
+		drm_intel_bo_gem *bo_gem = to_bo_gem(bufmgr_gem->exec_bos[i]);
 
 		bo_gem->idle = false;
 
@@ -2300,7 +2423,7 @@ drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
 	struct drm_i915_gem_pin pin;
 	int ret;
 
-	VG_CLEAR(pin);
+	memclear(pin);
 	pin.handle = bo_gem->gem_handle;
 	pin.alignment = alignment;
 
@@ -2323,7 +2446,7 @@ drm_intel_gem_bo_unpin(drm_intel_bo *bo)
 	struct drm_i915_gem_unpin unpin;
 	int ret;
 
-	VG_CLEAR(unpin);
+	memclear(unpin);
 	unpin.handle = bo_gem->gem_handle;
 
 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
@@ -2379,6 +2502,12 @@ drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
 	int ret;
 
+	/* Tiling with userptr surfaces is not supported
+	 * on all hardware so refuse it for the time being.
+	 */
+	if (bo_gem->is_userptr)
+		return -EINVAL;
+
 	/* Linear buffers have no stride. By ensuring that we only ever use
 	 * stride 0 with linear buffers, we simplify our code.
 	 */
@@ -2387,7 +2516,7 @@ drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
 
 	ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
 	if (ret == 0)
-		drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
+		drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
 
 	*tiling_mode = bo_gem->tiling_mode;
 	return ret;
@@ -2404,6 +2533,16 @@ drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
 	return 0;
 }
 
+static int
+drm_intel_gem_bo_set_softpin_offset(drm_intel_bo *bo, uint64_t offset)
+{
+	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+	bo_gem->is_softpin = true;
+	bo->offset64 = offset;
+	bo->offset = offset;
+	return 0;
+}
+
 #if 0
 drm_intel_bo *
 drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size)
@@ -2416,6 +2555,11 @@ drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int s
 	drmMMListHead *list;
 
 	ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle);
+	if (ret) {
+		DBG("create_from_prime: failed to obtain handle from fd: %s\n", strerror(errno));
+		pthread_mutex_unlock(&bufmgr_gem->lock);
+		return NULL;
+	}
 
 	/*
 	 * See if the kernel has already returned this buffer to us. Just as
@@ -2432,15 +2576,10 @@ drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int s
 		}
 	}
 
-	if (ret) {
-		fprintf(stderr,"ret is %d %d\n", ret, errno);
+	bo_gem = calloc(1, sizeof(*bo_gem));
+	if (!bo_gem) {
 		return NULL;
 	}
-
-	bo_gem = calloc(1, sizeof(*bo_gem));
-	if (!bo_gem)
-		return NULL;
-
 	/* Determine size of bo.  The fd-to-handle ioctl really should
 	 * return the size, but it doesn't.  If we have kernel 3.12 or
 	 * later, we can lseek on the prime fd to get the size.  Older
@@ -2450,7 +2589,7 @@ drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int s
 	if (ret != -1)
 		bo_gem->bo.size = ret;
 	else
-		bo_gem->bo.size = size;
+		bo_gem->bo.size = size;
 
 	bo_gem->bo.handle = handle;
 	bo_gem->bo.bufmgr = bufmgr;
@@ -2465,23 +2604,25 @@ drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int s
 	bo_gem->used_as_reloc_target = false;
 	bo_gem->has_error = false;
 	bo_gem->reusable = false;
+	bo_gem->use_48b_address_range = false;
 
 	DRMINITLISTHEAD(&bo_gem->vma_list);
 	DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
 
-	VG_CLEAR(get_tiling);
+	memclear(get_tiling);
 	get_tiling.handle = bo_gem->gem_handle;
 	ret = drmIoctl(bufmgr_gem->fd,
 		       DRM_IOCTL_I915_GEM_GET_TILING,
 		       &get_tiling);
 	if (ret != 0) {
+		DBG("create_from_prime: failed to get tiling: %s\n", strerror(errno));
 		drm_intel_gem_bo_unreference(&bo_gem->bo);
 		return NULL;
 	}
 	bo_gem->tiling_mode = get_tiling.tiling_mode;
 	bo_gem->swizzle_mode = get_tiling.swizzle_mode;
 	/* XXX stride is unknown */
-	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
+	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
 
 	return &bo_gem->bo;
 }
@@ -2515,7 +2656,7 @@ drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
 	if (!bo_gem->global_name) {
 		struct drm_gem_flink flink;
 
-		VG_CLEAR(flink);
+		memclear(flink);
 		flink.handle = bo_gem->gem_handle;
 
 		ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
@@ -2525,8 +2666,9 @@ drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
 
 		bo_gem->global_name = flink.name;
 		bo_gem->reusable = false;
 
-		if (DRMLISTEMPTY(&bo_gem->name_list))
-			DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
+		if (DRMLISTEMPTY(&bo_gem->name_list))
+			DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
+	}
 
 	*name = bo_gem->global_name;
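Editor's note: the flink hunk above makes sure a buffer lands on the bufmgr's named list exactly once, even when flink is called repeatedly. For context, a hedged sketch of the export/import round trip this serves (names and the second process are illustrative; both functions are long-standing public libdrm-intel API):

    uint32_t name;

    /* Exporting process: publish a global (flink) name for the BO. */
    if (drm_intel_bo_flink(bo, &name) != 0)
            return;                           /* flink ioctl failed */

    /* Importing process: open the same BO by that name. */
    drm_intel_bo *shared =
            drm_intel_bo_gem_create_from_name(bufmgr, "shared", name);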
@@ -2772,6 +2914,13 @@ _drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
 			return 1;
 	}
 
+	for (i = 0; i < bo_gem->softpin_target_count; i++) {
+		if (bo_gem->softpin_target[i] == target_bo)
+			return 1;
+		if (_drm_intel_gem_bo_references(bo_gem->softpin_target[i], target_bo))
+			return 1;
+	}
+
 	return 0;
 }
 
@@ -2845,12 +2994,11 @@ static int
 get_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem)
 {
 	char *devid_override;
-	int devid;
+	int devid = 0;
 	int ret;
 	drm_i915_getparam_t gp;
 
-	VG_CLEAR(devid);
-	VG_CLEAR(gp);
+	memclear(gp);
 	gp.param = I915_PARAM_CHIPSET_ID;
 	gp.value = &devid;
 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
@@ -2869,6 +3017,18 @@ drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
 	return bufmgr_gem->pci_device;
 }
 
+/**
+ * Sets the AUB filename.
+ *
+ * This function has to be called before drm_intel_bufmgr_gem_set_aub_dump()
+ * for it to have any effect.
+ */
+void
+drm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr,
+				      const char *filename)
+{
+}
+
 /**
  * Sets up AUB dumping.
 *
@@ -2880,48 +3040,11 @@ drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
 void
 drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
 {
-	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
-	int entry = 0x200003;
-	int i;
-	int gtt_size = 0x10000;
-	const char *filename;
-
-	if (!enable) {
-		if (bufmgr_gem->aub_file) {
-			fclose(bufmgr_gem->aub_file);
-			bufmgr_gem->aub_file = NULL;
-		}
-		return;
-	}
-
-	bufmgr_gem->aub_file = fopen("intel.aub", "w+");
-	if (!bufmgr_gem->aub_file)
-		return;
-
-	/* Start allocating objects from just after the GTT. */
-	bufmgr_gem->aub_offset = gtt_size;
-
-	/* Start with a (required) version packet. */
-	aub_out(bufmgr_gem, CMD_AUB_HEADER | (13 - 2));
-	aub_out(bufmgr_gem,
-		(4 << AUB_HEADER_MAJOR_SHIFT) |
-		(0 << AUB_HEADER_MINOR_SHIFT));
-	for (i = 0; i < 8; i++) {
-		aub_out(bufmgr_gem, 0); /* app name */
-	}
-	aub_out(bufmgr_gem, 0); /* timestamp */
-	aub_out(bufmgr_gem, 0); /* timestamp */
-	aub_out(bufmgr_gem, 0); /* comment len */
-
-	/* Set up the GTT. The max we can handle is 256M */
-	aub_out(bufmgr_gem, CMD_AUB_TRACE_HEADER_BLOCK | ((bufmgr_gem->gen >= 8 ? 6 : 5) - 2));
-	aub_out(bufmgr_gem, AUB_TRACE_MEMTYPE_NONLOCAL | 0 | AUB_TRACE_OP_DATA_WRITE);
-	aub_out(bufmgr_gem, 0); /* subtype */
-	aub_out(bufmgr_gem, 0); /* offset */
-	aub_out(bufmgr_gem, gtt_size); /* size */
-	for (i = 0x000; i < gtt_size; i += 4, entry += 0x1000) {
-		aub_out(bufmgr_gem, entry);
-	}
+	fprintf(stderr, "libdrm aub dumping is deprecated.\n\n"
+		"Use intel_aubdump from intel-gpu-tools instead.  Install intel-gpu-tools,\n"
+		"then run (for example)\n\n"
+		"\t$ intel_aubdump --output=trace.aub glxgears -geometry 500x500\n\n"
+		"See the intel_aubdump man page for more details.\n");
 }
 
 drm_intel_context *
@@ -2936,7 +3059,7 @@ drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
 	if (!context)
 		return NULL;
 
-	VG_CLEAR(create);
+	memclear(create);
 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
 	if (ret != 0) {
 		DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
@@ -2961,7 +3084,7 @@ drm_intel_gem_context_destroy(drm_intel_context *ctx)
 	if (ctx == NULL)
 		return;
 
-	VG_CLEAR(destroy);
+	memclear(destroy);
 
 	bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
 	destroy.ctx_id = ctx->ctx_id;
@@ -2983,7 +3106,7 @@ drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
 	struct drm_i915_reg_read reg_read;
 	int ret;
 
-	VG_CLEAR(reg_read);
+	memclear(reg_read);
 	reg_read.offset = offset;
 
 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_REG_READ, &reg_read);
@@ -2992,6 +3115,37 @@ drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
 	return ret;
 }
 
+int
+drm_intel_get_subslice_total(int fd, unsigned int *subslice_total)
+{
+	drm_i915_getparam_t gp;
+	int ret;
+
+	memclear(gp);
+	gp.value = (int*)subslice_total;
+	gp.param = I915_PARAM_SUBSLICE_TOTAL;
+	ret = drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
+	if (ret)
+		return -errno;
+
+	return 0;
+}
+
+int
+drm_intel_get_eu_total(int fd, unsigned int *eu_total)
+{
+	drm_i915_getparam_t gp;
+	int ret;
+
+	memclear(gp);
+	gp.value = (int*)eu_total;
+	gp.param = I915_PARAM_EU_TOTAL;
+	ret = drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
+	if (ret)
+		return -errno;
+
+	return 0;
+}
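Editor's note: drm_intel_get_subslice_total() and drm_intel_get_eu_total() above are thin I915_GETPARAM wrappers; note that they funnel an unsigned int through gp.value, which the ioctl treats as an int pointer. A minimal caller, assuming the kernel is new enough to know both parameters (older kernels make these return a negative errno):

    unsigned int subslices = 0, eus = 0;

    if (drm_intel_get_subslice_total(fd, &subslices) == 0 &&
        drm_intel_get_eu_total(fd, &eus) == 0)
            printf("GPU topology: %u subslices, %u EUs\n", subslices, eus);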
 
 /**
  * Annotate the given bo for use in aub dumping.
@@ -3019,19 +3173,40 @@ drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
 					 drm_intel_aub_annotation *annotations,
 					 unsigned count)
 {
-	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
-	unsigned size = sizeof(*annotations) * count;
-	drm_intel_aub_annotation *new_annotations =
-	    count > 0 ? realloc(bo_gem->aub_annotations, size) : NULL;
-	if (new_annotations == NULL) {
-		free(bo_gem->aub_annotations);
-		bo_gem->aub_annotations = NULL;
-		bo_gem->aub_annotation_count = 0;
-		return;
+}
+
+static drmMMListHead bufmgr_list = { &bufmgr_list, &bufmgr_list };
+
+static drm_intel_bufmgr_gem *
+drm_intel_bufmgr_gem_find(int fd)
+{
+	drm_intel_bufmgr_gem *bufmgr_gem;
+
+	DRMLISTFOREACHENTRY(bufmgr_gem, &bufmgr_list, managers) {
+		if (bufmgr_gem->fd == fd) {
+			atomic_inc(&bufmgr_gem->refcount);
+			return bufmgr_gem;
+		}
+	}
+
+	return NULL;
+}
+
+static void
+drm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr)
+{
+	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
+
+	if (atomic_add_unless(&bufmgr_gem->refcount, -1, 1)) {
+//		pthread_mutex_lock(&bufmgr_list_mutex);
+
+		if (atomic_dec_and_test(&bufmgr_gem->refcount)) {
+			DRMLISTDEL(&bufmgr_gem->managers);
+			drm_intel_bufmgr_gem_destroy(bufmgr);
+		}
+
+//		pthread_mutex_unlock(&bufmgr_list_mutex);
 	}
-	memcpy(new_annotations, annotations, size);
-	bo_gem->aub_annotations = new_annotations;
-	bo_gem->aub_annotation_count = count;
 }
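Editor's note: with bufmgr_list, _find() and _unref() above, drm_intel_bufmgr_gem_init() becomes idempotent per fd: a second init on the same fd just bumps a refcount, and bufmgr.destroy is repointed at the unref function, so the heavy teardown only runs for the last user. The atomic_add_unless(&refcount, -1, 1) test (added later in this patch, in xf86atomic.h) returns true when the count is exactly 1, i.e. when this caller may be the last one. A hedged usage sketch:

    drm_intel_bufmgr *a = drm_intel_bufmgr_gem_init(fd, 4096);
    drm_intel_bufmgr *b = drm_intel_bufmgr_gem_init(fd, 4096); /* a == b */

    drm_intel_bufmgr_destroy(b);   /* refcount 2 -> 1, nothing freed */
    drm_intel_bufmgr_destroy(a);   /* refcount 1 -> 0, real teardown */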
 
 /**
@@ -3049,17 +3224,25 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
 	int ret, tmp;
 	bool exec2 = false;
 
+//	pthread_mutex_lock(&bufmgr_list_mutex);
+
+	bufmgr_gem = drm_intel_bufmgr_gem_find(fd);
+	if (bufmgr_gem)
+		goto exit;
+
 	bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
 	if (bufmgr_gem == NULL)
-		return NULL;
+		goto exit;
 
 	bufmgr_gem->fd = fd;
+	atomic_set(&bufmgr_gem->refcount, 1);
 
//	if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
//		free(bufmgr_gem);
//		return NULL;
//	}
 
+	memclear(aperture);
 	ret = drmIoctl(bufmgr_gem->fd,
 		       DRM_IOCTL_I915_GEM_GET_APERTURE,
 		       &aperture);
@@ -3067,10 +3250,10 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
 	if (ret == 0)
 		bufmgr_gem->gtt_size = aperture.aper_available_size;
 	else {
-		printf("DRM_IOCTL_I915_GEM_APERTURE failed: %s\n",
+		fprintf(stderr, "DRM_IOCTL_I915_GEM_APERTURE failed: %s\n",
 			strerror(errno));
 		bufmgr_gem->gtt_size = 128 * 1024 * 1024;
-		printf("Assuming %dkB available aperture size.\n"
+		fprintf(stderr, "Assuming %dkB available aperture size.\n"
 		       "May lead to reduced performance or incorrect "
 		       "rendering.\n",
 		       (int)bufmgr_gem->gtt_size / 1024);
@@ -3090,13 +3273,16 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
 		bufmgr_gem->gen = 6;
 	else if (IS_GEN7(bufmgr_gem->pci_device))
 		bufmgr_gem->gen = 7;
+	else if (IS_GEN8(bufmgr_gem->pci_device))
+		bufmgr_gem->gen = 8;
+	else if (IS_GEN9(bufmgr_gem->pci_device))
+		bufmgr_gem->gen = 9;
 	else {
 		free(bufmgr_gem);
-		return NULL;
+		bufmgr_gem = NULL;
+		goto exit;
 	}
 
-//	printf("gen %d\n", bufmgr_gem->gen);
-
 	if (IS_GEN3(bufmgr_gem->pci_device) &&
 	    bufmgr_gem->gtt_size > 256*1024*1024) {
 		/* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
@@ -3106,7 +3292,7 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
 		bufmgr_gem->gtt_size -= 256*1024*1024;
 	}
 
-	VG_CLEAR(gp);
+	memclear(gp);
 	gp.value = &tmp;
 
 	gp.param = I915_PARAM_HAS_EXECBUF2;
@@ -3126,6 +3312,8 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
 	bufmgr_gem->has_relaxed_fencing = ret == 0;
 
+	bufmgr_gem->bufmgr.bo_alloc_userptr = NULL;
+
 	gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
 	bufmgr_gem->has_wait_timeout = ret == 0;
@@ -3145,6 +3333,11 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
 	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
 	bufmgr_gem->has_vebox = (ret == 0) & (*gp.value > 0);
 
+	gp.param = I915_PARAM_HAS_EXEC_SOFTPIN;
+	ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
+	if (ret == 0 && *gp.value > 0)
+		bufmgr_gem->bufmgr.bo_set_softpin_offset = drm_intel_gem_bo_set_softpin_offset;
+
 	if (bufmgr_gem->gen < 4) {
 		gp.param = I915_PARAM_NUM_FENCES_AVAIL;
 		gp.value = &bufmgr_gem->available_fences;
@@ -3188,7 +3381,7 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
 	bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
 	bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
 	bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
-//	bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
+	bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
 	bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
 	bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
 	bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
@@ -3198,14 +3391,14 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
 	bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
 	bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
 	/* Use the new one if available */
-//	if (exec2) {
+	if (exec2) {
 		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
 		bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
-//	} else
-//		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
-	bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
+	} else
+		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
+	bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
 	bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
-	bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
+	bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_unref;
 	bufmgr_gem->bufmgr.debug = 0;
 	bufmgr_gem->bufmgr.check_aperture_space =
 	    drm_intel_gem_check_aperture_space;
@@ -3221,7 +3414,12 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
 	DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
 	bufmgr_gem->vma_max = -1; /* unlimited by default */
 
-	return &bufmgr_gem->bufmgr;
+	DRMLISTADD(&bufmgr_gem->managers, &bufmgr_list);
+
+exit:
+//	pthread_mutex_unlock(&bufmgr_list_mutex);
+
+	return bufmgr_gem != NULL ? &bufmgr_gem->bufmgr : NULL;
 }
 
 
@@ -3266,7 +3464,7 @@ bo_create_from_gem_handle(drm_intel_bufmgr *bufmgr,
 	bo_gem->global_name = 0;
 	bo_gem->reusable = false;
 
-	VG_CLEAR(get_tiling);
+	memclear(get_tiling);
 	get_tiling.handle = bo_gem->gem_handle;
 	ret = drmIoctl(bufmgr_gem->fd,
 		       DRM_IOCTL_I915_GEM_GET_TILING,
@@ -3278,7 +3476,7 @@ bo_create_from_gem_handle(drm_intel_bufmgr *bufmgr,
 	bo_gem->tiling_mode = get_tiling.tiling_mode;
 	bo_gem->swizzle_mode = get_tiling.swizzle_mode;
 	/* XXX stride is unknown */
-	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
+	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
 
 	DRMINITLISTHEAD(&bo_gem->vma_list);
 	DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
diff --git a/contrib/sdk/sources/libdrm/intel/intel_bufmgr_priv.h b/contrib/sdk/sources/libdrm/intel/intel_bufmgr_priv.h
index ce0723b37e..5a32475c45 100644
--- a/contrib/sdk/sources/libdrm/intel/intel_bufmgr_priv.h
+++ b/contrib/sdk/sources/libdrm/intel/intel_bufmgr_priv.h
@@ -61,6 +61,18 @@ struct _drm_intel_bufmgr {
 				    unsigned long size,
 				    unsigned int alignment);
 
+	/**
+	 * Allocate a buffer object from an existing user accessible
+	 * address malloc'd with the provided size.
+	 * Alignment is used when mapping to the gtt.
+	 * Flags may be I915_VMAP_READ_ONLY or I915_USERPTR_UNSYNCHRONIZED
+	 */
+	drm_intel_bo *(*bo_alloc_userptr)(drm_intel_bufmgr *bufmgr,
+					  const char *name, void *addr,
+					  uint32_t tiling_mode, uint32_t stride,
+					  unsigned long size,
+					  unsigned long flags);
+
 	/**
 	 * Allocate a tiled buffer object.
 	 *
@@ -122,8 +134,8 @@ struct _drm_intel_bufmgr {
 	 * This is an optional function, if missing,
 	 * drm_intel_bo will map/memcpy/unmap.
 	 */
-//	int (*bo_get_subdata) (drm_intel_bo *bo, unsigned long offset,
-//			       unsigned long size, void *data);
+	int (*bo_get_subdata) (drm_intel_bo *bo, unsigned long offset,
+			       unsigned long size, void *data);
 
 	/**
 	 * Waits for rendering to an object by the GPU to have completed.
@@ -139,6 +151,20 @@ struct _drm_intel_bufmgr {
 	 */
 	void (*destroy) (drm_intel_bufmgr *bufmgr);
 
+	/**
+	 * Indicate if the buffer can be placed anywhere in the full ppgtt
+	 * address range (2^48).
+	 *
+	 * Any resource used with flat/heapless (0x00000000-0xfffff000)
+	 * General State Heap (GSH) or Instructions State Heap (ISH) must
+	 * be in a 32-bit range. 48-bit range will only be used when explicitly
+	 * requested.
+	 *
+	 * \param bo Buffer to set the use_48b_address_range flag.
+	 * \param enable The flag value.
+	 */
+	void (*bo_use_48b_address_range) (drm_intel_bo *bo, uint32_t enable);
+
 	/**
 	 * Add relocation entry in reloc_buf, which will be updated with the
 	 * target buffer's real offset on command submission.
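Editor's note: the bo_alloc_userptr hook documented above wraps an existing user allocation in a BO without copying. This port leaves the hook NULL (see the bufmgr_gem_init hunk earlier), and upstream's public wrapper simply returns NULL when the hook is unset, so callers must be ready to fall back; recall also that the set_tiling hunk above rejects tiling on userptr BOs. A hedged sketch of that probe-and-fallback pattern, assuming a POSIX-style aligned allocator is available:

    void *addr = NULL;
    drm_intel_bo *bo;

    /* userptr memory generally must be page-aligned */
    if (posix_memalign(&addr, 4096, 16 * 4096) != 0)
            return;

    bo = drm_intel_bo_alloc_userptr(bufmgr, "userptr test", addr,
                                    I915_TILING_NONE, 0, 16 * 4096, 0);
    if (bo == NULL)   /* hook unset, or the kernel lacks userptr support */
            bo = drm_intel_bo_alloc(bufmgr, "fallback", 16 * 4096, 4096);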
@@ -214,6 +240,13 @@ struct _drm_intel_bufmgr {
 	int (*bo_get_tiling) (drm_intel_bo *bo, uint32_t * tiling_mode,
 			      uint32_t * swizzle_mode);
 
+	/**
+	 * Set the offset at which this buffer will be softpinned
+	 * \param bo Buffer to set the softpin offset for
+	 * \param offset Softpin offset
+	 */
+	int (*bo_set_softpin_offset) (drm_intel_bo *bo, uint64_t offset);
+
 	/**
 	 * Create a visible name for a buffer which can be used by other apps
 	 *
diff --git a/contrib/sdk/sources/libdrm/intel/intel_chipset.h b/contrib/sdk/sources/libdrm/intel/intel_chipset.h
index f0df31bd40..80dc3f33a5 100644
--- a/contrib/sdk/sources/libdrm/intel/intel_chipset.h
+++ b/contrib/sdk/sources/libdrm/intel/intel_chipset.h
@@ -70,8 +70,8 @@
 #define PCI_CHIP_G45_G			0x2E22
 #define PCI_CHIP_G41_G			0x2E32
 
-#define PCI_CHIP_ILD_G			0x0042
-#define PCI_CHIP_ILM_G			0x0046
+#define PCI_CHIP_ILD_G			0x0042
+#define PCI_CHIP_ILM_G			0x0046
 
 #define PCI_CHIP_SANDYBRIDGE_GT1	0x0102 /* desktop */
 #define PCI_CHIP_SANDYBRIDGE_GT2	0x0112
@@ -88,14 +88,14 @@
 #define PCI_CHIP_IVYBRIDGE_S		0x015a /* server */
 #define PCI_CHIP_IVYBRIDGE_S_GT2	0x016a /* server */
 
-#define PCI_CHIP_HASWELL_GT1		0x0402 /* Desktop */
-#define PCI_CHIP_HASWELL_GT2		0x0412
+#define PCI_CHIP_HASWELL_GT1		0x0402 /* Desktop */
+#define PCI_CHIP_HASWELL_GT2		0x0412
 #define PCI_CHIP_HASWELL_GT3		0x0422
-#define PCI_CHIP_HASWELL_M_GT1		0x0406 /* Mobile */
-#define PCI_CHIP_HASWELL_M_GT2		0x0416
+#define PCI_CHIP_HASWELL_M_GT1		0x0406 /* Mobile */
+#define PCI_CHIP_HASWELL_M_GT2		0x0416
 #define PCI_CHIP_HASWELL_M_GT3		0x0426
-#define PCI_CHIP_HASWELL_S_GT1		0x040A /* Server */
-#define PCI_CHIP_HASWELL_S_GT2		0x041A
+#define PCI_CHIP_HASWELL_S_GT1		0x040A /* Server */
+#define PCI_CHIP_HASWELL_S_GT2		0x041A
 #define PCI_CHIP_HASWELL_S_GT3		0x042A
 #define PCI_CHIP_HASWELL_B_GT1		0x040B /* Reserved */
 #define PCI_CHIP_HASWELL_B_GT2		0x041B
@@ -103,14 +103,14 @@
 #define PCI_CHIP_HASWELL_E_GT1		0x040E /* Reserved */
 #define PCI_CHIP_HASWELL_E_GT2		0x041E
 #define PCI_CHIP_HASWELL_E_GT3		0x042E
-#define PCI_CHIP_HASWELL_SDV_GT1	0x0C02 /* Desktop */
-#define PCI_CHIP_HASWELL_SDV_GT2	0x0C12
+#define PCI_CHIP_HASWELL_SDV_GT1	0x0C02 /* Desktop */
+#define PCI_CHIP_HASWELL_SDV_GT2	0x0C12
 #define PCI_CHIP_HASWELL_SDV_GT3	0x0C22
-#define PCI_CHIP_HASWELL_SDV_M_GT1	0x0C06 /* Mobile */
-#define PCI_CHIP_HASWELL_SDV_M_GT2	0x0C16
+#define PCI_CHIP_HASWELL_SDV_M_GT1	0x0C06 /* Mobile */
+#define PCI_CHIP_HASWELL_SDV_M_GT2	0x0C16
 #define PCI_CHIP_HASWELL_SDV_M_GT3	0x0C26
-#define PCI_CHIP_HASWELL_SDV_S_GT1	0x0C0A /* Server */
-#define PCI_CHIP_HASWELL_SDV_S_GT2	0x0C1A
+#define PCI_CHIP_HASWELL_SDV_S_GT1	0x0C0A /* Server */
+#define PCI_CHIP_HASWELL_SDV_S_GT2	0x0C1A
 #define PCI_CHIP_HASWELL_SDV_S_GT3	0x0C2A
 #define PCI_CHIP_HASWELL_SDV_B_GT1	0x0C0B /* Reserved */
 #define PCI_CHIP_HASWELL_SDV_B_GT2	0x0C1B
@@ -118,14 +118,14 @@
 #define PCI_CHIP_HASWELL_SDV_E_GT1	0x0C0E /* Reserved */
 #define PCI_CHIP_HASWELL_SDV_E_GT2	0x0C1E
 #define PCI_CHIP_HASWELL_SDV_E_GT3	0x0C2E
-#define PCI_CHIP_HASWELL_ULT_GT1	0x0A02 /* Desktop */
-#define PCI_CHIP_HASWELL_ULT_GT2	0x0A12
+#define PCI_CHIP_HASWELL_ULT_GT1	0x0A02 /* Desktop */
+#define PCI_CHIP_HASWELL_ULT_GT2	0x0A12
 #define PCI_CHIP_HASWELL_ULT_GT3	0x0A22
-#define PCI_CHIP_HASWELL_ULT_M_GT1	0x0A06 /* Mobile */
-#define PCI_CHIP_HASWELL_ULT_M_GT2	0x0A16
+#define PCI_CHIP_HASWELL_ULT_M_GT1	0x0A06 /* Mobile */
+#define PCI_CHIP_HASWELL_ULT_M_GT2	0x0A16
 #define PCI_CHIP_HASWELL_ULT_M_GT3	0x0A26
-#define PCI_CHIP_HASWELL_ULT_S_GT1	0x0A0A /* Server */
-#define PCI_CHIP_HASWELL_ULT_S_GT2	0x0A1A
+#define PCI_CHIP_HASWELL_ULT_S_GT1	0x0A0A /* Server */
+#define PCI_CHIP_HASWELL_ULT_S_GT2	0x0A1A
 #define PCI_CHIP_HASWELL_ULT_S_GT3	0x0A2A
 #define PCI_CHIP_HASWELL_ULT_B_GT1	0x0A0B /* Reserved */
 #define PCI_CHIP_HASWELL_ULT_B_GT2	0x0A1B
@@ -165,6 +165,32 @@
 #define PCI_CHIP_CHERRYVIEW_2		0x22b2
 #define PCI_CHIP_CHERRYVIEW_3		0x22b3
 
+#define PCI_CHIP_SKYLAKE_DT_GT1		0x1902
+#define PCI_CHIP_SKYLAKE_ULT_GT1	0x1906
+#define PCI_CHIP_SKYLAKE_SRV_GT1	0x190A /* Reserved */
+#define PCI_CHIP_SKYLAKE_ULX_GT1	0x190E /* Reserved */
+#define PCI_CHIP_SKYLAKE_DT_GT2		0x1912
+#define PCI_CHIP_SKYLAKE_FUSED0_GT2	0x1913 /* Reserved */
+#define PCI_CHIP_SKYLAKE_FUSED1_GT2	0x1915 /* Reserved */
+#define PCI_CHIP_SKYLAKE_ULT_GT2	0x1916
+#define PCI_CHIP_SKYLAKE_FUSED2_GT2	0x1917 /* Reserved */
+#define PCI_CHIP_SKYLAKE_SRV_GT2	0x191A /* Reserved */
+#define PCI_CHIP_SKYLAKE_HALO_GT2	0x191B
+#define PCI_CHIP_SKYLAKE_WKS_GT2	0x191D
+#define PCI_CHIP_SKYLAKE_ULX_GT2	0x191E
+#define PCI_CHIP_SKYLAKE_MOBILE_GT2	0x1921 /* Reserved */
+#define PCI_CHIP_SKYLAKE_GT3		0x1926
+#define PCI_CHIP_SKYLAKE_HALO_GT3	0x192B /* Reserved */
+#define PCI_CHIP_SKYLAKE_SRV_GT4	0x192A
+#define PCI_CHIP_SKYLAKE_DT_GT4		0x1932
+#define PCI_CHIP_SKYLAKE_SRV_GT4X	0x193A
+#define PCI_CHIP_SKYLAKE_H_GT4		0x193B
+#define PCI_CHIP_SKYLAKE_WKS_GT4	0x193D
+
+#define PCI_CHIP_BROXTON_0		0x0A84
+#define PCI_CHIP_BROXTON_1		0x1A84
+#define PCI_CHIP_BROXTON_2		0x5A84
+
 #define IS_MOBILE(devid)	((devid) == PCI_CHIP_I855_GM || \
 				 (devid) == PCI_CHIP_I915_GM || \
 				 (devid) == PCI_CHIP_I945_GM || \
@@ -226,8 +252,8 @@
 				 (devid) == PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS || \
 				 (devid) == PCI_CHIP_SANDYBRIDGE_S)
 
-#define IS_GEN7(devid)		(IS_IVYBRIDGE(devid) || \
-				 IS_HASWELL(devid) || \
+#define IS_GEN7(devid)		(IS_IVYBRIDGE(devid) || \
+				 IS_HASWELL(devid) || \
 				 IS_VALLEYVIEW(devid))
 
 #define IS_IVYBRIDGE(devid)	((devid) == PCI_CHIP_IVYBRIDGE_GT1 || \
@@ -303,7 +329,7 @@
 				 (devid) == PCI_CHIP_HASWELL_CRW_B_GT3 || \
 				 (devid) == PCI_CHIP_HASWELL_CRW_E_GT3)
 
-#define IS_HASWELL(devid)	(IS_HSW_GT1(devid) || \
+#define IS_HASWELL(devid)	(IS_HSW_GT1(devid) || \
 				 IS_HSW_GT2(devid) || \
 				 IS_HSW_GT3(devid))
 
@@ -324,12 +350,50 @@
 #define IS_GEN8(devid)		(IS_BROADWELL(devid) || \
 				 IS_CHERRYVIEW(devid))
 
+#define IS_SKL_GT1(devid)	((devid) == PCI_CHIP_SKYLAKE_ULT_GT1 || \
+				 (devid) == PCI_CHIP_SKYLAKE_ULX_GT1 || \
+				 (devid) == PCI_CHIP_SKYLAKE_DT_GT1 || \
+				 (devid) == PCI_CHIP_SKYLAKE_SRV_GT1)
+
+#define IS_SKL_GT2(devid)	((devid) == PCI_CHIP_SKYLAKE_DT_GT2 || \
+				 (devid) == PCI_CHIP_SKYLAKE_FUSED0_GT2 || \
+				 (devid) == PCI_CHIP_SKYLAKE_FUSED1_GT2 || \
+				 (devid) == PCI_CHIP_SKYLAKE_ULT_GT2 || \
+				 (devid) == PCI_CHIP_SKYLAKE_FUSED2_GT2 || \
+				 (devid) == PCI_CHIP_SKYLAKE_SRV_GT2 || \
+				 (devid) == PCI_CHIP_SKYLAKE_HALO_GT2 || \
+				 (devid) == PCI_CHIP_SKYLAKE_WKS_GT2 || \
+				 (devid) == PCI_CHIP_SKYLAKE_ULX_GT2 || \
+				 (devid) == PCI_CHIP_SKYLAKE_MOBILE_GT2)
+
+#define IS_SKL_GT3(devid)	((devid) == PCI_CHIP_SKYLAKE_GT3 || \
+				 (devid) == PCI_CHIP_SKYLAKE_HALO_GT3)
+
+#define IS_SKL_GT4(devid)	((devid) == PCI_CHIP_SKYLAKE_SRV_GT4 || \
+				 (devid) == PCI_CHIP_SKYLAKE_DT_GT4 || \
+				 (devid) == PCI_CHIP_SKYLAKE_SRV_GT4X || \
+				 (devid) == PCI_CHIP_SKYLAKE_H_GT4 || \
+				 (devid) == PCI_CHIP_SKYLAKE_WKS_GT4)
+
+#define IS_SKYLAKE(devid)	(IS_SKL_GT1(devid) || \
+				 IS_SKL_GT2(devid) || \
+				 IS_SKL_GT3(devid) || \
+				 IS_SKL_GT4(devid))
+
+#define IS_BROXTON(devid)	((devid) == PCI_CHIP_BROXTON_0 || \
+				 (devid) == PCI_CHIP_BROXTON_1 || \
+				 (devid) == PCI_CHIP_BROXTON_2)
+
+#define IS_GEN9(devid)		(IS_SKYLAKE(devid) || \
+				 IS_BROXTON(devid))
+
 #define IS_9XX(dev)		(IS_GEN3(dev) || \
 				 IS_GEN4(dev) || \
 				 IS_GEN5(dev) || \
 				 IS_GEN6(dev) || \
 				 IS_GEN7(dev) || \
-				 IS_GEN8(dev))
+				 IS_GEN8(dev) || \
+				 IS_GEN9(dev))
 
 #endif /* _INTEL_CHIPSET_H */
diff --git a/contrib/sdk/sources/libdrm/intel/intel_decode.c b/contrib/sdk/sources/libdrm/intel/intel_decode.c
index 61239dd96d..a244c4b048 100644
--- a/contrib/sdk/sources/libdrm/intel/intel_decode.c
+++ b/contrib/sdk/sources/libdrm/intel/intel_decode.c
@@ -29,10 +29,14 @@
 #include
 #include
 
+#include "libdrm_macros.h"
 #include "xf86drm.h"
 #include "intel_chipset.h"
 #include "intel_bufmgr.h"
 
+/* The compiler throws ~90 warnings. Do not spam the build until we fix them. */
+#pragma GCC diagnostic ignored "-Wmissing-field-initializers"
+
 /* Struct for tracking drm_intel_decode state. */
 struct drm_intel_decode {
 	/** stdio file where the output should land.  Defaults to stdout. */
@@ -3625,7 +3629,6 @@ decode_3d_965(struct drm_intel_decode *ctx)
 
 	case 0x7a00:
 		if (IS_GEN6(devid) || IS_GEN7(devid)) {
-			unsigned int i;
 			if (len != 4 && len != 5)
 				fprintf(out, "Bad count in PIPE_CONTROL\n");
 
@@ -3727,8 +3730,6 @@ decode_3d_965(struct drm_intel_decode *ctx)
 	if (opcode_3d->func) {
 		return opcode_3d->func(ctx);
 	} else {
-		unsigned int i;
-
 		instr_out(ctx, 0, "%s\n", opcode_3d->name);
 
 		for (i = 1; i < len; i++) {
@@ -3824,7 +3825,9 @@ drm_intel_decode_context_alloc(uint32_t devid)
 	ctx->devid = devid;
 	ctx->out = stdout;
 
-	if (IS_GEN8(devid))
+	if (IS_GEN9(devid))
+		ctx->gen = 9;
+	else if (IS_GEN8(devid))
 		ctx->gen = 8;
 	else if (IS_GEN7(devid))
 		ctx->gen = 7;
@@ -3876,9 +3879,9 @@ drm_intel_decode_set_head_tail(struct drm_intel_decode *ctx,
 
 void
 drm_intel_decode_set_output_file(struct drm_intel_decode *ctx,
-				 FILE *out)
+				 FILE *output)
 {
-	ctx->out = out;
+	ctx->out = output;
 }
 
 /**
diff --git a/contrib/sdk/sources/libdrm/libdrm_macros.h b/contrib/sdk/sources/libdrm/libdrm_macros.h
new file mode 100644
index 0000000000..8a41fe0554
--- /dev/null
+++ b/contrib/sdk/sources/libdrm/libdrm_macros.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright © 2014 NVIDIA Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef LIBDRM_LIBDRM_H
+#define LIBDRM_LIBDRM_H
+
+#if defined(HAVE_VISIBILITY)
+#  define drm_private __attribute__((visibility("hidden")))
+#else
+#  define drm_private
+#endif
+
+
+/**
+ * Static (compile-time) assertion.
+ * Basically, use COND to dimension an array.  If COND is false/zero the
+ * array size will be -1 and we'll get a compilation error.
+ */
+#define STATIC_ASSERT(COND) \
+   do { \
+      (void) sizeof(char [1 - 2*!(COND)]); \
+   } while (0)
+
+
+
+#if defined(ANDROID) && !defined(__LP64__)
+#include <errno.h> /* for EINVAL */
+
+extern void *__mmap2(void *, size_t, int, int, int, size_t);
+
+static inline void *drm_mmap(void *addr, size_t length, int prot, int flags,
+                             int fd, loff_t offset)
+{
+	/* offset must be aligned to 4096 (not necessarily the page size) */
+	if (offset & 4095) {
+		errno = EINVAL;
+		return MAP_FAILED;
+	}
+
+	return __mmap2(addr, length, prot, flags, fd, (size_t) (offset >> 12));
+}
+
+#  define drm_munmap(addr, length) \
+	      munmap(addr, length)
+
+
+#else
+
+/* assume large file support exists */
+#  define drm_mmap(addr, length, prot, flags, fd, offset) \
+	      mmap(addr, length, prot, flags, fd, offset)
+
+
+static inline int drm_munmap(void *addr, size_t length)
+{
+	/* Copied from configure code generated by AC_SYS_LARGEFILE */
+
+	return munmap(addr, length);
+}
+#endif
+
+#endif
diff --git a/contrib/sdk/sources/libdrm/xf86atomic.h b/contrib/sdk/sources/libdrm/xf86atomic.h
index 5dfaba87a1..43cebadab1 100644
--- a/contrib/sdk/sources/libdrm/xf86atomic.h
+++ b/contrib/sdk/sources/libdrm/xf86atomic.h
@@ -48,7 +48,8 @@ typedef struct {
 # define atomic_read(x) ((x)->atomic)
 # define atomic_set(x, val) ((x)->atomic = (val))
 # define atomic_inc(x) ((void) __sync_fetch_and_add (&(x)->atomic, 1))
-# define atomic_dec_and_test(x) (__sync_fetch_and_add (&(x)->atomic, -1) == 1)
+# define atomic_inc_return(x) (__sync_add_and_fetch (&(x)->atomic, 1))
+# define atomic_dec_and_test(x) (__sync_add_and_fetch (&(x)->atomic, -1) == 0)
 # define atomic_add(x, v) ((void) __sync_add_and_fetch(&(x)->atomic, (v)))
 # define atomic_dec(x, v) ((void) __sync_sub_and_fetch(&(x)->atomic, (v)))
 # define atomic_cmpxchg(x, oldv, newv) __sync_val_compare_and_swap (&(x)->atomic, oldv, newv)
@@ -66,6 +67,7 @@ typedef struct {
 # define atomic_read(x) AO_load_full(&(x)->atomic)
 # define atomic_set(x, val) AO_store_full(&(x)->atomic, (val))
 # define atomic_inc(x) ((void) AO_fetch_and_add1_full(&(x)->atomic))
+# define atomic_inc_return(x) (AO_fetch_and_add1_full(&(x)->atomic) + 1)
 # define atomic_add(x, v) ((void) AO_fetch_and_add_full(&(x)->atomic, (v)))
 # define atomic_dec(x, v) ((void) AO_fetch_and_add_full(&(x)->atomic, -(v)))
 # define atomic_dec_and_test(x) (AO_fetch_and_sub1_full(&(x)->atomic) == 1)
@@ -73,17 +75,24 @@ typedef struct {
 #endif
 
-#if defined(__sun) && !defined(HAS_ATOMIC_OPS) /* Solaris & OpenSolaris */
+#if (defined(__sun) || defined(__NetBSD__)) && !defined(HAS_ATOMIC_OPS) /* Solaris & OpenSolaris & NetBSD */
 #include <atomic.h>
 #define HAS_ATOMIC_OPS 1
 
-typedef struct { uint_t atomic; } atomic_t;
+#if defined(__NetBSD__)
+#define LIBDRM_ATOMIC_TYPE int
+#else
+#define LIBDRM_ATOMIC_TYPE uint_t
+#endif
+
+typedef struct { LIBDRM_ATOMIC_TYPE atomic; } atomic_t;
 
 # define atomic_read(x) (int) ((x)->atomic)
-# define atomic_set(x, val) ((x)->atomic = (uint_t)(val))
+# define atomic_set(x, val) ((x)->atomic = (LIBDRM_ATOMIC_TYPE)(val))
 # define atomic_inc(x) (atomic_inc_uint (&(x)->atomic))
-# define atomic_dec_and_test(x) (atomic_dec_uint_nv(&(x)->atomic) == 1)
+# define atomic_inc_return(x) (atomic_inc_uint_nv(&(x)->atomic))
+# define atomic_dec_and_test(x) (atomic_dec_uint_nv(&(x)->atomic) == 0)
 # define atomic_add(x, v) (atomic_add_int(&(x)->atomic, (v)))
 # define atomic_dec(x, v) (atomic_add_int(&(x)->atomic, -(v)))
 # define atomic_cmpxchg(x, oldv, newv) atomic_cas_uint (&(x)->atomic, oldv, newv)
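Editor's note: in the GCC branch, switching atomic_dec_and_test() from __sync_fetch_and_add(...) == 1 (old value was 1) to __sync_add_and_fetch(...) == 0 (new value is 0) is behaviour-preserving; it just reads as "did we hit zero" and matches the freshly added atomic_inc_return(). In the Solaris branch, by contrast, the "== 1" test was a genuine bug, since atomic_dec_uint_nv() returns the new value. A self-contained illustration of the refcount idiom these macros serve:

    /* Standalone sketch using the same GCC __sync builtins. */
    typedef struct { int atomic; } atomic_t;
    #define atomic_set(x, val)     ((x)->atomic = (val))
    #define atomic_inc_return(x)   (__sync_add_and_fetch(&(x)->atomic, 1))
    #define atomic_dec_and_test(x) (__sync_add_and_fetch(&(x)->atomic, -1) == 0)

    int main(void)
    {
            atomic_t ref;

            atomic_set(&ref, 1);
            atomic_inc_return(&ref);             /* ref is now 2 */
            if (atomic_dec_and_test(&ref))       /* 2 -> 1: false, object lives */
                    return 1;
            return atomic_dec_and_test(&ref) ? 0 : 1; /* 1 -> 0: true, free it */
    }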
@@ -94,4 +103,13 @@ typedef struct { uint_t atomic; } atomic_t;
 #error libdrm requires atomic operations, please define them for your CPU/compiler.
 #endif
 
+static inline int atomic_add_unless(atomic_t *v, int add, int unless)
+{
+	int c, old;
+	c = atomic_read(v);
+	while (c != unless && (old = atomic_cmpxchg(v, c, c + add)) != c)
+		c = old;
+	return c == unless;
+}
+
 #endif
diff --git a/contrib/sdk/sources/libdrm/xf86drm.c b/contrib/sdk/sources/libdrm/xf86drm.c
index 979358325f..31c1343f1a 100644
--- a/contrib/sdk/sources/libdrm/xf86drm.c
+++ b/contrib/sdk/sources/libdrm/xf86drm.c
@@ -1,5 +1,5 @@
 /**
- * \file xf86drm.c
+ * \file xf86drm.c
  * User-level interface to DRM device
 *
 * \author Rickard E. (Rik) Faith
@@ -39,9 +39,12 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
+#include
+#include
 #include
 
 /* Not all systems have MAP_FAILED defined */
@@ -50,10 +53,12 @@
 #endif
 
 #include "xf86drm.h"
+#include "libdrm_macros.h"
+
 #include
 
 #ifndef DRM_MAJOR
-#define DRM_MAJOR 226 /* Linux */
+#define DRM_MAJOR 226 /* Linux */
 #endif
@@ -89,7 +94,7 @@ drmVersionPtr drmGetVersion(int fd)
 	v->name_len = 4;
 	v->name = "i915";
 	v->date_len = 8;
-	v->date = "20080730";
+	v->date = "20151010";
 	v->desc_len = 14;
 	v->desc = "Intel Graphics";
 	return v;
diff --git a/contrib/sdk/sources/libdrm/xf86drm.h b/contrib/sdk/sources/libdrm/xf86drm.h
index 6b86ee78fc..37d83a0415 100644
--- a/contrib/sdk/sources/libdrm/xf86drm.h
+++ b/contrib/sdk/sources/libdrm/xf86drm.h
@@ -39,7 +39,7 @@
 #include
 #include
 
-#if defined(__cplusplus) || defined(c_plusplus)
+#if defined(__cplusplus)
 extern "C" {
 #endif
@@ -66,6 +66,7 @@ extern "C" {
 #define DRM_DIR_NAME  "/dev/dri"
 #define DRM_DEV_NAME  "%s/card%d"
 #define DRM_CONTROL_DEV_NAME  "%s/controlD%d"
+#define DRM_RENDER_DEV_NAME  "%s/renderD%d"
 #define DRM_PROC_NAME "/proc/dri/" /* For backward Linux compatibility */
 
 #define DRM_ERR_NO_DEVICE  (-1001)
@@ -537,7 +538,8 @@ do {	register unsigned int __old __asm("o0"); \
 /* General user-level programmer's API: unprivileged */
 extern int           drmAvailable(void);
 extern int           drmOpen(const char *name, const char *busid);
-extern int           drmOpenControl(int minor);
+extern int           drmOpenControl(int minor);
+extern int           drmOpenRender(int minor);
 extern int           drmClose(int fd);
 extern drmVersionPtr drmGetVersion(int fd);
 extern drmVersionPtr drmGetLibVersion(int fd);
@@ -720,6 +722,7 @@ typedef struct _drmEventContext {
 extern int drmHandleEvent(int fd, drmEventContextPtr evctx);
 
 extern char *drmGetDeviceNameFromFd(int fd);
+extern int drmGetNodeTypeFromFd(int fd);
 
 extern int drmPrimeHandleToFD(int fd, uint32_t handle, uint32_t flags, int *prime_fd);
 extern int drmPrimeFDToHandle(int fd, int prime_fd, uint32_t *handle);
diff --git a/contrib/sdk/sources/libdrm/xf86drmMode.h b/contrib/sdk/sources/libdrm/xf86drmMode.h
index b260af7caa..4de7bbbe54 100644
--- a/contrib/sdk/sources/libdrm/xf86drmMode.h
+++ b/contrib/sdk/sources/libdrm/xf86drmMode.h
@@ -36,7 +36,7 @@
 #ifndef _XF86DRMMODE_H_
 #define _XF86DRMMODE_H_
 
-#if defined(__cplusplus) || defined(c_plusplus)
+#if defined(__cplusplus)
 extern "C" {
 #endif
@@ -240,6 +240,15 @@ typedef struct _drmModeProperty {
 	uint32_t *blob_ids; /* store the blob IDs */
 } drmModePropertyRes, *drmModePropertyPtr;
 
+static __inline int drm_property_type_is(drmModePropertyPtr property,
+		uint32_t type)
+{
+	/* instanceof for props.. handles extended type vs original types: */
+	if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
+		return (property->flags & DRM_MODE_PROP_EXTENDED_TYPE) == type;
+	return property->flags & type;
+}
+
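Editor's note: drm_property_type_is() above exists because extended property types are encoded in a dedicated bitfield of props->flags rather than as individual flag bits, so a plain mask test would misclassify them. A hedged usage sketch; 'prop' and 'fd' are placeholders, and DRM_MODE_PROP_RANGE comes from the kernel's drm_mode.h:

    /* 'prop' obtained earlier via drmModeGetProperty(fd, prop_id) */
    if (drm_property_type_is(prop, DRM_MODE_PROP_RANGE)) {
            /* for a range property, values[0]/values[1] are min/max */
            printf("range: %llu..%llu\n",
                   (unsigned long long)prop->values[0],
                   (unsigned long long)prop->values[1]);
    }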
 typedef struct _drmModeCrtc {
 	uint32_t crtc_id;
 	uint32_t buffer_id; /**< FB id to connect to 0 = disconnect */
@@ -413,10 +422,23 @@ drmModeEncoderPtr drmModeGetEncoder(int fd, uint32_t encoder_id);
 */
 
 /**
- * Retrive information about the connector connectorId.
+ * Retrieve all information about the connector connectorId. This will do a
+ * forced probe on the connector to retrieve remote information such as EDIDs
+ * from the display device.
 */
 extern drmModeConnectorPtr drmModeGetConnector(int fd,
-		uint32_t connectorId);
+					       uint32_t connectorId);
+
+/**
+ * Retrieve current information, i.e the currently active mode and encoder,
+ * about the connector connectorId. This will not do any probing on the
+ * connector or remote device, and only reports what is currently known.
+ * For the complete set of modes and encoders associated with the connector
+ * use drmModeGetConnector() which will do a probe to determine any display
+ * link changes first.
+ */
+extern drmModeConnectorPtr drmModeGetConnectorCurrent(int fd,
+						      uint32_t connector_id);
 
 /**
 * Attaches the given mode to a connector.
@@ -462,7 +484,31 @@ extern int drmModeObjectSetProperty(int fd, uint32_t object_id,
 					    uint32_t object_type, uint32_t property_id,
 					    uint64_t value);
 
+
+typedef struct _drmModeAtomicReq drmModeAtomicReq, *drmModeAtomicReqPtr;
+
+extern drmModeAtomicReqPtr drmModeAtomicAlloc(void);
+extern drmModeAtomicReqPtr drmModeAtomicDuplicate(drmModeAtomicReqPtr req);
+extern int drmModeAtomicMerge(drmModeAtomicReqPtr base,
+			      drmModeAtomicReqPtr augment);
+extern void drmModeAtomicFree(drmModeAtomicReqPtr req);
+extern int drmModeAtomicGetCursor(drmModeAtomicReqPtr req);
+extern void drmModeAtomicSetCursor(drmModeAtomicReqPtr req, int cursor);
+extern int drmModeAtomicAddProperty(drmModeAtomicReqPtr req,
+				    uint32_t object_id,
+				    uint32_t property_id,
+				    uint64_t value);
+extern int drmModeAtomicCommit(int fd,
+			       drmModeAtomicReqPtr req,
+			       uint32_t flags,
+			       void *user_data);
+
+extern int drmModeCreatePropertyBlob(int fd, const void *data, size_t size,
+				     uint32_t *id);
+extern int drmModeDestroyPropertyBlob(int fd, uint32_t id);
+
+
-#if defined(__cplusplus) || defined(c_plusplus)
+#if defined(__cplusplus)
 }
 #endif
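Editor's note: the drmModeAtomic* declarations above are the userspace half of atomic modesetting: build up a list of property changes, then commit them in a single ioctl. A hedged end-to-end sketch; plane_id, fb_id_prop and fb_id are placeholders that a real client would discover via drmModeObjectGetProperties(), and the client must first enable the atomic client capability (upstream does this through drmSetClientCap()):

    drmModeAtomicReqPtr req = drmModeAtomicAlloc();
    if (!req)
            return;

    /* Queue FB_ID = fb_id on plane_id, then commit everything at once. */
    if (drmModeAtomicAddProperty(req, plane_id, fb_id_prop, fb_id) < 0 ||
        drmModeAtomicCommit(fd, req, 0 /* flags */, NULL) != 0)
            fprintf(stderr, "atomic commit failed\n");

    drmModeAtomicFree(req);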