intel-2d: sna-2.99.906

git-svn-id: svn://kolibrios.org@4501 a494cfbc-eb01-0410-851d-a64ba20cac60
Sergey Semyonov (Serge) 2014-01-21 14:20:59 +00:00
parent 6d34dc3cde
commit d605acf341
22 changed files with 3686 additions and 1119 deletions


@ -13,6 +13,7 @@ STRIP = $(PREFIX)strip
LDFLAGS:= -shared -s -nostdlib -T ../newlib/dll.lds --entry _DllStartup --image-base=0
PXFLAGS:= --version-script pixlib.ver --output-def $(LIBRARY).orig.def --out-implib $(LIBRARY).dll.a
SNAFLAGS:= --version-script sna.ver --output-def sna.def
UXAFLAGS:= --version-script uxa.ver --output-def uxa.def
INCLUDES= -I. -I../libdrm/intel -I../libdrm/include/drm -I./render_program -I../pixman -I../newlib/include
@ -25,7 +26,10 @@ DEFINES:= -DHAS_DEBUG_FULL=0 -DSHOW_BATCH=0 -DDEBUG_DUMP=0
SRC_PIXLIB = pixlib.c
SRC_SNA = sna/gen3_render.c \
SRC_SNA = \
sna/gen4_common.c \
sna/gen6_common.c \
sna/gen3_render.c \
sna/gen4_render.c \
sna/gen4_vertex.c \
sna/gen5_render.c \
@ -70,7 +74,7 @@ endif
# targets
all:$(LIBRARY).dll intel-sna.drv
uxa:$(LIBRARY).dll
uxa:$(LIBRARY).dll intel-uxa.drv
ebox:$(LIBRARY).dll
@ -86,7 +90,7 @@ intel-sna.drv: $(OBJ_SNA) Makefile
mv -f $@ ../../bin
intel-uxa.drv: $(OBJ_UXA) Makefile
$(LD) $(LDFLAGS) $(LIBPATH) -o $@ $(OBJ_UXA) $(LIBS)
$(LD) $(LDFLAGS) $(UXAFLAGS) $(LIBPATH) -o $@ $(OBJ_UXA) $(LIBS)
$(STRIP) $@
mv -f $@ ../../bin


@ -118,5 +118,6 @@ struct intel_device_info {
const struct intel_device_info *intel_detect_chipset(struct pci_device *pci);
#define hosted() (0)
#endif /* INTEL_DRIVER_H */


@ -261,7 +261,7 @@ static inline void list_move_tail(struct list *list, struct list *head)
* @return True if the list is empty or False otherwise.
*/
static inline bool
list_is_empty(struct list *head)
list_is_empty(const struct list *head)
{
return head->next == head;
}
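
The const qualifier added here lets read-only helpers test emptiness without casting. A minimal self-contained sketch, mirroring the definitions in this header (the count_entries() walker is hypothetical, not part of this diff):

#include <stdbool.h>

struct list { struct list *next, *prev; };

static inline bool
list_is_empty(const struct list *head)
{
        return head->next == head;
}

/* hypothetical read-only walker: compiles cleanly now that
 * list_is_empty() accepts a const pointer */
static int
count_entries(const struct list *head)
{
        const struct list *pos;
        int n = 0;

        if (list_is_empty(head))
                return 0;
        for (pos = head->next; pos != head; pos = pos->next)
                n++;
        return n;
}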


@ -521,7 +521,7 @@ static void brw_wm_projective_st(struct brw_compile *p, int dw,
if (p->gen >= 060) {
/* First compute 1/z */
brw_PLN(p,
brw_message_reg(msg),
brw_vec8_grf(30, 0),
brw_vec1_grf(uv+1, 0),
brw_vec8_grf(2, 0));
@ -532,22 +532,22 @@ static void brw_wm_projective_st(struct brw_compile *p, int dw,
brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
} else
brw_math_invert(p, brw_vec8_grf(30, 0), brw_vec8_grf(30, 0));
brw_PLN(p,
brw_vec8_grf(28, 0),
brw_vec8_grf(26, 0),
brw_vec1_grf(uv, 0),
brw_vec8_grf(2, 0));
brw_MUL(p,
brw_message_reg(msg),
brw_vec8_grf(28, 0),
brw_vec8_grf(30, 0));
msg += dw/8;
brw_PLN(p,
brw_vec8_grf(28, 0),
brw_vec1_grf(uv, 0),
brw_vec8_grf(4, 0));
brw_MUL(p,
brw_message_reg(msg),
brw_vec8_grf(26, 0),
brw_vec8_grf(30, 0));
brw_MUL(p,
brw_message_reg(msg + dw/8),
brw_vec8_grf(28, 0),
brw_vec8_grf(30, 0));
} else {
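
A hedged restatement in plain C of what the reordered kernel computes: PLN evaluates a plane equation per pixel, and the projective path scales the interpolated s and t by 1/w before the results reach the message registers. The coefficient layout below is illustrative only, a sketch of the math rather than the EU instruction semantics:

/* plane coefficients roughly as PLN consumes them: a, b, (pad), c */
static float
pln(const float coef[4], float x, float y)
{
        return coef[0]*x + coef[1]*y + coef[3];
}

static void
projective_st(const float s_plane[4], const float t_plane[4],
              const float w_plane[4], float x, float y,
              float *s, float *t)
{
        float inv_w = 1.0f / pln(w_plane, x, y); /* math_invert on g30 */

        *s = pln(s_plane, x, y) * inv_w; /* PLN into g26/g28, then MUL */
        *t = pln(t_plane, x, y) * inv_w;
}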

File diff suppressed because it is too large.


@ -0,0 +1,64 @@
/*
* Copyright © 2011-2013 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Chris Wilson <chris@chris-wilson.co.uk>
*
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "gen4_common.h"
#include "gen4_vertex.h"
void gen4_render_flush(struct sna *sna)
{
gen4_vertex_close(sna);
assert(sna->render.vb_id == 0);
assert(sna->render.vertex_offset == 0);
}
void gen4_render_retire(struct kgem *kgem)
{
struct sna *sna;
sna = container_of(kgem, struct sna, kgem);
if (sna->render.nvertex_reloc == 0 && sna->render.vbo && !kgem_bo_is_busy(sna->render.vbo)) {
DBG(("%s: resetting idle vbo\n", __FUNCTION__));
sna->render.vertex_used = 0;
sna->render.vertex_index = 0;
}
}
void gen4_render_expire(struct kgem *kgem)
{
struct sna *sna;
sna = container_of(kgem, struct sna, kgem);
if (sna->render.vbo && !sna->render.vertex_used) {
DBG(("%s: discarding vbo\n", __FUNCTION__));
discard_vbo(sna);
}
}
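
These shared helpers recover the enclosing struct sna from the embedded kgem via container_of(). A sketch of the idiom, assuming the usual offsetof-based definition:

#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* given: struct sna { ...; struct kgem kgem; ... };
 * then inside a kgem callback:
 *   struct sna *sna = container_of(kgem, struct sna, kgem);
 * yields the owning sna without storing a back-pointer field. */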


@ -0,0 +1,49 @@
/*
* Copyright © 2011-2013 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Chris Wilson <chris@chris-wilson.co.uk>
*
*/
#ifndef GEN4_COMMON_H
#define GEN4_COMMON_H
#include "sna.h"
inline static void
discard_vbo(struct sna *sna)
{
kgem_bo_destroy(&sna->kgem, sna->render.vbo);
sna->render.vbo = NULL;
sna->render.vertices = sna->render.vertex_data;
sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
sna->render.vertex_used = 0;
sna->render.vertex_index = 0;
}
void gen4_render_flush(struct sna *sna);
void gen4_render_retire(struct kgem *kgem);
void gen4_render_expire(struct kgem *kgem);
#endif /* GEN4_COMMON_H */


@ -41,6 +41,7 @@
//#include "sna_video.h"
#include "brw/brw.h"
#include "gen4_common.h"
#include "gen4_render.h"
#include "gen4_source.h"
#include "gen4_vertex.h"
@ -549,9 +550,6 @@ static int gen4_get_rectangles__flush(struct sna *sna,
if (!kgem_check_reloc_and_exec(&sna->kgem, 2))
return 0;
if (op->need_magic_ca_pass && sna->render.vbo)
return 0;
if (sna->render.vertex_offset) {
gen4_vertex_flush(sna);
if (gen4_magic_ca_pass(sna, op))
@ -747,16 +745,10 @@ gen4_align_vertex(struct sna *sna, const struct sna_composite_op *op)
{
assert(op->floats_per_rect == 3*op->floats_per_vertex);
if (op->floats_per_vertex != sna->render_state.gen4.floats_per_vertex) {
if (sna->render.vertex_size - sna->render.vertex_used < 2*op->floats_per_rect)
gen4_vertex_finish(sna);
DBG(("aligning vertex: was %d, now %d floats per vertex, %d->%d\n",
DBG(("aligning vertex: was %d, now %d floats per vertex\n",
sna->render_state.gen4.floats_per_vertex,
op->floats_per_vertex,
sna->render.vertex_index,
(sna->render.vertex_used + op->floats_per_vertex - 1) / op->floats_per_vertex));
sna->render.vertex_index = (sna->render.vertex_used + op->floats_per_vertex - 1) / op->floats_per_vertex;
sna->render.vertex_used = sna->render.vertex_index * op->floats_per_vertex;
op->floats_per_vertex));
gen4_vertex_align(sna, op);
sna->render_state.gen4.floats_per_vertex = op->floats_per_vertex;
}
}
@ -1314,11 +1306,12 @@ gen4_render_video(struct sna *sna,
if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL)) {
kgem_submit(&sna->kgem);
assert(kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL));
if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL))
return false;
}
gen4_video_bind_surfaces(sna, &tmp);
gen4_align_vertex(sna, &tmp);
gen4_video_bind_surfaces(sna, &tmp);
/* Set up the offset for translating from the given region (in screen
* coordinates) to the backing pixmap.
@ -1548,33 +1541,6 @@ gen4_composite_set_target(struct sna *sna,
return true;
}
static bool
try_blt(struct sna *sna,
PicturePtr dst, PicturePtr src,
int width, int height)
{
if (sna->kgem.mode != KGEM_RENDER) {
DBG(("%s: already performing BLT\n", __FUNCTION__));
return true;
}
if (too_large(width, height)) {
DBG(("%s: operation too large for 3D pipe (%d, %d)\n",
__FUNCTION__, width, height));
return true;
}
if (too_large(dst->pDrawable->width, dst->pDrawable->height))
return true;
/* The blitter is much faster for solids */
if (sna_picture_is_solid(src, NULL))
return true;
/* is the source picture only in cpu memory e.g. a shm pixmap? */
return picture_is_cpu(sna, src);
}
static bool
check_gradient(PicturePtr picture, bool precise)
{
@ -1803,7 +1769,6 @@ gen4_render_composite(struct sna *sna,
return false;
if (mask == NULL &&
try_blt(sna, dst, src, width, height) &&
sna_blt_composite(sna, op,
src, dst,
src_x, src_y,
@ -1932,8 +1897,8 @@ gen4_render_composite(struct sna *sna,
goto cleanup_mask;
}
gen4_bind_surfaces(sna, tmp);
gen4_align_vertex(sna, tmp);
gen4_bind_surfaces(sna, tmp);
return true;
cleanup_mask:
@ -1989,51 +1954,6 @@ cleanup_dst:
static void
gen4_render_flush(struct sna *sna)
{
gen4_vertex_close(sna);
assert(sna->render.vb_id == 0);
assert(sna->render.vertex_offset == 0);
}
static void
discard_vbo(struct sna *sna)
{
kgem_bo_destroy(&sna->kgem, sna->render.vbo);
sna->render.vbo = NULL;
sna->render.vertices = sna->render.vertex_data;
sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
sna->render.vertex_used = 0;
sna->render.vertex_index = 0;
}
static void
gen4_render_retire(struct kgem *kgem)
{
struct sna *sna;
sna = container_of(kgem, struct sna, kgem);
if (kgem->nbatch == 0 && sna->render.vbo && !kgem_bo_is_busy(sna->render.vbo)) {
DBG(("%s: resetting idle vbo\n", __FUNCTION__));
sna->render.vertex_used = 0;
sna->render.vertex_index = 0;
}
}
static void
gen4_render_expire(struct kgem *kgem)
{
struct sna *sna;
sna = container_of(kgem, struct sna, kgem);
if (sna->render.vbo && !sna->render.vertex_used) {
DBG(("%s: discarding vbo\n", __FUNCTION__));
discard_vbo(sna);
}
}
static void gen4_render_reset(struct sna *sna)
{
@ -2047,8 +1967,7 @@ static void gen4_render_reset(struct sna *sna)
sna->render_state.gen4.drawrect_limit = -1;
sna->render_state.gen4.surface_table = -1;
if (sna->render.vbo &&
!kgem_bo_is_mappable(&sna->kgem, sna->render.vbo)) {
if (sna->render.vbo && !kgem_bo_can_map(&sna->kgem, sna->render.vbo)) {
DBG(("%s: discarding unmappable vbo\n", __FUNCTION__));
discard_vbo(sna);
}
@ -2407,8 +2326,8 @@ gen4_blit_tex(struct sna *sna,
kgem_submit(&sna->kgem);
}
gen4_bind_surfaces(sna, tmp);
gen4_align_vertex(sna, tmp);
gen4_bind_surfaces(sna, tmp);
return true;
}


@ -38,6 +38,29 @@
#define sse2
#endif
void gen4_vertex_align(struct sna *sna, const struct sna_composite_op *op)
{
int vertex_index;
assert(op->floats_per_rect == 3*op->floats_per_vertex);
vertex_index = (sna->render.vertex_used + op->floats_per_vertex - 1) / op->floats_per_vertex;
if ((int)sna->render.vertex_size - vertex_index * op->floats_per_vertex < 2*op->floats_per_rect) {
DBG(("%s: flushing vertex buffer: new index=%d, max=%d\n",
__FUNCTION__, vertex_index, sna->render.vertex_size / op->floats_per_vertex));
if (gen4_vertex_finish(sna) < op->floats_per_rect) {
kgem_submit(&sna->kgem);
_kgem_set_mode(&sna->kgem, KGEM_RENDER);
}
vertex_index = (sna->render.vertex_used + op->floats_per_vertex - 1) / op->floats_per_vertex;
assert(vertex_index * op->floats_per_vertex <= sna->render.vertex_size);
}
sna->render.vertex_index = vertex_index;
sna->render.vertex_used = vertex_index * op->floats_per_vertex;
}
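
A worked example of the rounding above, with illustrative numbers:

#include <assert.h>

static void
align_example(void)
{
        int vertex_used = 10, floats_per_vertex = 4;
        int vertex_index;

        /* round up to the next whole vertex: (10 + 4 - 1) / 4 == 3 */
        vertex_index = (vertex_used + floats_per_vertex - 1) / floats_per_vertex;
        assert(vertex_index == 3);

        /* realign vertex_used so the next rect starts on a vertex boundary */
        assert(vertex_index * floats_per_vertex == 12);
}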
void gen4_vertex_flush(struct sna *sna)
{
DBG(("%s[%x] = %d\n", __FUNCTION__,
@ -45,7 +68,9 @@ void gen4_vertex_flush(struct sna *sna)
sna->render.vertex_index - sna->render.vertex_start));
assert(sna->render.vertex_offset);
assert(sna->render.vertex_offset <= sna->kgem.nbatch);
assert(sna->render.vertex_index > sna->render.vertex_start);
assert(sna->render.vertex_used <= sna->render.vertex_size);
sna->kgem.batch[sna->render.vertex_offset] =
sna->render.vertex_index - sna->render.vertex_start;
@ -62,11 +87,14 @@ int gen4_vertex_finish(struct sna *sna)
sna->render.vertex_used, sna->render.vertex_size));
assert(sna->render.vertex_offset == 0);
assert(sna->render.vertex_used);
assert(sna->render.vertex_used <= sna->render.vertex_size);
sna_vertex_wait__locked(&sna->render);
/* Note: we only need dword alignment (currently) */
hint = CREATE_GTT_MAP;
bo = sna->render.vbo;
if (bo) {
for (i = 0; i < sna->render.nvertex_reloc; i++) {
@ -88,11 +116,15 @@ int gen4_vertex_finish(struct sna *sna)
sna->render.vb_id = 0;
kgem_bo_destroy(&sna->kgem, bo);
hint |= CREATE_CACHED | CREATE_NO_THROTTLE;
} else {
if (kgem_is_idle(&sna->kgem)) {
sna->render.vertices = sna->render.vertex_data;
sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
return 0;
}
}
hint = CREATE_GTT_MAP;
if (bo)
hint |= CREATE_CACHED | CREATE_NO_THROTTLE;
size = 256*1024;
assert(!sna->render.active);
@ -163,7 +195,7 @@ void gen4_vertex_close(struct sna *sna)
sna->render.vertices = sna->render.vertex_data;
sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
free_bo = bo;
} else if (IS_CPU_MAP(bo->map) && !sna->kgem.has_llc) {
} else if (!sna->kgem.has_llc && sna->render.vertices == MAP(bo->map__cpu)) {
DBG(("%s: converting CPU map to GTT\n", __FUNCTION__));
sna->render.vertices =
kgem_bo_map__gtt(&sna->kgem, sna->render.vbo);
@ -176,9 +208,16 @@ void gen4_vertex_close(struct sna *sna)
}
} else {
if (sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) {
int size;
size = sna->kgem.nbatch;
size += sna->kgem.batch_size - sna->kgem.surface;
size += sna->render.vertex_used;
if (size <= 1024) {
DBG(("%s: copy to batch: %d @ %d\n", __FUNCTION__,
sna->render.vertex_used, sna->kgem.nbatch));
assert(sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface);
memcpy(sna->kgem.batch + sna->kgem.nbatch,
sna->render.vertex_data,
sna->render.vertex_used * 4);
@ -186,6 +225,37 @@ void gen4_vertex_close(struct sna *sna)
bo = NULL;
sna->kgem.nbatch += sna->render.vertex_used;
} else {
size = 256 * 1024;
do {
bo = kgem_create_linear(&sna->kgem, size,
CREATE_GTT_MAP | CREATE_NO_RETIRE | CREATE_NO_THROTTLE | CREATE_CACHED);
} while (bo == NULL && (size>>=1) > sizeof(float)*sna->render.vertex_used);
sna->render.vertices = NULL;
if (bo)
sna->render.vertices = kgem_bo_map(&sna->kgem, bo);
if (sna->render.vertices != NULL) {
DBG(("%s: new vbo: %d / %d\n", __FUNCTION__,
sna->render.vertex_used, __kgem_bo_size(bo)/4));
assert(sizeof(float)*sna->render.vertex_used <= __kgem_bo_size(bo));
memcpy(sna->render.vertices,
sna->render.vertex_data,
sizeof(float)*sna->render.vertex_used);
size = __kgem_bo_size(bo)/4;
if (size >= UINT16_MAX)
size = UINT16_MAX - 1;
sna->render.vbo = bo;
sna->render.vertex_size = size;
} else {
DBG(("%s: tmp vbo: %d\n", __FUNCTION__,
sna->render.vertex_used));
if (bo)
kgem_bo_destroy(&sna->kgem, bo);
bo = kgem_create_linear(&sna->kgem,
4*sna->render.vertex_used,
CREATE_NO_THROTTLE);
@ -195,11 +265,14 @@ void gen4_vertex_close(struct sna *sna)
kgem_bo_destroy(&sna->kgem, bo);
bo = NULL;
}
DBG(("%s: new vbo: %d\n", __FUNCTION__,
sna->render.vertex_used));
assert(sna->render.vbo == NULL);
sna->render.vertices = sna->render.vertex_data;
sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
free_bo = bo;
}
}
}
assert(sna->render.nvertex_reloc);
for (i = 0; i < sna->render.nvertex_reloc; i++) {
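
The allocation path above retries with progressively smaller buffers. A hedged sketch of the pattern in isolation (kgem_create_linear() and the CREATE_* flags are the driver's own, used as shown in the hunk; the wrapper name is hypothetical):

static struct kgem_bo *
vbo_alloc_with_fallback(struct kgem *kgem, unsigned vertex_used)
{
        struct kgem_bo *bo;
        int size = 256 * 1024;

        do {
                bo = kgem_create_linear(kgem, size,
                                        CREATE_GTT_MAP |
                                        CREATE_NO_RETIRE |
                                        CREATE_NO_THROTTLE |
                                        CREATE_CACHED);
        } while (bo == NULL &&
                 (size >>= 1) > (int)(sizeof(float) * vertex_used));

        return bo; /* NULL => caller copies through a tmp bo instead */
}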


@ -6,6 +6,7 @@
#include "sna.h"
#include "sna_render.h"
void gen4_vertex_align(struct sna *sna, const struct sna_composite_op *op);
void gen4_vertex_flush(struct sna *sna);
int gen4_vertex_finish(struct sna *sna);
void gen4_vertex_close(struct sna *sna);


@ -42,6 +42,7 @@
#include "brw/brw.h"
#include "gen5_render.h"
#include "gen4_common.h"
#include "gen4_source.h"
#include "gen4_vertex.h"
@ -719,16 +720,10 @@ gen5_align_vertex(struct sna *sna, const struct sna_composite_op *op)
{
assert(op->floats_per_rect == 3*op->floats_per_vertex);
if (op->floats_per_vertex != sna->render_state.gen5.floats_per_vertex) {
if (sna->render.vertex_size - sna->render.vertex_used < 2*op->floats_per_rect)
gen4_vertex_finish(sna);
DBG(("aligning vertex: was %d, now %d floats per vertex, %d->%d\n",
DBG(("aligning vertex: was %d, now %d floats per vertex\n",
sna->render_state.gen5.floats_per_vertex,
op->floats_per_vertex,
sna->render.vertex_index,
(sna->render.vertex_used + op->floats_per_vertex - 1) / op->floats_per_vertex));
sna->render.vertex_index = (sna->render.vertex_used + op->floats_per_vertex - 1) / op->floats_per_vertex;
sna->render.vertex_used = sna->render.vertex_index * op->floats_per_vertex;
op->floats_per_vertex));
gen4_vertex_align(sna, op);
sna->render_state.gen5.floats_per_vertex = op->floats_per_vertex;
}
}
@ -942,10 +937,14 @@ gen5_emit_vertex_elements(struct sna *sna,
inline static void
gen5_emit_pipe_flush(struct sna *sna)
{
#if 0
OUT_BATCH(GEN5_PIPE_CONTROL | (4 - 2));
OUT_BATCH(GEN5_PIPE_CONTROL_WC_FLUSH);
OUT_BATCH(0);
OUT_BATCH(0);
#else
OUT_BATCH(MI_FLUSH | MI_INHIBIT_RENDER_CACHE_FLUSH);
#endif
}
static void
@ -1311,11 +1310,12 @@ gen5_render_video(struct sna *sna,
if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL)) {
kgem_submit(&sna->kgem);
assert(kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL));
if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL))
return false;
}
gen5_video_bind_surfaces(sna, &tmp);
gen5_align_vertex(sna, &tmp);
gen5_video_bind_surfaces(sna, &tmp);
/* Set up the offset for translating from the given region (in screen
* coordinates) to the backing pixmap.
@ -1452,7 +1452,6 @@ gen5_render_composite(struct sna *sna,
}
if (mask == NULL &&
try_blt(sna, dst, src, width, height) &&
sna_blt_composite(sna, op,
src, dst,
src_x, src_y,
@ -1577,8 +1576,8 @@ gen5_render_composite(struct sna *sna,
goto cleanup_mask;
}
gen5_bind_surfaces(sna, tmp);
gen5_align_vertex(sna, tmp);
gen5_bind_surfaces(sna, tmp);
return true;
cleanup_mask:
@ -1806,8 +1805,8 @@ gen5_render_composite_spans(struct sna *sna,
goto cleanup_src;
}
gen5_bind_surfaces(sna, &tmp->base);
gen5_align_vertex(sna, &tmp->base);
gen5_bind_surfaces(sna, &tmp->base);
return true;
cleanup_src:
@ -1952,7 +1951,10 @@ fallback_blt:
kgem_submit(&sna->kgem);
if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
DBG(("%s: aperture check failed\n", __FUNCTION__));
goto fallback_tiled_src;
kgem_bo_destroy(&sna->kgem, tmp.src.bo);
if (tmp.redirect.real_bo)
kgem_bo_destroy(&sna->kgem, tmp.dst.bo);
goto fallback_blt;
}
}
@ -1963,8 +1965,8 @@ fallback_blt:
src_dx += tmp.src.offset[0];
src_dy += tmp.src.offset[1];
gen5_copy_bind_surfaces(sna, &tmp);
gen5_align_vertex(sna, &tmp);
gen5_copy_bind_surfaces(sna, &tmp);
do {
int n_this_time;
@ -1999,8 +2001,6 @@ fallback_blt:
kgem_bo_destroy(&sna->kgem, tmp.src.bo);
return true;
fallback_tiled_src:
kgem_bo_destroy(&sna->kgem, tmp.src.bo);
fallback_tiled_dst:
if (tmp.redirect.real_bo)
kgem_bo_destroy(&sna->kgem, tmp.dst.bo);
@ -2021,16 +2021,6 @@ fallback_tiled:
}
#endif
static void
gen5_render_flush(struct sna *sna)
{
gen4_vertex_close(sna);
assert(sna->render.vb_id == 0);
assert(sna->render.vertex_offset == 0);
}
static void
gen5_render_context_switch(struct kgem *kgem,
int new_mode)
@ -2060,42 +2050,6 @@ gen5_render_context_switch(struct kgem *kgem,
}
}
static void
discard_vbo(struct sna *sna)
{
kgem_bo_destroy(&sna->kgem, sna->render.vbo);
sna->render.vbo = NULL;
sna->render.vertices = sna->render.vertex_data;
sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
sna->render.vertex_used = 0;
sna->render.vertex_index = 0;
}
static void
gen5_render_retire(struct kgem *kgem)
{
struct sna *sna;
sna = container_of(kgem, struct sna, kgem);
if (kgem->nbatch == 0 && sna->render.vbo && !kgem_bo_is_busy(sna->render.vbo)) {
DBG(("%s: resetting idle vbo\n", __FUNCTION__));
sna->render.vertex_used = 0;
sna->render.vertex_index = 0;
}
}
static void
gen5_render_expire(struct kgem *kgem)
{
struct sna *sna;
sna = container_of(kgem, struct sna, kgem);
if (sna->render.vbo && !sna->render.vertex_used) {
DBG(("%s: discarding vbo\n", __FUNCTION__));
discard_vbo(sna);
}
}
static void gen5_render_reset(struct sna *sna)
{
sna->render_state.gen5.needs_invariant = true;
@ -2107,8 +2061,7 @@ static void gen5_render_reset(struct sna *sna)
sna->render_state.gen5.drawrect_limit = -1;
sna->render_state.gen5.surface_table = -1;
if (sna->render.vbo &&
!kgem_bo_is_mappable(&sna->kgem, sna->render.vbo)) {
if (sna->render.vbo && !kgem_bo_can_map(&sna->kgem, sna->render.vbo)) {
DBG(("%s: discarding unmappable vbo\n", __FUNCTION__));
discard_vbo(sna);
}
@ -2351,8 +2304,8 @@ const char *gen5_render_init(struct sna *sna, const char *backend)
return backend;
sna->kgem.context_switch = gen5_render_context_switch;
sna->kgem.retire = gen5_render_retire;
sna->kgem.expire = gen5_render_expire;
sna->kgem.retire = gen4_render_retire;
sna->kgem.expire = gen4_render_expire;
#if 0
#if !NO_COMPOSITE
@ -2362,7 +2315,7 @@ const char *gen5_render_init(struct sna *sna, const char *backend)
#if !NO_COMPOSITE_SPANS
sna->render.check_composite_spans = gen5_check_composite_spans;
sna->render.composite_spans = gen5_render_composite_spans;
if (sna->PciInfo->device_id == 0x0044)
if (intel_get_device_id(sna->scrn) == 0x0044)
sna->render.prefer_gpu |= PREFER_GPU_SPANS;
#endif
sna->render.video = gen5_render_video;
@ -2378,7 +2331,7 @@ const char *gen5_render_init(struct sna *sna, const char *backend)
sna->render.blit_tex = gen5_blit_tex;
sna->render.caps = HW_BIT_BLIT | HW_TEX_BLIT;
sna->render.flush = gen5_render_flush;
sna->render.flush = gen4_render_flush;
sna->render.reset = gen5_render_reset;
sna->render.fini = gen5_render_fini;
@ -2466,8 +2419,8 @@ gen5_blit_tex(struct sna *sna,
kgem_submit(&sna->kgem);
}
gen5_bind_surfaces(sna, tmp);
gen5_align_vertex(sna, tmp);
return true;
gen5_bind_surfaces(sna, tmp);
return true;
}


@ -0,0 +1,71 @@
/*
* Copyright © 2011-2013 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Chris Wilson <chris@chris-wilson.co.uk>
*
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "gen6_common.h"
#include "gen4_vertex.h"
void
gen6_render_context_switch(struct kgem *kgem,
int new_mode)
{
if (kgem->nbatch) {
DBG(("%s: from %d to %d, submit batch\n", __FUNCTION__, kgem->mode, new_mode));
_kgem_submit(kgem);
}
if (kgem->nexec) {
DBG(("%s: from %d to %d, reset incomplete batch\n", __FUNCTION__, kgem->mode, new_mode));
kgem_reset(kgem);
}
assert(kgem->nbatch == 0);
assert(kgem->nreloc == 0);
assert(kgem->nexec == 0);
kgem->ring = new_mode;
}
void gen6_render_retire(struct kgem *kgem)
{
struct sna *sna;
if (kgem->ring && (kgem->has_semaphores || !kgem->need_retire))
kgem->ring = kgem->mode;
sna = container_of(kgem, struct sna, kgem);
if (sna->render.nvertex_reloc == 0 &&
sna->render.vbo &&
!kgem_bo_is_busy(sna->render.vbo)) {
DBG(("%s: resetting idle vbo\n", __FUNCTION__));
sna->render.vertex_used = 0;
sna->render.vertex_index = 0;
}
}
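
A wiring sketch showing how the init paths point the kgem callbacks at these shared helpers (it mirrors the gen6_render_init()/gen7_render_init() hunks later in this commit; the wrapper function is hypothetical):

static void
wire_render_hooks(struct sna *sna)
{
        sna->kgem.context_switch = gen6_render_context_switch;
        sna->kgem.retire = gen6_render_retire;  /* also used by gen7 */
        sna->kgem.expire = gen4_render_expire;  /* shared vbo expiry */
}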


@ -0,0 +1,139 @@
/*
* Copyright © 2011-2013 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Chris Wilson <chris@chris-wilson.co.uk>
*
*/
#ifndef GEN6_COMMON_H
#define GEN6_COMMON_H
#include "sna.h"
#define NO_RING_SWITCH 0
#define PREFER_RENDER 0
static inline bool is_uncached(struct sna *sna,
struct kgem_bo *bo)
{
return bo->scanout && !sna->kgem.has_wt;
}
inline static bool can_switch_to_blt(struct sna *sna,
struct kgem_bo *bo,
unsigned flags)
{
return false;
}
inline static bool can_switch_to_render(struct sna *sna,
struct kgem_bo *bo)
{
if (sna->kgem.ring == KGEM_RENDER)
return true;
if (NO_RING_SWITCH)
return false;
if (!sna->kgem.has_semaphores)
return false;
if (bo && !RQ_IS_BLT(bo->rq) && !is_uncached(sna, bo))
return true;
return !kgem_ring_is_idle(&sna->kgem, KGEM_RENDER);
}
static inline bool untiled_tlb_miss(struct kgem_bo *bo)
{
if (kgem_bo_is_render(bo))
return false;
return bo->tiling == I915_TILING_NONE && bo->pitch >= 4096;
}
static int prefer_blt_bo(struct sna *sna, struct kgem_bo *bo)
{
if (bo->rq)
return RQ_IS_BLT(bo->rq);
if (sna->flags & SNA_POWERSAVE)
return true;
return bo->tiling == I915_TILING_NONE || is_uncached(sna, bo);
}
inline static bool force_blt_ring(struct sna *sna)
{
if (sna->flags & SNA_POWERSAVE)
return true;
if (sna->kgem.mode == KGEM_RENDER)
return false;
if (sna->render_state.gt < 2)
return true;
return false;
}
inline static bool prefer_blt_ring(struct sna *sna,
struct kgem_bo *bo,
unsigned flags)
{
assert(!force_blt_ring(sna));
assert(!kgem_bo_is_render(bo));
return can_switch_to_blt(sna, bo, flags);
}
inline static bool prefer_render_ring(struct sna *sna,
struct kgem_bo *bo)
{
if (sna->flags & SNA_POWERSAVE)
return false;
if (sna->render_state.gt < 2)
return false;
return can_switch_to_render(sna, bo);
}
inline static bool
prefer_blt_composite(struct sna *sna, struct sna_composite_op *tmp)
{
return false;
}
static inline bool prefer_blt_fill(struct sna *sna,
struct kgem_bo *bo,
unsigned flags)
{
return false;
}
void gen6_render_context_switch(struct kgem *kgem, int new_mode);
void gen6_render_retire(struct kgem *kgem);
#endif /* GEN6_COMMON_H */
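
A hedged example of how a copy path combines these predicates (it mirrors the prefer_blt_copy() hunk in gen6_render.c below; the wrapper name is hypothetical). The ordering matters: the early returns keep the asserts inside prefer_blt_ring() satisfied.

static bool
choose_blt_for_copy(struct sna *sna,
                    struct kgem_bo *src_bo, struct kgem_bo *dst_bo,
                    unsigned flags)
{
        if (untiled_tlb_miss(src_bo) || untiled_tlb_miss(dst_bo))
                return true;    /* linear + wide pitch: BLT wins */
        if (force_blt_ring(sna))
                return true;    /* powersave or GT1 */
        if (kgem_bo_is_render(src_bo) || kgem_bo_is_render(dst_bo))
                return false;   /* already living on the render ring */
        if (prefer_render_ring(sna, dst_bo))
                return false;
        return prefer_blt_ring(sna, dst_bo, flags);
}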


@ -39,6 +39,8 @@
#include "brw/brw.h"
#include "gen6_render.h"
#include "gen6_common.h"
#include "gen4_common.h"
#include "gen4_source.h"
#include "gen4_vertex.h"
@ -74,6 +76,7 @@ struct gt_info {
int max_vs_entries;
int max_gs_entries;
} urb;
int gt;
};
static const struct gt_info gt1_info = {
@ -82,6 +85,7 @@ static const struct gt_info gt1_info = {
.max_gs_threads = 21,
.max_wm_threads = 40,
.urb = { 32, 256, 256 },
.gt = 1,
};
static const struct gt_info gt2_info = {
@ -90,6 +94,7 @@ static const struct gt_info gt2_info = {
.max_gs_threads = 60,
.max_wm_threads = 80,
.urb = { 64, 256, 256 },
.gt = 2,
};
static const uint32_t ps_kernel_packed[][4] = {
@ -872,21 +877,22 @@ gen6_emit_state(struct sna *sna,
const struct sna_composite_op *op,
uint16_t wm_binding_table)
{
bool need_stall = wm_binding_table & 1;
bool need_flush, need_stall;
assert(op->dst.bo->exec);
if (gen6_emit_cc(sna, GEN6_BLEND(op->u.gen6.flags)))
need_stall = false;
need_flush =
gen6_emit_cc(sna, GEN6_BLEND(op->u.gen6.flags)) &&
wm_binding_table & 1;
gen6_emit_sampler(sna, GEN6_SAMPLER(op->u.gen6.flags));
gen6_emit_sf(sna, GEN6_VERTEX(op->u.gen6.flags) >> 2);
gen6_emit_wm(sna, GEN6_KERNEL(op->u.gen6.flags), GEN6_VERTEX(op->u.gen6.flags) >> 2);
gen6_emit_vertex_elements(sna, op);
need_stall |= gen6_emit_binding_table(sna, wm_binding_table & ~1);
need_stall = gen6_emit_binding_table(sna, wm_binding_table & ~1);
if (gen6_emit_drawing_rectangle(sna, op))
need_stall = false;
if (kgem_bo_is_dirty(op->src.bo) || kgem_bo_is_dirty(op->mask.bo)) {
if (need_flush || kgem_bo_is_dirty(op->src.bo) || kgem_bo_is_dirty(op->mask.bo)) {
gen6_emit_flush(sna);
kgem_clear_dirty(&sna->kgem);
assert(op->dst.bo->exec);
@ -1317,16 +1323,10 @@ gen6_align_vertex(struct sna *sna, const struct sna_composite_op *op)
{
assert (sna->render.vertex_offset == 0);
if (op->floats_per_vertex != sna->render_state.gen6.floats_per_vertex) {
if (sna->render.vertex_size - sna->render.vertex_used < 2*op->floats_per_rect)
gen4_vertex_finish(sna);
DBG(("aligning vertex: was %d, now %d floats per vertex, %d->%d\n",
DBG(("aligning vertex: was %d, now %d floats per vertex\n",
sna->render_state.gen6.floats_per_vertex,
op->floats_per_vertex,
sna->render.vertex_index,
(sna->render.vertex_used + op->floats_per_vertex - 1) / op->floats_per_vertex));
sna->render.vertex_index = (sna->render.vertex_used + op->floats_per_vertex - 1) / op->floats_per_vertex;
sna->render.vertex_used = sna->render.vertex_index * op->floats_per_vertex;
op->floats_per_vertex));
gen4_vertex_align(sna, op);
sna->render_state.gen6.floats_per_vertex = op->floats_per_vertex;
}
assert((sna->render.vertex_used % op->floats_per_vertex) == 0);
@ -1657,8 +1657,8 @@ gen6_render_video(struct sna *sna,
_kgem_set_mode(&sna->kgem, KGEM_RENDER);
}
gen6_emit_video_state(sna, &tmp);
gen6_align_vertex(sna, &tmp);
gen6_emit_video_state(sna, &tmp);
/* Set up the offset for translating from the given region (in screen
* coordinates) to the backing pixmap.
@ -1853,9 +1853,9 @@ gen6_composite_set_target(struct sna *sna,
} else
sna_render_picture_extents(dst, &box);
// op->dst.bo = sna_drawable_use_bo (dst->pDrawable,
// PREFER_GPU | FORCE_GPU | RENDER_GPU,
// &box, &op->damage);
op->dst.bo = sna_drawable_use_bo(dst->pDrawable,
PREFER_GPU | FORCE_GPU | RENDER_GPU,
&box, &op->damage);
if (op->dst.bo == NULL)
return false;
@ -1925,7 +1925,13 @@ gen6_render_composite(struct sna *sna,
return true;
if (gen6_composite_fallback(sna, src, mask, dst))
return false;
return (mask == NULL &&
sna_blt_composite(sna, op,
src, dst,
src_x, src_y,
dst_x, dst_y,
width, height,
tmp, true));
if (need_tiling(sna, width, height))
return sna_tiling_composite(op, src, mask, dst,
@ -2051,8 +2057,8 @@ gen6_render_composite(struct sna *sna,
_kgem_set_mode(&sna->kgem, KGEM_RENDER);
}
gen6_align_vertex(sna, tmp);
gen6_emit_composite_state(sna, tmp);
gen6_align_vertex(sna, tmp);
return true;
cleanup_mask:
@ -2284,8 +2290,8 @@ gen6_render_composite_spans(struct sna *sna,
_kgem_set_mode(&sna->kgem, KGEM_RENDER);
}
gen6_emit_composite_state(sna, &tmp->base);
gen6_align_vertex(sna, &tmp->base);
gen6_emit_composite_state(sna, &tmp->base);
return true;
cleanup_src:
@ -2351,10 +2357,16 @@ static inline bool prefer_blt_copy(struct sna *sna,
untiled_tlb_miss(dst_bo))
return true;
if (force_blt_ring(sna))
return true;
if (kgem_bo_is_render(dst_bo) ||
kgem_bo_is_render(src_bo))
return false;
if (prefer_render_ring(sna, dst_bo))
return false;
if (!prefer_blt_ring(sna, dst_bo, flags))
return false;
@ -2553,13 +2565,17 @@ fallback_blt:
if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, tmp.src.bo, NULL)) {
DBG(("%s: too large for a single operation\n",
__FUNCTION__));
goto fallback_tiled_src;
if (tmp.src.bo != src_bo)
kgem_bo_destroy(&sna->kgem, tmp.src.bo);
if (tmp.redirect.real_bo)
kgem_bo_destroy(&sna->kgem, tmp.dst.bo);
goto fallback_blt;
}
_kgem_set_mode(&sna->kgem, KGEM_RENDER);
}
gen6_emit_copy_state(sna, &tmp);
gen6_align_vertex(sna, &tmp);
gen6_emit_copy_state(sna, &tmp);
do {
int16_t *v;
@ -2596,9 +2612,6 @@ fallback_blt:
kgem_bo_destroy(&sna->kgem, tmp.src.bo);
return true;
fallback_tiled_src:
if (tmp.src.bo != src_bo)
kgem_bo_destroy(&sna->kgem, tmp.src.bo);
fallback_tiled_dst:
if (tmp.redirect.real_bo)
kgem_bo_destroy(&sna->kgem, tmp.dst.bo);
@ -2720,8 +2733,8 @@ fallback:
_kgem_set_mode(&sna->kgem, KGEM_RENDER);
}
gen6_emit_copy_state(sna, &op->base);
gen6_align_vertex(sna, &op->base);
gen6_emit_copy_state(sna, &op->base);
op->blt = gen6_render_copy_blt;
op->done = gen6_render_copy_done;
@ -2760,24 +2773,6 @@ gen6_emit_fill_state(struct sna *sna, const struct sna_composite_op *op)
gen6_emit_state(sna, op, offset | dirty);
}
static inline bool prefer_blt_fill(struct sna *sna,
struct kgem_bo *bo)
{
if (PREFER_RENDER)
return PREFER_RENDER < 0;
if (kgem_bo_is_render(bo))
return false;
if (untiled_tlb_miss(bo))
return true;
if (!prefer_blt_ring(sna, bo, 0))
return false;
return prefer_blt_bo(sna, bo);
}
static bool
gen6_render_fill_boxes(struct sna *sna,
CARD8 op,
@ -2799,7 +2794,8 @@ gen6_render_fill_boxes(struct sna *sna,
return false;
}
if (prefer_blt_fill(sna, dst_bo) || !gen6_check_dst_format(format)) {
if (prefer_blt_fill(sna, dst_bo, FILL_BOXES) ||
!gen6_check_dst_format(format)) {
uint8_t alu = GXinvalid;
if (op <= PictOpSrc) {
@ -2874,13 +2870,14 @@ gen6_render_fill_boxes(struct sna *sna,
assert(GEN6_SAMPLER(tmp.u.gen6.flags) == FILL_SAMPLER);
assert(GEN6_VERTEX(tmp.u.gen6.flags) == FILL_VERTEX);
kgem_set_mode(&sna->kgem, KGEM_RENDER, dst_bo);
if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
kgem_submit(&sna->kgem);
assert(kgem_check_bo(&sna->kgem, dst_bo, NULL));
}
gen6_emit_fill_state(sna, &tmp);
gen6_align_vertex(sna, &tmp);
gen6_emit_fill_state(sna, &tmp);
do {
int n_this_time;
@ -3009,12 +3006,12 @@ gen6_render_op_fill_done(struct sna *sna, const struct sna_fill_op *op)
static bool
gen6_render_fill(struct sna *sna, uint8_t alu,
PixmapPtr dst, struct kgem_bo *dst_bo,
uint32_t color,
uint32_t color, unsigned flags,
struct sna_fill_op *op)
{
DBG(("%s: (alu=%d, color=%x)\n", __FUNCTION__, alu, color));
if (prefer_blt_fill(sna, dst_bo) &&
if (prefer_blt_fill(sna, dst_bo, flags) &&
sna_blt_fill(sna, alu,
dst_bo, dst->drawable.bitsPerPixel,
color,
@ -3053,13 +3050,14 @@ gen6_render_fill(struct sna *sna, uint8_t alu,
assert(GEN6_SAMPLER(op->base.u.gen6.flags) == FILL_SAMPLER);
assert(GEN6_VERTEX(op->base.u.gen6.flags) == FILL_VERTEX);
kgem_set_mode(&sna->kgem, KGEM_RENDER, dst_bo);
if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
kgem_submit(&sna->kgem);
assert(kgem_check_bo(&sna->kgem, dst_bo, NULL));
}
gen6_emit_fill_state(sna, &op->base);
gen6_align_vertex(sna, &op->base);
gen6_emit_fill_state(sna, &op->base);
op->blt = gen6_render_op_fill_blt;
op->box = gen6_render_op_fill_box;
@ -3097,7 +3095,7 @@ gen6_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
int16_t *v;
/* Prefer to use the BLT if already engaged */
if (prefer_blt_fill(sna, bo) &&
if (prefer_blt_fill(sna, bo, FILL_BOXES) &&
gen6_render_fill_one_try_blt(sna, dst, bo, color,
x1, y1, x2, y2, alu))
return true;
@ -3133,6 +3131,7 @@ gen6_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
assert(GEN6_SAMPLER(tmp.u.gen6.flags) == FILL_SAMPLER);
assert(GEN6_VERTEX(tmp.u.gen6.flags) == FILL_VERTEX);
kgem_set_mode(&sna->kgem, KGEM_RENDER, bo);
if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
kgem_submit(&sna->kgem);
if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
@ -3141,8 +3140,8 @@ gen6_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
}
}
gen6_emit_fill_state(sna, &tmp);
gen6_align_vertex(sna, &tmp);
gen6_emit_fill_state(sna, &tmp);
gen6_get_rectangles(sna, &tmp, 1, gen6_emit_fill_state);
@ -3219,6 +3218,7 @@ gen6_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
assert(GEN6_SAMPLER(tmp.u.gen6.flags) == FILL_SAMPLER);
assert(GEN6_VERTEX(tmp.u.gen6.flags) == FILL_VERTEX);
kgem_set_mode(&sna->kgem, KGEM_RENDER, bo);
if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
kgem_submit(&sna->kgem);
if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
@ -3227,8 +3227,8 @@ gen6_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
}
}
gen6_emit_fill_state(sna, &tmp);
gen6_align_vertex(sna, &tmp);
gen6_emit_fill_state(sna, &tmp);
gen6_get_rectangles(sna, &tmp, 1, gen6_emit_fill_state);
@ -3251,60 +3251,6 @@ gen6_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
}
#endif
static void gen6_render_flush(struct sna *sna)
{
gen4_vertex_close(sna);
assert(sna->render.vb_id == 0);
assert(sna->render.vertex_offset == 0);
}
static void
gen6_render_context_switch(struct kgem *kgem,
int new_mode)
{
if (kgem->nbatch) {
DBG(("%s: from %d to %d\n", __FUNCTION__, kgem->mode, new_mode));
_kgem_submit(kgem);
}
kgem->ring = new_mode;
}
static void
gen6_render_retire(struct kgem *kgem)
{
struct sna *sna;
if (kgem->ring && (kgem->has_semaphores || !kgem->need_retire))
kgem->ring = kgem->mode;
sna = container_of(kgem, struct sna, kgem);
if (kgem->nbatch == 0 && sna->render.vbo && !kgem_bo_is_busy(sna->render.vbo)) {
DBG(("%s: resetting idle vbo handle=%d\n", __FUNCTION__, sna->render.vbo->handle));
sna->render.vertex_used = 0;
sna->render.vertex_index = 0;
}
}
static void
gen6_render_expire(struct kgem *kgem)
{
struct sna *sna;
sna = container_of(kgem, struct sna, kgem);
if (sna->render.vbo && !sna->render.vertex_used) {
DBG(("%s: discarding vbo handle=%d\n", __FUNCTION__, sna->render.vbo->handle));
kgem_bo_destroy(kgem, sna->render.vbo);
assert(!sna->render.active);
sna->render.vbo = NULL;
sna->render.vertices = sna->render.vertex_data;
sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
sna->render.vertex_used = 0;
sna->render.vertex_index = 0;
}
}
static void gen6_render_reset(struct sna *sna)
{
sna->render_state.gen6.needs_invariant = true;
@ -3320,6 +3266,11 @@ static void gen6_render_reset(struct sna *sna)
sna->render_state.gen6.drawrect_limit = -1;
sna->render_state.gen6.surface_table = -1;
if (sna->render.vbo && !kgem_bo_can_map(&sna->kgem, sna->render.vbo)) {
DBG(("%s: discarding unmappable vbo\n", __FUNCTION__));
discard_vbo(sna);
}
sna->render.vertex_offset = 0;
sna->render.nvertex_reloc = 0;
sna->render.vb_id = 0;
@ -3330,17 +3281,17 @@ static void gen6_render_fini(struct sna *sna)
kgem_bo_destroy(&sna->kgem, sna->render_state.gen6.general_bo);
}
static bool is_gt2(struct sna *sna)
static bool is_gt2(struct sna *sna, int devid)
{
return sna->PciInfo->device_id & 0x30;
return devid & 0x30;
}
static bool is_mobile(struct sna *sna)
static bool is_mobile(struct sna *sna, int devid)
{
return (sna->PciInfo->device_id & 0xf) == 0x6;
return (devid & 0xf) == 0x6;
}
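
A worked example of the bit tests, using well-known Sandy Bridge PCI IDs (0x0102/0x0112 desktop GT1/GT2, 0x0116 mobile GT2; values assumed, not part of this diff):

#include <assert.h>

static void
devid_example(void)
{
        assert((0x0112 & 0x30) != 0);   /* GT2: picks gt2_info */
        assert((0x0102 & 0x30) == 0);   /* GT1 desktop stays gt1_info */
        assert((0x0116 & 0xf) == 0x6);  /* mobile variant */
}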
static bool gen6_render_setup(struct sna *sna)
static bool gen6_render_setup(struct sna *sna, int devid)
{
struct gen6_render_state *state = &sna->render_state.gen6;
struct sna_static_stream general;
@ -3348,8 +3299,9 @@ static bool gen6_render_setup(struct sna *sna)
int i, j, k, l, m;
state->info = &gt1_info;
if (is_gt2(sna))
if (is_gt2(sna, devid))
state->info = &gt2_info; /* XXX requires GT_MODE WiZ disabled */
state->gt = state->info->gt;
sna_static_stream_init(&general);
@ -3420,12 +3372,14 @@ static bool gen6_render_setup(struct sna *sna)
const char *gen6_render_init(struct sna *sna, const char *backend)
{
if (!gen6_render_setup(sna))
int devid = intel_get_device_id(sna);
if (!gen6_render_setup(sna, devid))
return backend;
sna->kgem.context_switch = gen6_render_context_switch;
sna->kgem.retire = gen6_render_retire;
sna->kgem.expire = gen6_render_expire;
sna->kgem.expire = gen4_render_expire;
#if 0
#if !NO_COMPOSITE
@ -3436,7 +3390,7 @@ const char *gen6_render_init(struct sna *sna, const char *backend)
#if !NO_COMPOSITE_SPANS
sna->render.check_composite_spans = gen6_check_composite_spans;
sna->render.composite_spans = gen6_render_composite_spans;
if (is_mobile(sna))
if (is_mobile(sna, devid))
sna->render.prefer_gpu |= PREFER_GPU_SPANS;
#endif
sna->render.video = gen6_render_video;
@ -3465,7 +3419,7 @@ const char *gen6_render_init(struct sna *sna, const char *backend)
sna->render.caps = HW_BIT_BLIT | HW_TEX_BLIT;
sna->render.blit_tex = gen6_blit_tex;
sna->render.flush = gen6_render_flush;
sna->render.flush = gen4_render_flush;
sna->render.reset = gen6_render_reset;
sna->render.fini = gen6_render_fini;
@ -3568,7 +3522,7 @@ gen6_blit_tex(struct sna *sna,
// tmp->box = gen6_render_composite_box;
tmp->done = gen6_render_composite_done;
kgem_set_mode(&sna->kgem, KGEM_RENDER, tmp->dst.bo);
kgem_set_mode(&sna->kgem, KGEM_RENDER, dst_bo);
if (!kgem_check_bo(&sna->kgem,
tmp->dst.bo, tmp->src.bo, tmp->mask.bo,
NULL)) {
@ -3576,8 +3530,8 @@ gen6_blit_tex(struct sna *sna,
_kgem_set_mode(&sna->kgem, KGEM_RENDER);
}
gen6_emit_composite_state(sna, tmp);
gen6_align_vertex(sna, tmp);
return true;
gen6_emit_composite_state(sna, tmp);
return true;
}


@ -42,10 +42,14 @@
#include "brw/brw.h"
#include "gen7_render.h"
#include "gen4_common.h"
#include "gen4_source.h"
#include "gen4_vertex.h"
#include "gen6_common.h"
#define ALWAYS_INVALIDATE 0
#define ALWAYS_FLUSH 0
#define ALWAYS_STALL 0
#define NO_COMPOSITE 0
#define NO_COMPOSITE_SPANS 0
@ -1022,33 +1026,51 @@ gen7_emit_state(struct sna *sna,
const struct sna_composite_op *op,
uint16_t wm_binding_table)
{
bool need_invalidate;
bool need_flush;
bool need_stall;
assert(op->dst.bo->exec);
need_invalidate = kgem_bo_is_dirty(op->src.bo) || kgem_bo_is_dirty(op->mask.bo);
if (ALWAYS_INVALIDATE)
need_invalidate = true;
need_flush =
sna->render_state.gen7.emit_flush &&
wm_binding_table & GEN7_READS_DST(op->u.gen7.flags);
if (ALWAYS_FLUSH)
need_flush = true;
wm_binding_table &= ~1;
need_stall = sna->render_state.gen7.surface_table != wm_binding_table;
need_stall &= gen7_emit_drawing_rectangle(sna, op);
if (ALWAYS_STALL)
need_stall = true;
if (need_invalidate) {
gen7_emit_pipe_invalidate(sna);
kgem_clear_dirty(&sna->kgem);
assert(op->dst.bo->exec);
kgem_bo_mark_dirty(op->dst.bo);
need_flush = false;
need_stall = false;
}
if (need_flush) {
gen7_emit_pipe_flush(sna, need_stall);
need_stall = false;
}
if (need_stall)
gen7_emit_pipe_stall(sna);
gen7_emit_cc(sna, GEN7_BLEND(op->u.gen7.flags));
gen7_emit_sampler(sna, GEN7_SAMPLER(op->u.gen7.flags));
gen7_emit_sf(sna, GEN7_VERTEX(op->u.gen7.flags) >> 2);
gen7_emit_wm(sna, GEN7_KERNEL(op->u.gen7.flags));
gen7_emit_vertex_elements(sna, op);
need_stall = gen7_emit_binding_table(sna, wm_binding_table);
need_stall &= gen7_emit_drawing_rectangle(sna, op);
if (ALWAYS_FLUSH || kgem_bo_is_dirty(op->src.bo) || kgem_bo_is_dirty(op->mask.bo)) {
gen7_emit_pipe_invalidate(sna);
kgem_clear_dirty(&sna->kgem);
assert(op->dst.bo->exec);
kgem_bo_mark_dirty(op->dst.bo);
sna->render_state.gen7.emit_flush = false;
need_stall = false;
}
if (sna->render_state.gen7.emit_flush) {
gen7_emit_pipe_flush(sna, need_stall);
need_stall = false;
}
if (need_stall)
gen7_emit_pipe_stall(sna);
gen7_emit_binding_table(sna, wm_binding_table);
sna->render_state.gen7.emit_flush = GEN7_READS_DST(op->u.gen7.flags);
}
@ -1404,12 +1426,14 @@ static void gen7_emit_composite_state(struct sna *sna,
const struct sna_composite_op *op)
{
uint32_t *binding_table;
uint16_t offset;
uint16_t offset, dirty;
gen7_get_batch(sna, op);
binding_table = gen7_composite_get_binding_table(sna, &offset);
dirty = kgem_bo_is_dirty(op->dst.bo);
binding_table[0] =
gen7_bind_bo(sna,
op->dst.bo, op->dst.width, op->dst.height,
@ -1438,23 +1462,16 @@ static void gen7_emit_composite_state(struct sna *sna,
offset = sna->render_state.gen7.surface_table;
}
gen7_emit_state(sna, op, offset);
gen7_emit_state(sna, op, offset | dirty);
}
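
The low bit of the surface-table offset is reused here to carry the destination's dirty state into gen7_emit_state(), which masks it off with wm_binding_table &= ~1. A small sketch of the convention (illustrative values):

#include <assert.h>
#include <stdint.h>

static void
dirty_bit_example(void)
{
        uint16_t offset = 0x40;  /* table offsets are aligned, bit 0 free */
        uint16_t dirty = 1;      /* kgem_bo_is_dirty(op->dst.bo) */
        uint16_t packed = offset | dirty;

        assert((packed & ~1) == offset);  /* recover the offset */
        assert((packed & 1) == dirty);    /* recover the flag */
}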
static void
gen7_align_vertex(struct sna *sna, const struct sna_composite_op *op)
{
if (op->floats_per_vertex != sna->render_state.gen7.floats_per_vertex) {
if (sna->render.vertex_size - sna->render.vertex_used < 2*op->floats_per_rect)
gen4_vertex_finish(sna);
DBG(("aligning vertex: was %d, now %d floats per vertex, %d->%d\n",
sna->render_state.gen7.floats_per_vertex,
op->floats_per_vertex,
sna->render.vertex_index,
(sna->render.vertex_used + op->floats_per_vertex - 1) / op->floats_per_vertex));
sna->render.vertex_index = (sna->render.vertex_used + op->floats_per_vertex - 1) / op->floats_per_vertex;
sna->render.vertex_used = sna->render.vertex_index * op->floats_per_vertex;
DBG(("aligning vertex: was %d, now %d floats per vertex\n",
sna->render_state.gen7.floats_per_vertex, op->floats_per_vertex));
gen4_vertex_align(sna, op);
sna->render_state.gen7.floats_per_vertex = op->floats_per_vertex;
}
}
@ -1548,7 +1565,7 @@ static void gen7_emit_video_state(struct sna *sna,
int src_height[6];
int src_pitch[6];
uint32_t *binding_table;
uint16_t offset;
uint16_t offset, dirty;
int n_src, n;
gen7_get_batch(sna, op);
@ -1586,6 +1603,8 @@ static void gen7_emit_video_state(struct sna *sna,
binding_table = gen7_composite_get_binding_table(sna, &offset);
dirty = kgem_bo_is_dirty(op->dst.bo);
binding_table[0] =
gen7_bind_bo(sna,
op->dst.bo, op->dst.width, op->dst.height,
@ -1602,7 +1621,7 @@ static void gen7_emit_video_state(struct sna *sna,
src_surf_format);
}
gen7_emit_state(sna, op, offset);
gen7_emit_state(sna, op, offset | dirty);
}
static bool
@ -1669,12 +1688,14 @@ gen7_render_video(struct sna *sna,
kgem_set_mode(&sna->kgem, KGEM_RENDER, tmp.dst.bo);
if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL)) {
kgem_submit(&sna->kgem);
assert(kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL));
if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL))
return false;
_kgem_set_mode(&sna->kgem, KGEM_RENDER);
}
gen7_emit_video_state(sna, &tmp);
gen7_align_vertex(sna, &tmp);
gen7_emit_video_state(sna, &tmp);
/* Set up the offset for translating from the given region (in screen
* coordinates) to the backing pixmap.
@ -1874,7 +1895,8 @@ gen7_render_fill_boxes(struct sna *sna,
return false;
}
if (prefer_blt_fill(sna, dst_bo) || !gen7_check_dst_format(format)) {
if (prefer_blt_fill(sna, dst_bo, FILL_BOXES) ||
!gen7_check_dst_format(format)) {
uint8_t alu = GXinvalid;
if (op <= PictOpSrc) {
@ -1949,11 +1971,17 @@ gen7_render_fill_boxes(struct sna *sna,
kgem_set_mode(&sna->kgem, KGEM_RENDER, dst_bo);
if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
kgem_submit(&sna->kgem);
assert(kgem_check_bo(&sna->kgem, dst_bo, NULL));
if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
kgem_bo_destroy(&sna->kgem, tmp.src.bo);
if (tmp.redirect.real_bo)
kgem_bo_destroy(&sna->kgem, tmp.dst.bo);
return false;
}
_kgem_set_mode(&sna->kgem, KGEM_RENDER);
}
gen7_emit_fill_state(sna, &tmp);
gen7_align_vertex(sna, &tmp);
gen7_emit_fill_state(sna, &tmp);
do {
int n_this_time;
@ -1987,60 +2015,6 @@ gen7_render_fill_boxes(struct sna *sna,
}
#endif
static void gen7_render_flush(struct sna *sna)
{
gen4_vertex_close(sna);
assert(sna->render.vb_id == 0);
assert(sna->render.vertex_offset == 0);
}
static void
gen7_render_context_switch(struct kgem *kgem,
int new_mode)
{
if (kgem->nbatch) {
DBG(("%s: switch rings %d -> %d\n",
__FUNCTION__, kgem->mode, new_mode));
_kgem_submit(kgem);
}
kgem->ring = new_mode;
}
static void
gen7_render_retire(struct kgem *kgem)
{
struct sna *sna;
if (kgem->ring && (kgem->has_semaphores || !kgem->need_retire))
kgem->ring = kgem->mode;
sna = container_of(kgem, struct sna, kgem);
if (kgem->nbatch == 0 && sna->render.vbo && !kgem_bo_is_busy(sna->render.vbo)) {
DBG(("%s: resetting idle vbo\n", __FUNCTION__));
sna->render.vertex_used = 0;
sna->render.vertex_index = 0;
}
}
static void
gen7_render_expire(struct kgem *kgem)
{
struct sna *sna;
sna = container_of(kgem, struct sna, kgem);
if (sna->render.vbo && !sna->render.vertex_used) {
DBG(("%s: discarding vbo\n", __FUNCTION__));
kgem_bo_destroy(kgem, sna->render.vbo);
sna->render.vbo = NULL;
sna->render.vertices = sna->render.vertex_data;
sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
sna->render.vertex_used = 0;
sna->render.vertex_index = 0;
}
}
static void gen7_render_reset(struct sna *sna)
{
sna->render_state.gen7.emit_flush = false;
@ -2056,6 +2030,11 @@ static void gen7_render_reset(struct sna *sna)
sna->render_state.gen7.drawrect_limit = -1;
sna->render_state.gen7.surface_table = -1;
if (sna->render.vbo && !kgem_bo_can_map(&sna->kgem, sna->render.vbo)) {
DBG(("%s: discarding unmappable vbo\n", __FUNCTION__));
discard_vbo(sna);
}
sna->render.vertex_offset = 0;
sna->render.nvertex_reloc = 0;
sna->render.vb_id = 0;
@ -2066,23 +2045,23 @@ static void gen7_render_fini(struct sna *sna)
kgem_bo_destroy(&sna->kgem, sna->render_state.gen7.general_bo);
}
static bool is_gt3(struct sna *sna)
static bool is_gt3(struct sna *sna, int devid)
{
assert(sna->kgem.gen == 075);
return sna->PciInfo->device_id & 0x20;
return devid & 0x20;
}
static bool is_gt2(struct sna *sna)
static bool is_gt2(struct sna *sna, int devid)
{
return sna->PciInfo->device_id & (is_hsw(sna)? 0x30 : 0x20);
return devid & (is_hsw(sna)? 0x30 : 0x20);
}
static bool is_mobile(struct sna *sna)
static bool is_mobile(struct sna *sna, int devid)
{
return (sna->PciInfo->device_id & 0xf) == 0x6;
return (devid & 0xf) == 0x6;
}
static bool gen7_render_setup(struct sna *sna)
static bool gen7_render_setup(struct sna *sna, int devid)
{
struct gen7_render_state *state = &sna->render_state.gen7;
struct sna_static_stream general;
@ -2091,19 +2070,19 @@ static bool gen7_render_setup(struct sna *sna)
if (is_ivb(sna)) {
state->info = &ivb_gt_info;
if (sna->PciInfo->device_id & 0xf) {
if (devid & 0xf) {
state->info = &ivb_gt1_info;
if (is_gt2(sna))
if (is_gt2(sna, devid))
state->info = &ivb_gt2_info; /* XXX requires GT_MODE WiZ disabled */
}
} else if (is_byt(sna)) {
state->info = &byt_gt_info;
} else if (is_hsw(sna)) {
state->info = &hsw_gt_info;
if (sna->PciInfo->device_id & 0xf) {
if (is_gt3(sna))
if (devid & 0xf) {
if (is_gt3(sna, devid))
state->info = &hsw_gt3_info;
else if (is_gt2(sna))
else if (is_gt2(sna, devid))
state->info = &hsw_gt2_info;
else
state->info = &hsw_gt1_info;
@ -2111,6 +2090,8 @@ static bool gen7_render_setup(struct sna *sna)
} else
return false;
state->gt = state->info->gt;
sna_static_stream_init(&general);
/* Zero pad the start. If you see an offset of 0x0 in the batchbuffer
@ -2175,12 +2156,14 @@ static bool gen7_render_setup(struct sna *sna)
const char *gen7_render_init(struct sna *sna, const char *backend)
{
if (!gen7_render_setup(sna))
int devid = intel_get_device_id(sna);
if (!gen7_render_setup(sna, devid))
return backend;
sna->kgem.context_switch = gen7_render_context_switch;
sna->kgem.retire = gen7_render_retire;
sna->kgem.expire = gen7_render_expire;
sna->kgem.context_switch = gen6_render_context_switch;
sna->kgem.retire = gen6_render_retire;
sna->kgem.expire = gen4_render_expire;
#if 0
#if !NO_COMPOSITE
@ -2190,7 +2173,7 @@ const char *gen7_render_init(struct sna *sna, const char *backend)
#if !NO_COMPOSITE_SPANS
sna->render.check_composite_spans = gen7_check_composite_spans;
sna->render.composite_spans = gen7_render_composite_spans;
if (is_mobile(sna) || is_gt2(sna) || is_byt(sna))
if (is_mobile(sna, devid) || is_gt2(sna, devid) || is_byt(sna))
sna->render.prefer_gpu |= PREFER_GPU_SPANS;
#endif
sna->render.video = gen7_render_video;
@ -2219,7 +2202,7 @@ const char *gen7_render_init(struct sna *sna, const char *backend)
sna->render.blit_tex = gen7_blit_tex;
sna->render.caps = HW_BIT_BLIT | HW_TEX_BLIT;
sna->render.flush = gen7_render_flush;
sna->render.flush = gen4_render_flush;
sna->render.reset = gen7_render_reset;
sna->render.fini = gen7_render_fini;
@ -2312,7 +2295,7 @@ gen7_blit_tex(struct sna *sna,
// tmp->box = gen7_render_composite_box;
tmp->done = gen7_render_composite_done;
kgem_set_mode(&sna->kgem, KGEM_RENDER, tmp->dst.bo);
kgem_set_mode(&sna->kgem, KGEM_RENDER, dst_bo);
if (!kgem_check_bo(&sna->kgem,
tmp->dst.bo, tmp->src.bo, tmp->mask.bo,
NULL)) {
@ -2320,7 +2303,7 @@ gen7_blit_tex(struct sna *sna,
_kgem_set_mode(&sna->kgem, KGEM_RENDER);
}
gen7_emit_composite_state(sna, tmp);
gen7_align_vertex(sna, tmp);
gen7_emit_composite_state(sna, tmp);
return true;
}

File diff suppressed because it is too large.


@ -71,9 +71,8 @@ struct kgem_bo {
struct list request;
struct list vma;
void *map;
#define IS_CPU_MAP(ptr) ((uintptr_t)(ptr) & 1)
#define IS_GTT_MAP(ptr) (ptr && ((uintptr_t)(ptr) & 1) == 0)
void *map__cpu;
void *map__gtt;
#define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))
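
MAP() strips tag bits that the old code stored in the low bits of page-aligned mmap pointers (the removed IS_CPU_MAP() flag). A sketch of the tagging trick, with an illustrative address:

#include <assert.h>
#include <stdint.h>

static void
map_tag_example(void)
{
        uintptr_t base = 0x100000;          /* page aligned: bits 0-1 clear */
        void *tagged = (void *)(base | 1);  /* old CPU-map tag bit */

        assert((uintptr_t)MAP(tagged) == base); /* MAP() drops the tag */
}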
struct kgem_bo_binding {
@ -82,11 +81,11 @@ struct kgem_bo {
uint16_t offset;
} binding;
uint64_t presumed_offset;
uint32_t unique_id;
uint32_t refcnt;
uint32_t handle;
uint32_t target_handle;
uint32_t presumed_offset;
uint32_t delta;
union {
struct {
@ -200,11 +199,12 @@ struct kgem {
uint32_t has_handle_lut :1;
uint32_t can_blt_cpu :1;
uint32_t can_render_y :1;
uint16_t fence_max;
uint16_t half_cpu_cache_pages;
uint32_t aperture_total, aperture_high, aperture_low, aperture_mappable;
uint32_t aperture, aperture_fenced;
uint32_t aperture, aperture_fenced, aperture_max_fence;
uint32_t max_upload_tile_size, max_copy_tile_size;
uint32_t max_gpu_size, max_cpu_size;
uint32_t large_object_size, max_object_size;
@ -313,6 +313,8 @@ struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
int bpp,
uint32_t flags);
bool kgem_bo_convert_to_gpu(struct kgem *kgem, struct kgem_bo *bo);
uint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format);
void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset);
@ -349,14 +351,6 @@ static inline void kgem_submit(struct kgem *kgem)
_kgem_submit(kgem);
}
static inline bool kgem_flush(struct kgem *kgem, bool flush)
{
if (kgem->nreloc == 0)
return false;
return (kgem->flush ^ flush) && kgem_ring_is_idle(kgem, kgem->ring);
}
static inline void kgem_bo_submit(struct kgem *kgem, struct kgem_bo *bo)
{
if (bo->exec)
@ -392,8 +386,10 @@ static inline void kgem_set_mode(struct kgem *kgem,
kgem_submit(kgem);
#endif
if (kgem->nreloc && bo->exec == NULL && kgem_ring_is_idle(kgem, kgem->ring))
if (kgem->nreloc && bo->exec == NULL && kgem_ring_is_idle(kgem, kgem->ring)) {
DBG(("%s: flushing before new bo\n", __FUNCTION__));
_kgem_submit(kgem);
}
if (kgem->mode == mode)
return;
@ -466,6 +462,11 @@ uint32_t kgem_add_reloc(struct kgem *kgem,
struct kgem_bo *bo,
uint32_t read_write_domains,
uint32_t delta);
uint64_t kgem_add_reloc64(struct kgem *kgem,
uint32_t pos,
struct kgem_bo *bo,
uint32_t read_write_domains,
uint64_t delta);
void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__async(struct kgem *kgem, struct kgem_bo *bo);
@ -475,15 +476,13 @@ void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo);
void kgem_bo_sync__cpu_full(struct kgem *kgem, struct kgem_bo *bo, bool write);
void *__kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
void __kgem_bo_unmap__cpu(struct kgem *kgem, struct kgem_bo *bo, void *ptr);
uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo);
bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
const void *data, int length);
int kgem_bo_fenced_size(struct kgem *kgem, struct kgem_bo *bo);
void kgem_get_tile_size(struct kgem *kgem, int tiling,
void kgem_get_tile_size(struct kgem *kgem, int tiling, int pitch,
int *tile_width, int *tile_height, int *tile_size);
static inline int __kgem_buffer_size(struct kgem_bo *bo)
@ -498,6 +497,12 @@ static inline int __kgem_bo_size(struct kgem_bo *bo)
return PAGE_SIZE * bo->size.pages.count;
}
static inline int __kgem_bo_num_pages(struct kgem_bo *bo)
{
assert(bo->proxy == NULL);
return bo->size.pages.count;
}
static inline int kgem_bo_size(struct kgem_bo *bo)
{
if (bo->proxy)
@ -506,7 +511,6 @@ static inline int kgem_bo_size(struct kgem_bo *bo)
return __kgem_bo_size(bo);
}
/*
static inline bool kgem_bo_blt_pitch_is_ok(struct kgem *kgem,
struct kgem_bo *bo)
{
@ -533,80 +537,6 @@ static inline bool kgem_bo_can_blt(struct kgem *kgem,
return kgem_bo_blt_pitch_is_ok(kgem, bo);
}
*/
static inline bool __kgem_bo_is_mappable(struct kgem *kgem,
struct kgem_bo *bo)
{
if (bo->domain == DOMAIN_GTT)
return true;
if (kgem->gen < 040 && bo->tiling &&
bo->presumed_offset & (kgem_bo_fenced_size(kgem, bo) - 1))
return false;
if (kgem->gen == 021 && bo->tiling == I915_TILING_Y)
return false;
if (kgem->has_llc && bo->tiling == I915_TILING_NONE)
return true;
if (!bo->presumed_offset)
return kgem_bo_size(bo) <= kgem->aperture_mappable / 4;
return bo->presumed_offset + kgem_bo_size(bo) <= kgem->aperture_mappable;
}
static inline bool kgem_bo_is_mappable(struct kgem *kgem,
struct kgem_bo *bo)
{
DBG(("%s: domain=%d, offset: %d size: %d\n",
__FUNCTION__, bo->domain, bo->presumed_offset, kgem_bo_size(bo)));
assert(bo->refcnt);
return __kgem_bo_is_mappable(kgem, bo);
}
static inline bool kgem_bo_mapped(struct kgem *kgem, struct kgem_bo *bo)
{
DBG(("%s: map=%p, tiling=%d, domain=%d\n",
__FUNCTION__, bo->map, bo->tiling, bo->domain));
assert(bo->refcnt);
if (bo->map == NULL)
return bo->tiling == I915_TILING_NONE && bo->domain == DOMAIN_CPU;
return IS_CPU_MAP(bo->map) == !bo->tiling;
}
static inline bool kgem_bo_can_map(struct kgem *kgem, struct kgem_bo *bo)
{
if (kgem_bo_mapped(kgem, bo))
return true;
if (!bo->tiling && (kgem->has_llc || bo->domain == DOMAIN_CPU))
return true;
if (kgem->gen == 021 && bo->tiling == I915_TILING_Y)
return false;
return kgem_bo_size(bo) <= kgem->aperture_mappable / 4;
}
static inline bool kgem_bo_can_map__cpu(struct kgem *kgem,
struct kgem_bo *bo,
bool write)
{
if (bo->purged || (bo->scanout && write))
return false;
if (kgem->has_llc)
return true;
if (bo->domain != DOMAIN_CPU)
return false;
return !write || bo->exec == NULL;
}
static inline bool kgem_bo_is_snoop(struct kgem_bo *bo)
{
@@ -652,9 +582,6 @@ static inline bool __kgem_bo_is_busy(struct kgem *kgem, struct kgem_bo *bo)
if (bo->exec)
return true;
if (kgem_flush(kgem, bo->flush))
kgem_submit(kgem);
if (bo->rq && !__kgem_busy(kgem, bo->handle))
__kgem_bo_clear_busy(bo);
@@ -723,6 +650,53 @@ static inline void kgem_bo_mark_dirty(struct kgem_bo *bo)
} while ((bo = bo->proxy));
}
static inline bool kgem_bo_mapped(struct kgem *kgem, struct kgem_bo *bo)
{
DBG(("%s: map=%p:%p, tiling=%d, domain=%d\n",
__FUNCTION__, bo->map__gtt, bo->map__cpu, bo->tiling, bo->domain));
if (bo->tiling == I915_TILING_NONE && (bo->domain == DOMAIN_CPU || kgem->has_llc))
return bo->map__cpu != NULL;
return bo->map__gtt != NULL;
}
static inline bool kgem_bo_can_map(struct kgem *kgem, struct kgem_bo *bo)
{
DBG(("%s: map=%p:%p, tiling=%d, domain=%d, offset=%ld\n",
__FUNCTION__, bo->map__gtt, bo->map__cpu, bo->tiling, bo->domain, (long)bo->presumed_offset));
if (!bo->tiling && (kgem->has_llc || bo->domain == DOMAIN_CPU))
return true;
if (bo->map__gtt != NULL)
return true;
if (kgem->gen == 021 && bo->tiling == I915_TILING_Y)
return false;
if (!bo->presumed_offset)
return __kgem_bo_num_pages(bo) <= kgem->aperture_mappable / 4;
return bo->presumed_offset / PAGE_SIZE + __kgem_bo_num_pages(bo) <= kgem->aperture_mappable;
}
static inline bool kgem_bo_can_map__cpu(struct kgem *kgem,
struct kgem_bo *bo,
bool write)
{
if (bo->purged || (bo->scanout && write))
return false;
if (kgem->has_llc)
return true;
if (bo->domain != DOMAIN_CPU)
return false;
return !write || bo->exec == NULL;
}
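/* Taken together, the predicates above let a caller pick the cheapest
 * way to write to a bo. A hypothetical helper (not the driver's actual
 * policy):
 *
 *	void *map_for_write(struct kgem *kgem, struct kgem_bo *bo)
 *	{
 *		if (kgem_bo_can_map__cpu(kgem, bo, true))
 *			return kgem_bo_map__cpu(kgem, bo); // snooped/LLC path
 *		if (kgem_bo_can_map(kgem, bo))
 *			return kgem_bo_map(kgem, bo);      // GTT window
 *		return NULL; // caller must stage through a proxy buffer
 *	}
 */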
#define KGEM_BUFFER_WRITE 0x1
#define KGEM_BUFFER_INPLACE 0x2
#define KGEM_BUFFER_LAST 0x4
@@ -742,8 +716,7 @@ void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *bo);
void kgem_throttle(struct kgem *kgem);
#define MAX_INACTIVE_TIME 10
bool kgem_expire_cache(struct kgem *kgem);
void kgem_purge_cache(struct kgem *kgem);
void kgem_cleanup_cache(struct kgem *kgem);
bool kgem_cleanup_cache(struct kgem *kgem);
void kgem_clean_scanout_cache(struct kgem *kgem);
void kgem_clean_large_cache(struct kgem *kgem);
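/* kgem_cleanup_cache() now reports a result. A hedged guess at caller
 * usage, assuming the bool signals whether anything was reclaimed:
 *
 *	if (!kgem_expire_cache(kgem) && !kgem_cleanup_cache(kgem))
 *		; // nothing left to free
 */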
@@ -758,4 +731,6 @@ static inline void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch)
}
#endif
void choose_memcpy_tiled_x(struct kgem *kgem, int swizzling);
#endif /* KGEM_H */

View File

@@ -706,6 +706,19 @@ sna_wait_for_scanline(struct sna *sna,
int intel_get_device_id(struct sna *sna)
{
struct drm_i915_getparam gp;
int devid = 0;
memset(&gp, 0, sizeof(gp));
gp.param = I915_PARAM_CHIPSET_ID;
gp.value = &devid;
if (drmIoctl(sna->scrn, DRM_IOCTL_I915_GETPARAM, &gp))
return 0;
return devid;
}
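/* The device id is now probed through the DRM handle kept in sna->scrn
 * rather than via the fd-based helper removed below. A hypothetical
 * init-time caller:
 *
 *	int devid = intel_get_device_id(sna);
 *	if (devid == 0)
 *		return false; // GETPARAM failed, fall back to PCI probing
 */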
static const struct intel_device_info intel_generic_info = {
.gen = -1,
@@ -814,21 +827,6 @@ intel_detect_chipset(struct pci_device *pci)
return &intel_generic_info;
}
int intel_get_device_id(int fd)
{
struct drm_i915_getparam gp;
int devid = 0;
memset(&gp, 0, sizeof(gp));
gp.param = I915_PARAM_CHIPSET_ID;
gp.value = &devid;
if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
return 0;
return devid;
}
int drmIoctl(int fd, unsigned long request, void *arg)
{
ioctl_t io;

View File

@@ -448,7 +448,8 @@ struct sna {
unsigned flags;
#define SNA_NO_WAIT 0x1
#define SNA_NO_FLIP 0x2
#define SNA_TRIPLE_BUFFER 0x4
#define SNA_NO_VSYNC 0x4
#define SNA_TRIPLE_BUFFER 0x8
#define SNA_TEAR_FREE 0x10
#define SNA_FORCE_SHADOW 0x20
#define SNA_FLUSH_GTT 0x40
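/* SNA_NO_VSYNC takes over bit 0x4 and SNA_TRIPLE_BUFFER moves to 0x8;
 * the later flags keep their values. A hypothetical test in a swap
 * path (names assumed):
 *
 *	bool wait_vblank = (sna->flags & SNA_NO_VSYNC) == 0;
 *	bool triple = (sna->flags & SNA_TRIPLE_BUFFER) != 0;
 */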
@@ -490,6 +491,7 @@ struct sna {
uint32_t fill_alu;
} blt_state;
union {
unsigned gt;
struct gen3_render_state gen3;
struct gen4_render_state gen4;
struct gen5_render_state gen5;
@@ -497,6 +499,8 @@
struct gen7_render_state gen7;
} render_state;
bool dri_available;
bool dri_open;
/* Broken-out options. */
// OptionInfoPtr Options;
@@ -611,4 +615,7 @@ sna_transform_equal(const PictTransform *a, const PictTransform *b)
return memcmp(a, b, sizeof(*a)) == 0;
}
int intel_get_device_id(struct sna *sna);
#endif /* _SNA_H */

View File

@@ -42,22 +42,22 @@
#define BLT_SRC_TILED (1<<15)
#define BLT_DST_TILED (1<<11)
#define COLOR_BLT_CMD ((2<<29)|(0x40<<22)|(0x3))
#define XY_COLOR_BLT ((2<<29)|(0x50<<22)|(0x4))
#define XY_SETUP_BLT ((2<<29)|(1<<22)|6)
#define XY_SETUP_MONO_PATTERN_SL_BLT ((2<<29)|(0x11<<22)|7)
#define XY_SETUP_CLIP ((2<<29)|(3<<22)|1)
#define XY_SCANLINE_BLT ((2<<29)|(0x25<<22)|1)
#define XY_TEXT_IMMEDIATE_BLT ((2<<29)|(0x31<<22)|(1<<16))
#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|0x4)
#define XY_PAT_BLT ((2<<29)|(0x51<<22)|0x4)
#define XY_PAT_BLT_IMMEDIATE ((2<<29)|(0x72<<22))
#define XY_MONO_PAT ((0x2<<29)|(0x52<<22)|0x7)
#define XY_MONO_SRC_COPY ((0x2<<29)|(0x54<<22)|(0x6))
#define XY_MONO_SRC_COPY_IMM ((0x2<<29)|(0x71<<22))
#define XY_FULL_MONO_PATTERN_BLT ((0x2<<29)|(0x57<<22)|0xa)
#define XY_FULL_MONO_PATTERN_MONO_SRC_BLT ((0x2<<29)|(0x58<<22)|0xa)
#define COLOR_BLT_CMD (2<<29|0x40<<22|(0x3))
#define XY_COLOR_BLT (2<<29|0x50<<22|(0x4))
#define XY_SETUP_BLT (2<<29|0x01<<22)
#define XY_SETUP_MONO_PATTERN_SL_BLT (2<<29|0x11<<22)
#define XY_SETUP_CLIP (2<<29|0x03<<22|1)
#define XY_SCANLINE_BLT (2<<29|0x25<<22|1)
#define XY_TEXT_IMMEDIATE_BLT (2<<29|0x31<<22|(1<<16))
#define XY_SRC_COPY_BLT_CMD (2<<29|0x53<<22)
#define SRC_COPY_BLT_CMD (2<<29|0x43<<22|0x4)
#define XY_PAT_BLT (2<<29|0x51<<22)
#define XY_PAT_BLT_IMMEDIATE (2<<29|0x72<<22)
#define XY_MONO_PAT (2<<29|0x52<<22)
#define XY_MONO_SRC_COPY (2<<29|0x54<<22)
#define XY_MONO_SRC_COPY_IMM (2<<29|0x71<<22)
#define XY_FULL_MONO_PATTERN_BLT (2<<29|0x57<<22)
#define XY_FULL_MONO_PATTERN_MONO_SRC_BLT (2<<29|0x58<<22)
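/* The rewritten opcodes drop most of the hard-coded dword lengths, so
 * an emitter presumably ORs the length in itself; the exact count per
 * command is an assumption here, e.g. for a 6-dword solid fill:
 *
 *	uint32_t cmd = XY_COLOR_BLT | (6 - 2); // length field = ndwords - 2
 */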
/* FLUSH commands */
#define BRW_3D(Pipeline,Opcode,Subopcode) \

View File

@@ -104,6 +104,7 @@ struct sna_composite_op {
uint32_t inplace :1;
uint32_t overwrites:1;
uint32_t bpp : 6;
uint32_t alu : 4;
uint32_t cmd;
uint32_t br13;
@@ -245,7 +246,7 @@ struct sna_render {
struct sna_solid_cache {
struct kgem_bo *cache_bo;
struct kgem_bo *bo[1024];
uint32_t color[1025];
uint32_t color[1024];
int last;
int size;
int dirty;
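/* color[] shrinks from 1025 to 1024 entries to match bo[], fixing an
 * off-by-one. A lookup presumably walks both arrays in lock-step:
 *
 *	for (i = 0; i < cache->size; i++)
 *		if (cache->color[i] == color)
 *			return kgem_bo_reference(cache->bo[i]);
 */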
@@ -381,6 +382,7 @@ enum {
};
struct gen6_render_state {
unsigned gt;
const struct gt_info *info;
struct kgem_bo *general_bo;
@@ -430,6 +432,7 @@ enum {
};
struct gen7_render_state {
unsigned gt;
const struct gt_info *info;
struct kgem_bo *general_bo;

View File

@@ -70,10 +70,10 @@ static void i830_done_composite(PixmapPtr dest)
// intel_debug_flush(scrn);
}
int sna_bitmap_from_handle(bitmap_t *bitmap, uint32_t handle)
int uxa_bitmap_from_handle(bitmap_t *bitmap, uint32_t handle)
{
struct intel_screen_private *intel = intel_get_screen_private();
drm_intel_bo *bo;
struct intel_screen_private *intel = intel_get_screen_private();
drm_intel_bo *bo;
surface_t *sf;
unsigned int size;
@@ -118,14 +118,14 @@ err_1:
return -1;
};
void sna_set_bo_handle(bitmap_t *bitmap, int handle)
void uxa_set_bo_handle(bitmap_t *bitmap, int handle)
{
sna_bitmap_from_handle(bitmap, handle);
uxa_bitmap_from_handle(bitmap, handle);
};
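/* The exported entry points gain a uxa_ prefix so the UXA build no
 * longer reuses the sna_ names. A hypothetical caller of uxa_blit_tex()
 * as redeclared below (argument values assumed):
 *
 *	bitmap_t bm;
 *	if (uxa_bitmap_from_handle(&bm, handle) == 0)
 *		uxa_blit_tex(&bm, 0, 1, dst_x, dst_y, w, h, 0, 0); // scale=0, vsync=1
 */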
int sna_blit_tex(bitmap_t *bitmap, bool scale, int dst_x, int dst_y,
int w, int h, int src_x, int src_y)
int uxa_blit_tex(bitmap_t *bitmap, int scale, int vsync,
int dst_x, int dst_y, int w, int h, int src_x, int src_y)
{
// DBG("%s\n", __FUNCTION__);