set irq handler & SB sna

git-svn-id: svn://kolibrios.org@2351 a494cfbc-eb01-0410-851d-a64ba20cac60
This commit is contained in:
Sergey Semyonov (Serge) 2012-02-16 10:48:38 +00:00
parent db3a775207
commit b3f0b80a43
31 changed files with 8613 additions and 817 deletions

View File

@ -6,20 +6,28 @@
#include "intel_drv.h"
#include "bitmap.h"
/* Driver capability words returned for page 0 of get_driver_caps().
 * Fix: the original DRIVER_CAPS_0 had a stray trailing ';' and no
 * parentheses, so any use inside a larger expression (e.g.
 * `x = DRIVER_CAPS_0 & mask`) would mis-parse or fail to compile. */
#define DRIVER_CAPS_0 (HW_BIT_BLIT | HW_TEX_BLIT)
#define DRIVER_CAPS_1 0
extern struct drm_device *main_device;
struct hman bm_man;
/* Release a user bitmap: free its handle, unpin the backing GEM object
 * and drop the reference taken at creation time.
 * Uses the regparm(1) ABI: the bitmap pointer arrives in a register. */
void __attribute__((regparm(1))) destroy_bitmap(bitmap_t *bitmap)
{
printf("destroy bitmap %d\n", bitmap->handle);
free_handle(&bm_man, bitmap->handle);
bitmap->handle = 0;
i915_gem_object_unpin(bitmap->obj);
/* hand the pages back: readable via GTT, writable by the CPU */
bitmap->obj->base.read_domains = I915_GEM_DOMAIN_GTT;
bitmap->obj->base.write_domain = I915_GEM_DOMAIN_CPU;
/* struct_mutex guards the GEM object release path */
mutex_lock(&main_device->struct_mutex);
drm_gem_object_unreference(&bitmap->obj->base);
mutex_unlock(&main_device->struct_mutex);
__DestroyObject(bitmap);
};
extern struct drm_device *main_device;
struct hman bm_man;
int init_bitmaps()
{
int ret;
@ -124,10 +132,12 @@ int create_surface(struct io_call_10 *pbitmap)
{
*dst++ = (0xFFFFF000 & *src++) | 0x207 ; // map as shared page
};
// while(max_count--)
// *dst++ = 0; // cleanup unused space
while(max_count--)
*dst++ = 0; // cleanup unused space
}
obj->mapped = uaddr ;
bitmap->handle = handle;
bitmap->uaddr = uaddr;
bitmap->pitch = pitch;
@ -221,3 +231,83 @@ int free_handle(struct hman *man, u32 handle)
};
void *drm_intel_bo_map(struct drm_i915_gem_object *obj, int write_enable)
{
u8 *kaddr;
kaddr = AllocKernelSpace(obj->base.size);
if( kaddr != NULL)
{
u32_t *src = (u32_t*)obj->pages;
u32_t *dst = &((u32_t*)page_tabs)[(u32_t)kaddr >> 12];
u32 count = obj->base.size/4096;
while(count--)
{
*dst++ = (0xFFFFF000 & *src++) | 0x003 ;
};
return kaddr;
};
return NULL;
}
/* Drop the reference held on a GEM object addressed by its raw handle
 * (here the handle is simply the object pointer cast to an integer). */
void destroy_gem_object(uint32_t handle)
{
    struct drm_i915_gem_object *obj = (struct drm_i915_gem_object *)handle;

    drm_gem_object_unreference(&obj->base);
};
/* Copy 'size' bytes from 'src' into a GEM object at byte 'offset'.
 * 'handle' is the raw object pointer (same convention as
 * destroy_gem_object). Errors are silently ignored (void return).
 * NOTE(review): the object is pinned here but never unpinned, and a
 * failed drm_intel_bo_map() also leaves it pinned -- confirm the pin
 * is intended to persist for the object's lifetime. */
void write_gem_object(uint32_t handle, u32 offset, u32 size, u8* src)
{
struct drm_i915_gem_object *obj = (void*)handle;
u8 *dst;
int ret;
ret = i915_gem_object_pin(obj, 4096, true);
if (ret)
return;
dst = drm_intel_bo_map(obj, true);
if( dst != NULL )
{
memmove(dst+offset, src, size);
/* release only the temporary kernel mapping, not the pages */
FreeKernelSpace(dst);
};
};
/* Translate a raw GEM handle (object pointer) to the object's offset
 * inside the GTT aperture. */
u32 get_buffer_offset(uint32_t handle)
{
    struct drm_i915_gem_object *obj = (struct drm_i915_gem_object *)handle;

    return obj->gtt_offset;
};
/* Report driver hardware capabilities.
 * caps->idx selects the capability page on input:
 *   0 - opt[0]/opt[1] capability bit words (DRIVER_CAPS_0/1)
 *   1 - maximum texture dimensions (4096x4096)
 * Returns 0 on success, 1 for an unknown page index. */
int get_driver_caps(hwcaps_t *caps)
{
int ret = 0;
ENTER();
dbgprintf("caps ptr %x\n", caps);
switch(caps->idx)
{
case 0:
caps->opt[0] = DRIVER_CAPS_0;
caps->opt[1] = DRIVER_CAPS_1;
break;
case 1:
caps->cap1.max_tex_width = 4096;
caps->cap1.max_tex_height = 4096;
break;
default:
ret = 1;
};
/* NOTE(review): idx is overwritten unconditionally, even on the error
 * path -- presumably a "next page" hint for the caller; verify this is
 * intended for the default case too. */
caps->idx = 1;
return ret;
}

View File

@ -66,6 +66,25 @@ struct io_call_10 /* SRV_CREATE_SURFACE */
u32 format; // reserved mbz
};
/* Capability-query exchange structure for get_driver_caps().
 * 'idx' selects the capability page on input; the union carries the
 * page contents on output. */
typedef struct
{
uint32_t idx;              /* in: page index; out: next-page hint */
union
{
uint32_t opt[2];           /* page 0: capability bit words */
struct {
uint32_t max_tex_width;    /* page 1: texture size limits */
uint32_t max_tex_height;
}cap1;
};
}hwcaps_t;
#define HW_BIT_BLIT (1<<0) /* BGRX blitter */
#define HW_TEX_BLIT (1<<1) /* stretch blit */
#define HW_VID_BLIT (1<<2) /* planar and packed video */
/* 3 - 63 reserved */
int get_driver_caps(hwcaps_t *caps);
int create_surface(struct io_call_10 *pbitmap);
int init_bitmaps();

View File

@ -34,7 +34,7 @@
#include "i915_drm.h"
#include "i915_drv.h"
#include <drm/intel-gtt.h>
//#include "i915_trace.h"
#include "i915_trace.h"
//#include "../../../platform/x86/intel_ips.h"
#include <linux/pci.h>
//#include <linux/vgaarb.h>
@ -241,9 +241,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
intel_modeset_gem_init(dev);
// ret = drm_irq_install(dev);
// if (ret)
// goto cleanup_gem;
ret = drm_irq_install(dev);
if (ret)
goto cleanup_gem;
/* Always safe in the mode setting case. */
/* FIXME: do pre/post-mode set stuff in core KMS code */
@ -521,8 +521,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
/* enable GEM by default */
dev_priv->has_gem = 1;
// intel_irq_init(dev);
intel_irq_init(dev);
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev);

View File

@ -491,7 +491,7 @@ struct drm_i915_gem_relocation_entry {
__u32 delta;
/** Offset in the buffer the relocation entry will be written into */
__u64 offset;
__u32 offset;
/**
* Offset value of the target buffer that the relocation entry was last
@ -501,7 +501,7 @@ struct drm_i915_gem_relocation_entry {
* and writing the relocation. This value is written back out by
* the execbuffer ioctl when the relocation is written.
*/
__u64 presumed_offset;
__u32 presumed_offset;
/**
* Target memory domains read by this operation.

View File

@ -444,8 +444,6 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto err_g4;
main_device = dev;
LEAVE();
return 0;

View File

@ -29,7 +29,7 @@
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
//#include "i915_trace.h"
#include "i915_trace.h"
#include "intel_drv.h"
//#include <linux/shmem_fs.h>
#include <linux/slab.h>
@ -236,6 +236,7 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data,
return 0;
}
#endif
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
@ -246,8 +247,6 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
size_t pinned;
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
pinned = 0;
mutex_lock(&dev->struct_mutex);
@ -261,8 +260,9 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
return 0;
}
static int
i915_gem_create(struct drm_file *file,
#if 0
int i915_gem_create(struct drm_file *file,
struct drm_device *dev,
uint64_t size,
uint32_t *handle_p)
@ -290,6 +290,7 @@ i915_gem_create(struct drm_file *file,
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference(&obj->base);
trace_i915_gem_object_create(obj);
*handle_p = handle;
return 0;
@ -745,8 +746,6 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
gfp_t gfpmask)
@ -794,8 +793,6 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
int page_count = obj->base.size / PAGE_SIZE;
int i;
ENTER();
BUG_ON(obj->madv == __I915_MADV_PURGED);
// if (obj->tiling_mode != I915_TILING_NONE)
@ -811,8 +808,6 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
free(obj->pages);
obj->pages = NULL;
LEAVE();
}
void
@ -994,9 +989,13 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
old_read_domains = obj->base.read_domains;
old_write_domain = obj->base.write_domain;
obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
trace_i915_gem_object_change_domain(obj,
old_read_domains,
old_write_domain);
}
/**
@ -1007,7 +1006,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
int ret = 0;
ENTER();
if (obj->gtt_space == NULL)
return 0;
@ -1047,6 +1045,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
if (ret == -ERESTARTSYS)
return ret;
trace_i915_gem_object_unbind(obj);
i915_gem_gtt_unbind_object(obj);
i915_gem_object_put_pages_gtt(obj);
@ -1063,7 +1062,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
if (i915_gem_object_is_purgeable(obj))
i915_gem_object_truncate(obj);
LEAVE();
return ret;
}
@ -1077,6 +1075,7 @@ i915_gem_flush_ring(struct intel_ring_buffer *ring,
if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
return 0;
trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
ret = ring->flush(ring, invalidate_domains, flush_domains);
if (ret)
@ -1235,9 +1234,6 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
@ -1423,6 +1419,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
obj->map_and_fenceable = mappable && fenceable;
trace_i915_gem_object_bind(obj, map_and_fenceable);
return 0;
}
@ -1528,6 +1525,9 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
trace_i915_gem_object_change_domain(obj,
obj->base.read_domains,
old_write_domain);
}
/** Flushes the CPU write domain for the object if it's dirty. */
@ -1544,6 +1544,9 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
trace_i915_gem_object_change_domain(obj,
obj->base.read_domains,
old_write_domain);
}
/**
@ -1591,6 +1594,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
obj->dirty = 1;
}
trace_i915_gem_object_change_domain(obj,
old_read_domains,
old_write_domain);
return 0;
}
@ -1646,6 +1653,9 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
trace_i915_gem_object_change_domain(obj,
old_read_domains,
old_write_domain);
}
obj->cache_level = cache_level;
@ -1713,6 +1723,9 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
trace_i915_gem_object_change_domain(obj,
old_read_domains,
old_write_domain);
return 0;
}
@ -1790,6 +1803,9 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
trace_i915_gem_object_change_domain(obj,
old_read_domains,
old_write_domain);
return 0;
}
@ -1930,8 +1946,6 @@ i915_gem_object_unpin(struct drm_i915_gem_object *obj)
@ -2000,8 +2014,6 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
ENTER();
ret = i915_gem_object_unbind(obj);
if (ret == -ERESTARTSYS) {
list_move(&obj->mm_list,
@ -2009,6 +2021,7 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
return;
}
trace_i915_gem_object_destroy(obj);
// if (obj->base.map_list.map)
// drm_gem_free_mmap_offset(&obj->base);
@ -2019,7 +2032,6 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
kfree(obj->page_cpu_valid);
kfree(obj->bit_17);
kfree(obj);
LEAVE();
}
void i915_gem_free_object(struct drm_gem_object *gem_obj)
@ -2027,7 +2039,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
struct drm_device *dev = obj->base.dev;
ENTER();
while (obj->pin_count > 0)
i915_gem_object_unpin(obj);
@ -2035,7 +2046,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
// i915_gem_detach_phys_object(dev, obj);
i915_gem_free_object_tail(obj);
LEAVE();
}
@ -2054,7 +2064,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
ENTER();
ret = intel_init_render_ring_buffer(dev);
if (ret)
return ret;
@ -2072,7 +2082,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
}
dev_priv->next_seqno = 1;
LEAVE();
return 0;
cleanup_bsd_ring:

View File

@ -26,7 +26,7 @@
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
//#include "i915_trace.h"
#include "i915_trace.h"
#include "intel_drv.h"
#define AGP_USER_TYPES (1 << 16)

View File

@ -0,0 +1,442 @@
/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
*/
/*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/irqreturn.h>
//#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#define MAX_NOPID ((u32)~0)
/**
* Interrupts that are always left unmasked.
*
* Since pipe events are edge-triggered from the PIPESTAT register to IIR,
* we leave them always unmasked in IMR and then control enabling them through
* PIPESTAT alone.
*/
#define I915_INTERRUPT_ENABLE_FIX \
(I915_ASLE_INTERRUPT | \
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | \
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | \
I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
/** Interrupts that we mask and unmask at runtime. */
#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)
#define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\
PIPE_VBLANK_INTERRUPT_STATUS)
#define I915_PIPE_VBLANK_ENABLE (PIPE_START_VBLANK_INTERRUPT_ENABLE |\
PIPE_VBLANK_INTERRUPT_ENABLE)
#define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \
DRM_I915_VBLANK_PIPE_B)
/* For display hotplug interrupt */
/* Unmask the given bits in the Ironlake display interrupt mask
 * register (DEIMR); no-op when they are already unmasked. The
 * POSTING_READ flushes the write to the hardware. */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
    if ((dev_priv->irq_mask & mask) == 0)
        return;

    dev_priv->irq_mask &= ~mask;
    I915_WRITE(DEIMR, dev_priv->irq_mask);
    POSTING_READ(DEIMR);
}
/* Mask the given bits in DEIMR; no-op when they are already masked.
 * Mirror image of ironlake_enable_display_irq(). */
static inline void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
    if ((dev_priv->irq_mask & mask) == mask)
        return;

    dev_priv->irq_mask |= mask;
    I915_WRITE(DEIMR, dev_priv->irq_mask);
    POSTING_READ(DEIMR);
}
/* Main interrupt service routine for Ironlake-class GPUs.
 * Reads the four IIR banks (display, GT, PCH/south, PM), handles what
 * it can, then acknowledges by writing the values back. Master
 * interrupt delivery (DEIER) is disabled around the whole sequence.
 * Most event dispatch (rings, page flips, vblank, hotplug) is
 * commented out in this port; only the GEN6 PM deferral is live.
 * Returns IRQ_HANDLED if any IIR bit was set, IRQ_NONE otherwise. */
static int ironlake_irq_handler(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE;
u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
u32 hotplug_mask;
struct drm_i915_master_private *master_priv;
u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
atomic_inc(&dev_priv->irq_received);
if (IS_GEN6(dev))
bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
POSTING_READ(DEIER);
/* snapshot all pending-interrupt registers */
de_iir = I915_READ(DEIIR);
gt_iir = I915_READ(GTIIR);
pch_iir = I915_READ(SDEIIR);
pm_iir = I915_READ(GEN6_PMIIR);
/* nothing pending anywhere -> not our interrupt */
if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
(!IS_GEN6(dev) || pm_iir == 0))
goto done;
if (HAS_PCH_CPT(dev))
hotplug_mask = SDE_HOTPLUG_MASK_CPT;
else
hotplug_mask = SDE_HOTPLUG_MASK;
ret = IRQ_HANDLED;
// if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
// notify_ring(dev, &dev_priv->ring[RCS]);
// if (gt_iir & bsd_usr_interrupt)
// notify_ring(dev, &dev_priv->ring[VCS]);
// if (gt_iir & GT_BLT_USER_INTERRUPT)
// notify_ring(dev, &dev_priv->ring[BCS]);
// if (de_iir & DE_GSE)
// intel_opregion_gse_intr(dev);
// if (de_iir & DE_PLANEA_FLIP_DONE) {
// intel_prepare_page_flip(dev, 0);
// intel_finish_page_flip_plane(dev, 0);
// }
// if (de_iir & DE_PLANEB_FLIP_DONE) {
// intel_prepare_page_flip(dev, 1);
// intel_finish_page_flip_plane(dev, 1);
// }
// if (de_iir & DE_PIPEA_VBLANK)
// drm_handle_vblank(dev, 0);
// if (de_iir & DE_PIPEB_VBLANK)
// drm_handle_vblank(dev, 1);
/* check event from PCH */
// if (de_iir & DE_PCH_EVENT) {
// if (pch_iir & hotplug_mask)
// queue_work(dev_priv->wq, &dev_priv->hotplug_work);
// pch_irq_handler(dev);
// }
// if (de_iir & DE_PCU_EVENT) {
// I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
// i915_handle_rps_change(dev);
// }
if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) {
/*
* IIR bits should never already be set because IMR should
* prevent an interrupt from being shown in IIR. The warning
* displays a case where we've unsafely cleared
* dev_priv->pm_iir. Although missing an interrupt of the same
* type is not a problem, it displays a problem in the logic.
*
* The mask bit in IMR is cleared by rps_work.
*/
unsigned long flags;
spin_lock_irqsave(&dev_priv->rps_lock, flags);
WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
dev_priv->pm_iir |= pm_iir;
I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
POSTING_READ(GEN6_PMIMR);
spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
// queue_work(dev_priv->wq, &dev_priv->rps_work);
}
/* should clear PCH hotplug event before clear CPU irq */
I915_WRITE(SDEIIR, pch_iir);
I915_WRITE(GTIIR, gt_iir);
I915_WRITE(DEIIR, de_iir);
I915_WRITE(GEN6_PMIIR, pm_iir);
done:
/* re-enable master interrupt delivery */
I915_WRITE(DEIER, de_ier);
POSTING_READ(DEIER);
return ret;
}
/* drm_dma.h hooks
*/
/* drm_dma.h hooks
*/
/* Quiesce all interrupt sources before the handler is attached:
 * mask and disable the display (DE), GT and south-display (SDE)
 * interrupt banks so no IRQ fires until postinstall re-enables them. */
static void ironlake_irq_preinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
atomic_set(&dev_priv->irq_received, 0);
// INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
// INIT_WORK(&dev_priv->error_work, i915_error_work_func);
// if (IS_GEN6(dev) || IS_IVYBRIDGE(dev))
// INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
I915_WRITE(HWSTAM, 0xeffe);
if (IS_GEN6(dev)) {
/* Workaround stalls observed on Sandy Bridge GPUs by
* making the blitter command streamer generate a
* write to the Hardware Status Page for
* MI_USER_INTERRUPT. This appears to serialize the
* previous seqno write out before the interrupt
* happens.
*/
I915_WRITE(GEN6_BLITTER_HWSTAM, ~GEN6_BLITTER_USER_INTERRUPT);
I915_WRITE(GEN6_BSD_HWSTAM, ~GEN6_BSD_USER_INTERRUPT);
}
/* XXX hotplug from PCH */
/* mask everything, disable delivery: north display... */
I915_WRITE(DEIMR, 0xffffffff);
I915_WRITE(DEIER, 0x0);
POSTING_READ(DEIER);
/* and GT */
I915_WRITE(GTIMR, 0xffffffff);
I915_WRITE(GTIER, 0x0);
POSTING_READ(GTIER);
/* south display irq */
I915_WRITE(SDEIMR, 0xffffffff);
I915_WRITE(SDEIER, 0x0);
POSTING_READ(SDEIER);
}
/*
* Enable digital hotplug on the PCH, and configure the DP short pulse
* duration to 2ms (which is the minimum in the Display Port spec)
*
* This register is the same on all known PCH chips.
*/
/*
* Enable digital hotplug on the PCH, and configure the DP short pulse
* duration to 2ms (which is the minimum in the Display Port spec)
*
* This register is the same on all known PCH chips.
*/
static void ironlake_enable_pch_hotplug(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 hotplug;
/* read-modify-write: clear the pulse-duration fields, then enable
* hotplug with a 2ms pulse on ports B, C and D */
hotplug = I915_READ(PCH_PORT_HOTPLUG);
hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
/* Program the interrupt enable/mask registers after the handler is
 * attached: unmask the display events we always want, enable the
 * per-ring GT user interrupts (GEN6 adds BSD/BLT variants), set up
 * PCH hotplug sources, and on Ironlake-M enable PCU events.
 * Always returns 0. */
static int ironlake_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
/* enable kind of interrupts always enabled */
u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
u32 render_irqs;
u32 hotplug_mask;
// DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
// if (HAS_BSD(dev))
// DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
// if (HAS_BLT(dev))
// DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
dev_priv->irq_mask = ~display_mask;
/* should always can generate irq */
/* ack anything pending, then unmask and enable display irqs */
I915_WRITE(DEIIR, I915_READ(DEIIR));
I915_WRITE(DEIMR, dev_priv->irq_mask);
I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
POSTING_READ(DEIER);
dev_priv->gt_irq_mask = ~0;
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
if (IS_GEN6(dev))
render_irqs =
GT_USER_INTERRUPT |
GT_GEN6_BSD_USER_INTERRUPT |
GT_BLT_USER_INTERRUPT;
else
render_irqs =
GT_USER_INTERRUPT |
GT_PIPE_NOTIFY |
GT_BSD_USER_INTERRUPT;
I915_WRITE(GTIER, render_irqs);
POSTING_READ(GTIER);
/* CPT PCHs use a different hotplug bit layout */
if (HAS_PCH_CPT(dev)) {
hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
SDE_PORTB_HOTPLUG_CPT |
SDE_PORTC_HOTPLUG_CPT |
SDE_PORTD_HOTPLUG_CPT);
} else {
hotplug_mask = (SDE_CRT_HOTPLUG |
SDE_PORTB_HOTPLUG |
SDE_PORTC_HOTPLUG |
SDE_PORTD_HOTPLUG |
SDE_AUX_MASK);
}
dev_priv->pch_irq_mask = ~hotplug_mask;
I915_WRITE(SDEIIR, I915_READ(SDEIIR));
I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
I915_WRITE(SDEIER, hotplug_mask);
POSTING_READ(SDEIER);
ironlake_enable_pch_hotplug(dev);
if (IS_IRONLAKE_M(dev)) {
/* Clear & enable PCU event interrupts */
I915_WRITE(DEIIR, DE_PCU_EVENT);
I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
}
return 0;
}
/* Wire up the per-generation IRQ vtable. The upstream dispatch table
 * is compiled out (#if 0) in this port: the Ironlake handler is
 * called directly from irq_handler_kms() instead, so this function is
 * effectively a no-op stub kept for call-site compatibility. */
void intel_irq_init(struct drm_device *dev)
{
#if 0
if (IS_IVYBRIDGE(dev)) {
/* Share pre & uninstall handlers with ILK/SNB */
dev->driver->irq_handler = ivybridge_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
dev->driver->irq_postinstall = ivybridge_irq_postinstall;
dev->driver->irq_uninstall = ironlake_irq_uninstall;
dev->driver->enable_vblank = ivybridge_enable_vblank;
dev->driver->disable_vblank = ivybridge_disable_vblank;
} else if (HAS_PCH_SPLIT(dev)) {
dev->driver->irq_handler = ironlake_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
dev->driver->irq_postinstall = ironlake_irq_postinstall;
dev->driver->irq_uninstall = ironlake_irq_uninstall;
dev->driver->enable_vblank = ironlake_enable_vblank;
dev->driver->disable_vblank = ironlake_disable_vblank;
} else {
dev->driver->irq_preinstall = i915_driver_irq_preinstall;
dev->driver->irq_postinstall = i915_driver_irq_postinstall;
dev->driver->irq_uninstall = i915_driver_irq_uninstall;
dev->driver->irq_handler = i915_driver_irq_handler;
dev->driver->enable_vblank = i915_enable_vblank;
dev->driver->disable_vblank = i915_disable_vblank;
}
#endif
}
/* Device the attached interrupt handler services; set once by
 * drm_irq_install(). */
static struct drm_device *irq_device;
/* Kernel IRQ trampoline: the host interrupt system calls this with no
 * arguments, so the device comes from the file-scope irq_device. */
void irq_handler_kms()
{
// printf("%s\n",__FUNCTION__);
ironlake_irq_handler(irq_device);
}
/* Install the device interrupt: quiesce the hardware (preinstall),
 * attach irq_handler_kms to the device's IRQ line, program the enable
 * registers (postinstall), and clear the PCI INTx Disable bit so the
 * legacy interrupt can actually be delivered.
 * Returns -EINVAL if the driver is uninitialized, -EBUSY if already
 * installed, otherwise the postinstall result. */
int drm_irq_install(struct drm_device *dev)
{
int irq_line;
int ret = 0;
ENTER();
mutex_lock(&dev->struct_mutex);
/* Driver must have been initialized */
if (!dev->dev_private) {
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
if (dev->irq_enabled) {
mutex_unlock(&dev->struct_mutex);
return -EBUSY;
}
dev->irq_enabled = 1;
mutex_unlock(&dev->struct_mutex);
irq_device = dev;
irq_line = drm_dev_to_irq(dev);
DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
ironlake_irq_preinstall(dev);
ret = AttachIntHandler(irq_line, irq_handler_kms, 2);
/* NOTE(review): this treats ret == 0 as the attach-failure case and
 * then returns 0, which callers (i915_load_modeset_init checks
 * `if (ret)`) read as success -- confirm AttachIntHandler's return
 * convention; returning a negative errno here looks intended. */
if (ret == 0) {
mutex_lock(&dev->struct_mutex);
dev->irq_enabled = 0;
mutex_unlock(&dev->struct_mutex);
return ret;
}
ret = ironlake_irq_postinstall(dev);
// if (ret < 0) {
// mutex_lock(&dev->struct_mutex);
// dev->irq_enabled = 0;
// mutex_unlock(&dev->struct_mutex);
// free_irq(drm_dev_to_irq(dev), dev);
// }
/* clear bit 10 (Interrupt Disable) of the PCI command register */
u16_t cmd = PciRead16(dev->pdev->busnr, dev->pdev->devfn, 4);
cmd&= ~(1<<10);
PciWrite16(dev->pdev->busnr, dev->pdev->devfn, 4, cmd);
dbgprintf("PCI_CMD: %04x\n", cmd);
DRM_INFO("i915: irq initialized.\n");
LEAVE();
return ret;
}

View File

@ -0,0 +1,17 @@
/* Stub replacements for the Linux i915 tracepoints: tracing is not
 * available in this port, so every trace_* invocation expands to
 * nothing while keeping the call sites compilable. */
#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _I915_TRACE_H_
//#include <linux/stringify.h>
#include <linux/types.h>
//#include <linux/tracepoint.h>
#define trace_i915_gem_object_create(x)
#define trace_i915_gem_object_destroy(x)
#define trace_i915_gem_object_change_domain(a, b, c)
#define trace_i915_gem_object_unbind(x)
#define trace_i915_gem_ring_flush(a, b, c)
#define trace_i915_gem_object_bind(a, b)
#define trace_i915_ring_wait_end(x)
#endif

View File

@ -1,5 +1,5 @@
/*
* Copyright © 2006 Intel Corporation
* Copyright © 2006 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),

View File

@ -36,9 +36,8 @@
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
//#include "i915_trace.h"
#include "i915_trace.h"
#include "drm_dp_helper.h"
#include "drm_crtc_helper.h"
phys_addr_t get_bus_addr(void);
@ -4510,8 +4509,6 @@ void sandybridge_update_wm(struct drm_device *dev)
int fbc_wm, plane_wm, cursor_wm;
unsigned int enabled;
ENTER();
enabled = 0;
if (g4x_compute_wm0(dev, 0,
&sandybridge_display_wm_info, latency,
@ -4794,10 +4791,9 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
static void intel_update_watermarks(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
ENTER();
if (dev_priv->display.update_wm)
dev_priv->display.update_wm(dev);
LEAVE();
}
void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,

View File

@ -206,17 +206,8 @@ intel_dp_link_clock(uint8_t link_bw)
*/
static int
intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock, int check_bpp)
intel_dp_link_required(int pixel_clock, int bpp)
{
struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int bpp = 24;
if (check_bpp)
bpp = check_bpp;
else if (intel_crtc)
bpp = intel_crtc->bpp;
return (pixel_clock * bpp + 9) / 10;
}
@ -243,12 +234,11 @@ intel_dp_mode_valid(struct drm_connector *connector,
return MODE_PANEL;
}
mode_rate = intel_dp_link_required(intel_dp, mode->clock, 0);
mode_rate = intel_dp_link_required(mode->clock, 24);
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
if (mode_rate > max_rate) {
mode_rate = intel_dp_link_required(intel_dp,
mode->clock, 18);
mode_rate = intel_dp_link_required(mode->clock, 18);
if (mode_rate > max_rate)
return MODE_CLOCK_HIGH;
else
@ -681,7 +671,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
int lane_count, clock;
int max_lane_count = intel_dp_max_lane_count(intel_dp);
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 0;
int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
@ -699,7 +689,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
if (intel_dp_link_required(intel_dp, mode->clock, bpp)
if (intel_dp_link_required(mode->clock, bpp)
<= link_avail) {
intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;

View File

@ -622,7 +622,7 @@ static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
.destroy = intel_encoder_destroy,
};
static int intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
{
DRM_DEBUG_KMS("Skipping LVDS initialization for %s\n", id->ident);
return 1;
@ -694,6 +694,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"),
},
},
{
.callback = intel_no_lvds_dmi_callback,
.ident = "AOpen i45GMx-I",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
DMI_MATCH(DMI_BOARD_NAME, "i45GMx-I"),
},
},
{
.callback = intel_no_lvds_dmi_callback,
.ident = "Aopen i945GTt-VFA",

View File

@ -33,7 +33,7 @@
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
//#include "i915_trace.h"
#include "i915_trace.h"
#include "intel_drv.h"
/*
@ -1139,8 +1139,6 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
unsigned long end;
u32 head;
ENTER();
/* If the reported head position has wrapped or hasn't advanced,
* fallback to the slow and accurate path.
*/
@ -1149,10 +1147,7 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
ring->head = head;
ring->space = ring_space(ring);
if (ring->space >= n)
{
LEAVE();
return 0;
};
}
@ -1161,20 +1156,15 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
ring->head = I915_READ_HEAD(ring);
ring->space = ring_space(ring);
if (ring->space >= n) {
// trace_i915_ring_wait_end(ring);
LEAVE();
trace_i915_ring_wait_end(ring);
return 0;
}
msleep(1);
if (atomic_read(&dev_priv->mm.wedged))
{
LEAVE();
return -EAGAIN;
};
} while (!time_after(jiffies, end));
LEAVE();
trace_i915_ring_wait_end(ring);
return -EBUSY;
}

View File

@ -17,6 +17,8 @@
#include "bitmap.h"
extern struct drm_device *main_device;
typedef struct
{
@ -67,6 +69,9 @@ static display_t *os_display;
u32_t cmd_buffer;
u32_t cmd_offset;
void init_render();
int sna_init();
int init_cursor(cursor_t *cursor);
static cursor_t* __stdcall select_cursor_kms(cursor_t *cursor);
static void __stdcall move_cursor_kms(cursor_t *cursor, int x, int y);
@ -210,6 +215,8 @@ int init_display_kms(struct drm_device *dev)
};
#endif
main_device = dev;
int err;
err = init_bitmaps();
@ -218,6 +225,8 @@ int init_display_kms(struct drm_device *dev)
printf("Initialize bitmap manager\n");
};
sna_init();
LEAVE();
return 0;
@ -577,7 +586,6 @@ cursor_t* __stdcall select_cursor_kms(cursor_t *cursor)
return old;
};
extern struct drm_device *main_device;
#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
@ -748,6 +756,7 @@ int blit_video(u32 hbitmap, int dst_x, int dst_y,
#else
u8* src_offset;
u8* dst_offset;
u32 ifl;
src_offset = (u8*)(src_y*bitmap->pitch + src_x*4);
src_offset += (u32)bitmap->uaddr;
@ -757,6 +766,7 @@ int blit_video(u32 hbitmap, int dst_x, int dst_y,
u32_t tmp_h = height;
ifl = safe_cli();
while( tmp_h--)
{
u32_t tmp_w = width;
@ -774,6 +784,7 @@ int blit_video(u32 hbitmap, int dst_x, int dst_y,
tmp_dst++;
};
};
safe_sti(ifl);
}
#endif
@ -810,14 +821,12 @@ int blit_video(u32 hbitmap, int dst_x, int dst_y,
i915_gem_object_set_to_gtt_domain(bitmap->obj, false);
if (HAS_BLT(main_device))
ring = &dev_priv->ring[BCS];
else
ring = &dev_priv->ring[RCS];
ring->dispatch_execbuffer(ring, cmd_offset, n*4);
{
int ret;
ring = &dev_priv->ring[BCS];
ring->dispatch_execbuffer(ring, cmd_offset, n*4);
ret = intel_ring_begin(ring, 4);
if (ret)
return ret;
@ -827,8 +836,179 @@ int blit_video(u32 hbitmap, int dst_x, int dst_y,
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
}
else
{
ring = &dev_priv->ring[RCS];
ring->dispatch_execbuffer(ring, cmd_offset, n*4);
ring->flush(ring, 0, I915_GEM_DOMAIN_RENDER);
};
bitmap->obj->base.read_domains = I915_GEM_DOMAIN_CPU;
bitmap->obj->base.write_domain = I915_GEM_DOMAIN_CPU;
return 0;
fail:
return -1;
};
/* For display hotplug interrupt */
/* Unmask 'mask' bits in DEIMR when currently masked. This duplicates
 * the static helper of the same name in the IRQ file; kept static so
 * each translation unit has its own copy. */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
if ((dev_priv->irq_mask & mask) != 0) {
dev_priv->irq_mask &= ~mask;
I915_WRITE(DEIMR, dev_priv->irq_mask);
POSTING_READ(DEIMR);
}
}
/* Enable the vblank interrupt for a pipe (0 -> pipe A, otherwise
 * pipe B) under irq_lock. The pipe-enabled sanity check is compiled
 * out in this port. Always returns 0. */
static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
// if (!i915_pipe_enabled(dev, pipe))
// return -EINVAL;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
}
/* Debug helper: dump the current interrupt enable/identity/mask
 * registers (legacy bank or the split DE/SDE/GT banks on PCH-split
 * hardware) plus per-ring IMR values on GEN6/GEN7.
 * Always returns 0. */
static int i915_interrupt_info(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret, i, pipe;
if (!HAS_PCH_SPLIT(dev)) {
dbgprintf("Interrupt enable: %08x\n",
I915_READ(IER));
dbgprintf("Interrupt identity: %08x\n",
I915_READ(IIR));
dbgprintf("Interrupt mask: %08x\n",
I915_READ(IMR));
for_each_pipe(pipe)
dbgprintf("Pipe %c stat: %08x\n",
pipe_name(pipe),
I915_READ(PIPESTAT(pipe)));
} else {
dbgprintf("North Display Interrupt enable: %08x\n",
I915_READ(DEIER));
dbgprintf("North Display Interrupt identity: %08x\n",
I915_READ(DEIIR));
dbgprintf("North Display Interrupt mask: %08x\n",
I915_READ(DEIMR));
dbgprintf("South Display Interrupt enable: %08x\n",
I915_READ(SDEIER));
dbgprintf("South Display Interrupt identity: %08x\n",
I915_READ(SDEIIR));
dbgprintf("South Display Interrupt mask: %08x\n",
I915_READ(SDEIMR));
dbgprintf("Graphics Interrupt enable: %08x\n",
I915_READ(GTIER));
dbgprintf("Graphics Interrupt identity: %08x\n",
I915_READ(GTIIR));
dbgprintf("Graphics Interrupt mask: %08x\n",
I915_READ(GTIMR));
}
dbgprintf("Interrupts received: %d\n",
atomic_read(&dev_priv->irq_received));
for (i = 0; i < I915_NUM_RINGS; i++) {
if (IS_GEN6(dev) || IS_GEN7(dev)) {
printf("Graphics Interrupt mask (%s): %08x\n",
dev_priv->ring[i].name,
I915_READ_IMR(&dev_priv->ring[i]));
}
// i915_ring_seqno_info(m, &dev_priv->ring[i]);
}
return 0;
}
/*
 * Submit a command buffer to the render (RCS) ring.
 * @buffer: batch buffer object (must already be bound into the GTT)
 * @offset: buffer-relative start offset; the object's gtt_offset is
 *          added below, so the ring sees an absolute GTT address
 * @size:   batch length in bytes
 * The call sequence (dispatch, flush, irq_get, add_request) is
 * order-sensitive — do not reorder.
 */
void execute_buffer (struct drm_i915_gem_object *buffer, uint32_t offset,
int size)
{
struct intel_ring_buffer *ring;
drm_i915_private_t *dev_priv = main_device->dev_private;
u32 invalidate;
u32 seqno = 2;
offset += buffer->gtt_offset;
// dbgprintf("execute %x size %d\n", offset, size);
// asm volatile(
// "mfence \n"
// "wbinvd \n"
// "mfence \n"
// :::"memory");
ring = &dev_priv->ring[RCS];
ring->dispatch_execbuffer(ring, offset, size);
/* invalidate command cache; gen4+ also samples textures */
invalidate = I915_GEM_DOMAIN_COMMAND;
if (INTEL_INFO(main_device)->gen >= 4)
invalidate |= I915_GEM_DOMAIN_SAMPLER;
/* NOTE(review): on flush failure only a new seqno is reserved; the
 * error itself is otherwise ignored — confirm intended */
if (ring->flush(ring, invalidate, 0))
i915_gem_next_request_seqno(ring);
ring->irq_get(ring);
ring->add_request(ring, &seqno);
// i915_interrupt_info(main_device);
// ironlake_enable_vblank(main_device, 0);
};
/*
 * Blit a user bitmap (looked up by @hbitmap in the bitmap handle
 * manager) onto the screen through the SNA render engine.
 * dst_x/dst_y are window-relative and are offset by the window origin;
 * src_x/src_y index into the source bitmap; w/h is the copy size.
 * Returns 0 on success, -1 for a zero or unknown handle.
 */
int blit_textured(u32 hbitmap, int dst_x, int dst_y,
                  int src_x, int src_y, u32 w, u32 h)
{
    bitmap_t *src_bitmap, *dst_bitmap;
    bitmap_t screen;
    rect_t winrc;

    // dbgprintf(" handle: %d dx %d dy %d sx %d sy %d w %d h %d\n",
    //           hbitmap, dst_x, dst_y, src_x, src_y, w, h);

    if (unlikely(hbitmap == 0))
        return -1;

    src_bitmap = (bitmap_t*)hman_get_data(&bm_man, hbitmap);
    if (unlikely(src_bitmap == NULL))
        return -1;

    GetWindowRect(&winrc);

    /* describe the framebuffer as a pseudo-bitmap destination */
    screen.pitch  = os_display->pitch;
    screen.gaddr  = 0;
    screen.width  = os_display->width;
    screen.height = os_display->height;
    screen.obj    = (void*)-1;          /* sentinel: not a GEM-backed bitmap */
    dst_bitmap = &screen;

    /* translate window-relative coordinates to screen coordinates */
    dst_x += winrc.left;
    dst_y += winrc.top;

    i915_gem_object_set_to_gtt_domain(src_bitmap->obj, false);

    sna_blit_copy(dst_bitmap, dst_x, dst_y, w, h, src_bitmap, src_x, src_y);

    src_bitmap->obj->base.read_domains = I915_GEM_DOMAIN_CPU;
    src_bitmap->obj->base.write_domain = I915_GEM_DOMAIN_CPU;

    /* BUG FIX: function is declared int but previously fell off the end
     * without a return statement — undefined behavior when the caller
     * reads the result. Also dropped the unused dev_priv local. */
    return 0;
};

View File

@ -20,9 +20,11 @@ void parse_cmdline(char *cmdline, char *log);
int _stdcall display_handler(ioctl_t *io);
int init_agp(void);
int create_video(int width, int height, u32_t *outp);
int video_blit(uint64_t src_offset, int x, int y,
int w, int h, int pitch);
int blit_video(u32 hbitmap, int dst_x, int dst_y,
int src_x, int src_y, u32 w, u32 h);
int blit_textured(u32 hbitmap, int dst_x, int dst_y,
int src_x, int src_y, u32 w, u32 h);
static char log[256];
@ -47,7 +49,8 @@ u32_t drvEntry(int action, char *cmdline)
if(!dbg_open(log))
{
strcpy(log, "/RD/1/DRIVERS/i915.log");
// strcpy(log, "/RD/1/DRIVERS/i915.log");
strcpy(log, "/HD1/2/i915.log");
if(!dbg_open(log))
{
@ -57,6 +60,9 @@ u32_t drvEntry(int action, char *cmdline)
}
dbgprintf("i915 blitter preview\n cmdline: %s\n", cmdline);
cpu_detect();
dbgprintf("\ncache line size %d\n", x86_clflush_size);
enum_pci_devices();
err = i915_init();
@ -79,12 +85,13 @@ u32_t drvEntry(int action, char *cmdline)
#define COMPATIBLE_API 0x0100 /* 1.00 */
#define API_VERSION (COMPATIBLE_API << 16) | CURRENT_API
#define DISPLAY_VERSION CURRENT_API
#define DISPLAY_VERSION API_VERSION
#define SRV_GETVERSION 0
#define SRV_ENUM_MODES 1
#define SRV_SET_MODE 2
#define SRV_GET_CAPS 3
#define SRV_CREATE_SURFACE 10
@ -132,6 +139,10 @@ int _stdcall display_handler(ioctl_t *io)
retval = set_user_mode((videomode_t*)inp);
break;
case SRV_GET_CAPS:
retval = get_driver_caps((hwcaps_t*)inp);
break;
case SRV_CREATE_SURFACE:
// check_input(8);
retval = create_surface((struct io_call_10*)inp);
@ -139,7 +150,10 @@ int _stdcall display_handler(ioctl_t *io)
case SRV_BLIT_VIDEO:
blit_video( inp[0], inp[1], inp[2],
// blit_video( inp[0], inp[1], inp[2],
// inp[3], inp[4], inp[5], inp[6]);
blit_textured( inp[0], inp[1], inp[2],
inp[3], inp[4], inp[5], inp[6]);
retval = 0;
@ -211,19 +225,22 @@ void parse_cmdline(char *cmdline, char *log)
};
};
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx)
{
/* ecx is often an input as well as an output. */
asm volatile(
"cpuid"
asm volatile("cpuid"
: "=a" (*eax),
"=b" (*ebx),
"=c" (*ecx),
"=d" (*edx)
: "" (*eax), "2" (*ecx));
: "0" (*eax), "2" (*ecx)
: "memory");
}
static inline void cpuid(unsigned int op,
unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx)

View File

@ -350,11 +350,11 @@ static pci_dev_t* pci_scan_device(u32_t busnr, int devfn)
hdr = PciRead8(busnr, devfn, PCI_HEADER_TYPE);
dev = (pci_dev_t*)kzalloc(sizeof(pci_dev_t), 0);
if(unlikely(dev == NULL))
return NULL;
INIT_LIST_HEAD(&dev->link);
if(unlikely(dev == NULL))
return NULL;
dev->pci_dev.busnr = busnr;
dev->pci_dev.devfn = devfn;

View File

@ -0,0 +1,4 @@
{ 0x0060005a, 0x204077be, 0x000000c0, 0x008d0040 },
{ 0x0060005a, 0x206077be, 0x000000c0, 0x008d0080 },
{ 0x0060005a, 0x208077be, 0x000000d0, 0x008d0040 },
{ 0x0060005a, 0x20a077be, 0x000000d0, 0x008d0080 },

View File

@ -0,0 +1,12 @@
{ 0x0060005a, 0x23c077bd, 0x000000e0, 0x008d0040 },
{ 0x0060005a, 0x23e077bd, 0x000000e0, 0x008d0080 },
{ 0x01600038, 0x218003bd, 0x008d03c0, 0x00000000 },
{ 0x01600038, 0x21a003bd, 0x008d03e0, 0x00000000 },
{ 0x0060005a, 0x23c077bd, 0x000000c0, 0x008d0040 },
{ 0x0060005a, 0x23e077bd, 0x000000c0, 0x008d0080 },
{ 0x00600041, 0x204077be, 0x008d03c0, 0x008d0180 },
{ 0x00600041, 0x206077be, 0x008d03e0, 0x008d01a0 },
{ 0x0060005a, 0x23c077bd, 0x000000d0, 0x008d0040 },
{ 0x0060005a, 0x23e077bd, 0x000000d0, 0x008d0080 },
{ 0x00600041, 0x208077be, 0x008d03c0, 0x008d0180 },
{ 0x00600041, 0x20a077be, 0x008d03e0, 0x008d01a0 },

View File

@ -0,0 +1,3 @@
{ 0x00000201, 0x20080061, 0x00000000, 0x00000000 },
{ 0x00600001, 0x20200022, 0x008d0000, 0x00000000 },
{ 0x02800031, 0x21c01cc9, 0x00000020, 0x0a8a0001 },

View File

@ -0,0 +1,17 @@
{ 0x00600001, 0x204003be, 0x008d01c0, 0x00000000 },
{ 0x00600001, 0x206003be, 0x008d01e0, 0x00000000 },
{ 0x00600001, 0x208003be, 0x008d0200, 0x00000000 },
{ 0x00600001, 0x20a003be, 0x008d0220, 0x00000000 },
{ 0x00600001, 0x20c003be, 0x008d0240, 0x00000000 },
{ 0x00600001, 0x20e003be, 0x008d0260, 0x00000000 },
{ 0x00600001, 0x210003be, 0x008d0280, 0x00000000 },
{ 0x00600001, 0x212003be, 0x008d02a0, 0x00000000 },
{ 0x05800031, 0x24001cc8, 0x00000040, 0x90019000 },
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 },

View File

@ -0,0 +1,55 @@
/*
* Copyright (c) 2011 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Chris Wilson <chris@chris-wilson.co.uk>
*
*/
#ifndef _SNA_COMPILER_H_
#define _SNA_COMPILER_H_
/* Compiler abstraction macros: real GCC attributes when optimizing,
 * no-op fallbacks otherwise. */
#if defined(__GNUC__) && (__GNUC__ > 2) && defined(__OPTIMIZE__)
#define noinline __attribute__((noinline))
#define fastcall __attribute__((regparm(3)))
#define must_check __attribute__((warn_unused_result))
#define constant __attribute__((const))
#else
/* NOTE(review): likely/unlikely are only defined in this fallback
 * branch — presumably the GCC branch relies on definitions from
 * another header; confirm they resolve when __OPTIMIZE__ is set. */
#define likely(expr) (expr)
#define unlikely(expr) (expr)
#define noinline
#define fastcall
#define must_check
#define constant
#endif
/* Valgrind annotation helpers: VG(x) expands to x only when built
 * with valgrind support. */
#ifdef HAVE_VALGRIND
#define VG(x) x
#else
#define VG(x)
#endif
#define VG_CLEAR(s) VG(memset(&s, 0, sizeof(s)))
/* Fails to compile (negative array size) when E is false. */
#define COMPILE_TIME_ASSERT(E) ((void)sizeof(char[1 - 2*!(E)]))
#endif /* _SNA_COMPILER_H_ */

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,529 @@
/*
* Copyright (c) 2011 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Chris Wilson <chris@chris-wilson.co.uk>
*
*/
#ifndef KGEM_H
#define KGEM_H
#include "compiler.h"
#include <linux/list.h>
//#include <stdarg.h>
#include <i915_drm.h>
#if DEBUG_KGEM
#define DBG_HDR(x) ErrorF x
#else
#define DBG_HDR(x)
#endif
struct kgem_bo {
struct kgem_bo *proxy;
struct list_head list;
struct list_head request;
struct list_head vma;
void *map;
uint32_t gaddr;
#define IS_CPU_MAP(ptr) ((uintptr_t)(ptr) & 1)
#define IS_GTT_MAP(ptr) (ptr && ((uintptr_t)(ptr) & 1) == 0)
struct kgem_request *rq;
struct drm_i915_gem_exec_object2 *exec;
struct kgem_bo_binding {
struct kgem_bo_binding *next;
uint32_t format;
uint16_t offset;
} binding;
uint32_t unique_id;
uint32_t refcnt;
uint32_t handle;
uint32_t presumed_offset;
uint32_t delta;
union {
struct {
uint32_t count:27;
uint32_t bucket:5;
#define NUM_CACHE_BUCKETS 16
#define MAX_CACHE_SIZE (1 << (NUM_CACHE_BUCKETS+12))
} pages;
uint32_t bytes;
} size;
uint32_t pitch : 18; /* max 128k */
uint32_t tiling : 2;
uint32_t reusable : 1;
uint32_t dirty : 1;
uint32_t domain : 2;
uint32_t needs_flush : 1;
uint32_t vmap : 1;
uint32_t io : 1;
uint32_t flush : 1;
uint32_t scanout : 1;
uint32_t sync : 1;
uint32_t purged : 1;
};
#define DOMAIN_NONE 0
#define DOMAIN_CPU 1
#define DOMAIN_GTT 2
#define DOMAIN_GPU 3
struct kgem_request {
struct list_head list;
struct kgem_bo *bo;
struct list_head buffers;
};
enum {
MAP_GTT = 0,
MAP_CPU,
NUM_MAP_TYPES,
};
struct kgem {
int fd;
int wedged;
int gen;
uint32_t unique_id;
enum kgem_mode {
/* order matches I915_EXEC_RING ordering */
KGEM_NONE = 0,
KGEM_RENDER,
KGEM_BSD,
KGEM_BLT,
} mode, ring;
struct list_head flushing;
struct list_head large;
struct list_head active[NUM_CACHE_BUCKETS][3];
struct list_head inactive[NUM_CACHE_BUCKETS];
struct list_head partial;
struct list_head requests;
struct kgem_request *next_request;
struct {
struct list_head inactive[NUM_CACHE_BUCKETS];
int16_t count;
} vma[NUM_MAP_TYPES];
uint16_t nbatch;
uint16_t surface;
uint16_t nexec;
uint16_t nreloc;
uint16_t nfence;
uint16_t max_batch_size;
uint32_t flush:1;
uint32_t sync:1;
uint32_t need_expire:1;
uint32_t need_purge:1;
uint32_t need_retire:1;
uint32_t scanout:1;
uint32_t flush_now:1;
uint32_t busy:1;
uint32_t has_vmap :1;
uint32_t has_relaxed_fencing :1;
uint32_t has_semaphores :1;
uint32_t has_llc :1;
uint32_t has_cpu_bo :1;
uint16_t fence_max;
uint16_t half_cpu_cache_pages;
uint32_t aperture_total, aperture_high, aperture_low, aperture_mappable;
uint32_t aperture, aperture_fenced;
uint32_t min_alignment;
uint32_t max_upload_tile_size, max_copy_tile_size;
uint32_t max_gpu_size, max_cpu_size;
uint32_t large_object_size, max_object_size;
uint32_t partial_buffer_size;
// void (*context_switch)(struct kgem *kgem, int new_mode);
void (*retire)(struct kgem *kgem);
uint32_t *batch;
uint32_t *batch_ptr;
int batch_idx;
struct drm_i915_gem_object *batch_obj;
struct drm_i915_gem_exec_object2 exec[256];
struct drm_i915_gem_relocation_entry reloc[384];
};
#define KGEM_BATCH_RESERVED 1
#define KGEM_RELOC_RESERVED 4
#define KGEM_EXEC_RESERVED 1
#define KGEM_BATCH_SIZE(K) ((K)->max_batch_size-KGEM_BATCH_RESERVED)
#define KGEM_EXEC_SIZE(K) (int)(ARRAY_SIZE((K)->exec)-KGEM_EXEC_RESERVED)
#define KGEM_RELOC_SIZE(K) (int)(ARRAY_SIZE((K)->reloc)-KGEM_RELOC_RESERVED)
void kgem_init(struct kgem *kgem, int gen);
void kgem_reset(struct kgem *kgem);
struct kgem_bo *kgem_create_map(struct kgem *kgem,
void *ptr, uint32_t size,
bool read_only);
struct kgem_bo *kgem_create_for_name(struct kgem *kgem, uint32_t name);
struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size);
struct kgem_bo *kgem_create_proxy(struct kgem_bo *target,
int offset, int length);
//struct kgem_bo *kgem_upload_source_image(struct kgem *kgem,
// const void *data,
// BoxPtr box,
// int stride, int bpp);
int kgem_choose_tiling(struct kgem *kgem,
int tiling, int width, int height, int bpp);
unsigned kgem_can_create_2d(struct kgem *kgem, int width, int height, int depth);
#define KGEM_CAN_CREATE_GPU 0x1
#define KGEM_CAN_CREATE_CPU 0x2
#define KGEM_CAN_CREATE_LARGE 0x4
struct kgem_bo *
kgem_replace_bo(struct kgem *kgem,
struct kgem_bo *src,
uint32_t width,
uint32_t height,
uint32_t pitch,
uint32_t bpp);
enum {
CREATE_EXACT = 0x1,
CREATE_INACTIVE = 0x2,
CREATE_CPU_MAP = 0x4,
CREATE_GTT_MAP = 0x8,
CREATE_SCANOUT = 0x10,
};
struct kgem_bo *kgem_create_2d(struct kgem *kgem,
int width,
int height,
int bpp,
int tiling,
uint32_t flags);
uint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format);
void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset);
bool kgem_retire(struct kgem *kgem);
void _kgem_submit(struct kgem *kgem);
/* Flush the batch to the kernel, but only if any commands are queued. */
static inline void kgem_submit(struct kgem *kgem)
{
	if (kgem->nbatch != 0)
		_kgem_submit(kgem);
}
/*
static inline void kgem_bo_submit(struct kgem *kgem, struct kgem_bo *bo)
{
if (bo->exec)
_kgem_submit(kgem);
}
void __kgem_flush(struct kgem *kgem, struct kgem_bo *bo);
static inline void kgem_bo_flush(struct kgem *kgem, struct kgem_bo *bo)
{
kgem_bo_submit(kgem, bo);
if (!bo->needs_flush)
return;
__kgem_flush(kgem, bo);
bo->needs_flush = false;
}
*/
/* Take an extra reference on @bo; returns @bo for call chaining. */
static inline struct kgem_bo *kgem_bo_reference(struct kgem_bo *bo)
{
	++bo->refcnt;
	return bo;
}
void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo);
/* Drop one reference; release the bo once the last reference is gone. */
static inline void kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
{
	assert(bo->refcnt);

	bo->refcnt--;
	if (bo->refcnt == 0)
		_kgem_bo_destroy(kgem, bo);
}
void kgem_clear_dirty(struct kgem *kgem);
/* Switch the batch to @mode (render/bsd/blt). No-op if already in that
 * mode; with DEBUG_FLUSH_BATCH set, flushes before every switch. */
static inline void kgem_set_mode(struct kgem *kgem, enum kgem_mode mode)
{
assert(!kgem->wedged);
#if DEBUG_FLUSH_BATCH
kgem_submit(kgem);
#endif
if (kgem->mode == mode)
return;
// kgem->context_switch(kgem, mode);
kgem->mode = mode;
}
/* Set the initial batch mode; only valid on an idle (KGEM_NONE) batch —
 * use kgem_set_mode() to switch an active one. */
static inline void _kgem_set_mode(struct kgem *kgem, enum kgem_mode mode)
{
assert(kgem->mode == KGEM_NONE);
// kgem->context_switch(kgem, mode);
kgem->mode = mode;
}
/* Room for @num_dwords more dwords (plus the reserved tail) before the
 * batch would collide with the surface state at the buffer's end? */
static inline bool kgem_check_batch(struct kgem *kgem, int num_dwords)
{
	int needed = kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED;
	return likely(needed <= kgem->surface);
}
/* Room for @n more relocation entries? */
static inline bool kgem_check_reloc(struct kgem *kgem, int n)
{
	int limit = KGEM_RELOC_SIZE(kgem);
	return likely(kgem->nreloc + n <= limit);
}
/* Room for @n more execbuffer objects? */
static inline bool kgem_check_exec(struct kgem *kgem, int n)
{
	int limit = KGEM_EXEC_SIZE(kgem);
	return likely(kgem->nexec + n <= limit);
}
/* Room for @num_dwords batch dwords AND @num_surfaces surface-state
 * blocks (8 dwords each, allocated downward from kgem->surface) plus
 * one relocation per surface? */
static inline bool kgem_check_batch_with_surfaces(struct kgem *kgem,
int num_dwords,
int num_surfaces)
{
return (int)(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED) <= (int)(kgem->surface - num_surfaces*8) &&
kgem_check_reloc(kgem, num_surfaces);
}
/* Return a pointer to the next free dword in the batch, flushing the
 * current batch first if fewer than @num_dwords remain. */
static inline uint32_t *kgem_get_batch(struct kgem *kgem, int num_dwords)
{
	if (!kgem_check_batch(kgem, num_dwords))
		_kgem_submit(kgem);

	return &kgem->batch[kgem->nbatch];
}
/* Account for @num_dwords dwords just written into the batch. */
static inline void kgem_advance_batch(struct kgem *kgem, int num_dwords)
{
	kgem->nbatch = kgem->nbatch + num_dwords;
}
bool kgem_check_bo(struct kgem *kgem, ...) __attribute__((sentinel(0)));
bool kgem_check_bo_fenced(struct kgem *kgem, ...) __attribute__((sentinel(0)));
void _kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo);
/* Queue @bo (or the bo it proxies) on the current batch, unless it is
 * already on the execbuffer list. */
static inline void kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo)
{
	struct kgem_bo *real = bo->proxy ? bo->proxy : bo;

	if (real->exec == NULL)
		_kgem_add_bo(kgem, real);
}
#define KGEM_RELOC_FENCED 0x8000
uint32_t kgem_add_reloc(struct kgem *kgem,
uint32_t pos,
struct kgem_bo *bo,
uint32_t read_write_domains,
uint32_t delta);
void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo);
uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo);
Bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
const void *data, int length);
int kgem_bo_fenced_size(struct kgem *kgem, struct kgem_bo *bo);
void kgem_get_tile_size(struct kgem *kgem, int tiling,
int *tile_width, int *tile_height, int *tile_size);
/* Byte size of a page-backed bo. Not valid for io proxies, which track
 * their size in bytes instead — see kgem_buffer_size(). */
static inline int kgem_bo_size(struct kgem_bo *bo)
{
	assert(!(bo->proxy && bo->io));
	return bo->size.pages.count * PAGE_SIZE;
}
/* Byte size of an io buffer proxy (the size union holds bytes here). */
static inline int kgem_buffer_size(struct kgem_bo *bo)
{
	assert(bo->proxy && bo->io);

	return bo->size.bytes;
}
/*
static inline bool kgem_bo_blt_pitch_is_ok(struct kgem *kgem,
struct kgem_bo *bo)
{
int pitch = bo->pitch;
if (kgem->gen >= 40 && bo->tiling)
pitch /= 4;
if (pitch > MAXSHORT) {
DBG(("%s: can not blt to handle=%d, adjusted pitch=%d\n",
__FUNCTION__, pitch));
return false;
}
return true;
}
static inline bool kgem_bo_can_blt(struct kgem *kgem,
struct kgem_bo *bo)
{
if (bo->tiling == I915_TILING_Y) {
DBG(("%s: can not blt to handle=%d, tiling=Y\n",
__FUNCTION__, bo->handle));
return false;
}
return kgem_bo_blt_pitch_is_ok(kgem, bo);
}
*/
/* Can @bo be accessed through the mappable GTT aperture without an
 * eviction/rebind? Already GTT-resident or GTT-mapped => yes; otherwise
 * check alignment and aperture-limit heuristics. */
static inline bool kgem_bo_is_mappable(struct kgem *kgem,
struct kgem_bo *bo)
{
DBG_HDR(("%s: domain=%d, offset: %d size: %d\n",
__FUNCTION__, bo->domain, bo->presumed_offset, kgem_bo_size(bo)));
if (bo->domain == DOMAIN_GTT)
return true;
if (IS_GTT_MAP(bo->map))
return true;
/* pre-gen40 tiled access requires fence-size alignment */
if (kgem->gen < 40 && bo->tiling &&
bo->presumed_offset & (kgem_bo_fenced_size(kgem, bo) - 1))
return false;
/* offset unknown: be conservative, only allow small objects */
if (!bo->presumed_offset)
return kgem_bo_size(bo) <= kgem->aperture_mappable / 4;
return bo->presumed_offset + kgem_bo_size(bo) <= kgem->aperture_mappable;
}
/* Is @bo mapped with a mapping type usable for its tiling?
 * (A CPU map only matches an untiled bo.) */
static inline bool kgem_bo_mapped(struct kgem_bo *bo)
{
	DBG_HDR(("%s: map=%p, tiling=%d\n", __FUNCTION__, bo->map, bo->tiling));

	return bo->map != NULL && IS_CPU_MAP(bo->map) == !bo->tiling;
}
/* A bo is busy while it belongs to an outstanding request (rq set). */
static inline bool kgem_bo_is_busy(struct kgem_bo *bo)
{
	DBG_HDR(("%s: domain: %d exec? %d, rq? %d\n",
		 __FUNCTION__, bo->domain, bo->exec != NULL, bo->rq != NULL));
	assert(bo->proxy == NULL);

	return bo->rq != NULL;
}
/*
static inline bool kgem_bo_map_will_stall(struct kgem *kgem, struct kgem_bo *bo)
{
DBG(("%s? handle=%d, domain=%d, offset=%x, size=%x\n",
__FUNCTION__, bo->handle,
bo->domain, bo->presumed_offset, bo->size));
if (!kgem_bo_is_mappable(kgem, bo))
return true;
if (kgem->wedged)
return false;
if (kgem_bo_is_busy(bo))
return true;
if (bo->presumed_offset == 0)
return !list_is_empty(&kgem->requests);
return false;
}
*/
/* NULL bos are never dirty; otherwise report the dirty flag. */
static inline bool kgem_bo_is_dirty(struct kgem_bo *bo)
{
	return bo != NULL && bo->dirty;
}
/* Flag @bo as written by the GPU in the current batch. */
static inline void kgem_bo_mark_dirty(struct kgem_bo *bo)
{
DBG_HDR(("%s: handle=%d\n", __FUNCTION__, bo->handle));
bo->dirty = true;
}
void kgem_sync(struct kgem *kgem);
#define KGEM_BUFFER_WRITE 0x1
#define KGEM_BUFFER_INPLACE 0x2
#define KGEM_BUFFER_LAST 0x4
#define KGEM_BUFFER_WRITE_INPLACE (KGEM_BUFFER_WRITE | KGEM_BUFFER_INPLACE)
struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
uint32_t size, uint32_t flags,
void **ret);
struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
int width, int height, int bpp,
uint32_t flags,
void **ret);
void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *bo);
void kgem_throttle(struct kgem *kgem);
#define MAX_INACTIVE_TIME 10
bool kgem_expire_cache(struct kgem *kgem);
void kgem_purge_cache(struct kgem *kgem);
void kgem_cleanup_cache(struct kgem *kgem);
#if HAS_EXTRA_DEBUG
void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch);
#else
static inline void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch)
{
(void)kgem;
(void)nbatch;
}
#endif
#undef DBG_HDR
u32 get_buffer_offset(uint32_t handle);
#endif /* KGEM_H */

View File

@ -0,0 +1,323 @@
#include <drmP.h>
#include <drm.h>
#include "i915_drm.h"
#include "i915_drv.h"
#include "intel_drv.h"
#include <linux/kernel.h>
#include <linux/module.h>
#include <errno-base.h>
#include <memory.h>
#include <syscall.h>
#include "../bitmap.h"
#include "sna.h"
struct kgem_bo *create_bo(bitmap_t *bitmap);
static Bool sna_solid_cache_init(struct sna *sna);
struct sna *sna_device;
/* Initialise sna->render with a minimal do-nothing backend: zeroed
 * state, vertex buffer pointed at the static vertex_data array, and
 * the render ring selected. Most hooks are intentionally left unset
 * in this port (commented out below). */
void no_render_init(struct sna *sna)
{
struct sna_render *render = &sna->render;
memset (render,0, sizeof (*render));
render->vertices = render->vertex_data;
render->vertex_size = ARRAY_SIZE(render->vertex_data);
// render->composite = no_render_composite;
// render->copy_boxes = no_render_copy_boxes;
// render->copy = no_render_copy;
// render->fill_boxes = no_render_fill_boxes;
// render->fill = no_render_fill;
// render->fill_one = no_render_fill_one;
// render->clear = no_render_clear;
// render->reset = no_render_reset;
// render->flush = no_render_flush;
// render->fini = no_render_fini;
// sna->kgem.context_switch = no_render_context_switch;
// sna->kgem.retire = no_render_retire;
// if (sna->kgem.gen >= 60)
sna->kgem.ring = KGEM_RENDER;
}
/* Bring up the SNA acceleration core: install the no-op render backend,
 * then try the gen6 (SandyBridge) renderer, reset kgem, and seed the
 * solid-color cache. On success publishes the device via the global
 * sna_device and returns TRUE; returns FALSE if the solid cache cannot
 * be created.
 * NOTE(review): only gen6 is attempted here — the full per-generation
 * dispatch is kept below in the comment block for later porting. */
Bool sna_accel_init(struct sna *sna)
{
const char *backend;
// list_init(&sna->deferred_free);
// list_init(&sna->dirty_pixmaps);
// list_init(&sna->active_pixmaps);
// list_init(&sna->inactive_clock[0]);
// list_init(&sna->inactive_clock[1]);
// sna_accel_install_timers(sna);
backend = "no";
sna->have_render = false;
sna->default_tiling = 0; //I915_TILING_X;
no_render_init(sna);
if ((sna->have_render = gen6_render_init(sna)))
backend = "SandyBridge";
/*
if (sna->chipset.info->gen >= 80) {
} else if (sna->chipset.info->gen >= 70) {
if ((sna->have_render = gen7_render_init(sna)))
backend = "IvyBridge";
} else if (sna->chipset.info->gen >= 60) {
if ((sna->have_render = gen6_render_init(sna)))
backend = "SandyBridge";
} else if (sna->chipset.info->gen >= 50) {
if ((sna->have_render = gen5_render_init(sna)))
backend = "Ironlake";
} else if (sna->chipset.info->gen >= 40) {
if ((sna->have_render = gen4_render_init(sna)))
backend = "Broadwater";
} else if (sna->chipset.info->gen >= 30) {
if ((sna->have_render = gen3_render_init(sna)))
backend = "gen3";
} else if (sna->chipset.info->gen >= 20) {
if ((sna->have_render = gen2_render_init(sna)))
backend = "gen2";
}
*/
DBG(("%s(backend=%s, have_render=%d)\n",
__FUNCTION__, backend, sna->have_render));
kgem_reset(&sna->kgem);
if (!sna_solid_cache_init(sna))
return FALSE;
sna_device = sna;
#if 0
{
struct kgem_bo *screen_bo;
bitmap_t screen;
screen.pitch = 1024*4;
screen.gaddr = 0;
screen.width = 1024;
screen.height = 768;
screen.obj = (void*)-1;
screen_bo = create_bo(&screen);
sna->render.clear(sna, &screen, screen_bo);
}
#endif
return TRUE;
}
/* Top-level SNA entry point: allocate the device struct, initialise
 * kgem for gen 60 (SandyBridge — hard-coded in this port), disable
 * tiling and the throttle/delayed-flush paths, then hand off to
 * sna_accel_init(). Returns its TRUE/FALSE result (FALSE on alloc
 * failure). */
int sna_init()
{
struct sna *sna;
DBG(("%s\n", __FUNCTION__));
sna = kzalloc(sizeof(struct sna), 0);
if (sna == NULL)
return FALSE;
// sna->mode.cpp = 4;
kgem_init(&sna->kgem, 60);
/*
if (!xf86ReturnOptValBool(sna->Options,
OPTION_RELAXED_FENCING,
sna->kgem.has_relaxed_fencing)) {
xf86DrvMsg(scrn->scrnIndex,
sna->kgem.has_relaxed_fencing ? X_CONFIG : X_PROBED,
"Disabling use of relaxed fencing\n");
sna->kgem.has_relaxed_fencing = 0;
}
if (!xf86ReturnOptValBool(sna->Options,
OPTION_VMAP,
sna->kgem.has_vmap)) {
xf86DrvMsg(scrn->scrnIndex,
sna->kgem.has_vmap ? X_CONFIG : X_PROBED,
"Disabling use of vmap\n");
sna->kgem.has_vmap = 0;
}
*/
/* Disable tiling by default */
sna->tiling = SNA_TILING_DISABLE;
/* Default fail-safe value of 75 Hz */
// sna->vblank_interval = 1000 * 1000 * 1000 / 75;
sna->flags = 0;
sna->flags |= SNA_NO_THROTTLE;
sna->flags |= SNA_NO_DELAYED_FLUSH;
return sna_accel_init(sna);
}
/* Create the solid-color cache: one linear bo holding the color table,
 * with slot 0 pre-filled as opaque white (the most common fill) via a
 * one-dword proxy bo. Returns FALSE if the backing bo cannot be
 * allocated.
 * NOTE(review): the kgem_create_proxy() result is not checked before
 * dereferencing — confirm it cannot fail here. */
static Bool sna_solid_cache_init(struct sna *sna)
{
struct sna_solid_cache *cache = &sna->render.solid_cache;
DBG(("%s\n", __FUNCTION__));
cache->cache_bo =
kgem_create_linear(&sna->kgem, sizeof(cache->color));
if (!cache->cache_bo)
return FALSE;
/*
* Initialise [0] with white since it is very common and filling the
* zeroth slot simplifies some of the checks.
*/
cache->color[0] = 0xffffffff;
cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
cache->bo[0]->pitch = 4;
cache->dirty = 1;
cache->size = 1;
cache->last = 0;
return TRUE;
}
/* Push the accumulated solid-color table out to its cache bo.
 * Only valid while the cache is marked dirty and non-empty. */
void
sna_render_flush_solid(struct sna *sna)
{
	struct sna_solid_cache *solid = &sna->render.solid_cache;

	DBG(("sna_render_flush_solid(size=%d)\n", solid->size));
	assert(solid->dirty);
	assert(solid->size);

	/* write all cached colors in one go */
	kgem_bo_write(&sna->kgem, solid->cache_bo,
		      solid->color, solid->size*sizeof(uint32_t));

	solid->dirty = 0;
	solid->last = 0;	/* restart MRU lookup from slot 0 */
}
/* Retire the solid cache once its bo has been used by the GPU (or
 * unconditionally when @force): flush pending colors, drop every proxy
 * bo and the backing bo, then recreate a fresh backing bo with the
 * white slot 0 restored. When forced, the logical size also resets
 * to 1. */
static void
sna_render_finish_solid(struct sna *sna, bool force)
{
struct sna_solid_cache *cache = &sna->render.solid_cache;
int i;
DBG(("sna_render_finish_solid(force=%d, domain=%d, busy=%d, dirty=%d)\n",
force, cache->cache_bo->domain, cache->cache_bo->rq != NULL, cache->dirty));
/* nothing to do until the GPU has actually consumed the cache */
if (!force && cache->cache_bo->domain != DOMAIN_GPU)
return;
if (cache->dirty)
sna_render_flush_solid(sna);
for (i = 0; i < cache->size; i++) {
if (cache->bo[i] == NULL)
continue;
kgem_bo_destroy(&sna->kgem, cache->bo[i]);
cache->bo[i] = NULL;
}
kgem_bo_destroy(&sna->kgem, cache->cache_bo);
DBG(("sna_render_finish_solid reset\n"));
/* recreate the backing bo and the pre-filled white slot */
cache->cache_bo = kgem_create_linear(&sna->kgem, sizeof(cache->color));
cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
cache->bo[0]->pitch = 4;
if (force)
cache->size = 1;
}
/* Return a (referenced) one-pixel proxy bo for solid @color.
 * Lookup order: white fast path (slot 0), most-recently-used slot,
 * linear scan of the cache, and finally allocation of a new slot —
 * recycling the whole cache via sna_render_finish_solid() when full.
 * The returned bo carries an extra reference for the caller. */
struct kgem_bo *
sna_render_get_solid(struct sna *sna, uint32_t color)
{
struct sna_solid_cache *cache = &sna->render.solid_cache;
int i;
DBG(("%s: %08x\n", __FUNCTION__, color));
// if ((color & 0xffffff) == 0) /* alpha only */
// return kgem_bo_reference(sna->render.alpha_cache.bo[color>>24]);
if (color == 0xffffffff) {
DBG(("%s(white)\n", __FUNCTION__));
return kgem_bo_reference(cache->bo[0]);
}
/* most-recently-used slot */
if (cache->color[cache->last] == color) {
DBG(("sna_render_get_solid(%d) = %x (last)\n",
cache->last, color));
return kgem_bo_reference(cache->bo[cache->last]);
}
/* linear scan; a slot whose bo was dropped is recreated in place */
for (i = 1; i < cache->size; i++) {
if (cache->color[i] == color) {
if (cache->bo[i] == NULL) {
DBG(("sna_render_get_solid(%d) = %x (recreate)\n",
i, color));
goto create;
} else {
DBG(("sna_render_get_solid(%d) = %x (old)\n",
i, color));
goto done;
}
}
}
/* cache miss: recycle if full (i has run to cache->size), then claim
 * a new slot and mark the table dirty for the next flush */
sna_render_finish_solid(sna, i == ARRAY_SIZE(cache->color));
i = cache->size++;
cache->color[i] = color;
cache->dirty = 1;
DBG(("sna_render_get_solid(%d) = %x (new)\n", i, color));
create:
cache->bo[i] = kgem_create_proxy(cache->cache_bo,
i*sizeof(uint32_t), sizeof(uint32_t));
cache->bo[i]->pitch = 4;
done:
cache->last = i;
return kgem_bo_reference(cache->bo[i]);
}
/*
 * Copy a w×h rectangle from @src_bitmap to @dst_bitmap through the SNA
 * render copy hook, wrapping each bitmap in a throwaway kgem_bo
 * descriptor that carries just the GTT address, pitch and linear
 * (untiled) layout. Returns 0 — the copy hook reports no error here.
 */
int sna_blit_copy(bitmap_t *dst_bitmap, int dst_x, int dst_y,
                  int w, int h, bitmap_t *src_bitmap, int src_x, int src_y)
{
    struct kgem_bo src_bo, dst_bo;

    memset(&src_bo, 0, sizeof(src_bo));
    memset(&dst_bo, 0, sizeof(dst_bo));

    src_bo.gaddr  = src_bitmap->gaddr;
    src_bo.pitch  = src_bitmap->pitch;
    src_bo.tiling = 0;

    dst_bo.gaddr  = dst_bitmap->gaddr;
    dst_bo.pitch  = dst_bitmap->pitch;
    dst_bo.tiling = 0;

    sna_device->render.copy(sna_device, 0, src_bitmap, &src_bo,
                            dst_bitmap, &dst_bo, dst_x, dst_y,
                            src_x, src_y, w, h);

    /* BUG FIX: declared int but previously ended without a return
     * statement — undefined behavior when the caller uses the result. */
    return 0;
};

View File

@ -0,0 +1,125 @@
#define FALSE 0
#define TRUE 1
#define DBG(x)
//#define DBG(x) dbgprintf x
#define assert(x)
#include "compiler.h"
#include <linux/kernel.h>
struct pixman_box16
{
int16_t x1, y1, x2, y2;
};
typedef struct pixman_box16 BoxRec;
typedef unsigned int CARD32;
#include "sna_render.h"
#include "kgem.h"
#define PictOpClear 0
#define PictOpSrc 1
#define PictOpDst 2
#define PictOpOver 3
#define PictOpOverReverse 4
#define PictOpIn 5
#define PictOpInReverse 6
#define PictOpOut 7
#define PictOpOutReverse 8
#define PictOpAtop 9
#define PictOpAtopReverse 10
#define PictOpXor 11
#define PictOpAdd 12
#define PictOpSaturate 13
#define PictOpMaximum 13
struct sna {
unsigned flags;
#define SNA_NO_THROTTLE 0x1
#define SNA_NO_DELAYED_FLUSH 0x2
// int timer[NUM_TIMERS];
// uint16_t timer_active;
// uint16_t timer_ready;
// int vblank_interval;
// struct list deferred_free;
// struct list dirty_pixmaps;
// struct list active_pixmaps;
// struct list inactive_clock[2];
unsigned int tiling;
#define SNA_TILING_DISABLE 0x0
#define SNA_TILING_FB 0x1
#define SNA_TILING_2D 0x2
#define SNA_TILING_3D 0x4
#define SNA_TILING_ALL (~0)
int Chipset;
// EntityInfoPtr pEnt;
// struct pci_device *PciInfo;
// struct intel_chipset chipset;
// PicturePtr clear;
struct {
uint32_t fill_bo;
uint32_t fill_pixel;
uint32_t fill_alu;
} blt_state;
union {
// struct gen2_render_state gen2;
// struct gen3_render_state gen3;
// struct gen4_render_state gen4;
// struct gen5_render_state gen5;
struct gen6_render_state gen6;
// struct gen7_render_state gen7;
} render_state;
uint32_t have_render;
uint32_t default_tiling;
// Bool directRenderingOpen;
// char *deviceName;
/* Broken-out options. */
// OptionInfoPtr Options;
/* Driver phase/state information */
// Bool suspended;
struct kgem kgem;
struct sna_render render;
};
/* Number of free float slots remaining in the render vertex buffer. */
static inline int vertex_space(struct sna *sna)
{
	int capacity = sna->render.vertex_size;
	int used = sna->render.vertex_used;

	return capacity - used;
}
/* Append a single float to the render vertex buffer. */
static inline void vertex_emit(struct sna *sna, float v)
{
	assert(sna->render.vertex_used < sna->render.vertex_size);

	sna->render.vertices[sna->render.vertex_used] = v;
	sna->render.vertex_used++;
}
/* Pack two int16 coordinates into ONE float-sized vertex slot (the slot
 * is consumed as a single entry; the GPU reads it as packed shorts).
 * NOTE(review): writes through an int16_t* into the float array —
 * relies on the compiler tolerating this type pun; confirm. */
static inline void vertex_emit_2s(struct sna *sna, int16_t x, int16_t y)
{
int16_t *v = (int16_t *)&sna->render.vertices[sna->render.vertex_used++];
assert(sna->render.vertex_used <= sna->render.vertex_size);
v[0] = x;
v[1] = y;
}
/* Append one dword to the kgem batch buffer. A batch mode must already
 * be selected and room (including the reserved tail) must remain
 * before the surface-state area. */
static inline void batch_emit(struct sna *sna, uint32_t dword)
{
assert(sna->kgem.mode != KGEM_NONE);
assert(sna->kgem.nbatch + KGEM_BATCH_RESERVED < sna->kgem.surface);
sna->kgem.batch[sna->kgem.nbatch++] = dword;
}

View File

@ -0,0 +1,81 @@
#ifndef SNA_REG_H
#define SNA_REG_H

/* Flush */
#define MI_FLUSH			(0x04<<23)
#define MI_FLUSH_DW			(0x26<<23)

#define MI_WRITE_DIRTY_STATE		(1<<4)
#define MI_END_SCENE			(1<<3)
#define MI_GLOBAL_SNAPSHOT_COUNT_RESET	(1<<3)
#define MI_INHIBIT_RENDER_CACHE_FLUSH	(1<<2)
#define MI_STATE_INSTRUCTION_CACHE_FLUSH (1<<1)
#define MI_INVALIDATE_MAP_CACHE		(1<<0)
/* broadwater flush bits */
#define BRW_MI_GLOBAL_SNAPSHOT_RESET	(1 << 3)

#define MI_BATCH_BUFFER_END		(0xA << 23)

/* Noop */
#define MI_NOOP				0x00
#define MI_NOOP_WRITE_ID		(1<<22)
/* Mask of the 22-bit NOOP identification field (bits 21:0).
 * Fixed: the previous definition (1<<22 - 1) parsed as 1<<(22-1) because
 * additive operators bind tighter than shifts, yielding bit 21 only. */
#define MI_NOOP_ID_MASK			((1<<22) - 1)

/* Wait for Events */
#define MI_WAIT_FOR_EVENT		(0x03<<23)
#define MI_WAIT_FOR_PIPEB_SVBLANK	(1<<18)
#define MI_WAIT_FOR_PIPEA_SVBLANK	(1<<17)
#define MI_WAIT_FOR_OVERLAY_FLIP	(1<<16)
#define MI_WAIT_FOR_PIPEB_VBLANK	(1<<7)
#define MI_WAIT_FOR_PIPEB_SCAN_LINE_WINDOW (1<<5)
#define MI_WAIT_FOR_PIPEA_VBLANK	(1<<3)
#define MI_WAIT_FOR_PIPEA_SCAN_LINE_WINDOW (1<<1)

/* Set the scan line for MI_WAIT_FOR_PIPE?_SCAN_LINE_WINDOW */
#define MI_LOAD_SCAN_LINES_INCL		(0x12<<23)
#define MI_LOAD_SCAN_LINES_DISPLAY_PIPEA (0)
#define MI_LOAD_SCAN_LINES_DISPLAY_PIPEB (0x1<<20)

/* BLT commands */
#define BLT_WRITE_ALPHA			(1<<21)
#define BLT_WRITE_RGB			(1<<20)
#define BLT_SRC_TILED			(1<<15)
#define BLT_DST_TILED			(1<<11)

#define COLOR_BLT_CMD			((2<<29)|(0x40<<22)|(0x3))
#define XY_COLOR_BLT			((2<<29)|(0x50<<22)|(0x4))
#define XY_SETUP_BLT			((2<<29)|(1<<22)|6)
#define XY_SETUP_MONO_PATTERN_SL_BLT	((2<<29)|(0x11<<22)|7)
#define XY_SETUP_CLIP			((2<<29)|(3<<22)|1)
#define XY_SCANLINE_BLT			((2<<29)|(0x25<<22)|1)
#define XY_TEXT_IMMEDIATE_BLT		((2<<29)|(0x31<<22)|(1<<16))
#define XY_SRC_COPY_BLT_CMD		((2<<29)|(0x53<<22)|6)
#define SRC_COPY_BLT_CMD		((2<<29)|(0x43<<22)|0x4)
#define XY_PAT_BLT_IMMEDIATE		((2<<29)|(0x72<<22))
#define XY_MONO_PAT			((0x2<<29)|(0x52<<22)|0x7)
#define XY_MONO_SRC_COPY		((0x2<<29)|(0x54<<22)|(0x6))
#define XY_MONO_SRC_COPY_IMM		((0x2<<29)|(0x71<<22))
#define XY_FULL_MONO_PATTERN_BLT	((0x2<<29)|(0x57<<22)|0xa)
#define XY_FULL_MONO_PATTERN_MONO_SRC_BLT ((0x2<<29)|(0x58<<22)|0xa)

/* FLUSH commands */
#define BRW_3D(Pipeline,Opcode,Subopcode) \
	((3 << 29) | \
	 ((Pipeline) << 27) | \
	 ((Opcode) << 24) | \
	 ((Subopcode) << 16))
#define PIPE_CONTROL			BRW_3D(3, 2, 0)
#define PIPE_CONTROL_NOWRITE		(0 << 14)
#define PIPE_CONTROL_WRITE_QWORD	(1 << 14)
#define PIPE_CONTROL_WRITE_DEPTH	(2 << 14)
#define PIPE_CONTROL_WRITE_TIME		(3 << 14)
#define PIPE_CONTROL_DEPTH_STALL	(1 << 13)
#define PIPE_CONTROL_WC_FLUSH		(1 << 12)
#define PIPE_CONTROL_IS_FLUSH		(1 << 11)
#define PIPE_CONTROL_TC_FLUSH		(1 << 10)
#define PIPE_CONTROL_NOTIFY_ENABLE	(1 << 8)
#define PIPE_CONTROL_GLOBAL_GTT		(1 << 2)
#define PIPE_CONTROL_LOCAL_PGTT		(0 << 2)
#define PIPE_CONTROL_DEPTH_CACHE_FLUSH	(1 << 0)

#endif

View File

@ -0,0 +1,494 @@
#ifndef SNA_RENDER_H
#define SNA_RENDER_H

/* Stand-in for the X server's Bool in this standalone port. */
typedef int Bool;

/* Entry count for the (currently disabled) gradient cache below. */
#define GRADIENT_CACHE_SIZE 16

struct sna;
/* One composite rectangle: per-channel (src/mask/dst) origins plus a
 * common width and height. */
struct sna_composite_rectangles {
	struct sna_coordinate {
		int16_t x, y;
	} src, mask, dst;
	int16_t width, height;
};
/* Fully-prepared state for one composite operation.  The per-generation
 * setup code fills this in; the emit callbacks below then draw any number
 * of rectangles with that state until done() is called. */
struct sna_composite_op {
	/* Emit a single rectangle (regparm/fastcall convention). */
	fastcall void (*blt)(struct sna *sna, const struct sna_composite_op *op,
			     const struct sna_composite_rectangles *r);
	/* Emit one box / an array of boxes with the prepared state. */
	fastcall void (*box)(struct sna *sna,
			     const struct sna_composite_op *op,
			     const BoxRec *box);
	void (*boxes)(struct sna *sna, const struct sna_composite_op *op,
		      const BoxRec *box, int nbox);
	/* Finish the operation; called once after all rectangles. */
	void (*done)(struct sna *sna, const struct sna_composite_op *op);

	struct sna_damage **damage;

	uint32_t op;	/* composite operator code */

	/* Destination surface and the offset of the drawing origin. */
	struct {
		bitmap_t *pixmap;
		CARD32 format;
		struct kgem_bo *bo;
		int16_t x, y;
		uint16_t width, height;
	} dst;

	/* Source and mask input channels. */
	struct sna_composite_channel {
		struct kgem_bo *bo;
//		PictTransform *transform;
		uint16_t width;
		uint16_t height;
		uint32_t pict_format;
		uint32_t card_format;
		uint32_t filter;
		uint32_t repeat;
		uint32_t is_affine : 1;
		uint32_t is_solid : 1;
		uint32_t is_linear : 1;
		uint32_t is_opaque : 1;
		uint32_t alpha_fixup : 1;
		uint32_t rb_reversed : 1;
		int16_t offset[2];
		float scale[2];

//		pixman_transform_t embedded_transform;

		/* Generation-specific per-channel shader state. */
		union {
			struct {
				uint32_t pixel;
				float linear_dx;
				float linear_dy;
				float linear_offset;
			} gen2;
			struct gen3_shader_channel {
				int type;
				uint32_t mode;
				uint32_t constants;
			} gen3;
		} u;
	} src, mask;

	uint32_t is_affine : 1;
	uint32_t has_component_alpha : 1;
	uint32_t need_magic_ca_pass : 1;
	uint32_t rb_reversed : 1;

	int16_t floats_per_vertex;
	int16_t floats_per_rect;
	/* Emit the vertex data for one rectangle. */
	fastcall void (*prim_emit)(struct sna *sna,
				   const struct sna_composite_op *op,
				   const struct sna_composite_rectangles *r);

	/* Redirection state when rendering cannot target the real bo
	 * directly; NOTE(review): presumably resolved back into real_bo
	 * on completion -- confirm against the redirect helpers. */
	struct sna_composite_redirect {
		struct kgem_bo *real_bo;
		struct sna_damage **real_damage, *damage;
		BoxRec box;
	} redirect;

	/* Generation-specific operation state. */
	union {
		struct sna_blt_state {
			bitmap_t *src_pixmap;
			int16_t sx, sy;

			uint32_t inplace :1;
			uint32_t overwrites:1;
			uint32_t bpp : 6;

			uint32_t cmd;
			uint32_t br13;
			uint32_t pitch[2];
			uint32_t pixel;
			struct kgem_bo *bo[2];
		} blt;

		struct {
			float constants[8];
			uint32_t num_constants;
		} gen3;

		struct {
			int wm_kernel;
			int ve_id;
		} gen4;

		struct {
			int wm_kernel;
			int ve_id;
		} gen5;

		struct {
			int wm_kernel;
			int nr_surfaces;
			int nr_inputs;
			int ve_id;
		} gen6;

		struct {
			int wm_kernel;
			int nr_surfaces;
			int nr_inputs;
			int ve_id;
		} gen7;

		void *priv;
	} u;
};
/* Render backend vtable plus the vertex and solid-colour state shared by
 * all generations.  Most X-server entry points are compiled out in this
 * port and kept below only as disabled reference code. */
struct sna_render {
	int max_3d_size;
	int max_3d_pitch;

/*
	Bool (*composite)(struct sna *sna, uint8_t op,
			  PicturePtr dst, PicturePtr src, PicturePtr mask,
			  int16_t src_x, int16_t src_y,
			  int16_t msk_x, int16_t msk_y,
			  int16_t dst_x, int16_t dst_y,
			  int16_t w, int16_t h,
			  struct sna_composite_op *tmp);

	Bool (*composite_spans)(struct sna *sna, uint8_t op,
				PicturePtr dst, PicturePtr src,
				int16_t src_x, int16_t src_y,
				int16_t dst_x, int16_t dst_y,
				int16_t w, int16_t h,
				unsigned flags,
				struct sna_composite_spans_op *tmp);
#define COMPOSITE_SPANS_RECTILINEAR 0x1

	Bool (*video)(struct sna *sna,
		      struct sna_video *video,
		      struct sna_video_frame *frame,
		      RegionPtr dstRegion,
		      short src_w, short src_h,
		      short drw_w, short drw_h,
		      PixmapPtr pixmap);

	Bool (*fill_boxes)(struct sna *sna,
			   CARD8 op,
			   PictFormat format,
			   const xRenderColor *color,
			   PixmapPtr dst, struct kgem_bo *dst_bo,
			   const BoxRec *box, int n);
	Bool (*fill)(struct sna *sna, uint8_t alu,
		     PixmapPtr dst, struct kgem_bo *dst_bo,
		     uint32_t color,
		     struct sna_fill_op *tmp);
	Bool (*fill_one)(struct sna *sna, PixmapPtr dst, struct kgem_bo *dst_bo,
			 uint32_t color,
			 int16_t x1, int16_t y1, int16_t x2, int16_t y2,
			 uint8_t alu);
*/
	/* Clear the destination bitmap. */
	Bool (*clear)(struct sna *sna, bitmap_t *dst, struct kgem_bo *dst_bo);
/*
	Bool (*copy_boxes)(struct sna *sna, uint8_t alu,
			   PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
			   PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
			   const BoxRec *box, int n);
*/
	/* Copy a w x h region between bitmaps using raster op 'alu'. */
	Bool (*copy)(struct sna *sna, uint8_t alu,
		     bitmap_t *src, struct kgem_bo *src_bo,
		     bitmap_t *dst, struct kgem_bo *dst_bo,
		     int dst_x, int dst_y, int src_x, int src_y,
		     int w, int h);

	void (*flush)(struct sna *sna);
	void (*reset)(struct sna *sna);
	void (*fini)(struct sna *sna);

//	struct sna_alpha_cache {
//		struct kgem_bo *cache_bo;
//		struct kgem_bo *bo[256];
//	} alpha_cache;

	/* Cache of single-colour bos used for solid fills. */
	struct sna_solid_cache {
		struct kgem_bo *cache_bo;
		uint32_t color[1024];
		struct kgem_bo *bo[1024];
		int last;
		int size;
		int dirty;
	} solid_cache;

//	struct {
//		struct sna_gradient_cache {
//			struct kgem_bo *bo;
//			int nstops;
//			PictGradientStop *stops;
//		} cache[GRADIENT_CACHE_SIZE];
//		int size;
//	} gradient_cache;

//	struct sna_glyph_cache{
//		PicturePtr picture;
//		struct sna_glyph **glyphs;
//		uint16_t count;
//		uint16_t evict;
//	} glyph[2];

	/* Vertex buffer bookkeeping; indices count floats in vertices[]
	 * (see the vertex_emit* helpers, which append via vertex_used). */
	uint16_t vertex_start;
	uint16_t vertex_index;
	uint16_t vertex_used;
	uint16_t vertex_size;
	uint16_t vertex_reloc[8];

	struct kgem_bo *vbo;
	float *vertices;	/* backing store the vertex_emit* helpers write into */
	float vertex_data[1024];
};
/* Indices into gen6_render_state.wm_kernel[]; GEN6_KERNEL_COUNT sizes
 * that table. */
enum {
	GEN6_WM_KERNEL_NOMASK = 0,
	GEN6_WM_KERNEL_NOMASK_PROJECTIVE,

	GEN6_KERNEL_COUNT
};
/* Gen6 pipeline state handles/offsets plus the values most recently
 * programmed.  NOTE(review): presumably tracked so redundant state
 * re-emission can be skipped -- confirm against the gen6 emit code. */
struct gen6_render_state {
	struct kgem_bo *general_bo;

	uint32_t vs_state;
	uint32_t sf_state;
	uint32_t sf_mask_state;
	uint32_t wm_state;
	uint32_t wm_kernel[GEN6_KERNEL_COUNT];

	uint32_t cc_vp;
	uint32_t cc_blend;

	uint32_t drawrect_offset;
	uint32_t drawrect_limit;
	uint32_t blend;
	uint32_t samplers;
	uint32_t kernel;

	uint16_t num_sf_outputs;
	uint16_t vb_id;
	uint16_t ve_id;
	uint16_t vertex_offset;
	uint16_t last_primitive;
	int16_t floats_per_vertex;
	uint16_t surface_table;

	Bool needs_invariant;	/* emit the invariant state before next op */
};
/* Gen7 WM kernel selectors; GEN7_KERNEL_COUNT is the number of entries. */
enum {
	GEN7_WM_KERNEL_NOMASK = 0,
	GEN7_WM_KERNEL_NOMASK_PROJECTIVE,

	GEN7_WM_KERNEL_MASK,
	GEN7_WM_KERNEL_MASK_PROJECTIVE,

	GEN7_WM_KERNEL_MASKCA,
	GEN7_WM_KERNEL_MASKCA_PROJECTIVE,

	GEN7_WM_KERNEL_MASKCA_SRCALPHA,
	GEN7_WM_KERNEL_MASKCA_SRCALPHA_PROJECTIVE,

	GEN7_WM_KERNEL_VIDEO_PLANAR,
	GEN7_WM_KERNEL_VIDEO_PACKED,
	GEN7_KERNEL_COUNT
};
/* CPU-side staging buffer used to assemble static GPU state before it is
 * uploaded into a single bo by sna_static_stream_fini(). */
struct sna_static_stream {
	uint32_t size, used;	/* capacity and bytes consumed */
	uint8_t *data;
};
/* Static-stream API: init allocates the staging buffer; add reserves
 * aligned space and copies data in; map reserves aligned space and
 * returns a zeroed pointer into the buffer; offsetof converts such a
 * pointer back to a stream offset; fini uploads the result into a bo
 * and releases the staging buffer. */
int sna_static_stream_init(struct sna_static_stream *stream);
uint32_t sna_static_stream_add(struct sna_static_stream *stream,
			       const void *data, uint32_t len, uint32_t align);
void *sna_static_stream_map(struct sna_static_stream *stream,
			    uint32_t len, uint32_t align);
uint32_t sna_static_stream_offsetof(struct sna_static_stream *stream,
				    void *ptr);
struct kgem_bo *sna_static_stream_fini(struct sna *sna,
				       struct sna_static_stream *stream);
/*
struct kgem_bo *
sna_render_get_solid(struct sna *sna,
uint32_t color);
void
sna_render_flush_solid(struct sna *sna);
struct kgem_bo *
sna_render_get_gradient(struct sna *sna,
PictGradient *pattern);
uint32_t sna_rgba_for_color(uint32_t color, int depth);
Bool sna_picture_is_solid(PicturePtr picture, uint32_t *color);
*/
/* Per-generation render backend initialisers; no_render_init installs
 * the generation-independent fallback. */
void no_render_init(struct sna *sna);
Bool gen2_render_init(struct sna *sna);
Bool gen3_render_init(struct sna *sna);
Bool gen4_render_init(struct sna *sna);
Bool gen5_render_init(struct sna *sna);
Bool gen6_render_init(struct sna *sna);
Bool gen7_render_init(struct sna *sna);
/*
Bool sna_tiling_composite(uint32_t op,
PicturePtr src,
PicturePtr mask,
PicturePtr dst,
int16_t src_x, int16_t src_y,
int16_t mask_x, int16_t mask_y,
int16_t dst_x, int16_t dst_y,
int16_t width, int16_t height,
struct sna_composite_op *tmp);
Bool sna_tiling_fill_boxes(struct sna *sna,
CARD8 op,
PictFormat format,
const xRenderColor *color,
PixmapPtr dst, struct kgem_bo *dst_bo,
const BoxRec *box, int n);
Bool sna_tiling_copy_boxes(struct sna *sna, uint8_t alu,
PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
const BoxRec *box, int n);
Bool sna_tiling_blt_copy_boxes(struct sna *sna, uint8_t alu,
struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
int bpp, const BoxRec *box, int nbox);
Bool sna_blt_composite(struct sna *sna,
uint32_t op,
PicturePtr src,
PicturePtr dst,
int16_t src_x, int16_t src_y,
int16_t dst_x, int16_t dst_y,
int16_t width, int16_t height,
struct sna_composite_op *tmp);
bool sna_blt_fill(struct sna *sna, uint8_t alu,
struct kgem_bo *bo,
int bpp,
uint32_t pixel,
struct sna_fill_op *fill);
bool sna_blt_copy(struct sna *sna, uint8_t alu,
struct kgem_bo *src,
struct kgem_bo *dst,
int bpp,
struct sna_copy_op *copy);
Bool sna_blt_fill_boxes(struct sna *sna, uint8_t alu,
struct kgem_bo *bo,
int bpp,
uint32_t pixel,
const BoxRec *box, int n);
Bool sna_blt_copy_boxes(struct sna *sna, uint8_t alu,
struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
int bpp,
const BoxRec *box, int n);
Bool sna_blt_copy_boxes_fallback(struct sna *sna, uint8_t alu,
PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
const BoxRec *box, int nbox);
Bool _sna_get_pixel_from_rgba(uint32_t *pixel,
uint16_t red,
uint16_t green,
uint16_t blue,
uint16_t alpha,
uint32_t format);
static inline Bool
sna_get_pixel_from_rgba(uint32_t * pixel,
uint16_t red,
uint16_t green,
uint16_t blue,
uint16_t alpha,
uint32_t format)
{
switch (format) {
case PICT_x8r8g8b8:
alpha = 0xffff;
case PICT_a8r8g8b8:
*pixel = ((alpha >> 8 << 24) |
(red >> 8 << 16) |
(green & 0xff00) |
(blue >> 8));
return TRUE;
case PICT_a8:
*pixel = alpha >> 8;
return TRUE;
}
return _sna_get_pixel_from_rgba(pixel, red, green, blue, alpha, format);
}
int
sna_render_pixmap_bo(struct sna *sna,
struct sna_composite_channel *channel,
PixmapPtr pixmap,
int16_t x, int16_t y,
int16_t w, int16_t h,
int16_t dst_x, int16_t dst_y);
bool
sna_render_pixmap_partial(struct sna *sna,
PixmapPtr pixmap,
struct kgem_bo *bo,
struct sna_composite_channel *channel,
int16_t x, int16_t y,
int16_t w, int16_t h);
int
sna_render_picture_extract(struct sna *sna,
PicturePtr picture,
struct sna_composite_channel *channel,
int16_t x, int16_t y,
int16_t w, int16_t h,
int16_t dst_x, int16_t dst_y);
int
sna_render_picture_fixup(struct sna *sna,
PicturePtr picture,
struct sna_composite_channel *channel,
int16_t x, int16_t y,
int16_t w, int16_t h,
int16_t dst_x, int16_t dst_y);
int
sna_render_picture_convert(struct sna *sna,
PicturePtr picture,
struct sna_composite_channel *channel,
PixmapPtr pixmap,
int16_t x, int16_t y,
int16_t w, int16_t h,
int16_t dst_x, int16_t dst_y);
inline static void sna_render_composite_redirect_init(struct sna_composite_op *op)
{
struct sna_composite_redirect *t = &op->redirect;
t->real_bo = NULL;
t->damage = NULL;
}
Bool
sna_render_composite_redirect(struct sna *sna,
struct sna_composite_op *op,
int x, int y, int width, int height);
void
sna_render_composite_redirect_done(struct sna *sna,
const struct sna_composite_op *op);
bool
sna_composite_mask_is_opaque(PicturePtr mask);
*/
#endif /* SNA_RENDER_H */

View File

@ -0,0 +1,108 @@
/*
* Copyright © 2011 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Chris Wilson <chris@chris-wilson.co.uk>
*
*/
#include <linux/kernel.h>
#include "../bitmap.h"
#include "sna.h"
#include "sna_render.h"
#include <memory.h>
#if DEBUG_STREAM
#undef DBG
#define DBG(x) ErrorF x
#endif
int sna_static_stream_init(struct sna_static_stream *stream)
{
stream->used = 0;
stream->size = 64*1024;
stream->data = malloc(stream->size);
return stream->data != NULL;
}
/* Reserve len bytes at the requested alignment and return their offset.
 * NOTE(review): on exhaustion this returns 0, which is indistinguishable
 * from a valid first offset and is not checked by the callers; growing
 * the buffer (realloc) is disabled in this port. */
static uint32_t sna_static_stream_alloc(struct sna_static_stream *stream,
					uint32_t len, uint32_t align)
{
	uint32_t start = ALIGN(stream->used, align);
	uint32_t end = start + len;

	if (end > stream->size) {
		/* stream->size doubling + realloc() would go here. */
		dbgprintf("%s: EPIC FAIL\n", __FUNCTION__);
		return 0;
	}

	stream->used = end;
	return start;
}
/* Copy len bytes into the stream at the requested alignment and return
 * the offset they were stored at. */
uint32_t sna_static_stream_add(struct sna_static_stream *stream,
			       const void *data, uint32_t len, uint32_t align)
{
	uint32_t where = sna_static_stream_alloc(stream, len, align);

	memcpy(stream->data + where, data, len);
	return where;
}
/* Reserve len zeroed bytes in the stream and return a pointer to them so
 * the caller can fill the region in place. */
void *sna_static_stream_map(struct sna_static_stream *stream,
			    uint32_t len, uint32_t align)
{
	uint32_t where = sna_static_stream_alloc(stream, len, align);
	void *region = stream->data + where;

	return memset(region, 0, len);
}
uint32_t sna_static_stream_offsetof(struct sna_static_stream *stream, void *ptr)
{
return (uint8_t *)ptr - stream->data;
}
/* Upload the accumulated static state into a newly created linear bo.
 * The CPU staging buffer is released on every path.  Returns the bo on
 * success, NULL if the bo could not be created or written. */
struct kgem_bo *sna_static_stream_fini(struct sna *sna,
				       struct sna_static_stream *stream)
{
	struct kgem_bo *bo;

	DBG(("uploaded %u bytes of static state\n", stream->used));

	bo = kgem_create_linear(&sna->kgem, stream->used);
	if (bo && !kgem_bo_write(&sna->kgem, bo, stream->data, stream->used)) {
		/* NOTE(review): bo itself still leaks here -- destroy is
		 * disabled in this port. */
//		kgem_bo_destroy(&sna->kgem, bo);
		free(stream->data);	/* fix: previously leaked on this path */
		stream->data = NULL;
		return NULL;
	}

	free(stream->data);
	stream->data = NULL;

	LEAVE();
	return bo;
}