forked from KolibriOS/kolibrios
initialize framebuffer
git-svn-id: svn://kolibrios.org@2335 a494cfbc-eb01-0410-851d-a64ba20cac60
parent 4151b7bbc2
commit 14a185a759
@@ -1314,26 +1314,134 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
     return 0;
 }
+
+#if 0
+int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+                                    enum i915_cache_level cache_level)
+{
+    int ret;
+
+    if (obj->cache_level == cache_level)
+        return 0;
+
+    if (obj->pin_count) {
+        DRM_DEBUG("can not change the cache level of pinned objects\n");
+        return -EBUSY;
+    }
+
+    if (obj->gtt_space) {
+        ret = i915_gem_object_finish_gpu(obj);
+        if (ret)
+            return ret;
+
+        i915_gem_object_finish_gtt(obj);
+
+        /* Before SandyBridge, you could not use tiling or fence
+         * registers with snooped memory, so relinquish any fences
+         * currently pointing to our region in the aperture.
+         */
+        if (INTEL_INFO(obj->base.dev)->gen < 6) {
+            ret = i915_gem_object_put_fence(obj);
+            if (ret)
+                return ret;
+        }
+
+        i915_gem_gtt_rebind_object(obj, cache_level);
+    }
+
+    if (cache_level == I915_CACHE_NONE) {
+        u32 old_read_domains, old_write_domain;
+
+        /* If we're coming from LLC cached, then we haven't
+         * actually been tracking whether the data is in the
+         * CPU cache or not, since we only allow one bit set
+         * in obj->write_domain and have been skipping the clflushes.
+         * Just set it to the CPU cache for now.
+         */
+        WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
+        WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
+
+        old_read_domains = obj->base.read_domains;
+        old_write_domain = obj->base.write_domain;
+
+        obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+        obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+
+        trace_i915_gem_object_change_domain(obj,
+                                            old_read_domains,
+                                            old_write_domain);
+    }
+
+    obj->cache_level = cache_level;
+    return 0;
+}
+#endif
+
+/*
+ * Prepare buffer for display plane (scanout, cursors, etc).
+ * Can be called from an uninterruptible phase (modesetting) and allows
+ * any flushes to be pipelined (for pageflips).
+ *
+ * For the display plane, we want to be in the GTT but out of any write
+ * domains. So in many ways this looks like set_to_gtt_domain() apart from the
+ * ability to pipeline the waits, pinning and any additional subtleties
+ * that may differentiate the display plane from ordinary buffers.
+ */
+int
+i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
+                                     u32 alignment,
+                                     struct intel_ring_buffer *pipelined)
+{
+    u32 old_read_domains, old_write_domain;
+    int ret;
+
+    ret = i915_gem_object_flush_gpu_write_domain(obj);
+    if (ret)
+        return ret;
+
+    if (pipelined != obj->ring) {
+        ret = i915_gem_object_wait_rendering(obj);
+        if (ret == -ERESTARTSYS)
+            return ret;
+    }
+
+    /* The display engine is not coherent with the LLC cache on gen6. As
+     * a result, we make sure that the pinning that is about to occur is
+     * done with uncached PTEs. This is lowest common denominator for all
+     * chipsets.
+     *
+     * However for gen6+, we could do better by using the GFDT bit instead
+     * of uncaching, which would allow us to flush all the LLC-cached data
+     * with that bit in the PTE to main memory with just one PIPE_CONTROL.
+     */
+//  ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
+//  if (ret)
+//      return ret;
+
+    /* As the user may map the buffer once pinned in the display plane
+     * (e.g. libkms for the bootup splash), we have to ensure that we
+     * always use map_and_fenceable for all scanout buffers.
+     */
+    ret = i915_gem_object_pin(obj, alignment, true);
+    if (ret)
+        return ret;
+
+    i915_gem_object_flush_cpu_write_domain(obj);
+
+    old_write_domain = obj->base.write_domain;
+    old_read_domains = obj->base.read_domains;
+
+    /* It should now be out of any other write domains, and we can update
+     * the domain values for our changes.
+     */
+    BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+    obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
+
+//  trace_i915_gem_object_change_domain(obj,
+//                                      old_read_domains,
+//                                      old_write_domain);
+
+    return 0;
+}
+
@@ -1920,13 +1920,61 @@ out_disable:
     }
 }
+
+int
+intel_pin_and_fence_fb_obj(struct drm_device *dev,
+                           struct drm_i915_gem_object *obj,
+                           struct intel_ring_buffer *pipelined)
+{
+    struct drm_i915_private *dev_priv = dev->dev_private;
+    u32 alignment;
+    int ret;
+
+    switch (obj->tiling_mode) {
+    case I915_TILING_NONE:
+        if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+            alignment = 128 * 1024;
+        else if (INTEL_INFO(dev)->gen >= 4)
+            alignment = 4 * 1024;
+        else
+            alignment = 64 * 1024;
+        break;
+    case I915_TILING_X:
+        /* pin() will align the object as required by fence */
+        alignment = 0;
+        break;
+    case I915_TILING_Y:
+        /* FIXME: Is this true? */
+        DRM_ERROR("Y tiled not allowed for scan out buffers\n");
+        return -EINVAL;
+    default:
+        BUG();
+    }
+
+    dev_priv->mm.interruptible = false;
+    ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
+    if (ret)
+        goto err_interruptible;
+
+    /* Install a fence for tiled scan-out. Pre-i965 always needs a
+     * fence, whereas 965+ only requires a fence if using
+     * framebuffer compression. For simplicity, we always install
+     * a fence as the cost is not that onerous.
+     */
+//  if (obj->tiling_mode != I915_TILING_NONE) {
+//      ret = i915_gem_object_get_fence(obj, pipelined);
+//      if (ret)
+//          goto err_unpin;
+//  }
+
+    dev_priv->mm.interruptible = true;
+    return 0;
+
+err_unpin:
+//  i915_gem_object_unpin(obj);
+err_interruptible:
+    dev_priv->mm.interruptible = true;
+    return ret;
+}
+
 static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                              int x, int y)
@@ -6508,13 +6556,49 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
 
+static const struct drm_framebuffer_funcs intel_fb_funcs = {
+//  .destroy = intel_user_framebuffer_destroy,
+//  .create_handle = intel_user_framebuffer_create_handle,
+};
+
+int intel_framebuffer_init(struct drm_device *dev,
+                           struct intel_framebuffer *intel_fb,
+                           struct drm_mode_fb_cmd *mode_cmd,
+                           struct drm_i915_gem_object *obj)
+{
+    int ret;
+
+    if (obj->tiling_mode == I915_TILING_Y)
+        return -EINVAL;
+
+    if (mode_cmd->pitch & 63)
+        return -EINVAL;
+
+    switch (mode_cmd->bpp) {
+    case 8:
+    case 16:
+        /* Only pre-ILK can handle 5:5:5 */
+        if (mode_cmd->depth == 15 && !HAS_PCH_SPLIT(dev))
+            return -EINVAL;
+        break;
+
+    case 24:
+    case 32:
+        break;
+    default:
+        return -EINVAL;
+    }
+
+    ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
+    if (ret) {
+        DRM_ERROR("framebuffer init failed %d\n", ret);
+        return ret;
+    }
+
+    drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
+    intel_fb->obj = obj;
+    return 0;
+}
+
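Note on intel_framebuffer_init() above: it rejects Y-tiled objects, pitches that are not a multiple of 64 bytes, and bpp/depth combinations outside 8/16/24/32. A minimal sketch of a drm_mode_fb_cmd that passes those checks, with hypothetical example values that are not part of this commit:

    /* Hypothetical 1024x768 XRGB8888 framebuffer description. */
    struct drm_mode_fb_cmd mode_cmd = {
        .width  = 1024,
        .height = 768,
        .bpp    = 32,            /* accepted values: 8, 16, 24, 32 */
        .depth  = 24,
        .pitch  = 1024 * 4,      /* 4096 bytes, a multiple of 64 */
    };
    /* Together with a non-Y-tiled GEM object, this would be accepted by
     * intel_framebuffer_init(dev, &intel_fb, &mode_cmd, obj). */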
@@ -7528,7 +7612,6 @@ void intel_modeset_init(struct drm_device *dev)
         dev->mode_config.max_width = 8192;
         dev->mode_config.max_height = 8192;
     }
 
     dev->mode_config.fb_base = get_bus_addr();
 
     DRM_DEBUG_KMS("%d display pipe%s available.\n",
@@ -45,10 +45,167 @@
 #include "i915_drv.h"
 
+struct fb_info *framebuffer_alloc(size_t size, struct device *dev)
+{
+#define BYTES_PER_LONG (BITS_PER_LONG/8)
+#define PADDING (BYTES_PER_LONG - (sizeof(struct fb_info) % BYTES_PER_LONG))
+    int fb_info_size = sizeof(struct fb_info);
+    struct fb_info *info;
+    char *p;
+
+    if (size)
+        fb_info_size += PADDING;
+
+    p = kzalloc(fb_info_size + size, GFP_KERNEL);
+
+    if (!p)
+        return NULL;
+
+    info = (struct fb_info *) p;
+
+    if (size)
+        info->par = p + fb_info_size;
+
+    return info;
+#undef PADDING
+#undef BYTES_PER_LONG
+}
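The framebuffer_alloc() port above mirrors the Linux fbdev helper of the same name: a single zeroed allocation carries the fb_info, alignment padding, and an optional driver-private area, with info->par pointing just past the padded header. A minimal usage sketch, assuming a hypothetical my_fb_par structure that is not part of this commit:

    /* Hypothetical private state, carried in the same allocation as fb_info. */
    struct my_fb_par {
        int   pipe;
        void *vaddr;
    };

    static struct fb_info *my_fb_alloc(struct device *dev)
    {
        struct fb_info   *info;
        struct my_fb_par *par;

        /* One kzalloc: fb_info + padding + sizeof(struct my_fb_par). */
        info = framebuffer_alloc(sizeof(struct my_fb_par), dev);
        if (!info)
            return NULL;

        par = info->par;     /* points just past the padded fb_info */
        par->pipe = 0;

        return info;         /* a single kfree(info) releases both parts */
    }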
+
+static struct fb_ops intelfb_ops = {
+//  .owner = THIS_MODULE,
+    .fb_check_var = drm_fb_helper_check_var,
+    .fb_set_par = drm_fb_helper_set_par,
+//  .fb_fillrect = cfb_fillrect,
+//  .fb_copyarea = cfb_copyarea,
+//  .fb_imageblit = cfb_imageblit,
+//  .fb_pan_display = drm_fb_helper_pan_display,
+    .fb_blank = drm_fb_helper_blank,
+//  .fb_setcmap = drm_fb_helper_setcmap,
+//  .fb_debug_enter = drm_fb_helper_debug_enter,
+//  .fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+static int intelfb_create(struct intel_fbdev *ifbdev,
+                          struct drm_fb_helper_surface_size *sizes)
+{
+    struct drm_device *dev = ifbdev->helper.dev;
+    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct fb_info *info;
+    struct drm_framebuffer *fb;
+    struct drm_mode_fb_cmd mode_cmd;
+    struct drm_i915_gem_object *obj;
+    struct device *device = &dev->pdev->dev;
+    int size, ret;
+
+    /* we don't do packed 24bpp */
+    if (sizes->surface_bpp == 24)
+        sizes->surface_bpp = 32;
+
+    mode_cmd.width = sizes->surface_width;
+    mode_cmd.height = sizes->surface_height;
+
+    mode_cmd.bpp = sizes->surface_bpp;
+    mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64);
+    mode_cmd.depth = sizes->surface_depth;
+
+    size = mode_cmd.pitch * mode_cmd.height;
+    size = ALIGN(size, PAGE_SIZE);
+    obj = i915_gem_alloc_object(dev, size);
+    if (!obj) {
+        DRM_ERROR("failed to allocate framebuffer\n");
+        ret = -ENOMEM;
+        goto out;
+    }
+
+    mutex_lock(&dev->struct_mutex);
+
+    /* Flush everything out, we'll be doing GTT only from now on */
+    ret = intel_pin_and_fence_fb_obj(dev, obj, false);
+    if (ret) {
+        DRM_ERROR("failed to pin fb: %d\n", ret);
+        goto out_unref;
+    }
+
+    info = framebuffer_alloc(0, device);
+    if (!info) {
+        ret = -ENOMEM;
+        goto out_unpin;
+    }
+
+    info->par = ifbdev;
+
+    ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
+    if (ret)
+        goto out_unpin;
+
+    fb = &ifbdev->ifb.base;
+
+    ifbdev->helper.fb = fb;
+    ifbdev->helper.fbdev = info;
+
+    strcpy(info->fix.id, "inteldrmfb");
+
+    info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
+    info->fbops = &intelfb_ops;
+
+    /* setup aperture base/size for vesafb takeover */
+    info->apertures = alloc_apertures(1);
+    if (!info->apertures) {
+        ret = -ENOMEM;
+        goto out_unpin;
+    }
+    info->apertures->ranges[0].base = dev->mode_config.fb_base;
+    info->apertures->ranges[0].size =
+        dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+
+    info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
+    info->fix.smem_len = size;
+
+    info->screen_size = size;
+
+//  memset(info->screen_base, 0, size);
+
+    drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
+    drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
+
+    DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
+                  fb->width, fb->height,
+                  obj->gtt_offset, obj);
+
+    mutex_unlock(&dev->struct_mutex);
+//  vga_switcheroo_client_fb_set(dev->pdev, info);
+    return 0;
+
+out_unpin:
+//  i915_gem_object_unpin(obj);
+out_unref:
+//  drm_gem_object_unreference(&obj->base);
+    mutex_unlock(&dev->struct_mutex);
+out:
+    return ret;
+}
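For reference, the pitch and size arithmetic in intelfb_create() above works out as follows for a hypothetical 1024x768 surface at 32 bpp (values chosen for illustration, not taken from this commit):

    /* Hypothetical 1024x768, 32 bpp surface:
     *   bytes per pixel = (32 + 7) / 8          = 4
     *   pitch           = ALIGN(1024 * 4, 64)   = 4096    (already 64-byte aligned)
     *   size            = 4096 * 768            = 3145728 (3 MiB)
     *   size            = ALIGN(3145728, 4096)  = 3145728 (already page aligned)
     * so the backing GEM object is 3 MiB, and pitch & 63 == 0 satisfies the
     * check in intel_framebuffer_init().
     */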
+
+static int intel_fb_find_or_create_single(struct drm_fb_helper *helper,
+                                          struct drm_fb_helper_surface_size *sizes)
+{
+    struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper;
+    int new_fb = 0;
+    int ret;
+
+    if (!helper->fb) {
+        ret = intelfb_create(ifbdev, sizes);
+        if (ret)
+            return ret;
+        new_fb = 1;
+    }
+    return new_fb;
+}
+
 static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
     .gamma_set = intel_crtc_fb_gamma_set,
     .gamma_get = intel_crtc_fb_gamma_get,
-//  .fb_probe = intel_fb_find_or_create_single,
+    .fb_probe = intel_fb_find_or_create_single,
 };
@@ -1242,7 +1242,7 @@ static int blt_ring_init(struct intel_ring_buffer *ring)
         ptr = ioremap(obj->pages[0], 4096);
         *ptr++ = MI_BATCH_BUFFER_END;
         *ptr++ = MI_NOOP;
-        iounmap(obj->pages[0]);
+//      iounmap(obj->pages[0]);
 
         ret = i915_gem_object_set_to_gtt_domain(obj, false);
         if (ret) {