initialize ring buffers

git-svn-id: svn://kolibrios.org@2332 a494cfbc-eb01-0410-851d-a64ba20cac60
Sergey Semyonov (Serge) 2011-12-30 05:16:40 +00:00
parent e7a9812564
commit eba316e7bf
9 changed files with 3378 additions and 41 deletions

View File

@@ -280,10 +280,11 @@ static unsigned int intel_gtt_stolen_size(void)
 	}
 
 	if (stolen_size > 0) {
-		dbgprintf("detected %dK %s memory\n",
+		dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
 			  stolen_size / KB(1), local ? "local" : "stolen");
 	} else {
-		dbgprintf("no pre-allocated video memory detected\n");
+		dev_info(&intel_private.bridge_dev->dev,
+			 "no pre-allocated video memory detected\n");
 		stolen_size = 0;
 	}
@@ -354,7 +355,8 @@ static unsigned int i965_gtt_total_entries(void)
 		size = KB(1024 + 512);
 		break;
 	default:
-		dbgprintf("unknown page table size, assuming 512KB\n");
+		dev_info(&intel_private.pcidev->dev,
+			 "unknown page table size, assuming 512KB\n");
 		size = KB(512);
 	}
@@ -529,7 +531,8 @@ static bool intel_enable_gtt(void)
 	pci_read_config_word(intel_private.bridge_dev,
 			     I830_GMCH_CTRL, &gmch_ctrl);
 	if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
-		dbgprintf("failed to enable the GTT: GMCH_CTRL=%x\n",
+		dev_err(&intel_private.pcidev->dev,
+			"failed to enable the GTT: GMCH_CTRL=%x\n",
 			gmch_ctrl);
 		return false;
 	}
@@ -544,7 +547,8 @@ static bool intel_enable_gtt(void)
 	reg = intel_private.registers+I810_PGETBL_CTL;
 	writel(intel_private.PGETBL_save, reg);
 	if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
-		dbgprintf("failed to enable the GTT: PGETBL=%x [expected %x]\n",
+		dev_err(&intel_private.pcidev->dev,
+			"failed to enable the GTT: PGETBL=%x [expected %x]\n",
 			readl(reg), intel_private.PGETBL_save);
 		return false;
 	}
@@ -556,6 +560,31 @@ static bool intel_enable_gtt(void)
 }
 
+void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
+			    struct page **pages, unsigned int flags)
+{
+	int i, j;
+
+	for (i = 0, j = first_entry; i < num_entries; i++, j++) {
+		dma_addr_t addr = (dma_addr_t)(pages[i]);
+		intel_private.driver->write_entry(addr,
+						  j, flags);
+	}
+	readl(intel_private.gtt+j-1);
+}
+
+void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
+{
+	unsigned int i;
+
+	for (i = first_entry; i < (first_entry + num_entries); i++) {
+		intel_private.driver->write_entry(intel_private.scratch_page_dma,
+						  i, 0);
+	}
+	readl(intel_private.gtt+i-1);
+}
+
 static void intel_i9xx_setup_flush(void)
 {
@@ -766,6 +795,12 @@ const struct intel_gtt *intel_gtt_get(void)
 	return &intel_private.base;
 }
 
+void intel_gtt_chipset_flush(void)
+{
+	if (intel_private.driver->chipset_flush)
+		intel_private.driver->chipset_flush();
+}
+
 phys_addr_t get_bus_addr(void)
 {

View File

@@ -164,6 +164,7 @@ intel_setup_mchbar(struct drm_device *dev)
 
+#define LFB_SIZE 0xC00000
 
 static int i915_load_gem_init(struct drm_device *dev)
 {
@@ -181,8 +182,6 @@ static int i915_load_gem_init(struct drm_device *dev)
 	/* Basic memrange allocator for stolen space */
 	drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
-
-//0xC00000 >> PAGE_SHIFT
 
 	/* Let GEM Manage all of the aperture.
 	 *
 	 * However, leave one page at the end still bound to the scratch page.
@@ -192,13 +191,13 @@ static int i915_load_gem_init(struct drm_device *dev)
 	 * at the last page of the aperture. One page should be enough to
 	 * keep any prefetching inside of the aperture.
 	 */
-//	i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
+	i915_gem_do_init(dev, LFB_SIZE, mappable_size, gtt_size - PAGE_SIZE - LFB_SIZE);
 
-//	mutex_lock(&dev->struct_mutex);
-//	ret = i915_gem_init_ringbuffer(dev);
-//	mutex_unlock(&dev->struct_mutex);
-//	if (ret)
-//		return ret;
+	mutex_lock(&dev->struct_mutex);
+	ret = i915_gem_init_ringbuffer(dev);
+	mutex_unlock(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	/* Try to set up FBC with a reasonable compressed buffer size */
 //	if (I915_HAS_FBC(dev) && i915_powersave) {
@ -240,13 +239,11 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret) if (ret)
goto cleanup_vga_switcheroo; goto cleanup_vga_switcheroo;
#if 0
intel_modeset_gem_init(dev); intel_modeset_gem_init(dev);
ret = drm_irq_install(dev); // ret = drm_irq_install(dev);
if (ret) // if (ret)
goto cleanup_gem; // goto cleanup_gem;
/* Always safe in the mode setting case. */ /* Always safe in the mode setting case. */
/* FIXME: do pre/post-mode set stuff in core KMS code */ /* FIXME: do pre/post-mode set stuff in core KMS code */
@@ -256,13 +253,11 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	if (ret)
 		goto cleanup_irq;
 
-	drm_kms_helper_poll_init(dev);
+//	drm_kms_helper_poll_init(dev);
 
 	/* We're off and running w/KMS */
 	dev_priv->mm.suspended = 0;
-#endif
 
 	return 0;
 
 cleanup_irq:

View File

@@ -49,7 +49,7 @@
 int i915_panel_ignore_lid __read_mostly = 0;
-unsigned int i915_powersave __read_mostly = 1;
+unsigned int i915_powersave __read_mostly = 0;
 unsigned int i915_enable_rc6 __read_mostly = 0;

View File

@@ -35,7 +35,7 @@
 #include "intel_ringbuffer.h"
 //#include <linux/io-mapping.h>
 #include <linux/i2c.h>
-//#include <drm/intel-gtt.h>
+#include <drm/intel-gtt.h>
 //#include <linux/backlight.h>
 #include <linux/spinlock.h>
@@ -293,8 +293,8 @@ typedef struct drm_i915_private {
 	drm_dma_handle_t *status_page_dmah;
 //	uint32_t counter;
 //	drm_local_map_t hws_map;
-//	struct drm_i915_gem_object *pwrctx;
-//	struct drm_i915_gem_object *renderctx;
+	struct drm_i915_gem_object *pwrctx;
+	struct drm_i915_gem_object *renderctx;
 
 //	struct resource mch_res;
@@ -552,7 +552,7 @@ typedef struct drm_i915_private {
 	/** Memory allocator for GTT stolen memory */
 	struct drm_mm stolen;
 	/** Memory allocator for GTT */
-//	struct drm_mm gtt_space;
+	struct drm_mm gtt_space;
 	/** List of all objects in gtt_space. Used to restore gtt
 	 *  mappings on resume */
 	struct list_head gtt_list;
@@ -722,7 +722,7 @@ typedef struct drm_i915_private {
 	unsigned long last_gpu_reset;
 
 	/* list of fbdev register on this device */
-//	struct intel_fbdev *fbdev;
+	struct intel_fbdev *fbdev;
 
 //	struct backlight_device *backlight;
@@ -1154,14 +1154,14 @@ int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
 //	return (int32_t)(seq1 - seq2) >= 0;
 //}
 
-//static inline u32
-//i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
-//{
-//	drm_i915_private_t *dev_priv = ring->dev->dev_private;
-//	return ring->outstanding_lazy_request = dev_priv->next_seqno;
-//}
+static inline u32
+i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	return ring->outstanding_lazy_request = dev_priv->next_seqno;
+}
 
-/*
 void i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_reset(struct drm_device *dev);
 void i915_gem_clflush_object(struct drm_i915_gem_object *obj);

@@ -1206,7 +1206,7 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 				    enum i915_cache_level cache_level);
-*/
 
 /* i915_gem_gtt.c */
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);

File diff suppressed because it is too large

View File

@@ -0,0 +1,138 @@
/*
* Copyright © 2010 Daniel Vetter
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
//#include "i915_trace.h"
#include "intel_drv.h"
#define AGP_USER_TYPES (1 << 16)
#define AGP_USER_MEMORY (AGP_USER_TYPES)
#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
/* XXX kill agp_type! */
static unsigned int cache_level_to_agp_type(struct drm_device *dev,
enum i915_cache_level cache_level)
{
switch (cache_level) {
case I915_CACHE_LLC_MLC:
if (INTEL_INFO(dev)->gen >= 6)
return AGP_USER_CACHED_MEMORY_LLC_MLC;
/* Older chipsets do not have this extra level of CPU
* cacheing, so fallthrough and request the PTE simply
* as cached.
*/
case I915_CACHE_LLC:
return AGP_USER_CACHED_MEMORY;
default:
case I915_CACHE_NONE:
return AGP_USER_MEMORY;
}
}
#if 0
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
/* First fill our portion of the GTT with scratch pages */
intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
(dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
i915_gem_clflush_object(obj);
i915_gem_gtt_rebind_object(obj, obj->cache_level);
}
intel_gtt_chipset_flush();
}
#endif
int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned int agp_type = cache_level_to_agp_type(dev, obj->cache_level);
int ret;
ENTER();
// if (dev_priv->mm.gtt->needs_dmar) {
// ret = intel_gtt_map_memory(obj->pages,
// obj->base.size >> PAGE_SHIFT,
// &obj->sg_list,
// &obj->num_sg);
// if (ret != 0)
// return ret;
// intel_gtt_insert_sg_entries(obj->sg_list,
// obj->num_sg,
// obj->gtt_space->start >> PAGE_SHIFT,
// agp_type);
// } else
intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT,
obj->pages,
agp_type);
LEAVE();
return 0;
}
#if 0
void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
if (dev_priv->mm.gtt->needs_dmar) {
BUG_ON(!obj->sg_list);
intel_gtt_insert_sg_entries(obj->sg_list,
obj->num_sg,
obj->gtt_space->start >> PAGE_SHIFT,
agp_type);
} else
intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT,
obj->pages,
agp_type);
}
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT);
if (obj->sg_list) {
intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
obj->sg_list = NULL;
}
}
#endif

View File

@@ -5668,9 +5668,26 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
 
+/** Sets the color ramps on behalf of RandR */
+void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+				 u16 blue, int regno)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	intel_crtc->lut_r[regno] = red >> 8;
+	intel_crtc->lut_g[regno] = green >> 8;
+	intel_crtc->lut_b[regno] = blue >> 8;
+}
+
+void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+				 u16 *blue, int regno)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	*red = intel_crtc->lut_r[regno] << 8;
+	*green = intel_crtc->lut_g[regno] << 8;
+	*blue = intel_crtc->lut_b[regno] << 8;
+}
+
 static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
 				 u16 *blue, uint32_t start, uint32_t size)
@@ -7103,10 +7120,109 @@ static void cpt_init_clock_gating(struct drm_device *dev)
 		I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
 }
 
+static void ironlake_teardown_rc6(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->renderctx) {
+//		i915_gem_object_unpin(dev_priv->renderctx);
+//		drm_gem_object_unreference(&dev_priv->renderctx->base);
+		dev_priv->renderctx = NULL;
+	}
+	if (dev_priv->pwrctx) {
+//		i915_gem_object_unpin(dev_priv->pwrctx);
+//		drm_gem_object_unreference(&dev_priv->pwrctx->base);
+		dev_priv->pwrctx = NULL;
+	}
+}
+
+static int ironlake_setup_rc6(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->renderctx == NULL)
+//		dev_priv->renderctx = intel_alloc_context_page(dev);
+	if (!dev_priv->renderctx)
+		return -ENOMEM;
+
+	if (dev_priv->pwrctx == NULL)
+//		dev_priv->pwrctx = intel_alloc_context_page(dev);
+	if (!dev_priv->pwrctx) {
+		ironlake_teardown_rc6(dev);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+void ironlake_enable_rc6(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	/* rc6 disabled by default due to repeated reports of hanging during
+	 * boot and resume.
+	 */
+	if (!i915_enable_rc6)
+		return;
+
+	mutex_lock(&dev->struct_mutex);
+	ret = ironlake_setup_rc6(dev);
+	if (ret) {
+		mutex_unlock(&dev->struct_mutex);
+		return;
+	}
+
+	/*
+	 * GPU can automatically power down the render unit if given a page
+	 * to save state.
+	 */
+#if 0
+	ret = BEGIN_LP_RING(6);
+	if (ret) {
+		ironlake_teardown_rc6(dev);
+		mutex_unlock(&dev->struct_mutex);
+		return;
+	}
+
+	OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
+	OUT_RING(MI_SET_CONTEXT);
+	OUT_RING(dev_priv->renderctx->gtt_offset |
+		 MI_MM_SPACE_GTT |
+		 MI_SAVE_EXT_STATE_EN |
+		 MI_RESTORE_EXT_STATE_EN |
+		 MI_RESTORE_INHIBIT);
+	OUT_RING(MI_SUSPEND_FLUSH);
+	OUT_RING(MI_NOOP);
+	OUT_RING(MI_FLUSH);
+	ADVANCE_LP_RING();
+
+	/*
+	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
+	 * does an implicit flush, combined with MI_FLUSH above, it should be
+	 * safe to assume that renderctx is valid
+	 */
+	ret = intel_wait_ring_idle(LP_RING(dev_priv));
+	if (ret) {
+		DRM_ERROR("failed to enable ironlake power power savings\n");
+		ironlake_teardown_rc6(dev);
+		mutex_unlock(&dev->struct_mutex);
+		return;
+	}
+#endif
+
+	I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
+	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+	mutex_unlock(&dev->struct_mutex);
+}
+
 void intel_init_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7438,6 +7554,17 @@ void intel_modeset_init(struct drm_device *dev)
 		gen6_update_ring_freq(dev_priv);
 	}
 
+//	INIT_WORK(&dev_priv->idle_work, intel_idle_update);
+//	setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
+//		    (unsigned long)dev);
+}
+
+void intel_modeset_gem_init(struct drm_device *dev)
+{
+	if (IS_IRONLAKE_M(dev))
+		ironlake_enable_rc6(dev);
+
+//	intel_setup_overlay(dev);
 }

View File

@@ -0,0 +1,85 @@
/*
* Copyright © 2007 David Airlie
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* David Airlie
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
//#include <linux/mm.h>
//#include <linux/tty.h>
#include <linux/sysrq.h>
//#include <linux/delay.h>
#include <linux/fb.h>
//#include <linux/init.h>
//#include <linux/vga_switcheroo.h>
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
#include "drm_fb_helper.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
.gamma_set = intel_crtc_fb_gamma_set,
.gamma_get = intel_crtc_fb_gamma_get,
// .fb_probe = intel_fb_find_or_create_single,
};
int intel_fbdev_init(struct drm_device *dev)
{
struct intel_fbdev *ifbdev;
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
ENTER();
ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
if (!ifbdev)
return -ENOMEM;
dev_priv->fbdev = ifbdev;
ifbdev->helper.funcs = &intel_fb_helper_funcs;
ret = drm_fb_helper_init(dev, &ifbdev->helper,
dev_priv->num_pipe,
INTELFB_CONN_LIMIT);
if (ret) {
kfree(ifbdev);
return ret;
}
drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
drm_fb_helper_initial_config(&ifbdev->helper, 32);
LEAVE();
return 0;
}

File diff suppressed because it is too large