From 0b68aa81cf58949db631e927914d7e58e73492e1 Mon Sep 17 00:00:00 2001 From: "Sergey Semyonov (Serge)" Date: Tue, 2 Feb 2016 17:21:05 +0000 Subject: [PATCH] i915-4.4.1 git-svn-id: svn://kolibrios.org@6131 a494cfbc-eb01-0410-851d-a64ba20cac60 --- drivers/video/drm/drm_cache.c | 105 ++++--- drivers/video/drm/drm_gem.c | 4 +- drivers/video/drm/drm_irq.c | 15 +- drivers/video/drm/drm_stub.c | 15 - drivers/video/drm/i915/i915_dma.c | 6 +- drivers/video/drm/i915/i915_gem.c | 82 +++--- drivers/video/drm/i915/i915_gem_stolen.c | 10 + drivers/video/drm/i915/i915_irq.c | 18 +- drivers/video/drm/i915/i915_params.c | 2 +- drivers/video/drm/i915/i915_trace.h | 2 +- drivers/video/drm/i915/kms_display.c | 33 ++- drivers/video/drm/i915/kos_gem_fb.c | 92 ------ drivers/video/drm/i915/main.c | 9 +- drivers/video/drm/i915/pci.c | 355 ++++++++++++++++++----- drivers/video/drm/i915/utils.c | 19 -- 15 files changed, 425 insertions(+), 342 deletions(-) diff --git a/drivers/video/drm/drm_cache.c b/drivers/video/drm/drm_cache.c index 9217175d21..7e67217c79 100644 --- a/drivers/video/drm/drm_cache.c +++ b/drivers/video/drm/drm_cache.c @@ -33,8 +33,13 @@ extern int x86_clflush_size; +#if defined(CONFIG_X86) -#if 0 +/* + * clflushopt is an unordered instruction which needs fencing with mfence or + * sfence to avoid ordering issues. For drm_clflush_page this fencing happens + * in the caller. + */ static void drm_clflush_page(struct page *page) { @@ -66,70 +71,44 @@ static void drm_cache_flush_clflush(struct page *pages[], void drm_clflush_pages(struct page *pages[], unsigned long num_pages) { - uint8_t *pva; - unsigned int i, j; - pva = AllocKernelSpace(4096); +#if defined(CONFIG_X86) + drm_cache_flush_clflush(pages, num_pages); + return; - if(pva != NULL) - { - dma_addr_t *src, *dst; - u32 count; +#elif defined(__powerpc__) + unsigned long i; + for (i = 0; i < num_pages; i++) { + struct page *page = pages[i]; + void *page_virtual; - for (i = 0; i < num_pages; i++) - { - mb(); - MapPage(pva, page_to_phys(pages[i]), 0x001); - for (j = 0; j < PAGE_SIZE; j += x86_clflush_size) - clflush(pva + j); - } - FreeKernelSpace(pva); - } - mb(); + if (unlikely(page == NULL)) + continue; + + page_virtual = kmap_atomic(page); + flush_dcache_range((unsigned long)page_virtual, + (unsigned long)page_virtual + PAGE_SIZE); + kunmap_atomic(page_virtual); + } +#else + printk(KERN_ERR "Architecture has no drm_cache.c support\n"); + WARN_ON_ONCE(1); +#endif } EXPORT_SYMBOL(drm_clflush_pages); void drm_clflush_sg(struct sg_table *st) -{ - struct sg_page_iter sg_iter; - struct page *page; - - uint8_t *pva; - unsigned int i; - - pva = AllocKernelSpace(4096); - if( pva != NULL) - { - mb(); - for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) - { - page = sg_page_iter_page(&sg_iter); - - MapPage(pva,page_to_phys(page), 0x001); - - for (i = 0; i < PAGE_SIZE; i += x86_clflush_size) - clflush(pva + i); - }; - FreeKernelSpace(pva); - }; - mb(); -} -EXPORT_SYMBOL(drm_clflush_sg); - -#if 0 -void -drm_clflush_virt_range(void *addr, unsigned long length) { #if defined(CONFIG_X86) if (cpu_has_clflush) { - const int size = boot_cpu_data.x86_clflush_size; - void *end = addr + length; - addr = (void *)(((unsigned long)addr) & -size); + struct sg_page_iter sg_iter; + mb(); - for (; addr < end; addr += size) - clflushopt(addr); + for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) + drm_clflush_page(sg_page_iter_page(&sg_iter)); mb(); + return; } @@ -140,6 +119,26 @@ drm_clflush_virt_range(void *addr, unsigned long length) WARN_ON_ONCE(1); #endif } 
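/*
 * Illustrative sketch only (not part of the patch): the per-cacheline loop that
 * drm_clflush_page() is expected to run on x86 in this port.  It assumes
 * kmap_atomic()/kunmap_atomic() and the exported x86_clflush_size are available,
 * as they are elsewhere in this file; the name example_clflush_page() is
 * hypothetical.
 */
static void example_clflush_page(struct page *page)
{
	char *virt;
	int i;

	if (unlikely(page == NULL))
		return;

	virt = kmap_atomic(page);
	for (i = 0; i < PAGE_SIZE; i += x86_clflush_size)
		clflush(virt + i);		/* flush one cacheline */
	kunmap_atomic(virt);
}
/*
 * Per the comment added above, the fencing is the caller's job, e.g.
 *	mb(); example_clflush_page(page); mb();
 * which is what drm_cache_flush_clflush()/drm_clflush_pages() do.
 */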
-EXPORT_SYMBOL(drm_clflush_virt_range); +EXPORT_SYMBOL(drm_clflush_sg); +void +drm_clflush_virt_range(void *addr, unsigned long length) +{ +#if defined(CONFIG_X86) + if (1) { + const int size = x86_clflush_size; + void *end = addr + length; + addr = (void *)(((unsigned long)addr) & -size); + mb(); + for (; addr < end; addr += size) + clflush(addr); + mb(); + return; + } + +#else + printk(KERN_ERR "Architecture has no drm_cache.c support\n"); + WARN_ON_ONCE(1); #endif +} +EXPORT_SYMBOL(drm_clflush_virt_range); diff --git a/drivers/video/drm/drm_gem.c b/drivers/video/drm/drm_gem.c index afea62280a..9794e1db21 100644 --- a/drivers/video/drm/drm_gem.c +++ b/drivers/video/drm/drm_gem.c @@ -264,7 +264,7 @@ EXPORT_SYMBOL(drm_gem_handle_delete); * @file: drm file-private structure to remove the dumb handle from * @dev: corresponding drm_device * @handle: the dumb handle to remove - * + * * This implements the ->dumb_destroy kms driver callback for drivers which use * gem to manage their backing storage. */ @@ -281,7 +281,7 @@ EXPORT_SYMBOL(drm_gem_dumb_destroy); * @file_priv: drm file-private structure to register the handle for * @obj: object to register * @handlep: pointer to return the created handle to the caller - * + * * This expects the dev->object_name_lock to be held already and will drop it * before returning. Used to avoid races in establishing new handles when * importing an object from either an flink name or a dma-buf. diff --git a/drivers/video/drm/drm_irq.c b/drivers/video/drm/drm_irq.c index 3cce9291b6..136ffd8145 100644 --- a/drivers/video/drm/drm_irq.c +++ b/drivers/video/drm/drm_irq.c @@ -54,6 +54,10 @@ static inline ktime_t ktime_mono_to_real(ktime_t mono) return mono; } +irqreturn_t device_irq_handler(struct drm_device *dev) +{ + return dev->driver->irq_handler(0, dev); +} /* Access macro for slots in vblank timestamp ringbuffer. 
*/ #define vblanktimestamp(dev, pipe, count) \ @@ -401,15 +405,6 @@ EXPORT_SYMBOL(drm_vblank_init); -irqreturn_t device_irq_handler(struct drm_device *dev) -{ - -// printf("video irq\n"); - -// printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ; - - return dev->driver->irq_handler(0, dev); -} /** * drm_irq_install - install IRQ handler @@ -1424,7 +1419,7 @@ static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe) list_del(&e->base.link); drm_vblank_put(dev, pipe); - send_vblank_event(dev, e, seq, &now); +// send_vblank_event(dev, e, seq, &now); } } diff --git a/drivers/video/drm/drm_stub.c b/drivers/video/drm/drm_stub.c index 1edc9c307b..c73fc1b2f4 100644 --- a/drivers/video/drm/drm_stub.c +++ b/drivers/video/drm/drm_stub.c @@ -547,21 +547,6 @@ int drm_order(unsigned long size) return order; } -extern int x86_clflush_size; - - -void drm_clflush_virt_range(void *addr, unsigned long length) -{ - char *tmp = addr; - char *end = tmp + length; - mb(); - for (; tmp < end; tmp += x86_clflush_size) - clflush(tmp); - clflush(end - 1); - mb(); - return; -} - int drm_sysfs_connector_add(struct drm_connector *connector) { return 0; diff --git a/drivers/video/drm/i915/i915_dma.c b/drivers/video/drm/i915/i915_dma.c index fc6427434f..05415e834b 100644 --- a/drivers/video/drm/i915/i915_dma.c +++ b/drivers/video/drm/i915/i915_dma.c @@ -181,7 +181,7 @@ static int i915_get_bridge_dev(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); + dev_priv->bridge_dev = _pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); if (!dev_priv->bridge_dev) { DRM_ERROR("bridge device not found\n"); return -1; @@ -899,8 +899,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) * All tasks on the workqueue are expected to acquire the dev mutex * so there is no point in running more than one instance of the * workqueue at any time. Use an ordered one. - */ - dev_priv->wq = (struct workqueue_struct *)alloc_ordered_workqueue("i915", 0); + */ + dev_priv->wq = (struct workqueue_struct *)alloc_ordered_workqueue("i915", 0); if (dev_priv->wq == NULL) { DRM_ERROR("Failed to create our workqueue.\n"); ret = -ENOMEM; diff --git a/drivers/video/drm/i915/i915_gem.c b/drivers/video/drm/i915/i915_gem.c index d330bcd3ab..3caeb63328 100644 --- a/drivers/video/drm/i915/i915_gem.c +++ b/drivers/video/drm/i915/i915_gem.c @@ -40,6 +40,7 @@ #define RQ_BUG_ON(expr) extern int x86_clflush_size; +#define __copy_to_user_inatomic __copy_to_user #define PROT_READ 0x1 /* page can be read */ #define PROT_WRITE 0x2 /* page can be written */ @@ -57,7 +58,7 @@ unsigned long vm_mmap(struct file *file, unsigned long addr, #define MAX_ERRNO 4095 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO) - +#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK) static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); @@ -238,9 +239,6 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, args->size, &args->handle); } - -#if 0 - static inline int __copy_to_user_swizzled(char __user *cpu_vaddr, const char *gpu_vaddr, int gpu_offset, @@ -293,6 +291,42 @@ __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset, return 0; } +/* + * Pins the specified object's pages and synchronizes the object with + * GPU accesses. 
Sets needs_clflush to non-zero if the caller should + * flush the object from the CPU cache. + */ +int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, + int *needs_clflush) +{ + int ret; + + *needs_clflush = 0; + + if (!obj->base.filp) + return -EINVAL; + + if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) { + /* If we're not in the cpu read domain, set ourself into the gtt + * read domain and manually flush cachelines (if required). This + * optimizes for the case when the gpu will dirty the data + * anyway again before the next pread happens. */ + *needs_clflush = !cpu_cache_is_coherent(obj->base.dev, + obj->cache_level); + ret = i915_gem_object_wait_rendering(obj, true); + if (ret) + return ret; + } + + ret = i915_gem_object_get_pages(obj); + if (ret) + return ret; + + i915_gem_object_pin_pages(obj); + + return ret; +} + /* Per-page copy function for the shmem pread fastpath. * Flushes invalid cachelines before reading the target if * needs_clflush is set. */ @@ -424,16 +458,6 @@ i915_gem_shmem_pread(struct drm_device *dev, mutex_unlock(&dev->struct_mutex); - if (likely(!i915.prefault_disable) && !prefaulted) { - ret = fault_in_multipages_writeable(user_data, remain); - /* Userspace is tricking us, but we've already clobbered - * its pages with the prefault and promised to write the - * data up to the first fault. Hence ignore any errors - * and just continue. */ - (void)ret; - prefaulted = 1; - } - ret = shmem_pread_slow(page, shmem_page_offset, page_length, user_data, page_do_bit17_swizzling, needs_clflush); @@ -471,11 +495,6 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, if (args->size == 0) return 0; - if (!access_ok(VERIFY_WRITE, - to_user_ptr(args->data_ptr), - args->size)) - return -EFAULT; - ret = i915_mutex_lock_interruptible(dev); if (ret) return ret; @@ -516,27 +535,7 @@ unlock: * page faults in the source data */ -static inline int -fast_user_write(struct io_mapping *mapping, - loff_t page_base, int page_offset, - char __user *user_data, - int length) -{ - void __iomem *vaddr_atomic; - void *vaddr; - unsigned long unwritten; - vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base); - /* We can use the cpu mem copy function because this is X86. */ - vaddr = (void __force*)vaddr_atomic + page_offset; - unwritten = __copy_from_user_inatomic_nocache(vaddr, - user_data, length); - io_mapping_unmap_atomic(vaddr_atomic); - return unwritten; -} -#endif - -#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK) /** * This is the fast pwrite path, where we copy the data directly from the * user into the GTT, uncached. 
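/*
 * Illustrative sketch only (not part of the patch): what the fast pwrite path
 * changed in the next hunk boils down to in this port.  Instead of
 * io_mapping_map_atomic_wc() plus __copy_from_user_inatomic_nocache(), a single
 * kernel window (dev_priv->gtt.mappable) is re-pointed at the destination GTT
 * page with MapPage() and filled with a plain memcpy(), since caller buffers
 * are directly addressable here.  The wrapper name example_gtt_write_page() is
 * hypothetical.
 */
static void example_gtt_write_page(struct drm_i915_private *dev_priv,
				   unsigned long page_base,
				   unsigned int page_offset,
				   const char *user_data,
				   unsigned int page_length)
{
	/* map the target aperture page write-combined and writable */
	MapPage(dev_priv->gtt.mappable,
		dev_priv->gtt.mappable_base + page_base, PG_WRITEC | PG_SW);

	/* copy straight from the caller's buffer into the aperture */
	memcpy((char *)dev_priv->gtt.mappable + page_offset,
	       user_data, page_length);
}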
@@ -585,9 +584,10 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, if ((page_offset + remain) > PAGE_SIZE) page_length = PAGE_SIZE - page_offset; - MapPage(dev_priv->gtt.mappable, dev_priv->gtt.mappable_base+page_base, PG_SW); + MapPage(dev_priv->gtt.mappable, + dev_priv->gtt.mappable_base+page_base, PG_WRITEC|PG_SW); - memcpy((char*)dev_priv->gtt.mappable+page_offset, user_data, page_length); + memcpy((char*)dev_priv->gtt.mappable+page_offset, user_data, page_length); remain -= page_length; user_data += page_length; diff --git a/drivers/video/drm/i915/i915_gem_stolen.c b/drivers/video/drm/i915/i915_gem_stolen.c index d42224f328..280663ce7c 100644 --- a/drivers/video/drm/i915/i915_gem_stolen.c +++ b/drivers/video/drm/i915/i915_gem_stolen.c @@ -415,6 +415,16 @@ int i915_gem_init_stolen(struct drm_device *dev) */ drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_usable_size); + { + u32 usable_size = dev_priv->gtt.stolen_usable_size >> 20; + if(i915.fbsize > usable_size) + { + i915.fbsize = usable_size; + DRM_DEBUG_KMS("Adjust framebuffer size to match reserved memory\n" + "new fbsize %dMB\n",i915.fbsize); + } + } + return 0; } diff --git a/drivers/video/drm/i915/i915_irq.c b/drivers/video/drm/i915/i915_irq.c index 87e9337f35..f2b498b52e 100644 --- a/drivers/video/drm/i915/i915_irq.c +++ b/drivers/video/drm/i915/i915_irq.c @@ -1445,8 +1445,8 @@ static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask, *pin_mask |= BIT(i); -// if (!intel_hpd_pin_to_port(i, &port)) -// continue; +// if (!intel_hpd_pin_to_port(i, &port)) + continue; if (long_pulse_detect(port, dig_hotplug_reg)) *long_mask |= BIT(i); @@ -2009,8 +2009,8 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) enum pipe pipe; u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; - if (hotplug_trigger) - ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk); +// if (hotplug_trigger) +// ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk); if (de_iir & DE_AUX_CHANNEL_A) dp_aux_irq_handler(dev); @@ -4474,13 +4474,3 @@ void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv) dev_priv->dev->driver->irq_preinstall(dev_priv->dev); dev_priv->dev->driver->irq_postinstall(dev_priv->dev); } - -irqreturn_t intel_irq_handler(struct drm_device *dev) -{ - -// printf("i915 irq\n"); -// printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ; - - return dev->driver->irq_handler(0, dev); -} - diff --git a/drivers/video/drm/i915/i915_params.c b/drivers/video/drm/i915/i915_params.c index 939a8321ca..945d2b67fb 100644 --- a/drivers/video/drm/i915/i915_params.c +++ b/drivers/video/drm/i915/i915_params.c @@ -31,7 +31,7 @@ struct i915_params i915 __read_mostly = { .lvds_channel_mode = 0, .panel_use_ssc = -1, .vbt_sdvo_panel_type = -1, - .enable_rc6 = -1, + .enable_rc6 = 0, .enable_fbc = -1, .enable_execlists = -1, .enable_hangcheck = true, diff --git a/drivers/video/drm/i915/i915_trace.h b/drivers/video/drm/i915/i915_trace.h index 5537e5674c..00558d9d28 100644 --- a/drivers/video/drm/i915/i915_trace.h +++ b/drivers/video/drm/i915/i915_trace.h @@ -43,5 +43,5 @@ #define trace_i915_page_table_entry_map(vm, pde, pt, index, count, GEN6_PTES) #define trace_i915_va_alloc(vm,start,size,name) #define trace_i915_gem_request_notify(ring) - +#define trace_i915_gem_object_pread(obj, offset, size) #endif diff --git a/drivers/video/drm/i915/kms_display.c b/drivers/video/drm/i915/kms_display.c index 1b29ccff7f..ab2fc1bf78 100644 --- a/drivers/video/drm/i915/kms_display.c +++ 
b/drivers/video/drm/i915/kms_display.c @@ -374,6 +374,7 @@ static int check_connector(struct drm_device *dev, struct drm_connector *connect connector->name, connector->base.id); return -EINVAL; }; + connector->encoder = encoder; } crtc = encoder->crtc; @@ -382,7 +383,8 @@ static int check_connector(struct drm_device *dev, struct drm_connector *connect if(crtc != NULL) { - encoder->crtc = crtc; + DRM_DEBUG_KMS("%s connector: %p encode: %p crtc: %p\n",__FUNCTION__, + connector, encoder, crtc); return 0; } else @@ -823,8 +825,6 @@ int init_cursor(cursor_t *cursor) FreeKernelSpace(mapped); -// release old cursor - KernelFree(cursor->data); cursor->data = bits; @@ -1153,13 +1153,21 @@ int i915_mask_update_ex(struct drm_device *dev, void *data, mask->height== 0 ) return 1; + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + obj = drm_gem_object_lookup(dev, file, mask->handle); if (obj == NULL) - return -ENOENT; + { + ret = -ENOENT; + goto unlock; + } - if (!obj->filp) { - drm_gem_object_unreference_unlocked(obj); - return -EINVAL; + if (!obj->filp) + { + ret = -ENOENT; + goto out; } #if 0 @@ -1179,10 +1187,6 @@ int i915_mask_update_ex(struct drm_device *dev, void *data, u8* dst_offset; u32 ifl; - ret = i915_mutex_lock_interruptible(dev); - if (ret) - goto err1; - i915_gem_object_set_to_cpu_domain(to_intel_bo(obj), true); src_offset = os_display->win_map; @@ -1301,11 +1305,12 @@ int i915_mask_update_ex(struct drm_device *dev, void *data, } #endif -err2: - mutex_unlock(&dev->struct_mutex); -err1: +out: drm_gem_object_unreference(obj); +unlock: + mutex_unlock(&dev->struct_mutex); + return ret; } diff --git a/drivers/video/drm/i915/kos_gem_fb.c b/drivers/video/drm/i915/kos_gem_fb.c index a6c96938eb..533bae775a 100644 --- a/drivers/video/drm/i915/kos_gem_fb.c +++ b/drivers/video/drm/i915/kos_gem_fb.c @@ -126,96 +126,4 @@ cleanup: } -struct drm_i915_gem_object * -kos_gem_fb_object_create(struct drm_device *dev, - u32 gtt_offset, - u32 size) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct i915_address_space *ggtt = &dev_priv->gtt.base; - struct drm_i915_gem_object *obj; - struct drm_mm_node *fb_node; - struct i915_vma *vma; - int ret; - - DRM_DEBUG_KMS("creating preallocated framebuffer object: gtt_offset=%x, size=%x\n", - gtt_offset, size); - - /* KISS and expect everything to be page-aligned */ - BUG_ON(size & 4095); - - if (WARN_ON(size == 0)) - return NULL; - - fb_node = kzalloc(sizeof(*fb_node), GFP_KERNEL); - if (!fb_node) - return NULL; - - fb_node->start = gtt_offset; - fb_node->size = size; - - obj = _kos_fb_object_create(dev, fb_node); - if (obj == NULL) { - DRM_DEBUG_KMS("failed to preallocate framebuffer object\n"); - kfree(fb_node); - return NULL; - } - - vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt); - if (IS_ERR(vma)) { - ret = PTR_ERR(vma); - goto err_out; - } - - /* To simplify the initialisation sequence between KMS and GTT, - * we allow construction of the stolen object prior to - * setting up the GTT space. The actual reservation will occur - * later. 
- */ - vma->node.start = gtt_offset; - vma->node.size = size; - if (drm_mm_initialized(&ggtt->mm)) { - ret = drm_mm_reserve_node(&ggtt->mm, &vma->node); - if (ret) { - DRM_DEBUG_KMS("failed to allocate framebuffer GTT space\n"); - goto err_vma; - } - } - -// obj->has_global_gtt_mapping = 1; - - list_add_tail(&obj->global_list, &dev_priv->mm.bound_list); - list_add_tail(&vma->mm_list, &ggtt->inactive_list); - - mutex_lock(&dev->object_name_lock); - idr_preload(GFP_KERNEL); - - if (!obj->base.name) { - ret = idr_alloc(&dev->object_name_idr, &obj->base, 1, 0, GFP_NOWAIT); - if (ret < 0) - goto err_gem; - - obj->base.name = ret; - - /* Allocate a reference for the name table. */ - drm_gem_object_reference(&obj->base); - - DRM_DEBUG_KMS("%s allocate fb name %d\n", __FUNCTION__, obj->base.name ); - } - - idr_preload_end(); - mutex_unlock(&dev->object_name_lock); - drm_gem_object_unreference(&obj->base); - return obj; - -err_gem: - idr_preload_end(); - mutex_unlock(&dev->object_name_lock); -err_vma: - i915_gem_vma_destroy(vma); -err_out: - kfree(fb_node); - drm_gem_object_unreference(&obj->base); - return NULL; -} diff --git a/drivers/video/drm/i915/main.c b/drivers/video/drm/i915/main.c index 394b808399..a2bfce0085 100644 --- a/drivers/video/drm/i915/main.c +++ b/drivers/video/drm/i915/main.c @@ -14,7 +14,7 @@ #include "bitmap.h" #include "i915_kos32.h" -#define DRV_NAME "i915 v4.4" +#define DRV_NAME "i915 v4.4.1" #define I915_DEV_CLOSE 0 #define I915_DEV_INIT 1 @@ -374,6 +374,8 @@ int do_command_line(const char* usercmd) #define SRV_MASK_UPDATE 45 #define SRV_MASK_UPDATE_EX 46 +#define SRV_I915_GEM_PREAD 47 + #define check_input(size) \ if( unlikely((inp==NULL)||(io->inp_size != (size))) ) \ break; @@ -459,6 +461,10 @@ int _stdcall display_handler(ioctl_t *io) retval = i915_gem_set_caching_ioctl(main_device, inp, file); break; + case SRV_I915_GEM_PREAD: + retval = i915_gem_pread_ioctl(main_device, inp, file); + break; + case SRV_I915_GEM_PWRITE: retval = i915_gem_pwrite_ioctl(main_device, inp, file); break; @@ -498,7 +504,6 @@ int _stdcall display_handler(ioctl_t *io) break; case SRV_I915_GEM_EXECBUFFER2: -// printf("SRV_I915_GEM_EXECBUFFER2\n"); retval = i915_gem_execbuffer2(main_device, inp, file); break; diff --git a/drivers/video/drm/i915/pci.c b/drivers/video/drm/i915/pci.c index 1d50b1b14f..43edaaa58f 100644 --- a/drivers/video/drm/i915/pci.c +++ b/drivers/video/drm/i915/pci.c @@ -1,10 +1,12 @@ +#include #include #include #include #include +#include + #include -#include extern int pci_scan_filter(u32 id, u32 busnr, u32 devfn); @@ -372,7 +374,7 @@ static pci_dev_t* pci_scan_device(u32 busnr, int devfn) -int pci_scan_slot(u32 bus, int devfn) +int _pci_scan_slot(u32 bus, int devfn) { int func, nr = 0; @@ -493,7 +495,7 @@ int enum_pci_devices() for(;bus <= last_bus; bus++) { for (devfn = 0; devfn < 0x100; devfn += 8) - pci_scan_slot(bus, devfn); + _pci_scan_slot(bus, devfn); } @@ -560,7 +562,7 @@ pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from) dev = (pci_dev_t*)dev->link.next) { if( dev->pci_dev.vendor != vendor ) - continue; + continue; if(dev->pci_dev.device == device) { @@ -571,7 +573,7 @@ pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from) }; -struct pci_dev * pci_get_bus_and_slot(unsigned int bus, unsigned int devfn) +struct pci_dev * _pci_get_bus_and_slot(unsigned int bus, unsigned int devfn) { pci_dev_t *dev; @@ -664,13 +666,6 @@ void pci_iounmap(struct pci_dev *dev, void __iomem * addr) } -static inline void 
-pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region, - struct resource *res) -{ - region->start = res->start; - region->end = res->end; -} int pci_enable_rom(struct pci_dev *pdev) @@ -682,7 +677,7 @@ int pci_enable_rom(struct pci_dev *pdev) if (!res->flags) return -1; - pcibios_resource_to_bus(pdev, ®ion, res); + _pcibios_resource_to_bus(pdev, ®ion, res); pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr); rom_addr &= ~PCI_ROM_ADDRESS_MASK; rom_addr |= region.start | PCI_ROM_ADDRESS_ENABLE; @@ -758,49 +753,49 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size) */ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size) { - struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; - loff_t start; - void __iomem *rom; + struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; + loff_t start; + void __iomem *rom; - /* - * IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy - * memory map if the VGA enable bit of the Bridge Control register is - * set for embedded VGA. - */ - if (res->flags & IORESOURCE_ROM_SHADOW) { - /* primary video rom always starts here */ - start = (loff_t)0xC0000; - *size = 0x20000; /* cover C000:0 through E000:0 */ - } else { + /* + * IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy + * memory map if the VGA enable bit of the Bridge Control register is + * set for embedded VGA. + */ + if (res->flags & IORESOURCE_ROM_SHADOW) { + /* primary video rom always starts here */ + start = (loff_t)0xC0000; + *size = 0x20000; /* cover C000:0 through E000:0 */ + } else { if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) { - *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); - return (void __iomem *)(unsigned long) - pci_resource_start(pdev, PCI_ROM_RESOURCE); - } else { + *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); + return (void __iomem *)(unsigned long) + pci_resource_start(pdev, PCI_ROM_RESOURCE); + } else { start = (loff_t)0xC0000; *size = 0x20000; /* cover C000:0 through E000:0 */ - } } + } - rom = ioremap(start, *size); - if (!rom) { - /* restore enable if ioremap fails */ - if (!(res->flags & (IORESOURCE_ROM_ENABLE | - IORESOURCE_ROM_SHADOW | - IORESOURCE_ROM_COPY))) - pci_disable_rom(pdev); - return NULL; - } + rom = ioremap(start, *size); + if (!rom) { + /* restore enable if ioremap fails */ + if (!(res->flags & (IORESOURCE_ROM_ENABLE | + IORESOURCE_ROM_SHADOW | + IORESOURCE_ROM_COPY))) + pci_disable_rom(pdev); + return NULL; + } - /* - * Try to find the true size of the ROM since sometimes the PCI window - * size is much larger than the actual size of the ROM. - * True size is important if the ROM is going to be copied. - */ - *size = pci_get_rom_size(pdev, rom, *size); - return rom; + /* + * Try to find the true size of the ROM since sometimes the PCI window + * size is much larger than the actual size of the ROM. + * True size is important if the ROM is going to be copied. + */ + *size = pci_get_rom_size(pdev, rom, *size); + return rom; } void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom) @@ -817,50 +812,260 @@ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom) pci_disable_rom(pdev); } -#if 0 -void pcibios_set_master(struct pci_dev *dev) -{ - u8 lat; - - /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */ - if (pci_is_pcie(dev)) - return; - - pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat); - if (lat < 16) - lat = (64 <= pcibios_max_latency) ? 
64 : pcibios_max_latency; - else if (lat > pcibios_max_latency) - lat = pcibios_max_latency; - else - return; - dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat); - pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat); -} -#endif - - static void __pci_set_master(struct pci_dev *dev, bool enable) { u16 old_cmd, cmd; pci_read_config_word(dev, PCI_COMMAND, &old_cmd); if (enable) - cmd = old_cmd | PCI_COMMAND_MASTER; + cmd = old_cmd | PCI_COMMAND_MASTER; else - cmd = old_cmd & ~PCI_COMMAND_MASTER; + cmd = old_cmd & ~PCI_COMMAND_MASTER; if (cmd != old_cmd) { dbgprintf("%s bus mastering\n", enable ? "enabling" : "disabling"); - pci_write_config_word(dev, PCI_COMMAND, cmd); - } + pci_write_config_word(dev, PCI_COMMAND, cmd); + } dev->is_busmaster = enable; } + +/* pci_set_master - enables bus-mastering for device dev + * @dev: the PCI device to enable + * + * Enables bus-mastering on the device and calls pcibios_set_master() + * to do the needed arch specific settings. + */ void pci_set_master(struct pci_dev *dev) { - __pci_set_master(dev, true); -// pcibios_set_master(dev); + __pci_set_master(dev, true); +// pcibios_set_master(dev); +} + +/** + * pci_clear_master - disables bus-mastering for device dev + * @dev: the PCI device to disable + */ +void pci_clear_master(struct pci_dev *dev) +{ + __pci_set_master(dev, false); +} + + +static inline int pcie_cap_version(const struct pci_dev *dev) +{ + return dev->pcie_flags_reg & PCI_EXP_FLAGS_VERS; +} + +static inline bool pcie_cap_has_devctl(const struct pci_dev *dev) +{ + return true; +} + +static inline bool pcie_cap_has_lnkctl(const struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + + return pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_ENDPOINT || + type == PCI_EXP_TYPE_LEG_END; +} + +static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + + return pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + (type == PCI_EXP_TYPE_DOWNSTREAM && + dev->pcie_flags_reg & PCI_EXP_FLAGS_SLOT); +} + +static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev) +{ + int type = pci_pcie_type(dev); + + return pcie_cap_version(dev) > 1 || + type == PCI_EXP_TYPE_ROOT_PORT || + type == PCI_EXP_TYPE_RC_EC; +} + +static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos) +{ + if (!pci_is_pcie(dev)) + return false; + + switch (pos) { + case PCI_EXP_FLAGS_TYPE: + return true; + case PCI_EXP_DEVCAP: + case PCI_EXP_DEVCTL: + case PCI_EXP_DEVSTA: + return pcie_cap_has_devctl(dev); + case PCI_EXP_LNKCAP: + case PCI_EXP_LNKCTL: + case PCI_EXP_LNKSTA: + return pcie_cap_has_lnkctl(dev); + case PCI_EXP_SLTCAP: + case PCI_EXP_SLTCTL: + case PCI_EXP_SLTSTA: + return pcie_cap_has_sltctl(dev); + case PCI_EXP_RTCTL: + case PCI_EXP_RTCAP: + case PCI_EXP_RTSTA: + return pcie_cap_has_rtctl(dev); + case PCI_EXP_DEVCAP2: + case PCI_EXP_DEVCTL2: + case PCI_EXP_LNKCAP2: + case PCI_EXP_LNKCTL2: + case PCI_EXP_LNKSTA2: + return pcie_cap_version(dev) > 1; + default: + return false; + } +} + +/* + * Note that these accessor functions are only for the "PCI Express + * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the + * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.) 
+ */ +int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val) +{ + int ret; + + *val = 0; + if (pos & 1) + return -EINVAL; + + if (pcie_capability_reg_implemented(dev, pos)) { + ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val); + /* + * Reset *val to 0 if pci_read_config_word() fails, it may + * have been written as 0xFFFF if hardware error happens + * during pci_read_config_word(). + */ + if (ret) + *val = 0; + return ret; + } + + /* + * For Functions that do not implement the Slot Capabilities, + * Slot Status, and Slot Control registers, these spaces must + * be hardwired to 0b, with the exception of the Presence Detect + * State bit in the Slot Status register of Downstream Ports, + * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8) + */ + if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { + *val = PCI_EXP_SLTSTA_PDS; + } + + return 0; +} +EXPORT_SYMBOL(pcie_capability_read_word); + +int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val) +{ + int ret; + + *val = 0; + if (pos & 3) + return -EINVAL; + + if (pcie_capability_reg_implemented(dev, pos)) { + ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val); + /* + * Reset *val to 0 if pci_read_config_dword() fails, it may + * have been written as 0xFFFFFFFF if hardware error happens + * during pci_read_config_dword(). + */ + if (ret) + *val = 0; + return ret; + } + + if (pci_is_pcie(dev) && pos == PCI_EXP_SLTCTL && + pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { + *val = PCI_EXP_SLTSTA_PDS; + } + + return 0; +} +EXPORT_SYMBOL(pcie_capability_read_dword); + +int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val) +{ + if (pos & 1) + return -EINVAL; + + if (!pcie_capability_reg_implemented(dev, pos)) + return 0; + + return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val); +} +EXPORT_SYMBOL(pcie_capability_write_word); + +int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val) +{ + if (pos & 3) + return -EINVAL; + + if (!pcie_capability_reg_implemented(dev, pos)) + return 0; + + return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val); +} +EXPORT_SYMBOL(pcie_capability_write_dword); + +int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, + u16 clear, u16 set) +{ + int ret; + u16 val; + + ret = pcie_capability_read_word(dev, pos, &val); + if (!ret) { + val &= ~clear; + val |= set; + ret = pcie_capability_write_word(dev, pos, val); + } + + return ret; } +int pcie_get_readrq(struct pci_dev *dev) +{ + u16 ctl; + + pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl); + + return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12); +} +EXPORT_SYMBOL(pcie_get_readrq); + +/** + * pcie_set_readrq - set PCI Express maximum memory read request + * @dev: PCI device to query + * @rq: maximum memory read count in bytes + * valid values are 128, 256, 512, 1024, 2048, 4096 + * + * If possible sets maximum memory read request in bytes + */ +int pcie_set_readrq(struct pci_dev *dev, int rq) +{ + u16 v; + + if (rq < 128 || rq > 4096 || !is_power_of_2(rq)) + return -EINVAL; + + v = (ffs(rq) - 8) << 12; + + return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_READRQ, v); +} + diff --git a/drivers/video/drm/i915/utils.c b/drivers/video/drm/i915/utils.c index a241f1c93d..366970d9c9 100644 --- a/drivers/video/drm/i915/utils.c +++ b/drivers/video/drm/i915/utils.c @@ -588,25 +588,6 @@ void kunmap_atomic(void *vaddr) MutexUnlock(&kmap_mutex); } -size_t strlcat(char *dest, 
const char *src, size_t count) -{ - size_t dsize = strlen(dest); - size_t len = strlen(src); - size_t res = dsize + len; - - /* This would be a bug */ - BUG_ON(dsize >= count); - - dest += dsize; - count -= dsize; - if (len >= count) - len = count-1; - memcpy(dest, src, len); - dest[len] = 0; - return res; -} -EXPORT_SYMBOL(strlcat); - void msleep(unsigned int msecs) { msecs /= 10;
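/*
 * Illustrative sketch only (not part of the patch): the argument block that the
 * new SRV_I915_GEM_PREAD service (47) added in main.c expects to find in the
 * ioctl input buffer (inp).  The layout is the stock drm_i915_gem_pread
 * structure; the helper name example_fill_pread_args() is hypothetical, and how
 * a client actually issues the SRV_* call is outside this patch.
 */
static void example_fill_pread_args(struct drm_i915_gem_pread *args,
				    u32 handle, u64 offset, u64 size, void *dst)
{
	args->handle   = handle;	/* GEM handle of the source object */
	args->offset   = offset;	/* byte offset inside the object   */
	args->size     = size;		/* number of bytes to copy         */
	args->data_ptr = (u64)(unsigned long)dst;	/* destination buffer */
	/* display_handler() then routes this block to
	 * i915_gem_pread_ioctl(main_device, inp, file). */
}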