i915-4.4.30
git-svn-id: svn://kolibrios.org@6660 a494cfbc-eb01-0410-851d-a64ba20cac60
Parent: c2e8916773
Commit: bbf8a71cf4
@ -150,7 +150,7 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
for (i = 0; i < state->num_connector; i++) {
struct drm_connector *connector = state->connectors[i];

if (!connector)
if (!connector || !connector->funcs)
continue;

/*
@ -367,6 +367,8 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
drm_property_unreference_blob(state->mode_blob);
state->mode_blob = NULL;

memset(&state->mode, 0, sizeof(state->mode));

if (blob) {
if (blob->length != sizeof(struct drm_mode_modeinfo) ||
drm_mode_convert_umode(&state->mode,
@ -379,7 +381,6 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
state->mode.name, state);
} else {
memset(&state->mode, 0, sizeof(state->mode));
state->enable = false;
DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
state);

@ -130,6 +130,7 @@ drm_clflush_virt_range(void *addr, unsigned long length)
mb();
for (; addr < end; addr += size)
clflush(addr);
clflush(end - 1); /* force serialisation */
mb();
return;
}

@ -2685,8 +2685,6 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
goto out;
}

drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);

/*
* Check whether the primary plane supports the fb pixel format.
* Drivers not implementing the universal planes API use a
@ -2743,10 +2741,10 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,

for (i = 0; i < crtc_req->count_connectors; i++) {
set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
// if (get_user(out_id, &set_connectors_ptr[i])) {
// ret = -EFAULT;
// goto out;
// }
if (get_user(out_id, &set_connectors_ptr[i])) {
ret = -EFAULT;
goto out;
}

connector = drm_connector_find(dev, out_id);
if (!connector) {
@ -3420,7 +3418,6 @@ out_err1:
return ret;
}


/**
* drm_fb_release - remove and free the FBs on this file
* @priv: drm file for the ioctl

@ -178,7 +178,7 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
{
struct drm_dp_aux_msg msg;
unsigned int retry;
int err;
int err = 0;

memset(&msg, 0, sizeof(msg));
msg.address = offset;
@ -186,6 +186,8 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
msg.buffer = buffer;
msg.size = size;

mutex_lock(&aux->hw_mutex);

/*
* The specification doesn't give any recommendation on how often to
* retry native transactions. We used to retry 7 times like for
@ -194,25 +196,24 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
*/
for (retry = 0; retry < 32; retry++) {

mutex_lock(&aux->hw_mutex);
err = aux->transfer(aux, &msg);
mutex_unlock(&aux->hw_mutex);
if (err < 0) {
if (err == -EBUSY)
continue;

return err;
goto unlock;
}


switch (msg.reply & DP_AUX_NATIVE_REPLY_MASK) {
case DP_AUX_NATIVE_REPLY_ACK:
if (err < size)
return -EPROTO;
return err;
err = -EPROTO;
goto unlock;

case DP_AUX_NATIVE_REPLY_NACK:
return -EIO;
err = -EIO;
goto unlock;

case DP_AUX_NATIVE_REPLY_DEFER:
usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100);
@ -221,7 +222,11 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
}

DRM_DEBUG_KMS("too many retries, giving up\n");
return -EIO;
err = -EIO;

unlock:
mutex_unlock(&aux->hw_mutex);
return err;
}

/**
@ -543,9 +548,7 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
int max_retries = max(7, drm_dp_i2c_retry_count(msg, dp_aux_i2c_speed_khz));

for (retry = 0, defer_i2c = 0; retry < (max_retries + defer_i2c); retry++) {
mutex_lock(&aux->hw_mutex);
ret = aux->transfer(aux, msg);
mutex_unlock(&aux->hw_mutex);
if (ret < 0) {
if (ret == -EBUSY)
continue;
@ -684,6 +687,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,

memset(&msg, 0, sizeof(msg));

mutex_lock(&aux->hw_mutex);

for (i = 0; i < num; i++) {
msg.address = msgs[i].addr;
drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
@ -738,6 +743,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
msg.size = 0;
(void)drm_dp_i2c_do_msg(aux, &msg);

mutex_unlock(&aux->hw_mutex);

return err;
}

@ -1672,14 +1672,20 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb;
int len, ret, port_num;

port = drm_dp_get_validated_port_ref(mgr, port);
if (!port)
return -EINVAL;

port_num = port->port_num;
mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
if (!mstb) {
mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);

if (!mstb)
if (!mstb) {
drm_dp_put_port(port);
return -EINVAL;
}
}

txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg) {
@ -1704,6 +1710,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
kfree(txmsg);
fail_put:
drm_dp_put_mst_branch_device(mstb);
drm_dp_put_port(port);
return ret;
}

@ -1786,6 +1793,11 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
req_payload.start_slot = cur_slots;
if (mgr->proposed_vcpis[i]) {
port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
port = drm_dp_get_validated_port_ref(mgr, port);
if (!port) {
mutex_unlock(&mgr->payload_lock);
return -EINVAL;
}
req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
} else {
port = NULL;
@ -1811,6 +1823,9 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
mgr->payloads[i].payload_state = req_payload.payload_state;
}
cur_slots += req_payload.num_slots;

if (port)
drm_dp_put_port(port);
}

for (i = 0; i < mgr->max_payloads; i++) {
@ -2114,6 +2129,8 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)

if (mgr->mst_primary) {
int sret;
u8 guid[16];

sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
if (sret != DP_RECEIVER_CAP_SIZE) {
DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
@ -2128,6 +2145,16 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
ret = -1;
goto out_unlock;
}

/* Some hubs forget their guids after they resume */
sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
if (sret != 16) {
DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
ret = -1;
goto out_unlock;
}
drm_dp_check_mstb_guid(mgr->mst_primary, guid);

ret = 0;
} else
ret = -1;

@ -73,6 +73,8 @@
#define EDID_QUIRK_FORCE_8BPC (1 << 8)
/* Force 12bpc */
#define EDID_QUIRK_FORCE_12BPC (1 << 9)
/* Force 6bpc */
#define EDID_QUIRK_FORCE_6BPC (1 << 10)

struct detailed_mode_closure {
struct drm_connector *connector;
@ -99,6 +101,9 @@ static struct edid_quirk {
/* Unknown Acer */
{ "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED },

/* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
{ "AEO", 0, EDID_QUIRK_FORCE_6BPC },

/* Belinea 10 15 55 */
{ "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
{ "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
@ -3820,6 +3825,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)

drm_add_display_info(edid, &connector->display_info, connector);

if (quirks & EDID_QUIRK_FORCE_6BPC)
connector->display_info.bpc = 6;

if (quirks & EDID_QUIRK_FORCE_8BPC)
connector->display_info.bpc = 8;

@ -431,7 +431,6 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
bound++;
}

dbgprintf("%s bound %d crtcs_bound %d\n", __FUNCTION__, bound, crtcs_bound);
if (bound < crtcs_bound)
return false;

@ -1588,7 +1587,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
int n, int width, int height)
{
int c, o;
struct drm_device *dev = fb_helper->dev;
struct drm_connector *connector;
const struct drm_connector_helper_funcs *connector_funcs;
struct drm_encoder *encoder;
@ -1607,7 +1605,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
if (modes[n] == NULL)
return best_score;

crtcs = kzalloc(dev->mode_config.num_connector *
crtcs = kzalloc(fb_helper->connector_count *
sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
if (!crtcs)
return best_score;
@ -1653,7 +1651,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
if (score > best_score) {
best_score = score;
memcpy(best_crtcs, crtcs,
dev->mode_config.num_connector *
fb_helper->connector_count *
sizeof(struct drm_fb_helper_crtc *));
}
}

@ -253,8 +253,8 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)

if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, filp);
drm_gem_object_handle_unreference_unlocked(obj);

drm_gem_object_handle_unreference_unlocked(obj);
return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

@ -1239,6 +1239,7 @@ void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
pipe, vblank->enabled, vblank->inmodeset);
vblank_disable_and_save(dev, pipe);

wake_up(&vblank->queue);

/*

@ -1487,6 +1487,8 @@ int drm_mode_convert_umode(struct drm_display_mode *out,
if (out->status != MODE_OK)
goto out;

drm_mode_set_crtcinfo(out, CRTC_INTERLACE_HALVE_V);

ret = 0;

out:

@ -32,6 +32,7 @@
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_core.h>
#include "drm_internal.h"

unsigned int drm_debug = 0; /* 1 to enable debug output */
EXPORT_SYMBOL(drm_debug);

@ -7,6 +7,7 @@
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/agp_backend.h>
#include "agp.h"

@ -582,6 +582,9 @@ static int intel_gtt_init(void)
gtt_map_size = intel_private.gtt_total_entries * 4;

intel_private.gtt = NULL;
if (intel_gtt_can_wc())
intel_private.gtt = ioremap_wc(intel_private.gtt_phys_addr,
gtt_map_size);
if (intel_private.gtt == NULL)
intel_private.gtt = ioremap(intel_private.gtt_phys_addr,
gtt_map_size);
@ -954,7 +957,7 @@ static void intel_i9xx_setup_flush(void)
}

if (intel_private.ifp_resource.start)
intel_private.i9xx_flush_page = ioremap(intel_private.ifp_resource.start, PAGE_SIZE);
intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
if (!intel_private.i9xx_flush_page)
dev_err(&intel_private.pcidev->dev,
"can't ioremap flush page - no chipset flushing\n");

@ -16,7 +16,7 @@ INCLUDES = -I$(DRV_INCLUDES) \
-I$(DRV_INCLUDES)/drm -I./ -I$(DRV_INCLUDES)

CFLAGS= -c -O2 $(INCLUDES) $(DEFINES) -march=i686 -fno-ident -msse2 -fomit-frame-pointer -fno-builtin-printf
CFLAGS+= -mno-stack-arg-probe -mno-ms-bitfields
CFLAGS+= -mno-stack-arg-probe -mpreferred-stack-boundary=2 -mincoming-stack-boundary=2 -mno-ms-bitfields

LIBPATH:= $(DDK_TOPDIR)

@ -43,7 +43,9 @@
#endif

#include "ansidecl.h"
#include <stdio.h>
#define NULL (void*)0

//#include <stdio.h>

/* Comment out all this code if we are using the GNU C Library, and are not
actually compiling the library itself. This code is part of the GNU C

@ -32,7 +32,7 @@
#endif
#endif

#include <stdio.h>
//#include <stdio.h>

#include "getopt.h"

@ -40,7 +40,7 @@
#include "i915_trace.h"
#include <linux/pci.h>
#include <linux/vgaarb.h>
//#include <linux/acpi.h>
#include <linux/acpi.h>
//#include <linux/pnp.h>
//#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
@ -230,8 +230,6 @@ intel_setup_mchbar(struct drm_device *dev)
*/
dev_priv->mchbar_need_disable = true;

DRM_INFO("enable MCHBAR\n");

/* Space is allocated or reserved, so enable it. */
if (IS_I915G(dev) || IS_I915GM(dev)) {
pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
@ -313,6 +311,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_gem_stolen;

intel_setup_gmbus(dev);

/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
intel_modeset_init(dev);
@ -922,7 +922,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)

/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev);
intel_setup_gmbus(dev);
intel_opregion_setup(dev);

i915_gem_load(dev);
@ -1066,7 +1065,6 @@ int i915_driver_unload(struct drm_device *dev)

intel_csr_ucode_fini(dev);

intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);

destroy_workqueue(dev_priv->hotplug.dp_wq);

@ -28,6 +28,7 @@
*/

#include <linux/device.h>
#include <linux/acpi.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

@ -2647,6 +2647,7 @@ struct drm_i915_cmd_table {
extern const struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;

extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
extern int i915_resume_switcheroo(struct drm_device *dev);

/* i915_params.c */

@ -34,9 +34,10 @@
#include "intel_drv.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
//#include <linux/swap.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

#define RQ_BUG_ON(expr)

extern int x86_clflush_size;
@ -1261,7 +1262,7 @@ int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
list_add_tail(&req->client_list, &file_priv->mm.request_list);
spin_unlock(&file_priv->mm.lock);

req->pid = 1;
req->pid = (struct pid*)1;

return 0;
}
@ -1278,6 +1279,7 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
list_del(&request->client_list);
request->file_priv = NULL;
spin_unlock(&file_priv->mm.lock);
request->pid = NULL;
}

static void i915_gem_request_retire(struct drm_i915_gem_request *request)
@ -1306,6 +1308,7 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
struct intel_engine_cs *engine = req->ring;
struct drm_i915_gem_request *tmp;

lockdep_assert_held(&engine->dev->struct_mutex);

if (list_empty(&req->list))
return;

@ -2740,6 +2740,7 @@ void i915_global_gtt_cleanup(struct drm_device *dev)
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

ppgtt->base.cleanup(&ppgtt->base);
kfree(ppgtt);
}

if (drm_mm_initialized(&vm->mm)) {

@ -2838,7 +2838,14 @@ enum skl_disp_power_wells {
#define GEN6_RP_STATE_CAP (MCHBAR_MIRROR_BASE_SNB + 0x5998)
#define BXT_RP_STATE_CAP 0x138170

#define INTERVAL_1_28_US(us) (((us) * 100) >> 7)
/*
* Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS
* 8300) freezing up around GPU hangs. Looks as if even
* scheduling/timer interrupts start misbehaving if the RPS
* EI/thresholds are "bad", leading to a very sluggish or even
* frozen machine.
*/
#define INTERVAL_1_28_US(us) roundup(((us) * 100) >> 7, 25)
#define INTERVAL_1_33_US(us) (((us) * 3) >> 2)
#define INTERVAL_0_833_US(us) (((us) * 6) / 5)
#define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \
@ -3233,19 +3240,20 @@ enum skl_disp_power_wells {

#define PORT_HOTPLUG_STAT (dev_priv->info.display_mmio_offset + 0x61114)
/*
* HDMI/DP bits are gen4+
* HDMI/DP bits are g4x+
*
* WARNING: Bspec for hpd status bits on gen4 seems to be completely confused.
* Please check the detailed lore in the commit message for for experimental
* evidence.
*/
#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
/* Bspec says GM45 should match G4X/VLV/CHV, but reality disagrees */
#define PORTD_HOTPLUG_LIVE_STATUS_GM45 (1 << 29)
#define PORTC_HOTPLUG_LIVE_STATUS_GM45 (1 << 28)
#define PORTB_HOTPLUG_LIVE_STATUS_GM45 (1 << 27)
/* G4X/VLV/CHV DP/HDMI bits again match Bspec */
#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
#define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28)
#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
/* VLV DP/HDMI bits again match Bspec */
#define PORTD_HOTPLUG_LIVE_STATUS_VLV (1 << 27)
#define PORTC_HOTPLUG_LIVE_STATUS_VLV (1 << 28)
#define PORTB_HOTPLUG_LIVE_STATUS_VLV (1 << 29)
#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
#define PORTD_HOTPLUG_INT_LONG_PULSE (2 << 21)
#define PORTD_HOTPLUG_INT_SHORT_PULSE (1 << 21)
@ -7350,6 +7358,8 @@ enum skl_disp_power_wells {
#define TRANS_CLK_SEL_DISABLED (0x0<<29)
#define TRANS_CLK_SEL_PORT(x) (((x)+1)<<29)

#define CDCLK_FREQ 0x46200

#define TRANSA_MSA_MISC 0x60410
#define TRANSB_MSA_MISC 0x61410
#define TRANSC_MSA_MISC 0x62410

@ -1351,3 +1351,42 @@ intel_parse_bios(struct drm_device *dev)

return 0;
}

/**
* intel_bios_is_port_present - is the specified digital port present
* @dev_priv: i915 device instance
* @port: port to check
*
* Return true if the device in %port is present.
*/
bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port)
{
static const struct {
u16 dp, hdmi;
} port_mapping[] = {
[PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, },
[PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, },
[PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
[PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
};
int i;

/* FIXME maybe deal with port A as well? */
if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
return false;

if (!dev_priv->vbt.child_dev_num)
return false;

for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
const union child_device_config *p_child =
&dev_priv->vbt.child_dev[i];
if ((p_child->common.dvo_port == port_mapping[port].dp ||
p_child->common.dvo_port == port_mapping[port].hdmi) &&
(p_child->common.device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING |
DEVICE_TYPE_DISPLAYPORT_OUTPUT)))
return true;
}

return false;
}

@ -248,8 +248,14 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
pipe_config->has_pch_encoder = true;

/* LPT FDI RX only supports 8bpc. */
if (HAS_PCH_LPT(dev))
if (HAS_PCH_LPT(dev)) {
if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
DRM_DEBUG_KMS("LPT only supports 24bpp\n");
return false;
}

pipe_config->pipe_bpp = 24;
}

/* FDI must always be 2.7 GHz */
if (HAS_DDI(dev)) {

@ -180,7 +180,8 @@ struct stepping_info {
static const struct stepping_info skl_stepping_info[] = {
{'A', '0'}, {'B', '0'}, {'C', '0'},
{'D', '0'}, {'E', '0'}, {'F', '0'},
{'G', '0'}, {'H', '0'}, {'I', '0'}
{'G', '0'}, {'H', '0'}, {'I', '0'},
{'J', '0'}, {'K', '0'}
};

static struct stepping_info bxt_stepping_info[] = {
@ -438,6 +439,8 @@ void intel_csr_ucode_init(struct drm_device *dev)
return;
}
#if 0
DRM_DEBUG_KMS("Loading %s\n", csr->fw_path);

/*
* Obtain a runtime pm reference, until CSR is loaded,
* to avoid entering runtime-suspend.

@ -464,9 +464,17 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
} else if (IS_BROADWELL(dev)) {
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;

if (dev_priv->edp_low_vswing) {
ddi_translations_edp = bdw_ddi_translations_edp;
n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
} else {
ddi_translations_edp = bdw_ddi_translations_dp;
n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
}

ddi_translations_hdmi = bdw_ddi_translations_hdmi;
n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);

n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
hdmi_default_entry = 7;
@ -3188,12 +3196,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
intel_ddi_clock_get(encoder, pipe_config);
}

static void intel_ddi_destroy(struct drm_encoder *encoder)
{
/* HDMI has nothing special to destroy, so we can go with this. */
intel_dp_encoder_destroy(encoder);
}

static bool intel_ddi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
@ -3212,7 +3214,8 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
}

static const struct drm_encoder_funcs intel_ddi_funcs = {
.destroy = intel_ddi_destroy,
.reset = intel_dp_encoder_reset,
.destroy = intel_dp_encoder_destroy,
};

static struct intel_connector *
@ -3284,6 +3287,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
intel_encoder->post_disable = intel_ddi_post_disable;
intel_encoder->get_hw_state = intel_ddi_get_hw_state;
intel_encoder->get_config = intel_ddi_get_config;
intel_encoder->suspend = intel_dp_encoder_suspend;

intel_dig_port->port = port;
intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &

@ -2950,13 +2950,13 @@ u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
}
}

unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
struct drm_i915_gem_object *obj,
unsigned int plane)
{
const struct i915_ggtt_view *view = &i915_ggtt_view_normal;
struct i915_vma *vma;
unsigned char *offset;
u64 offset;

if (intel_rotation_90_or_270(intel_plane->base.state->rotation))
view = &i915_ggtt_view_rotated;
@ -2966,14 +2966,16 @@ unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
view->type))
return -1;

offset = (unsigned char *)vma->node.start;
offset = vma->node.start;

if (plane == 1) {
offset += vma->ggtt_view.rotation_info.uv_start_page *
PAGE_SIZE;
}

return (unsigned long)offset;
WARN_ON(upper_32_bits(offset));

return lower_32_bits(offset);
}

static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
@ -3099,7 +3101,7 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
u32 tile_height, plane_offset, plane_size;
unsigned int rotation;
int x_offset, y_offset;
unsigned long surf_addr;
u32 surf_addr;
struct intel_crtc_state *crtc_state = intel_crtc->config;
struct intel_plane_state *plane_state;
int src_x = 0, src_y = 0, src_w = 0, src_h = 0;
@ -4447,7 +4449,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);

return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
&state->scaler_state.scaler_id, DRM_ROTATE_0,
&state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
state->pipe_src_w, state->pipe_src_h,
adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
}
@ -8228,12 +8230,14 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
int i;
u32 val, final;
bool has_lvds = false;
bool has_cpu_edp = false;
bool has_panel = false;
bool has_ck505 = false;
bool can_ssc = false;
bool using_ssc_source = false;

/* We need to take the global config into account */
for_each_intel_encoder(dev, encoder) {
@ -8260,8 +8264,22 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
can_ssc = true;
}

DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
has_panel, has_lvds, has_ck505);
/* Check if any DPLLs are using the SSC source */
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
u32 temp = I915_READ(PCH_DPLL(i));

if (!(temp & DPLL_VCO_ENABLE))
continue;

if ((temp & PLL_REF_INPUT_MASK) ==
PLLB_REF_INPUT_SPREADSPECTRUMIN) {
using_ssc_source = true;
break;
}
}

DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
has_panel, has_lvds, has_ck505, using_ssc_source);

/* Ironlake: try to setup display ref clock before DPLL
* enabling. This is only under driver's control after
@ -8298,9 +8316,9 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
} else
final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
} else {
final |= DREF_SSC_SOURCE_DISABLE;
final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
} else if (using_ssc_source) {
final |= DREF_SSC_SOURCE_ENABLE;
final |= DREF_SSC1_ENABLE;
}

if (final == val)
@ -8346,7 +8364,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
} else {
DRM_DEBUG_KMS("Disabling SSC entirely\n");
DRM_DEBUG_KMS("Disabling CPU source output\n");

val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

@ -8357,6 +8375,9 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
POSTING_READ(PCH_DREF_CONTROL);
udelay(200);

if (!using_ssc_source) {
DRM_DEBUG_KMS("Disabling SSC source\n");

/* Turn off the SSC source */
val &= ~DREF_SSC_SOURCE_MASK;
val |= DREF_SSC_SOURCE_DISABLE;
@ -8368,6 +8389,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
}
}

BUG_ON(val != final);
}
@ -9669,6 +9691,8 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
mutex_unlock(&dev_priv->rps.hw_lock);

I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);

intel_update_cdclk(dev);

WARN(cdclk != dev_priv->cdclk_freq,
@ -11928,21 +11952,11 @@ connected_sink_compute_bpp(struct intel_connector *connector,
pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
}

/* Clamp bpp to default limit on screens without EDID 1.4 */
if (connector->base.display_info.bpc == 0) {
int type = connector->base.connector_type;
int clamp_bpp = 24;

/* Fall back to 18 bpp when DP sink capability is unknown. */
if (type == DRM_MODE_CONNECTOR_DisplayPort ||
type == DRM_MODE_CONNECTOR_eDP)
clamp_bpp = 18;

if (bpp > clamp_bpp) {
DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
bpp, clamp_bpp);
pipe_config->pipe_bpp = clamp_bpp;
}
/* Clamp bpp to 8 on screens without EDID 1.4 */
if (connector->base.display_info.bpc == 0 && bpp > 24) {
DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
bpp);
pipe_config->pipe_bpp = 24;
}
}

@ -14146,6 +14160,8 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(PCH_DP_D) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev)) {
bool has_edp, has_port;

/*
* The DP_DETECTED bit is the latched state of the DDC
* SDA pin at boot. However since eDP doesn't require DDC
@ -14154,27 +14170,37 @@ static void intel_setup_outputs(struct drm_device *dev)
* Thus we can't rely on the DP_DETECTED bit alone to detect
* eDP ports. Consult the VBT as well as DP_DETECTED to
* detect eDP ports.
*
* Sadly the straps seem to be missing sometimes even for HDMI
* ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
* and VBT for the presence of the port. Additionally we can't
* trust the port type the VBT declares as we've seen at least
* HDMI ports that the VBT claim are DP or eDP.
*/
if (I915_READ(VLV_HDMIB) & SDVO_DETECTED &&
!intel_dp_is_edp(dev, PORT_B))
has_edp = intel_dp_is_edp(dev, PORT_B);
has_port = intel_bios_is_port_present(dev_priv, PORT_B);
if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
if (I915_READ(VLV_DP_B) & DP_DETECTED ||
intel_dp_is_edp(dev, PORT_B))
intel_dp_init(dev, VLV_DP_B, PORT_B);

if (I915_READ(VLV_HDMIC) & SDVO_DETECTED &&
!intel_dp_is_edp(dev, PORT_C))
has_edp = intel_dp_is_edp(dev, PORT_C);
has_port = intel_bios_is_port_present(dev_priv, PORT_C);
if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
if (I915_READ(VLV_DP_C) & DP_DETECTED ||
intel_dp_is_edp(dev, PORT_C))
intel_dp_init(dev, VLV_DP_C, PORT_C);

if (IS_CHERRYVIEW(dev)) {
/* eDP not supported on port D, so don't check VBT */
if (I915_READ(CHV_HDMID) & SDVO_DETECTED)
intel_hdmi_init(dev, CHV_HDMID, PORT_D);
if (I915_READ(CHV_DP_D) & DP_DETECTED)
/*
* eDP not supported on port D,
* so no need to worry about it
*/
has_port = intel_bios_is_port_present(dev_priv, PORT_D);
if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
intel_dp_init(dev, CHV_DP_D, PORT_D);
if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
intel_hdmi_init(dev, CHV_HDMID, PORT_D);
}

intel_dsi_init(dev);

@ -3626,7 +3626,6 @@ static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
uint8_t dp_train_pat)
{
if (!intel_dp->train_set_valid)
memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
intel_dp_set_signal_levels(intel_dp, DP);
return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
@ -3744,22 +3743,6 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
break;
}

/*
* if we used previously trained voltage and pre-emphasis values
* and we don't get clock recovery, reset link training values
*/
if (intel_dp->train_set_valid) {
DRM_DEBUG_KMS("clock recovery not ok, reset");
/* clear the flag as we are not reusing train set */
intel_dp->train_set_valid = false;
if (!intel_dp_reset_link_train(intel_dp, &DP,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE)) {
DRM_ERROR("failed to enable link training\n");
return;
}
continue;
}

/* Check to see if we've tried the max voltage */
for (i = 0; i < intel_dp->lane_count; i++)
@ -3852,7 +3835,6 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
/* Make sure clock is still ok */
if (!drm_dp_clock_recovery_ok(link_status,
intel_dp->lane_count)) {
intel_dp->train_set_valid = false;
intel_dp_link_training_clock_recovery(intel_dp);
intel_dp_set_link_train(intel_dp, &DP,
training_pattern |
@ -3869,7 +3851,6 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)

/* Try 5 times, then try clock recovery if that fails */
if (tries > 5) {
intel_dp->train_set_valid = false;
intel_dp_link_training_clock_recovery(intel_dp);
intel_dp_set_link_train(intel_dp, &DP,
training_pattern |
@ -3891,11 +3872,9 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)

intel_dp->DP = DP;

if (channel_eq) {
intel_dp->train_set_valid = true;
if (channel_eq)
DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
}
}

void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
@ -4611,20 +4590,20 @@ static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
return I915_READ(PORT_HOTPLUG_STAT) & bit;
}

static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port)
{
u32 bit;

switch (port->port) {
case PORT_B:
bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
break;
case PORT_C:
bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
break;
case PORT_D:
bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
break;
default:
MISSING_CASE(port->port);
@ -4676,8 +4655,8 @@ bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
return cpt_digital_port_connected(dev_priv, port);
else if (IS_BROXTON(dev_priv))
return bxt_digital_port_connected(dev_priv, port);
else if (IS_VALLEYVIEW(dev_priv))
return vlv_digital_port_connected(dev_priv, port);
else if (IS_GM45(dev_priv))
return gm45_digital_port_connected(dev_priv, port);
else
return g4x_digital_port_connected(dev_priv, port);
}
@ -5029,7 +5008,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
kfree(intel_dig_port);
}

static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

@ -5071,15 +5050,17 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
edp_panel_vdd_schedule_off(intel_dp);
}

static void intel_dp_encoder_reset(struct drm_encoder *encoder)
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
struct intel_dp *intel_dp;
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

if (!HAS_DDI(dev_priv))
intel_dp->DP = I915_READ(intel_dp->output_reg);

if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
return;

intel_dp = enc_to_intel_dp(encoder);

pps_lock(intel_dp);

/*
@ -5151,9 +5132,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
intel_display_power_get(dev_priv, power_domain);

if (long_hpd) {
/* indicate that we need to restart link training */
intel_dp->train_set_valid = false;

if (!intel_digital_port_connected(dev_priv, intel_dig_port))
goto mst_fail;

@ -6127,8 +6105,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
return true;
}

void
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
bool intel_dp_init(struct drm_device *dev,
int output_reg,
enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port;
@ -6138,7 +6117,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)

intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
if (!intel_dig_port)
return;
return false;

intel_connector = intel_connector_alloc();
if (!intel_connector)
@ -6193,15 +6172,14 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
if (!intel_dp_init_connector(intel_dig_port, intel_connector))
goto err_init_connector;

return;
return true;

err_init_connector:
drm_encoder_cleanup(encoder);
kfree(intel_connector);
err_connector_alloc:
kfree(intel_dig_port);

return;
return false;
}

void intel_dp_mst_suspend(struct drm_device *dev)

@ -477,6 +477,8 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;

intel_connector->unregister(intel_connector);

/* need to nuke the connector */
drm_modeset_lock_all(dev);
if (connector->state->crtc) {
@ -490,11 +492,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,

WARN(ret, "Disabling mst crtc failed with %i\n", ret);
}
drm_modeset_unlock_all(dev);

intel_connector->unregister(intel_connector);

drm_modeset_lock_all(dev);
intel_connector_remove_from_fbdev(intel_connector);
drm_connector_cleanup(connector);
drm_modeset_unlock_all(dev);

@ -782,7 +782,6 @@ struct intel_dp {
bool has_aux_irq,
int send_bytes,
uint32_t aux_clock_divider);
bool train_set_valid;

/* Displayport compliance testing */
unsigned long compliance_test_type;
@ -1177,7 +1176,7 @@ void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);

unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
struct drm_i915_gem_object *obj,
unsigned int plane);

@ -1195,7 +1194,7 @@ void intel_csr_ucode_fini(struct drm_device *dev);
void assert_csr_loaded(struct drm_i915_private *dev_priv);

/* intel_dp.c */
void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
bool intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
void intel_dp_set_link_params(struct intel_dp *intel_dp,
@ -1203,6 +1202,8 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
void intel_dp_start_link_train(struct intel_dp *intel_dp);
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
void intel_dp_encoder_reset(struct drm_encoder *encoder);
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
void intel_dp_encoder_destroy(struct drm_encoder *encoder);
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,

@ -347,12 +347,12 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
uint64_t conn_configured = 0, mask;
int pass = 0;

save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool),
save_enabled = kcalloc(fb_helper->connector_count, sizeof(bool),
GFP_KERNEL);
if (!save_enabled)
return false;

memcpy(save_enabled, enabled, dev->mode_config.num_connector);
memcpy(save_enabled, enabled, fb_helper->connector_count);
mask = (1 << fb_helper->connector_count) - 1;
retry:
for (i = 0; i < fb_helper->connector_count; i++) {
@ -486,7 +486,7 @@ retry:
if (fallback) {
bail:
DRM_DEBUG_KMS("Not using firmware configuration\n");
memcpy(enabled, save_enabled, dev->mode_config.num_connector);
memcpy(enabled, save_enabled, fb_helper->connector_count);
kfree(save_enabled);
return false;
}

@ -1388,8 +1388,16 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
hdmi_to_dig_port(intel_hdmi));
}

if (!live_status)
DRM_DEBUG_KMS("Live status not up!");
if (!live_status) {
DRM_DEBUG_KMS("HDMI live status down\n");
/*
* Live status register is not reliable on all intel platforms.
* So consider live_status only for certain platforms, for
* others, read EDID to determine presence of sink.
*/
if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv))
live_status = true;
}

intel_hdmi_unset_edid(connector);

@ -2022,6 +2030,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
enum port port = intel_dig_port->port;
uint8_t alternate_ddc_pin;

DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
port_name(port));

drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);

@ -776,11 +776,11 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
if (unlikely(total_bytes > remain_usable)) {
/*
* The base request will fit but the reserved space
* falls off the end. So only need to to wait for the
* reserved size after flushing out the remainder.
* falls off the end. So don't need an immediate wrap
* and only need to effectively wait for the reserved
* size space from the start of ringbuffer.
*/
wait_bytes = remain_actual + ringbuf->reserved_size;
need_wrap = true;
} else if (total_bytes > ringbuf->space) {
/* No wrapping required, just waiting. */
wait_bytes = total_bytes;

@ -38,7 +38,7 @@
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
//#include <linux/acpi.h>
#include <linux/acpi.h>

/* Private structure for the integrated LVDS support */
struct intel_lvds_connector {

@ -682,7 +682,7 @@ static void intel_didl_outputs(struct drm_device *dev)
}

if (!acpi_video_bus) {
DRM_ERROR("No ACPI video bus found\n");
DRM_DEBUG_KMS("No ACPI video bus found\n");
return;
}

@ -1794,16 +1794,20 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
const struct intel_plane_state *pstate,
uint32_t mem_value)
{
int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
/*
* We treat the cursor plane as always-on for the purposes of watermark
* calculation. Until we have two-stage watermark programming merged,
* this is necessary to avoid flickering.
*/
int cpp = 4;
int width = pstate->visible ? pstate->base.crtc_w : 64;

if (!cstate->base.active || !pstate->visible)
if (!cstate->base.active)
return 0;

return ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
cstate->base.adjusted_mode.crtc_htotal,
drm_rect_width(&pstate->dst),
bpp,
mem_value);
width, cpp, mem_value);
}

/* Only for WM_LP. */
@ -3885,6 +3889,8 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));

memset(active, 0, sizeof(*active));

active->pipe_enabled = intel_crtc->active;

if (active->pipe_enabled) {
@ -4525,7 +4531,8 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
else
gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
dev_priv->rps.last_adj = 0;
I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
I915_WRITE(GEN6_PMINTRMSK,
gen6_sanitize_rps_pm_mask(dev_priv, ~0));
}
mutex_unlock(&dev_priv->rps.hw_lock);

@ -6620,6 +6627,12 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
misccpctl = I915_READ(GEN7_MISCCPCTL);
I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
/*
* Wait at least 100 clocks before re-enabling clock gating. See
* the definition of L3SQCREG1 in BSpec.
*/
POSTING_READ(GEN8_L3SQCREG1);
udelay(1);
I915_WRITE(GEN7_MISCCPCTL, misccpctl);

/*

@ -1922,6 +1922,17 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
return 0;
}

static void cleanup_phys_status_page(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = to_i915(ring->dev);

if (!dev_priv->status_page_dmah)
return;

drm_pci_free(ring->dev, dev_priv->status_page_dmah);
ring->status_page.page_addr = NULL;
}

static void cleanup_status_page(struct intel_engine_cs *ring)
{
struct drm_i915_gem_object *obj;
@ -1938,9 +1949,9 @@ static void cleanup_status_page(struct intel_engine_cs *ring)

static int init_status_page(struct intel_engine_cs *ring)
{
struct drm_i915_gem_object *obj;
struct drm_i915_gem_object *obj = ring->status_page.obj;

if ((obj = ring->status_page.obj) == NULL) {
if (obj == NULL) {
unsigned flags;
int ret;

@ -2134,7 +2145,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
if (ret)
goto error;
} else {
BUG_ON(ring->id != RCS);
WARN_ON(ring->id != RCS);
ret = init_phys_status_page(ring);
if (ret)
goto error;
@ -2179,7 +2190,12 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
if (ring->cleanup)
ring->cleanup(ring);

if (I915_NEED_GFX_HWS(ring->dev)) {
cleanup_status_page(ring);
} else {
WARN_ON(ring->id != RCS);
cleanup_phys_status_page(ring);
}

i915_cmd_parser_fini_ring(ring);
i915_gem_batch_pool_fini(&ring->batch_pool);
@ -2341,11 +2357,11 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
if (unlikely(total_bytes > remain_usable)) {
/*
* The base request will fit but the reserved space
* falls off the end. So only need to to wait for the
* reserved size after flushing out the remainder.
* falls off the end. So don't need an immediate wrap
* and only need to effectively wait for the reserved
* size space from the start of ringbuffer.
*/
wait_bytes = remain_actual + ringbuf->reserved_size;
need_wrap = true;
} else if (total_bytes > ringbuf->space) {
/* No wrapping required, just waiting. */
wait_bytes = total_bytes;

@ -200,7 +200,7 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
const struct drm_intel_sprite_colorkey *key =
&to_intel_plane_state(drm_plane->state)->ckey;
unsigned long surf_addr;
u32 surf_addr;
u32 tile_height, plane_offset, plane_size;
unsigned int rotation;
int x_offset, y_offset;

@ -1134,6 +1134,10 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
dev_priv->uncore.funcs.force_wake_get =
fw_domains_get_with_thread_status;
if (IS_HASWELL(dev))
dev_priv->uncore.funcs.force_wake_put =
fw_domains_put_with_fifo;
else
dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE_MT, FORCEWAKE_ACK_HSW);

@ -5,6 +5,7 @@
#include <uapi/drm/drm.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include <drm/drm_plane_helper.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/pci.h>
@ -1016,7 +1017,7 @@ int i915_mask_update_ex(struct drm_device *dev, void *data,
struct drm_i915_mask_update *mask = data;
struct drm_gem_object *obj;
static unsigned int mask_seqno[256];
static warn_count;
static int warn_count;

rect_t win;
u32 winw,winh;

@ -8,13 +8,15 @@
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/pci.h>
#include <linux/dmi.h>
#include "../drm_internal.h"

#include "getopt.h"

#include "bitmap.h"
#include "i915_kos32.h"

#define DRV_NAME "i915 v4.4.5"
#define DRV_NAME "i915 v4.4.30"

#define I915_DEV_CLOSE 0
#define I915_DEV_INIT 1
@ -24,6 +26,16 @@ static int my_atoi(char **cmd);
static char* parse_mode(char *p, videomode_t *mode);
void cpu_detect1();
int kmap_init();
int fake_framebuffer_create();
int i915_init(void);
int init_display_kms(struct drm_device *dev, videomode_t *usermode);
int get_videomodes(videomode_t *mode, int *count);
int set_user_mode(videomode_t *mode);
int i915_fbinfo(struct drm_i915_fb_info *fb);
int i915_mask_update_ex(struct drm_device *dev, void *data,
struct drm_file *file);


unsigned long volatile jiffies;
int oops_in_progress;
@ -534,7 +546,7 @@ int _stdcall display_handler(ioctl_t *io)
break;

case SRV_FBINFO:
retval = i915_fbinfo(inp);
retval = i915_fbinfo((struct drm_i915_fb_info*)inp);
break;

case SRV_MASK_UPDATE:

@ -1,5 +1,7 @@
#include <ddk.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
@ -175,23 +177,27 @@ void *memchr_inv(const void *start, int c, size_t bytes)
}


int dma_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, int dir)
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction direction)
{
struct scatterlist *s;
int i;

for_each_sg(sglist, s, nelems, i) {
for_each_sg(sg, s, nents, i) {
s->dma_address = (dma_addr_t)sg_phys(s);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
s->dma_length = s->length;
#endif
}

return nelems;
return nents;
}

void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
enum dma_data_direction direction)
{
};


#define _U 0x01 /* upper */
@ -261,98 +267,6 @@ void *kmemdup(const void *src, size_t len, gfp_t gfp)
}


#define KMAP_MAX 256

static struct mutex kmap_mutex;
static struct page* kmap_table[KMAP_MAX];
static int kmap_av;
static int kmap_first;
static void* kmap_base;


int kmap_init()
{
kmap_base = AllocKernelSpace(KMAP_MAX*4096);
if(kmap_base == NULL)
return -1;

kmap_av = KMAP_MAX;
MutexInit(&kmap_mutex);
return 0;
};

void *kmap(struct page *page)
{
void *vaddr = NULL;
int i;

do
{
MutexLock(&kmap_mutex);
if(kmap_av != 0)
{
for(i = kmap_first; i < KMAP_MAX; i++)
{
if(kmap_table[i] == NULL)
{
kmap_av--;
kmap_first = i;
kmap_table[i] = page;
vaddr = kmap_base + (i<<12);
MapPage(vaddr,(addr_t)page,3);
break;
};
};
};
MutexUnlock(&kmap_mutex);
}while(vaddr == NULL);

return vaddr;
};

void *kmap_atomic(struct page *page) __attribute__ ((alias ("kmap")));

void kunmap(struct page *page)
{
void *vaddr;
int i;

MutexLock(&kmap_mutex);

for(i = 0; i < KMAP_MAX; i++)
{
if(kmap_table[i] == page)
{
kmap_av++;
if(i < kmap_first)
kmap_first = i;
kmap_table[i] = NULL;
vaddr = kmap_base + (i<<12);
MapPage(vaddr,0,0);
break;
};
};

MutexUnlock(&kmap_mutex);
};

void kunmap_atomic(void *vaddr)
{
int i;

MapPage(vaddr,0,0);

i = (vaddr - kmap_base) >> 12;

MutexLock(&kmap_mutex);

kmap_av++;
if(i < kmap_first)
kmap_first = i;
kmap_table[i] = NULL;

MutexUnlock(&kmap_mutex);
}

void msleep(unsigned int msecs)
{
@ -684,7 +598,7 @@ int split_cmdline(char *cmdline, char **argv)
};


fb_get_options(const char *name, char **option)
int fb_get_options(const char *name, char **option)
{
char *opt, *options = NULL;
int retval = 1;