Fixed MTP to work with TWRP

awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions

drivers/gpu/drm/i915/Kconfig Normal file

@@ -0,0 +1,71 @@
config DRM_I915
tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
depends on DRM
depends on X86 && PCI
depends on (AGP || AGP=n)
select INTEL_GTT
select AGP_INTEL if AGP
select INTERVAL_TREE
# we need shmfs for the swappable backing store, and in particular
# the shmem_readpage() which depends upon tmpfs
select SHMEM
select TMPFS
select DRM_KMS_HELPER
# i915 depends on ACPI_VIDEO when ACPI is enabled
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
select BACKLIGHT_LCD_SUPPORT if ACPI
select BACKLIGHT_CLASS_DEVICE if ACPI
select INPUT if ACPI
select ACPI_VIDEO if ACPI
select ACPI_BUTTON if ACPI
help
Choose this option if you have a system that has "Intel Graphics
Media Accelerator" or "HD Graphics" integrated graphics,
including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
Core i5, Core i7 as well as Atom CPUs with integrated graphics.
If M is selected, the module will be called i915. AGP support
is required for this driver to work. This driver is used by
the Intel driver in X.org 6.8 and XFree86 4.4 and above. It
replaces the older i830 module that supported a subset of the
hardware in older X.org releases.
Note that the older i810/i815 chipsets require the use of the
i810 driver instead, and the Atom z5xx series has an entirely
different implementation.
config DRM_I915_KMS
bool "Enable modesetting on intel by default"
depends on DRM_I915
default y
help
Choose this option if you want kernel modesetting enabled by default.
If in doubt, say "Y".
config DRM_I915_FBDEV
bool "Enable legacy fbdev support for the modesetting intel driver"
depends on DRM_I915
select DRM_KMS_FB_HELPER
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
default y
help
Choose this option if you have a need for the legacy fbdev
support. Note that this support also provide the linux console
support on top of the intel modesetting driver.
If in doubt, say "Y".
config DRM_I915_PRELIMINARY_HW_SUPPORT
bool "Enable preliminary support for prerelease Intel hardware by default"
depends on DRM_I915
default n
help
Choose this option if you have prerelease Intel hardware and want the
i915 driver to support it by default. You can enable such support at
runtime with the module option i915.preliminary_hw_support=1; this
option changes the default for that module option.
If in doubt, say "N".
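
A hedged sketch of how a Kconfig default like this one typically becomes a module-option default (the real plumbing lives in i915_params.c, listed in the Makefile below, and may differ in detail):

#include <linux/module.h>

/* Sketch only: the Kconfig bool sets the compiled-in default; booting with
* i915.preliminary_hw_support=1 (or the same option via modprobe) overrides
* it at load time. */
static int preliminary_hw_support =
IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT);
module_param(preliminary_hw_support, int, 0600);
MODULE_PARM_DESC(preliminary_hw_support,
"Enable preliminary support for prerelease hardware");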

drivers/gpu/drm/i915/Makefile Normal file

@@ -0,0 +1,82 @@
#
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
ccflags-y := -Iinclude/drm
# Please keep these build lists sorted!
# core driver code
i915-y := i915_drv.o \
i915_params.o \
i915_suspend.o \
i915_sysfs.o \
intel_pm.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o
i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
# GEM code
i915-y += i915_cmd_parser.o \
i915_gem_context.o \
i915_gem_render_state.o \
i915_gem_debug.o \
i915_gem_dmabuf.o \
i915_gem_evict.o \
i915_gem_execbuffer.o \
i915_gem_gtt.o \
i915_gem.o \
i915_gem_stolen.o \
i915_gem_tiling.o \
i915_gem_userptr.o \
i915_gpu_error.o \
i915_irq.o \
i915_trace_points.o \
intel_lrc.o \
intel_ringbuffer.o \
intel_uncore.o
# autogenerated null render state
i915-y += intel_renderstate_gen6.o \
intel_renderstate_gen7.o \
intel_renderstate_gen8.o
# modesetting core code
i915-y += intel_bios.o \
intel_display.o \
intel_modes.o \
intel_overlay.o \
intel_sideband.o \
intel_sprite.o
i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o
i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o
# modesetting output/encoder code
i915-y += dvo_ch7017.o \
dvo_ch7xxx.o \
dvo_ivch.o \
dvo_ns2501.o \
dvo_sil164.o \
dvo_tfp410.o \
intel_crt.o \
intel_ddi.o \
intel_dp.o \
intel_dp_mst.o \
intel_dsi_cmd.o \
intel_dsi.o \
intel_dsi_pll.o \
intel_dsi_panel_vbt.o \
intel_dvo.o \
intel_hdmi.o \
intel_i2c.o \
intel_lvds.o \
intel_panel.o \
intel_sdvo.o \
intel_tv.o
# legacy horrors
i915-y += i915_dma.o \
i915_ums.o
obj-$(CONFIG_DRM_I915) += i915.o
CFLAGS_i915_trace_points.o := -I$(src)

drivers/gpu/drm/i915/dvo.h Normal file

@@ -0,0 +1,138 @@
/*
* Copyright © 2006 Eric Anholt
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/
#ifndef _INTEL_DVO_H
#define _INTEL_DVO_H
#include <linux/i2c.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include "intel_drv.h"
struct intel_dvo_device {
const char *name;
int type;
/* DVOA/B/C output register */
u32 dvo_reg;
/* GPIO register used for i2c bus to control this device */
u32 gpio;
int slave_addr;
const struct intel_dvo_dev_ops *dev_ops;
void *dev_priv;
struct i2c_adapter *i2c_bus;
};
struct intel_dvo_dev_ops {
/*
* Initialize the device at startup time.
* Returns false if the device does not exist.
*/
bool (*init)(struct intel_dvo_device *dvo,
struct i2c_adapter *i2cbus);
/*
* Called to allow the output a chance to create properties after the
* RandR objects have been created.
*/
void (*create_resources)(struct intel_dvo_device *dvo);
/*
* Turn on/off output.
*
* Because none of our dvo drivers support intermediate power levels,
* we don't expose this in the interface.
*/
void (*dpms)(struct intel_dvo_device *dvo, bool enable);
/*
* Callback for testing a video mode for a given output.
*
* This function should only check for cases where a mode can't
* be supported on the output specifically, and not represent
* generic CRTC limitations.
*
* \return MODE_OK if the mode is valid, or another MODE_* otherwise.
*/
int (*mode_valid)(struct intel_dvo_device *dvo,
struct drm_display_mode *mode);
/*
* Callback for preparing mode changes on an output
*/
void (*prepare)(struct intel_dvo_device *dvo);
/*
* Callback for committing mode changes on an output
*/
void (*commit)(struct intel_dvo_device *dvo);
/*
* Callback for setting up a video mode after fixups have been made.
*
* This is only called while the output is disabled. The dpms callback
* must be all that's necessary to turn the output on
* after this function is called.
*/
void (*mode_set)(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
/*
* Probe for a connected output, and return detect_status.
*/
enum drm_connector_status (*detect)(struct intel_dvo_device *dvo);
/*
* Probe the current hw status, returning true if the connected output
* is active.
*/
bool (*get_hw_state)(struct intel_dvo_device *dev);
/**
* Query the device for the modes it provides.
*
* This function may also update MonInfo, mm_width, and mm_height.
*
* \return singly-linked list of modes or NULL if no modes found.
*/
struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
/**
* Clean up driver-specific bits of the output
*/
void (*destroy) (struct intel_dvo_device *dvo);
/**
* Debugging hook to dump device registers to log file
*/
void (*dump_regs)(struct intel_dvo_device *dvo);
};
extern struct intel_dvo_dev_ops sil164_ops;
extern struct intel_dvo_dev_ops ch7xxx_ops;
extern struct intel_dvo_dev_ops ivch_ops;
extern struct intel_dvo_dev_ops tfp410_ops;
extern struct intel_dvo_dev_ops ch7017_ops;
extern struct intel_dvo_dev_ops ns2501_ops;
#endif /* _INTEL_DVO_H */
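
A minimal sketch (not the actual intel_dvo.c call site) of the order in which these hooks are meant to be driven for a single modeset; error handling and the prepare/commit bracketing are omitted:

/* Hypothetical caller, for illustration only. */
static void example_dvo_modeset(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
if (dvo->dev_ops->mode_valid(dvo, mode) != MODE_OK)
return;
dvo->dev_ops->dpms(dvo, false); /* mode_set requires a disabled output */
dvo->dev_ops->mode_set(dvo, mode, adjusted_mode);
dvo->dev_ops->dpms(dvo, true); /* dpms alone must turn the output back on */
}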

drivers/gpu/drm/i915/dvo_ch7017.c Normal file

@@ -0,0 +1,414 @@
/*
* Copyright © 2006 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
#include "dvo.h"
#define CH7017_TV_DISPLAY_MODE 0x00
#define CH7017_FLICKER_FILTER 0x01
#define CH7017_VIDEO_BANDWIDTH 0x02
#define CH7017_TEXT_ENHANCEMENT 0x03
#define CH7017_START_ACTIVE_VIDEO 0x04
#define CH7017_HORIZONTAL_POSITION 0x05
#define CH7017_VERTICAL_POSITION 0x06
#define CH7017_BLACK_LEVEL 0x07
#define CH7017_CONTRAST_ENHANCEMENT 0x08
#define CH7017_TV_PLL 0x09
#define CH7017_TV_PLL_M 0x0a
#define CH7017_TV_PLL_N 0x0b
#define CH7017_SUB_CARRIER_0 0x0c
#define CH7017_CIV_CONTROL 0x10
#define CH7017_CIV_0 0x11
#define CH7017_CHROMA_BOOST 0x14
#define CH7017_CLOCK_MODE 0x1c
#define CH7017_INPUT_CLOCK 0x1d
#define CH7017_GPIO_CONTROL 0x1e
#define CH7017_INPUT_DATA_FORMAT 0x1f
#define CH7017_CONNECTION_DETECT 0x20
#define CH7017_DAC_CONTROL 0x21
#define CH7017_BUFFERED_CLOCK_OUTPUT 0x22
#define CH7017_DEFEAT_VSYNC 0x47
#define CH7017_TEST_PATTERN 0x48
#define CH7017_POWER_MANAGEMENT 0x49
/** Enables the TV output path. */
#define CH7017_TV_EN (1 << 0)
#define CH7017_DAC0_POWER_DOWN (1 << 1)
#define CH7017_DAC1_POWER_DOWN (1 << 2)
#define CH7017_DAC2_POWER_DOWN (1 << 3)
#define CH7017_DAC3_POWER_DOWN (1 << 4)
/** Powers down the TV out block, and DAC0-3 */
#define CH7017_TV_POWER_DOWN_EN (1 << 5)
#define CH7017_VERSION_ID 0x4a
#define CH7017_DEVICE_ID 0x4b
#define CH7017_DEVICE_ID_VALUE 0x1b
#define CH7018_DEVICE_ID_VALUE 0x1a
#define CH7019_DEVICE_ID_VALUE 0x19
#define CH7017_XCLK_D2_ADJUST 0x53
#define CH7017_UP_SCALER_COEFF_0 0x55
#define CH7017_UP_SCALER_COEFF_1 0x56
#define CH7017_UP_SCALER_COEFF_2 0x57
#define CH7017_UP_SCALER_COEFF_3 0x58
#define CH7017_UP_SCALER_COEFF_4 0x59
#define CH7017_UP_SCALER_VERTICAL_INC_0 0x5a
#define CH7017_UP_SCALER_VERTICAL_INC_1 0x5b
#define CH7017_GPIO_INVERT 0x5c
#define CH7017_UP_SCALER_HORIZONTAL_INC_0 0x5d
#define CH7017_UP_SCALER_HORIZONTAL_INC_1 0x5e
#define CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT 0x5f
/**< Low bits of horizontal active pixel input */
#define CH7017_ACTIVE_INPUT_LINE_OUTPUT 0x60
/** High bits of horizontal active pixel input */
#define CH7017_LVDS_HAP_INPUT_MASK (0x7 << 0)
/** High bits of vertical active line output */
#define CH7017_LVDS_VAL_HIGH_MASK (0x7 << 3)
#define CH7017_VERTICAL_ACTIVE_LINE_OUTPUT 0x61
/**< Low bits of vertical active line output */
#define CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT 0x62
/**< Low bits of horizontal active pixel output */
#define CH7017_LVDS_POWER_DOWN 0x63
/** High bits of horizontal active pixel output */
#define CH7017_LVDS_HAP_HIGH_MASK (0x7 << 0)
/** Enables the LVDS power down state transition */
#define CH7017_LVDS_POWER_DOWN_EN (1 << 6)
/** Enables the LVDS upscaler */
#define CH7017_LVDS_UPSCALER_EN (1 << 7)
#define CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED 0x08
#define CH7017_LVDS_ENCODING 0x64
#define CH7017_LVDS_DITHER_2D (1 << 2)
#define CH7017_LVDS_DITHER_DIS (1 << 3)
#define CH7017_LVDS_DUAL_CHANNEL_EN (1 << 4)
#define CH7017_LVDS_24_BIT (1 << 5)
#define CH7017_LVDS_ENCODING_2 0x65
#define CH7017_LVDS_PLL_CONTROL 0x66
/** Enables the LVDS panel output path */
#define CH7017_LVDS_PANEN (1 << 0)
/** Enables the LVDS panel backlight */
#define CH7017_LVDS_BKLEN (1 << 3)
#define CH7017_POWER_SEQUENCING_T1 0x67
#define CH7017_POWER_SEQUENCING_T2 0x68
#define CH7017_POWER_SEQUENCING_T3 0x69
#define CH7017_POWER_SEQUENCING_T4 0x6a
#define CH7017_POWER_SEQUENCING_T5 0x6b
#define CH7017_GPIO_DRIVER_TYPE 0x6c
#define CH7017_GPIO_DATA 0x6d
#define CH7017_GPIO_DIRECTION_CONTROL 0x6e
#define CH7017_LVDS_PLL_FEEDBACK_DIV 0x71
# define CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT 4
# define CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT 0
# define CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED 0x80
#define CH7017_LVDS_PLL_VCO_CONTROL 0x72
# define CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED 0x80
# define CH7017_LVDS_PLL_VCO_SHIFT 4
# define CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT 0
#define CH7017_OUTPUTS_ENABLE 0x73
# define CH7017_CHARGE_PUMP_LOW 0x0
# define CH7017_CHARGE_PUMP_HIGH 0x3
# define CH7017_LVDS_CHANNEL_A (1 << 3)
# define CH7017_LVDS_CHANNEL_B (1 << 4)
# define CH7017_TV_DAC_A (1 << 5)
# define CH7017_TV_DAC_B (1 << 6)
# define CH7017_DDC_SELECT_DC2 (1 << 7)
#define CH7017_LVDS_OUTPUT_AMPLITUDE 0x74
#define CH7017_LVDS_PLL_EMI_REDUCTION 0x75
#define CH7017_LVDS_POWER_DOWN_FLICKER 0x76
#define CH7017_LVDS_CONTROL_2 0x78
# define CH7017_LOOP_FILTER_SHIFT 5
# define CH7017_PHASE_DETECTOR_SHIFT 0
#define CH7017_BANG_LIMIT_CONTROL 0x7f
struct ch7017_priv {
uint8_t dummy;
};
static void ch7017_dump_regs(struct intel_dvo_device *dvo);
static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable);
static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
{
struct i2c_msg msgs[] = {
{
.addr = dvo->slave_addr,
.flags = 0,
.len = 1,
.buf = &addr,
},
{
.addr = dvo->slave_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = val,
}
};
return i2c_transfer(dvo->i2c_bus, msgs, 2) == 2;
}
static bool ch7017_write(struct intel_dvo_device *dvo, u8 addr, u8 val)
{
uint8_t buf[2] = { addr, val };
struct i2c_msg msg = {
.addr = dvo->slave_addr,
.flags = 0,
.len = 2,
.buf = buf,
};
return i2c_transfer(dvo->i2c_bus, &msg, 1) == 1;
}
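/* Illustrative helper, not part of the original driver: a read-modify-write
* operation built from the two i2c primitives above. */
static bool ch7017_set_bits(struct intel_dvo_device *dvo, u8 addr, u8 bits)
{
u8 val;
if (!ch7017_read(dvo, addr, &val))
return false;
return ch7017_write(dvo, addr, val | bits);
}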
/** Probes for a CH7017 on the given bus and slave address. */
static bool ch7017_init(struct intel_dvo_device *dvo,
struct i2c_adapter *adapter)
{
struct ch7017_priv *priv;
const char *str;
u8 val;
priv = kzalloc(sizeof(struct ch7017_priv), GFP_KERNEL);
if (priv == NULL)
return false;
dvo->i2c_bus = adapter;
dvo->dev_priv = priv;
if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val))
goto fail;
switch (val) {
case CH7017_DEVICE_ID_VALUE:
str = "ch7017";
break;
case CH7018_DEVICE_ID_VALUE:
str = "ch7018";
break;
case CH7019_DEVICE_ID_VALUE:
str = "ch7019";
break;
default:
DRM_DEBUG_KMS("ch701x not detected, got %d: from %s "
"slave %d.\n",
val, adapter->name, dvo->slave_addr);
goto fail;
}
DRM_DEBUG_KMS("%s detected on %s, addr %d\n",
str, adapter->name, dvo->slave_addr);
return true;
fail:
kfree(priv);
return false;
}
static enum drm_connector_status ch7017_detect(struct intel_dvo_device *dvo)
{
return connector_status_connected;
}
static enum drm_mode_status ch7017_mode_valid(struct intel_dvo_device *dvo,
struct drm_display_mode *mode)
{
if (mode->clock > 160000)
return MODE_CLOCK_HIGH;
return MODE_OK;
}
static void ch7017_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
uint8_t lvds_pll_feedback_div, lvds_pll_vco_control;
uint8_t outputs_enable, lvds_control_2, lvds_power_down;
uint8_t horizontal_active_pixel_input;
uint8_t horizontal_active_pixel_output, vertical_active_line_output;
uint8_t active_input_line_output;
DRM_DEBUG_KMS("Registers before mode setting\n");
ch7017_dump_regs(dvo);
/* LVDS PLL settings from page 75 of 7017-7017ds.pdf */
if (mode->clock < 100000) {
outputs_enable = CH7017_LVDS_CHANNEL_A | CH7017_CHARGE_PUMP_LOW;
lvds_pll_feedback_div = CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED |
(2 << CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT) |
(13 << CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT);
lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED |
(2 << CH7017_LVDS_PLL_VCO_SHIFT) |
(3 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT);
lvds_control_2 = (1 << CH7017_LOOP_FILTER_SHIFT) |
(0 << CH7017_PHASE_DETECTOR_SHIFT);
} else {
outputs_enable = CH7017_LVDS_CHANNEL_A | CH7017_CHARGE_PUMP_HIGH;
lvds_pll_feedback_div = CH7017_LVDS_PLL_FEEDBACK_DEFAULT_RESERVED |
(2 << CH7017_LVDS_PLL_FEED_BACK_DIVIDER_SHIFT) |
(3 << CH7017_LVDS_PLL_FEED_FORWARD_DIVIDER_SHIFT);
lvds_pll_feedback_div = 35; /* XXX: overrides the value just computed above */
lvds_control_2 = (3 << CH7017_LOOP_FILTER_SHIFT) |
(0 << CH7017_PHASE_DETECTOR_SHIFT);
if (1) { /* XXX: dual channel panel detection. Assume yes for now. */
outputs_enable |= CH7017_LVDS_CHANNEL_B;
lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED |
(2 << CH7017_LVDS_PLL_VCO_SHIFT) |
(13 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT);
} else {
lvds_pll_vco_control = CH7017_LVDS_PLL_VCO_DEFAULT_RESERVED |
(1 << CH7017_LVDS_PLL_VCO_SHIFT) |
(13 << CH7017_LVDS_PLL_POST_SCALE_DIV_SHIFT);
}
}
horizontal_active_pixel_input = mode->hdisplay & 0x00ff;
vertical_active_line_output = mode->vdisplay & 0x00ff;
horizontal_active_pixel_output = mode->hdisplay & 0x00ff;
active_input_line_output = ((mode->hdisplay & 0x0700) >> 8) |
(((mode->vdisplay & 0x0700) >> 8) << 3);
lvds_power_down = CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED |
(mode->hdisplay & 0x0700) >> 8;
ch7017_dpms(dvo, false);
ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT,
horizontal_active_pixel_input);
ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT,
horizontal_active_pixel_output);
ch7017_write(dvo, CH7017_VERTICAL_ACTIVE_LINE_OUTPUT,
vertical_active_line_output);
ch7017_write(dvo, CH7017_ACTIVE_INPUT_LINE_OUTPUT,
active_input_line_output);
ch7017_write(dvo, CH7017_LVDS_PLL_VCO_CONTROL, lvds_pll_vco_control);
ch7017_write(dvo, CH7017_LVDS_PLL_FEEDBACK_DIV, lvds_pll_feedback_div);
ch7017_write(dvo, CH7017_LVDS_CONTROL_2, lvds_control_2);
ch7017_write(dvo, CH7017_OUTPUTS_ENABLE, outputs_enable);
/* Turn the LVDS back on with new settings. */
ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, lvds_power_down);
DRM_DEBUG_KMS("Registers after mode setting\n");
ch7017_dump_regs(dvo);
}
/* set the CH7017 power state */
static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable)
{
uint8_t val;
ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
/* Turn off TV/VGA, and never turn it on since we don't support it. */
ch7017_write(dvo, CH7017_POWER_MANAGEMENT,
CH7017_DAC0_POWER_DOWN |
CH7017_DAC1_POWER_DOWN |
CH7017_DAC2_POWER_DOWN |
CH7017_DAC3_POWER_DOWN |
CH7017_TV_POWER_DOWN_EN);
if (enable) {
/* Turn on the LVDS */
ch7017_write(dvo, CH7017_LVDS_POWER_DOWN,
val & ~CH7017_LVDS_POWER_DOWN_EN);
} else {
/* Turn off the LVDS */
ch7017_write(dvo, CH7017_LVDS_POWER_DOWN,
val | CH7017_LVDS_POWER_DOWN_EN);
}
/* XXX: Should actually wait for the power status to update somehow */
msleep(20);
}
static bool ch7017_get_hw_state(struct intel_dvo_device *dvo)
{
uint8_t val;
ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
if (val & CH7017_LVDS_POWER_DOWN_EN)
return false;
else
return true;
}
static void ch7017_dump_regs(struct intel_dvo_device *dvo)
{
uint8_t val;
#define DUMP(reg) \
do { \
ch7017_read(dvo, reg, &val); \
DRM_DEBUG_KMS(#reg ": %02x\n", val); \
} while (0)
DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT);
DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT);
DUMP(CH7017_VERTICAL_ACTIVE_LINE_OUTPUT);
DUMP(CH7017_ACTIVE_INPUT_LINE_OUTPUT);
DUMP(CH7017_LVDS_PLL_VCO_CONTROL);
DUMP(CH7017_LVDS_PLL_FEEDBACK_DIV);
DUMP(CH7017_LVDS_CONTROL_2);
DUMP(CH7017_OUTPUTS_ENABLE);
DUMP(CH7017_LVDS_POWER_DOWN);
}
static void ch7017_destroy(struct intel_dvo_device *dvo)
{
struct ch7017_priv *priv = dvo->dev_priv;
if (priv) {
kfree(priv);
dvo->dev_priv = NULL;
}
}
struct intel_dvo_dev_ops ch7017_ops = {
.init = ch7017_init,
.detect = ch7017_detect,
.mode_valid = ch7017_mode_valid,
.mode_set = ch7017_mode_set,
.dpms = ch7017_dpms,
.get_hw_state = ch7017_get_hw_state,
.dump_regs = ch7017_dump_regs,
.destroy = ch7017_destroy,
};
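/* For context, a hedged sketch of how intel_dvo.c plausibly wires this
* driver up; the slave address here is illustrative, and the real table
* also selects the DVO port register and the i2c GPIO pair. */
static const struct intel_dvo_device ch7017_board_example = {
.type = INTEL_DVO_CHIP_LVDS,
.name = "ch7017",
.slave_addr = 0x75,
.dev_ops = &ch7017_ops,
};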

drivers/gpu/drm/i915/dvo_ch7xxx.c Normal file

@@ -0,0 +1,368 @@
/**************************************************************************
Copyright © 2006 Dave Airlie
All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sub license, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
#include "dvo.h"
#define CH7xxx_REG_VID 0x4a
#define CH7xxx_REG_DID 0x4b
#define CH7011_VID 0x83 /* 7010 as well */
#define CH7010B_VID 0x05
#define CH7009A_VID 0x84
#define CH7009B_VID 0x85
#define CH7301_VID 0x95
#define CH7xxx_VID 0x84
#define CH7xxx_DID 0x17
#define CH7010_DID 0x16
#define CH7xxx_NUM_REGS 0x4c
#define CH7xxx_CM 0x1c
#define CH7xxx_CM_XCM (1<<0)
#define CH7xxx_CM_MCP (1<<2)
#define CH7xxx_INPUT_CLOCK 0x1d
#define CH7xxx_GPIO 0x1e
#define CH7xxx_GPIO_HPIR (1<<3)
#define CH7xxx_IDF 0x1f
#define CH7xxx_IDF_HSP (1<<3)
#define CH7xxx_IDF_VSP (1<<4)
#define CH7xxx_CONNECTION_DETECT 0x20
#define CH7xxx_CDET_DVI (1<<5)
#define CH7301_DAC_CNTL 0x21
#define CH7301_HOTPLUG 0x23
#define CH7xxx_TCTL 0x31
#define CH7xxx_TVCO 0x32
#define CH7xxx_TPCP 0x33
#define CH7xxx_TPD 0x34
#define CH7xxx_TPVT 0x35
#define CH7xxx_TLPF 0x36
#define CH7xxx_TCT 0x37
#define CH7301_TEST_PATTERN 0x48
#define CH7xxx_PM 0x49
#define CH7xxx_PM_FPD (1<<0)
#define CH7301_PM_DACPD0 (1<<1)
#define CH7301_PM_DACPD1 (1<<2)
#define CH7301_PM_DACPD2 (1<<3)
#define CH7xxx_PM_DVIL (1<<6)
#define CH7xxx_PM_DVIP (1<<7)
#define CH7301_SYNC_POLARITY 0x56
#define CH7301_SYNC_RGB_YUV (1<<0)
#define CH7301_SYNC_POL_DVI (1<<5)
/** @file
* driver for the Chrontel 7xxx DVI chip over DVO.
*/
static struct ch7xxx_id_struct {
uint8_t vid;
char *name;
} ch7xxx_ids[] = {
{ CH7011_VID, "CH7011" },
{ CH7010B_VID, "CH7010B" },
{ CH7009A_VID, "CH7009A" },
{ CH7009B_VID, "CH7009B" },
{ CH7301_VID, "CH7301" },
};
static struct ch7xxx_did_struct {
uint8_t did;
char *name;
} ch7xxx_dids[] = {
{ CH7xxx_DID, "CH7XXX" },
{ CH7010_DID, "CH7010B" },
};
struct ch7xxx_priv {
bool quiet;
};
static char *ch7xxx_get_id(uint8_t vid)
{
int i;
for (i = 0; i < ARRAY_SIZE(ch7xxx_ids); i++) {
if (ch7xxx_ids[i].vid == vid)
return ch7xxx_ids[i].name;
}
return NULL;
}
static char *ch7xxx_get_did(uint8_t did)
{
int i;
for (i = 0; i < ARRAY_SIZE(ch7xxx_dids); i++) {
if (ch7xxx_dids[i].did == did)
return ch7xxx_dids[i].name;
}
return NULL;
}
/** Reads an 8 bit register */
static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
u8 out_buf[2];
u8 in_buf[2];
struct i2c_msg msgs[] = {
{
.addr = dvo->slave_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
.addr = dvo->slave_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = in_buf,
}
};
out_buf[0] = addr;
out_buf[1] = 0;
if (i2c_transfer(adapter, msgs, 2) == 2) {
*ch = in_buf[0];
return true;
}
if (!ch7xxx->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
addr, adapter->name, dvo->slave_addr);
}
return false;
}
/** Writes an 8 bit register */
static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
uint8_t out_buf[2];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
};
out_buf[0] = addr;
out_buf[1] = ch;
if (i2c_transfer(adapter, &msg, 1) == 1)
return true;
if (!ch7xxx->quiet) {
DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
addr, adapter->name, dvo->slave_addr);
}
return false;
}
static bool ch7xxx_init(struct intel_dvo_device *dvo,
struct i2c_adapter *adapter)
{
/* this will detect the CH7xxx chip on the specified i2c bus */
struct ch7xxx_priv *ch7xxx;
uint8_t vendor, device;
char *name, *devid;
ch7xxx = kzalloc(sizeof(struct ch7xxx_priv), GFP_KERNEL);
if (ch7xxx == NULL)
return false;
dvo->i2c_bus = adapter;
dvo->dev_priv = ch7xxx;
ch7xxx->quiet = true;
if (!ch7xxx_readb(dvo, CH7xxx_REG_VID, &vendor))
goto out;
name = ch7xxx_get_id(vendor);
if (!name) {
DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
"slave %d.\n",
vendor, adapter->name, dvo->slave_addr);
goto out;
}
if (!ch7xxx_readb(dvo, CH7xxx_REG_DID, &device))
goto out;
devid = ch7xxx_get_did(device);
if (!devid) {
DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
"slave %d.\n",
device, adapter->name, dvo->slave_addr);
goto out;
}
ch7xxx->quiet = false;
DRM_DEBUG_KMS("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n",
name, vendor, device);
return true;
out:
kfree(ch7xxx);
return false;
}
static enum drm_connector_status ch7xxx_detect(struct intel_dvo_device *dvo)
{
uint8_t cdet, orig_pm, pm;
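/* Briefly force the DVI outputs on so the connection-detect bit is
* meaningful, sample it, then restore the original power state. */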
ch7xxx_readb(dvo, CH7xxx_PM, &orig_pm);
pm = orig_pm;
pm &= ~CH7xxx_PM_FPD;
pm |= CH7xxx_PM_DVIL | CH7xxx_PM_DVIP;
ch7xxx_writeb(dvo, CH7xxx_PM, pm);
ch7xxx_readb(dvo, CH7xxx_CONNECTION_DETECT, &cdet);
ch7xxx_writeb(dvo, CH7xxx_PM, orig_pm);
if (cdet & CH7xxx_CDET_DVI)
return connector_status_connected;
return connector_status_disconnected;
}
static enum drm_mode_status ch7xxx_mode_valid(struct intel_dvo_device *dvo,
struct drm_display_mode *mode)
{
if (mode->clock > 165000)
return MODE_CLOCK_HIGH;
return MODE_OK;
}
static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
uint8_t tvco, tpcp, tpd, tlpf, idf;
if (mode->clock <= 65000) {
tvco = 0x23;
tpcp = 0x08;
tpd = 0x16;
tlpf = 0x60;
} else {
tvco = 0x2d;
tpcp = 0x06;
tpd = 0x26;
tlpf = 0xa0;
}
ch7xxx_writeb(dvo, CH7xxx_TCTL, 0x00);
ch7xxx_writeb(dvo, CH7xxx_TVCO, tvco);
ch7xxx_writeb(dvo, CH7xxx_TPCP, tpcp);
ch7xxx_writeb(dvo, CH7xxx_TPD, tpd);
ch7xxx_writeb(dvo, CH7xxx_TPVT, 0x30);
ch7xxx_writeb(dvo, CH7xxx_TLPF, tlpf);
ch7xxx_writeb(dvo, CH7xxx_TCT, 0x00);
ch7xxx_readb(dvo, CH7xxx_IDF, &idf);
idf &= ~(CH7xxx_IDF_HSP | CH7xxx_IDF_VSP);
if (mode->flags & DRM_MODE_FLAG_PHSYNC)
idf |= CH7xxx_IDF_HSP;
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
idf |= CH7xxx_IDF_VSP;
ch7xxx_writeb(dvo, CH7xxx_IDF, idf);
}
/* set the CH7xxx power state */
static void ch7xxx_dpms(struct intel_dvo_device *dvo, bool enable)
{
if (enable)
ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_DVIL | CH7xxx_PM_DVIP);
else
ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_FPD);
}
static bool ch7xxx_get_hw_state(struct intel_dvo_device *dvo)
{
u8 val;
ch7xxx_readb(dvo, CH7xxx_PM, &val);
if (val & (CH7xxx_PM_DVIL | CH7xxx_PM_DVIP))
return true;
else
return false;
}
static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
{
int i;
for (i = 0; i < CH7xxx_NUM_REGS; i++) {
uint8_t val;
if ((i % 8) == 0)
DRM_DEBUG_KMS("\n %02X: ", i);
ch7xxx_readb(dvo, i, &val);
DRM_DEBUG_KMS("%02X ", val);
}
}
static void ch7xxx_destroy(struct intel_dvo_device *dvo)
{
struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
if (ch7xxx) {
kfree(ch7xxx);
dvo->dev_priv = NULL;
}
}
struct intel_dvo_dev_ops ch7xxx_ops = {
.init = ch7xxx_init,
.detect = ch7xxx_detect,
.mode_valid = ch7xxx_mode_valid,
.mode_set = ch7xxx_mode_set,
.dpms = ch7xxx_dpms,
.get_hw_state = ch7xxx_get_hw_state,
.dump_regs = ch7xxx_dump_regs,
.destroy = ch7xxx_destroy,
};

drivers/gpu/drm/i915/dvo_ivch.c Normal file

@@ -0,0 +1,436 @@
/*
* Copyright © 2006 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
#include "dvo.h"
/*
* register definitions for the i82807aa.
*
* Documentation on this chipset can be found in datasheet #29069001 at
* intel.com.
*/
/*
* VCH Revision & GMBus Base Addr
*/
#define VR00 0x00
# define VR00_BASE_ADDRESS_MASK 0x007f
/*
* Functionality Enable
*/
#define VR01 0x01
/*
* Enable the panel fitter
*/
# define VR01_PANEL_FIT_ENABLE (1 << 3)
/*
* Enables the LCD display.
*
* This must not be set while VR01_DVO_BYPASS_ENABLE is set.
*/
# define VR01_LCD_ENABLE (1 << 2)
/** Enables the DVO repeater. */
# define VR01_DVO_BYPASS_ENABLE (1 << 1)
/** Enables the DVO clock */
# define VR01_DVO_ENABLE (1 << 0)
/*
* LCD Interface Format
*/
#define VR10 0x10
/** Enables LVDS output instead of CMOS */
# define VR10_LVDS_ENABLE (1 << 4)
/** Enables 18-bit LVDS output. */
# define VR10_INTERFACE_1X18 (0 << 2)
/** Enables 24-bit LVDS or CMOS output */
# define VR10_INTERFACE_1X24 (1 << 2)
/** Enables 2x18-bit LVDS or CMOS output. */
# define VR10_INTERFACE_2X18 (2 << 2)
/** Enables 2x24-bit LVDS output */
# define VR10_INTERFACE_2X24 (3 << 2)
/*
* VR20 LCD Horizontal Display Size
*/
#define VR20 0x20
/*
* LCD Vertical Display Size
*/
#define VR21 0x21
/*
* Panel power down status
*/
#define VR30 0x30
/** Read only bit indicating that the panel is not in a safe poweroff state. */
# define VR30_PANEL_ON (1 << 15)
#define VR40 0x40
# define VR40_STALL_ENABLE (1 << 13)
# define VR40_VERTICAL_INTERP_ENABLE (1 << 12)
# define VR40_ENHANCED_PANEL_FITTING (1 << 11)
# define VR40_HORIZONTAL_INTERP_ENABLE (1 << 10)
# define VR40_AUTO_RATIO_ENABLE (1 << 9)
# define VR40_CLOCK_GATING_ENABLE (1 << 8)
/*
* Panel Fitting Vertical Ratio
* (((image_height - 1) << 16) / ((panel_height - 1))) >> 2
*/
#define VR41 0x41
/*
* Panel Fitting Horizontal Ratio
* (((image_width - 1) << 16) / ((panel_width - 1))) >> 2
*/
#define VR42 0x42
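/* Worked example with illustrative numbers (not from the datasheet):
* fitting a 640-wide image onto a 1024-wide panel gives
* (((640 - 1) << 16) / (1024 - 1)) >> 2 = (41877504 / 1023) >> 2
* = 40935 >> 2 = 10233 (0x27f9),
* the value ivch_mode_set() below would program into VR42. */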
/*
* Horizontal Image Size
*/
#define VR43 0x43
/* VR80 GPIO 0
*/
#define VR80 0x80
#define VR81 0x81
#define VR82 0x82
#define VR83 0x83
#define VR84 0x84
#define VR85 0x85
#define VR86 0x86
#define VR87 0x87
/* VR88 GPIO 8
*/
#define VR88 0x88
/* Graphics BIOS scratch 0
*/
#define VR8E 0x8E
# define VR8E_PANEL_TYPE_MASK (0xf << 0)
# define VR8E_PANEL_INTERFACE_CMOS (0 << 4)
# define VR8E_PANEL_INTERFACE_LVDS (1 << 4)
# define VR8E_FORCE_DEFAULT_PANEL (1 << 5)
/* Graphics BIOS scratch 1
*/
#define VR8F 0x8F
# define VR8F_VCH_PRESENT (1 << 0)
# define VR8F_DISPLAY_CONN (1 << 1)
# define VR8F_POWER_MASK (0x3c)
# define VR8F_POWER_POS (2)
struct ivch_priv {
bool quiet;
uint16_t width, height;
};
static void ivch_dump_regs(struct intel_dvo_device *dvo);
/**
* Reads a register on the ivch.
*
* Each of the 256 registers are 16 bits long.
*/
static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
{
struct ivch_priv *priv = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
u8 out_buf[1];
u8 in_buf[2];
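/*
* Unusual addressing sequence, as encoded by the message list below: a
* zero-length read addressed to the chip, the register index shifted out
* with I2C_M_NOSTART, then a NOSTART read of the 16-bit little-endian
* register value.
*/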
struct i2c_msg msgs[] = {
{
.addr = dvo->slave_addr,
.flags = I2C_M_RD,
.len = 0,
},
{
.addr = 0,
.flags = I2C_M_NOSTART,
.len = 1,
.buf = out_buf,
},
{
.addr = dvo->slave_addr,
.flags = I2C_M_RD | I2C_M_NOSTART,
.len = 2,
.buf = in_buf,
}
};
out_buf[0] = addr;
if (i2c_transfer(adapter, msgs, 3) == 3) {
*data = (in_buf[1] << 8) | in_buf[0];
return true;
}
if (!priv->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from "
"%s:%02x.\n",
addr, adapter->name, dvo->slave_addr);
}
return false;
}
/** Writes a 16-bit register on the ivch */
static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
{
struct ivch_priv *priv = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
u8 out_buf[3];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
.flags = 0,
.len = 3,
.buf = out_buf,
};
out_buf[0] = addr;
out_buf[1] = data & 0xff;
out_buf[2] = data >> 8;
if (i2c_transfer(adapter, &msg, 1) == 1)
return true;
if (!priv->quiet) {
DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
addr, adapter->name, dvo->slave_addr);
}
return false;
}
/** Probes the given bus and slave address for an ivch */
static bool ivch_init(struct intel_dvo_device *dvo,
struct i2c_adapter *adapter)
{
struct ivch_priv *priv;
uint16_t temp;
priv = kzalloc(sizeof(struct ivch_priv), GFP_KERNEL);
if (priv == NULL)
return false;
dvo->i2c_bus = adapter;
dvo->dev_priv = priv;
priv->quiet = true;
if (!ivch_read(dvo, VR00, &temp))
goto out;
priv->quiet = false;
/* Since the identification bits are probably zeroes, which doesn't seem
* very unique, check that the value in the base address field matches
* the address it's responding on.
*/
if ((temp & VR00_BASE_ADDRESS_MASK) != dvo->slave_addr) {
DRM_DEBUG_KMS("ivch detect failed due to address mismatch "
"(%d vs %d)\n",
(temp & VR00_BASE_ADDRESS_MASK), dvo->slave_addr);
goto out;
}
ivch_read(dvo, VR20, &priv->width);
ivch_read(dvo, VR21, &priv->height);
return true;
out:
kfree(priv);
return false;
}
static enum drm_connector_status ivch_detect(struct intel_dvo_device *dvo)
{
return connector_status_connected;
}
static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo,
struct drm_display_mode *mode)
{
if (mode->clock > 112000)
return MODE_CLOCK_HIGH;
return MODE_OK;
}
/** Sets the power state of the panel connected to the ivch */
static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
{
int i;
uint16_t vr01, vr30, backlight;
/* Set the new power state of the panel. */
if (!ivch_read(dvo, VR01, &vr01))
return;
if (enable)
backlight = 1;
else
backlight = 0;
ivch_write(dvo, VR80, backlight);
if (enable)
vr01 |= VR01_LCD_ENABLE | VR01_DVO_ENABLE;
else
vr01 &= ~(VR01_LCD_ENABLE | VR01_DVO_ENABLE);
ivch_write(dvo, VR01, vr01);
/* Wait for the panel to make its state transition */
for (i = 0; i < 100; i++) {
if (!ivch_read(dvo, VR30, &vr30))
break;
if (((vr30 & VR30_PANEL_ON) != 0) == enable)
break;
udelay(1000);
}
/* wait some more; vch may fail to resync sometimes without this */
udelay(16 * 1000);
}
static bool ivch_get_hw_state(struct intel_dvo_device *dvo)
{
uint16_t vr01;
/* Read back the current power state of the panel. */
if (!ivch_read(dvo, VR01, &vr01))
return false;
if (vr01 & VR01_LCD_ENABLE)
return true;
else
return false;
}
static void ivch_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
uint16_t vr40 = 0;
uint16_t vr01;
vr01 = 0;
vr40 = (VR40_STALL_ENABLE | VR40_VERTICAL_INTERP_ENABLE |
VR40_HORIZONTAL_INTERP_ENABLE);
if (mode->hdisplay != adjusted_mode->hdisplay ||
mode->vdisplay != adjusted_mode->vdisplay) {
uint16_t x_ratio, y_ratio;
vr01 |= VR01_PANEL_FIT_ENABLE;
vr40 |= VR40_CLOCK_GATING_ENABLE;
x_ratio = (((mode->hdisplay - 1) << 16) /
(adjusted_mode->hdisplay - 1)) >> 2;
y_ratio = (((mode->vdisplay - 1) << 16) /
(adjusted_mode->vdisplay - 1)) >> 2;
ivch_write(dvo, VR42, x_ratio);
ivch_write(dvo, VR41, y_ratio);
} else {
vr01 &= ~VR01_PANEL_FIT_ENABLE;
vr40 &= ~VR40_CLOCK_GATING_ENABLE;
}
vr40 &= ~VR40_AUTO_RATIO_ENABLE;
ivch_write(dvo, VR01, vr01);
ivch_write(dvo, VR40, vr40);
ivch_dump_regs(dvo);
}
static void ivch_dump_regs(struct intel_dvo_device *dvo)
{
uint16_t val;
ivch_read(dvo, VR00, &val);
DRM_DEBUG_KMS("VR00: 0x%04x\n", val);
ivch_read(dvo, VR01, &val);
DRM_DEBUG_KMS("VR01: 0x%04x\n", val);
ivch_read(dvo, VR30, &val);
DRM_DEBUG_KMS("VR30: 0x%04x\n", val);
ivch_read(dvo, VR40, &val);
DRM_DEBUG_KMS("VR40: 0x%04x\n", val);
/* GPIO registers */
ivch_read(dvo, VR80, &val);
DRM_DEBUG_KMS("VR80: 0x%04x\n", val);
ivch_read(dvo, VR81, &val);
DRM_DEBUG_KMS("VR81: 0x%04x\n", val);
ivch_read(dvo, VR82, &val);
DRM_DEBUG_KMS("VR82: 0x%04x\n", val);
ivch_read(dvo, VR83, &val);
DRM_DEBUG_KMS("VR83: 0x%04x\n", val);
ivch_read(dvo, VR84, &val);
DRM_DEBUG_KMS("VR84: 0x%04x\n", val);
ivch_read(dvo, VR85, &val);
DRM_DEBUG_KMS("VR85: 0x%04x\n", val);
ivch_read(dvo, VR86, &val);
DRM_DEBUG_KMS("VR86: 0x%04x\n", val);
ivch_read(dvo, VR87, &val);
DRM_DEBUG_KMS("VR87: 0x%04x\n", val);
ivch_read(dvo, VR88, &val);
DRM_DEBUG_KMS("VR88: 0x%04x\n", val);
/* Scratch register 0 - AIM Panel type */
ivch_read(dvo, VR8E, &val);
DRM_DEBUG_KMS("VR8E: 0x%04x\n", val);
/* Scratch register 1 - Status register */
ivch_read(dvo, VR8F, &val);
DRM_DEBUG_KMS("VR8F: 0x%04x\n", val);
}
static void ivch_destroy(struct intel_dvo_device *dvo)
{
struct ivch_priv *priv = dvo->dev_priv;
if (priv) {
kfree(priv);
dvo->dev_priv = NULL;
}
}
struct intel_dvo_dev_ops ivch_ops = {
.init = ivch_init,
.dpms = ivch_dpms,
.get_hw_state = ivch_get_hw_state,
.mode_valid = ivch_mode_valid,
.mode_set = ivch_mode_set,
.detect = ivch_detect,
.dump_regs = ivch_dump_regs,
.destroy = ivch_destroy,
};

drivers/gpu/drm/i915/dvo_ns2501.c Normal file

@@ -0,0 +1,631 @@
/*
*
* Copyright (c) 2012 Gilles Dartiguelongue, Thomas Richter
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "dvo.h"
#include "i915_reg.h"
#include "i915_drv.h"
#define NS2501_VID 0x1305
#define NS2501_DID 0x6726
#define NS2501_VID_LO 0x00
#define NS2501_VID_HI 0x01
#define NS2501_DID_LO 0x02
#define NS2501_DID_HI 0x03
#define NS2501_REV 0x04
#define NS2501_RSVD 0x05
#define NS2501_FREQ_LO 0x06
#define NS2501_FREQ_HI 0x07
#define NS2501_REG8 0x08
#define NS2501_8_VEN (1<<5)
#define NS2501_8_HEN (1<<4)
#define NS2501_8_DSEL (1<<3)
#define NS2501_8_BPAS (1<<2)
#define NS2501_8_RSVD (1<<1)
#define NS2501_8_PD (1<<0)
#define NS2501_REG9 0x09
#define NS2501_9_VLOW (1<<7)
#define NS2501_9_MSEL_MASK (0x7<<4)
#define NS2501_9_TSEL (1<<3)
#define NS2501_9_RSEN (1<<2)
#define NS2501_9_RSVD (1<<1)
#define NS2501_9_MDI (1<<0)
#define NS2501_REGC 0x0c
enum {
MODE_640x480,
MODE_800x600,
MODE_1024x768,
};
struct ns2501_reg {
uint8_t offset;
uint8_t value;
};
/*
* Magic values based on what the BIOS on
* Fujitsu-Siemens Lifebook S6010 programs (1024x768 panel).
*/
static const struct ns2501_reg regs_1024x768[][86] = {
[MODE_640x480] = {
[0] = { .offset = 0x0a, .value = 0x81, },
[1] = { .offset = 0x18, .value = 0x07, },
[2] = { .offset = 0x19, .value = 0x00, },
[3] = { .offset = 0x1a, .value = 0x00, },
[4] = { .offset = 0x1b, .value = 0x11, },
[5] = { .offset = 0x1c, .value = 0x54, },
[6] = { .offset = 0x1d, .value = 0x03, },
[7] = { .offset = 0x1e, .value = 0x02, },
[8] = { .offset = 0xf3, .value = 0x90, },
[9] = { .offset = 0xf9, .value = 0x00, },
[10] = { .offset = 0xc1, .value = 0x90, },
[11] = { .offset = 0xc2, .value = 0x00, },
[12] = { .offset = 0xc3, .value = 0x0f, },
[13] = { .offset = 0xc4, .value = 0x03, },
[14] = { .offset = 0xc5, .value = 0x16, },
[15] = { .offset = 0xc6, .value = 0x00, },
[16] = { .offset = 0xc7, .value = 0x02, },
[17] = { .offset = 0xc8, .value = 0x02, },
[18] = { .offset = 0xf4, .value = 0x00, },
[19] = { .offset = 0x80, .value = 0xff, },
[20] = { .offset = 0x81, .value = 0x07, },
[21] = { .offset = 0x82, .value = 0x3d, },
[22] = { .offset = 0x83, .value = 0x05, },
[23] = { .offset = 0x94, .value = 0x00, },
[24] = { .offset = 0x95, .value = 0x00, },
[25] = { .offset = 0x96, .value = 0x05, },
[26] = { .offset = 0x97, .value = 0x00, },
[27] = { .offset = 0x9a, .value = 0x88, },
[28] = { .offset = 0x9b, .value = 0x00, },
[29] = { .offset = 0x98, .value = 0x00, },
[30] = { .offset = 0x99, .value = 0x00, },
[31] = { .offset = 0xf7, .value = 0x88, },
[32] = { .offset = 0xf8, .value = 0x0a, },
[33] = { .offset = 0x9c, .value = 0x24, },
[34] = { .offset = 0x9d, .value = 0x00, },
[35] = { .offset = 0x9e, .value = 0x25, },
[36] = { .offset = 0x9f, .value = 0x03, },
[37] = { .offset = 0xa0, .value = 0x28, },
[38] = { .offset = 0xa1, .value = 0x01, },
[39] = { .offset = 0xa2, .value = 0x28, },
[40] = { .offset = 0xa3, .value = 0x05, },
[41] = { .offset = 0xb6, .value = 0x09, },
[42] = { .offset = 0xb8, .value = 0x00, },
[43] = { .offset = 0xb9, .value = 0xa0, },
[44] = { .offset = 0xba, .value = 0x00, },
[45] = { .offset = 0xbb, .value = 0x20, },
[46] = { .offset = 0x10, .value = 0x00, },
[47] = { .offset = 0x11, .value = 0xa0, },
[48] = { .offset = 0x12, .value = 0x02, },
[49] = { .offset = 0x20, .value = 0x00, },
[50] = { .offset = 0x22, .value = 0x00, },
[51] = { .offset = 0x23, .value = 0x00, },
[52] = { .offset = 0x24, .value = 0x00, },
[53] = { .offset = 0x25, .value = 0x00, },
[54] = { .offset = 0x8c, .value = 0x10, },
[55] = { .offset = 0x8d, .value = 0x02, },
[56] = { .offset = 0x8e, .value = 0x10, },
[57] = { .offset = 0x8f, .value = 0x00, },
[58] = { .offset = 0x90, .value = 0xff, },
[59] = { .offset = 0x91, .value = 0x07, },
[60] = { .offset = 0x92, .value = 0xa0, },
[61] = { .offset = 0x93, .value = 0x02, },
[62] = { .offset = 0xa5, .value = 0x00, },
[63] = { .offset = 0xa6, .value = 0x00, },
[64] = { .offset = 0xa7, .value = 0x00, },
[65] = { .offset = 0xa8, .value = 0x00, },
[66] = { .offset = 0xa9, .value = 0x04, },
[67] = { .offset = 0xaa, .value = 0x70, },
[68] = { .offset = 0xab, .value = 0x4f, },
[69] = { .offset = 0xac, .value = 0x00, },
[70] = { .offset = 0xa4, .value = 0x84, },
[71] = { .offset = 0x7e, .value = 0x18, },
[72] = { .offset = 0x84, .value = 0x00, },
[73] = { .offset = 0x85, .value = 0x00, },
[74] = { .offset = 0x86, .value = 0x00, },
[75] = { .offset = 0x87, .value = 0x00, },
[76] = { .offset = 0x88, .value = 0x00, },
[77] = { .offset = 0x89, .value = 0x00, },
[78] = { .offset = 0x8a, .value = 0x00, },
[79] = { .offset = 0x8b, .value = 0x00, },
[80] = { .offset = 0x26, .value = 0x00, },
[81] = { .offset = 0x27, .value = 0x00, },
[82] = { .offset = 0xad, .value = 0x00, },
[83] = { .offset = 0x08, .value = 0x30, }, /* 0x31 */
[84] = { .offset = 0x41, .value = 0x00, },
[85] = { .offset = 0xc0, .value = 0x05, },
},
[MODE_800x600] = {
[0] = { .offset = 0x0a, .value = 0x81, },
[1] = { .offset = 0x18, .value = 0x07, },
[2] = { .offset = 0x19, .value = 0x00, },
[3] = { .offset = 0x1a, .value = 0x00, },
[4] = { .offset = 0x1b, .value = 0x19, },
[5] = { .offset = 0x1c, .value = 0x64, },
[6] = { .offset = 0x1d, .value = 0x02, },
[7] = { .offset = 0x1e, .value = 0x02, },
[8] = { .offset = 0xf3, .value = 0x90, },
[9] = { .offset = 0xf9, .value = 0x00, },
[10] = { .offset = 0xc1, .value = 0xd7, },
[11] = { .offset = 0xc2, .value = 0x00, },
[12] = { .offset = 0xc3, .value = 0xf8, },
[13] = { .offset = 0xc4, .value = 0x03, },
[14] = { .offset = 0xc5, .value = 0x1a, },
[15] = { .offset = 0xc6, .value = 0x00, },
[16] = { .offset = 0xc7, .value = 0x73, },
[17] = { .offset = 0xc8, .value = 0x02, },
[18] = { .offset = 0xf4, .value = 0x00, },
[19] = { .offset = 0x80, .value = 0x27, },
[20] = { .offset = 0x81, .value = 0x03, },
[21] = { .offset = 0x82, .value = 0x41, },
[22] = { .offset = 0x83, .value = 0x05, },
[23] = { .offset = 0x94, .value = 0x00, },
[24] = { .offset = 0x95, .value = 0x00, },
[25] = { .offset = 0x96, .value = 0x05, },
[26] = { .offset = 0x97, .value = 0x00, },
[27] = { .offset = 0x9a, .value = 0x88, },
[28] = { .offset = 0x9b, .value = 0x00, },
[29] = { .offset = 0x98, .value = 0x00, },
[30] = { .offset = 0x99, .value = 0x00, },
[31] = { .offset = 0xf7, .value = 0x88, },
[32] = { .offset = 0xf8, .value = 0x06, },
[33] = { .offset = 0x9c, .value = 0x23, },
[34] = { .offset = 0x9d, .value = 0x00, },
[35] = { .offset = 0x9e, .value = 0x25, },
[36] = { .offset = 0x9f, .value = 0x03, },
[37] = { .offset = 0xa0, .value = 0x28, },
[38] = { .offset = 0xa1, .value = 0x01, },
[39] = { .offset = 0xa2, .value = 0x28, },
[40] = { .offset = 0xa3, .value = 0x05, },
[41] = { .offset = 0xb6, .value = 0x09, },
[42] = { .offset = 0xb8, .value = 0x30, },
[43] = { .offset = 0xb9, .value = 0xc8, },
[44] = { .offset = 0xba, .value = 0x00, },
[45] = { .offset = 0xbb, .value = 0x20, },
[46] = { .offset = 0x10, .value = 0x20, },
[47] = { .offset = 0x11, .value = 0xc8, },
[48] = { .offset = 0x12, .value = 0x02, },
[49] = { .offset = 0x20, .value = 0x00, },
[50] = { .offset = 0x22, .value = 0x00, },
[51] = { .offset = 0x23, .value = 0x00, },
[52] = { .offset = 0x24, .value = 0x00, },
[53] = { .offset = 0x25, .value = 0x00, },
[54] = { .offset = 0x8c, .value = 0x10, },
[55] = { .offset = 0x8d, .value = 0x02, },
[56] = { .offset = 0x8e, .value = 0x04, },
[57] = { .offset = 0x8f, .value = 0x00, },
[58] = { .offset = 0x90, .value = 0xff, },
[59] = { .offset = 0x91, .value = 0x07, },
[60] = { .offset = 0x92, .value = 0xa0, },
[61] = { .offset = 0x93, .value = 0x02, },
[62] = { .offset = 0xa5, .value = 0x00, },
[63] = { .offset = 0xa6, .value = 0x00, },
[64] = { .offset = 0xa7, .value = 0x00, },
[65] = { .offset = 0xa8, .value = 0x00, },
[66] = { .offset = 0xa9, .value = 0x83, },
[67] = { .offset = 0xaa, .value = 0x40, },
[68] = { .offset = 0xab, .value = 0x32, },
[69] = { .offset = 0xac, .value = 0x00, },
[70] = { .offset = 0xa4, .value = 0x80, },
[71] = { .offset = 0x7e, .value = 0x18, },
[72] = { .offset = 0x84, .value = 0x00, },
[73] = { .offset = 0x85, .value = 0x00, },
[74] = { .offset = 0x86, .value = 0x00, },
[75] = { .offset = 0x87, .value = 0x00, },
[76] = { .offset = 0x88, .value = 0x00, },
[77] = { .offset = 0x89, .value = 0x00, },
[78] = { .offset = 0x8a, .value = 0x00, },
[79] = { .offset = 0x8b, .value = 0x00, },
[80] = { .offset = 0x26, .value = 0x00, },
[81] = { .offset = 0x27, .value = 0x00, },
[82] = { .offset = 0xad, .value = 0x00, },
[83] = { .offset = 0x08, .value = 0x30, }, /* 0x31 */
[84] = { .offset = 0x41, .value = 0x00, },
[85] = { .offset = 0xc0, .value = 0x07, },
},
[MODE_1024x768] = {
[0] = { .offset = 0x0a, .value = 0x81, },
[1] = { .offset = 0x18, .value = 0x07, },
[2] = { .offset = 0x19, .value = 0x00, },
[3] = { .offset = 0x1a, .value = 0x00, },
[4] = { .offset = 0x1b, .value = 0x11, },
[5] = { .offset = 0x1c, .value = 0x54, },
[6] = { .offset = 0x1d, .value = 0x03, },
[7] = { .offset = 0x1e, .value = 0x02, },
[8] = { .offset = 0xf3, .value = 0x90, },
[9] = { .offset = 0xf9, .value = 0x00, },
[10] = { .offset = 0xc1, .value = 0x90, },
[11] = { .offset = 0xc2, .value = 0x00, },
[12] = { .offset = 0xc3, .value = 0x0f, },
[13] = { .offset = 0xc4, .value = 0x03, },
[14] = { .offset = 0xc5, .value = 0x16, },
[15] = { .offset = 0xc6, .value = 0x00, },
[16] = { .offset = 0xc7, .value = 0x02, },
[17] = { .offset = 0xc8, .value = 0x02, },
[18] = { .offset = 0xf4, .value = 0x00, },
[19] = { .offset = 0x80, .value = 0xff, },
[20] = { .offset = 0x81, .value = 0x07, },
[21] = { .offset = 0x82, .value = 0x3d, },
[22] = { .offset = 0x83, .value = 0x05, },
[23] = { .offset = 0x94, .value = 0x00, },
[24] = { .offset = 0x95, .value = 0x00, },
[25] = { .offset = 0x96, .value = 0x05, },
[26] = { .offset = 0x97, .value = 0x00, },
[27] = { .offset = 0x9a, .value = 0x88, },
[28] = { .offset = 0x9b, .value = 0x00, },
[29] = { .offset = 0x98, .value = 0x00, },
[30] = { .offset = 0x99, .value = 0x00, },
[31] = { .offset = 0xf7, .value = 0x88, },
[32] = { .offset = 0xf8, .value = 0x0a, },
[33] = { .offset = 0x9c, .value = 0x24, },
[34] = { .offset = 0x9d, .value = 0x00, },
[35] = { .offset = 0x9e, .value = 0x25, },
[36] = { .offset = 0x9f, .value = 0x03, },
[37] = { .offset = 0xa0, .value = 0x28, },
[38] = { .offset = 0xa1, .value = 0x01, },
[39] = { .offset = 0xa2, .value = 0x28, },
[40] = { .offset = 0xa3, .value = 0x05, },
[41] = { .offset = 0xb6, .value = 0x09, },
[42] = { .offset = 0xb8, .value = 0x00, },
[43] = { .offset = 0xb9, .value = 0xa0, },
[44] = { .offset = 0xba, .value = 0x00, },
[45] = { .offset = 0xbb, .value = 0x20, },
[46] = { .offset = 0x10, .value = 0x00, },
[47] = { .offset = 0x11, .value = 0xa0, },
[48] = { .offset = 0x12, .value = 0x02, },
[49] = { .offset = 0x20, .value = 0x00, },
[50] = { .offset = 0x22, .value = 0x00, },
[51] = { .offset = 0x23, .value = 0x00, },
[52] = { .offset = 0x24, .value = 0x00, },
[53] = { .offset = 0x25, .value = 0x00, },
[54] = { .offset = 0x8c, .value = 0x10, },
[55] = { .offset = 0x8d, .value = 0x02, },
[56] = { .offset = 0x8e, .value = 0x10, },
[57] = { .offset = 0x8f, .value = 0x00, },
[58] = { .offset = 0x90, .value = 0xff, },
[59] = { .offset = 0x91, .value = 0x07, },
[60] = { .offset = 0x92, .value = 0xa0, },
[61] = { .offset = 0x93, .value = 0x02, },
[62] = { .offset = 0xa5, .value = 0x00, },
[63] = { .offset = 0xa6, .value = 0x00, },
[64] = { .offset = 0xa7, .value = 0x00, },
[65] = { .offset = 0xa8, .value = 0x00, },
[66] = { .offset = 0xa9, .value = 0x04, },
[67] = { .offset = 0xaa, .value = 0x70, },
[68] = { .offset = 0xab, .value = 0x4f, },
[69] = { .offset = 0xac, .value = 0x00, },
[70] = { .offset = 0xa4, .value = 0x84, },
[71] = { .offset = 0x7e, .value = 0x18, },
[72] = { .offset = 0x84, .value = 0x00, },
[73] = { .offset = 0x85, .value = 0x00, },
[74] = { .offset = 0x86, .value = 0x00, },
[75] = { .offset = 0x87, .value = 0x00, },
[76] = { .offset = 0x88, .value = 0x00, },
[77] = { .offset = 0x89, .value = 0x00, },
[78] = { .offset = 0x8a, .value = 0x00, },
[79] = { .offset = 0x8b, .value = 0x00, },
[80] = { .offset = 0x26, .value = 0x00, },
[81] = { .offset = 0x27, .value = 0x00, },
[82] = { .offset = 0xad, .value = 0x00, },
[83] = { .offset = 0x08, .value = 0x34, }, /* 0x35 */
[84] = { .offset = 0x41, .value = 0x00, },
[85] = { .offset = 0xc0, .value = 0x01, },
},
};
static const struct ns2501_reg regs_init[] = {
[0] = { .offset = 0x35, .value = 0xff, },
[1] = { .offset = 0x34, .value = 0x00, },
[2] = { .offset = 0x08, .value = 0x30, },
};
struct ns2501_priv {
bool quiet;
const struct ns2501_reg *regs;
};
#define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr)) /* unused X.org-era leftover */
/*
* For reasons unclear to me, the ns2501, at least on the Fujitsu/Siemens
* laptops, does not react on the i2c bus unless
* both the PLL is running and the display is configured in its native
* resolution.
* This function forces the DVO on, and stores the registers it touches.
* Afterwards, registers are restored to regular values.
*
* This is pretty much a hack, though it works.
* Without that, ns2501_readb and ns2501_writeb fail
* when switching the resolution.
*/
/*
** Read a register from the ns2501.
** Returns true if successful, false otherwise.
** If it returns false, it might be wise to force the DVO on
** as described in the comment above.
*/
static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch)
{
struct ns2501_priv *ns = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
u8 out_buf[2];
u8 in_buf[2];
struct i2c_msg msgs[] = {
{
.addr = dvo->slave_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
.addr = dvo->slave_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = in_buf,
}
};
out_buf[0] = addr;
out_buf[1] = 0;
if (i2c_transfer(adapter, msgs, 2) == 2) {
*ch = in_buf[0];
return true;
}
if (!ns->quiet) {
DRM_DEBUG_KMS
("Unable to read register 0x%02x from %s:0x%02x.\n", addr,
adapter->name, dvo->slave_addr);
}
return false;
}
/*
** Write a register to the ns2501.
** Returns true if successful, false otherwise.
** If it returns false, it might be wise to force the DVO on
** as described in the comment above.
*/
static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
struct ns2501_priv *ns = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
uint8_t out_buf[2];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
};
out_buf[0] = addr;
out_buf[1] = ch;
if (i2c_transfer(adapter, &msg, 1) == 1) {
return true;
}
if (!ns->quiet) {
DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d\n",
addr, adapter->name, dvo->slave_addr);
}
return false;
}
/* National Semiconductor 2501 driver:
* scan for the chip on the i2c bus.
* We hope the VBIOS initialized the PLL correctly so we can
* talk to it. If not, the chip will not be seen and not detected.
* Bummer!
*/
static bool ns2501_init(struct intel_dvo_device *dvo,
struct i2c_adapter *adapter)
{
/* this will detect the NS2501 chip on the specified i2c bus */
struct ns2501_priv *ns;
unsigned char ch;
ns = kzalloc(sizeof(struct ns2501_priv), GFP_KERNEL);
if (ns == NULL)
return false;
dvo->i2c_bus = adapter;
dvo->dev_priv = ns;
ns->quiet = true;
if (!ns2501_readb(dvo, NS2501_VID_LO, &ch))
goto out;
if (ch != (NS2501_VID & 0xff)) {
DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n",
ch, adapter->name, dvo->slave_addr);
goto out;
}
if (!ns2501_readb(dvo, NS2501_DID_LO, &ch))
goto out;
if (ch != (NS2501_DID & 0xff)) {
DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n",
ch, adapter->name, dvo->slave_addr);
goto out;
}
ns->quiet = false;
DRM_DEBUG_KMS("init ns2501 dvo controller successfully!\n");
return true;
out:
kfree(ns);
return false;
}
static enum drm_connector_status ns2501_detect(struct intel_dvo_device *dvo)
{
/*
* This is a laptop display; it doesn't have hotplugging.
* Even if it did, the detection bit of the 2501 is unreliable, as
* it only works for some display types.
* It is even less reliable since the PLL must be active to
* allow reading from the chip.
*/
return connector_status_connected;
}
static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo,
struct drm_display_mode *mode)
{
DRM_DEBUG_KMS
("is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n",
mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal);
/*
* Currently, these are all the modes I have data for.
* More might exist. It is unclear how to find the native resolution
* of the panel here, or we could always accept it
* by disabling the scaler.
*/
if ((mode->hdisplay == 640 && mode->vdisplay == 480 && mode->clock == 25175) ||
(mode->hdisplay == 800 && mode->vdisplay == 600 && mode->clock == 40000) ||
(mode->hdisplay == 1024 && mode->vdisplay == 768 && mode->clock == 65000)) {
return MODE_OK;
} else {
return MODE_ONE_SIZE; /* Is this a reasonable error? */
}
}
static void ns2501_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
int mode_idx, i;
DRM_DEBUG_KMS
("set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal);
if (mode->hdisplay == 640 && mode->vdisplay == 480)
mode_idx = MODE_640x480;
else if (mode->hdisplay == 800 && mode->vdisplay == 600)
mode_idx = MODE_800x600;
else if (mode->hdisplay == 1024 && mode->vdisplay == 768)
mode_idx = MODE_1024x768;
else
return;
/* Hopefully doing it every time won't hurt... */
for (i = 0; i < ARRAY_SIZE(regs_init); i++)
ns2501_writeb(dvo, regs_init[i].offset, regs_init[i].value);
ns->regs = regs_1024x768[mode_idx];
for (i = 0; i < 84; i++)
ns2501_writeb(dvo, ns->regs[i].offset, ns->regs[i].value);
}
/* set the NS2501 power state */
static bool ns2501_get_hw_state(struct intel_dvo_device *dvo)
{
unsigned char ch;
if (!ns2501_readb(dvo, NS2501_REG8, &ch))
return false;
return ch & NS2501_8_PD;
}
/* set the NS2501 power state */
static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
{
struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
DRM_DEBUG_KMS("Trying set the dpms of the DVO to %i\n", enable);
if (enable) {
if (WARN_ON(ns->regs[83].offset != 0x08 ||
ns->regs[84].offset != 0x41 ||
ns->regs[85].offset != 0xc0))
return;
ns2501_writeb(dvo, 0xc0, ns->regs[85].value | 0x08);
ns2501_writeb(dvo, 0x41, ns->regs[84].value);
ns2501_writeb(dvo, 0x34, 0x01);
msleep(15);
ns2501_writeb(dvo, 0x08, 0x35);
if (!(ns->regs[83].value & NS2501_8_BPAS))
ns2501_writeb(dvo, 0x08, 0x31);
msleep(200);
ns2501_writeb(dvo, 0x34, 0x03);
ns2501_writeb(dvo, 0xc0, ns->regs[85].value);
} else {
ns2501_writeb(dvo, 0x34, 0x01);
msleep(200);
ns2501_writeb(dvo, 0x08, 0x34);
msleep(15);
ns2501_writeb(dvo, 0x34, 0x00);
}
}
static void ns2501_destroy(struct intel_dvo_device *dvo)
{
struct ns2501_priv *ns = dvo->dev_priv;
if (ns) {
kfree(ns);
dvo->dev_priv = NULL;
}
}
struct intel_dvo_dev_ops ns2501_ops = {
.init = ns2501_init,
.detect = ns2501_detect,
.mode_valid = ns2501_mode_valid,
.mode_set = ns2501_mode_set,
.dpms = ns2501_dpms,
.get_hw_state = ns2501_get_hw_state,
.destroy = ns2501_destroy,
};

View file

@@ -0,0 +1,279 @@
/**************************************************************************
Copyright © 2006 Dave Airlie
All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sub license, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
**************************************************************************/
#include "dvo.h"
#define SIL164_VID 0x0001
#define SIL164_DID 0x0006
#define SIL164_VID_LO 0x00
#define SIL164_VID_HI 0x01
#define SIL164_DID_LO 0x02
#define SIL164_DID_HI 0x03
#define SIL164_REV 0x04
#define SIL164_RSVD 0x05
#define SIL164_FREQ_LO 0x06
#define SIL164_FREQ_HI 0x07
#define SIL164_REG8 0x08
#define SIL164_8_VEN (1<<5)
#define SIL164_8_HEN (1<<4)
#define SIL164_8_DSEL (1<<3)
#define SIL164_8_BSEL (1<<2)
#define SIL164_8_EDGE (1<<1)
#define SIL164_8_PD (1<<0)
#define SIL164_REG9 0x09
#define SIL164_9_VLOW (1<<7)
#define SIL164_9_MSEL_MASK (0x7<<4)
#define SIL164_9_TSEL (1<<3)
#define SIL164_9_RSEN (1<<2)
#define SIL164_9_HTPLG (1<<1)
#define SIL164_9_MDI (1<<0)
#define SIL164_REGC 0x0c
struct sil164_priv {
//I2CDevRec d;
bool quiet;
};
#define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr))
static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
struct sil164_priv *sil = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
u8 out_buf[2];
u8 in_buf[2];
struct i2c_msg msgs[] = {
{
.addr = dvo->slave_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
.addr = dvo->slave_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = in_buf,
}
};
out_buf[0] = addr;
out_buf[1] = 0;
if (i2c_transfer(adapter, msgs, 2) == 2) {
*ch = in_buf[0];
return true;
}
if (!sil->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
addr, adapter->name, dvo->slave_addr);
}
return false;
}
static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
struct sil164_priv *sil = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
uint8_t out_buf[2];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
};
out_buf[0] = addr;
out_buf[1] = ch;
if (i2c_transfer(adapter, &msg, 1) == 1)
return true;
if (!sil->quiet) {
DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
addr, adapter->name, dvo->slave_addr);
}
return false;
}
/* Silicon Image 164 driver for chip on i2c bus */
static bool sil164_init(struct intel_dvo_device *dvo,
struct i2c_adapter *adapter)
{
/* this will detect the SIL164 chip on the specified i2c bus */
struct sil164_priv *sil;
unsigned char ch;
sil = kzalloc(sizeof(struct sil164_priv), GFP_KERNEL);
if (sil == NULL)
return false;
dvo->i2c_bus = adapter;
dvo->dev_priv = sil;
sil->quiet = true;
if (!sil164_readb(dvo, SIL164_VID_LO, &ch))
goto out;
if (ch != (SIL164_VID & 0xff)) {
DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n",
ch, adapter->name, dvo->slave_addr);
goto out;
}
if (!sil164_readb(dvo, SIL164_DID_LO, &ch))
goto out;
if (ch != (SIL164_DID & 0xff)) {
DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n",
ch, adapter->name, dvo->slave_addr);
goto out;
}
sil->quiet = false;
DRM_DEBUG_KMS("init sil164 dvo controller successfully!\n");
return true;
out:
kfree(sil);
return false;
}
static enum drm_connector_status sil164_detect(struct intel_dvo_device *dvo)
{
uint8_t reg9;
sil164_readb(dvo, SIL164_REG9, &reg9);
if (reg9 & SIL164_9_HTPLG)
return connector_status_connected;
else
return connector_status_disconnected;
}
static enum drm_mode_status sil164_mode_valid(struct intel_dvo_device *dvo,
struct drm_display_mode *mode)
{
return MODE_OK;
}
static void sil164_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
/* As long as the basics are set up, since we don't have clock
* dependencies in the mode setup, we can just leave the
* registers alone and everything will work fine.
*/
/* recommended programming sequence from doc */
/*sil164_writeb(sil, 0x08, 0x30);
sil164_writeb(sil, 0x09, 0x00);
sil164_writeb(sil, 0x0a, 0x90);
sil164_writeb(sil, 0x0c, 0x89);
sil164_writeb(sil, 0x08, 0x31);*/
/* don't do much */
return;
}
/* set the SIL164 power state */
static void sil164_dpms(struct intel_dvo_device *dvo, bool enable)
{
int ret;
unsigned char ch;
ret = sil164_readb(dvo, SIL164_REG8, &ch);
if (ret == false)
return;
if (enable)
ch |= SIL164_8_PD;
else
ch &= ~SIL164_8_PD;
sil164_writeb(dvo, SIL164_REG8, ch);
return;
}
static bool sil164_get_hw_state(struct intel_dvo_device *dvo)
{
int ret;
unsigned char ch;
ret = sil164_readb(dvo, SIL164_REG8, &ch);
if (ret == false)
return false;
if (ch & SIL164_8_PD)
return true;
else
return false;
}
static void sil164_dump_regs(struct intel_dvo_device *dvo)
{
uint8_t val;
sil164_readb(dvo, SIL164_FREQ_LO, &val);
DRM_DEBUG_KMS("SIL164_FREQ_LO: 0x%02x\n", val);
sil164_readb(dvo, SIL164_FREQ_HI, &val);
DRM_DEBUG_KMS("SIL164_FREQ_HI: 0x%02x\n", val);
sil164_readb(dvo, SIL164_REG8, &val);
DRM_DEBUG_KMS("SIL164_REG8: 0x%02x\n", val);
sil164_readb(dvo, SIL164_REG9, &val);
DRM_DEBUG_KMS("SIL164_REG9: 0x%02x\n", val);
sil164_readb(dvo, SIL164_REGC, &val);
DRM_DEBUG_KMS("SIL164_REGC: 0x%02x\n", val);
}
static void sil164_destroy(struct intel_dvo_device *dvo)
{
struct sil164_priv *sil = dvo->dev_priv;
if (sil) {
kfree(sil);
dvo->dev_priv = NULL;
}
}
struct intel_dvo_dev_ops sil164_ops = {
.init = sil164_init,
.detect = sil164_detect,
.mode_valid = sil164_mode_valid,
.mode_set = sil164_mode_set,
.dpms = sil164_dpms,
.get_hw_state = sil164_get_hw_state,
.dump_regs = sil164_dump_regs,
.destroy = sil164_destroy,
};

View file

@@ -0,0 +1,318 @@
/*
* Copyright © 2007 Dave Mueller
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Dave Mueller <dave.mueller@gmx.ch>
*
*/
#include "dvo.h"
/* register definitions according to the TFP410 data sheet */
#define TFP410_VID 0x014C
#define TFP410_DID 0x0410
#define TFP410_VID_LO 0x00
#define TFP410_VID_HI 0x01
#define TFP410_DID_LO 0x02
#define TFP410_DID_HI 0x03
#define TFP410_REV 0x04
#define TFP410_CTL_1 0x08
#define TFP410_CTL_1_TDIS (1<<6)
#define TFP410_CTL_1_VEN (1<<5)
#define TFP410_CTL_1_HEN (1<<4)
#define TFP410_CTL_1_DSEL (1<<3)
#define TFP410_CTL_1_BSEL (1<<2)
#define TFP410_CTL_1_EDGE (1<<1)
#define TFP410_CTL_1_PD (1<<0)
#define TFP410_CTL_2 0x09
#define TFP410_CTL_2_VLOW (1<<7)
#define TFP410_CTL_2_MSEL_MASK (0x7<<4)
#define TFP410_CTL_2_MSEL (1<<4)
#define TFP410_CTL_2_TSEL (1<<3)
#define TFP410_CTL_2_RSEN (1<<2)
#define TFP410_CTL_2_HTPLG (1<<1)
#define TFP410_CTL_2_MDI (1<<0)
#define TFP410_CTL_3 0x0A
#define TFP410_CTL_3_DK_MASK (0x7<<5)
#define TFP410_CTL_3_DK (1<<5)
#define TFP410_CTL_3_DKEN (1<<4)
#define TFP410_CTL_3_CTL_MASK (0x7<<1)
#define TFP410_CTL_3_CTL (1<<1)
#define TFP410_USERCFG 0x0B
#define TFP410_DE_DLY 0x32
#define TFP410_DE_CTL 0x33
#define TFP410_DE_CTL_DEGEN (1<<6)
#define TFP410_DE_CTL_VSPOL (1<<5)
#define TFP410_DE_CTL_HSPOL (1<<4)
#define TFP410_DE_CTL_DEDLY8 (1<<0)
#define TFP410_DE_TOP 0x34
#define TFP410_DE_CNT_LO 0x36
#define TFP410_DE_CNT_HI 0x37
#define TFP410_DE_LIN_LO 0x38
#define TFP410_DE_LIN_HI 0x39
#define TFP410_H_RES_LO 0x3A
#define TFP410_H_RES_HI 0x3B
#define TFP410_V_RES_LO 0x3C
#define TFP410_V_RES_HI 0x3D
struct tfp410_priv {
bool quiet;
};
static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
struct tfp410_priv *tfp = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
u8 out_buf[2];
u8 in_buf[2];
struct i2c_msg msgs[] = {
{
.addr = dvo->slave_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
.addr = dvo->slave_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = in_buf,
}
};
out_buf[0] = addr;
out_buf[1] = 0;
if (i2c_transfer(adapter, msgs, 2) == 2) {
*ch = in_buf[0];
return true;
}
if (!tfp->quiet) {
DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
addr, adapter->name, dvo->slave_addr);
}
return false;
}
static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
struct tfp410_priv *tfp = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
uint8_t out_buf[2];
struct i2c_msg msg = {
.addr = dvo->slave_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
};
out_buf[0] = addr;
out_buf[1] = ch;
if (i2c_transfer(adapter, &msg, 1) == 1)
return true;
if (!tfp->quiet) {
DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
addr, adapter->name, dvo->slave_addr);
}
return false;
}
static int tfp410_getid(struct intel_dvo_device *dvo, int addr)
{
uint8_t ch1, ch2;
if (tfp410_readb(dvo, addr+0, &ch1) &&
tfp410_readb(dvo, addr+1, &ch2))
return ((ch2 << 8) & 0xFF00) | (ch1 & 0x00FF);
return -1;
}
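/* Worked example: with the VID registers reading 0x4C (low byte) and 0x01
 * (high byte), tfp410_getid(dvo, TFP410_VID_LO) assembles
 * ((0x01 << 8) & 0xFF00) | (0x4C & 0x00FF) = 0x014C, matching TFP410_VID.
 */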
/* TI TFP410 driver for a chip on the i2c bus */
static bool tfp410_init(struct intel_dvo_device *dvo,
struct i2c_adapter *adapter)
{
/* this will detect the tfp410 chip on the specified i2c bus */
struct tfp410_priv *tfp;
int id;
tfp = kzalloc(sizeof(struct tfp410_priv), GFP_KERNEL);
if (tfp == NULL)
return false;
dvo->i2c_bus = adapter;
dvo->dev_priv = tfp;
tfp->quiet = true;
if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) {
DRM_DEBUG_KMS("tfp410 not detected got VID %X: from %s "
"Slave %d.\n",
id, adapter->name, dvo->slave_addr);
goto out;
}
if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) {
DRM_DEBUG_KMS("tfp410 not detected got DID %X: from %s "
"Slave %d.\n",
id, adapter->name, dvo->slave_addr);
goto out;
}
tfp->quiet = false;
return true;
out:
kfree(tfp);
return false;
}
static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo)
{
enum drm_connector_status ret = connector_status_disconnected;
uint8_t ctl2;
if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) {
if (ctl2 & TFP410_CTL_2_RSEN)
ret = connector_status_connected;
else
ret = connector_status_disconnected;
}
return ret;
}
static enum drm_mode_status tfp410_mode_valid(struct intel_dvo_device *dvo,
struct drm_display_mode *mode)
{
return MODE_OK;
}
static void tfp410_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
/* As long as the basics are set up, since we don't have clock dependencies
* in the mode setup, we can just leave the registers alone and everything
* will work fine.
*/
/* don't do much */
return;
}
/* set the tfp410 power state */
static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable)
{
uint8_t ctl1;
if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
return;
if (enable)
ctl1 |= TFP410_CTL_1_PD;
else
ctl1 &= ~TFP410_CTL_1_PD;
tfp410_writeb(dvo, TFP410_CTL_1, ctl1);
}
static bool tfp410_get_hw_state(struct intel_dvo_device *dvo)
{
uint8_t ctl1;
if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
return false;
if (ctl1 & TFP410_CTL_1_PD)
return true;
else
return false;
}
static void tfp410_dump_regs(struct intel_dvo_device *dvo)
{
uint8_t val, val2;
tfp410_readb(dvo, TFP410_REV, &val);
DRM_DEBUG_KMS("TFP410_REV: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_CTL_1, &val);
DRM_DEBUG_KMS("TFP410_CTL1: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_CTL_2, &val);
DRM_DEBUG_KMS("TFP410_CTL2: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_CTL_3, &val);
DRM_DEBUG_KMS("TFP410_CTL3: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_USERCFG, &val);
DRM_DEBUG_KMS("TFP410_USERCFG: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_DE_DLY, &val);
DRM_DEBUG_KMS("TFP410_DE_DLY: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_DE_CTL, &val);
DRM_DEBUG_KMS("TFP410_DE_CTL: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_DE_TOP, &val);
DRM_DEBUG_KMS("TFP410_DE_TOP: 0x%02X\n", val);
tfp410_readb(dvo, TFP410_DE_CNT_LO, &val);
tfp410_readb(dvo, TFP410_DE_CNT_HI, &val2);
DRM_DEBUG_KMS("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
tfp410_readb(dvo, TFP410_DE_LIN_LO, &val);
tfp410_readb(dvo, TFP410_DE_LIN_HI, &val2);
DRM_DEBUG_KMS("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
tfp410_readb(dvo, TFP410_H_RES_LO, &val);
tfp410_readb(dvo, TFP410_H_RES_HI, &val2);
DRM_DEBUG_KMS("TFP410_H_RES: 0x%02X%02X\n", val2, val);
tfp410_readb(dvo, TFP410_V_RES_LO, &val);
tfp410_readb(dvo, TFP410_V_RES_HI, &val2);
DRM_DEBUG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val);
}
static void tfp410_destroy(struct intel_dvo_device *dvo)
{
struct tfp410_priv *tfp = dvo->dev_priv;
if (tfp) {
kfree(tfp);
dvo->dev_priv = NULL;
}
}
struct intel_dvo_dev_ops tfp410_ops = {
.init = tfp410_init,
.detect = tfp410_detect,
.mode_valid = tfp410_mode_valid,
.mode_set = tfp410_mode_set,
.dpms = tfp410_dpms,
.get_hw_state = tfp410_get_hw_state,
.dump_regs = tfp410_dump_regs,
.destroy = tfp410_destroy,
};
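/*
 * How an ops table like tfp410_ops gets consumed (a sketch, assuming the
 * usual intel_dvo.c wiring of this era): the DVO probe code walks a table
 * of intel_dvo_device entries and calls .dev_ops->init on the configured
 * i2c slave address. The entry below is illustrative only; the real table
 * lives in intel_dvo.c and the slave address depends on board strapping.
 */
static const struct intel_dvo_device example_tfp410_entry __maybe_unused = {
	.type = INTEL_DVO_CHIP_TMDS,	/* TMDS transmitter */
	.name = "tfp410",
	.dvo_reg = DVOC,		/* DVO port control register */
	.slave_addr = 0x38,		/* example address only */
	.dev_ops = &tfp410_ops,
};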

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@@ -0,0 +1,775 @@
/*
* Copyright © 2011-2012 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Ben Widawsky <ben@bwidawsk.net>
*
*/
/*
* This file implements HW context support. On gen5+ a HW context consists of an
* opaque GPU object which is referenced at times of context saves and restores.
* With RC6 enabled, the context is also referenced as the GPU enters and exits
* RC6 (the GPU has its own internal power context, except on gen5). Though
* something like a context does exist for the media ring, the code only
* supports contexts for the render ring.
*
* In software, there is a distinction between contexts created by the user,
* and the default HW context. The default HW context is used by GPU clients
* that do not request setup of their own hardware context. The default
* context's state is never restored to help prevent programming errors. This
* would happen if a client ran and piggy-backed off another client's GPU state.
* The default context only exists to give the GPU some offset to load as the
* current to invoke a save of the context we actually care about. In fact, the
* code could likely be constructed, albeit in a more complicated fashion, to
* never use the default context, though that limits the driver's ability to
* swap out, and/or destroy other contexts.
*
* All other contexts are created as a request by the GPU client. These contexts
* store GPU state, and thus allow GPU clients to not re-emit state (and
* potentially query certain state) at any time. The kernel driver makes
* certain that the appropriate commands are inserted.
*
* The context life cycle is semi-complicated in that context BOs may live
* longer than the context itself because of the way the hardware, and object
* tracking works. Below is a very crude representation of the state machine
* describing the context life.
*						refcount	pincount	active
* S0: initial state				0		0		0
* S1: context created				1		0		0
* S2: context is currently running		2		1		X
* S3: GPU referenced, but not current		2		0		1
* S4: context is current, but destroyed	1		1		0
* S5: like S3, but destroyed			1		0		1
*
* The most common (but not all) transitions:
* S0->S1: client creates a context
* S1->S2: client submits execbuf with context
* S2->S3: other clients submits execbuf with context
* S3->S1: context object was retired
* S3->S2: clients submits another execbuf
* S2->S4: context destroy called with current context
* S3->S5->S0: destroy path
* S4->S5->S0: destroy path on current context
*
* There are two confusing terms used above:
* The "current context" means the context which is currently running on the
* GPU. The GPU has loaded its state already and has stored away the gtt
* offset of the BO. The GPU is not actively referencing the data at this
* offset, but it will on the next context switch. The only way to avoid this
* is to do a GPU reset.
*
* An "active context' is one which was previously the "current context" and is
* on the active list waiting for the next context switch to occur. Until this
* happens, the object must remain at the same gtt offset. It is therefore
* possible to destroy a context, but it is still active.
*
*/
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
/* This is a HW constraint. The value below is the largest known requirement
* I've seen in a spec to date, and that was a workaround for a non-shipping
* part. It should be safe to decrease this, but it's more future proof as is.
*/
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096
static size_t get_context_alignment(struct drm_device *dev)
{
if (IS_GEN6(dev))
return GEN6_CONTEXT_ALIGN;
return GEN7_CONTEXT_ALIGN;
}
static int get_context_size(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
u32 reg;
switch (INTEL_INFO(dev)->gen) {
case 6:
reg = I915_READ(CXT_SIZE);
ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
break;
case 7:
reg = I915_READ(GEN7_CXT_SIZE);
if (IS_HASWELL(dev))
ret = HSW_CXT_TOTAL_SIZE;
else
ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
break;
case 8:
ret = GEN8_CXT_TOTAL_SIZE;
break;
default:
BUG();
}
return ret;
}
void i915_gem_context_free(struct kref *ctx_ref)
{
struct intel_context *ctx = container_of(ctx_ref,
typeof(*ctx), ref);
if (i915.enable_execlists)
intel_lr_context_free(ctx);
i915_ppgtt_put(ctx->ppgtt);
if (ctx->legacy_hw_ctx.rcs_state)
drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
list_del(&ctx->link);
kfree(ctx);
}
struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
{
struct drm_i915_gem_object *obj;
int ret;
obj = i915_gem_alloc_object(dev, size);
if (obj == NULL)
return ERR_PTR(-ENOMEM);
/*
* Try to make the context utilize L3 as well as LLC.
*
* On VLV we don't have L3 controls in the PTEs so we
* shouldn't touch the cache level, especially as that
* would make the object snooped which might have a
* negative performance impact.
*/
if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
/* Failure shouldn't ever happen this early */
if (WARN_ON(ret)) {
drm_gem_object_unreference(&obj->base);
return ERR_PTR(ret);
}
}
return obj;
}
static struct intel_context *
__create_hw_context(struct drm_device *dev,
struct drm_i915_file_private *file_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_context *ctx;
int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (ctx == NULL)
return ERR_PTR(-ENOMEM);
kref_init(&ctx->ref);
list_add_tail(&ctx->link, &dev_priv->context_list);
if (dev_priv->hw_context_size) {
struct drm_i915_gem_object *obj =
i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
goto err_out;
}
ctx->legacy_hw_ctx.rcs_state = obj;
}
/* Default context will never have a file_priv */
if (file_priv != NULL) {
ret = idr_alloc(&file_priv->context_idr, ctx,
DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
if (ret < 0)
goto err_out;
} else
ret = DEFAULT_CONTEXT_HANDLE;
ctx->file_priv = file_priv;
ctx->user_handle = ret;
/* NB: Mark all slices as needing a remap so that when the context first
* loads it will restore whatever remap state already exists. If there
* is no remap info, it will be a NOP. */
ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;
return ctx;
err_out:
i915_gem_context_unreference(ctx);
return ERR_PTR(ret);
}
/**
* The default context needs to exist per ring that uses contexts. It stores the
* context state of the GPU for applications that don't utilize HW contexts, as
* well as an idle case.
*/
static struct intel_context *
i915_gem_create_context(struct drm_device *dev,
struct drm_i915_file_private *file_priv)
{
const bool is_global_default_ctx = file_priv == NULL;
struct intel_context *ctx;
int ret = 0;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
ctx = __create_hw_context(dev, file_priv);
if (IS_ERR(ctx))
return ctx;
if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
/* We may need to do things with the shrinker which
* require us to immediately switch back to the default
* context. This can cause a problem as pinning the
* default context also requires GTT space which may not
* be available. To avoid this we always pin the default
* context.
*/
ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
get_context_alignment(dev), 0);
if (ret) {
DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
goto err_destroy;
}
}
if (USES_FULL_PPGTT(dev)) {
struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);
if (IS_ERR_OR_NULL(ppgtt)) {
DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
ret = ppgtt ? PTR_ERR(ppgtt) : -ENOMEM; /* PTR_ERR(NULL) would be 0 */
goto err_unpin;
}
ctx->ppgtt = ppgtt;
}
return ctx;
err_unpin:
if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
err_destroy:
i915_gem_context_unreference(ctx);
return ERR_PTR(ret);
}
void i915_gem_context_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
/* In execlists mode we will unreference the context when the execlist
* queue is cleared and the requests destroyed.
*/
if (i915.enable_execlists)
return;
for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
struct intel_context *lctx = ring->last_context;
if (lctx) {
if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);
i915_gem_context_unreference(lctx);
ring->last_context = NULL;
}
}
}
int i915_gem_context_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_context *ctx;
int i;
/* Init should only be called once per module load. Eventually the
* restriction on the context_disabled check can be loosened. */
if (WARN_ON(dev_priv->ring[RCS].default_context))
return 0;
if (i915.enable_execlists) {
/* NB: intentionally left blank. We will allocate our own
* backing objects as we need them, thank you very much */
dev_priv->hw_context_size = 0;
} else if (HAS_HW_CONTEXTS(dev)) {
dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
if (dev_priv->hw_context_size > (1<<20)) {
DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
dev_priv->hw_context_size);
dev_priv->hw_context_size = 0;
}
}
ctx = i915_gem_create_context(dev, NULL);
if (IS_ERR(ctx)) {
DRM_ERROR("Failed to create default global context (error %ld)\n",
PTR_ERR(ctx));
return PTR_ERR(ctx);
}
for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
/* NB: RCS will hold a ref for all rings */
ring->default_context = ctx;
}
DRM_DEBUG_DRIVER("%s context support initialized\n",
i915.enable_execlists ? "LR" :
dev_priv->hw_context_size ? "HW" : "fake");
return 0;
}
void i915_gem_context_fini(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_context *dctx = dev_priv->ring[RCS].default_context;
int i;
if (dctx->legacy_hw_ctx.rcs_state) {
/* The only known way to stop the gpu from accessing the hw context is
* to reset it. Do this as the very last operation to avoid confusing
* other code, leading to spurious errors. */
intel_gpu_reset(dev);
/* When default context is created and switched to, base object refcount
* will be 2 (+1 from object creation and +1 from do_switch()).
* i915_gem_context_fini() will be called after gpu_idle() has switched
* to default context. So we need to unreference the base object once
* to offset the do_switch part, so that i915_gem_context_unreference()
* can then free the base object correctly. */
WARN_ON(!dev_priv->ring[RCS].last_context);
if (dev_priv->ring[RCS].last_context == dctx) {
/* Fake switch to NULL context */
WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
i915_gem_context_unreference(dctx);
dev_priv->ring[RCS].last_context = NULL;
}
i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
}
for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
if (ring->last_context)
i915_gem_context_unreference(ring->last_context);
ring->default_context = NULL;
ring->last_context = NULL;
}
i915_gem_context_unreference(dctx);
}
int i915_gem_context_enable(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *ring;
int ret, i;
BUG_ON(!dev_priv->ring[RCS].default_context);
if (i915.enable_execlists)
return 0;
for_each_ring(ring, dev_priv, i) {
ret = i915_switch_context(ring, ring->default_context);
if (ret)
return ret;
}
return 0;
}
static int context_idr_cleanup(int id, void *p, void *data)
{
struct intel_context *ctx = p;
i915_gem_context_unreference(ctx);
return 0;
}
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
struct intel_context *ctx;
idr_init(&file_priv->context_idr);
mutex_lock(&dev->struct_mutex);
ctx = i915_gem_create_context(dev, file_priv);
mutex_unlock(&dev->struct_mutex);
if (IS_ERR(ctx)) {
idr_destroy(&file_priv->context_idr);
return PTR_ERR(ctx);
}
return 0;
}
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
idr_destroy(&file_priv->context_idr);
}
struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
{
struct intel_context *ctx;
ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id);
if (!ctx)
return ERR_PTR(-ENOENT);
return ctx;
}
static inline int
mi_set_context(struct intel_engine_cs *ring,
struct intel_context *new_context,
u32 hw_flags)
{
u32 flags = hw_flags | MI_MM_SPACE_GTT;
const int num_rings =
/* Use an extended w/a on ivb+ if signalling from other rings */
i915_semaphore_is_enabled(ring->dev) ?
hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
0;
int len, i, ret;
/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
* invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
* explicitly, so we rely on the value at ring init, stored in
* itlb_before_ctx_switch.
*/
if (IS_GEN6(ring->dev)) {
ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
if (ret)
return ret;
}
/* These flags are for resource streamer on HSW+ */
if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
len = 4;
if (INTEL_INFO(ring->dev)->gen >= 7)
len += 2 + (num_rings ? 4*num_rings + 2 : 0);
ret = intel_ring_begin(ring, len);
if (ret)
return ret;
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
if (INTEL_INFO(ring->dev)->gen >= 7) {
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
if (num_rings) {
struct intel_engine_cs *signaller;
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
for_each_ring(signaller, to_i915(ring->dev), i) {
if (signaller == ring)
continue;
intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
}
}
}
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_SET_CONTEXT);
intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
flags);
/*
* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
* WaMiSetContext_Hang:snb,ivb,vlv
*/
intel_ring_emit(ring, MI_NOOP);
if (INTEL_INFO(ring->dev)->gen >= 7) {
if (num_rings) {
struct intel_engine_cs *signaller;
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
for_each_ring(signaller, to_i915(ring->dev), i) {
if (signaller == ring)
continue;
intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
}
}
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
}
intel_ring_advance(ring);
return ret;
}
static int do_switch(struct intel_engine_cs *ring,
struct intel_context *to)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_context *from = ring->last_context;
u32 hw_flags = 0;
bool uninitialized = false;
int ret, i;
if (from != NULL && ring == &dev_priv->ring[RCS]) {
BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
}
if (from == to && !to->remap_slice)
return 0;
/* Trying to pin first makes error handling easier. */
if (ring == &dev_priv->ring[RCS]) {
ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
get_context_alignment(ring->dev), 0);
if (ret)
return ret;
}
/*
* Pin can switch back to the default context if we end up calling into
* evict_everything - as a last ditch gtt defrag effort that also
* switches to the default context. Hence we need to reload from here.
*/
from = ring->last_context;
if (to->ppgtt) {
ret = to->ppgtt->switch_mm(to->ppgtt, ring);
if (ret)
goto unpin_out;
}
if (ring != &dev_priv->ring[RCS]) {
if (from)
i915_gem_context_unreference(from);
goto done;
}
/*
* Clear this page out of any CPU caches for coherent swap-in/out. Note
* that thanks to write = false in this call and us not setting any gpu
* write domains when putting a context object onto the active list
* (when switching away from it), this won't block.
*
* XXX: We need a real interface to do this instead of trickery.
*/
ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
if (ret)
goto unpin_out;
if (!to->legacy_hw_ctx.rcs_state->has_global_gtt_mapping) {
struct i915_vma *vma = i915_gem_obj_to_vma(to->legacy_hw_ctx.rcs_state,
&dev_priv->gtt.base);
vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level, GLOBAL_BIND);
}
if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
hw_flags |= MI_RESTORE_INHIBIT;
ret = mi_set_context(ring, to, hw_flags);
if (ret)
goto unpin_out;
for (i = 0; i < MAX_L3_SLICES; i++) {
if (!(to->remap_slice & (1<<i)))
continue;
ret = i915_gem_l3_remap(ring, i);
/* If it failed, try again next round */
if (ret)
DRM_DEBUG_DRIVER("L3 remapping failed\n");
else
to->remap_slice &= ~(1<<i);
}
/* The backing object for the context is done after switching to the
* *next* context. Therefore we cannot retire the previous context until
* the next context has already started running. In fact, the below code
* is a bit suboptimal because the retiring can occur simply after the
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
if (from != NULL) {
from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be
* correct in case the object gets swapped out. Ideally we'd be
* able to defer doing this until we know the object would be
* swapped, but there is no way to do that yet.
*/
from->legacy_hw_ctx.rcs_state->dirty = 1;
BUG_ON(from->legacy_hw_ctx.rcs_state->ring != ring);
/* obj is kept alive until the next request by its active ref */
i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
i915_gem_context_unreference(from);
}
uninitialized = !to->legacy_hw_ctx.initialized && from == NULL;
to->legacy_hw_ctx.initialized = true;
done:
i915_gem_context_reference(to);
ring->last_context = to;
if (uninitialized) {
if (ring->init_context) {
ret = ring->init_context(ring);
if (ret)
DRM_ERROR("ring init context: %d\n", ret);
}
ret = i915_gem_render_state_init(ring);
if (ret)
DRM_ERROR("init render state: %d\n", ret);
}
return 0;
unpin_out:
if (ring->id == RCS)
i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
return ret;
}
/**
* i915_switch_context() - perform a GPU context switch.
* @ring: ring for which we'll execute the context switch
* @to: the context to switch to
*
* The context life cycle is simple. The context refcount is incremented and
* decremented by 1 on create and destroy. If the context is in use by the GPU,
* it will have a refcount > 1. This allows us to destroy the context abstract
* object while letting the normal object tracking destroy the backing BO.
*
* This function should not be used in execlists mode. Instead the context is
* switched by writing to the ELSP and requests keep a reference to their
* context.
*/
int i915_switch_context(struct intel_engine_cs *ring,
struct intel_context *to)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
WARN_ON(i915.enable_execlists);
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
if (to != ring->last_context) {
i915_gem_context_reference(to);
if (ring->last_context)
i915_gem_context_unreference(ring->last_context);
ring->last_context = to;
}
return 0;
}
return do_switch(ring, to);
}
static bool contexts_enabled(struct drm_device *dev)
{
return i915.enable_execlists || to_i915(dev)->hw_context_size;
}
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_context_create *args = data;
struct drm_i915_file_private *file_priv = file->driver_priv;
struct intel_context *ctx;
int ret;
if (!contexts_enabled(dev))
return -ENODEV;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
ctx = i915_gem_create_context(dev, file_priv);
mutex_unlock(&dev->struct_mutex);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
args->ctx_id = ctx->user_handle;
DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);
return 0;
}
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_context_destroy *args = data;
struct drm_i915_file_private *file_priv = file->driver_priv;
struct intel_context *ctx;
int ret;
if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
return -ENOENT;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
ctx = i915_gem_context_get(file_priv, args->ctx_id);
if (IS_ERR(ctx)) {
mutex_unlock(&dev->struct_mutex);
return PTR_ERR(ctx);
}
idr_remove(&ctx->file_priv->context_idr, ctx->user_handle);
i915_gem_context_unreference(ctx);
mutex_unlock(&dev->struct_mutex);
DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
return 0;
}
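/*
 * Userspace view (an illustrative sketch, not part of this kernel file):
 * a separate program reaches the two ioctls above through libdrm's
 * drmIoctl(). The function names below are made up for the example and
 * error handling is trimmed; fd is an open i915 DRM device node.
 */
#include <stdint.h>
#include <xf86drm.h>
#include <i915_drm.h>

static uint32_t example_context_create(int fd)
{
	/* The kernel fills in ctx_id on success; 0 is reserved for the
	 * default context, so it doubles as a failure value here. */
	struct drm_i915_gem_context_create create = { 0 };

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create) != 0)
		return 0;
	return create.ctx_id;
}

static void example_context_destroy(int fd, uint32_t ctx_id)
{
	struct drm_i915_gem_context_destroy destroy = { .ctx_id = ctx_id };

	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
}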

View file

@@ -0,0 +1,118 @@
/*
* Copyright © 2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Keith Packard <keithp@keithp.com>
*
*/
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#if WATCH_LISTS
int
i915_verify_lists(struct drm_device *dev)
{
static int warned;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
int err = 0;
if (warned)
return 0;
list_for_each_entry(obj, &dev_priv->render_ring.active_list, list) {
if (obj->base.dev != dev ||
!atomic_read(&obj->base.refcount.refcount)) {
DRM_ERROR("freed render active %p\n", obj);
err++;
break;
} else if (!obj->active ||
(obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) {
DRM_ERROR("invalid render active %p (a %d r %x)\n",
obj,
obj->active,
obj->base.read_domains);
err++;
} else if (obj->base.write_domain && list_empty(&obj->gpu_write_list)) {
DRM_ERROR("invalid render active %p (w %x, gwl %d)\n",
obj,
obj->base.write_domain,
!list_empty(&obj->gpu_write_list));
err++;
}
}
list_for_each_entry(obj, &dev_priv->mm.flushing_list, list) {
if (obj->base.dev != dev ||
!atomic_read(&obj->base.refcount.refcount)) {
DRM_ERROR("freed flushing %p\n", obj);
err++;
break;
} else if (!obj->active ||
(obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 ||
list_empty(&obj->gpu_write_list)) {
DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n",
obj,
obj->active,
obj->base.write_domain,
!list_empty(&obj->gpu_write_list));
err++;
}
}
list_for_each_entry(obj, &dev_priv->mm.gpu_write_list, gpu_write_list) {
if (obj->base.dev != dev ||
!atomic_read(&obj->base.refcount.refcount)) {
DRM_ERROR("freed gpu write %p\n", obj);
err++;
break;
} else if (!obj->active ||
(obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) {
DRM_ERROR("invalid gpu write %p (a %d w %x)\n",
obj,
obj->active,
obj->base.write_domain);
err++;
}
}
list_for_each_entry(obj, &i915_gtt_vm->inactive_list, list) {
if (obj->base.dev != dev ||
!atomic_read(&obj->base.refcount.refcount)) {
DRM_ERROR("freed inactive %p\n", obj);
err++;
break;
} else if (obj->pin_count || obj->active ||
(obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
DRM_ERROR("invalid inactive %p (p %d a %d w %x)\n",
obj,
obj->pin_count, obj->active,
obj->base.write_domain);
err++;
}
}
return warned = err;
}
#endif /* WATCH_LISTS */

View file

@@ -0,0 +1,314 @@
/*
* Copyright 2012 Red Hat Inc
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Dave Airlie <airlied@redhat.com>
*/
#include <drm/drmP.h>
#include "i915_drv.h"
#include <linux/dma-buf.h>
static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
return to_intel_bo(buf->priv);
}
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
enum dma_data_direction dir)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
struct sg_table *st;
struct scatterlist *src, *dst;
int ret, i;
ret = i915_mutex_lock_interruptible(obj->base.dev);
if (ret)
goto err;
ret = i915_gem_object_get_pages(obj);
if (ret)
goto err_unlock;
i915_gem_object_pin_pages(obj);
/* Copy sg so that we make an independent mapping */
st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
if (st == NULL) {
ret = -ENOMEM;
goto err_unpin;
}
ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
if (ret)
goto err_free;
src = obj->pages->sgl;
dst = st->sgl;
for (i = 0; i < obj->pages->nents; i++) {
sg_set_page(dst, sg_page(src), src->length, 0);
dst = sg_next(dst);
src = sg_next(src);
}
if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
ret = -ENOMEM;
goto err_free_sg;
}
mutex_unlock(&obj->base.dev->struct_mutex);
return st;
err_free_sg:
sg_free_table(st);
err_free:
kfree(st);
err_unpin:
i915_gem_object_unpin_pages(obj);
err_unlock:
mutex_unlock(&obj->base.dev->struct_mutex);
err:
return ERR_PTR(ret);
}
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
struct sg_table *sg,
enum dma_data_direction dir)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
mutex_lock(&obj->base.dev->struct_mutex);
dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
sg_free_table(sg);
kfree(sg);
i915_gem_object_unpin_pages(obj);
mutex_unlock(&obj->base.dev->struct_mutex);
}
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev;
struct sg_page_iter sg_iter;
struct page **pages;
int ret, i;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ERR_PTR(ret);
if (obj->dma_buf_vmapping) {
obj->vmapping_count++;
goto out_unlock;
}
ret = i915_gem_object_get_pages(obj);
if (ret)
goto err;
i915_gem_object_pin_pages(obj);
ret = -ENOMEM;
pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
if (pages == NULL)
goto err_unpin;
i = 0;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
pages[i++] = sg_page_iter_page(&sg_iter);
obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
drm_free_large(pages);
if (!obj->dma_buf_vmapping)
goto err_unpin;
obj->vmapping_count = 1;
out_unlock:
mutex_unlock(&dev->struct_mutex);
return obj->dma_buf_vmapping;
err_unpin:
i915_gem_object_unpin_pages(obj);
err:
mutex_unlock(&dev->struct_mutex);
return ERR_PTR(ret);
}
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev;
mutex_lock(&dev->struct_mutex);
if (--obj->vmapping_count == 0) {
vunmap(obj->dma_buf_vmapping);
obj->dma_buf_vmapping = NULL;
i915_gem_object_unpin_pages(obj);
}
mutex_unlock(&dev->struct_mutex);
}
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
return NULL;
}
static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
}
static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
return NULL;
}
static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
}
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
return -EINVAL;
}
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev;
int ret;
bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
ret = i915_gem_object_set_to_cpu_domain(obj, write);
mutex_unlock(&dev->struct_mutex);
return ret;
}
static const struct dma_buf_ops i915_dmabuf_ops = {
.map_dma_buf = i915_gem_map_dma_buf,
.unmap_dma_buf = i915_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
.kmap = i915_gem_dmabuf_kmap,
.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
.kunmap = i915_gem_dmabuf_kunmap,
.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
.mmap = i915_gem_dmabuf_mmap,
.vmap = i915_gem_dmabuf_vmap,
.vunmap = i915_gem_dmabuf_vunmap,
.begin_cpu_access = i915_gem_begin_cpu_access,
};
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
if (obj->ops->dmabuf_export) {
int ret = obj->ops->dmabuf_export(obj);
if (ret)
return ERR_PTR(ret);
}
return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags,
NULL);
}
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
struct sg_table *sg;
sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
if (IS_ERR(sg))
return PTR_ERR(sg);
obj->pages = sg;
obj->has_dma_mapping = true;
return 0;
}
static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
{
dma_buf_unmap_attachment(obj->base.import_attach,
obj->pages, DMA_BIDIRECTIONAL);
obj->has_dma_mapping = false;
}
static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
.get_pages = i915_gem_object_get_pages_dmabuf,
.put_pages = i915_gem_object_put_pages_dmabuf,
};
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
struct dma_buf_attachment *attach;
struct drm_i915_gem_object *obj;
int ret;
/* is this one of our own objects? */
if (dma_buf->ops == &i915_dmabuf_ops) {
obj = dma_buf_to_obj(dma_buf);
/* is it from our device? */
if (obj->base.dev == dev) {
/*
* Importing a dmabuf exported from our own gem increases
* refcount on gem itself instead of f_count of dmabuf.
*/
drm_gem_object_reference(&obj->base);
return &obj->base;
}
}
/* need to attach */
attach = dma_buf_attach(dma_buf, dev->dev);
if (IS_ERR(attach))
return ERR_CAST(attach);
get_dma_buf(dma_buf);
obj = i915_gem_object_alloc(dev);
if (obj == NULL) {
ret = -ENOMEM;
goto fail_detach;
}
drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
obj->base.import_attach = attach;
return &obj->base;
fail_detach:
dma_buf_detach(dma_buf, attach);
dma_buf_put(dma_buf);
return ERR_PTR(ret);
}
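/*
 * Userspace view (an illustrative sketch, not part of this kernel file):
 * the export/import hooks above are what run when a buffer is shared
 * between two DRM file descriptors with the PRIME ioctls. The helper name
 * below is made up; drmPrimeHandleToFD()/drmPrimeFDToHandle() are libdrm.
 */
#include <stdint.h>
#include <unistd.h>
#include <xf86drm.h>

/* Export a GEM handle on exporter_fd as a dma-buf fd, then import it as a
 * new GEM handle on importer_fd. Returns 0 on success. */
static int example_share_bo(int exporter_fd, uint32_t handle,
			    int importer_fd, uint32_t *out_handle)
{
	int prime_fd, ret;

	ret = drmPrimeHandleToFD(exporter_fd, handle, DRM_CLOEXEC, &prime_fd);
	if (ret)
		return ret;

	ret = drmPrimeFDToHandle(importer_fd, prime_fd, out_handle);
	close(prime_fd);	/* the new GEM handle keeps its own reference */
	return ret;
}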

View file

@@ -0,0 +1,277 @@
/*
* Copyright © 2008-2010 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
* Chris Wilson <chris@chris-wilson.co.uk>
*
*/
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_trace.h"
static bool
mark_free(struct i915_vma *vma, struct list_head *unwind)
{
if (vma->pin_count)
return false;
if (WARN_ON(!list_empty(&vma->exec_list)))
return false;
list_add(&vma->exec_list, unwind);
return drm_mm_scan_add_block(&vma->node);
}
/**
* i915_gem_evict_something - Evict vmas to make room for binding a new one
* @dev: drm_device
* @vm: address space to evict from
* @min_size: size of the desired free space
* @alignment: alignment constraint of the desired free space
* @cache_level: cache_level for the desired space
* @start: start (inclusive) of the range from which to evict objects
* @end: end (exclusive) of the range from which to evict objects
* @flags: additional flags to control the eviction algorithm
*
* This function will try to evict vmas until a free space satisfying the
* requirements is found. Callers must check first whether any such hole exists
* already before calling this function.
*
* This function is used by the object/vma binding code.
*
* To clarify: This is for freeing up virtual address space, not for freeing
* memory in e.g. the shrinker.
*/
int
i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
int min_size, unsigned alignment, unsigned cache_level,
unsigned long start, unsigned long end,
unsigned flags)
{
struct list_head eviction_list, unwind_list;
struct i915_vma *vma;
int ret = 0;
int pass = 0;
trace_i915_gem_evict(dev, min_size, alignment, flags);
/*
* The goal is to evict objects and amalgamate space in LRU order.
* The oldest idle objects reside on the inactive list, which is in
* retirement order. The next objects to retire are those on the (per
* ring) active list that do not have an outstanding flush. Once the
* hardware reports completion (the seqno is updated after the
* batchbuffer has been finished) the clean buffer objects would
* be retired to the inactive list. Any dirty objects would be added
* to the tail of the flushing list. So after processing the clean
* active objects we need to emit a MI_FLUSH to retire the flushing
* list, hence the retirement order of the flushing list is in
* advance of the dirty objects on the active lists.
*
* The retirement sequence is thus:
* 1. Inactive objects (already retired)
* 2. Clean active objects
* 3. Flushing list
* 4. Dirty active objects.
*
* On each list, the oldest objects lie at the HEAD with the freshest
* object on the TAIL.
*/
INIT_LIST_HEAD(&unwind_list);
if (start != 0 || end != vm->total) {
drm_mm_init_scan_with_range(&vm->mm, min_size,
alignment, cache_level,
start, end);
} else
drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
search_again:
/* First see if there is a large enough contiguous idle region... */
list_for_each_entry(vma, &vm->inactive_list, mm_list) {
if (mark_free(vma, &unwind_list))
goto found;
}
if (flags & PIN_NONBLOCK)
goto none;
/* Now merge in the soon-to-be-expired objects... */
list_for_each_entry(vma, &vm->active_list, mm_list) {
if (mark_free(vma, &unwind_list))
goto found;
}
none:
/* Nothing found, clean up and bail out! */
while (!list_empty(&unwind_list)) {
vma = list_first_entry(&unwind_list,
struct i915_vma,
exec_list);
ret = drm_mm_scan_remove_block(&vma->node);
BUG_ON(ret);
list_del_init(&vma->exec_list);
}
/* Can we unpin some objects such as idle hw contexts,
* or pending flips?
*/
if (flags & PIN_NONBLOCK)
return -ENOSPC;
/* Only idle the GPU and repeat the search once */
if (pass++ == 0) {
ret = i915_gpu_idle(dev);
if (ret)
return ret;
i915_gem_retire_requests(dev);
goto search_again;
}
/* If we still have pending pageflip completions, drop
* back to userspace to give our workqueues time to
* acquire our locks and unpin the old scanouts.
*/
return intel_has_pending_fb_unpin(dev) ? -EAGAIN : -ENOSPC;
found:
/* drm_mm doesn't allow any other operations while
* scanning, therefore store the to-be-evicted objects on a
* temporary list. */
INIT_LIST_HEAD(&eviction_list);
while (!list_empty(&unwind_list)) {
vma = list_first_entry(&unwind_list,
struct i915_vma,
exec_list);
if (drm_mm_scan_remove_block(&vma->node)) {
list_move(&vma->exec_list, &eviction_list);
drm_gem_object_reference(&vma->obj->base);
continue;
}
list_del_init(&vma->exec_list);
}
/* Unbinding will emit any required flushes */
while (!list_empty(&eviction_list)) {
struct drm_gem_object *obj;
vma = list_first_entry(&eviction_list,
struct i915_vma,
exec_list);
obj = &vma->obj->base;
list_del_init(&vma->exec_list);
if (ret == 0)
ret = i915_vma_unbind(vma);
drm_gem_object_unreference(obj);
}
return ret;
}
/**
* i915_gem_evict_vm - Evict all idle vmas from a vm
*
* @vm: Address space to cleanse
* @do_idle: Boolean directing whether to idle first.
*
* This function evicts all idle vmas from a vm. If all unpinned vmas should be
* evicted, @do_idle needs to be set to true.
*
* This is used by the execbuf code as a last-ditch effort to defragment the
* address space.
*
* To clarify: This is for freeing up virtual address space, not for freeing
* memory in e.g. the shrinker.
*/
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
{
struct i915_vma *vma, *next;
int ret;
trace_i915_gem_evict_vm(vm);
if (do_idle) {
ret = i915_gpu_idle(vm->dev);
if (ret)
return ret;
i915_gem_retire_requests(vm->dev);
}
list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
if (vma->pin_count == 0)
WARN_ON(i915_vma_unbind(vma));
return 0;
}
/**
* i915_gem_evict_everything - Try to evict all objects
* @dev: Device to evict objects for
*
* This function tries to evict all gem objects from all address spaces. Used
* by the shrinker as a last-ditch effort and for suspend, before releasing the
* backing storage of all unbound objects.
*/
int
i915_gem_evict_everything(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_address_space *vm, *v;
bool lists_empty = true;
int ret;
list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
lists_empty = (list_empty(&vm->inactive_list) &&
list_empty(&vm->active_list));
if (!lists_empty)
break;	/* one non-empty vm is enough: there are objects to evict */
}
if (lists_empty)
return -ENOSPC;
trace_i915_gem_evict_everything(dev);
/* The gpu_idle will flush everything in the write domain to the
* active list. Then we must move everything off the active list
* with retire requests.
*/
ret = i915_gpu_idle(dev);
if (ret)
return ret;
i915_gem_retire_requests(dev);
/* Having flushed everything, unbind() should never raise an error */
list_for_each_entry_safe(vm, v, &dev_priv->vm_list, global_link)
WARN_ON(i915_gem_evict_vm(vm, false));
return 0;
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@@ -0,0 +1,301 @@
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Please try to maintain the following order within this file unless it makes
* sense to do otherwise. From top to bottom:
* 1. typedefs
* 2. #defines, and macros
* 3. structure definitions
* 4. function prototypes
*
* Within each section, please try to order by generation in ascending order,
* from top to bottom (ie. gen6 on the top, gen8 on the bottom).
*/
#ifndef __I915_GEM_GTT_H__
#define __I915_GEM_GTT_H__
struct drm_i915_file_private;
typedef uint32_t gen6_gtt_pte_t;
typedef uint64_t gen8_gtt_pte_t;
typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PTE_CACHE_LLC (2 << 1)
#define GEN6_PTE_UNCACHED (1 << 1)
#define GEN6_PTE_VALID (1 << 0)
#define GEN6_PPGTT_PD_ENTRIES 512
#define GEN6_PD_SIZE (GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE)
#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
#define GEN6_PDE_VALID (1 << 0)
#define GEN7_PTE_CACHE_L3_LLC (3 << 1)
#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)
#define BYT_PTE_WRITEABLE (1 << 1)
/* Cacheability Control is a 4-bit value. The low three bits are stored in bits
* 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
*/
#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
(((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_PTE_UNCACHED (0)
#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)
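/*
 * Illustrative sketch (standalone userspace C, not driver code) of the
 * Haswell encoding above: the low three bits of the 4-bit cacheability
 * value land in PTE bits 3:1 and the fourth bit in PTE bit 11, exactly
 * as HSW_CACHEABILITY_CONTROL() computes.
 */
#include <assert.h>
#include <stdio.h>

#define HSW_CC(bits) ((((bits) & 0x7) << 1) | (((bits) & 0x8) << (11 - 3)))

int main(void)
{
        assert(HSW_CC(0x8) == (1u << 11));      /* fourth bit -> PTE bit 11 */
        assert(HSW_CC(0x7) == (0x7u << 1));     /* low bits -> PTE bits 3:1 */
        printf("HSW_WB_ELLC_LLC_AGE3 PTE bits: 0x%x\n", HSW_CC(0x8));
        return 0;
}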
/* GEN8 legacy style address is defined as a 3 level page table:
* 31:30 | 29:21 | 20:12 | 11:0
* PDPE | PDE | PTE | offset
* The difference as compared to normal x86 3 level page table is the PDPEs are
* programmed via register.
*/
#define GEN8_PDPE_SHIFT 30
#define GEN8_PDPE_MASK 0x3
#define GEN8_PDE_SHIFT 21
#define GEN8_PDE_MASK 0x1ff
#define GEN8_PTE_SHIFT 12
#define GEN8_PTE_MASK 0x1ff
#define GEN8_LEGACY_PDPS 4
#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t))
#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
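/*
 * Illustrative sketch (standalone userspace C, not driver code): how a
 * gen8 legacy GPU virtual address decomposes with the shifts and masks
 * above -- PDPE in bits 31:30, PDE in 29:21, PTE in 20:12, byte offset
 * in 11:0.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t addr = 0x8b7f6abc;             /* arbitrary example address */
        uint32_t pdpe = (addr >> 30) & 0x3;     /* GEN8_PDPE_SHIFT/MASK */
        uint32_t pde  = (addr >> 21) & 0x1ff;   /* GEN8_PDE_SHIFT/MASK */
        uint32_t pte  = (addr >> 12) & 0x1ff;   /* GEN8_PTE_SHIFT/MASK */
        uint32_t off  = addr & 0xfff;

        printf("pdpe=%u pde=%u pte=%u offset=0x%03x\n", pdpe, pde, pte, off);
        return 0;
}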
#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC_INDEX _PAGE_PCD /* WT eLLC */
#define CHV_PPAT_SNOOP (1<<6)
#define GEN8_PPAT_AGE(x) (x<<4)
#define GEN8_PPAT_LLCeLLC (3<<2)
#define GEN8_PPAT_LLCELLC (2<<2)
#define GEN8_PPAT_LLC (1<<2)
#define GEN8_PPAT_WB (3<<0)
#define GEN8_PPAT_WT (2<<0)
#define GEN8_PPAT_WC (1<<0)
#define GEN8_PPAT_UC (0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8))
enum i915_cache_level;
/**
* A VMA represents a GEM BO that is bound into an address space. Therefore, a
* VMA's presence cannot be guaranteed before binding, or after unbinding the
* object into/from the address space.
*
* To make things as simple as possible (ie. no refcounting), a VMA's lifetime
 * will always be <= an object's lifetime. So object refcounting should cover us.
*/
struct i915_vma {
struct drm_mm_node node;
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
/** This object's place on the active/inactive lists */
struct list_head mm_list;
struct list_head vma_link; /* Link in the object's VMA list */
/** This vma's place in the batchbuffer or on the eviction list */
struct list_head exec_list;
/**
* Used for performing relocations during execbuffer insertion.
*/
struct hlist_node exec_node;
unsigned long exec_handle;
struct drm_i915_gem_exec_object2 *exec_entry;
/**
* How many users have pinned this object in GTT space. The following
* users can each hold at most one reference: pwrite/pread, pin_ioctl
* (via user_pin_count), execbuffer (objects are not allowed multiple
* times for the same batchbuffer), and the framebuffer code. When
* switching/pageflipping, the framebuffer code has at most two buffers
* pinned per crtc.
*
* In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
* bits with absolutely no headroom. So use 4 bits. */
unsigned int pin_count:4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
/** Unmap an object from an address space. This usually consists of
* setting the valid PTE entries to a reserved scratch page. */
void (*unbind_vma)(struct i915_vma *vma);
/* Map an object into an address space with the given cache flags. */
#define GLOBAL_BIND (1<<0)
#define PTE_READ_ONLY (1<<1)
void (*bind_vma)(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags);
};
struct i915_address_space {
struct drm_mm mm;
struct drm_device *dev;
struct list_head global_link;
unsigned long start; /* Start offset always 0 for dri2 */
size_t total; /* size addr space maps (ex. 2GB for ggtt) */
struct {
dma_addr_t addr;
struct page *page;
} scratch;
/**
* List of objects currently involved in rendering.
*
* Includes buffers having the contents of their GPU caches
* flushed, not necessarily primitives. last_rendering_seqno
* represents when the rendering involved will be completed.
*
* A reference is held on the buffer while on this list.
*/
struct list_head active_list;
/**
* LRU list of objects which are not in the ringbuffer and
* are ready to unbind, but are still in the GTT.
*
* last_rendering_seqno is 0 while an object is in this list.
*
* A reference is not held on the buffer while on this list,
* as merely being GTT-bound shouldn't prevent its being
* freed, and we'll pull it off the list in the free path.
*/
struct list_head inactive_list;
/* FIXME: Need a more generic return type */
gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
enum i915_cache_level level,
bool valid, u32 flags); /* Create a valid PTE */
void (*clear_range)(struct i915_address_space *vm,
uint64_t start,
uint64_t length,
bool use_scratch);
void (*insert_entries)(struct i915_address_space *vm,
struct sg_table *st,
uint64_t start,
enum i915_cache_level cache_level, u32 flags);
void (*cleanup)(struct i915_address_space *vm);
};
/* The Graphics Translation Table is the way in which GEN hardware translates a
* Graphics Virtual Address into a Physical Address. In addition to the normal
* collateral associated with any va->pa translations GEN hardware also has a
* portion of the GTT which can be mapped by the CPU and remain both coherent
* and correct (in cases like swizzling). That region is referred to as GMADR in
* the spec.
*/
struct i915_gtt {
struct i915_address_space base;
size_t stolen_size; /* Total size of stolen memory */
unsigned long mappable_end; /* End offset that we can CPU map */
struct io_mapping *mappable; /* Mapping to our CPU mappable region */
phys_addr_t mappable_base; /* PA of our GMADR */
/** "Graphics Stolen Memory" holds the global PTEs */
void __iomem *gsm;
bool do_idle_maps;
int mtrr;
/* global gtt ops */
int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
size_t *stolen, phys_addr_t *mappable_base,
unsigned long *mappable_end);
};
struct i915_hw_ppgtt {
struct i915_address_space base;
struct kref ref;
struct drm_mm_node node;
unsigned num_pd_entries;
unsigned num_pd_pages; /* gen8+ */
union {
struct page **pt_pages;
struct page **gen8_pt_pages[GEN8_LEGACY_PDPS];
};
struct page *pd_pages;
union {
uint32_t pd_offset;
dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPS];
};
union {
dma_addr_t *pt_dma_addr;
dma_addr_t *gen8_pt_dma_addr[4];
};
struct drm_i915_file_private *file_priv;
int (*enable)(struct i915_hw_ppgtt *ppgtt);
int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
struct intel_engine_cs *ring);
void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};
int i915_gem_gtt_init(struct drm_device *dev);
void i915_gem_init_global_gtt(struct drm_device *dev);
int i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
unsigned long mappable_end, unsigned long end);
void i915_global_gtt_cleanup(struct drm_device *dev);
int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
int i915_ppgtt_init_hw(struct drm_device *dev);
void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
struct drm_i915_file_private *fpriv);
static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
{
if (ppgtt)
kref_get(&ppgtt->ref);
}
static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
{
if (ppgtt)
kref_put(&ppgtt->ref, i915_ppgtt_release);
}
void i915_check_and_clear_faults(struct drm_device *dev);
void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
#endif

View file

@@ -0,0 +1,179 @@
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Mika Kuoppala <mika.kuoppala@intel.com>
*
*/
#include "i915_drv.h"
#include "intel_renderstate.h"
static const struct intel_renderstate_rodata *
render_state_get_rodata(struct drm_device *dev, const int gen)
{
switch (gen) {
case 6:
return &gen6_null_state;
case 7:
return &gen7_null_state;
case 8:
return &gen8_null_state;
}
return NULL;
}
static int render_state_init(struct render_state *so, struct drm_device *dev)
{
int ret;
so->gen = INTEL_INFO(dev)->gen;
so->rodata = render_state_get_rodata(dev, so->gen);
if (so->rodata == NULL)
return 0;
if (so->rodata->batch_items * 4 > 4096)
return -EINVAL;
so->obj = i915_gem_alloc_object(dev, 4096);
if (so->obj == NULL)
return -ENOMEM;
ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
if (ret)
goto free_gem;
so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj);
return 0;
free_gem:
drm_gem_object_unreference(&so->obj->base);
return ret;
}
static int render_state_setup(struct render_state *so)
{
const struct intel_renderstate_rodata *rodata = so->rodata;
unsigned int i = 0, reloc_index = 0;
struct page *page;
u32 *d;
int ret;
ret = i915_gem_object_set_to_cpu_domain(so->obj, true);
if (ret)
return ret;
page = sg_page(so->obj->pages->sgl);
d = kmap(page);
while (i < rodata->batch_items) {
u32 s = rodata->batch[i];
if (i * 4 == rodata->reloc[reloc_index]) {
u64 r = s + so->ggtt_offset;
s = lower_32_bits(r);
if (so->gen >= 8) {
if (i + 1 >= rodata->batch_items ||
rodata->batch[i + 1] != 0)
return -EINVAL;
d[i++] = s;
s = upper_32_bits(r);
}
reloc_index++;
}
d[i++] = s;
}
kunmap(page);
ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
if (ret)
return ret;
if (rodata->reloc[reloc_index] != -1) {
DRM_ERROR("only %d relocs resolved\n", reloc_index);
return -EINVAL;
}
return 0;
}
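/*
 * Illustrative sketch (standalone userspace C, not driver code) of the
 * gen8+ relocation write in render_state_setup() above: a 64-bit GGTT
 * address is emitted as two consecutive dwords, low half first.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t r = 0x1ffff1000ull;    /* invented batch address + offset */
        uint32_t d[2];

        d[0] = (uint32_t)r;             /* lower_32_bits(r) */
        d[1] = (uint32_t)(r >> 32);     /* upper_32_bits(r), gen8 only */
        printf("dword0=0x%08x dword1=0x%08x\n", d[0], d[1]);
        return 0;
}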
void i915_gem_render_state_fini(struct render_state *so)
{
i915_gem_object_ggtt_unpin(so->obj);
drm_gem_object_unreference(&so->obj->base);
}
int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
struct render_state *so)
{
int ret;
if (WARN_ON(ring->id != RCS))
return -ENOENT;
ret = render_state_init(so, ring->dev);
if (ret)
return ret;
if (so->rodata == NULL)
return 0;
ret = render_state_setup(so);
if (ret) {
i915_gem_render_state_fini(so);
return ret;
}
return 0;
}
int i915_gem_render_state_init(struct intel_engine_cs *ring)
{
struct render_state so;
int ret;
ret = i915_gem_render_state_prepare(ring, &so);
if (ret)
return ret;
if (so.rodata == NULL)
return 0;
ret = ring->dispatch_execbuffer(ring,
so.ggtt_offset,
so.rodata->batch_items * 4,
I915_DISPATCH_SECURE);
if (ret)
goto out;
i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
ret = __i915_add_request(ring, NULL, so.obj, NULL);
/* __i915_add_request moves object to inactive if it fails */
out:
i915_gem_render_state_fini(&so);
return ret;
}

View file

@@ -0,0 +1,47 @@
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _I915_GEM_RENDER_STATE_H_
#define _I915_GEM_RENDER_STATE_H_
#include <linux/types.h>
struct intel_renderstate_rodata {
const u32 *reloc;
const u32 *batch;
const u32 batch_items;
};
struct render_state {
const struct intel_renderstate_rodata *rodata;
struct drm_i915_gem_object *obj;
u64 ggtt_offset;
int gen;
};
int i915_gem_render_state_init(struct intel_engine_cs *ring);
void i915_gem_render_state_fini(struct render_state *so);
int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
struct render_state *so);
#endif /* _I915_GEM_RENDER_STATE_H_ */

View file

@@ -0,0 +1,552 @@
/*
* Copyright © 2008-2012 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
* Chris Wilson <chris@chris-wilson.co.uk>
*
*/
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
/*
* The BIOS typically reserves some of the system's memory for the exclusive
* use of the integrated graphics. This memory is no longer available for
* use by the OS and so the user finds that his system has less memory
* available than he put in. We refer to this memory as stolen.
*
* The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
* be available for panics. Anything else we can reuse the stolen memory
* for is a boon.
*/
static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct resource *r;
u32 base;
/* Almost universally we can find the Graphics Base of Stolen Memory
* at offset 0x5c in the igfx configuration space. On a few (desktop)
* machines this is also mirrored in the bridge device at different
* locations, or in the MCHBAR. On gen2, the layout is again slightly
* different with the Graphics Segment immediately following Top of
* Memory (or Top of Usable DRAM). Note it appears that TOUD is only
* reported by 865g, so we just use the top of memory as determined
* by the e820 probe.
*
* XXX However gen2 requires an unavailable symbol.
*/
base = 0;
if (INTEL_INFO(dev)->gen >= 3) {
/* Read Graphics Base of Stolen Memory directly */
pci_read_config_dword(dev->pdev, 0x5c, &base);
base &= ~((1<<20) - 1);
} else { /* GEN2 */
#if 0
/* Stolen is immediately above Top of Memory */
base = max_low_pfn_mapped << PAGE_SHIFT;
#endif
}
if (base == 0)
return 0;
/* make sure we don't clobber the GTT if it's within stolen memory */
if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
struct {
u32 start, end;
} stolen[2] = {
{ .start = base, .end = base + dev_priv->gtt.stolen_size, },
{ .start = base, .end = base + dev_priv->gtt.stolen_size, },
};
u64 gtt_start, gtt_end;
gtt_start = I915_READ(PGTBL_CTL);
if (IS_GEN4(dev))
gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) |
(gtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
else
gtt_start &= PGTBL_ADDRESS_LO_MASK;
gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4;
if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end)
stolen[0].end = gtt_start;
if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end)
stolen[1].start = gtt_end;
/* pick the larger of the two chunks */
if (stolen[0].end - stolen[0].start >
stolen[1].end - stolen[1].start) {
base = stolen[0].start;
dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start;
} else {
base = stolen[1].start;
dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start;
}
if (stolen[0].start != stolen[1].start ||
stolen[0].end != stolen[1].end) {
DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
(unsigned long long) gtt_start,
(unsigned long long) gtt_end - 1);
DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
base, base + (u32) dev_priv->gtt.stolen_size - 1);
}
}
/* Verify that nothing else uses this physical address. Stolen
* memory should be reserved by the BIOS and hidden from the
* kernel. So if the region is already marked as busy, something
* is seriously wrong.
*/
r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
"Graphics Stolen Memory");
if (r == NULL) {
/*
* One more attempt but this time requesting region from
* base + 1, as we have seen that this resolves the region
* conflict with the PCI Bus.
* This is a BIOS w/a: Some BIOS wrap stolen in the root
* PCI bus, but have an off-by-one error. Hence retry the
* reservation starting from 1 instead of 0.
*/
r = devm_request_mem_region(dev->dev, base + 1,
dev_priv->gtt.stolen_size - 1,
"Graphics Stolen Memory");
/*
* GEN3 firmware likes to smash pci bridges into the stolen
* range. Apparently this works.
*/
if (r == NULL && !IS_GEN3(dev)) {
DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
base, base + (uint32_t)dev_priv->gtt.stolen_size);
base = 0;
}
}
return base;
}
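/*
 * Illustrative sketch (standalone userspace C, not driver code) of the
 * clipping logic in i915_stolen_to_physical() above: clip the stolen
 * range against the GTT from below and above, then keep the larger
 * surviving chunk. All addresses are invented.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t base = 0x80000000, size = 0x4000000;   /* 64MiB stolen */
        uint32_t gtt_start = 0x81000000, gtt_end = 0x81200000;
        struct { uint32_t start, end; } s[2] = {
                { base, base + size }, { base, base + size },
        };

        if (gtt_start >= s[0].start && gtt_start < s[0].end)
                s[0].end = gtt_start;           /* chunk below the GTT */
        if (gtt_end > s[1].start && gtt_end <= s[1].end)
                s[1].start = gtt_end;           /* chunk above the GTT */

        int big = (s[0].end - s[0].start > s[1].end - s[1].start) ? 0 : 1;
        printf("keeping 0x%08x-0x%08x\n", s[big].start, s[big].end - 1);
        return 0;
}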
static int find_compression_threshold(struct drm_device *dev,
struct drm_mm_node *node,
int size,
int fb_cpp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int compression_threshold = 1;
int ret;
/* HACK: This code depends on what we will do in *_enable_fbc. If that
* code changes, this code needs to change as well.
*
* The enable_fbc code will attempt to use one of our 2 compression
* thresholds, therefore, in that case, we only have 1 resort.
*/
/* Try to over-allocate to reduce reallocations and fragmentation. */
ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
if (ret == 0)
return compression_threshold;
again:
/* HW's ability to limit the CFB is 1:4 */
if (compression_threshold > 4 ||
(fb_cpp == 2 && compression_threshold == 2))
return 0;
ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
size >>= 1, 4096,
DRM_MM_SEARCH_DEFAULT);
if (ret && INTEL_INFO(dev)->gen <= 4) {
return 0;
} else if (ret) {
compression_threshold <<= 1;
goto again;
} else {
return compression_threshold;
}
}
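/*
 * Loose sketch (standalone userspace C, not driver code) of the retry
 * policy in find_compression_threshold() above: keep halving the CFB
 * size while doubling the compression threshold, giving up beyond the
 * hardware's 1:4 limit. try_alloc() is an invented stand-in for
 * drm_mm_insert_node().
 */
#include <stdbool.h>
#include <stdio.h>

static bool try_alloc(int size)
{
        return size <= 4096;            /* pretend only 4KiB is free */
}

int main(void)
{
        int size = 16384, threshold = 1;

        size <<= 1;                     /* first attempt over-allocates 2x */
        while (!try_alloc(size)) {
                size >>= 1;             /* retry with half the size... */
                threshold <<= 1;        /* ...compressed twice as hard */
                if (threshold > 4)
                        break;          /* HW limits the CFB to 1:4 */
        }
        if (threshold > 4)
                printf("fbc disabled\n");
        else
                printf("size=%d threshold=%d\n", size, threshold);
        return 0;
}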
static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mm_node *uninitialized_var(compressed_llb);
int ret;
ret = find_compression_threshold(dev, &dev_priv->fbc.compressed_fb,
size, fb_cpp);
if (!ret)
goto err_llb;
else if (ret > 1) {
DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
}
dev_priv->fbc.threshold = ret;
if (HAS_PCH_SPLIT(dev))
I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
else if (IS_GM45(dev)) {
I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
} else {
compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
if (!compressed_llb)
goto err_fb;
ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_llb,
4096, 4096, DRM_MM_SEARCH_DEFAULT);
if (ret)
goto err_fb;
dev_priv->fbc.compressed_llb = compressed_llb;
I915_WRITE(FBC_CFB_BASE,
dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
I915_WRITE(FBC_LL_BASE,
dev_priv->mm.stolen_base + compressed_llb->start);
}
dev_priv->fbc.size = size / dev_priv->fbc.threshold;
DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
size);
return 0;
err_fb:
kfree(compressed_llb);
drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
err_llb:
pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
return -ENOSPC;
}
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return -ENODEV;
if (size < dev_priv->fbc.size)
return 0;
/* Release any current block */
i915_gem_stolen_cleanup_compression(dev);
return i915_setup_compression(dev, size, fb_cpp);
}
void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->fbc.size == 0)
return;
drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
if (dev_priv->fbc.compressed_llb) {
drm_mm_remove_node(dev_priv->fbc.compressed_llb);
kfree(dev_priv->fbc.compressed_llb);
}
dev_priv->fbc.size = 0;
}
void i915_gem_cleanup_stolen(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return;
i915_gem_stolen_cleanup_compression(dev);
drm_mm_takedown(&dev_priv->mm.stolen);
}
int i915_gem_init_stolen(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 tmp;
int bios_reserved = 0;
#ifdef CONFIG_INTEL_IOMMU
if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
DRM_INFO("DMAR active, disabling use of stolen memory\n");
return 0;
}
#endif
if (dev_priv->gtt.stolen_size == 0)
return 0;
dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
if (dev_priv->mm.stolen_base == 0)
return 0;
DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);
if (INTEL_INFO(dev)->gen >= 8) {
tmp = I915_READ(GEN7_BIOS_RESERVED);
tmp >>= GEN8_BIOS_RESERVED_SHIFT;
tmp &= GEN8_BIOS_RESERVED_MASK;
bios_reserved = (1024*1024) << tmp;
} else if (IS_GEN7(dev)) {
tmp = I915_READ(GEN7_BIOS_RESERVED);
bios_reserved = tmp & GEN7_BIOS_RESERVED_256K ?
256*1024 : 1024*1024;
}
if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
return 0;
/* Basic memrange allocator for stolen space */
drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
bios_reserved);
return 0;
}
static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
u32 offset, u32 size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct sg_table *st;
struct scatterlist *sg;
DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
BUG_ON(offset > dev_priv->gtt.stolen_size - size);
/* We hide that we have no struct page backing our stolen object
* by wrapping the contiguous physical allocation with a fake
* dma mapping in a single scatterlist.
*/
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
return NULL;
if (sg_alloc_table(st, 1, GFP_KERNEL)) {
kfree(st);
return NULL;
}
sg = st->sgl;
sg->offset = 0;
sg->length = size;
sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
sg_dma_len(sg) = size;
return st;
}
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
BUG();
return -EINVAL;
}
static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
{
/* Should only be called during free */
sg_free_table(obj->pages);
kfree(obj->pages);
}
static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
if (obj->stolen) {
drm_mm_remove_node(obj->stolen);
kfree(obj->stolen);
obj->stolen = NULL;
}
}
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
.get_pages = i915_gem_object_get_pages_stolen,
.put_pages = i915_gem_object_put_pages_stolen,
.release = i915_gem_object_release_stolen,
};
static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_device *dev,
struct drm_mm_node *stolen)
{
struct drm_i915_gem_object *obj;
obj = i915_gem_object_alloc(dev);
if (obj == NULL)
return NULL;
drm_gem_private_object_init(dev, &obj->base, stolen->size);
i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
obj->pages = i915_pages_create_for_stolen(dev,
stolen->start, stolen->size);
if (obj->pages == NULL)
goto cleanup;
obj->has_dma_mapping = true;
i915_gem_object_pin_pages(obj);
obj->stolen = stolen;
obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;
return obj;
cleanup:
i915_gem_object_free(obj);
return NULL;
}
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
int ret;
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return NULL;
DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
if (size == 0)
return NULL;
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
if (!stolen)
return NULL;
ret = drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size,
4096, DRM_MM_SEARCH_DEFAULT);
if (ret) {
kfree(stolen);
return NULL;
}
obj = _i915_gem_object_create_stolen(dev, stolen);
if (obj)
return obj;
drm_mm_remove_node(stolen);
kfree(stolen);
return NULL;
}
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
u32 stolen_offset,
u32 gtt_offset,
u32 size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_address_space *ggtt = &dev_priv->gtt.base;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
struct i915_vma *vma;
int ret;
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return NULL;
DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
stolen_offset, gtt_offset, size);
/* KISS and expect everything to be page-aligned */
if (WARN_ON(size == 0 || stolen_offset & 4095 || size & 4095))
return NULL;
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
if (!stolen)
return NULL;
stolen->start = stolen_offset;
stolen->size = size;
ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen space\n");
kfree(stolen);
return NULL;
}
obj = _i915_gem_object_create_stolen(dev, stolen);
if (obj == NULL) {
DRM_DEBUG_KMS("failed to allocate stolen object\n");
drm_mm_remove_node(stolen);
kfree(stolen);
return NULL;
}
/* Some objects just need physical mem from stolen space */
if (gtt_offset == I915_GTT_OFFSET_NONE)
return obj;
vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_out;
}
/* To simplify the initialisation sequence between KMS and GTT,
* we allow construction of the stolen object prior to
* setting up the GTT space. The actual reservation will occur
* later.
*/
vma->node.start = gtt_offset;
vma->node.size = size;
if (drm_mm_initialized(&ggtt->mm)) {
ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
goto err_vma;
}
}
obj->has_global_gtt_mapping = 1;
list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&vma->mm_list, &ggtt->inactive_list);
i915_gem_object_pin_pages(obj);
return obj;
err_vma:
i915_gem_vma_destroy(vma);
err_out:
drm_mm_remove_node(stolen);
kfree(stolen);
drm_gem_object_unreference(&obj->base);
return NULL;
}

View file

@@ -0,0 +1,519 @@
/*
* Copyright © 2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
#include <linux/string.h>
#include <linux/bitops.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
/** @file i915_gem_tiling.c
*
* Support for managing tiling state of buffer objects.
*
* The idea behind tiling is to increase cache hit rates by rearranging
* pixel data so that a group of pixel accesses are in the same cacheline.
* Performance improvement from doing this on the back/depth buffer are on
* the order of 30%.
*
* Intel architectures make this somewhat more complicated, though, by
* adjustments made to addressing of data when the memory is in interleaved
* mode (matched pairs of DIMMS) to improve memory bandwidth.
* For interleaved memory, the CPU sends every sequential 64 bytes
* to an alternate memory channel so it can get the bandwidth from both.
*
* The GPU also rearranges its accesses for increased bandwidth to interleaved
* memory, and it matches what the CPU does for non-tiled. However, when tiled
* it does it a little differently, since one walks addresses not just in the
* X direction but also Y. So, along with alternating channels when bit
* 6 of the address flips, it also alternates when other bits flip -- Bits 9
* (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
* are common to both the 915 and 965-class hardware.
*
* The CPU also sometimes XORs in higher bits as well, to improve
* bandwidth doing strided access like we do so frequently in graphics. This
* is called "Channel XOR Randomization" in the MCH documentation. The result
* is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
* decode.
*
* All of this bit 6 XORing has an effect on our memory management,
* as we need to make sure that the 3d driver can correctly address object
* contents.
*
* If we don't have interleaved memory, all tiling is safe and no swizzling is
* required.
*
* When bit 17 is XORed in, we simply refuse to tile at all. Bit
 * 17 is not just a page offset, so as we page an object out and back in,
* individual pages in it will have different bit 17 addresses, resulting in
* each 64 bytes being swapped with its neighbor!
*
* Otherwise, if interleaved, we have to tell the 3d driver what the address
* swizzling it needs to do is, since it's writing with the CPU to the pages
* (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
* pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
* required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
* to match what the GPU expects.
*/
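/*
 * Illustrative sketch (standalone userspace C, not driver code) of the
 * 9_10 swizzle described above: the channel-select bit 6 of an address
 * is XORed with bits 9 and 10, which is what the CPU must reproduce
 * when writing tiled data for the GPU.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t swizzle_9_10(uint32_t addr)
{
        uint32_t bit9 = (addr >> 9) & 1;
        uint32_t bit10 = (addr >> 10) & 1;

        return addr ^ ((bit9 ^ bit10) << 6);
}

int main(void)
{
        /* bit 9 set, bit 10 clear: bit 6 flips, 0x200 -> 0x240 */
        printf("0x200 -> 0x%x\n", swizzle_9_10(0x200));
        return 0;
}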
/**
* Detects bit 6 swizzling of address lookup between IGD access and CPU
* access through main memory.
*/
void
i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
if (INTEL_INFO(dev)->gen >= 8 || IS_VALLEYVIEW(dev)) {
/*
* On BDW+, swizzling is not used. We leave the CPU memory
* controller in charge of optimizing memory accesses without
* the extra address manipulation GPU side.
*
* VLV and CHV don't have GPU swizzling.
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
} else if (INTEL_INFO(dev)->gen >= 6) {
uint32_t dimm_c0, dimm_c1;
dimm_c0 = I915_READ(MAD_DIMM_C0);
dimm_c1 = I915_READ(MAD_DIMM_C1);
dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
/* Enable swizzling when the channels are populated with
* identically sized dimms. We don't need to check the 3rd
* channel because no cpu with gpu attached ships in that
* configuration. Also, swizzling only makes sense for 2
* channels anyway. */
if (dimm_c0 == dimm_c1) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
} else {
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
} else if (IS_GEN5(dev)) {
/* On Ironlake, whatever the DRAM config, the GPU always does
 * the same swizzling setup.
 */
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
} else if (IS_GEN2(dev)) {
/* As far as we know, the 865 doesn't have these bit 6
* swizzling issues.
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
} else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
uint32_t dcc;
/* On 9xx chipsets, channel interleave by the CPU is
* determined by DCC. For single-channel, neither the CPU
* nor the GPU do swizzling. For dual channel interleaved,
* the GPU's interleave is bit 9 and 10 for X tiled, and bit
* 9 for Y tiled. The CPU's interleave is independent, and
* can be based on either bit 11 (haven't seen this yet) or
* bit 17 (common).
*/
dcc = I915_READ(DCC);
switch (dcc & DCC_ADDRESSING_MODE_MASK) {
case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
break;
case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
if (dcc & DCC_CHANNEL_XOR_DISABLE) {
/* This is the base swizzling by the GPU for
* tiled buffers.
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
} else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
/* Bit 11 swizzling by the CPU in addition. */
swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
swizzle_y = I915_BIT_6_SWIZZLE_9_11;
} else {
/* Bit 17 swizzling by the CPU in addition. */
swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
swizzle_y = I915_BIT_6_SWIZZLE_9_17;
}
break;
}
if (dcc == 0xffffffff) {
DRM_ERROR("Couldn't read from MCHBAR. "
"Disabling tiling.\n");
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
}
} else {
/* The 965, G33, and newer, have a very flexible memory
* configuration. It will enable dual-channel mode
* (interleaving) on as much memory as it can, and the GPU
* will additionally sometimes enable different bit 6
* swizzling for tiled objects from the CPU.
*
* Here's what I found on the G965:
* slot fill memory size swizzling
* 0A 0B 1A 1B 1-ch 2-ch
* 512 0 0 0 512 0 O
* 512 0 512 0 16 1008 X
* 512 0 0 512 16 1008 X
* 0 512 0 512 16 1008 X
* 1024 1024 1024 0 2048 1024 O
*
* We could probably detect this based on either the DRB
* matching, which was the case for the swizzling required in
* the table above, or from the 1-ch value being less than
* the minimum size of a rank.
*/
if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
} else {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
}
}
dev_priv->mm.bit_6_swizzle_x = swizzle_x;
dev_priv->mm.bit_6_swizzle_y = swizzle_y;
}
/* Check pitch constraints for all chips & tiling formats */
static bool
i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
{
int tile_width;
/* Linear is always fine */
if (tiling_mode == I915_TILING_NONE)
return true;
if (IS_GEN2(dev) ||
(tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
tile_width = 128;
else
tile_width = 512;
/* check maximum stride & object size */
/* i965+ stores the end address of the gtt mapping in the fence
 * reg, so don't bother to check the size */
if (INTEL_INFO(dev)->gen >= 7) {
if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL)
return false;
} else if (INTEL_INFO(dev)->gen >= 4) {
if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
return false;
} else {
if (stride > 8192)
return false;
if (IS_GEN3(dev)) {
if (size > I830_FENCE_MAX_SIZE_VAL << 20)
return false;
} else {
if (size > I830_FENCE_MAX_SIZE_VAL << 19)
return false;
}
}
if (stride < tile_width)
return false;
/* 965+ just needs multiples of tile width */
if (INTEL_INFO(dev)->gen >= 4) {
if (stride & (tile_width - 1))
return false;
return true;
}
/* Pre-965 needs power of two tile widths */
if (stride & (stride - 1))
return false;
return true;
}
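/*
 * Illustrative sketch (standalone userspace C, not driver code) of the
 * two stride rules in i915_tiling_ok() above: 965+ only needs a
 * multiple of the tile width, pre-965 needs a power-of-two stride.
 */
#include <stdbool.h>
#include <stdio.h>

static bool stride_ok_gen4(int stride, int tile_width)
{
        return (stride & (tile_width - 1)) == 0;        /* multiple of tile width */
}

static bool stride_ok_gen2(int stride)
{
        return (stride & (stride - 1)) == 0;            /* power of two */
}

int main(void)
{
        printf("gen4, 1536/512: %d\n", stride_ok_gen4(1536, 512));      /* ok */
        printf("gen2, 1536:     %d\n", stride_ok_gen2(1536));           /* not ok */
        return 0;
}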
/* Is the current GTT allocation valid for the change in tiling? */
static bool
i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
{
u32 size;
if (tiling_mode == I915_TILING_NONE)
return true;
if (INTEL_INFO(obj->base.dev)->gen >= 4)
return true;
if (INTEL_INFO(obj->base.dev)->gen == 3) {
if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
return false;
} else {
if (i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK)
return false;
}
size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
if (i915_gem_obj_ggtt_size(obj) != size)
return false;
if (i915_gem_obj_ggtt_offset(obj) & (size - 1))
return false;
return true;
}
/**
* Sets the tiling mode of an object, returning the required swizzling of
* bit 6 of addresses in the object.
*/
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_set_tiling *args = data;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
int ret = 0;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (&obj->base == NULL)
return -ENOENT;
if (!i915_tiling_ok(dev,
args->stride, obj->base.size, args->tiling_mode)) {
drm_gem_object_unreference_unlocked(&obj->base);
return -EINVAL;
}
mutex_lock(&dev->struct_mutex);
if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) {
ret = -EBUSY;
goto err;
}
if (args->tiling_mode == I915_TILING_NONE) {
args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
args->stride = 0;
} else {
if (args->tiling_mode == I915_TILING_X)
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
else
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
/* Hide bit 17 swizzling from the user. This prevents old Mesa
* from aborting the application on sw fallbacks to bit 17,
* and we use the pread/pwrite bit17 paths to swizzle for it.
* If there was a user that was relying on the swizzle
* information for drm_intel_bo_map()ed reads/writes this would
* break it, but we don't have any of those.
*/
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
/* If we can't handle the swizzling, make it untiled. */
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
args->tiling_mode = I915_TILING_NONE;
args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
args->stride = 0;
}
}
if (args->tiling_mode != obj->tiling_mode ||
args->stride != obj->stride) {
/* We need to rebind the object if its current allocation
* no longer meets the alignment restrictions for its new
* tiling mode. Otherwise we can just leave it alone, but
* need to ensure that any fence register is updated before
* the next fenced (either through the GTT or by the BLT unit
* on older GPUs) access.
*
* After updating the tiling parameters, we then flag whether
* we need to update an associated fence register. Note this
* has to also include the unfenced register the GPU uses
* whilst executing a fenced command for an untiled object.
*/
if (obj->map_and_fenceable &&
!i915_gem_object_fence_ok(obj, args->tiling_mode))
ret = i915_gem_object_ggtt_unbind(obj);
if (ret == 0) {
obj->fence_dirty =
obj->last_fenced_seqno ||
obj->fence_reg != I915_FENCE_REG_NONE;
obj->tiling_mode = args->tiling_mode;
obj->stride = args->stride;
/* Force the fence to be reacquired for GTT access */
i915_gem_release_mmap(obj);
}
}
/* we have to maintain this existing ABI... */
args->stride = obj->stride;
args->tiling_mode = obj->tiling_mode;
/* Try to preallocate memory required to save swizzling on put-pages */
if (i915_gem_object_needs_bit17_swizzle(obj)) {
if (obj->bit_17 == NULL) {
obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
sizeof(long), GFP_KERNEL);
}
} else {
kfree(obj->bit_17);
obj->bit_17 = NULL;
}
err:
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
return ret;
}
/**
* Returns the current tiling mode and required bit 6 swizzling for the object.
*/
int
i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_get_tiling *args = data;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (&obj->base == NULL)
return -ENOENT;
mutex_lock(&dev->struct_mutex);
args->tiling_mode = obj->tiling_mode;
switch (obj->tiling_mode) {
case I915_TILING_X:
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
break;
case I915_TILING_Y:
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
break;
case I915_TILING_NONE:
args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
break;
default:
DRM_ERROR("unknown tiling mode\n");
}
/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
return 0;
}
/**
* Swap every 64 bytes of this page around, to account for it having a new
* bit 17 of its physical address and therefore being interpreted differently
* by the GPU.
*/
static void
i915_gem_swizzle_page(struct page *page)
{
char temp[64];
char *vaddr;
int i;
vaddr = kmap(page);
for (i = 0; i < PAGE_SIZE; i += 128) {
memcpy(temp, &vaddr[i], 64);
memcpy(&vaddr[i], &vaddr[i + 64], 64);
memcpy(&vaddr[i + 64], temp, 64);
}
kunmap(page);
}
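/*
 * Illustrative sketch (standalone userspace C, not driver code): the
 * 64-byte swap in i915_gem_swizzle_page() above is its own inverse, so
 * applying it twice restores the page -- which is why the same routine
 * serves both after swap-in and before swap-out.
 */
#include <assert.h>
#include <string.h>

#define PG 4096

static void swizzle(char *p)
{
        char tmp[64];

        for (int i = 0; i < PG; i += 128) {
                memcpy(tmp, &p[i], 64);
                memcpy(&p[i], &p[i + 64], 64);
                memcpy(&p[i + 64], tmp, 64);
        }
}

int main(void)
{
        static char a[PG], b[PG];

        for (int i = 0; i < PG; i++)
                a[i] = b[i] = (char)i;
        swizzle(a);
        swizzle(a);
        assert(memcmp(a, b, PG) == 0);  /* swizzling twice is a no-op */
        return 0;
}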
void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
struct sg_page_iter sg_iter;
int i;
if (obj->bit_17 == NULL)
return;
i = 0;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
struct page *page = sg_page_iter_page(&sg_iter);
char new_bit_17 = page_to_phys(page) >> 17;
if ((new_bit_17 & 0x1) !=
(test_bit(i, obj->bit_17) != 0)) {
i915_gem_swizzle_page(page);
set_page_dirty(page);
}
i++;
}
}
void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
struct sg_page_iter sg_iter;
int page_count = obj->base.size >> PAGE_SHIFT;
int i;
if (obj->bit_17 == NULL) {
obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
sizeof(long), GFP_KERNEL);
if (obj->bit_17 == NULL) {
DRM_ERROR("Failed to allocate memory for bit 17 "
"record\n");
return;
}
}
i = 0;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17))
__set_bit(i, obj->bit_17);
else
__clear_bit(i, obj->bit_17);
i++;
}
}

View file

@@ -0,0 +1,854 @@
/*
* Copyright © 2012-2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include "drmP.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
struct i915_mm_struct {
struct mm_struct *mm;
struct drm_device *dev;
struct i915_mmu_notifier *mn;
struct hlist_node node;
struct kref kref;
struct work_struct work;
};
#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>
struct i915_mmu_notifier {
spinlock_t lock;
struct hlist_node node;
struct mmu_notifier mn;
struct rb_root objects;
struct list_head linear;
unsigned long serial;
bool has_linear;
};
struct i915_mmu_object {
struct i915_mmu_notifier *mn;
struct interval_tree_node it;
struct list_head link;
struct drm_i915_gem_object *obj;
bool is_linear;
};
static unsigned long cancel_userptr(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
unsigned long end;
mutex_lock(&dev->struct_mutex);
/* Cancel any active worker and force us to re-evaluate gup */
obj->userptr.work = NULL;
if (obj->pages != NULL) {
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_vma *vma, *tmp;
bool was_interruptible;
was_interruptible = dev_priv->mm.interruptible;
dev_priv->mm.interruptible = false;
list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
int ret = i915_vma_unbind(vma);
WARN_ON(ret && ret != -EIO);
}
WARN_ON(i915_gem_object_put_pages(obj));
dev_priv->mm.interruptible = was_interruptible;
}
end = obj->userptr.ptr + obj->base.size;
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
return end;
}
static void *invalidate_range__linear(struct i915_mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
unsigned long end)
{
struct i915_mmu_object *mo;
unsigned long serial;
restart:
serial = mn->serial;
list_for_each_entry(mo, &mn->linear, link) {
struct drm_i915_gem_object *obj;
if (mo->it.last < start || mo->it.start > end)
continue;
obj = mo->obj;
if (!kref_get_unless_zero(&obj->base.refcount))
continue;
spin_unlock(&mn->lock);
cancel_userptr(obj);
spin_lock(&mn->lock);
if (serial != mn->serial)
goto restart;
}
return NULL;
}
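/*
 * Illustrative sketch (standalone userspace C, not driver code) of the
 * overlap test used in the linear walk above; both ranges are treated
 * as inclusive [start, last], matching the interval tree convention.
 */
#include <stdbool.h>
#include <stdio.h>

static bool overlaps(unsigned long start, unsigned long last,
                     unsigned long inv_start, unsigned long inv_last)
{
        return !(last < inv_start || start > inv_last);
}

int main(void)
{
        /* object [0x1000, 0x1fff] vs invalidation of [0x1800, 0x2800] */
        printf("%s\n", overlaps(0x1000, 0x1fff, 0x1800, 0x2800)
               ? "cancel userptr" : "skip");
        return 0;
}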
static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
struct mm_struct *mm,
unsigned long start,
unsigned long end)
{
struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn);
struct interval_tree_node *it = NULL;
unsigned long next = start;
unsigned long serial = 0;
end--; /* interval ranges are inclusive, but invalidate range is exclusive */
while (next < end) {
struct drm_i915_gem_object *obj = NULL;
spin_lock(&mn->lock);
if (mn->has_linear)
it = invalidate_range__linear(mn, mm, start, end);
else if (serial == mn->serial)
it = interval_tree_iter_next(it, next, end);
else
it = interval_tree_iter_first(&mn->objects, start, end);
if (it != NULL) {
obj = container_of(it, struct i915_mmu_object, it)->obj;
/* The mmu_object is released late when destroying the
* GEM object so it is entirely possible to gain a
* reference on an object in the process of being freed
* since our serialisation is via the spinlock and not
* the struct_mutex - and consequently use it after it
* is freed and then double free it.
*/
if (!kref_get_unless_zero(&obj->base.refcount)) {
spin_unlock(&mn->lock);
serial = 0;
continue;
}
serial = mn->serial;
}
spin_unlock(&mn->lock);
if (obj == NULL)
return;
next = cancel_userptr(obj);
}
}
static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
.invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
};
static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct mm_struct *mm)
{
struct i915_mmu_notifier *mn;
int ret;
mn = kmalloc(sizeof(*mn), GFP_KERNEL);
if (mn == NULL)
return ERR_PTR(-ENOMEM);
spin_lock_init(&mn->lock);
mn->mn.ops = &i915_gem_userptr_notifier;
mn->objects = RB_ROOT;
mn->serial = 1;
INIT_LIST_HEAD(&mn->linear);
mn->has_linear = false;
/* Protected by mmap_sem (write-lock) */
ret = __mmu_notifier_register(&mn->mn, mm);
if (ret) {
kfree(mn);
return ERR_PTR(ret);
}
return mn;
}
static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mn)
{
if (++mn->serial == 0)
mn->serial = 1;
}
static int
i915_mmu_notifier_add(struct drm_device *dev,
struct i915_mmu_notifier *mn,
struct i915_mmu_object *mo)
{
struct interval_tree_node *it;
int ret;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
/* Make sure we drop the final active reference (and thereby
* remove the objects from the interval tree) before we do
* the check for overlapping objects.
*/
i915_gem_retire_requests(dev);
spin_lock(&mn->lock);
it = interval_tree_iter_first(&mn->objects,
mo->it.start, mo->it.last);
if (it) {
struct drm_i915_gem_object *obj;
/* We only need to check the first object in the range as it
* either has cancelled gup work queued and we need to
* return back to the user to give time for the gup-workers
* to flush their object references upon which the object will
 * be removed from the interval-tree, or the range is
* still in use by another client and the overlap is invalid.
*
* If we do have an overlap, we cannot use the interval tree
* for fast range invalidation.
*/
obj = container_of(it, struct i915_mmu_object, it)->obj;
if (!obj->userptr.workers)
mn->has_linear = mo->is_linear = true;
else
ret = -EAGAIN;
} else
interval_tree_insert(&mo->it, &mn->objects);
if (ret == 0) {
list_add(&mo->link, &mn->linear);
__i915_mmu_notifier_update_serial(mn);
}
spin_unlock(&mn->lock);
mutex_unlock(&dev->struct_mutex);
return ret;
}
static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mn)
{
struct i915_mmu_object *mo;
list_for_each_entry(mo, &mn->linear, link)
if (mo->is_linear)
return true;
return false;
}
static void
i915_mmu_notifier_del(struct i915_mmu_notifier *mn,
struct i915_mmu_object *mo)
{
spin_lock(&mn->lock);
list_del(&mo->link);
if (mo->is_linear)
mn->has_linear = i915_mmu_notifier_has_linear(mn);
else
interval_tree_remove(&mo->it, &mn->objects);
__i915_mmu_notifier_update_serial(mn);
spin_unlock(&mn->lock);
}
static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
struct i915_mmu_object *mo;
mo = obj->userptr.mmu_object;
if (mo == NULL)
return;
i915_mmu_notifier_del(mo->mn, mo);
kfree(mo);
obj->userptr.mmu_object = NULL;
}
static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
struct i915_mmu_notifier *mn = mm->mn;
if (mn)
return mn;
down_write(&mm->mm->mmap_sem);
mutex_lock(&to_i915(mm->dev)->mm_lock);
if ((mn = mm->mn) == NULL) {
mn = i915_mmu_notifier_create(mm->mm);
if (!IS_ERR(mn))
mm->mn = mn;
}
mutex_unlock(&to_i915(mm->dev)->mm_lock);
up_write(&mm->mm->mmap_sem);
return mn;
}
static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
unsigned flags)
{
struct i915_mmu_notifier *mn;
struct i915_mmu_object *mo;
int ret;
if (flags & I915_USERPTR_UNSYNCHRONIZED)
return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
if (WARN_ON(obj->userptr.mm == NULL))
return -EINVAL;
mn = i915_mmu_notifier_find(obj->userptr.mm);
if (IS_ERR(mn))
return PTR_ERR(mn);
mo = kzalloc(sizeof(*mo), GFP_KERNEL);
if (mo == NULL)
return -ENOMEM;
mo->mn = mn;
mo->it.start = obj->userptr.ptr;
mo->it.last = mo->it.start + obj->base.size - 1;
mo->obj = obj;
ret = i915_mmu_notifier_add(obj->base.dev, mn, mo);
if (ret) {
kfree(mo);
return ret;
}
obj->userptr.mmu_object = mo;
return 0;
}
static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
struct mm_struct *mm)
{
if (mn == NULL)
return;
mmu_notifier_unregister(&mn->mn, mm);
kfree(mn);
}
#else
static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}
static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
unsigned flags)
{
if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
return -ENODEV;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
return 0;
}
static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
struct mm_struct *mm)
{
}
#endif
static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
struct i915_mm_struct *mm;
/* Protected by dev_priv->mm_lock */
hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
if (mm->mm == real)
return mm;
return NULL;
}
static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_mm_struct *mm;
int ret = 0;
/* During release of the GEM object we hold the struct_mutex. This
* precludes us from calling mmput() at that time as that may be
* the last reference and so call exit_mmap(). exit_mmap() will
* attempt to reap the vma, and if we were holding a GTT mmap
* would then call drm_gem_vm_close() and attempt to reacquire
* the struct mutex. So in order to avoid that recursion, we have
* to defer releasing the mm reference until after we drop the
* struct_mutex, i.e. we need to schedule a worker to do the clean
* up.
*/
mutex_lock(&dev_priv->mm_lock);
mm = __i915_mm_struct_find(dev_priv, current->mm);
if (mm == NULL) {
mm = kmalloc(sizeof(*mm), GFP_KERNEL);
if (mm == NULL) {
ret = -ENOMEM;
goto out;
}
kref_init(&mm->kref);
mm->dev = obj->base.dev;
mm->mm = current->mm;
atomic_inc(&current->mm->mm_count);
mm->mn = NULL;
/* Protected by dev_priv->mm_lock */
hash_add(dev_priv->mm_structs,
&mm->node, (unsigned long)mm->mm);
} else
kref_get(&mm->kref);
obj->userptr.mm = mm;
out:
mutex_unlock(&dev_priv->mm_lock);
return ret;
}
static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
i915_mmu_notifier_free(mm->mn, mm->mm);
mmdrop(mm->mm);
kfree(mm);
}
static void
__i915_mm_struct_free(struct kref *kref)
{
struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);
/* Protected by dev_priv->mm_lock */
hash_del(&mm->node);
mutex_unlock(&to_i915(mm->dev)->mm_lock);
INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
schedule_work(&mm->work);
}
static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
if (obj->userptr.mm == NULL)
return;
kref_put_mutex(&obj->userptr.mm->kref,
__i915_mm_struct_free,
&to_i915(obj->base.dev)->mm_lock);
obj->userptr.mm = NULL;
}
struct get_pages_work {
struct work_struct work;
struct drm_i915_gem_object *obj;
struct task_struct *task;
};
#if IS_ENABLED(CONFIG_SWIOTLB)
#define swiotlb_active() swiotlb_nr_tbl()
#else
#define swiotlb_active() 0
#endif
static int
st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
{
struct scatterlist *sg;
int ret, n;
*st = kmalloc(sizeof(**st), GFP_KERNEL);
if (*st == NULL)
return -ENOMEM;
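/* Why the split below (an assumption about the rationale): with swiotlb
 * active, DMA may be bounced through a limited buffer, so each sg entry
 * is kept to a single page instead of letting
 * sg_alloc_table_from_pages() coalesce contiguous pages into segments
 * larger than the bounce buffer can handle. */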
if (swiotlb_active()) {
ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
if (ret)
goto err;
for_each_sg((*st)->sgl, sg, num_pages, n)
sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
} else {
ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
0, num_pages << PAGE_SHIFT,
GFP_KERNEL);
if (ret)
goto err;
}
return 0;
err:
kfree(*st);
*st = NULL;
return ret;
}
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
struct get_pages_work *work = container_of(_work, typeof(*work), work);
struct drm_i915_gem_object *obj = work->obj;
struct drm_device *dev = obj->base.dev;
const int num_pages = obj->base.size >> PAGE_SHIFT;
struct page **pvec;
int pinned, ret;
ret = -ENOMEM;
pinned = 0;
pvec = kmalloc(num_pages*sizeof(struct page *),
GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
if (pvec == NULL)
pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
if (pvec != NULL) {
struct mm_struct *mm = obj->userptr.mm->mm;
down_read(&mm->mmap_sem);
while (pinned < num_pages) {
ret = get_user_pages(work->task, mm,
obj->userptr.ptr + pinned * PAGE_SIZE,
num_pages - pinned,
!obj->userptr.read_only, 0,
pvec + pinned, NULL);
if (ret < 0)
break;
pinned += ret;
}
up_read(&mm->mmap_sem);
}
mutex_lock(&dev->struct_mutex);
if (obj->userptr.work != &work->work) {
ret = 0;
} else if (pinned == num_pages) {
ret = st_set_pages(&obj->pages, pvec, num_pages);
if (ret == 0) {
list_add_tail(&obj->global_list, &to_i915(dev)->mm.unbound_list);
pinned = 0;
}
}
obj->userptr.work = ERR_PTR(ret);
obj->userptr.workers--;
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
release_pages(pvec, pinned, 0);
drm_free_large(pvec);
put_task_struct(work->task);
kfree(work);
}
static int
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
const int num_pages = obj->base.size >> PAGE_SHIFT;
struct page **pvec;
int pinned, ret;
/* If userspace should engineer that these pages are replaced in
* the vma between us binding this page into the GTT and completion
* of rendering... Their loss. If they change the mapping of their
* pages they need to create a new bo to point to the new vma.
*
* However, that still leaves open the possibility of the vma
* being copied upon fork. Which falls under the same userspace
* synchronisation issue as a regular bo, except that this time
* the process may not be expecting that a particular piece of
* memory is tied to the GPU.
*
* Fortunately, we can hook into the mmu_notifier in order to
* discard the page references prior to anything nasty happening
* to the vma (discard or cloning) which should prevent the more
* egregious cases from causing harm.
*/
pvec = NULL;
pinned = 0;
if (obj->userptr.mm->mm == current->mm) {
pvec = kmalloc(num_pages*sizeof(struct page *),
GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
if (pvec == NULL) {
pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
if (pvec == NULL)
return -ENOMEM;
}
pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
!obj->userptr.read_only, pvec);
}
if (pinned < num_pages) {
if (pinned < 0) {
ret = pinned;
pinned = 0;
} else {
/* Spawn a worker so that we can acquire the
* user pages without holding our mutex. Access
* to the user pages requires mmap_sem, and we have
* a strict lock ordering of mmap_sem, struct_mutex -
* we already hold struct_mutex here and so cannot
* call gup without encountering a lock inversion.
*
* Userspace will keep on repeating the operation
* (thanks to EAGAIN) until either we hit the fast
* path or the worker completes. If the worker is
* cancelled or superseded, the task is still run
* but the results ignored. (This leads to
* complications that we may have a stray object
* refcount that we need to be wary of when
* checking for existing objects during creation.)
* If the worker encounters an error, it reports
* that error back to this function through
* obj->userptr.work = ERR_PTR.
*/
ret = -EAGAIN;
if (obj->userptr.work == NULL &&
obj->userptr.workers < I915_GEM_USERPTR_MAX_WORKERS) {
struct get_pages_work *work;
work = kmalloc(sizeof(*work), GFP_KERNEL);
if (work != NULL) {
obj->userptr.work = &work->work;
obj->userptr.workers++;
work->obj = obj;
drm_gem_object_reference(&obj->base);
work->task = current;
get_task_struct(work->task);
INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
schedule_work(&work->work);
} else
ret = -ENOMEM;
} else {
if (IS_ERR(obj->userptr.work)) {
ret = PTR_ERR(obj->userptr.work);
obj->userptr.work = NULL;
}
}
}
} else {
ret = st_set_pages(&obj->pages, pvec, num_pages);
if (ret == 0) {
obj->userptr.work = NULL;
pinned = 0;
}
}
release_pages(pvec, pinned, 0);
drm_free_large(pvec);
return ret;
}
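/*
 * The -EAGAIN handshake above relies on the caller simply repeating the
 * operation until the worker has pinned the pages. A minimal sketch of
 * that retry loop from the userspace side; the exec_one() wrapper and
 * its arguments are hypothetical, only the errno contract is taken from
 * the code above.
 */
#include <errno.h>
#include <sys/ioctl.h>

static int exec_one(int fd, unsigned long request, void *arg)
{
	int ret;

	do {
		ret = ioctl(fd, request, arg);	/* retried while the worker pins pages */
	} while (ret == -1 && (errno == EAGAIN || errno == EINTR));

	return ret;
}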
static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
{
struct sg_page_iter sg_iter;
BUG_ON(obj->userptr.work != NULL);
if (obj->madv != I915_MADV_WILLNEED)
obj->dirty = 0;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
struct page *page = sg_page_iter_page(&sg_iter);
if (obj->dirty)
set_page_dirty(page);
mark_page_accessed(page);
page_cache_release(page);
}
obj->dirty = 0;
sg_free_table(obj->pages);
kfree(obj->pages);
}
static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
i915_gem_userptr_release__mmu_notifier(obj);
i915_gem_userptr_release__mm_struct(obj);
}
static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
if (obj->userptr.mmu_object)
return 0;
return i915_gem_userptr_init__mmu_notifier(obj, 0);
}
static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
.dmabuf_export = i915_gem_userptr_dmabuf_export,
.get_pages = i915_gem_userptr_get_pages,
.put_pages = i915_gem_userptr_put_pages,
.release = i915_gem_userptr_release,
};
/**
* Creates a new mm object that wraps some normal memory from the process
* context - user memory.
*
* We impose several restrictions upon the memory being mapped
* into the GPU.
* 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
* 2. It must be normal system memory, not a pointer into another map of IO
* space (e.g. it must not be a GTT mmapping of another object).
* 3. We only allow a bo as large as we could in theory map into the GTT,
* that is we limit the size to the total size of the GTT.
* 4. The bo is marked as being snoopable. The backing pages are left
* accessible directly by the CPU, but reads and writes by the GPU may
* incur the cost of a snoop (unless you have an LLC architecture).
*
* Synchronisation between multiple users and the GPU is left to userspace
* through the normal set-domain-ioctl. The kernel will enforce that the
* GPU relinquishes the VMA before it is returned back to the system
* i.e. upon free(), munmap() or process termination. However, the userspace
* malloc() library may not immediately relinquish the VMA after free() and
* instead reuse it whilst the GPU is still reading and writing to the VMA.
* Caveat emptor.
*
* Also note, that the object created here is not currently a "first class"
* object, in that several ioctls are banned. These are the CPU access
* ioctls: mmap(), pwrite and pread. In practice, you are expected to use
* direct access via your pointer rather than use those ioctls.
*
* If you think this is a good interface to use to pass GPU memory between
* drivers, please use dma-buf instead. In fact, wherever possible use
* dma-buf instead.
*/
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_userptr *args = data;
struct drm_i915_gem_object *obj;
int ret;
u32 handle;
if (args->flags & ~(I915_USERPTR_READ_ONLY |
I915_USERPTR_UNSYNCHRONIZED))
return -EINVAL;
if (offset_in_page(args->user_ptr | args->user_size))
return -EINVAL;
if (args->user_size > dev_priv->gtt.base.total)
return -E2BIG;
if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
(char __user *)(unsigned long)args->user_ptr, args->user_size))
return -EFAULT;
if (args->flags & I915_USERPTR_READ_ONLY) {
/* On almost all of the current hw, we cannot tell the GPU that a
* page is readonly, so this is just a placeholder in the uAPI.
*/
return -ENODEV;
}
obj = i915_gem_object_alloc(dev);
if (obj == NULL)
return -ENOMEM;
drm_gem_private_object_init(dev, &obj->base, args->user_size);
i915_gem_object_init(obj, &i915_gem_userptr_ops);
obj->cache_level = I915_CACHE_LLC;
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->userptr.ptr = args->user_ptr;
obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);
/* And keep a pointer to the current->mm for resolving the user pages
* at binding. This means that we need to hook into the mmu_notifier
* in order to detect if the mmu is destroyed.
*/
ret = i915_gem_userptr_init__mm_struct(obj);
if (ret == 0)
ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
if (ret == 0)
ret = drm_gem_handle_create(file, &obj->base, &handle);
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(&obj->base);
if (ret)
return ret;
args->handle = handle;
return 0;
}
int
i915_gem_init_userptr(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
mutex_init(&dev_priv->mm_lock);
hash_init(dev_priv->mm_structs);
return 0;
}
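/*
 * For reference, a minimal standalone userspace sketch of creating a
 * userptr object against the uAPI documented above. Error handling is
 * trimmed; the /dev/dri/card0 device path and libdrm's include path for
 * i915_drm.h are assumptions, while the ioctl number and struct fields
 * come from the uAPI header.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/i915_drm.h>

int main(void)
{
	struct drm_i915_gem_userptr arg;
	size_t sz = 2 * 4096;			/* page aligned, rule 1 above */
	void *mem = NULL;
	int fd;

	fd = open("/dev/dri/card0", O_RDWR);	/* device node: assumption */
	if (fd < 0 || posix_memalign(&mem, 4096, sz))
		return 1;

	memset(&arg, 0, sizeof(arg));
	arg.user_ptr = (uintptr_t)mem;		/* page-aligned start */
	arg.user_size = sz;			/* page-aligned size */
	arg.flags = 0;				/* snooped and synchronized */

	if (ioctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
		return 1;

	printf("userptr bo handle %u\n", arg.handle);
	close(fd);
	return 0;
}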

File diff suppressed because it is too large

View file

@@ -0,0 +1,221 @@
/**
* \file i915_ioc32.c
*
* 32-bit ioctl compatibility routines for the i915 DRM.
*
* \author Alan Hourihane <alanh@fairlite.demon.co.uk>
*
*
* Copyright (C) Paul Mackerras 2005
* Copyright (C) Alan Hourihane 2005
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <linux/compat.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
typedef struct _drm_i915_batchbuffer32 {
int start; /* agp offset */
int used; /* nr bytes in use */
int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
int num_cliprects; /* multipass with multiple cliprects? */
u32 cliprects; /* pointer to userspace cliprects */
} drm_i915_batchbuffer32_t;
static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_i915_batchbuffer32_t batchbuffer32;
drm_i915_batchbuffer_t __user *batchbuffer;
if (copy_from_user
(&batchbuffer32, (void __user *)arg, sizeof(batchbuffer32)))
return -EFAULT;
batchbuffer = compat_alloc_user_space(sizeof(*batchbuffer));
if (!access_ok(VERIFY_WRITE, batchbuffer, sizeof(*batchbuffer))
|| __put_user(batchbuffer32.start, &batchbuffer->start)
|| __put_user(batchbuffer32.used, &batchbuffer->used)
|| __put_user(batchbuffer32.DR1, &batchbuffer->DR1)
|| __put_user(batchbuffer32.DR4, &batchbuffer->DR4)
|| __put_user(batchbuffer32.num_cliprects,
&batchbuffer->num_cliprects)
|| __put_user((int __user *)(unsigned long)batchbuffer32.cliprects,
&batchbuffer->cliprects))
return -EFAULT;
return drm_ioctl(file, DRM_IOCTL_I915_BATCHBUFFER,
(unsigned long)batchbuffer);
}
typedef struct _drm_i915_cmdbuffer32 {
u32 buf; /* pointer to userspace command buffer */
int sz; /* nr bytes in buf */
int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
int num_cliprects; /* multipass with multiple cliprects? */
u32 cliprects; /* pointer to userspace cliprects */
} drm_i915_cmdbuffer32_t;
static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_i915_cmdbuffer32_t cmdbuffer32;
drm_i915_cmdbuffer_t __user *cmdbuffer;
if (copy_from_user
(&cmdbuffer32, (void __user *)arg, sizeof(cmdbuffer32)))
return -EFAULT;
cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer));
if (!access_ok(VERIFY_WRITE, cmdbuffer, sizeof(*cmdbuffer))
|| __put_user((int __user *)(unsigned long)cmdbuffer32.buf,
&cmdbuffer->buf)
|| __put_user(cmdbuffer32.sz, &cmdbuffer->sz)
|| __put_user(cmdbuffer32.DR1, &cmdbuffer->DR1)
|| __put_user(cmdbuffer32.DR4, &cmdbuffer->DR4)
|| __put_user(cmdbuffer32.num_cliprects, &cmdbuffer->num_cliprects)
|| __put_user((int __user *)(unsigned long)cmdbuffer32.cliprects,
&cmdbuffer->cliprects))
return -EFAULT;
return drm_ioctl(file, DRM_IOCTL_I915_CMDBUFFER,
(unsigned long)cmdbuffer);
}
typedef struct drm_i915_irq_emit32 {
u32 irq_seq;
} drm_i915_irq_emit32_t;
static int compat_i915_irq_emit(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_i915_irq_emit32_t req32;
drm_i915_irq_emit_t __user *request;
if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
|| __put_user((int __user *)(unsigned long)req32.irq_seq,
&request->irq_seq))
return -EFAULT;
return drm_ioctl(file, DRM_IOCTL_I915_IRQ_EMIT,
(unsigned long)request);
}
typedef struct drm_i915_getparam32 {
int param;
u32 value;
} drm_i915_getparam32_t;
static int compat_i915_getparam(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_i915_getparam32_t req32;
drm_i915_getparam_t __user *request;
if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
|| __put_user(req32.param, &request->param)
|| __put_user((void __user *)(unsigned long)req32.value,
&request->value))
return -EFAULT;
return drm_ioctl(file, DRM_IOCTL_I915_GETPARAM,
(unsigned long)request);
}
typedef struct drm_i915_mem_alloc32 {
int region;
int alignment;
int size;
u32 region_offset; /* offset from start of fb or agp */
} drm_i915_mem_alloc32_t;
static int compat_i915_alloc(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_i915_mem_alloc32_t req32;
drm_i915_mem_alloc_t __user *request;
if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
|| __put_user(req32.region, &request->region)
|| __put_user(req32.alignment, &request->alignment)
|| __put_user(req32.size, &request->size)
|| __put_user((void __user *)(unsigned long)req32.region_offset,
&request->region_offset))
return -EFAULT;
return drm_ioctl(file, DRM_IOCTL_I915_ALLOC,
(unsigned long)request);
}
static drm_ioctl_compat_t *i915_compat_ioctls[] = {
[DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
[DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
[DRM_I915_GETPARAM] = compat_i915_getparam,
[DRM_I915_IRQ_EMIT] = compat_i915_irq_emit,
[DRM_I915_ALLOC] = compat_i915_alloc
};
#ifdef CONFIG_COMPAT
/**
* Called whenever a 32-bit process running under a 64-bit kernel
* performs an ioctl on /dev/dri/card<n>.
*
* \param filp file pointer.
* \param cmd command.
* \param arg user argument.
* \return zero on success or negative number on failure.
*/
long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
unsigned int nr = DRM_IOCTL_NR(cmd);
drm_ioctl_compat_t *fn = NULL;
int ret;
if (nr < DRM_COMMAND_BASE)
return drm_compat_ioctl(filp, cmd, arg);
if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
if (fn != NULL)
ret = (*fn) (filp, cmd, arg);
else
ret = drm_ioctl(filp, cmd, arg);
return ret;
}
#endif
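/*
 * Why these shims exist: 32-bit userspace lays pointers out as 4-byte
 * fields, so the legacy structs differ in size and field offsets from
 * their 64-bit kernel counterparts. A standalone sketch of the mismatch
 * (a host-side illustration, not the kernel structs themselves):
 */
#include <stdint.h>
#include <stdio.h>

struct batch32 {			/* what 32-bit userspace passes */
	int32_t start, used, DR1, DR4, num_cliprects;
	uint32_t cliprects;		/* user pointer stored as u32 */
};

struct batch64 {			/* what the native handler expects */
	int32_t start, used, DR1, DR4, num_cliprects;
	void *cliprects;		/* 8 bytes on LP64, at a padded offset */
};

int main(void)
{
	/* The size/offset difference below is exactly why
	 * compat_i915_batchbuffer() rewrites the struct field by field. */
	printf("32-bit layout: %zu bytes, 64-bit layout: %zu bytes\n",
	       sizeof(struct batch32), sizeof(struct batch64));
	return 0;
}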

File diff suppressed because it is too large

View file

@@ -0,0 +1,175 @@
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "i915_drv.h"
struct i915_params i915 __read_mostly = {
.modeset = -1,
.panel_ignore_lid = 1,
.powersave = 1,
.semaphores = -1,
.lvds_downclock = 0,
.lvds_channel_mode = 0,
.panel_use_ssc = -1,
.vbt_sdvo_panel_type = -1,
.enable_rc6 = -1,
.enable_fbc = -1,
.enable_execlists = 0,
.enable_hangcheck = true,
.enable_ppgtt = -1,
.enable_psr = 0,
.preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
.disable_power_well = 1,
.enable_ips = 1,
.fastboot = 0,
.prefault_disable = 0,
.reset = true,
.invert_brightness = 0,
.disable_display = 0,
.enable_cmd_parser = 1,
.disable_vtd_wa = 0,
.use_mmio_flip = 0,
.mmio_debug = 0,
};
module_param_named(modeset, i915.modeset, int, 0400);
MODULE_PARM_DESC(modeset,
"Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
"1=on, -1=force vga console preference [default])");
module_param_named(panel_ignore_lid, i915.panel_ignore_lid, int, 0600);
MODULE_PARM_DESC(panel_ignore_lid,
"Override lid status (0=autodetect, 1=autodetect disabled [default], "
"-1=force lid closed, -2=force lid open)");
module_param_named(powersave, i915.powersave, int, 0600);
MODULE_PARM_DESC(powersave,
"Enable powersavings, fbc, downclocking, etc. (default: true)");
module_param_named_unsafe(semaphores, i915.semaphores, int, 0400);
MODULE_PARM_DESC(semaphores,
"Use semaphores for inter-ring sync "
"(default: -1 (use per-chip defaults))");
module_param_named_unsafe(enable_rc6, i915.enable_rc6, int, 0400);
MODULE_PARM_DESC(enable_rc6,
"Enable power-saving render C-state 6. "
"Different stages can be selected via bitmask values "
"(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
"For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
"default: -1 (use per-chip default)");
module_param_named_unsafe(enable_fbc, i915.enable_fbc, int, 0600);
MODULE_PARM_DESC(enable_fbc,
"Enable frame buffer compression for power savings "
"(default: -1 (use per-chip default))");
module_param_named(lvds_downclock, i915.lvds_downclock, int, 0400);
MODULE_PARM_DESC(lvds_downclock,
"Use panel (LVDS/eDP) downclocking for power savings "
"(default: false)");
module_param_named(lvds_channel_mode, i915.lvds_channel_mode, int, 0600);
MODULE_PARM_DESC(lvds_channel_mode,
"Specify LVDS channel mode "
"(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
module_param_named(lvds_use_ssc, i915.panel_use_ssc, int, 0600);
MODULE_PARM_DESC(lvds_use_ssc,
"Use Spread Spectrum Clock with panels [LVDS/eDP] "
"(default: auto from VBT)");
module_param_named(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0600);
MODULE_PARM_DESC(vbt_sdvo_panel_type,
"Override/Ignore selection of SDVO panel mode in the VBT "
"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
module_param_named(reset, i915.reset, bool, 0600);
MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
module_param_named(enable_hangcheck, i915.enable_hangcheck, bool, 0644);
MODULE_PARM_DESC(enable_hangcheck,
"Periodically check GPU activity for detecting hangs. "
"WARNING: Disabling this can cause system wide hangs. "
"(default: true)");
module_param_named_unsafe(enable_ppgtt, i915.enable_ppgtt, int, 0400);
MODULE_PARM_DESC(enable_ppgtt,
"Override PPGTT usage. "
"(-1=auto [default], 0=disabled, 1=aliasing, 2=full)");
module_param_named(enable_execlists, i915.enable_execlists, int, 0400);
MODULE_PARM_DESC(enable_execlists,
"Override execlists usage. "
"(-1=auto, 0=disabled [default], 1=enabled)");
module_param_named(enable_psr, i915.enable_psr, int, 0600);
MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
module_param_named(preliminary_hw_support, i915.preliminary_hw_support, int, 0600);
MODULE_PARM_DESC(preliminary_hw_support,
"Enable preliminary hardware support.");
module_param_named(disable_power_well, i915.disable_power_well, int, 0600);
MODULE_PARM_DESC(disable_power_well,
"Disable the power well when possible (default: true)");
module_param_named(enable_ips, i915.enable_ips, int, 0600);
MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
module_param_named(fastboot, i915.fastboot, bool, 0600);
MODULE_PARM_DESC(fastboot,
"Try to skip unnecessary mode sets at boot time (default: false)");
module_param_named(prefault_disable, i915.prefault_disable, bool, 0600);
MODULE_PARM_DESC(prefault_disable,
"Disable page prefaulting for pread/pwrite/reloc (default:false). "
"For developers only.");
module_param_named(invert_brightness, i915.invert_brightness, int, 0600);
MODULE_PARM_DESC(invert_brightness,
"Invert backlight brightness "
"(-1 force normal, 0 machine defaults, 1 force inversion), please "
"report PCI device ID, subsystem vendor and subsystem device ID "
"to dri-devel@lists.freedesktop.org, if your machine needs it. "
"It will then be included in an upcoming module version.");
module_param_named(disable_display, i915.disable_display, bool, 0600);
MODULE_PARM_DESC(disable_display, "Disable display (default: false)");
module_param_named(disable_vtd_wa, i915.disable_vtd_wa, bool, 0600);
MODULE_PARM_DESC(disable_vtd_wa, "Disable all VT-d workarounds (default: false)");
module_param_named(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
MODULE_PARM_DESC(enable_cmd_parser,
"Enable command parsing (1=enabled [default], 0=disabled)");
module_param_named(use_mmio_flip, i915.use_mmio_flip, int, 0600);
MODULE_PARM_DESC(use_mmio_flip,
"use MMIO flips (-1=never, 0=driver discretion [default], 1=always)");
module_param_named(mmio_debug, i915.mmio_debug, bool, 0600);
MODULE_PARM_DESC(mmio_debug,
"Enable the MMIO debug code (default: false). This may negatively "
"affect performance.");

File diff suppressed because it is too large

View file

@@ -0,0 +1,405 @@
/*
*
* Copyright 2008 (c) Intel Corporation
* Jesse Barnes <jbarnes@virtuousgeek.org>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "intel_drv.h"
#include "i915_reg.h"
static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE8(index_port, reg);
return I915_READ8(data_port);
}
static u8 i915_read_ar(struct drm_device *dev, u16 st01, u8 reg, u16 palette_enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_READ8(st01);
I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
return I915_READ8(VGA_AR_DATA_READ);
}
static void i915_write_ar(struct drm_device *dev, u16 st01, u8 reg, u8 val, u16 palette_enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_READ8(st01);
I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
I915_WRITE8(VGA_AR_DATA_WRITE, val);
}
static void i915_write_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg, u8 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE8(index_port, reg);
I915_WRITE8(data_port, val);
}
static void i915_save_vga(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
u16 cr_index, cr_data, st01;
/* VGA state */
dev_priv->regfile.saveVGA0 = I915_READ(VGA0);
dev_priv->regfile.saveVGA1 = I915_READ(VGA1);
dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD);
dev_priv->regfile.saveVGACNTRL = I915_READ(i915_vgacntrl_reg(dev));
/* VGA color palette registers */
dev_priv->regfile.saveDACMASK = I915_READ8(VGA_DACMASK);
/* MSR bits */
dev_priv->regfile.saveMSR = I915_READ8(VGA_MSR_READ);
if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
cr_index = VGA_CR_INDEX_CGA;
cr_data = VGA_CR_DATA_CGA;
st01 = VGA_ST01_CGA;
} else {
cr_index = VGA_CR_INDEX_MDA;
cr_data = VGA_CR_DATA_MDA;
st01 = VGA_ST01_MDA;
}
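/* Standard VGA: CR11 bit 7 write-protects CR0-CR7. Clearing it here
 * (and in the saved copy below) lets the restore path rewrite the
 * full CRTC register set. */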
/* CRT controller regs */
i915_write_indexed(dev, cr_index, cr_data, 0x11,
i915_read_indexed(dev, cr_index, cr_data, 0x11) &
(~0x80));
for (i = 0; i <= 0x24; i++)
dev_priv->regfile.saveCR[i] =
i915_read_indexed(dev, cr_index, cr_data, i);
/* Make sure we don't turn off CR group 0 writes */
dev_priv->regfile.saveCR[0x11] &= ~0x80;
/* Attribute controller registers */
I915_READ8(st01);
dev_priv->regfile.saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
for (i = 0; i <= 0x14; i++)
dev_priv->regfile.saveAR[i] = i915_read_ar(dev, st01, i, 0);
I915_READ8(st01);
I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX);
I915_READ8(st01);
/* Graphics controller registers */
for (i = 0; i < 9; i++)
dev_priv->regfile.saveGR[i] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i);
dev_priv->regfile.saveGR[0x10] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
dev_priv->regfile.saveGR[0x11] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
dev_priv->regfile.saveGR[0x18] =
i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
/* Sequencer registers */
for (i = 0; i < 8; i++)
dev_priv->regfile.saveSR[i] =
i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i);
}
static void i915_restore_vga(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
u16 cr_index, cr_data, st01;
/* VGA state */
I915_WRITE(i915_vgacntrl_reg(dev), dev_priv->regfile.saveVGACNTRL);
I915_WRITE(VGA0, dev_priv->regfile.saveVGA0);
I915_WRITE(VGA1, dev_priv->regfile.saveVGA1);
I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD);
POSTING_READ(VGA_PD);
udelay(150);
/* MSR bits */
I915_WRITE8(VGA_MSR_WRITE, dev_priv->regfile.saveMSR);
if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
cr_index = VGA_CR_INDEX_CGA;
cr_data = VGA_CR_DATA_CGA;
st01 = VGA_ST01_CGA;
} else {
cr_index = VGA_CR_INDEX_MDA;
cr_data = VGA_CR_DATA_MDA;
st01 = VGA_ST01_MDA;
}
/* Sequencer registers, don't write SR07 */
for (i = 0; i < 7; i++)
i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i,
dev_priv->regfile.saveSR[i]);
/* CRT controller regs */
/* Enable CR group 0 writes */
i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->regfile.saveCR[0x11]);
for (i = 0; i <= 0x24; i++)
i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->regfile.saveCR[i]);
/* Graphics controller regs */
for (i = 0; i < 9; i++)
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i,
dev_priv->regfile.saveGR[i]);
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
dev_priv->regfile.saveGR[0x10]);
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
dev_priv->regfile.saveGR[0x11]);
i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
dev_priv->regfile.saveGR[0x18]);
/* Attribute controller registers */
I915_READ8(st01); /* switch back to index mode */
for (i = 0; i <= 0x14; i++)
i915_write_ar(dev, st01, i, dev_priv->regfile.saveAR[i], 0);
I915_READ8(st01); /* switch back to index mode */
I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX | 0x20);
I915_READ8(st01);
/* VGA color palette registers */
I915_WRITE8(VGA_DACMASK, dev_priv->regfile.saveDACMASK);
}
static void i915_save_display(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
/* Display arbitration control */
if (INTEL_INFO(dev)->gen <= 4)
dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
/* This is only meaningful in non-KMS mode */
/* Don't save these registers in KMS mode */
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_save_display_reg(dev);
/* LVDS state */
if (HAS_PCH_SPLIT(dev)) {
dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
dev_priv->regfile.saveBLC_HIST_CTL =
I915_READ(VLV_BLC_HIST_CTL(PIPE_A));
dev_priv->regfile.saveBLC_HIST_CTL_B =
I915_READ(VLV_BLC_HIST_CTL(PIPE_B));
} else {
dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
if (IS_MOBILE(dev) && !IS_I830(dev))
dev_priv->regfile.saveLVDS = I915_READ(LVDS);
}
if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
if (HAS_PCH_SPLIT(dev)) {
dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
} else {
dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR);
}
/* save FBC interval */
if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_save_vga(dev);
}
static void i915_restore_display(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 mask = 0xffffffff;
/* Display arbitration */
if (INTEL_INFO(dev)->gen <= 4)
I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_restore_display_reg(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET))
mask = ~LVDS_PORT_EN;
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS & mask);
else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
I915_WRITE(LVDS, dev_priv->regfile.saveLVDS & mask);
if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
I915_WRITE(RSTDBYCTL,
dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
} else if (IS_VALLEYVIEW(dev)) {
I915_WRITE(VLV_BLC_HIST_CTL(PIPE_A),
dev_priv->regfile.saveBLC_HIST_CTL);
I915_WRITE(VLV_BLC_HIST_CTL(PIPE_B),
dev_priv->regfile.saveBLC_HIST_CTL_B);
} else {
I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
}
/* Only restore the FBC interval on platforms that support FBC */
intel_disable_fbc(dev);
/* restore FBC interval */
if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_restore_vga(dev);
else
i915_redisable_vga(dev);
}
int i915_save_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
mutex_lock(&dev->struct_mutex);
i915_save_display(dev);
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
/* Interrupt state */
if (HAS_PCH_SPLIT(dev)) {
dev_priv->regfile.saveDEIER = I915_READ(DEIER);
dev_priv->regfile.saveDEIMR = I915_READ(DEIMR);
dev_priv->regfile.saveGTIER = I915_READ(GTIER);
dev_priv->regfile.saveGTIMR = I915_READ(GTIMR);
dev_priv->regfile.saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
dev_priv->regfile.saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
dev_priv->regfile.saveMCHBAR_RENDER_STANDBY =
I915_READ(RSTDBYCTL);
dev_priv->regfile.savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
} else {
dev_priv->regfile.saveIER = I915_READ(IER);
dev_priv->regfile.saveIMR = I915_READ(IMR);
}
}
if (IS_GEN4(dev))
pci_read_config_word(dev->pdev, GCDGMBUS,
&dev_priv->regfile.saveGCDGMBUS);
/* Cache mode state */
if (INTEL_INFO(dev)->gen < 7)
dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
/* Memory Arbitration state */
dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
/* Scratch space */
for (i = 0; i < 16; i++) {
dev_priv->regfile.saveSWF0[i] = I915_READ(SWF00 + (i << 2));
dev_priv->regfile.saveSWF1[i] = I915_READ(SWF10 + (i << 2));
}
for (i = 0; i < 3; i++)
dev_priv->regfile.saveSWF2[i] = I915_READ(SWF30 + (i << 2));
mutex_unlock(&dev->struct_mutex);
return 0;
}
int i915_restore_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
mutex_lock(&dev->struct_mutex);
i915_gem_restore_fences(dev);
if (IS_GEN4(dev))
pci_write_config_word(dev->pdev, GCDGMBUS,
dev_priv->regfile.saveGCDGMBUS);
i915_restore_display(dev);
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
/* Interrupt state */
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(DEIER, dev_priv->regfile.saveDEIER);
I915_WRITE(DEIMR, dev_priv->regfile.saveDEIMR);
I915_WRITE(GTIER, dev_priv->regfile.saveGTIER);
I915_WRITE(GTIMR, dev_priv->regfile.saveGTIMR);
I915_WRITE(_FDI_RXA_IMR, dev_priv->regfile.saveFDI_RXA_IMR);
I915_WRITE(_FDI_RXB_IMR, dev_priv->regfile.saveFDI_RXB_IMR);
I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->regfile.savePCH_PORT_HOTPLUG);
} else {
I915_WRITE(IER, dev_priv->regfile.saveIER);
I915_WRITE(IMR, dev_priv->regfile.saveIMR);
}
}
/* Cache mode state */
if (INTEL_INFO(dev)->gen < 7)
I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 |
0xffff0000);
/* Memory arbitration state */
I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000);
for (i = 0; i < 16; i++) {
I915_WRITE(SWF00 + (i << 2), dev_priv->regfile.saveSWF0[i]);
I915_WRITE(SWF10 + (i << 2), dev_priv->regfile.saveSWF1[i]);
}
for (i = 0; i < 3; i++)
I915_WRITE(SWF30 + (i << 2), dev_priv->regfile.saveSWF2[i]);
mutex_unlock(&dev->struct_mutex);
intel_i2c_reset(dev);
return 0;
}

View file

@@ -0,0 +1,644 @@
/*
* Copyright © 2012 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Ben Widawsky <ben@bwidawsk.net>
*
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
#include "intel_drv.h"
#include "i915_drv.h"
#define dev_to_drm_minor(d) dev_get_drvdata((d))
#ifdef CONFIG_PM
static u32 calc_residency(struct drm_device *dev, const u32 reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u64 raw_time; /* 32b value may overflow during fixed point math */
u64 units = 128ULL, div = 100000ULL, bias = 100ULL;
u32 ret;
if (!intel_enable_rc6(dev))
return 0;
intel_runtime_pm_get(dev_priv);
/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
if (IS_VALLEYVIEW(dev)) {
u32 clk_reg, czcount_30ns; /* clk_reg: avoid shadowing the 'reg' parameter used below */
if (IS_CHERRYVIEW(dev))
clk_reg = CHV_CLK_CTL1;
else
clk_reg = VLV_CLK_CTL2;
czcount_30ns = I915_READ(clk_reg) >> CLK_CTL2_CZCOUNT_30NS_SHIFT;
if (!czcount_30ns) {
WARN(1, "bogus CZ count value");
ret = 0;
goto out;
}
units = 0;
div = 1000000ULL;
if (IS_CHERRYVIEW(dev)) {
/* Special case for 320 MHz */
if (czcount_30ns == 1) {
div = 10000000ULL;
units = 3125ULL;
} else {
/* chv counts are one less */
czcount_30ns += 1;
}
}
if (units == 0)
units = DIV_ROUND_UP_ULL(30ULL * bias,
(u64)czcount_30ns);
if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
units <<= 8;
div = div * bias;
}
raw_time = I915_READ(reg) * units;
ret = DIV_ROUND_UP_ULL(raw_time, div);
out:
intel_runtime_pm_put(dev_priv);
return ret;
}
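/*
 * Sanity check of the arithmetic above for the non-VLV case: with
 * units = 128 and div = 100000, each counter tick is 1.28 us and the
 * result is milliseconds, rounded up. A standalone sketch with a
 * hypothetical counter value:
 */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t ticks = 781250;	/* hypothetical raw counter value */
	uint64_t units = 128, div = 100000;
	uint64_t ms = (ticks * units + div - 1) / div;	/* DIV_ROUND_UP */

	/* 781250 ticks * 1.28 us = 1000000 us = 1000 ms */
	printf("%" PRIu64 " ticks -> %" PRIu64 " ms\n", ticks, ms);
	return 0;
}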
static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_minor *dminor = dev_to_drm_minor(kdev);
return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
}
static ssize_t
show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_minor *dminor = dev_get_drvdata(kdev);
u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}
static ssize_t
show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_minor *dminor = dev_to_drm_minor(kdev);
u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
if (IS_VALLEYVIEW(dminor->dev))
rc6p_residency = 0;
return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}
static ssize_t
show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_minor *dminor = dev_to_drm_minor(kdev);
u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
if (IS_VALLEYVIEW(dminor->dev))
rc6pp_residency = 0;
return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}
static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
static struct attribute *rc6_attrs[] = {
&dev_attr_rc6_enable.attr,
&dev_attr_rc6_residency_ms.attr,
&dev_attr_rc6p_residency_ms.attr,
&dev_attr_rc6pp_residency_ms.attr,
NULL
};
static struct attribute_group rc6_attr_group = {
.name = power_group_name,
.attrs = rc6_attrs
};
#endif
static int l3_access_valid(struct drm_device *dev, loff_t offset)
{
if (!HAS_L3_DPF(dev))
return -EPERM;
if (offset % 4 != 0)
return -EINVAL;
if (offset >= GEN7_L3LOG_SIZE)
return -ENXIO;
return 0;
}
static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t offset, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct drm_minor *dminor = dev_to_drm_minor(dev);
struct drm_device *drm_dev = dminor->dev;
struct drm_i915_private *dev_priv = drm_dev->dev_private;
int slice = (int)(uintptr_t)attr->private;
int ret;
count = round_down(count, 4);
ret = l3_access_valid(drm_dev, offset);
if (ret)
return ret;
count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
ret = i915_mutex_lock_interruptible(drm_dev);
if (ret)
return ret;
if (dev_priv->l3_parity.remap_info[slice])
memcpy(buf,
dev_priv->l3_parity.remap_info[slice] + (offset/4),
count);
else
memset(buf, 0, count);
mutex_unlock(&drm_dev->struct_mutex);
return count;
}
static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t offset, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct drm_minor *dminor = dev_to_drm_minor(dev);
struct drm_device *drm_dev = dminor->dev;
struct drm_i915_private *dev_priv = drm_dev->dev_private;
struct intel_context *ctx;
u32 *temp = NULL; /* Just here to make handling failures easy */
int slice = (int)(uintptr_t)attr->private;
int ret;
if (!HAS_HW_CONTEXTS(drm_dev))
return -ENXIO;
ret = l3_access_valid(drm_dev, offset);
if (ret)
return ret;
ret = i915_mutex_lock_interruptible(drm_dev);
if (ret)
return ret;
if (!dev_priv->l3_parity.remap_info[slice]) {
temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
if (!temp) {
mutex_unlock(&drm_dev->struct_mutex);
return -ENOMEM;
}
}
ret = i915_gpu_idle(drm_dev);
if (ret) {
kfree(temp);
mutex_unlock(&drm_dev->struct_mutex);
return ret;
}
/* TODO: Ideally we really want a GPU reset here to make sure errors
* aren't propagated. Since I cannot find a stable way to reset the GPU
* at this point it is left as a TODO.
*/
if (temp)
dev_priv->l3_parity.remap_info[slice] = temp;
memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);
/* NB: We defer the remapping until we switch to the context */
list_for_each_entry(ctx, &dev_priv->context_list, link)
ctx->remap_slice |= (1<<slice);
mutex_unlock(&drm_dev->struct_mutex);
return count;
}
static struct bin_attribute dpf_attrs = {
.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
.size = GEN7_L3LOG_SIZE,
.read = i915_l3_read,
.write = i915_l3_write,
.mmap = NULL,
.private = (void *)0
};
static struct bin_attribute dpf_attrs_1 = {
.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
.size = GEN7_L3LOG_SIZE,
.read = i915_l3_read,
.write = i915_l3_write,
.mmap = NULL,
.private = (void *)1
};
static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev_priv->dev)) {
u32 freq;
freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
ret = vlv_gpu_freq(dev_priv, (freq >> 8) & 0xff);
} else {
ret = dev_priv->rps.cur_freq * GT_FREQUENCY_MULTIPLIER;
}
mutex_unlock(&dev_priv->rps.hw_lock);
intel_runtime_pm_put(dev_priv);
return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
return snprintf(buf, PAGE_SIZE, "%d\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
}
static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev_priv->dev))
ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
else
ret = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock);
return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
static ssize_t gt_max_freq_mhz_store(struct device *kdev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
ssize_t ret;
ret = kstrtou32(buf, 0, &val);
if (ret)
return ret;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev_priv->dev))
val = vlv_freq_opcode(dev_priv, val);
else
val /= GT_FREQUENCY_MULTIPLIER;
if (val < dev_priv->rps.min_freq ||
val > dev_priv->rps.max_freq ||
val < dev_priv->rps.min_freq_softlimit) {
mutex_unlock(&dev_priv->rps.hw_lock);
return -EINVAL;
}
if (val > dev_priv->rps.rp0_freq)
DRM_DEBUG("User requested overclocking to %d\n",
val * GT_FREQUENCY_MULTIPLIER);
dev_priv->rps.max_freq_softlimit = val;
if (dev_priv->rps.cur_freq > val) {
if (IS_VALLEYVIEW(dev))
valleyview_set_rps(dev, val);
else
gen6_set_rps(dev, val);
} else if (!IS_VALLEYVIEW(dev)) {
/* We still need gen6_set_rps to process the new max_delay and
* update the interrupt limits even though frequency request is
* unchanged. */
gen6_set_rps(dev, dev_priv->rps.cur_freq);
}
mutex_unlock(&dev_priv->rps.hw_lock);
return count;
}
static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev_priv->dev))
ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
else
ret = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev_priv->rps.hw_lock);
return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
static ssize_t gt_min_freq_mhz_store(struct device *kdev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
ssize_t ret;
ret = kstrtou32(buf, 0, &val);
if (ret)
return ret;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_VALLEYVIEW(dev))
val = vlv_freq_opcode(dev_priv, val);
else
val /= GT_FREQUENCY_MULTIPLIER;
if (val < dev_priv->rps.min_freq ||
val > dev_priv->rps.max_freq ||
val > dev_priv->rps.max_freq_softlimit) {
mutex_unlock(&dev_priv->rps.hw_lock);
return -EINVAL;
}
dev_priv->rps.min_freq_softlimit = val;
if (dev_priv->rps.cur_freq < val) {
if (IS_VALLEYVIEW(dev))
valleyview_set_rps(dev, val);
else
gen6_set_rps(dev, val);
} else if (!IS_VALLEYVIEW(dev)) {
/* We still need gen6_set_rps to process the new min_delay and
* update the interrupt limits even though frequency request is
* unchanged. */
gen6_set_rps(dev, dev_priv->rps.cur_freq);
}
mutex_unlock(&dev_priv->rps.hw_lock);
return count;
}
static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
/* For now we have a static number of RP states */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val, rp_state_cap;
ssize_t ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
intel_runtime_pm_get(dev_priv);
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
if (attr == &dev_attr_gt_RP0_freq_mhz) {
if (IS_VALLEYVIEW(dev))
val = vlv_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
else
val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
} else if (attr == &dev_attr_gt_RP1_freq_mhz) {
if (IS_VALLEYVIEW(dev))
val = vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
else
val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
} else if (attr == &dev_attr_gt_RPn_freq_mhz) {
if (IS_VALLEYVIEW(dev))
val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq);
else
val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
} else {
BUG();
}
return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
static const struct attribute *gen6_attrs[] = {
&dev_attr_gt_cur_freq_mhz.attr,
&dev_attr_gt_max_freq_mhz.attr,
&dev_attr_gt_min_freq_mhz.attr,
&dev_attr_gt_RP0_freq_mhz.attr,
&dev_attr_gt_RP1_freq_mhz.attr,
&dev_attr_gt_RPn_freq_mhz.attr,
NULL,
};
static const struct attribute *vlv_attrs[] = {
&dev_attr_gt_cur_freq_mhz.attr,
&dev_attr_gt_max_freq_mhz.attr,
&dev_attr_gt_min_freq_mhz.attr,
&dev_attr_gt_RP0_freq_mhz.attr,
&dev_attr_gt_RP1_freq_mhz.attr,
&dev_attr_gt_RPn_freq_mhz.attr,
&dev_attr_vlv_rpe_freq_mhz.attr,
NULL,
};
static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct device *kdev = container_of(kobj, struct device, kobj);
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
struct i915_error_state_file_priv error_priv;
struct drm_i915_error_state_buf error_str;
ssize_t ret_count = 0;
int ret;
memset(&error_priv, 0, sizeof(error_priv));
ret = i915_error_state_buf_init(&error_str, to_i915(dev), count, off);
if (ret)
return ret;
error_priv.dev = dev;
i915_error_state_get(dev, &error_priv);
ret = i915_error_state_to_str(&error_str, &error_priv);
if (ret)
goto out;
ret_count = count < error_str.bytes ? count : error_str.bytes;
memcpy(buf, error_str.buf, ret_count);
out:
i915_error_state_put(&error_priv);
i915_error_state_buf_release(&error_str);
return ret ?: ret_count;
}
static ssize_t error_state_write(struct file *file, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct device *kdev = container_of(kobj, struct device, kobj);
struct drm_minor *minor = dev_to_drm_minor(kdev);
struct drm_device *dev = minor->dev;
int ret;
DRM_DEBUG_DRIVER("Resetting error state\n");
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
i915_destroy_error_state(dev);
mutex_unlock(&dev->struct_mutex);
return count;
}
static struct bin_attribute error_state_attr = {
.attr.name = "error",
.attr.mode = S_IRUSR | S_IWUSR,
.size = 0,
.read = error_state_read,
.write = error_state_write,
};
void i915_setup_sysfs(struct drm_device *dev)
{
int ret;
#ifdef CONFIG_PM
if (INTEL_INFO(dev)->gen >= 6) {
ret = sysfs_merge_group(&dev->primary->kdev->kobj,
&rc6_attr_group);
if (ret)
DRM_ERROR("RC6 residency sysfs setup failed\n");
}
#endif
if (HAS_L3_DPF(dev)) {
ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
if (ret)
DRM_ERROR("l3 parity sysfs setup failed\n");
if (NUM_L3_SLICES(dev) > 1) {
ret = device_create_bin_file(dev->primary->kdev,
&dpf_attrs_1);
if (ret)
DRM_ERROR("l3 parity slice 1 setup failed\n");
}
}
ret = 0;
if (IS_VALLEYVIEW(dev))
ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
else if (INTEL_INFO(dev)->gen >= 6)
ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
if (ret)
DRM_ERROR("RPS sysfs setup failed\n");
ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
&error_state_attr);
if (ret)
DRM_ERROR("error_state sysfs setup failed\n");
}
void i915_teardown_sysfs(struct drm_device *dev)
{
sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
if (IS_VALLEYVIEW(dev))
sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
else
sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
device_remove_bin_file(dev->primary->kdev, &dpf_attrs_1);
device_remove_bin_file(dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
#endif
}
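/*
 * The attributes registered above appear under the DRM card's sysfs
 * directory. A standalone userspace sketch reading the current GT
 * frequency; the /sys/class/drm/card0 path assumes DRM minor 0.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/drm/card0/gt_cur_freq_mhz", "r");
	int mhz;

	if (!f || fscanf(f, "%d", &mhz) != 1)
		return 1;
	printf("current GT frequency: %d MHz\n", mhz);
	fclose(f);
	return 0;
}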

View file

@@ -0,0 +1,595 @@
#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _I915_TRACE_H_
#include <linux/stringify.h>
#include <linux/types.h>
#include <linux/tracepoint.h>
#include <drm/drmP.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM i915
#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
#define TRACE_INCLUDE_FILE i915_trace
/* pipe updates */
TRACE_EVENT(i915_pipe_update_start,
TP_PROTO(struct intel_crtc *crtc, u32 min, u32 max),
TP_ARGS(crtc, min, max),
TP_STRUCT__entry(
__field(enum pipe, pipe)
__field(u32, frame)
__field(u32, scanline)
__field(u32, min)
__field(u32, max)
),
TP_fast_assign(
__entry->pipe = crtc->pipe;
__entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
crtc->pipe);
__entry->scanline = intel_get_crtc_scanline(crtc);
__entry->min = min;
__entry->max = max;
),
TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
pipe_name(__entry->pipe), __entry->frame,
__entry->scanline, __entry->min, __entry->max)
);
TRACE_EVENT(i915_pipe_update_vblank_evaded,
TP_PROTO(struct intel_crtc *crtc, u32 min, u32 max, u32 frame),
TP_ARGS(crtc, min, max, frame),
TP_STRUCT__entry(
__field(enum pipe, pipe)
__field(u32, frame)
__field(u32, scanline)
__field(u32, min)
__field(u32, max)
),
TP_fast_assign(
__entry->pipe = crtc->pipe;
__entry->frame = frame;
__entry->scanline = intel_get_crtc_scanline(crtc);
__entry->min = min;
__entry->max = max;
),
TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
pipe_name(__entry->pipe), __entry->frame,
__entry->scanline, __entry->min, __entry->max)
);
TRACE_EVENT(i915_pipe_update_end,
TP_PROTO(struct intel_crtc *crtc, u32 frame),
TP_ARGS(crtc, frame),
TP_STRUCT__entry(
__field(enum pipe, pipe)
__field(u32, frame)
__field(u32, scanline)
),
TP_fast_assign(
__entry->pipe = crtc->pipe;
__entry->frame = frame;
__entry->scanline = intel_get_crtc_scanline(crtc);
),
TP_printk("pipe %c, frame=%u, scanline=%u",
pipe_name(__entry->pipe), __entry->frame,
__entry->scanline)
);
/* object tracking */
TRACE_EVENT(i915_gem_object_create,
TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
__field(u32, size)
),
TP_fast_assign(
__entry->obj = obj;
__entry->size = obj->base.size;
),
TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
);
TRACE_EVENT(i915_vma_bind,
TP_PROTO(struct i915_vma *vma, unsigned flags),
TP_ARGS(vma, flags),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
__field(struct i915_address_space *, vm)
__field(u32, offset)
__field(u32, size)
__field(unsigned, flags)
),
TP_fast_assign(
__entry->obj = vma->obj;
__entry->vm = vma->vm;
__entry->offset = vma->node.start;
__entry->size = vma->node.size;
__entry->flags = flags;
),
TP_printk("obj=%p, offset=%08x size=%x%s vm=%p",
__entry->obj, __entry->offset, __entry->size,
__entry->flags & PIN_MAPPABLE ? ", mappable" : "",
__entry->vm)
);
TRACE_EVENT(i915_vma_unbind,
TP_PROTO(struct i915_vma *vma),
TP_ARGS(vma),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
__field(struct i915_address_space *, vm)
__field(u32, offset)
__field(u32, size)
),
TP_fast_assign(
__entry->obj = vma->obj;
__entry->vm = vma->vm;
__entry->offset = vma->node.start;
__entry->size = vma->node.size;
),
TP_printk("obj=%p, offset=%08x size=%x vm=%p",
__entry->obj, __entry->offset, __entry->size, __entry->vm)
);
TRACE_EVENT(i915_gem_object_change_domain,
TP_PROTO(struct drm_i915_gem_object *obj, u32 old_read, u32 old_write),
TP_ARGS(obj, old_read, old_write),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
__field(u32, read_domains)
__field(u32, write_domain)
),
TP_fast_assign(
__entry->obj = obj;
__entry->read_domains = obj->base.read_domains | (old_read << 16);
__entry->write_domain = obj->base.write_domain | (old_write << 16);
),
TP_printk("obj=%p, read=%02x=>%02x, write=%02x=>%02x",
__entry->obj,
__entry->read_domains >> 16,
__entry->read_domains & 0xffff,
__entry->write_domain >> 16,
__entry->write_domain & 0xffff)
);
TRACE_EVENT(i915_gem_object_pwrite,
TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
TP_ARGS(obj, offset, len),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
__field(u32, offset)
__field(u32, len)
),
TP_fast_assign(
__entry->obj = obj;
__entry->offset = offset;
__entry->len = len;
),
TP_printk("obj=%p, offset=%u, len=%u",
__entry->obj, __entry->offset, __entry->len)
);
TRACE_EVENT(i915_gem_object_pread,
TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
TP_ARGS(obj, offset, len),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
__field(u32, offset)
__field(u32, len)
),
TP_fast_assign(
__entry->obj = obj;
__entry->offset = offset;
__entry->len = len;
),
TP_printk("obj=%p, offset=%u, len=%u",
__entry->obj, __entry->offset, __entry->len)
);
TRACE_EVENT(i915_gem_object_fault,
TP_PROTO(struct drm_i915_gem_object *obj, u32 index, bool gtt, bool write),
TP_ARGS(obj, index, gtt, write),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
__field(u32, index)
__field(bool, gtt)
__field(bool, write)
),
TP_fast_assign(
__entry->obj = obj;
__entry->index = index;
__entry->gtt = gtt;
__entry->write = write;
),
TP_printk("obj=%p, %s index=%u %s",
__entry->obj,
__entry->gtt ? "GTT" : "CPU",
__entry->index,
__entry->write ? ", writable" : "")
);
DECLARE_EVENT_CLASS(i915_gem_object,
TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
),
TP_fast_assign(
__entry->obj = obj;
),
TP_printk("obj=%p", __entry->obj)
);
DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj)
);
DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj)
);
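/*
 * Usage sketch (not part of the original file): each TRACE_EVENT and
 * DEFINE_EVENT above generates a trace_<name>() helper that the driver
 * calls at the matching code path, e.g.:
 *
 *	trace_i915_gem_object_create(obj);
 *	trace_i915_gem_object_destroy(obj);
 *
 * A further event that only needs the object pointer can reuse the
 * i915_gem_object class; the event name below is hypothetical:
 *
 *	DEFINE_EVENT(i915_gem_object, i915_gem_object_example,
 *		     TP_PROTO(struct drm_i915_gem_object *obj),
 *		     TP_ARGS(obj)
 *	);
 */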
TRACE_EVENT(i915_gem_evict,
TP_PROTO(struct drm_device *dev, u32 size, u32 align, unsigned flags),
TP_ARGS(dev, size, align, flags),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, size)
__field(u32, align)
__field(unsigned, flags)
),
TP_fast_assign(
__entry->dev = dev->primary->index;
__entry->size = size;
__entry->align = align;
__entry->flags = flags;
),
TP_printk("dev=%d, size=%d, align=%d %s",
__entry->dev, __entry->size, __entry->align,
__entry->flags & PIN_MAPPABLE ? ", mappable" : "")
);
TRACE_EVENT(i915_gem_evict_everything,
TP_PROTO(struct drm_device *dev),
TP_ARGS(dev),
TP_STRUCT__entry(
__field(u32, dev)
),
TP_fast_assign(
__entry->dev = dev->primary->index;
),
TP_printk("dev=%d", __entry->dev)
);
TRACE_EVENT(i915_gem_evict_vm,
TP_PROTO(struct i915_address_space *vm),
TP_ARGS(vm),
TP_STRUCT__entry(
__field(u32, dev)
__field(struct i915_address_space *, vm)
),
TP_fast_assign(
__entry->dev = vm->dev->primary->index;
__entry->vm = vm;
),
TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
);
TRACE_EVENT(i915_gem_ring_sync_to,
TP_PROTO(struct intel_engine_cs *from,
struct intel_engine_cs *to,
u32 seqno),
TP_ARGS(from, to, seqno),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, sync_from)
__field(u32, sync_to)
__field(u32, seqno)
),
TP_fast_assign(
__entry->dev = from->dev->primary->index;
__entry->sync_from = from->id;
__entry->sync_to = to->id;
__entry->seqno = seqno;
),
TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
__entry->dev,
__entry->sync_from, __entry->sync_to,
__entry->seqno)
);
TRACE_EVENT(i915_gem_ring_dispatch,
TP_PROTO(struct intel_engine_cs *ring, u32 seqno, u32 flags),
TP_ARGS(ring, seqno, flags),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
__field(u32, seqno)
__field(u32, flags)
),
TP_fast_assign(
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
__entry->seqno = seqno;
__entry->flags = flags;
i915_trace_irq_get(ring, seqno);
),
TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
__entry->dev, __entry->ring, __entry->seqno, __entry->flags)
);
TRACE_EVENT(i915_gem_ring_flush,
TP_PROTO(struct intel_engine_cs *ring, u32 invalidate, u32 flush),
TP_ARGS(ring, invalidate, flush),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
__field(u32, invalidate)
__field(u32, flush)
),
TP_fast_assign(
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
__entry->invalidate = invalidate;
__entry->flush = flush;
),
TP_printk("dev=%u, ring=%x, invalidate=%04x, flush=%04x",
__entry->dev, __entry->ring,
__entry->invalidate, __entry->flush)
);
DECLARE_EVENT_CLASS(i915_gem_request,
TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
TP_ARGS(ring, seqno),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
__field(u32, seqno)
),
TP_fast_assign(
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
__entry->seqno = seqno;
),
TP_printk("dev=%u, ring=%u, seqno=%u",
__entry->dev, __entry->ring, __entry->seqno)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
TP_ARGS(ring, seqno)
);
TRACE_EVENT(i915_gem_request_complete,
TP_PROTO(struct intel_engine_cs *ring),
TP_ARGS(ring),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
__field(u32, seqno)
),
TP_fast_assign(
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
__entry->seqno = ring->get_seqno(ring, false);
),
TP_printk("dev=%u, ring=%u, seqno=%u",
__entry->dev, __entry->ring, __entry->seqno)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
TP_ARGS(ring, seqno)
);
TRACE_EVENT(i915_gem_request_wait_begin,
TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
TP_ARGS(ring, seqno),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
__field(u32, seqno)
__field(bool, blocking)
),
/* NB: the blocking information is racy since mutex_is_locked
* doesn't check that the current thread holds the lock. The only
* other option would be to pass the boolean information of whether
* or not the class was blocking down through the stack, which is
* less desirable.
*/
TP_fast_assign(
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
__entry->seqno = seqno;
__entry->blocking = mutex_is_locked(&ring->dev->struct_mutex);
),
TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
__entry->dev, __entry->ring, __entry->seqno,
__entry->blocking ? "yes (NB)" : "no")
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
TP_ARGS(ring, seqno)
);
DECLARE_EVENT_CLASS(i915_ring,
TP_PROTO(struct intel_engine_cs *ring),
TP_ARGS(ring),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
),
TP_fast_assign(
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
),
TP_printk("dev=%u, ring=%u", __entry->dev, __entry->ring)
);
DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
TP_PROTO(struct intel_engine_cs *ring),
TP_ARGS(ring)
);
DEFINE_EVENT(i915_ring, i915_ring_wait_end,
TP_PROTO(struct intel_engine_cs *ring),
TP_ARGS(ring)
);
TRACE_EVENT(i915_flip_request,
TP_PROTO(int plane, struct drm_i915_gem_object *obj),
TP_ARGS(plane, obj),
TP_STRUCT__entry(
__field(int, plane)
__field(struct drm_i915_gem_object *, obj)
),
TP_fast_assign(
__entry->plane = plane;
__entry->obj = obj;
),
TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
);
TRACE_EVENT(i915_flip_complete,
TP_PROTO(int plane, struct drm_i915_gem_object *obj),
TP_ARGS(plane, obj),
TP_STRUCT__entry(
__field(int, plane)
__field(struct drm_i915_gem_object *, obj)
),
TP_fast_assign(
__entry->plane = plane;
__entry->obj = obj;
),
TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
);
TRACE_EVENT_CONDITION(i915_reg_rw,
TP_PROTO(bool write, u32 reg, u64 val, int len, bool trace),
TP_ARGS(write, reg, val, len, trace),
TP_CONDITION(trace),
TP_STRUCT__entry(
__field(u64, val)
__field(u32, reg)
__field(u16, write)
__field(u16, len)
),
TP_fast_assign(
__entry->val = (u64)val;
__entry->reg = reg;
__entry->write = write;
__entry->len = len;
),
TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
__entry->write ? "write" : "read",
__entry->reg, __entry->len,
(u32)(__entry->val & 0xffffffff),
(u32)(__entry->val >> 32))
);
TRACE_EVENT(intel_gpu_freq_change,
TP_PROTO(u32 freq),
TP_ARGS(freq),
TP_STRUCT__entry(
__field(u32, freq)
),
TP_fast_assign(
__entry->freq = freq;
),
TP_printk("new_freq=%u", __entry->freq)
);
#endif /* _I915_TRACE_H_ */
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#include <trace/define_trace.h>

View file

@ -0,0 +1,13 @@
/*
* Copyright © 2009 Intel Corporation
*
* Authors:
* Chris Wilson <chris@chris-wilson.co.uk>
*/
#include "i915_drv.h"
#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "i915_trace.h"
#endif
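/*
 * Note (added sketch, not in the original file): CREATE_TRACE_POINTS
 * must be defined in exactly one translation unit; it turns the include
 * above into tracepoint definitions instead of mere declarations. Every
 * other file that fires the events only does:
 *
 *	#include "i915_trace.h"
 *	...
 *	trace_i915_gem_object_create(obj);
 */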

View file

@ -0,0 +1,538 @@
/*
*
* Copyright 2008 (c) Intel Corporation
* Jesse Barnes <jbarnes@virtuousgeek.org>
* Copyright 2013 (c) Intel Corporation
* Daniel Vetter <daniel.vetter@ffwll.ch>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "intel_drv.h"
#include "i915_reg.h"
static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll_reg;
/* On IVB, 3rd pipe shares PLL with another one */
if (pipe > 1)
return false;
if (HAS_PCH_SPLIT(dev))
dpll_reg = PCH_DPLL(pipe);
else
dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE);
}
static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
u32 *array;
int i;
if (!i915_pipe_enabled(dev, pipe))
return;
if (HAS_PCH_SPLIT(dev))
reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
if (pipe == PIPE_A)
array = dev_priv->regfile.save_palette_a;
else
array = dev_priv->regfile.save_palette_b;
for (i = 0; i < 256; i++)
array[i] = I915_READ(reg + (i << 2));
}
static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
u32 *array;
int i;
if (!i915_pipe_enabled(dev, pipe))
return;
if (HAS_PCH_SPLIT(dev))
reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
if (pipe == PIPE_A)
array = dev_priv->regfile.save_palette_a;
else
array = dev_priv->regfile.save_palette_b;
for (i = 0; i < 256; i++)
I915_WRITE(reg + (i << 2), array[i]);
}
void i915_save_display_reg(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
/* Cursor state */
dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR);
dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS);
dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE);
dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR);
dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS);
dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE);
if (IS_GEN2(dev))
dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE);
if (HAS_PCH_SPLIT(dev)) {
dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
}
/* Pipe & plane A info */
dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF);
dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC);
if (HAS_PCH_SPLIT(dev)) {
dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0);
dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1);
dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A);
} else {
dev_priv->regfile.saveFPA0 = I915_READ(_FPA0);
dev_priv->regfile.saveFPA1 = I915_READ(_FPA1);
dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A);
}
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A);
dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A);
dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A);
dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A);
dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A);
dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A);
if (!HAS_PCH_SPLIT(dev))
dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
if (HAS_PCH_SPLIT(dev)) {
dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
dev_priv->regfile.saveTRANSACONF = I915_READ(_PCH_TRANSACONF);
dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_PCH_TRANS_HTOTAL_A);
dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_PCH_TRANS_HBLANK_A);
dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_PCH_TRANS_HSYNC_A);
dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_PCH_TRANS_VTOTAL_A);
dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_PCH_TRANS_VBLANK_A);
dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_PCH_TRANS_VSYNC_A);
}
dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR);
dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE);
dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS);
dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR);
if (INTEL_INFO(dev)->gen >= 4) {
dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF);
dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
}
i915_save_palette(dev, PIPE_A);
dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT);
/* Pipe & plane B info */
dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF);
dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC);
if (HAS_PCH_SPLIT(dev)) {
dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0);
dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1);
dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B);
} else {
dev_priv->regfile.saveFPB0 = I915_READ(_FPB0);
dev_priv->regfile.saveFPB1 = I915_READ(_FPB1);
dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B);
}
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B);
dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B);
dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B);
dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B);
dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B);
dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B);
if (!HAS_PCH_SPLIT(dev))
dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
if (HAS_PCH_SPLIT(dev)) {
dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
dev_priv->regfile.saveTRANSBCONF = I915_READ(_PCH_TRANSBCONF);
dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_PCH_TRANS_HTOTAL_B);
dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_PCH_TRANS_HBLANK_B);
dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_PCH_TRANS_HSYNC_B);
dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_PCH_TRANS_VTOTAL_B);
dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_PCH_TRANS_VBLANK_B);
dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_PCH_TRANS_VSYNC_B);
}
dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR);
dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE);
dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS);
dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR);
if (INTEL_INFO(dev)->gen >= 4) {
dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF);
dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
}
i915_save_palette(dev, PIPE_B);
dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT);
/* Fences */
switch (INTEL_INFO(dev)->gen) {
case 7:
case 6:
for (i = 0; i < 16; i++)
dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
break;
case 5:
case 4:
for (i = 0; i < 16; i++)
dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
break;
case 3:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
case 2:
for (i = 0; i < 8; i++)
dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
break;
}
/* CRT state */
if (HAS_PCH_SPLIT(dev))
dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA);
else
dev_priv->regfile.saveADPA = I915_READ(ADPA);
/* Display Port state */
if (SUPPORTS_INTEGRATED_DP(dev)) {
dev_priv->regfile.saveDP_B = I915_READ(DP_B);
dev_priv->regfile.saveDP_C = I915_READ(DP_C);
dev_priv->regfile.saveDP_D = I915_READ(DP_D);
dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_DATA_M_G4X);
dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_DATA_M_G4X);
dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_DATA_N_G4X);
dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_DATA_N_G4X);
dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_LINK_M_G4X);
dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_LINK_M_G4X);
dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_LINK_N_G4X);
dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_LINK_N_G4X);
}
/* FIXME: regfile.save TV & SDVO state */
/* Backlight */
if (INTEL_INFO(dev)->gen <= 4)
pci_read_config_byte(dev->pdev, PCI_LBPC,
&dev_priv->regfile.saveLBB);
if (HAS_PCH_SPLIT(dev)) {
dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
} else {
dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
if (INTEL_INFO(dev)->gen >= 4)
dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
}
return;
}
void i915_restore_display_reg(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int dpll_a_reg, fpa0_reg, fpa1_reg;
int dpll_b_reg, fpb0_reg, fpb1_reg;
int i;
/* Backlight */
if (INTEL_INFO(dev)->gen <= 4)
pci_write_config_byte(dev->pdev, PCI_LBPC,
dev_priv->regfile.saveLBB);
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
/* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
* otherwise we get a blank eDP screen after S3 on some machines
*/
I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2);
I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL);
} else {
if (INTEL_INFO(dev)->gen >= 4)
I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
}
/* Display port ratios (must be done before clock is set) */
if (SUPPORTS_INTEGRATED_DP(dev)) {
I915_WRITE(_PIPEA_DATA_M_G4X, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
I915_WRITE(_PIPEB_DATA_M_G4X, dev_priv->regfile.savePIPEB_GMCH_DATA_M);
I915_WRITE(_PIPEA_DATA_N_G4X, dev_priv->regfile.savePIPEA_GMCH_DATA_N);
I915_WRITE(_PIPEB_DATA_N_G4X, dev_priv->regfile.savePIPEB_GMCH_DATA_N);
I915_WRITE(_PIPEA_LINK_M_G4X, dev_priv->regfile.savePIPEA_DP_LINK_M);
I915_WRITE(_PIPEB_LINK_M_G4X, dev_priv->regfile.savePIPEB_DP_LINK_M);
I915_WRITE(_PIPEA_LINK_N_G4X, dev_priv->regfile.savePIPEA_DP_LINK_N);
I915_WRITE(_PIPEB_LINK_N_G4X, dev_priv->regfile.savePIPEB_DP_LINK_N);
}
/* Fences */
switch (INTEL_INFO(dev)->gen) {
case 7:
case 6:
for (i = 0; i < 16; i++)
I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
break;
case 5:
case 4:
for (i = 0; i < 16; i++)
I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
break;
case 3:
case 2:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]);
for (i = 0; i < 8; i++)
I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]);
break;
}
if (HAS_PCH_SPLIT(dev)) {
dpll_a_reg = _PCH_DPLL_A;
dpll_b_reg = _PCH_DPLL_B;
fpa0_reg = _PCH_FPA0;
fpb0_reg = _PCH_FPB0;
fpa1_reg = _PCH_FPA1;
fpb1_reg = _PCH_FPB1;
} else {
dpll_a_reg = _DPLL_A;
dpll_b_reg = _DPLL_B;
fpa0_reg = _FPA0;
fpb0_reg = _FPB0;
fpa1_reg = _FPA1;
fpb1_reg = _FPB1;
}
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL);
I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL);
}
/* Pipe & plane A info */
/* Prime the clock */
if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) {
I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A &
~DPLL_VCO_ENABLE);
POSTING_READ(dpll_a_reg);
udelay(150);
}
I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0);
I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1);
/* Actually enable it */
I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A);
POSTING_READ(dpll_a_reg);
udelay(150);
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD);
POSTING_READ(_DPLL_A_MD);
}
udelay(150);
/* Restore mode */
I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A);
I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A);
I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A);
I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A);
I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A);
I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A);
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A);
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1);
I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1);
I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1);
I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1);
I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL);
I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL);
I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1);
I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ);
I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS);
I915_WRITE(_PCH_TRANSACONF, dev_priv->regfile.saveTRANSACONF);
I915_WRITE(_PCH_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A);
I915_WRITE(_PCH_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A);
I915_WRITE(_PCH_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A);
I915_WRITE(_PCH_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A);
I915_WRITE(_PCH_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A);
I915_WRITE(_PCH_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A);
}
/* Restore plane info */
I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE);
I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS);
I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC);
I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR);
I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE);
if (INTEL_INFO(dev)->gen >= 4) {
I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF);
I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF);
}
I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF);
i915_restore_palette(dev, PIPE_A);
/* Enable the plane */
I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR);
I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
/* Pipe & plane B info */
if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) {
I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B &
~DPLL_VCO_ENABLE);
POSTING_READ(dpll_b_reg);
udelay(150);
}
I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0);
I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1);
/* Actually enable it */
I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B);
POSTING_READ(dpll_b_reg);
udelay(150);
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD);
POSTING_READ(_DPLL_B_MD);
}
udelay(150);
/* Restore mode */
I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B);
I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B);
I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B);
I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B);
I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B);
I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B);
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B);
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1);
I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1);
I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1);
I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1);
I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL);
I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL);
I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1);
I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ);
I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS);
I915_WRITE(_PCH_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF);
I915_WRITE(_PCH_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B);
I915_WRITE(_PCH_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B);
I915_WRITE(_PCH_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B);
I915_WRITE(_PCH_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B);
I915_WRITE(_PCH_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B);
I915_WRITE(_PCH_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B);
}
/* Restore plane info */
I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE);
I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS);
I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC);
I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR);
I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE);
if (INTEL_INFO(dev)->gen >= 4) {
I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF);
I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF);
}
I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF);
i915_restore_palette(dev, PIPE_B);
/* Enable the plane */
I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR);
I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
/* Cursor state */
I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS);
I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR);
I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE);
I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS);
I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR);
I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE);
if (IS_GEN2(dev))
I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE);
/* CRT state */
if (HAS_PCH_SPLIT(dev))
I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA);
else
I915_WRITE(ADPA, dev_priv->regfile.saveADPA);
/* Display Port state */
if (SUPPORTS_INTEGRATED_DP(dev)) {
I915_WRITE(DP_B, dev_priv->regfile.saveDP_B);
I915_WRITE(DP_C, dev_priv->regfile.saveDP_C);
I915_WRITE(DP_D, dev_priv->regfile.saveDP_D);
}
/* FIXME: restore TV & SDVO state */
return;
}

View file

@ -0,0 +1,165 @@
/*
* Intel ACPI functions
*
* _DSM related code stolen from nouveau_acpi.c.
*/
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/vga_switcheroo.h>
#include <drm/drmP.h>
#include "i915_drv.h"
#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
#define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */
static struct intel_dsm_priv {
acpi_handle dhandle;
} intel_dsm_priv;
static const u8 intel_dsm_guid[] = {
0xd3, 0x73, 0xd8, 0x7e,
0xd0, 0xc2,
0x4f, 0x4e,
0xa8, 0x54,
0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c
};
static char *intel_dsm_port_name(u8 id)
{
switch (id) {
case 0:
return "Reserved";
case 1:
return "Analog VGA";
case 2:
return "LVDS";
case 3:
return "Reserved";
case 4:
return "HDMI/DVI_B";
case 5:
return "HDMI/DVI_C";
case 6:
return "HDMI/DVI_D";
case 7:
return "DisplayPort_A";
case 8:
return "DisplayPort_B";
case 9:
return "DisplayPort_C";
case 0xa:
return "DisplayPort_D";
case 0xb:
case 0xc:
case 0xd:
return "Reserved";
case 0xe:
return "WiDi";
default:
return "bad type";
}
}
static char *intel_dsm_mux_type(u8 type)
{
switch (type) {
case 0:
return "unknown";
case 1:
return "No MUX, iGPU only";
case 2:
return "No MUX, dGPU only";
case 3:
return "MUXed between iGPU and dGPU";
default:
return "bad type";
}
}
static void intel_dsm_platform_mux_info(void)
{
int i;
union acpi_object *pkg, *connector_count;
pkg = acpi_evaluate_dsm_typed(intel_dsm_priv.dhandle, intel_dsm_guid,
INTEL_DSM_REVISION_ID, INTEL_DSM_FN_PLATFORM_MUX_INFO,
NULL, ACPI_TYPE_PACKAGE);
if (!pkg) {
DRM_DEBUG_DRIVER("failed to evaluate _DSM\n");
return;
}
connector_count = &pkg->package.elements[0];
DRM_DEBUG_DRIVER("MUX info connectors: %lld\n",
(unsigned long long)connector_count->integer.value);
for (i = 1; i < pkg->package.count; i++) {
union acpi_object *obj = &pkg->package.elements[i];
union acpi_object *connector_id = &obj->package.elements[0];
union acpi_object *info = &obj->package.elements[1];
DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n",
(unsigned long long)connector_id->integer.value);
DRM_DEBUG_DRIVER(" port id: %s\n",
intel_dsm_port_name(info->buffer.pointer[0]));
DRM_DEBUG_DRIVER(" display mux info: %s\n",
intel_dsm_mux_type(info->buffer.pointer[1]));
DRM_DEBUG_DRIVER(" aux/dc mux info: %s\n",
intel_dsm_mux_type(info->buffer.pointer[2]));
DRM_DEBUG_DRIVER(" hpd mux info: %s\n",
intel_dsm_mux_type(info->buffer.pointer[3]));
}
ACPI_FREE(pkg);
}
static bool intel_dsm_pci_probe(struct pci_dev *pdev)
{
acpi_handle dhandle;
dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
if (!acpi_check_dsm(dhandle, intel_dsm_guid, INTEL_DSM_REVISION_ID,
1 << INTEL_DSM_FN_PLATFORM_MUX_INFO)) {
DRM_DEBUG_KMS("no _DSM method for intel device\n");
return false;
}
intel_dsm_priv.dhandle = dhandle;
intel_dsm_platform_mux_info();
return true;
}
static bool intel_dsm_detect(void)
{
char acpi_method_name[255] = { 0 };
struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
struct pci_dev *pdev = NULL;
bool has_dsm = false;
int vga_count = 0;
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
vga_count++;
has_dsm |= intel_dsm_pci_probe(pdev);
}
if (vga_count == 2 && has_dsm) {
acpi_get_name(intel_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer);
DRM_DEBUG_DRIVER("VGA switcheroo: detected DSM switching method %s handle\n",
acpi_method_name);
return true;
}
return false;
}
void intel_register_dsm_handler(void)
{
if (!intel_dsm_detect())
return;
}
void intel_unregister_dsm_handler(void)
{
}

File diff suppressed because it is too large

View file

@ -0,0 +1,938 @@
/*
* Copyright © 2006 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
#ifndef _I830_BIOS_H_
#define _I830_BIOS_H_
#include <drm/drmP.h>
struct vbt_header {
u8 signature[20]; /**< Always starts with 'VBT$' */
u16 version; /**< decimal */
u16 header_size; /**< in bytes */
u16 vbt_size; /**< in bytes */
u8 vbt_checksum;
u8 reserved0;
u32 bdb_offset; /**< from beginning of VBT */
u32 aim_offset[4]; /**< from beginning of VBT */
} __packed;
struct bdb_header {
u8 signature[16]; /**< Always 'BIOS_DATA_BLOCK' */
u16 version; /**< decimal */
u16 header_size; /**< in bytes */
u16 bdb_size; /**< in bytes */
};
/* strictly speaking, this is a "skip" block, but it has interesting info */
struct vbios_data {
u8 type; /* 0 == desktop, 1 == mobile */
u8 relstage;
u8 chipset;
u8 lvds_present:1;
u8 tv_present:1;
u8 rsvd2:6; /* finish byte */
u8 rsvd3[4];
u8 signon[155];
u8 copyright[61];
u16 code_segment;
u8 dos_boot_mode;
u8 bandwidth_percent;
u8 rsvd4; /* popup memory size */
u8 resize_pci_bios;
u8 rsvd5; /* is crt already on ddc2 */
} __packed;
/*
* There are several types of BIOS data blocks (BDBs); each block has
* its ID in the first byte and its size in the next two.
* Known types are listed below (a lookup sketch follows the list).
*/
#define BDB_GENERAL_FEATURES 1
#define BDB_GENERAL_DEFINITIONS 2
#define BDB_OLD_TOGGLE_LIST 3
#define BDB_MODE_SUPPORT_LIST 4
#define BDB_GENERIC_MODE_TABLE 5
#define BDB_EXT_MMIO_REGS 6
#define BDB_SWF_IO 7
#define BDB_SWF_MMIO 8
#define BDB_DOT_CLOCK_TABLE 9
#define BDB_MODE_REMOVAL_TABLE 10
#define BDB_CHILD_DEVICE_TABLE 11
#define BDB_DRIVER_FEATURES 12
#define BDB_DRIVER_PERSISTENCE 13
#define BDB_EXT_TABLE_PTRS 14
#define BDB_DOT_CLOCK_OVERRIDE 15
#define BDB_DISPLAY_SELECT 16
/* 17 rsvd */
#define BDB_DRIVER_ROTATION 18
#define BDB_DISPLAY_REMOVE 19
#define BDB_OEM_CUSTOM 20
#define BDB_EFP_LIST 21 /* workarounds for VGA hsync/vsync */
#define BDB_SDVO_LVDS_OPTIONS 22
#define BDB_SDVO_PANEL_DTDS 23
#define BDB_SDVO_LVDS_PNP_IDS 24
#define BDB_SDVO_LVDS_POWER_SEQ 25
#define BDB_TV_OPTIONS 26
#define BDB_EDP 27
#define BDB_LVDS_OPTIONS 40
#define BDB_LVDS_LFP_DATA_PTRS 41
#define BDB_LVDS_LFP_DATA 42
#define BDB_LVDS_BACKLIGHT 43
#define BDB_LVDS_POWER 44
#define BDB_MIPI_CONFIG 52
#define BDB_MIPI_SEQUENCE 53
#define BDB_SKIP 254 /* VBIOS private block, ignore */
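/*
 * Hedged lookup sketch, not part of this header: given the ID/size
 * framing described above, a parser can walk the BDB to find a block
 * by ID. Bounds handling is simplified for illustration.
 */
static inline const void *bdb_find_block(const struct bdb_header *bdb, u8 id)
{
	const u8 *base = (const u8 *)bdb;
	u32 off = bdb->header_size;

	while (off + 3 <= bdb->bdb_size) {
		u8 cur_id = base[off];
		u16 len = base[off + 1] | (base[off + 2] << 8);

		if (cur_id == id)
			return base + off + 3;	/* skip the 3-byte header */
		off += 3 + len;
	}
	return NULL;
}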
struct bdb_general_features {
/* bits 1 */
u8 panel_fitting:2;
u8 flexaim:1;
u8 msg_enable:1;
u8 clear_screen:3;
u8 color_flip:1;
/* bits 2 */
u8 download_ext_vbt:1;
u8 enable_ssc:1;
u8 ssc_freq:1;
u8 enable_lfp_on_override:1;
u8 disable_ssc_ddt:1;
u8 rsvd7:1;
u8 display_clock_mode:1;
u8 rsvd8:1; /* finish byte */
/* bits 3 */
u8 disable_smooth_vision:1;
u8 single_dvi:1;
u8 rsvd9:1;
u8 fdi_rx_polarity_inverted:1;
u8 rsvd10:4; /* finish byte */
/* bits 4 */
u8 legacy_monitor_detect;
/* bits 5 */
u8 int_crt_support:1;
u8 int_tv_support:1;
u8 int_efp_support:1;
u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */
u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */
u8 rsvd11:3; /* finish byte */
} __packed;
/* pre-915 */
#define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */
#define GPIO_PIN_ADD_I2C 0x05 /* "ADDCARD I2C GPIO pins" */
#define GPIO_PIN_ADD_DDC 0x04 /* "ADDCARD DDC GPIO pins" */
#define GPIO_PIN_ADD_DDC_I2C 0x06 /* "ADDCARD DDC/I2C GPIO pins" */
/* Pre 915 */
#define DEVICE_TYPE_NONE 0x00
#define DEVICE_TYPE_CRT 0x01
#define DEVICE_TYPE_TV 0x09
#define DEVICE_TYPE_EFP 0x12
#define DEVICE_TYPE_LFP 0x22
/* On 915+ */
#define DEVICE_TYPE_CRT_DPMS 0x6001
#define DEVICE_TYPE_CRT_DPMS_HOTPLUG 0x4001
#define DEVICE_TYPE_TV_COMPOSITE 0x0209
#define DEVICE_TYPE_TV_MACROVISION 0x0289
#define DEVICE_TYPE_TV_RF_COMPOSITE 0x020c
#define DEVICE_TYPE_TV_SVIDEO_COMPOSITE 0x0609
#define DEVICE_TYPE_TV_SCART 0x0209
#define DEVICE_TYPE_TV_CODEC_HOTPLUG_PWR 0x6009
#define DEVICE_TYPE_EFP_HOTPLUG_PWR 0x6012
#define DEVICE_TYPE_EFP_DVI_HOTPLUG_PWR 0x6052
#define DEVICE_TYPE_EFP_DVI_I 0x6053
#define DEVICE_TYPE_EFP_DVI_D_DUAL 0x6152
#define DEVICE_TYPE_EFP_DVI_D_HDCP 0x60d2
#define DEVICE_TYPE_OPENLDI_HOTPLUG_PWR 0x6062
#define DEVICE_TYPE_OPENLDI_DUALPIX 0x6162
#define DEVICE_TYPE_LFP_PANELLINK 0x5012
#define DEVICE_TYPE_LFP_CMOS_PWR 0x5042
#define DEVICE_TYPE_LFP_LVDS_PWR 0x5062
#define DEVICE_TYPE_LFP_LVDS_DUAL 0x5162
#define DEVICE_TYPE_LFP_LVDS_DUAL_HDCP 0x51e2
#define DEVICE_CFG_NONE 0x00
#define DEVICE_CFG_12BIT_DVOB 0x01
#define DEVICE_CFG_12BIT_DVOC 0x02
#define DEVICE_CFG_24BIT_DVOBC 0x09
#define DEVICE_CFG_24BIT_DVOCB 0x0a
#define DEVICE_CFG_DUAL_DVOB 0x11
#define DEVICE_CFG_DUAL_DVOC 0x12
#define DEVICE_CFG_DUAL_DVOBC 0x13
#define DEVICE_CFG_DUAL_LINK_DVOBC 0x19
#define DEVICE_CFG_DUAL_LINK_DVOCB 0x1a
#define DEVICE_WIRE_NONE 0x00
#define DEVICE_WIRE_DVOB 0x01
#define DEVICE_WIRE_DVOC 0x02
#define DEVICE_WIRE_DVOBC 0x03
#define DEVICE_WIRE_DVOBB 0x05
#define DEVICE_WIRE_DVOCC 0x06
#define DEVICE_WIRE_DVOB_MASTER 0x0d
#define DEVICE_WIRE_DVOC_MASTER 0x0e
#define DEVICE_PORT_DVOA 0x00 /* none on 845+ */
#define DEVICE_PORT_DVOB 0x01
#define DEVICE_PORT_DVOC 0x02
/* We used to keep this struct but without any version control. We should avoid
* using it in the future, but it should be safe to keep using it in the old
* code. */
struct old_child_dev_config {
u16 handle;
u16 device_type;
u8 device_id[10]; /* ascii string */
u16 addin_offset;
u8 dvo_port; /* See Device_PORT_* above */
u8 i2c_pin;
u8 slave_addr;
u8 ddc_pin;
u16 edid_ptr;
u8 dvo_cfg; /* See DEVICE_CFG_* above */
u8 dvo2_port;
u8 i2c2_pin;
u8 slave2_addr;
u8 ddc2_pin;
u8 capabilities;
u8 dvo_wiring;/* See DEVICE_WIRE_* above */
u8 dvo2_wiring;
u16 extended_type;
u8 dvo_function;
} __packed;
/* This one contains field offsets that are known to be common for all BDB
* versions. Notice that the meaning of the contents may still change,
* but at least the offsets are consistent. */
struct common_child_dev_config {
u16 handle;
u16 device_type;
u8 not_common1[12];
u8 dvo_port;
u8 not_common2[2];
u8 ddc_pin;
u16 edid_ptr;
} __packed;
/* This field changes depending on the BDB version, so the most reliable way to
* read it is by checking the BDB version and reading the raw pointer. */
union child_device_config {
/* This one is safe to use anywhere, but the code should still check
* the BDB version. */
u8 raw[33];
/* This one should only be kept for legacy code. */
struct old_child_dev_config old;
/* This one should also be safe to use anywhere, even without version
* checks. */
struct common_child_dev_config common;
};
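/*
 * Access sketch (hedged, not part of this header): prefer the "common"
 * view for fields whose offsets are stable across BDB versions, and
 * fall back to the raw bytes plus an explicit version check for the
 * rest.
 */
static inline u8 child_dev_dvo_port(const union child_device_config *child)
{
	return child->common.dvo_port;
}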
struct bdb_general_definitions {
/* DDC GPIO */
u8 crt_ddc_gmbus_pin;
/* DPMS bits */
u8 dpms_acpi:1;
u8 skip_boot_crt_detect:1;
u8 dpms_aim:1;
u8 rsvd1:5; /* finish byte */
/* boot device bits */
u8 boot_display[2];
u8 child_dev_size;
/*
* Device info:
* If TV is present, it'll be at devices[0].
* LVDS will be next, either devices[0] or [1], if present.
* On some platforms the number of devices is 6, but it can be as few
* as 4 if both TV and LVDS are missing.
* The device count is derived from the size of the general
* definitions block using the following formula (see the sketch
* after this struct):
* number = (block_size - sizeof(bdb_general_definitions)) /
*	sizeof(child_device_config);
*/
union child_device_config devices[0];
} __packed;
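/*
 * Count sketch (hedged): the formula from the comment above, written
 * out. Real parsers may instead honor a per-BDB child_dev_size.
 */
static inline int bdb_child_device_num(u16 block_size)
{
	return (block_size - sizeof(struct bdb_general_definitions)) /
		sizeof(union child_device_config);
}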
/* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */
#define MODE_MASK 0x3
struct bdb_lvds_options {
u8 panel_type;
u8 rsvd1;
/* LVDS capabilities, stored in a dword */
u8 pfit_mode:2;
u8 pfit_text_mode_enhanced:1;
u8 pfit_gfx_mode_enhanced:1;
u8 pfit_ratio_auto:1;
u8 pixel_dither:1;
u8 lvds_edid:1;
u8 rsvd2:1;
u8 rsvd4;
/* LVDS Panel channel bits stored here */
u32 lvds_panel_channel_bits;
/* LVDS SSC (Spread Spectrum Clock) bits stored here. */
u16 ssc_bits;
u16 ssc_freq;
u16 ssc_ddt;
/* Panel color depth defined here */
u16 panel_color_depth;
/* LVDS panel type bits stored here */
u32 dps_panel_type_bits;
/* LVDS backlight control type bits stored here */
u32 blt_control_type_bits;
} __packed;
/* LFP pointer table contains entries to the struct below */
struct bdb_lvds_lfp_data_ptr {
u16 fp_timing_offset; /* offsets are from start of bdb */
u8 fp_table_size;
u16 dvo_timing_offset;
u8 dvo_table_size;
u16 panel_pnp_id_offset;
u8 pnp_table_size;
} __packed;
struct bdb_lvds_lfp_data_ptrs {
u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
struct bdb_lvds_lfp_data_ptr ptr[16];
} __packed;
/* LFP data has 3 blocks per entry */
struct lvds_fp_timing {
u16 x_res;
u16 y_res;
u32 lvds_reg;
u32 lvds_reg_val;
u32 pp_on_reg;
u32 pp_on_reg_val;
u32 pp_off_reg;
u32 pp_off_reg_val;
u32 pp_cycle_reg;
u32 pp_cycle_reg_val;
u32 pfit_reg;
u32 pfit_reg_val;
u16 terminator;
} __packed;
struct lvds_dvo_timing {
u16 clock; /**< In units of 10 kHz */
u8 hactive_lo;
u8 hblank_lo;
u8 hblank_hi:4;
u8 hactive_hi:4;
u8 vactive_lo;
u8 vblank_lo;
u8 vblank_hi:4;
u8 vactive_hi:4;
u8 hsync_off_lo;
u8 hsync_pulse_width;
u8 vsync_pulse_width:4;
u8 vsync_off:4;
u8 rsvd0:6;
u8 hsync_off_hi:2;
u8 h_image;
u8 v_image;
u8 max_hv;
u8 h_border;
u8 v_border;
u8 rsvd1:3;
u8 digital:2;
u8 vsync_positive:1;
u8 hsync_positive:1;
u8 rsvd2:1;
} __packed;
struct lvds_pnp_id {
u16 mfg_name;
u16 product_code;
u32 serial;
u8 mfg_week;
u8 mfg_year;
} __packed;
struct bdb_lvds_lfp_data_entry {
struct lvds_fp_timing fp_timing;
struct lvds_dvo_timing dvo_timing;
struct lvds_pnp_id pnp_id;
} __packed;
struct bdb_lvds_lfp_data {
struct bdb_lvds_lfp_data_entry data[16];
} __packed;
#define BDB_BACKLIGHT_TYPE_NONE 0
#define BDB_BACKLIGHT_TYPE_PWM 2
struct bdb_lfp_backlight_data_entry {
u8 type:2;
u8 active_low_pwm:1;
u8 obsolete1:5;
u16 pwm_freq_hz;
u8 min_brightness;
u8 obsolete2;
u8 obsolete3;
} __packed;
struct bdb_lfp_backlight_data {
u8 entry_size;
struct bdb_lfp_backlight_data_entry data[16];
u8 level[16];
} __packed;
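/*
 * Lookup sketch (hedged, not part of this header): entries are strided
 * by entry_size rather than by sizeof(*entry), so layouts from other
 * BDB versions keep working; a cautious parser may instead insist that
 * entry_size == sizeof(struct bdb_lfp_backlight_data_entry).
 */
static inline const struct bdb_lfp_backlight_data_entry *
lfp_backlight_entry(const struct bdb_lfp_backlight_data *data, int panel_type)
{
	const u8 *base = (const u8 *)data->data;

	return (const struct bdb_lfp_backlight_data_entry *)
		(base + panel_type * data->entry_size);
}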
struct aimdb_header {
char signature[16];
char oem_device[20];
u16 aimdb_version;
u16 aimdb_header_size;
u16 aimdb_size;
} __packed;
struct aimdb_block {
u8 aimdb_id;
u16 aimdb_size;
} __packed;
struct vch_panel_data {
u16 fp_timing_offset;
u8 fp_timing_size;
u16 dvo_timing_offset;
u8 dvo_timing_size;
u16 text_fitting_offset;
u8 text_fitting_size;
u16 graphics_fitting_offset;
u8 graphics_fitting_size;
} __packed;
struct vch_bdb_22 {
struct aimdb_block aimdb_block;
struct vch_panel_data panels[16];
} __packed;
struct bdb_sdvo_lvds_options {
u8 panel_backlight;
u8 h40_set_panel_type;
u8 panel_type;
u8 ssc_clk_freq;
u16 als_low_trip;
u16 als_high_trip;
u8 sclalarcoeff_tab_row_num;
u8 sclalarcoeff_tab_row_size;
u8 coefficient[8];
u8 panel_misc_bits_1;
u8 panel_misc_bits_2;
u8 panel_misc_bits_3;
u8 panel_misc_bits_4;
} __packed;
#define BDB_DRIVER_FEATURE_NO_LVDS 0
#define BDB_DRIVER_FEATURE_INT_LVDS 1
#define BDB_DRIVER_FEATURE_SDVO_LVDS 2
#define BDB_DRIVER_FEATURE_EDP 3
struct bdb_driver_features {
u8 boot_dev_algorithm:1;
u8 block_display_switch:1;
u8 allow_display_switch:1;
u8 hotplug_dvo:1;
u8 dual_view_zoom:1;
u8 int15h_hook:1;
u8 sprite_in_clone:1;
u8 primary_lfp_id:1;
u16 boot_mode_x;
u16 boot_mode_y;
u8 boot_mode_bpp;
u8 boot_mode_refresh;
u16 enable_lfp_primary:1;
u16 selective_mode_pruning:1;
u16 dual_frequency:1;
u16 render_clock_freq:1; /* 0: high freq; 1: low freq */
u16 nt_clone_support:1;
u16 power_scheme_ui:1; /* 0: CUI; 1: 3rd party */
u16 sprite_display_assign:1; /* 0: secondary; 1: primary */
u16 cui_aspect_scaling:1;
u16 preserve_aspect_ratio:1;
u16 sdvo_device_power_down:1;
u16 crt_hotplug:1;
u16 lvds_config:2;
u16 tv_hotplug:1;
u16 hdmi_config:2;
u8 static_display:1;
u8 reserved2:7;
u16 legacy_crt_max_x;
u16 legacy_crt_max_y;
u8 legacy_crt_max_refresh;
u8 hdmi_termination;
u8 custom_vbt_version;
/* Driver features data block */
u16 rmpm_enabled:1;
u16 s2ddt_enabled:1;
u16 dpst_enabled:1;
u16 bltclt_enabled:1;
u16 adb_enabled:1;
u16 drrs_enabled:1;
u16 grs_enabled:1;
u16 gpmt_enabled:1;
u16 tbt_enabled:1;
u16 psr_enabled:1;
u16 ips_enabled:1;
u16 reserved3:4;
u16 pc_feature_valid:1;
} __packed;
#define EDP_18BPP 0
#define EDP_24BPP 1
#define EDP_30BPP 2
#define EDP_RATE_1_62 0
#define EDP_RATE_2_7 1
#define EDP_LANE_1 0
#define EDP_LANE_2 1
#define EDP_LANE_4 3
#define EDP_PREEMPHASIS_NONE 0
#define EDP_PREEMPHASIS_3_5dB 1
#define EDP_PREEMPHASIS_6dB 2
#define EDP_PREEMPHASIS_9_5dB 3
#define EDP_VSWING_0_4V 0
#define EDP_VSWING_0_6V 1
#define EDP_VSWING_0_8V 2
#define EDP_VSWING_1_2V 3
struct edp_power_seq {
u16 t1_t3;
u16 t8;
u16 t9;
u16 t10;
u16 t11_t12;
} __packed;
struct edp_link_params {
u8 rate:4;
u8 lanes:4;
u8 preemphasis:4;
u8 vswing:4;
} __packed;
struct bdb_edp {
struct edp_power_seq power_seqs[16];
u32 color_depth;
struct edp_link_params link_params[16];
u32 sdrrs_msa_timing_delay;
/* ith bit indicates enabled/disabled for (i+1)th panel */
u16 edp_s3d_feature;
u16 edp_t3_optimization;
} __packed;
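/*
 * Decoding sketch (hedged): mapping the packed link params above to a
 * link rate in kHz and a lane count, using the EDP_* encodings defined
 * earlier in this header.
 */
static inline int edp_link_rate_khz(const struct edp_link_params *p)
{
	return p->rate == EDP_RATE_2_7 ? 270000 : 162000;
}

static inline int edp_lane_count(const struct edp_link_params *p)
{
	switch (p->lanes) {
	case EDP_LANE_1:
		return 1;
	case EDP_LANE_2:
		return 2;
	case EDP_LANE_4:
		return 4;
	default:
		return 0;
	}
}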
void intel_setup_bios(struct drm_device *dev);
int intel_parse_bios(struct drm_device *dev);
/*
* Driver<->VBIOS interaction occurs through scratch bits in
* GR18 & SWF*.
*/
/* GR18 bits are set on display switch and hotkey events */
#define GR18_DRIVER_SWITCH_EN (1<<7) /* 0: VBIOS control, 1: driver control */
#define GR18_HOTKEY_MASK 0x78 /* See also SWF4 15:0 */
#define GR18_HK_NONE (0x0<<3)
#define GR18_HK_LFP_STRETCH (0x1<<3)
#define GR18_HK_TOGGLE_DISP (0x2<<3)
#define GR18_HK_DISP_SWITCH (0x4<<3) /* see SWF14 15:0 for what to enable */
#define GR18_HK_POPUP_DISABLED (0x6<<3)
#define GR18_HK_POPUP_ENABLED (0x7<<3)
#define GR18_HK_PFIT (0x8<<3)
#define GR18_HK_APM_CHANGE (0xa<<3)
#define GR18_HK_MULTIPLE (0xc<<3)
#define GR18_USER_INT_EN (1<<2)
#define GR18_A0000_FLUSH_EN (1<<1)
#define GR18_SMM_EN (1<<0)
/* Set by driver, cleared by VBIOS */
#define SWF00_YRES_SHIFT 16
#define SWF00_XRES_SHIFT 0
#define SWF00_RES_MASK 0xffff
/* Set by VBIOS at boot time and driver at runtime */
#define SWF01_TV2_FORMAT_SHIFT 8
#define SWF01_TV1_FORMAT_SHIFT 0
#define SWF01_TV_FORMAT_MASK 0xffff
#define SWF10_VBIOS_BLC_I2C_EN (1<<29)
#define SWF10_GTT_OVERRIDE_EN (1<<28)
#define SWF10_LFP_DPMS_OVR (1<<27) /* override DPMS on display switch */
#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24)
#define SWF10_OLD_TOGGLE 0x0
#define SWF10_TOGGLE_LIST_1 0x1
#define SWF10_TOGGLE_LIST_2 0x2
#define SWF10_TOGGLE_LIST_3 0x3
#define SWF10_TOGGLE_LIST_4 0x4
#define SWF10_PANNING_EN (1<<23)
#define SWF10_DRIVER_LOADED (1<<22)
#define SWF10_EXTENDED_DESKTOP (1<<21)
#define SWF10_EXCLUSIVE_MODE (1<<20)
#define SWF10_OVERLAY_EN (1<<19)
#define SWF10_PLANEB_HOLDOFF (1<<18)
#define SWF10_PLANEA_HOLDOFF (1<<17)
#define SWF10_VGA_HOLDOFF (1<<16)
#define SWF10_ACTIVE_DISP_MASK 0xffff
#define SWF10_PIPEB_LFP2 (1<<15)
#define SWF10_PIPEB_EFP2 (1<<14)
#define SWF10_PIPEB_TV2 (1<<13)
#define SWF10_PIPEB_CRT2 (1<<12)
#define SWF10_PIPEB_LFP (1<<11)
#define SWF10_PIPEB_EFP (1<<10)
#define SWF10_PIPEB_TV (1<<9)
#define SWF10_PIPEB_CRT (1<<8)
#define SWF10_PIPEA_LFP2 (1<<7)
#define SWF10_PIPEA_EFP2 (1<<6)
#define SWF10_PIPEA_TV2 (1<<5)
#define SWF10_PIPEA_CRT2 (1<<4)
#define SWF10_PIPEA_LFP (1<<3)
#define SWF10_PIPEA_EFP (1<<2)
#define SWF10_PIPEA_TV (1<<1)
#define SWF10_PIPEA_CRT (1<<0)
#define SWF11_MEMORY_SIZE_SHIFT 16
#define SWF11_SV_TEST_EN (1<<15)
#define SWF11_IS_AGP (1<<14)
#define SWF11_DISPLAY_HOLDOFF (1<<13)
#define SWF11_DPMS_REDUCED (1<<12)
#define SWF11_IS_VBE_MODE (1<<11)
#define SWF11_PIPEB_ACCESS (1<<10) /* 0 here means pipe a */
#define SWF11_DPMS_MASK 0x07
#define SWF11_DPMS_OFF (1<<2)
#define SWF11_DPMS_SUSPEND (1<<1)
#define SWF11_DPMS_STANDBY (1<<0)
#define SWF11_DPMS_ON 0
#define SWF14_GFX_PFIT_EN (1<<31)
#define SWF14_TEXT_PFIT_EN (1<<30)
#define SWF14_LID_STATUS_CLOSED (1<<29) /* 0 here means open */
#define SWF14_POPUP_EN (1<<28)
#define SWF14_DISPLAY_HOLDOFF (1<<27)
#define SWF14_DISP_DETECT_EN (1<<26)
#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */
#define SWF14_DRIVER_STATUS (1<<24)
#define SWF14_OS_TYPE_WIN9X (1<<23)
#define SWF14_OS_TYPE_WINNT (1<<22)
/* 21:19 rsvd */
#define SWF14_PM_TYPE_MASK 0x00070000
#define SWF14_PM_ACPI_VIDEO (0x4 << 16)
#define SWF14_PM_ACPI (0x3 << 16)
#define SWF14_PM_APM_12 (0x2 << 16)
#define SWF14_PM_APM_11 (0x1 << 16)
#define SWF14_HK_REQUEST_MASK 0x0000ffff /* see GR18 6:3 for event type */
/* if GR18 indicates a display switch */
#define SWF14_DS_PIPEB_LFP2_EN (1<<15)
#define SWF14_DS_PIPEB_EFP2_EN (1<<14)
#define SWF14_DS_PIPEB_TV2_EN (1<<13)
#define SWF14_DS_PIPEB_CRT2_EN (1<<12)
#define SWF14_DS_PIPEB_LFP_EN (1<<11)
#define SWF14_DS_PIPEB_EFP_EN (1<<10)
#define SWF14_DS_PIPEB_TV_EN (1<<9)
#define SWF14_DS_PIPEB_CRT_EN (1<<8)
#define SWF14_DS_PIPEA_LFP2_EN (1<<7)
#define SWF14_DS_PIPEA_EFP2_EN (1<<6)
#define SWF14_DS_PIPEA_TV2_EN (1<<5)
#define SWF14_DS_PIPEA_CRT2_EN (1<<4)
#define SWF14_DS_PIPEA_LFP_EN (1<<3)
#define SWF14_DS_PIPEA_EFP_EN (1<<2)
#define SWF14_DS_PIPEA_TV_EN (1<<1)
#define SWF14_DS_PIPEA_CRT_EN (1<<0)
/* if GR18 indicates a panel fitting request */
#define SWF14_PFIT_EN (1<<0) /* 0 means disable */
/* if GR18 indicates an APM change request */
#define SWF14_APM_HIBERNATE 0x4
#define SWF14_APM_SUSPEND 0x3
#define SWF14_APM_STANDBY 0x1
#define SWF14_APM_RESTORE 0x0
/* Add the device class for LFP, TV, HDMI */
#define DEVICE_TYPE_INT_LFP 0x1022
#define DEVICE_TYPE_INT_TV 0x1009
#define DEVICE_TYPE_HDMI 0x60D2
#define DEVICE_TYPE_DP 0x68C6
#define DEVICE_TYPE_eDP 0x78C6
#define DEVICE_TYPE_CLASS_EXTENSION (1 << 15)
#define DEVICE_TYPE_POWER_MANAGEMENT (1 << 14)
#define DEVICE_TYPE_HOTPLUG_SIGNALING (1 << 13)
#define DEVICE_TYPE_INTERNAL_CONNECTOR (1 << 12)
#define DEVICE_TYPE_NOT_HDMI_OUTPUT (1 << 11)
#define DEVICE_TYPE_MIPI_OUTPUT (1 << 10)
#define DEVICE_TYPE_COMPOSITE_OUTPUT (1 << 9)
#define DEVICE_TYPE_DUAL_CHANNEL (1 << 8)
#define DEVICE_TYPE_HIGH_SPEED_LINK (1 << 6)
#define DEVICE_TYPE_LVDS_SINGALING (1 << 5)
#define DEVICE_TYPE_TMDS_DVI_SIGNALING (1 << 4)
#define DEVICE_TYPE_VIDEO_SIGNALING (1 << 3)
#define DEVICE_TYPE_DISPLAYPORT_OUTPUT (1 << 2)
#define DEVICE_TYPE_DIGITAL_OUTPUT (1 << 1)
#define DEVICE_TYPE_ANALOG_OUTPUT (1 << 0)
/*
* Bits we care about when checking for DEVICE_TYPE_eDP
* Depending on the system, the other bits may or may not
* be set for eDP outputs.
*/
#define DEVICE_TYPE_eDP_BITS \
(DEVICE_TYPE_INTERNAL_CONNECTOR | \
DEVICE_TYPE_NOT_HDMI_OUTPUT | \
DEVICE_TYPE_MIPI_OUTPUT | \
DEVICE_TYPE_COMPOSITE_OUTPUT | \
DEVICE_TYPE_DUAL_CHANNEL | \
DEVICE_TYPE_LVDS_SINGALING | \
DEVICE_TYPE_TMDS_DVI_SIGNALING | \
DEVICE_TYPE_VIDEO_SIGNALING | \
DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
DEVICE_TYPE_DIGITAL_OUTPUT | \
DEVICE_TYPE_ANALOG_OUTPUT)
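/*
 * Check sketch (hedged): per the comment above, an output can be
 * treated as eDP when the interesting bits of its device_type match
 * those of DEVICE_TYPE_eDP.
 */
static inline bool bdb_device_is_edp(u16 device_type)
{
	return (device_type & DEVICE_TYPE_eDP_BITS) ==
	       (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS);
}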
/* define the DVO port for HDMI output type */
#define DVO_B 1
#define DVO_C 2
#define DVO_D 3
/* define the PORT for DP output type */
#define PORT_IDPB 7
#define PORT_IDPC 8
#define PORT_IDPD 9
/* Possible values for the "DVO Port" field for versions >= 155: */
#define DVO_PORT_HDMIA 0
#define DVO_PORT_HDMIB 1
#define DVO_PORT_HDMIC 2
#define DVO_PORT_HDMID 3
#define DVO_PORT_LVDS 4
#define DVO_PORT_TV 5
#define DVO_PORT_CRT 6
#define DVO_PORT_DPB 7
#define DVO_PORT_DPC 8
#define DVO_PORT_DPD 9
#define DVO_PORT_DPA 10
#define DVO_PORT_MIPIA 21
#define DVO_PORT_MIPIB 22
#define DVO_PORT_MIPIC 23
#define DVO_PORT_MIPID 24
/* Block 52 contains MIPI panel info.
 * There will be 6 such entries; the index into the correct
 * entry comes from the panel_index in the #40 LFP options block
*/
#define MAX_MIPI_CONFIGURATIONS 6
#define MIPI_DSI_UNDEFINED_PANEL_ID 0
#define MIPI_DSI_GENERIC_PANEL_ID 1
struct mipi_config {
u16 panel_id;
/* General Params */
u32 enable_dithering:1;
u32 rsvd1:1;
u32 is_bridge:1;
u32 panel_arch_type:2;
u32 is_cmd_mode:1;
#define NON_BURST_SYNC_PULSE 0x1
#define NON_BURST_SYNC_EVENTS 0x2
#define BURST_MODE 0x3
u32 video_transfer_mode:2;
u32 cabc_supported:1;
u32 pwm_blc:1;
/* Bit 13:10 */
#define PIXEL_FORMAT_RGB565 0x1
#define PIXEL_FORMAT_RGB666 0x2
#define PIXEL_FORMAT_RGB666_LOOSELY_PACKED 0x3
#define PIXEL_FORMAT_RGB888 0x4
u32 videomode_color_format:4;
/* Bit 15:14 */
#define ENABLE_ROTATION_0 0x0
#define ENABLE_ROTATION_90 0x1
#define ENABLE_ROTATION_180 0x2
#define ENABLE_ROTATION_270 0x3
u32 rotation:2;
u32 bta_enabled:1;
u32 rsvd2:15;
/* 2 byte Port Description */
#define DUAL_LINK_NOT_SUPPORTED 0
#define DUAL_LINK_FRONT_BACK 1
#define DUAL_LINK_PIXEL_ALT 2
u16 dual_link:2;
u16 lane_cnt:2;
u16 rsvd3:12;
u16 rsvd4;
u8 rsvd5;
u32 target_burst_mode_freq;
u32 dsi_ddr_clk;
u32 bridge_ref_clk;
#define BYTE_CLK_SEL_20MHZ 0
#define BYTE_CLK_SEL_10MHZ 1
#define BYTE_CLK_SEL_5MHZ 2
u8 byte_clk_sel:2;
u8 rsvd6:6;
/* DPHY Flags */
u16 dphy_param_valid:1;
u16 eot_pkt_disabled:1;
u16 enable_clk_stop:1;
u16 rsvd7:13;
u32 hs_tx_timeout;
u32 lp_rx_timeout;
u32 turn_around_timeout;
u32 device_reset_timer;
u32 master_init_timer;
u32 dbi_bw_timer;
u32 lp_byte_clk_val;
/* 4 byte Dphy Params */
u32 prepare_cnt:6;
u32 rsvd8:2;
u32 clk_zero_cnt:8;
u32 trail_cnt:5;
u32 rsvd9:3;
u32 exit_zero_cnt:6;
u32 rsvd10:2;
u32 clk_lane_switch_cnt;
u32 hl_switch_cnt;
u32 rsvd11[6];
/* timings based on dphy spec */
u8 tclk_miss;
u8 tclk_post;
u8 rsvd12;
u8 tclk_pre;
u8 tclk_prepare;
u8 tclk_settle;
u8 tclk_term_enable;
u8 tclk_trail;
u16 tclk_prepare_clkzero;
u8 rsvd13;
u8 td_term_enable;
u8 teot;
u8 ths_exit;
u8 ths_prepare;
u16 ths_prepare_hszero;
u8 rsvd14;
u8 ths_settle;
u8 ths_skip;
u8 ths_trail;
u8 tinit;
u8 tlpx;
u8 rsvd15[3];
/* GPIOs */
u8 panel_enable;
u8 bl_enable;
u8 pwm_enable;
u8 reset_r_n;
u8 pwr_down_r;
u8 stdby_r_n;
} __packed;
/* Block 52 contains the MIPI configuration block:
* 6 * bdb_mipi_config entries, followed by the 6 pps data
* blocks below.
*
* All delays have a unit of 100us.
*/
struct mipi_pps_data {
u16 panel_on_delay;
u16 bl_enable_delay;
u16 bl_disable_delay;
u16 panel_off_delay;
u16 panel_power_cycle_delay;
};
struct bdb_mipi_config {
struct mipi_config config[MAX_MIPI_CONFIGURATIONS];
struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS];
};
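/*
 * Lookup sketch (hedged): per the block 52 comment above, the per-panel
 * config and pps entries are both selected by the panel_index taken
 * from the #40 LFP options block.
 */
static inline const struct mipi_config *
bdb_mipi_config_for_panel(const struct bdb_mipi_config *blk, int panel_index)
{
	if (panel_index < 0 || panel_index >= MAX_MIPI_CONFIGURATIONS)
		return NULL;
	return &blk->config[panel_index];
}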
/* Block 53 contains MIPI sequences as needed by the panel
* for enabling it. This block is variable in size and
* can contain a maximum of 6 sequence blocks.
*/
struct bdb_mipi_sequence {
u8 version;
u8 data[0];
} __packed;
/* MIPI Sequence Block definitions */
enum mipi_seq {
MIPI_SEQ_UNDEFINED = 0,
MIPI_SEQ_ASSERT_RESET,
MIPI_SEQ_INIT_OTP,
MIPI_SEQ_DISPLAY_ON,
MIPI_SEQ_DISPLAY_OFF,
MIPI_SEQ_DEASSERT_RESET,
MIPI_SEQ_MAX
};
enum mipi_seq_element {
MIPI_SEQ_ELEM_UNDEFINED = 0,
MIPI_SEQ_ELEM_SEND_PKT,
MIPI_SEQ_ELEM_DELAY,
MIPI_SEQ_ELEM_GPIO,
MIPI_SEQ_ELEM_STATUS,
MIPI_SEQ_ELEM_MAX
};
enum mipi_gpio_pin_index {
MIPI_GPIO_UNDEFINED = 0,
MIPI_GPIO_PANEL_ENABLE,
MIPI_GPIO_BL_ENABLE,
MIPI_GPIO_PWM_ENABLE,
MIPI_GPIO_RESET_N,
MIPI_GPIO_PWR_DOWN_R,
MIPI_GPIO_STDBY_RST_N,
MIPI_GPIO_MAX
};
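/*
* Illustrative sketch of how the block 53 payload is laid out (element
* payload sizes are version-dependent and are NOT encoded in the
* definitions above, so the elem_len() helper named here is purely
* hypothetical): after the version byte, each sequence is one
* mipi_seq id byte followed by mipi_seq_element records, with a
* MIPI_SEQ_ELEM_UNDEFINED byte closing the sequence.
*
* data = &sequence_block->data[0];
* seq_id = *data++; -- one of enum mipi_seq
* while ((elem = *data++) != MIPI_SEQ_ELEM_UNDEFINED)
* data += elem_len(elem, data); -- skip the element payload
*/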
#endif /* _I830_BIOS_H_ */

View file

@ -0,0 +1,926 @@
/*
* Copyright © 2006-2007 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*/
#include <linux/dmi.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
/* Here's the desired hotplug mode */
#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \
ADPA_CRT_HOTPLUG_WARMUP_10MS | \
ADPA_CRT_HOTPLUG_SAMPLE_4S | \
ADPA_CRT_HOTPLUG_VOLTAGE_50 | \
ADPA_CRT_HOTPLUG_VOLREF_325MV | \
ADPA_CRT_HOTPLUG_ENABLE)
struct intel_crt {
struct intel_encoder base;
/* DPMS state is stored in the connector, which we need in the
* encoder's enable/disable callbacks */
struct intel_connector *connector;
bool force_hotplug_required;
u32 adpa_reg;
};
static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
{
return container_of(encoder, struct intel_crt, base);
}
static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
{
return intel_encoder_to_crt(intel_attached_encoder(connector));
}
static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_encoder_to_crt(encoder);
enum intel_display_power_domain power_domain;
u32 tmp;
power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_enabled(dev_priv, power_domain))
return false;
tmp = I915_READ(crt->adpa_reg);
if (!(tmp & ADPA_DAC_ENABLE))
return false;
if (HAS_PCH_CPT(dev))
*pipe = PORT_TO_PIPE_CPT(tmp);
else
*pipe = PORT_TO_PIPE(tmp);
return true;
}
static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crt *crt = intel_encoder_to_crt(encoder);
u32 tmp, flags = 0;
tmp = I915_READ(crt->adpa_reg);
if (tmp & ADPA_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
else
flags |= DRM_MODE_FLAG_NHSYNC;
if (tmp & ADPA_VSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PVSYNC;
else
flags |= DRM_MODE_FLAG_NVSYNC;
return flags;
}
static void intel_crt_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
int dotclock;
pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
dotclock = pipe_config->port_clock;
if (HAS_PCH_SPLIT(dev))
ironlake_check_encoder_dotclock(pipe_config, dotclock);
pipe_config->adjusted_mode.crtc_clock = dotclock;
}
static void hsw_crt_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
intel_ddi_get_config(encoder, pipe_config);
pipe_config->adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
DRM_MODE_FLAG_NHSYNC |
DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_NVSYNC);
pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
}
static void hsw_crt_pre_enable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL already enabled\n");
I915_WRITE(SPLL_CTL,
SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC);
POSTING_READ(SPLL_CTL);
udelay(20);
}
/* Note: The caller is required to filter out dpms modes not supported by the
* platform. */
static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_encoder_to_crt(encoder);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
u32 adpa;
if (INTEL_INFO(dev)->gen >= 5)
adpa = ADPA_HOTPLUG_BITS;
else
adpa = 0;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
adpa |= ADPA_HSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
adpa |= ADPA_VSYNC_ACTIVE_HIGH;
/* For CPT allow 3 pipe config, for others just use A or B */
if (HAS_PCH_LPT(dev))
; /* Those bits don't exist here */
else if (HAS_PCH_CPT(dev))
adpa |= PORT_TRANS_SEL_CPT(crtc->pipe);
else if (crtc->pipe == 0)
adpa |= ADPA_PIPE_A_SELECT;
else
adpa |= ADPA_PIPE_B_SELECT;
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT(crtc->pipe), 0);
switch (mode) {
case DRM_MODE_DPMS_ON:
adpa |= ADPA_DAC_ENABLE;
break;
case DRM_MODE_DPMS_STANDBY:
adpa |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE;
break;
case DRM_MODE_DPMS_SUSPEND:
adpa |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE;
break;
case DRM_MODE_DPMS_OFF:
adpa |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
break;
}
I915_WRITE(crt->adpa_reg, adpa);
}
static void intel_disable_crt(struct intel_encoder *encoder)
{
intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF);
}
static void hsw_crt_post_disable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t val;
DRM_DEBUG_KMS("Disabling SPLL\n");
val = I915_READ(SPLL_CTL);
WARN_ON(!(val & SPLL_PLL_ENABLE));
I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
POSTING_READ(SPLL_CTL);
}
static void intel_enable_crt(struct intel_encoder *encoder)
{
struct intel_crt *crt = intel_encoder_to_crt(encoder);
intel_crt_set_dpms(encoder, crt->connector->base.dpms);
}
/* Special dpms function to support cloning between dvo/sdvo/crt. */
static void intel_crt_dpms(struct drm_connector *connector, int mode)
{
struct drm_device *dev = connector->dev;
struct intel_encoder *encoder = intel_attached_encoder(connector);
struct drm_crtc *crtc;
int old_dpms;
/* PCH platforms and VLV only support on/off. */
if (INTEL_INFO(dev)->gen >= 5 && mode != DRM_MODE_DPMS_ON)
mode = DRM_MODE_DPMS_OFF;
if (mode == connector->dpms)
return;
old_dpms = connector->dpms;
connector->dpms = mode;
/* Only need to change hw state when actually enabled */
crtc = encoder->base.crtc;
if (!crtc) {
encoder->connectors_active = false;
return;
}
/* We need the pipe to run for anything but OFF. */
if (mode == DRM_MODE_DPMS_OFF)
encoder->connectors_active = false;
else
encoder->connectors_active = true;
/* We call connector dpms manually below in case pipe dpms doesn't
* change due to cloning. */
if (mode < old_dpms) {
/* From off to on, enable the pipe first. */
intel_crtc_update_dpms(crtc);
intel_crt_set_dpms(encoder, mode);
} else {
intel_crt_set_dpms(encoder, mode);
intel_crtc_update_dpms(crtc);
}
intel_modeset_check_state(connector->dev);
}
static enum drm_mode_status
intel_crt_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
int max_clock = 0;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
if (mode->clock < 25000)
return MODE_CLOCK_LOW;
if (IS_GEN2(dev))
max_clock = 350000;
else
max_clock = 400000;
if (mode->clock > max_clock)
return MODE_CLOCK_HIGH;
/* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
if (HAS_PCH_LPT(dev) &&
(ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
return MODE_CLOCK_HIGH;
return MODE_OK;
}
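/*
* Worked example for the LPT lane check above (assuming
* ironlake_get_lanes_required() computes roughly
* ceil(clock * bpp / (link_clock * 8)) plus protocol overhead):
* a 148500 kHz 1080p60 mode at 24 bpp needs about
* 148500 * 24 / (270000 * 8) = 1.65 lanes, so it fits in LPT's
* 2 FDI lanes, while pixel clocks much above ~170 MHz would need a
* third lane and are rejected with MODE_CLOCK_HIGH.
*/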
static bool intel_crt_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
if (HAS_PCH_SPLIT(dev))
pipe_config->has_pch_encoder = true;
/* LPT FDI RX only supports 8bpc. */
if (HAS_PCH_LPT(dev))
pipe_config->pipe_bpp = 24;
/* FDI must always be 2.7 GHz */
if (HAS_DDI(dev)) {
pipe_config->ddi_pll_sel = PORT_CLK_SEL_SPLL;
pipe_config->port_clock = 135000 * 2;
}
return true;
}
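/*
* Worked clock math for the DDI numbers above (a sketch of the
* reasoning, not code from this driver): FDI carries 8b/10b encoded
* data at 2.7 GT/s per lane, i.e. a 270 MHz symbol clock, which is
* why port_clock is set to 135000 * 2 kHz and the SPLL feeding it in
* hsw_crt_pre_enable() runs at 1350 MHz (270 MHz * 5).
*/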
static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct intel_crt *crt = intel_attached_crt(connector);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 adpa;
bool ret;
/* The first time through, trigger an explicit detection cycle */
if (crt->force_hotplug_required) {
bool turn_off_dac = HAS_PCH_SPLIT(dev);
u32 save_adpa;
crt->force_hotplug_required = false;
save_adpa = adpa = I915_READ(crt->adpa_reg);
DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
if (turn_off_dac)
adpa &= ~ADPA_DAC_ENABLE;
I915_WRITE(crt->adpa_reg, adpa);
if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
1000))
DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
if (turn_off_dac) {
I915_WRITE(crt->adpa_reg, save_adpa);
POSTING_READ(crt->adpa_reg);
}
}
/* Check the status to see if both blue and green are on now */
adpa = I915_READ(crt->adpa_reg);
if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
ret = true;
else
ret = false;
DRM_DEBUG_KMS("ironlake hotplug adpa=0x%x, result %d\n", adpa, ret);
return ret;
}
static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct intel_crt *crt = intel_attached_crt(connector);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 adpa;
bool ret;
u32 save_adpa;
save_adpa = adpa = I915_READ(crt->adpa_reg);
DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
I915_WRITE(crt->adpa_reg, adpa);
if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
1000)) {
DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
I915_WRITE(crt->adpa_reg, save_adpa);
}
/* Check the status to see if both blue and green are on now */
adpa = I915_READ(crt->adpa_reg);
if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
ret = true;
else
ret = false;
DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
return ret;
}
/**
* Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence.
*
* Not for i915G/i915GM
*
* \return true if CRT is connected.
* \return false if CRT is disconnected.
*/
static bool intel_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 hotplug_en, orig, stat;
bool ret = false;
int i, tries = 0;
if (HAS_PCH_SPLIT(dev))
return intel_ironlake_crt_detect_hotplug(connector);
if (IS_VALLEYVIEW(dev))
return valleyview_crt_detect_hotplug(connector);
/*
* On 4 series desktop, the CRT detect sequence needs to be done twice
* to get a reliable result.
*/
if (IS_G4X(dev) && !IS_GM45(dev))
tries = 2;
else
tries = 1;
hotplug_en = orig = I915_READ(PORT_HOTPLUG_EN);
hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
for (i = 0; i < tries; i++) {
/* turn on the FORCE_DETECT */
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
/* wait for FORCE_DETECT to go off */
if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
CRT_HOTPLUG_FORCE_DETECT) == 0,
1000))
DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
}
stat = I915_READ(PORT_HOTPLUG_STAT);
if ((stat & CRT_HOTPLUG_MONITOR_MASK) != CRT_HOTPLUG_MONITOR_NONE)
ret = true;
/* clear the interrupt we just generated, if any */
I915_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS);
/* and put the bits back */
I915_WRITE(PORT_HOTPLUG_EN, orig);
return ret;
}
static struct edid *intel_crt_get_edid(struct drm_connector *connector,
struct i2c_adapter *i2c)
{
struct edid *edid;
edid = drm_get_edid(connector, i2c);
if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
DRM_DEBUG_KMS("CRT GMBUS EDID read failed, retry using GPIO bit-banging\n");
intel_gmbus_force_bit(i2c, true);
edid = drm_get_edid(connector, i2c);
intel_gmbus_force_bit(i2c, false);
}
return edid;
}
/* local version of intel_ddc_get_modes() to use intel_crt_get_edid() */
static int intel_crt_ddc_get_modes(struct drm_connector *connector,
struct i2c_adapter *adapter)
{
struct edid *edid;
int ret;
edid = intel_crt_get_edid(connector, adapter);
if (!edid)
return 0;
ret = intel_connector_update_modes(connector, edid);
kfree(edid);
return ret;
}
static bool intel_crt_detect_ddc(struct drm_connector *connector)
{
struct intel_crt *crt = intel_attached_crt(connector);
struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
struct edid *edid;
struct i2c_adapter *i2c;
BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
edid = intel_crt_get_edid(connector, i2c);
if (edid) {
bool is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
/*
* This may be a DVI-I connector with a shared DDC
* link between analog and digital outputs, so we
* have to check the EDID input spec of the attached device.
*/
if (!is_digital) {
DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
return true;
}
DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
} else {
DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
}
kfree(edid);
return false;
}
static enum drm_connector_status
intel_crt_load_detect(struct intel_crt *crt)
{
struct drm_device *dev = crt->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t pipe = to_intel_crtc(crt->base.base.crtc)->pipe;
uint32_t save_bclrpat;
uint32_t save_vtotal;
uint32_t vtotal, vactive;
uint32_t vsample;
uint32_t vblank, vblank_start, vblank_end;
uint32_t dsl;
uint32_t bclrpat_reg;
uint32_t vtotal_reg;
uint32_t vblank_reg;
uint32_t vsync_reg;
uint32_t pipeconf_reg;
uint32_t pipe_dsl_reg;
uint8_t st00;
enum drm_connector_status status;
DRM_DEBUG_KMS("starting load-detect on CRT\n");
bclrpat_reg = BCLRPAT(pipe);
vtotal_reg = VTOTAL(pipe);
vblank_reg = VBLANK(pipe);
vsync_reg = VSYNC(pipe);
pipeconf_reg = PIPECONF(pipe);
pipe_dsl_reg = PIPEDSL(pipe);
save_bclrpat = I915_READ(bclrpat_reg);
save_vtotal = I915_READ(vtotal_reg);
vblank = I915_READ(vblank_reg);
vtotal = ((save_vtotal >> 16) & 0xfff) + 1;
vactive = (save_vtotal & 0x7ff) + 1;
vblank_start = (vblank & 0xfff) + 1;
vblank_end = ((vblank >> 16) & 0xfff) + 1;
/* Set the border color to purple. */
I915_WRITE(bclrpat_reg, 0x500050);
if (!IS_GEN2(dev)) {
uint32_t pipeconf = I915_READ(pipeconf_reg);
I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
POSTING_READ(pipeconf_reg);
/* Wait for next Vblank to substitute
* border color for Color info */
intel_wait_for_vblank(dev, pipe);
st00 = I915_READ8(VGA_MSR_WRITE);
status = ((st00 & (1 << 4)) != 0) ?
connector_status_connected :
connector_status_disconnected;
I915_WRITE(pipeconf_reg, pipeconf);
} else {
bool restore_vblank = false;
int count, detect;
/*
* If there isn't any border, add some.
* Yes, this will flicker
*/
if (vblank_start <= vactive && vblank_end >= vtotal) {
uint32_t vsync = I915_READ(vsync_reg);
uint32_t vsync_start = (vsync & 0xffff) + 1;
vblank_start = vsync_start;
I915_WRITE(vblank_reg,
(vblank_start - 1) |
((vblank_end - 1) << 16));
restore_vblank = true;
}
/* sample in the vertical border, selecting the larger one */
if (vblank_start - vactive >= vtotal - vblank_end)
vsample = (vblank_start + vactive) >> 1;
else
vsample = (vtotal + vblank_end) >> 1;
/*
* Wait for the border to be displayed
*/
while (I915_READ(pipe_dsl_reg) >= vactive)
;
while ((dsl = I915_READ(pipe_dsl_reg)) <= vsample)
;
/*
* Watch ST00 for an entire scanline
*/
detect = 0;
count = 0;
do {
count++;
/* Read the ST00 VGA status register */
st00 = I915_READ8(VGA_MSR_WRITE);
if (st00 & (1 << 4))
detect++;
} while ((I915_READ(pipe_dsl_reg) == dsl));
/* restore vblank if necessary */
if (restore_vblank)
I915_WRITE(vblank_reg, vblank);
/*
* If more than 3/4 of the scanline detected a monitor,
* then it is assumed to be present. This works even on i830,
* where there isn't any way to force the border color across
* the screen
*/
status = detect * 4 > count * 3 ?
connector_status_connected :
connector_status_disconnected;
}
/* Restore previous settings */
I915_WRITE(bclrpat_reg, save_bclrpat);
return status;
}
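/*
* Worked example for the 3/4 threshold above: if the ST00 poll loop
* sampled 400 times during the scanline and the monitor-present bit
* was set in 350 of them, then 350 * 4 = 1400 > 400 * 3 = 1200, so
* the load detect reports the connector as connected.
*/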
static enum drm_connector_status
intel_crt_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_attached_crt(connector);
struct intel_encoder *intel_encoder = &crt->base;
enum intel_display_power_domain power_domain;
enum drm_connector_status status;
struct intel_load_detect_pipe tmp;
struct drm_modeset_acquire_ctx ctx;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
connector->base.id, connector->name,
force);
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
if (I915_HAS_HOTPLUG(dev)) {
/* We can not rely on the HPD pin always being correctly wired
* up, for example many KVMs do not pass it through, and so
* only trust an assertion that the monitor is connected.
*/
if (intel_crt_detect_hotplug(connector)) {
DRM_DEBUG_KMS("CRT detected via hotplug\n");
status = connector_status_connected;
goto out;
} else
DRM_DEBUG_KMS("CRT not detected via hotplug\n");
}
if (intel_crt_detect_ddc(connector)) {
status = connector_status_connected;
goto out;
}
/* Load detection is broken on HPD capable machines. Whoever wants a
* broken monitor (without edid) to work behind a broken kvm (that fails
* to have the right resistors for HP detection) needs to fix this up.
* For now just bail out. */
if (I915_HAS_HOTPLUG(dev)) {
status = connector_status_disconnected;
goto out;
}
if (!force) {
status = connector->status;
goto out;
}
drm_modeset_acquire_init(&ctx, 0);
/* for pre-945g platforms use load detect */
if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) {
if (intel_crt_detect_ddc(connector))
status = connector_status_connected;
else
status = intel_crt_load_detect(crt);
intel_release_load_detect_pipe(connector, &tmp);
} else
status = connector_status_unknown;
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
out:
intel_display_power_put(dev_priv, power_domain);
return status;
}
static void intel_crt_destroy(struct drm_connector *connector)
{
drm_connector_cleanup(connector);
kfree(connector);
}
static int intel_crt_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_attached_crt(connector);
struct intel_encoder *intel_encoder = &crt->base;
enum intel_display_power_domain power_domain;
int ret;
struct i2c_adapter *i2c;
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
ret = intel_crt_ddc_get_modes(connector, i2c);
if (ret || !IS_G4X(dev))
goto out;
/* Try to probe digital port for output in DVI-I -> VGA mode. */
i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
ret = intel_crt_ddc_get_modes(connector, i2c);
out:
intel_display_power_put(dev_priv, power_domain);
return ret;
}
static int intel_crt_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t value)
{
return 0;
}
static void intel_crt_reset(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_attached_crt(connector);
if (INTEL_INFO(dev)->gen >= 5) {
u32 adpa;
adpa = I915_READ(crt->adpa_reg);
adpa &= ~ADPA_CRT_HOTPLUG_MASK;
adpa |= ADPA_HOTPLUG_BITS;
I915_WRITE(crt->adpa_reg, adpa);
POSTING_READ(crt->adpa_reg);
DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
crt->force_hotplug_required = true;
}
}
/*
* Routines for controlling stuff on the analog port
*/
static const struct drm_connector_funcs intel_crt_connector_funcs = {
.reset = intel_crt_reset,
.dpms = intel_crt_dpms,
.detect = intel_crt_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = intel_crt_destroy,
.set_property = intel_crt_set_property,
};
static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
.mode_valid = intel_crt_mode_valid,
.get_modes = intel_crt_get_modes,
.best_encoder = intel_best_encoder,
};
static const struct drm_encoder_funcs intel_crt_enc_funcs = {
.destroy = intel_encoder_destroy,
};
static int intel_no_crt_dmi_callback(const struct dmi_system_id *id)
{
DRM_INFO("Skipping CRT initialization for %s\n", id->ident);
return 1;
}
static const struct dmi_system_id intel_no_crt[] = {
{
.callback = intel_no_crt_dmi_callback,
.ident = "ACER ZGB",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ACER"),
DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
},
},
{
.callback = intel_no_crt_dmi_callback,
.ident = "DELL XPS 8700",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "XPS 8700"),
},
},
{ }
};
void intel_crt_init(struct drm_device *dev)
{
struct drm_connector *connector;
struct intel_crt *crt;
struct intel_connector *intel_connector;
struct drm_i915_private *dev_priv = dev->dev_private;
/* Skip machines without VGA that falsely report hotplug events */
if (dmi_check_system(intel_no_crt))
return;
crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL);
if (!crt)
return;
intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(crt);
return;
}
connector = &intel_connector->base;
crt->connector = intel_connector;
drm_connector_init(dev, &intel_connector->base,
&intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs,
DRM_MODE_ENCODER_DAC);
intel_connector_attach_encoder(intel_connector, &crt->base);
crt->base.type = INTEL_OUTPUT_ANALOG;
crt->base.cloneable = (1 << INTEL_OUTPUT_DVO) | (1 << INTEL_OUTPUT_HDMI);
if (IS_I830(dev))
crt->base.crtc_mask = (1 << 0);
else
crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
if (IS_GEN2(dev))
connector->interlace_allowed = 0;
else
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
if (HAS_PCH_SPLIT(dev))
crt->adpa_reg = PCH_ADPA;
else if (IS_VALLEYVIEW(dev))
crt->adpa_reg = VLV_ADPA;
else
crt->adpa_reg = ADPA;
crt->base.compute_config = intel_crt_compute_config;
crt->base.disable = intel_disable_crt;
crt->base.enable = intel_enable_crt;
if (I915_HAS_HOTPLUG(dev))
crt->base.hpd_pin = HPD_CRT;
if (HAS_DDI(dev)) {
crt->base.get_config = hsw_crt_get_config;
crt->base.get_hw_state = intel_ddi_get_hw_state;
crt->base.pre_enable = hsw_crt_pre_enable;
crt->base.post_disable = hsw_crt_post_disable;
} else {
crt->base.get_config = intel_crt_get_config;
crt->base.get_hw_state = intel_crt_get_hw_state;
}
intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_connector->unregister = intel_connector_unregister;
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
drm_connector_register(connector);
if (!I915_HAS_HOTPLUG(dev))
intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
/*
* Configure the automatic hotplug detection stuff
*/
crt->force_hotplug_required = false;
/*
* TODO: find a proper way to discover whether we need to set the
* polarity and link reversal bits or not, instead of relying on the
* BIOS.
*/
if (HAS_PCH_LPT(dev)) {
u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
FDI_RX_LINK_REVERSAL_OVERRIDE;
dev_priv->fdi_rx_config = I915_READ(_FDI_RXA_CTL) & fdi_config;
}
intel_crt_reset(connector);
}

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -0,0 +1,548 @@
/*
* Copyright © 2008 Intel Corporation
* 2014 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include <drm/drmP.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_device *dev = encoder->base.dev;
int bpp;
int lane_count, slots;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
struct intel_connector *found = NULL, *intel_connector;
int mst_pbn;
pipe_config->dp_encoder_is_mst = true;
pipe_config->has_pch_encoder = false;
pipe_config->has_dp_encoder = true;
bpp = 24;
/*
* for MST we always configure max link bw - the spec doesn't
* seem to suggest we should do otherwise.
*/
lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
intel_dp->link_bw = intel_dp_max_link_bw(intel_dp);
intel_dp->lane_count = lane_count;
pipe_config->pipe_bpp = 24;
pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) {
if (intel_connector->new_encoder == encoder) {
found = intel_connector;
break;
}
}
if (!found) {
DRM_ERROR("can't find connector\n");
return false;
}
mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp);
pipe_config->pbn = mst_pbn;
slots = drm_dp_find_vcpi_slots(&intel_dp->mst_mgr, mst_pbn);
intel_link_compute_m_n(bpp, lane_count,
adjusted_mode->crtc_clock,
pipe_config->port_clock,
&pipe_config->dp_m_n);
pipe_config->dp_m_n.tu = slots;
return true;
}
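/*
* Rough worked example for the PBN/slot math above (assuming the
* standard DP MST formula pbn ~= clock_kHz * bpp / 8 * 64/54 * 1.006):
* a 1080p60 stream at 148500 kHz and 24 bpp comes out at about
* 532 PBN, which drm_dp_find_vcpi_slots() then converts into a number
* of the link's 64 MTP timeslots based on per-slot capacity.
*/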
static void intel_mst_disable_dp(struct intel_encoder *encoder)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
int ret;
DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, intel_mst->port);
ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
if (ret) {
DRM_ERROR("failed to update payload %d\n", ret);
}
}
static void intel_mst_post_disable_dp(struct intel_encoder *encoder)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
/* this can fail */
drm_dp_check_act_status(&intel_dp->mst_mgr);
/* and this can also fail */
drm_dp_update_payload_part2(&intel_dp->mst_mgr);
drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, intel_mst->port);
intel_dp->active_mst_links--;
intel_mst->port = NULL;
if (intel_dp->active_mst_links == 0) {
intel_dig_port->base.post_disable(&intel_dig_port->base);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
}
}
static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_dig_port->port;
int ret;
uint32_t temp;
struct intel_connector *found = NULL, *intel_connector;
int slots;
struct drm_crtc *crtc = encoder->base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) {
if (intel_connector->new_encoder == encoder) {
found = intel_connector;
break;
}
}
if (!found) {
DRM_ERROR("can't find connector\n");
return;
}
DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
intel_mst->port = found->port;
if (intel_dp->active_mst_links == 0) {
enum port port = intel_ddi_get_encoder_port(encoder);
I915_WRITE(PORT_CLK_SEL(port), intel_crtc->config.ddi_pll_sel);
intel_ddi_init_dp_buf_reg(&intel_dig_port->base);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
}
ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
intel_mst->port, intel_crtc->config.pbn, &slots);
if (!ret) {
DRM_ERROR("failed to allocate vcpi\n");
return;
}
intel_dp->active_mst_links++;
temp = I915_READ(DP_TP_STATUS(port));
I915_WRITE(DP_TP_STATUS(port), temp);
ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
}
static void intel_mst_enable_dp(struct intel_encoder *encoder)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_dig_port->port;
int ret;
DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_ACT_SENT),
1))
DRM_ERROR("Timed out waiting for ACT sent\n");
ret = drm_dp_check_act_status(&intel_dp->mst_mgr);
ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr);
}
static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
*pipe = intel_mst->pipe;
if (intel_mst->port)
return true;
return false;
}
static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
u32 temp, flags = 0;
pipe_config->has_dp_encoder = true;
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (temp & TRANS_DDI_PHSYNC)
flags |= DRM_MODE_FLAG_PHSYNC;
else
flags |= DRM_MODE_FLAG_NHSYNC;
if (temp & TRANS_DDI_PVSYNC)
flags |= DRM_MODE_FLAG_PVSYNC;
else
flags |= DRM_MODE_FLAG_NVSYNC;
switch (temp & TRANS_DDI_BPC_MASK) {
case TRANS_DDI_BPC_6:
pipe_config->pipe_bpp = 18;
break;
case TRANS_DDI_BPC_8:
pipe_config->pipe_bpp = 24;
break;
case TRANS_DDI_BPC_10:
pipe_config->pipe_bpp = 30;
break;
case TRANS_DDI_BPC_12:
pipe_config->pipe_bpp = 36;
break;
default:
break;
}
pipe_config->adjusted_mode.flags |= flags;
intel_dp_get_m_n(crtc, pipe_config);
intel_ddi_clock_get(&intel_dig_port->base, pipe_config);
}
static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
struct edid *edid;
int ret;
edid = drm_dp_mst_get_edid(connector, &intel_dp->mst_mgr, intel_connector->port);
if (!edid)
return 0;
ret = intel_connector_update_modes(connector, edid);
kfree(edid);
return ret;
}
static enum drm_connector_status
intel_mst_port_dp_detect(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
return drm_dp_mst_detect_port(&intel_dp->mst_mgr, intel_connector->port);
}
static enum drm_connector_status
intel_dp_mst_detect(struct drm_connector *connector, bool force)
{
enum drm_connector_status status;
status = intel_mst_port_dp_detect(connector);
return status;
}
static int
intel_dp_mst_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
{
return 0;
}
static void
intel_dp_mst_connector_destroy(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
if (!IS_ERR_OR_NULL(intel_connector->edid))
kfree(intel_connector->edid);
drm_connector_cleanup(connector);
kfree(connector);
}
static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
.dpms = intel_connector_dpms,
.detect = intel_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_dp_mst_set_property,
.destroy = intel_dp_mst_connector_destroy,
};
static int intel_dp_mst_get_modes(struct drm_connector *connector)
{
return intel_dp_mst_get_ddc_modes(connector);
}
static enum drm_mode_status
intel_dp_mst_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
/* TODO - validate mode against available PBN for link */
if (mode->clock < 10000)
return MODE_CLOCK_LOW;
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
return MODE_H_ILLEGAL;
return MODE_OK;
}
static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
return &intel_dp->mst_encoders[0]->base.base;
}
static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
.get_modes = intel_dp_mst_get_modes,
.mode_valid = intel_dp_mst_mode_valid,
.best_encoder = intel_mst_best_encoder,
};
static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
drm_encoder_cleanup(encoder);
kfree(intel_mst);
}
static const struct drm_encoder_funcs intel_dp_mst_enc_funcs = {
.destroy = intel_dp_mst_encoder_destroy,
};
static bool intel_dp_mst_get_hw_state(struct intel_connector *connector)
{
if (connector->encoder) {
enum pipe pipe;
if (!connector->encoder->get_hw_state(connector->encoder, &pipe))
return false;
return true;
}
return false;
}
static void intel_connector_add_to_fbdev(struct intel_connector *connector)
{
#ifdef CONFIG_DRM_I915_FBDEV
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper, &connector->base);
#endif
}
static void intel_connector_remove_from_fbdev(struct intel_connector *connector)
{
#ifdef CONFIG_DRM_I915_FBDEV
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, &connector->base);
#endif
}
static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, char *pathprop)
{
struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct intel_connector *intel_connector;
struct drm_connector *connector;
int i;
intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
if (!intel_connector)
return NULL;
connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort);
drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
intel_connector->unregister = intel_connector_unregister;
intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
intel_connector->mst_port = intel_dp;
intel_connector->port = port;
for (i = PIPE_A; i <= PIPE_C; i++) {
drm_mode_connector_attach_encoder(&intel_connector->base,
&intel_dp->mst_encoders[i]->base.base);
}
intel_dp_add_properties(intel_dp, connector);
drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
drm_mode_connector_set_path_property(connector, pathprop);
drm_reinit_primary_mode_group(dev);
mutex_lock(&dev->mode_config.mutex);
intel_connector_add_to_fbdev(intel_connector);
mutex_unlock(&dev->mode_config.mutex);
drm_connector_register(&intel_connector->base);
return connector;
}
static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
/* need to nuke the connector */
mutex_lock(&dev->mode_config.mutex);
intel_connector_dpms(connector, DRM_MODE_DPMS_OFF);
mutex_unlock(&dev->mode_config.mutex);
intel_connector->unregister(intel_connector);
mutex_lock(&dev->mode_config.mutex);
intel_connector_remove_from_fbdev(intel_connector);
drm_connector_cleanup(connector);
mutex_unlock(&dev->mode_config.mutex);
drm_reinit_primary_mode_group(dev);
kfree(intel_connector);
DRM_DEBUG_KMS("\n");
}
static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
{
struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
drm_kms_helper_hotplug_event(dev);
}
static struct drm_dp_mst_topology_cbs mst_cbs = {
.add_connector = intel_dp_add_mst_connector,
.destroy_connector = intel_dp_destroy_mst_connector,
.hotplug = intel_dp_mst_hotplug,
};
static struct intel_dp_mst_encoder *
intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum pipe pipe)
{
struct intel_dp_mst_encoder *intel_mst;
struct intel_encoder *intel_encoder;
struct drm_device *dev = intel_dig_port->base.base.dev;
intel_mst = kzalloc(sizeof(*intel_mst), GFP_KERNEL);
if (!intel_mst)
return NULL;
intel_mst->pipe = pipe;
intel_encoder = &intel_mst->base;
intel_mst->primary = intel_dig_port;
drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs,
DRM_MODE_ENCODER_DPMST);
intel_encoder->type = INTEL_OUTPUT_DP_MST;
intel_encoder->crtc_mask = 0x7;
intel_encoder->cloneable = 0;
intel_encoder->compute_config = intel_dp_mst_compute_config;
intel_encoder->disable = intel_mst_disable_dp;
intel_encoder->post_disable = intel_mst_post_disable_dp;
intel_encoder->pre_enable = intel_mst_pre_enable_dp;
intel_encoder->enable = intel_mst_enable_dp;
intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state;
intel_encoder->get_config = intel_dp_mst_enc_get_config;
return intel_mst;
}
static bool
intel_dp_create_fake_mst_encoders(struct intel_digital_port *intel_dig_port)
{
int i;
struct intel_dp *intel_dp = &intel_dig_port->dp;
for (i = PIPE_A; i <= PIPE_C; i++)
intel_dp->mst_encoders[i] = intel_dp_create_fake_mst_encoder(intel_dig_port, i);
return true;
}
int
intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_base_id)
{
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_device *dev = intel_dig_port->base.base.dev;
int ret;
intel_dp->can_mst = true;
intel_dp->mst_mgr.cbs = &mst_cbs;
/* create encoders */
intel_dp_create_fake_mst_encoders(intel_dig_port);
ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, dev->dev, &intel_dp->aux, 16, 3, conn_base_id);
if (ret) {
intel_dp->can_mst = false;
return ret;
}
return 0;
}
void
intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port)
{
struct intel_dp *intel_dp = &intel_dig_port->dp;
if (!intel_dp->can_mst)
return;
drm_dp_mst_topology_mgr_destroy(&intel_dp->mst_mgr);
/* encoders will get killed by normal cleanup */
}
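/*
* Illustrative call order from the primary DP encoder's side (a
* sketch; the real call sites live in the DP/DDI code, not here):
*
* if (sink_supports_mst)
* intel_dp_mst_encoder_init(intel_dig_port, connector_id);
* ...
* intel_dp_mst_encoder_cleanup(intel_dig_port);
*/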

File diff suppressed because it is too large

View file

@ -0,0 +1,796 @@
/*
* Copyright © 2013 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Jani Nikula <jani.nikula@intel.com>
*/
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/i915_drm.h>
#include <linux/slab.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_dsi.h"
#include "intel_dsi_cmd.h"
/* the sub-encoders aka panel drivers */
static const struct intel_dsi_device intel_dsi_devices[] = {
{
.panel_id = MIPI_DSI_GENERIC_PANEL_ID,
.name = "vbt-generic-dsi-vid-mode-display",
.dev_ops = &vbt_generic_dsi_display_ops,
},
};
static void band_gap_reset(struct drm_i915_private *dev_priv)
{
mutex_lock(&dev_priv->dpio_lock);
vlv_flisdsi_write(dev_priv, 0x08, 0x0001);
vlv_flisdsi_write(dev_priv, 0x0F, 0x0005);
vlv_flisdsi_write(dev_priv, 0x0F, 0x0025);
udelay(150);
vlv_flisdsi_write(dev_priv, 0x0F, 0x0000);
vlv_flisdsi_write(dev_priv, 0x08, 0x0000);
mutex_unlock(&dev_priv->dpio_lock);
}
static struct intel_dsi *intel_attached_dsi(struct drm_connector *connector)
{
return container_of(intel_attached_encoder(connector),
struct intel_dsi, base);
}
static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
{
return intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE;
}
static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
{
return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE;
}
static void intel_dsi_hot_plug(struct intel_encoder *encoder)
{
DRM_DEBUG_KMS("\n");
}
static bool intel_dsi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *config)
{
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
base);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
struct drm_display_mode *adjusted_mode = &config->adjusted_mode;
struct drm_display_mode *mode = &config->requested_mode;
DRM_DEBUG_KMS("\n");
if (fixed_mode)
intel_fixed_panel_mode(fixed_mode, adjusted_mode);
/* DSI uses short packets for sync events, so clear mode flags for DSI */
adjusted_mode->flags = 0;
if (intel_dsi->dev.dev_ops->mode_fixup)
return intel_dsi->dev.dev_ops->mode_fixup(&intel_dsi->dev,
mode, adjusted_mode);
return true;
}
static void intel_dsi_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
int pipe = intel_crtc->pipe;
u32 val;
DRM_DEBUG_KMS("\n");
mutex_lock(&dev_priv->dpio_lock);
/* program rcomp for compliance, reduce from 50 ohms to 45 ohms,
* needed every time after power gating */
vlv_flisdsi_write(dev_priv, 0x04, 0x0004);
mutex_unlock(&dev_priv->dpio_lock);
/* band gap reset is needed every time we power gate */
band_gap_reset(dev_priv);
I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
usleep_range(2500, 3000);
val = I915_READ(MIPI_PORT_CTRL(pipe));
I915_WRITE(MIPI_PORT_CTRL(pipe), val | LP_OUTPUT_HOLD);
usleep_range(1000, 1500);
I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_EXIT);
usleep_range(2500, 3000);
I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
usleep_range(2500, 3000);
}
static void intel_dsi_enable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
int pipe = intel_crtc->pipe;
u32 temp;
DRM_DEBUG_KMS("\n");
if (is_cmd_mode(intel_dsi)) {
I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(pipe), 8 * 4);
} else {
msleep(20); /* XXX */
dpi_send_cmd(intel_dsi, TURN_ON, DPI_LP_MODE_EN);
msleep(100);
if (intel_dsi->dev.dev_ops->enable)
intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);
wait_for_dsi_fifo_empty(intel_dsi);
/* assert ip_tg_enable signal */
temp = I915_READ(MIPI_PORT_CTRL(pipe)) & ~LANE_CONFIGURATION_MASK;
temp = temp | intel_dsi->port_bits;
I915_WRITE(MIPI_PORT_CTRL(pipe), temp | DPI_ENABLE);
POSTING_READ(MIPI_PORT_CTRL(pipe));
}
}
static void intel_dsi_pre_enable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum pipe pipe = intel_crtc->pipe;
u32 tmp;
DRM_DEBUG_KMS("\n");
/* Disable DPOunit clock gating; it can stall the pipe,
* and we need DPLL REFA always enabled */
tmp = I915_READ(DPLL(pipe));
tmp |= DPLL_REFA_CLK_ENABLE_VLV;
I915_WRITE(DPLL(pipe), tmp);
/* update the hw state for DPLL */
intel_crtc->config.dpll_hw_state.dpll = DPLL_INTEGRATED_CLOCK_VLV |
DPLL_REFA_CLK_ENABLE_VLV;
tmp = I915_READ(DSPCLK_GATE_D);
tmp |= DPOUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, tmp);
/* put device in ready state */
intel_dsi_device_ready(encoder);
msleep(intel_dsi->panel_on_delay);
if (intel_dsi->dev.dev_ops->panel_reset)
intel_dsi->dev.dev_ops->panel_reset(&intel_dsi->dev);
if (intel_dsi->dev.dev_ops->send_otp_cmds)
intel_dsi->dev.dev_ops->send_otp_cmds(&intel_dsi->dev);
wait_for_dsi_fifo_empty(intel_dsi);
/* Enable port in the pre-enable phase itself because, as per hw team
* recommendation, the port should be enabled before plane & pipe */
intel_dsi_enable(encoder);
}
static void intel_dsi_enable_nop(struct intel_encoder *encoder)
{
DRM_DEBUG_KMS("\n");
/* for DSI, port enable has to be done before pipe
* and plane enable, so port enable is done in the
* pre_enable phase itself, unlike other encoders
*/
}
static void intel_dsi_pre_disable(struct intel_encoder *encoder)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
DRM_DEBUG_KMS("\n");
if (is_vid_mode(intel_dsi)) {
/* Send Shutdown command to the panel in LP mode */
dpi_send_cmd(intel_dsi, SHUTDOWN, DPI_LP_MODE_EN);
msleep(10);
}
}
static void intel_dsi_disable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
int pipe = intel_crtc->pipe;
u32 temp;
DRM_DEBUG_KMS("\n");
if (is_vid_mode(intel_dsi)) {
wait_for_dsi_fifo_empty(intel_dsi);
/* de-assert ip_tg_enable signal */
temp = I915_READ(MIPI_PORT_CTRL(pipe));
I915_WRITE(MIPI_PORT_CTRL(pipe), temp & ~DPI_ENABLE);
POSTING_READ(MIPI_PORT_CTRL(pipe));
msleep(2);
}
/* Panel commands can be sent when clock is in LP11 */
I915_WRITE(MIPI_DEVICE_READY(pipe), 0x0);
temp = I915_READ(MIPI_CTRL(pipe));
temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
I915_WRITE(MIPI_CTRL(pipe), temp |
intel_dsi->escape_clk_div <<
ESCAPE_CLOCK_DIVIDER_SHIFT);
I915_WRITE(MIPI_EOT_DISABLE(pipe), CLOCKSTOP);
temp = I915_READ(MIPI_DSI_FUNC_PRG(pipe));
temp &= ~VID_MODE_FORMAT_MASK;
I915_WRITE(MIPI_DSI_FUNC_PRG(pipe), temp);
I915_WRITE(MIPI_DEVICE_READY(pipe), 0x1);
/* if disable packets are sent before the shutdown packet, then on some
* subsequent enable sequence a "send turn on packet" error is observed */
if (intel_dsi->dev.dev_ops->disable)
intel_dsi->dev.dev_ops->disable(&intel_dsi->dev);
wait_for_dsi_fifo_empty(intel_dsi);
}
static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
int pipe = intel_crtc->pipe;
u32 val;
DRM_DEBUG_KMS("\n");
I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_ENTER);
usleep_range(2000, 2500);
I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_EXIT);
usleep_range(2000, 2500);
I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_ENTER);
usleep_range(2000, 2500);
if (wait_for(((I915_READ(MIPI_PORT_CTRL(pipe)) & AFE_LATCHOUT)
== 0x00000), 30))
DRM_ERROR("DSI LP not going Low\n");
val = I915_READ(MIPI_PORT_CTRL(pipe));
I915_WRITE(MIPI_PORT_CTRL(pipe), val & ~LP_OUTPUT_HOLD);
usleep_range(1000, 1500);
I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00);
usleep_range(2000, 2500);
vlv_disable_dsi_pll(encoder);
}
static void intel_dsi_post_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
u32 val;
DRM_DEBUG_KMS("\n");
intel_dsi_disable(encoder);
intel_dsi_clear_device_ready(encoder);
val = I915_READ(DSPCLK_GATE_D);
val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, val);
if (intel_dsi->dev.dev_ops->disable_panel_power)
intel_dsi->dev.dev_ops->disable_panel_power(&intel_dsi->dev);
msleep(intel_dsi->panel_off_delay);
msleep(intel_dsi->panel_pwr_cycle_delay);
}
static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
enum intel_display_power_domain power_domain;
u32 port, func;
enum pipe p;
DRM_DEBUG_KMS("\n");
power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_enabled(dev_priv, power_domain))
return false;
/* XXX: this only works for one DSI output */
for (p = PIPE_A; p <= PIPE_B; p++) {
port = I915_READ(MIPI_PORT_CTRL(p));
func = I915_READ(MIPI_DSI_FUNC_PRG(p));
if ((port & DPI_ENABLE) || (func & CMD_MODE_DATA_WIDTH_MASK)) {
if (I915_READ(MIPI_DEVICE_READY(p)) & DEVICE_READY) {
*pipe = p;
return true;
}
}
}
return false;
}
static void intel_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
u32 pclk;
DRM_DEBUG_KMS("\n");
/*
* DPLL_MD is not used in case of DSI; reading it will get some default
* value. Set dpll_md = 0.
*/
pipe_config->dpll_hw_state.dpll_md = 0;
pclk = vlv_get_dsi_pclk(encoder, pipe_config->pipe_bpp);
if (!pclk)
return;
pipe_config->adjusted_mode.crtc_clock = pclk;
pipe_config->port_clock = pclk;
}
static enum drm_mode_status
intel_dsi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
DRM_DEBUG_KMS("\n");
if (mode->flags & DRM_MODE_FLAG_DBLSCAN) {
DRM_DEBUG_KMS("MODE_NO_DBLESCAN\n");
return MODE_NO_DBLESCAN;
}
if (fixed_mode) {
if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;
if (mode->vdisplay > fixed_mode->vdisplay)
return MODE_PANEL;
}
return intel_dsi->dev.dev_ops->mode_valid(&intel_dsi->dev, mode);
}
/* return txclkesc cycles in terms of divider and duration in us */
static u16 txclkesc(u32 divider, unsigned int us)
{
switch (divider) {
case ESCAPE_CLOCK_DIVIDER_1:
default:
return 20 * us;
case ESCAPE_CLOCK_DIVIDER_2:
return 10 * us;
case ESCAPE_CLOCK_DIVIDER_4:
return 5 * us;
}
}
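/*
* Example: with ESCAPE_CLOCK_DIVIDER_1 the escape clock runs at
* 20 MHz, i.e. 20 cycles per microsecond, so
* txclkesc(ESCAPE_CLOCK_DIVIDER_1, 100) yields 2000 cycles -- the
* value later programmed into MIPI_INIT_COUNT in intel_dsi_prepare().
*/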
/* return pixels in terms of txbyteclkhs */
static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
u16 burst_mode_ratio)
{
return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_mode_ratio,
8 * 100), lane_count);
}
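/*
* Example: 1920 active pixels at 24 bpp over 4 lanes with a 1:1 burst
* ratio (burst_mode_ratio == 100) is 1920 * 3 bytes = 5760 bytes on
* the wire, i.e. 5760 / 4 = 1440 txbyteclkhs per lane -- exactly what
* the double DIV_ROUND_UP above computes.
*/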
static void set_dsi_timings(struct drm_encoder *encoder,
const struct drm_display_mode *mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
int pipe = intel_crtc->pipe;
unsigned int bpp = intel_crtc->config.pipe_bpp;
unsigned int lane_count = intel_dsi->lane_count;
u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
hactive = mode->hdisplay;
hfp = mode->hsync_start - mode->hdisplay;
hsync = mode->hsync_end - mode->hsync_start;
hbp = mode->htotal - mode->hsync_end;
vfp = mode->vsync_start - mode->vdisplay;
vsync = mode->vsync_end - mode->vsync_start;
vbp = mode->vtotal - mode->vsync_end;
/* horizontal values are in terms of high speed byte clock */
hactive = txbyteclkhs(hactive, bpp, lane_count,
intel_dsi->burst_mode_ratio);
hfp = txbyteclkhs(hfp, bpp, lane_count, intel_dsi->burst_mode_ratio);
hsync = txbyteclkhs(hsync, bpp, lane_count,
intel_dsi->burst_mode_ratio);
hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio);
I915_WRITE(MIPI_HACTIVE_AREA_COUNT(pipe), hactive);
I915_WRITE(MIPI_HFP_COUNT(pipe), hfp);
/* meaningful for video mode non-burst sync pulse mode only, can be zero
* for non-burst sync events and burst modes */
I915_WRITE(MIPI_HSYNC_PADDING_COUNT(pipe), hsync);
I915_WRITE(MIPI_HBP_COUNT(pipe), hbp);
/* vertical values are in terms of lines */
I915_WRITE(MIPI_VFP_COUNT(pipe), vfp);
I915_WRITE(MIPI_VSYNC_PADDING_COUNT(pipe), vsync);
I915_WRITE(MIPI_VBP_COUNT(pipe), vbp);
}
static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
int pipe = intel_crtc->pipe;
unsigned int bpp = intel_crtc->config.pipe_bpp;
u32 val, tmp;
DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
/* escape clock divider, 20MHz, shared for A and C. device ready must be
* off when doing this! txclkesc? */
tmp = I915_READ(MIPI_CTRL(0));
tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
I915_WRITE(MIPI_CTRL(0), tmp | ESCAPE_CLOCK_DIVIDER_1);
/* read request priority is per pipe */
tmp = I915_READ(MIPI_CTRL(pipe));
tmp &= ~READ_REQUEST_PRIORITY_MASK;
I915_WRITE(MIPI_CTRL(pipe), tmp | READ_REQUEST_PRIORITY_HIGH);
/* XXX: why here, why like this? handling in irq handler?! */
I915_WRITE(MIPI_INTR_STAT(pipe), 0xffffffff);
I915_WRITE(MIPI_INTR_EN(pipe), 0xffffffff);
I915_WRITE(MIPI_DPHY_PARAM(pipe), intel_dsi->dphy_reg);
I915_WRITE(MIPI_DPI_RESOLUTION(pipe),
adjusted_mode->vdisplay << VERTICAL_ADDRESS_SHIFT |
adjusted_mode->hdisplay << HORIZONTAL_ADDRESS_SHIFT);
set_dsi_timings(encoder, adjusted_mode);
val = intel_dsi->lane_count << DATA_LANES_PRG_REG_SHIFT;
if (is_cmd_mode(intel_dsi)) {
val |= intel_dsi->channel << CMD_MODE_CHANNEL_NUMBER_SHIFT;
val |= CMD_MODE_DATA_WIDTH_8_BIT; /* XXX */
} else {
val |= intel_dsi->channel << VID_MODE_CHANNEL_NUMBER_SHIFT;
/* XXX: cross-check bpp vs. pixel format? */
val |= intel_dsi->pixel_format;
}
I915_WRITE(MIPI_DSI_FUNC_PRG(pipe), val);
/* timeouts for recovery. one frame IIUC. if counter expires, EOT and
* stop state. */
/*
* In burst mode, a value greater than one DPI line time in byte clock
* (txbyteclkhs) is required. To time out this timer, a value 1 higher
* than the above is recommended.
*
* In non-burst mode, a value greater than one DPI frame time in byte
* clock (txbyteclkhs) is required. To time out this timer, a value 1
* higher than the above is recommended.
*
* In DBI only mode, a value greater than one DBI frame time in byte
* clock (txbyteclkhs) is required. To time out this timer, a value 1
* higher than the above is recommended.
*/
if (is_vid_mode(intel_dsi) &&
intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
txbyteclkhs(adjusted_mode->htotal, bpp,
intel_dsi->lane_count,
intel_dsi->burst_mode_ratio) + 1);
} else {
I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
txbyteclkhs(adjusted_mode->vtotal *
adjusted_mode->htotal,
bpp, intel_dsi->lane_count,
intel_dsi->burst_mode_ratio) + 1);
}
I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), intel_dsi->lp_rx_timeout);
I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), intel_dsi->turn_arnd_val);
I915_WRITE(MIPI_DEVICE_RESET_TIMER(pipe), intel_dsi->rst_timer_val);
/* dphy stuff */
/* in terms of low power clock */
I915_WRITE(MIPI_INIT_COUNT(pipe), txclkesc(intel_dsi->escape_clk_div, 100));
val = 0;
if (intel_dsi->eotp_pkt == 0)
val |= EOT_DISABLE;
if (intel_dsi->clock_stop)
val |= CLOCKSTOP;
/* recovery disables */
I915_WRITE(MIPI_EOT_DISABLE(pipe), val);
/* in terms of low power clock */
I915_WRITE(MIPI_INIT_COUNT(pipe), intel_dsi->init_count);
/* in terms of txbyteclkhs. actual high to low switch +
* MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK.
*
* XXX: write MIPI_STOP_STATE_STALL?
*/
I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(pipe),
intel_dsi->hs_to_lp_count);
/* XXX: low power clock equivalence in terms of byte clock. the number
* of byte clocks occupied in one low power clock. based on txbyteclkhs
* and txclkesc. txclkesc time / txbyteclk time * (105 +
* MIPI_STOP_STATE_STALL) / 105.???
*/
I915_WRITE(MIPI_LP_BYTECLK(pipe), intel_dsi->lp_byte_clk);
/* the bw essential for transmitting 16 long packets containing 252
* bytes meant for dcs write memory command is programmed in this
* register in terms of byte clocks. based on dsi transfer rate and the
* number of lanes configured the time taken to transmit 16 long packets
* in a dsi stream varies. */
I915_WRITE(MIPI_DBI_BW_CTRL(pipe), intel_dsi->bw_timer);
I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe),
intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT |
intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT);
if (is_vid_mode(intel_dsi))
/* Some panels might have a resolution that is not a multiple of
 * 64, like 1366 x 768. Enable RANDOM resolution support for such
 * panels by default. */
I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe),
intel_dsi->video_frmt_cfg_bits |
intel_dsi->video_mode_format |
IP_TG_CONFIG |
RANDOM_DPI_DISPLAY_RESOLUTION);
}
static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
{
DRM_DEBUG_KMS("\n");
intel_dsi_prepare(encoder);
vlv_enable_dsi_pll(encoder);
}
static enum drm_connector_status
intel_dsi_detect(struct drm_connector *connector, bool force)
{
struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
struct intel_encoder *intel_encoder = &intel_dsi->base;
enum intel_display_power_domain power_domain;
enum drm_connector_status connector_status;
struct drm_i915_private *dev_priv = intel_encoder->base.dev->dev_private;
DRM_DEBUG_KMS("\n");
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
connector_status = intel_dsi->dev.dev_ops->detect(&intel_dsi->dev);
intel_display_power_put(dev_priv, power_domain);
return connector_status;
}
static int intel_dsi_get_modes(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_display_mode *mode;
DRM_DEBUG_KMS("\n");
if (!intel_connector->panel.fixed_mode) {
DRM_DEBUG_KMS("no fixed mode\n");
return 0;
}
mode = drm_mode_duplicate(connector->dev,
intel_connector->panel.fixed_mode);
if (!mode) {
DRM_DEBUG_KMS("drm_mode_duplicate failed\n");
return 0;
}
drm_mode_probed_add(connector, mode);
return 1;
}
static void intel_dsi_destroy(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
DRM_DEBUG_KMS("\n");
intel_panel_fini(&intel_connector->panel);
drm_connector_cleanup(connector);
kfree(connector);
}
static const struct drm_encoder_funcs intel_dsi_funcs = {
.destroy = intel_encoder_destroy,
};
static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = {
.get_modes = intel_dsi_get_modes,
.mode_valid = intel_dsi_mode_valid,
.best_encoder = intel_best_encoder,
};
static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.dpms = intel_connector_dpms,
.detect = intel_dsi_detect,
.destroy = intel_dsi_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
};
void intel_dsi_init(struct drm_device *dev)
{
struct intel_dsi *intel_dsi;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_display_mode *fixed_mode = NULL;
struct drm_i915_private *dev_priv = dev->dev_private;
const struct intel_dsi_device *dsi;
unsigned int i;
DRM_DEBUG_KMS("\n");
/* There is no detection method for MIPI so rely on VBT */
if (!dev_priv->vbt.has_mipi)
return;
if (IS_VALLEYVIEW(dev)) {
dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
} else {
DRM_ERROR("Unsupported Mipi device to reg base");
return;
}
intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
if (!intel_dsi)
return;
intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_dsi);
return;
}
intel_encoder = &intel_dsi->base;
encoder = &intel_encoder->base;
intel_dsi->attached_connector = intel_connector;
connector = &intel_connector->base;
drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI);
/* XXX: very likely not all of these are needed */
intel_encoder->hot_plug = intel_dsi_hot_plug;
intel_encoder->compute_config = intel_dsi_compute_config;
intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable;
intel_encoder->pre_enable = intel_dsi_pre_enable;
intel_encoder->enable = intel_dsi_enable_nop;
intel_encoder->disable = intel_dsi_pre_disable;
intel_encoder->post_disable = intel_dsi_post_disable;
intel_encoder->get_hw_state = intel_dsi_get_hw_state;
intel_encoder->get_config = intel_dsi_get_config;
intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_connector->unregister = intel_connector_unregister;
for (i = 0; i < ARRAY_SIZE(intel_dsi_devices); i++) {
dsi = &intel_dsi_devices[i];
intel_dsi->dev = *dsi;
if (dsi->dev_ops->init(&intel_dsi->dev))
break;
}
if (i == ARRAY_SIZE(intel_dsi_devices)) {
DRM_DEBUG_KMS("no device found\n");
goto err;
}
intel_encoder->type = INTEL_OUTPUT_DSI;
intel_encoder->crtc_mask = (1 << 0); /* XXX */
intel_encoder->cloneable = 0;
drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
drm_connector_helper_add(connector, &intel_dsi_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB; /*XXX*/
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
intel_connector_attach_encoder(intel_connector, intel_encoder);
drm_connector_register(connector);
fixed_mode = dsi->dev_ops->get_modes(&intel_dsi->dev);
if (!fixed_mode) {
DRM_DEBUG_KMS("no fixed mode\n");
goto err;
}
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
return;
err:
drm_encoder_cleanup(&intel_encoder->base);
kfree(intel_dsi);
kfree(intel_connector);
}


@@ -0,0 +1,141 @@
/*
* Copyright © 2013 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _INTEL_DSI_H
#define _INTEL_DSI_H
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include "intel_drv.h"
struct intel_dsi_device {
unsigned int panel_id;
const char *name;
const struct intel_dsi_dev_ops *dev_ops;
void *dev_priv;
};
struct intel_dsi_dev_ops {
bool (*init)(struct intel_dsi_device *dsi);
void (*panel_reset)(struct intel_dsi_device *dsi);
void (*disable_panel_power)(struct intel_dsi_device *dsi);
/* one time programmable commands if needed */
void (*send_otp_cmds)(struct intel_dsi_device *dsi);
/* This callback must be able to assume DSI commands can be sent */
void (*enable)(struct intel_dsi_device *dsi);
/* This callback must be able to assume DSI commands can be sent */
void (*disable)(struct intel_dsi_device *dsi);
int (*mode_valid)(struct intel_dsi_device *dsi,
struct drm_display_mode *mode);
bool (*mode_fixup)(struct intel_dsi_device *dsi,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void (*mode_set)(struct intel_dsi_device *dsi,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
enum drm_connector_status (*detect)(struct intel_dsi_device *dsi);
bool (*get_hw_state)(struct intel_dsi_device *dev);
struct drm_display_mode *(*get_modes)(struct intel_dsi_device *dsi);
void (*destroy) (struct intel_dsi_device *dsi);
};
struct intel_dsi {
struct intel_encoder base;
struct intel_dsi_device dev;
struct intel_connector *attached_connector;
/* if true, use HS mode, otherwise LP */
bool hs;
/* virtual channel */
int channel;
/* Video mode or command mode */
u16 operation_mode;
/* number of DSI lanes */
unsigned int lane_count;
/* video mode pixel format for MIPI_DSI_FUNC_PRG register */
u32 pixel_format;
/* video mode format for MIPI_VIDEO_MODE_FORMAT register */
u32 video_mode_format;
/* eot for MIPI_EOT_DISABLE register */
u8 eotp_pkt;
u8 clock_stop;
u8 escape_clk_div;
u32 port_bits;
u32 bw_timer;
u32 dphy_reg;
u32 video_frmt_cfg_bits;
u16 lp_byte_clk;
/* timeouts in byte clocks */
u16 lp_rx_timeout;
u16 turn_arnd_val;
u16 rst_timer_val;
u16 hs_to_lp_count;
u16 clk_lp_to_hs_count;
u16 clk_hs_to_lp_count;
u16 init_count;
u32 pclk;
u16 burst_mode_ratio;
/* all delays in ms */
u16 backlight_off_delay;
u16 backlight_on_delay;
u16 panel_on_delay;
u16 panel_off_delay;
u16 panel_pwr_cycle_delay;
};
static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
{
return container_of(encoder, struct intel_dsi, base.base);
}
extern void vlv_enable_dsi_pll(struct intel_encoder *encoder);
extern void vlv_disable_dsi_pll(struct intel_encoder *encoder);
extern u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp);
extern struct intel_dsi_dev_ops vbt_generic_dsi_display_ops;
#endif /* _INTEL_DSI_H */


@@ -0,0 +1,437 @@
/*
* Copyright © 2013 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Jani Nikula <jani.nikula@intel.com>
*/
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <video/mipi_display.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_dsi.h"
#include "intel_dsi_cmd.h"
/*
* XXX: MIPI_DATA_ADDRESS, MIPI_DATA_LENGTH, MIPI_COMMAND_LENGTH, and
* MIPI_COMMAND_ADDRESS registers.
*
* Apparently these registers provide a MIPI adapter level way to send (lots of)
* commands and data to the receiver, without having to write the commands and
* data to MIPI_{HS,LP}_GEN_{CTRL,DATA} registers word by word.
*
* Presumably for anything other than MIPI_DCS_WRITE_MEMORY_START and
* MIPI_DCS_WRITE_MEMORY_CONTINUE (which are used to update the external
* framebuffer in command mode displays) these are just an optimization that can
* come later.
*
* For memory writes, these should probably be used for performance.
*/
static void print_stat(struct intel_dsi *intel_dsi)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum pipe pipe = intel_crtc->pipe;
u32 val;
val = I915_READ(MIPI_INTR_STAT(pipe));
#define STAT_BIT(val, bit) ((val) & (bit) ? " " #bit : "")
DRM_DEBUG_KMS("MIPI_INTR_STAT(%d) = %08x"
"%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
"\n", pipe, val,
STAT_BIT(val, TEARING_EFFECT),
STAT_BIT(val, SPL_PKT_SENT_INTERRUPT),
STAT_BIT(val, GEN_READ_DATA_AVAIL),
STAT_BIT(val, LP_GENERIC_WR_FIFO_FULL),
STAT_BIT(val, HS_GENERIC_WR_FIFO_FULL),
STAT_BIT(val, RX_PROT_VIOLATION),
STAT_BIT(val, RX_INVALID_TX_LENGTH),
STAT_BIT(val, ACK_WITH_NO_ERROR),
STAT_BIT(val, TURN_AROUND_ACK_TIMEOUT),
STAT_BIT(val, LP_RX_TIMEOUT),
STAT_BIT(val, HS_TX_TIMEOUT),
STAT_BIT(val, DPI_FIFO_UNDERRUN),
STAT_BIT(val, LOW_CONTENTION),
STAT_BIT(val, HIGH_CONTENTION),
STAT_BIT(val, TXDSI_VC_ID_INVALID),
STAT_BIT(val, TXDSI_DATA_TYPE_NOT_RECOGNISED),
STAT_BIT(val, TXCHECKSUM_ERROR),
STAT_BIT(val, TXECC_MULTIBIT_ERROR),
STAT_BIT(val, TXECC_SINGLE_BIT_ERROR),
STAT_BIT(val, TXFALSE_CONTROL_ERROR),
STAT_BIT(val, RXDSI_VC_ID_INVALID),
STAT_BIT(val, RXDSI_DATA_TYPE_NOT_REGOGNISED),
STAT_BIT(val, RXCHECKSUM_ERROR),
STAT_BIT(val, RXECC_MULTIBIT_ERROR),
STAT_BIT(val, RXECC_SINGLE_BIT_ERROR),
STAT_BIT(val, RXFALSE_CONTROL_ERROR),
STAT_BIT(val, RXHS_RECEIVE_TIMEOUT_ERROR),
STAT_BIT(val, RX_LP_TX_SYNC_ERROR),
STAT_BIT(val, RXEXCAPE_MODE_ENTRY_ERROR),
STAT_BIT(val, RXEOT_SYNC_ERROR),
STAT_BIT(val, RXSOT_SYNC_ERROR),
STAT_BIT(val, RXSOT_ERROR));
#undef STAT_BIT
}
enum dsi_type {
DSI_DCS,
DSI_GENERIC,
};
/* enable or disable command mode hs transmissions */
void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum pipe pipe = intel_crtc->pipe;
u32 temp;
u32 mask = DBI_FIFO_EMPTY;
if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50))
DRM_ERROR("Timeout waiting for DBI FIFO empty\n");
temp = I915_READ(MIPI_HS_LP_DBI_ENABLE(pipe));
temp &= ~DBI_HS_LP_MODE_MASK;
temp |= enable ? DBI_HS_MODE : DBI_LP_MODE;
I915_WRITE(MIPI_HS_LP_DBI_ENABLE(pipe), temp);
intel_dsi->hs = enable;
}
static int dsi_vc_send_short(struct intel_dsi *intel_dsi, int channel,
u8 data_type, u16 data)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum pipe pipe = intel_crtc->pipe;
u32 ctrl_reg;
u32 ctrl;
u32 mask;
DRM_DEBUG_KMS("channel %d, data_type %d, data %04x\n",
channel, data_type, data);
if (intel_dsi->hs) {
ctrl_reg = MIPI_HS_GEN_CTRL(pipe);
mask = HS_CTRL_FIFO_FULL;
} else {
ctrl_reg = MIPI_LP_GEN_CTRL(pipe);
mask = LP_CTRL_FIFO_FULL;
}
if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == 0, 50)) {
DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
print_stat(intel_dsi);
}
/*
* Note: This function is also used for long packets, with length passed
* as data, since SHORT_PACKET_PARAM_SHIFT ==
* LONG_PACKET_WORD_COUNT_SHIFT.
*/
ctrl = data << SHORT_PACKET_PARAM_SHIFT |
channel << VIRTUAL_CHANNEL_SHIFT |
data_type << DATA_TYPE_SHIFT;
I915_WRITE(ctrl_reg, ctrl);
return 0;
}
static int dsi_vc_send_long(struct intel_dsi *intel_dsi, int channel,
u8 data_type, const u8 *data, int len)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum pipe pipe = intel_crtc->pipe;
u32 data_reg;
int i, j, n;
u32 mask;
DRM_DEBUG_KMS("channel %d, data_type %d, len %04x\n",
channel, data_type, len);
if (intel_dsi->hs) {
data_reg = MIPI_HS_GEN_DATA(pipe);
mask = HS_DATA_FIFO_FULL;
} else {
data_reg = MIPI_LP_GEN_DATA(pipe);
mask = LP_DATA_FIFO_FULL;
}
if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == 0, 50))
DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");
for (i = 0; i < len; i += n) {
u32 val = 0;
n = min_t(int, len - i, 4);
for (j = 0; j < n; j++)
val |= *data++ << 8 * j;
I915_WRITE(data_reg, val);
/* XXX: check for data fifo full, once that is set, write 4
* dwords, then wait for not set, then continue. */
}
return dsi_vc_send_short(intel_dsi, channel, data_type, len);
}
static int dsi_vc_write_common(struct intel_dsi *intel_dsi,
int channel, const u8 *data, int len,
enum dsi_type type)
{
int ret;
if (len == 0) {
/* a zero length write can only be a generic short write */
BUG_ON(type == DSI_DCS);
ret = dsi_vc_send_short(intel_dsi, channel,
MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM,
0);
} else if (len == 1) {
ret = dsi_vc_send_short(intel_dsi, channel,
type == DSI_GENERIC ?
MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM :
MIPI_DSI_DCS_SHORT_WRITE, data[0]);
} else if (len == 2) {
ret = dsi_vc_send_short(intel_dsi, channel,
type == DSI_GENERIC ?
MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM :
MIPI_DSI_DCS_SHORT_WRITE_PARAM,
(data[1] << 8) | data[0]);
} else {
ret = dsi_vc_send_long(intel_dsi, channel,
type == DSI_GENERIC ?
MIPI_DSI_GENERIC_LONG_WRITE :
MIPI_DSI_DCS_LONG_WRITE, data, len);
}
return ret;
}
int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
const u8 *data, int len)
{
return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_DCS);
}
int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
const u8 *data, int len)
{
return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_GENERIC);
}
static int dsi_vc_dcs_send_read_request(struct intel_dsi *intel_dsi,
int channel, u8 dcs_cmd)
{
return dsi_vc_send_short(intel_dsi, channel, MIPI_DSI_DCS_READ,
dcs_cmd);
}
static int dsi_vc_generic_send_read_request(struct intel_dsi *intel_dsi,
int channel, u8 *reqdata,
int reqlen)
{
u16 data;
u8 data_type;
switch (reqlen) {
case 0:
data_type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM;
data = 0;
break;
case 1:
data_type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM;
data = reqdata[0];
break;
case 2:
data_type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM;
data = (reqdata[1] << 8) | reqdata[0];
break;
default:
BUG();
}
return dsi_vc_send_short(intel_dsi, channel, data_type, data);
}
static int dsi_read_data_return(struct intel_dsi *intel_dsi,
u8 *buf, int buflen)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum pipe pipe = intel_crtc->pipe;
int i, len = 0;
u32 data_reg, val;
if (intel_dsi->hs) {
data_reg = MIPI_HS_GEN_DATA(pipe);
} else {
data_reg = MIPI_LP_GEN_DATA(pipe);
}
while (len < buflen) {
val = I915_READ(data_reg);
for (i = 0; i < 4 && len < buflen; i++, len++)
buf[len] = val >> 8 * i;
}
return len;
}
int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
u8 *buf, int buflen)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum pipe pipe = intel_crtc->pipe;
u32 mask;
int ret;
/*
* XXX: should issue multiple read requests and reads if request is
* longer than MIPI_MAX_RETURN_PKT_SIZE
*/
I915_WRITE(MIPI_INTR_STAT(pipe), GEN_READ_DATA_AVAIL);
ret = dsi_vc_dcs_send_read_request(intel_dsi, channel, dcs_cmd);
if (ret)
return ret;
mask = GEN_READ_DATA_AVAIL;
if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 50))
DRM_ERROR("Timeout waiting for read data.\n");
ret = dsi_read_data_return(intel_dsi, buf, buflen);
if (ret < 0)
return ret;
if (ret != buflen)
return -EIO;
return 0;
}
int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
u8 *reqdata, int reqlen, u8 *buf, int buflen)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum pipe pipe = intel_crtc->pipe;
u32 mask;
int ret;
/*
* XXX: should issue multiple read requests and reads if request is
* longer than MIPI_MAX_RETURN_PKT_SIZE
*/
I915_WRITE(MIPI_INTR_STAT(pipe), GEN_READ_DATA_AVAIL);
ret = dsi_vc_generic_send_read_request(intel_dsi, channel, reqdata,
reqlen);
if (ret)
return ret;
mask = GEN_READ_DATA_AVAIL;
if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 50))
DRM_ERROR("Timeout waiting for read data.\n");
ret = dsi_read_data_return(intel_dsi, buf, buflen);
if (ret < 0)
return ret;
if (ret != buflen)
return -EIO;
return 0;
}
/*
* send a video mode command
*
* XXX: commands with data in MIPI_DPI_DATA?
*/
int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum pipe pipe = intel_crtc->pipe;
u32 mask;
/* XXX: pipe, hs */
if (hs)
cmd &= ~DPI_LP_MODE;
else
cmd |= DPI_LP_MODE;
/* clear bit */
I915_WRITE(MIPI_INTR_STAT(pipe), SPL_PKT_SENT_INTERRUPT);
/* XXX: old code skips write if control unchanged */
if (cmd == I915_READ(MIPI_DPI_CONTROL(pipe)))
DRM_ERROR("Same special packet %02x twice in a row.\n", cmd);
I915_WRITE(MIPI_DPI_CONTROL(pipe), cmd);
mask = SPL_PKT_SENT_INTERRUPT;
if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 100))
DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
return 0;
}
void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
enum pipe pipe = intel_crtc->pipe;
u32 mask;
mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 100))
DRM_ERROR("DPI FIFOs are not empty\n");
}


@@ -0,0 +1,113 @@
/*
* Copyright © 2013 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Jani Nikula <jani.nikula@intel.com>
*/
#ifndef _INTEL_DSI_DSI_H
#define _INTEL_DSI_DSI_H
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <video/mipi_display.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_dsi.h"
#define DPI_LP_MODE_EN false
#define DPI_HS_MODE_EN true
void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable);
int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
const u8 *data, int len);
int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
const u8 *data, int len);
int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
u8 *buf, int buflen);
int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
u8 *reqdata, int reqlen, u8 *buf, int buflen);
int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs);
void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi);
/* XXX: questionable write helpers */
static inline int dsi_vc_dcs_write_0(struct intel_dsi *intel_dsi,
int channel, u8 dcs_cmd)
{
return dsi_vc_dcs_write(intel_dsi, channel, &dcs_cmd, 1);
}
static inline int dsi_vc_dcs_write_1(struct intel_dsi *intel_dsi,
int channel, u8 dcs_cmd, u8 param)
{
u8 buf[2] = { dcs_cmd, param };
return dsi_vc_dcs_write(intel_dsi, channel, buf, 2);
}
static inline int dsi_vc_generic_write_0(struct intel_dsi *intel_dsi,
int channel)
{
return dsi_vc_generic_write(intel_dsi, channel, NULL, 0);
}
static inline int dsi_vc_generic_write_1(struct intel_dsi *intel_dsi,
int channel, u8 param)
{
return dsi_vc_generic_write(intel_dsi, channel, &param, 1);
}
static inline int dsi_vc_generic_write_2(struct intel_dsi *intel_dsi,
int channel, u8 param1, u8 param2)
{
u8 buf[2] = { param1, param2 };
return dsi_vc_generic_write(intel_dsi, channel, buf, 2);
}
/* XXX: questionable read helpers */
static inline int dsi_vc_generic_read_0(struct intel_dsi *intel_dsi,
int channel, u8 *buf, int buflen)
{
return dsi_vc_generic_read(intel_dsi, channel, NULL, 0, buf, buflen);
}
static inline int dsi_vc_generic_read_1(struct intel_dsi *intel_dsi,
int channel, u8 param, u8 *buf,
int buflen)
{
return dsi_vc_generic_read(intel_dsi, channel, &param, 1, buf, buflen);
}
static inline int dsi_vc_generic_read_2(struct intel_dsi *intel_dsi,
int channel, u8 param1, u8 param2,
u8 *buf, int buflen)
{
u8 req[2] = { param1, param2 };
return dsi_vc_generic_read(intel_dsi, channel, req, 2, buf, buflen);
}
#endif /* _INTEL_DSI_DSI_H */


@@ -0,0 +1,625 @@
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Shobhit Kumar <shobhit.kumar@intel.com>
*
*/
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/i915_drm.h>
#include <linux/slab.h>
#include <video/mipi_display.h>
#include <asm/intel-mid.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_dsi.h"
#include "intel_dsi_cmd.h"
#define MIPI_TRANSFER_MODE_SHIFT 0
#define MIPI_VIRTUAL_CHANNEL_SHIFT 1
#define MIPI_PORT_SHIFT 3
#define PREPARE_CNT_MAX 0x3F
#define EXIT_ZERO_CNT_MAX 0x3F
#define CLK_ZERO_CNT_MAX 0xFF
#define TRAIL_CNT_MAX 0x1F
#define NS_KHZ_RATIO 1000000
#define GPIO_NC_0_HV_DDI0_HPD 0x4130
#define GPIO_NC_0_HV_DDI0_PAD 0x4138
#define GPIO_NC_1_HV_DDI0_DDC_SDA 0x4120
#define GPIO_NC_1_HV_DDI0_DDC_SDA_PAD 0x4128
#define GPIO_NC_2_HV_DDI0_DDC_SCL 0x4110
#define GPIO_NC_2_HV_DDI0_DDC_SCL_PAD 0x4118
#define GPIO_NC_3_PANEL0_VDDEN 0x4140
#define GPIO_NC_3_PANEL0_VDDEN_PAD 0x4148
#define GPIO_NC_4_PANEL0_BLKEN 0x4150
#define GPIO_NC_4_PANEL0_BLKEN_PAD 0x4158
#define GPIO_NC_5_PANEL0_BLKCTL 0x4160
#define GPIO_NC_5_PANEL0_BLKCTL_PAD 0x4168
#define GPIO_NC_6_PCONF0 0x4180
#define GPIO_NC_6_PAD 0x4188
#define GPIO_NC_7_PCONF0 0x4190
#define GPIO_NC_7_PAD 0x4198
#define GPIO_NC_8_PCONF0 0x4170
#define GPIO_NC_8_PAD 0x4178
#define GPIO_NC_9_PCONF0 0x4100
#define GPIO_NC_9_PAD 0x4108
#define GPIO_NC_10_PCONF0 0x40E0
#define GPIO_NC_10_PAD 0x40E8
#define GPIO_NC_11_PCONF0 0x40F0
#define GPIO_NC_11_PAD 0x40F8
struct gpio_table {
u16 function_reg;
u16 pad_reg;
u8 init;
};
static struct gpio_table gtable[] = {
{ GPIO_NC_0_HV_DDI0_HPD, GPIO_NC_0_HV_DDI0_PAD, 0 },
{ GPIO_NC_1_HV_DDI0_DDC_SDA, GPIO_NC_1_HV_DDI0_DDC_SDA_PAD, 0 },
{ GPIO_NC_2_HV_DDI0_DDC_SCL, GPIO_NC_2_HV_DDI0_DDC_SCL_PAD, 0 },
{ GPIO_NC_3_PANEL0_VDDEN, GPIO_NC_3_PANEL0_VDDEN_PAD, 0 },
{ GPIO_NC_4_PANEL0_BLKEN, GPIO_NC_4_PANEL0_BLKEN_PAD, 0 },
{ GPIO_NC_5_PANEL0_BLKCTL, GPIO_NC_5_PANEL0_BLKCTL_PAD, 0 },
{ GPIO_NC_6_PCONF0, GPIO_NC_6_PAD, 0 },
{ GPIO_NC_7_PCONF0, GPIO_NC_7_PAD, 0 },
{ GPIO_NC_8_PCONF0, GPIO_NC_8_PAD, 0 },
{ GPIO_NC_9_PCONF0, GPIO_NC_9_PAD, 0 },
{ GPIO_NC_10_PCONF0, GPIO_NC_10_PAD, 0},
{ GPIO_NC_11_PCONF0, GPIO_NC_11_PAD, 0}
};
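/* Illustration of how mipi_exec_gpio() below uses this table (assumed
 * element bytes): gpio index 3 with action 1 programs the function value
 * into GPIO_NC_3_PANEL0_VDDEN once, then writes 0x4 | 1 = 0x5 to
 * GPIO_NC_3_PANEL0_VDDEN_PAD to pull the panel VDD line up. */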
static u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, u8 *data)
{
u8 type, byte, mode, vc, port;
u16 len;
byte = *data++;
mode = (byte >> MIPI_TRANSFER_MODE_SHIFT) & 0x1;
vc = (byte >> MIPI_VIRTUAL_CHANNEL_SHIFT) & 0x3;
port = (byte >> MIPI_PORT_SHIFT) & 0x3;
/* LP or HS mode */
intel_dsi->hs = mode;
/* get packet type and increment the pointer */
type = *data++;
len = *((u16 *) data);
data += 2;
switch (type) {
case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
dsi_vc_generic_write_0(intel_dsi, vc);
break;
case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
dsi_vc_generic_write_1(intel_dsi, vc, *data);
break;
case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
dsi_vc_generic_write_2(intel_dsi, vc, *data, *(data + 1));
break;
case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
DRM_DEBUG_DRIVER("Generic Read not yet implemented or used\n");
break;
case MIPI_DSI_GENERIC_LONG_WRITE:
dsi_vc_generic_write(intel_dsi, vc, data, len);
break;
case MIPI_DSI_DCS_SHORT_WRITE:
dsi_vc_dcs_write_0(intel_dsi, vc, *data);
break;
case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
dsi_vc_dcs_write_1(intel_dsi, vc, *data, *(data + 1));
break;
case MIPI_DSI_DCS_READ:
DRM_DEBUG_DRIVER("DCS Read not yet implemented or used\n");
break;
case MIPI_DSI_DCS_LONG_WRITE:
dsi_vc_dcs_write(intel_dsi, vc, data, len);
break;
}
data += len;
return data;
}
static u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, u8 *data)
{
u32 delay = *((u32 *) data);
usleep_range(delay, delay + 10);
data += 4;
return data;
}
static u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, u8 *data)
{
u8 gpio, action;
u16 function, pad;
u32 val;
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
gpio = *data++;
/* pull up/down */
action = *data++;
if (gpio >= ARRAY_SIZE(gtable)) {
DRM_ERROR("unknown gpio index %u, skipping element\n", gpio);
return data;
}
function = gtable[gpio].function_reg;
pad = gtable[gpio].pad_reg;
mutex_lock(&dev_priv->dpio_lock);
if (!gtable[gpio].init) {
/* program the function */
/* FIXME: remove constant below */
vlv_gpio_nc_write(dev_priv, function, 0x2000CC00);
gtable[gpio].init = 1;
}
val = 0x4 | action;
/* pull up/down */
vlv_gpio_nc_write(dev_priv, pad, val);
mutex_unlock(&dev_priv->dpio_lock);
return data;
}
typedef u8 * (*fn_mipi_elem_exec)(struct intel_dsi *intel_dsi, u8 *data);
static const fn_mipi_elem_exec exec_elem[] = {
NULL, /* reserved */
mipi_exec_send_packet,
mipi_exec_delay,
mipi_exec_gpio,
NULL, /* status read; later */
};
/*
 * MIPI Sequence from VBT #53 parsing logic
 * We have already separated each sequence during bios parsing
 * Following is a generic execution function for any sequence
 */
static const char * const seq_name[] = {
"UNDEFINED",
"MIPI_SEQ_ASSERT_RESET",
"MIPI_SEQ_INIT_OTP",
"MIPI_SEQ_DISPLAY_ON",
"MIPI_SEQ_DISPLAY_OFF",
"MIPI_SEQ_DEASSERT_RESET"
};
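/*
 * Illustrative layout of one sequence block as consumed by
 * generic_exec_sequence() below, inferred from the parsing code rather
 * than from VBT documentation:
 *
 * +0 sequence id (index into seq_name[])
 * +1 element id (index into exec_elem[]), followed by its payload:
 * send packet: flags byte, type byte, 16-bit LE length, <length> bytes
 * delay: 32-bit LE delay in us
 * gpio: gpio index byte, action byte
 * ... further elements ...
 * 0x00 end-of-sequence marker
 */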
static void generic_exec_sequence(struct intel_dsi *intel_dsi, char *sequence)
{
u8 *data = sequence;
fn_mipi_elem_exec mipi_elem_exec;
int index;
if (!sequence)
return;
DRM_DEBUG_DRIVER("Starting MIPI sequence - %s\n", seq_name[*data]);
/* go to the first element of the sequence */
data++;
/* parse each byte till we reach end of sequence byte - 0x00 */
while (1) {
index = *data;
if (index >= ARRAY_SIZE(exec_elem) || !exec_elem[index]) {
DRM_ERROR("Unsupported MIPI element %d, skipping sequence execution\n",
index);
return;
}
mipi_elem_exec = exec_elem[index];
/* goto element payload */
data++;
/* execute the element specific routines */
data = mipi_elem_exec(intel_dsi, data);
/*
* After processing the element, data should point to
* next element or end of sequence
* check if have we reached end of sequence
*/
if (*data == 0x00)
break;
}
}
static bool generic_init(struct intel_dsi_device *dsi)
{
struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
u32 bits_per_pixel = 24;
u32 tlpx_ns, extra_byte_count, bitrate, tlpx_ui;
u32 ui_num, ui_den;
u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
u32 ths_prepare_ns, tclk_trail_ns;
u32 tclk_prepare_clkzero, ths_prepare_hszero;
u32 lp_to_hs_switch, hs_to_lp_switch;
u32 pclk, computed_ddr;
u16 burst_mode_ratio;
DRM_DEBUG_KMS("\n");
intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
intel_dsi->lane_count = mipi_config->lane_cnt + 1;
intel_dsi->pixel_format = mipi_config->videomode_color_format << 7;
if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB666)
bits_per_pixel = 18;
else if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB565)
bits_per_pixel = 16;
intel_dsi->operation_mode = mipi_config->is_cmd_mode;
intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
intel_dsi->escape_clk_div = mipi_config->byte_clk_sel;
intel_dsi->lp_rx_timeout = mipi_config->lp_rx_timeout;
intel_dsi->turn_arnd_val = mipi_config->turn_around_timeout;
intel_dsi->rst_timer_val = mipi_config->device_reset_timer;
intel_dsi->init_count = mipi_config->master_init_timer;
intel_dsi->bw_timer = mipi_config->dbi_bw_timer;
intel_dsi->video_frmt_cfg_bits =
mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
pclk = mode->clock;
/* Burst Mode Ratio
* Target ddr frequency from VBT / non burst ddr freq
* multiply by 100 to preserve remainder
*/
if (intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
if (mipi_config->target_burst_mode_freq) {
computed_ddr =
(pclk * bits_per_pixel) / intel_dsi->lane_count;
if (mipi_config->target_burst_mode_freq <
computed_ddr) {
DRM_ERROR("Burst mode freq is less than computed\n");
return false;
}
burst_mode_ratio = DIV_ROUND_UP(
mipi_config->target_burst_mode_freq * 100,
computed_ddr);
pclk = DIV_ROUND_UP(pclk * burst_mode_ratio, 100);
} else {
DRM_ERROR("Burst mode target is not set\n");
return false;
}
} else
burst_mode_ratio = 100;
intel_dsi->burst_mode_ratio = burst_mode_ratio;
intel_dsi->pclk = pclk;
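/* Worked example with assumed VBT values: pclk = 65000 kHz at 24 bpp on
 * 4 lanes gives computed_ddr = 390000 kHz; a target burst frequency of
 * 450000 kHz yields burst_mode_ratio = DIV_ROUND_UP(45000000, 390000) =
 * 116, and the adjusted pclk = DIV_ROUND_UP(65000 * 116, 100) = 75400. */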
bitrate = (pclk * bits_per_pixel) / intel_dsi->lane_count;
switch (intel_dsi->escape_clk_div) {
case 0:
tlpx_ns = 50;
break;
case 1:
tlpx_ns = 100;
break;
case 2:
tlpx_ns = 200;
break;
default:
tlpx_ns = 50;
break;
}
switch (intel_dsi->lane_count) {
case 1:
case 2:
extra_byte_count = 2;
break;
case 3:
extra_byte_count = 4;
break;
case 4:
default:
extra_byte_count = 3;
break;
}
/*
 * UI(s) = 1/f [f in Hz]
 * UI(ns) = 10^6/f [f in kHz]
 *
 * The bitrate is in kbps, so UI in ns is ui_num / ui_den.
 */
ui_num = NS_KHZ_RATIO;
ui_den = bitrate;
tclk_prepare_clkzero = mipi_config->tclk_prepare_clkzero;
ths_prepare_hszero = mipi_config->ths_prepare_hszero;
/*
* B060
* LP byte clock = TLPX/ (8UI)
*/
intel_dsi->lp_byte_clk = DIV_ROUND_UP(tlpx_ns * ui_den, 8 * ui_num);
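/* Illustration with assumed values: tlpx_ns = 50 and a bitrate of
 * 452400 kbps give lp_byte_clk = DIV_ROUND_UP(50 * 452400, 8 * 1000000)
 * = 3. */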
/* count values in UI = (ns value) * (bitrate / (2 * 10^6))
*
* Since txddrclkhs_i is 2xUI, all the count values programmed in
* DPHY param register are divided by 2
*
* prepare count
*/
ths_prepare_ns = max(mipi_config->ths_prepare,
mipi_config->tclk_prepare);
prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * 2);
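/* Illustration with assumed values: ths_prepare_ns = 60 at the same
 * 452400 kbps bitrate gives prepare_cnt =
 * DIV_ROUND_UP(60 * 452400, 1000000 * 2) = 14. */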
/* exit zero count */
exit_zero_cnt = DIV_ROUND_UP(
(ths_prepare_hszero - ths_prepare_ns) * ui_den,
ui_num * 2
);
/*
 * exit_zero is a unified value for ths_zero and ths_exit.
 * The minimum value for ths_exit is 110 ns, so
 * min(exit_zero_cnt * 2) = 110 / UI, i.e. exit_zero_cnt = 55 / UI.
 */
if (exit_zero_cnt < (55 * ui_den / ui_num))
if ((55 * ui_den) % ui_num)
exit_zero_cnt += 1;
/* clk zero count */
clk_zero_cnt = DIV_ROUND_UP(
(tclk_prepare_clkzero - ths_prepare_ns)
* ui_den, 2 * ui_num);
/* trail count */
tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail);
trail_cnt = DIV_ROUND_UP(tclk_trail_ns * ui_den, 2 * ui_num);
if (prepare_cnt > PREPARE_CNT_MAX ||
exit_zero_cnt > EXIT_ZERO_CNT_MAX ||
clk_zero_cnt > CLK_ZERO_CNT_MAX ||
trail_cnt > TRAIL_CNT_MAX)
DRM_DEBUG_DRIVER("Values crossing maximum limits, restricting to max values\n");
if (prepare_cnt > PREPARE_CNT_MAX)
prepare_cnt = PREPARE_CNT_MAX;
if (exit_zero_cnt > EXIT_ZERO_CNT_MAX)
exit_zero_cnt = EXIT_ZERO_CNT_MAX;
if (clk_zero_cnt > CLK_ZERO_CNT_MAX)
clk_zero_cnt = CLK_ZERO_CNT_MAX;
if (trail_cnt > TRAIL_CNT_MAX)
trail_cnt = TRAIL_CNT_MAX;
/* B080 */
intel_dsi->dphy_reg = exit_zero_cnt << 24 | trail_cnt << 16 |
clk_zero_cnt << 8 | prepare_cnt;
/*
* LP to HS switch count = 4TLPX + PREP_COUNT * 2 + EXIT_ZERO_COUNT * 2
* + 10UI + Extra Byte Count
*
* HS to LP switch count = THS-TRAIL + 2TLPX + Extra Byte Count
* Extra Byte Count is calculated according to number of lanes.
* High Low Switch Count is the Max of LP to HS and
* HS to LP switch count
*
*/
tlpx_ui = DIV_ROUND_UP(tlpx_ns * ui_den, ui_num);
/* B044 */
/* FIXME:
* The comment above does not match with the code */
lp_to_hs_switch = DIV_ROUND_UP(4 * tlpx_ui + prepare_cnt * 2 +
exit_zero_cnt * 2 + 10, 8);
hs_to_lp_switch = DIV_ROUND_UP(mipi_config->ths_trail + 2 * tlpx_ui, 8);
intel_dsi->hs_to_lp_count = max(lp_to_hs_switch, hs_to_lp_switch);
intel_dsi->hs_to_lp_count += extra_byte_count;
/* B088 */
/* LP -> HS for clock lanes
 * LP clk sync + LP11 + LP01 + tclk_prepare + tclk_zero +
 * extra byte count
 * 2TLPX + 1TLPX + 1TLPX(in ns) + prepare_cnt * 2 + clk_zero_cnt *
 * 2(in UI) + extra byte count
 * In byteclks = (4TLPX + prepare_cnt * 2 + clk_zero_cnt * 2 (in UI)) /
 * 8 + extra byte count
 */
intel_dsi->clk_lp_to_hs_count =
DIV_ROUND_UP(
4 * tlpx_ui + prepare_cnt * 2 +
clk_zero_cnt * 2,
8);
intel_dsi->clk_lp_to_hs_count += extra_byte_count;
/* HS->LP for Clock Lanes
* Low Power clock synchronisations + 1Tx byteclk + tclk_trail +
* Extra byte count
* 2TLPX + 8UI + (trail_count*2)(in UI) + Extra byte count
* In byteclks = (2*TLpx(in UI) + trail_count*2 +8)(in UI)/8 +
* Extra byte count
*/
intel_dsi->clk_hs_to_lp_count =
DIV_ROUND_UP(2 * tlpx_ui + trail_cnt * 2 + 8,
8);
intel_dsi->clk_hs_to_lp_count += extra_byte_count;
DRM_DEBUG_KMS("Eot %s\n", intel_dsi->eotp_pkt ? "enabled" : "disabled");
DRM_DEBUG_KMS("Clockstop %s\n", intel_dsi->clock_stop ?
"disabled" : "enabled");
DRM_DEBUG_KMS("Mode %s\n", intel_dsi->operation_mode ? "command" : "video");
DRM_DEBUG_KMS("Pixel Format %d\n", intel_dsi->pixel_format);
DRM_DEBUG_KMS("TLPX %d\n", intel_dsi->escape_clk_div);
DRM_DEBUG_KMS("LP RX Timeout 0x%x\n", intel_dsi->lp_rx_timeout);
DRM_DEBUG_KMS("Turnaround Timeout 0x%x\n", intel_dsi->turn_arnd_val);
DRM_DEBUG_KMS("Init Count 0x%x\n", intel_dsi->init_count);
DRM_DEBUG_KMS("HS to LP Count 0x%x\n", intel_dsi->hs_to_lp_count);
DRM_DEBUG_KMS("LP Byte Clock %d\n", intel_dsi->lp_byte_clk);
DRM_DEBUG_KMS("DBI BW Timer 0x%x\n", intel_dsi->bw_timer);
DRM_DEBUG_KMS("LP to HS Clock Count 0x%x\n", intel_dsi->clk_lp_to_hs_count);
DRM_DEBUG_KMS("HS to LP Clock Count 0x%x\n", intel_dsi->clk_hs_to_lp_count);
DRM_DEBUG_KMS("BTA %s\n",
intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA ?
"disabled" : "enabled");
/* Delays in the VBT are in units of 100 us, so convert to ms here:
 * delay (in 100 us units) * 100 / 1000 = delay / 10 (ms) */
intel_dsi->backlight_off_delay = pps->bl_disable_delay / 10;
intel_dsi->backlight_on_delay = pps->bl_enable_delay / 10;
intel_dsi->panel_on_delay = pps->panel_on_delay / 10;
intel_dsi->panel_off_delay = pps->panel_off_delay / 10;
intel_dsi->panel_pwr_cycle_delay = pps->panel_power_cycle_delay / 10;
return true;
}
static int generic_mode_valid(struct intel_dsi_device *dsi,
struct drm_display_mode *mode)
{
return MODE_OK;
}
static bool generic_mode_fixup(struct intel_dsi_device *dsi,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
}
static void generic_panel_reset(struct intel_dsi_device *dsi)
{
struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET];
generic_exec_sequence(intel_dsi, sequence);
}
static void generic_disable_panel_power(struct intel_dsi_device *dsi)
{
struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET];
generic_exec_sequence(intel_dsi, sequence);
}
static void generic_send_otp_cmds(struct intel_dsi_device *dsi)
{
struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
generic_exec_sequence(intel_dsi, sequence);
}
static void generic_enable(struct intel_dsi_device *dsi)
{
struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON];
generic_exec_sequence(intel_dsi, sequence);
}
static void generic_disable(struct intel_dsi_device *dsi)
{
struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
char *sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_OFF];
generic_exec_sequence(intel_dsi, sequence);
}
static enum drm_connector_status generic_detect(struct intel_dsi_device *dsi)
{
return connector_status_connected;
}
static bool generic_get_hw_state(struct intel_dsi_device *dev)
{
return true;
}
static struct drm_display_mode *generic_get_modes(struct intel_dsi_device *dsi)
{
struct intel_dsi *intel_dsi = container_of(dsi, struct intel_dsi, dev);
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->vbt.lfp_lvds_vbt_mode->type |= DRM_MODE_TYPE_PREFERRED;
return dev_priv->vbt.lfp_lvds_vbt_mode;
}
static void generic_destroy(struct intel_dsi_device *dsi) { }
/* Callbacks. We might not need them all. */
struct intel_dsi_dev_ops vbt_generic_dsi_display_ops = {
.init = generic_init,
.mode_valid = generic_mode_valid,
.mode_fixup = generic_mode_fixup,
.panel_reset = generic_panel_reset,
.disable_panel_power = generic_disable_panel_power,
.send_otp_cmds = generic_send_otp_cmds,
.enable = generic_enable,
.disable = generic_disable,
.detect = generic_detect,
.get_hw_state = generic_get_hw_state,
.get_modes = generic_get_modes,
.destroy = generic_destroy,
};


@@ -0,0 +1,378 @@
/*
* Copyright © 2013 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Shobhit Kumar <shobhit.kumar@intel.com>
* Yogesh Mohan Marimuthu <yogesh.mohan.marimuthu@intel.com>
*/
#include <linux/kernel.h>
#include "intel_drv.h"
#include "i915_drv.h"
#include "intel_dsi.h"
#define DSI_HSS_PACKET_SIZE 4
#define DSI_HSE_PACKET_SIZE 4
#define DSI_HSA_PACKET_EXTRA_SIZE 6
#define DSI_HBP_PACKET_EXTRA_SIZE 6
#define DSI_HACTIVE_PACKET_EXTRA_SIZE 6
#define DSI_HFP_PACKET_EXTRA_SIZE 6
#define DSI_EOTP_PACKET_SIZE 4
struct dsi_mnp {
u32 dsi_pll_ctrl;
u32 dsi_pll_div;
};
static const u32 lfsr_converts[] = {
426, 469, 234, 373, 442, 221, 110, 311, 411, /* 62 - 70 */
461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */
106, 53, 282, 397, 354, 227, 113, 56, 284, 142, /* 81 - 90 */
71, 35 /* 91 - 92 */
};
#ifdef DSI_CLK_FROM_RR
static u32 dsi_rr_formula(const struct drm_display_mode *mode,
int pixel_format, int video_mode_format,
int lane_count, bool eotp)
{
u32 bpp;
u32 hactive, vactive, hfp, hsync, hbp, vfp, vsync, vbp;
u32 hsync_bytes, hbp_bytes, hactive_bytes, hfp_bytes;
u32 bytes_per_line, bytes_per_frame;
u32 num_frames;
u32 bytes_per_x_frames, bytes_per_x_frames_x_lanes;
u32 dsi_bit_clock_hz;
u32 dsi_clk;
switch (pixel_format) {
default:
case VID_MODE_FORMAT_RGB888:
case VID_MODE_FORMAT_RGB666_LOOSE:
bpp = 24;
break;
case VID_MODE_FORMAT_RGB666:
bpp = 18;
break;
case VID_MODE_FORMAT_RGB565:
bpp = 16;
break;
}
hactive = mode->hdisplay;
vactive = mode->vdisplay;
hfp = mode->hsync_start - mode->hdisplay;
hsync = mode->hsync_end - mode->hsync_start;
hbp = mode->htotal - mode->hsync_end;
vfp = mode->vsync_start - mode->vdisplay;
vsync = mode->vsync_end - mode->vsync_start;
vbp = mode->vtotal - mode->vsync_end;
hsync_bytes = DIV_ROUND_UP(hsync * bpp, 8);
hbp_bytes = DIV_ROUND_UP(hbp * bpp, 8);
hactive_bytes = DIV_ROUND_UP(hactive * bpp, 8);
hfp_bytes = DIV_ROUND_UP(hfp * bpp, 8);
bytes_per_line = DSI_HSS_PACKET_SIZE + hsync_bytes +
DSI_HSA_PACKET_EXTRA_SIZE + DSI_HSE_PACKET_SIZE +
hbp_bytes + DSI_HBP_PACKET_EXTRA_SIZE +
hactive_bytes + DSI_HACTIVE_PACKET_EXTRA_SIZE +
hfp_bytes + DSI_HFP_PACKET_EXTRA_SIZE;
/*
* XXX: Need to accurately calculate LP to HS transition timeout and add
* it to bytes_per_line/bytes_per_frame.
*/
if (eotp && video_mode_format == VIDEO_MODE_BURST)
bytes_per_line += DSI_EOTP_PACKET_SIZE;
bytes_per_frame = vsync * bytes_per_line + vbp * bytes_per_line +
vactive * bytes_per_line + vfp * bytes_per_line;
if (eotp &&
(video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ||
video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS))
bytes_per_frame += DSI_EOTP_PACKET_SIZE;
num_frames = drm_mode_vrefresh(mode);
bytes_per_x_frames = num_frames * bytes_per_frame;
bytes_per_x_frames_x_lanes = bytes_per_x_frames / lane_count;
/* the dsi clock is divided by 2 in the hardware to get dsi ddr clock */
dsi_bit_clock_hz = bytes_per_x_frames_x_lanes * 8;
dsi_clk = dsi_bit_clock_hz / 1000;
if (eotp && video_mode_format == VIDEO_MODE_BURST)
dsi_clk *= 2;
return dsi_clk;
}
#else
/* Get DSI clock from pixel clock */
static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
{
u32 dsi_clk_khz;
u32 bpp;
switch (pixel_format) {
default:
case VID_MODE_FORMAT_RGB888:
case VID_MODE_FORMAT_RGB666_LOOSE:
bpp = 24;
break;
case VID_MODE_FORMAT_RGB666:
bpp = 18;
break;
case VID_MODE_FORMAT_RGB565:
bpp = 16;
break;
}
/* DSI data rate = pixel clock * bits per pixel / lane count,
 * with the pixel clock and the result both in kHz */
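/* Illustration with assumed values: pclk = 75400 kHz at 24 bpp on 4
 * lanes gives dsi_clk_khz = DIV_ROUND_CLOSEST(75400 * 24, 4) =
 * 452400 kHz. */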
dsi_clk_khz = DIV_ROUND_CLOSEST(pclk * bpp, lane_count);
return dsi_clk_khz;
}
#endif
static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
{
u32 m, n, p;
u32 ref_clk;
u32 error;
u32 tmp_error;
int target_dsi_clk;
int calc_dsi_clk;
u32 calc_m;
u32 calc_p;
u32 m_seed;
/* dsi_clk is expected in KHZ */
if (dsi_clk < 300000 || dsi_clk > 1150000) {
DRM_ERROR("DSI CLK Out of Range\n");
return -ECHRNG;
}
ref_clk = 25000;
target_dsi_clk = dsi_clk;
error = 0xFFFFFFFF;
tmp_error = 0xFFFFFFFF;
calc_m = 0;
calc_p = 0;
for (m = 62; m <= 92; m++) {
for (p = 2; p <= 6; p++) {
/* Find the optimal m and p divisors
with minimal error +/- the required clock */
calc_dsi_clk = (m * ref_clk) / p;
if (calc_dsi_clk == target_dsi_clk) {
calc_m = m;
calc_p = p;
error = 0;
break;
} else
tmp_error = abs(target_dsi_clk - calc_dsi_clk);
if (tmp_error < error) {
error = tmp_error;
calc_m = m;
calc_p = p;
}
}
if (error == 0)
break;
}
m_seed = lfsr_converts[calc_m - 62];
n = 1;
dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2);
dsi_mnp->dsi_pll_div = (n - 1) << DSI_PLL_N1_DIV_SHIFT |
m_seed << DSI_PLL_M1_DIV_SHIFT;
return 0;
}
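/*
 * Worked example for dsi_calc_mnp() with an assumed target of 452400 kHz
 * and the 25 MHz reference: the search settles on m = 72, p = 4, since
 * 72 * 25000 / 4 = 450000 kHz leaves the smallest error (2400 kHz), and
 * programs m_seed = lfsr_converts[72 - 62] = 486.
 */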
/*
* XXX: The muxing and gating is hard coded for now. Need to add support for
* sharing PLLs with two DSI outputs.
*/
static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
int ret;
struct dsi_mnp dsi_mnp;
u32 dsi_clk;
dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
intel_dsi->lane_count);
ret = dsi_calc_mnp(dsi_clk, &dsi_mnp);
if (ret) {
DRM_DEBUG_KMS("dsi_calc_mnp failed\n");
return;
}
dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL;
DRM_DEBUG_KMS("dsi pll div %08x, ctrl %08x\n",
dsi_mnp.dsi_pll_div, dsi_mnp.dsi_pll_ctrl);
vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, 0);
vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_DIVIDER, dsi_mnp.dsi_pll_div);
vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, dsi_mnp.dsi_pll_ctrl);
}
void vlv_enable_dsi_pll(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
u32 tmp;
DRM_DEBUG_KMS("\n");
mutex_lock(&dev_priv->dpio_lock);
vlv_configure_dsi_pll(encoder);
/* wait at least 0.5 us after ungating before enabling VCO */
usleep_range(1, 10);
tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
tmp |= DSI_PLL_VCO_EN;
vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
mutex_unlock(&dev_priv->dpio_lock);
if (wait_for(I915_READ(PIPECONF(PIPE_A)) & PIPECONF_DSI_PLL_LOCKED, 20)) {
DRM_ERROR("DSI PLL lock failed\n");
return;
}
DRM_DEBUG_KMS("DSI PLL locked\n");
}
void vlv_disable_dsi_pll(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
u32 tmp;
DRM_DEBUG_KMS("\n");
mutex_lock(&dev_priv->dpio_lock);
tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
tmp &= ~DSI_PLL_VCO_EN;
tmp |= DSI_PLL_LDO_GATE;
vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
mutex_unlock(&dev_priv->dpio_lock);
}
static void assert_bpp_mismatch(int pixel_format, int pipe_bpp)
{
int bpp;
switch (pixel_format) {
default:
case VID_MODE_FORMAT_RGB888:
case VID_MODE_FORMAT_RGB666_LOOSE:
bpp = 24;
break;
case VID_MODE_FORMAT_RGB666:
bpp = 18;
break;
case VID_MODE_FORMAT_RGB565:
bpp = 16;
break;
}
WARN(bpp != pipe_bpp,
"bpp match assertion failure (expected %d, current %d)\n",
bpp, pipe_bpp);
}
u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
u32 dsi_clock, pclk;
u32 pll_ctl, pll_div;
u32 m = 0, p = 0;
int refclk = 25000;
int i;
DRM_DEBUG_KMS("\n");
mutex_lock(&dev_priv->dpio_lock);
pll_ctl = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
pll_div = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_DIVIDER);
mutex_unlock(&dev_priv->dpio_lock);
/* mask out other bits and extract the P1 divisor */
pll_ctl &= DSI_PLL_P1_POST_DIV_MASK;
pll_ctl = pll_ctl >> (DSI_PLL_P1_POST_DIV_SHIFT - 2);
/* mask out the other bits and extract the M1 divisor */
pll_div &= DSI_PLL_M1_DIV_MASK;
pll_div = pll_div >> DSI_PLL_M1_DIV_SHIFT;
while (pll_ctl) {
pll_ctl = pll_ctl >> 1;
p++;
}
p--;
if (!p) {
DRM_ERROR("wrong P1 divisor\n");
return 0;
}
for (i = 0; i < ARRAY_SIZE(lfsr_converts); i++) {
if (lfsr_converts[i] == pll_div)
break;
}
if (i == ARRAY_SIZE(lfsr_converts)) {
DRM_ERROR("wrong m_seed programmed\n");
return 0;
}
m = i + 62;
dsi_clock = (m * refclk) / p;
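/* Illustration reversing the example above (assumed register values):
 * m_seed 486 maps back to m = 72, p = 4 gives dsi_clock = 450000 kHz,
 * and with 4 lanes at 24 bpp, pclk = 450000 * 4 / 24 = 75000 kHz. */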
/* pixel_format and pipe_bpp should agree */
assert_bpp_mismatch(intel_dsi->pixel_format, pipe_bpp);
pclk = DIV_ROUND_CLOSEST(dsi_clock * intel_dsi->lane_count, pipe_bpp);
return pclk;
}


@@ -0,0 +1,573 @@
/*
* Copyright 2006 Dave Airlie <airlied@linux.ie>
* Copyright © 2006-2007 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*/
#include <linux/i2c.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "dvo.h"
#define SIL164_ADDR 0x38
#define CH7xxx_ADDR 0x76
#define TFP410_ADDR 0x38
#define NS2501_ADDR 0x38
static const struct intel_dvo_device intel_dvo_devices[] = {
{
.type = INTEL_DVO_CHIP_TMDS,
.name = "sil164",
.dvo_reg = DVOC,
.slave_addr = SIL164_ADDR,
.dev_ops = &sil164_ops,
},
{
.type = INTEL_DVO_CHIP_TMDS,
.name = "ch7xxx",
.dvo_reg = DVOC,
.slave_addr = CH7xxx_ADDR,
.dev_ops = &ch7xxx_ops,
},
{
.type = INTEL_DVO_CHIP_TMDS,
.name = "ch7xxx",
.dvo_reg = DVOC,
.slave_addr = 0x75, /* For some ch7010 */
.dev_ops = &ch7xxx_ops,
},
{
.type = INTEL_DVO_CHIP_LVDS,
.name = "ivch",
.dvo_reg = DVOA,
.slave_addr = 0x02, /* Might also be 0x44, 0x84, 0xc4 */
.dev_ops = &ivch_ops,
},
{
.type = INTEL_DVO_CHIP_TMDS,
.name = "tfp410",
.dvo_reg = DVOC,
.slave_addr = TFP410_ADDR,
.dev_ops = &tfp410_ops,
},
{
.type = INTEL_DVO_CHIP_LVDS,
.name = "ch7017",
.dvo_reg = DVOC,
.slave_addr = 0x75,
.gpio = GMBUS_PORT_DPB,
.dev_ops = &ch7017_ops,
},
{
.type = INTEL_DVO_CHIP_TMDS,
.name = "ns2501",
.dvo_reg = DVOB,
.slave_addr = NS2501_ADDR,
.dev_ops = &ns2501_ops,
}
};
struct intel_dvo {
struct intel_encoder base;
struct intel_dvo_device dev;
struct drm_display_mode *panel_fixed_mode;
bool panel_wants_dither;
};
static struct intel_dvo *enc_to_dvo(struct intel_encoder *encoder)
{
return container_of(encoder, struct intel_dvo, base);
}
static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
{
return enc_to_dvo(intel_attached_encoder(connector));
}
static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
u32 tmp;
tmp = I915_READ(intel_dvo->dev.dvo_reg);
if (!(tmp & DVO_ENABLE))
return false;
return intel_dvo->dev.dev_ops->get_hw_state(&intel_dvo->dev);
}
static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 tmp;
tmp = I915_READ(intel_dvo->dev.dvo_reg);
if (!(tmp & DVO_ENABLE))
return false;
*pipe = PORT_TO_PIPE(tmp);
return true;
}
static void intel_dvo_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 tmp, flags = 0;
tmp = I915_READ(intel_dvo->dev.dvo_reg);
if (tmp & DVO_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
else
flags |= DRM_MODE_FLAG_NHSYNC;
if (tmp & DVO_VSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PVSYNC;
else
flags |= DRM_MODE_FLAG_NVSYNC;
pipe_config->adjusted_mode.flags |= flags;
pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
}
static void intel_disable_dvo(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 dvo_reg = intel_dvo->dev.dvo_reg;
u32 temp = I915_READ(dvo_reg);
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
I915_READ(dvo_reg);
}
static void intel_enable_dvo(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
u32 dvo_reg = intel_dvo->dev.dvo_reg;
u32 temp = I915_READ(dvo_reg);
intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
&crtc->config.requested_mode,
&crtc->config.adjusted_mode);
I915_WRITE(dvo_reg, temp | DVO_ENABLE);
I915_READ(dvo_reg);
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
}
/* Special dpms function to support cloning between dvo/sdvo/crt. */
static void intel_dvo_dpms(struct drm_connector *connector, int mode)
{
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
struct drm_crtc *crtc;
struct intel_crtc_config *config;
/* dvo supports only 2 dpms states. */
if (mode != DRM_MODE_DPMS_ON)
mode = DRM_MODE_DPMS_OFF;
if (mode == connector->dpms)
return;
connector->dpms = mode;
/* Only need to change hw state when actually enabled */
crtc = intel_dvo->base.base.crtc;
if (!crtc) {
intel_dvo->base.connectors_active = false;
return;
}
/* We call connector dpms manually below in case pipe dpms doesn't
* change due to cloning. */
if (mode == DRM_MODE_DPMS_ON) {
config = &to_intel_crtc(crtc)->config;
intel_dvo->base.connectors_active = true;
intel_crtc_update_dpms(crtc);
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
} else {
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
intel_dvo->base.connectors_active = false;
intel_crtc_update_dpms(crtc);
}
intel_modeset_check_state(connector->dev);
}
static enum drm_mode_status
intel_dvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
/* XXX: Validate clock range */
if (intel_dvo->panel_fixed_mode) {
if (mode->hdisplay > intel_dvo->panel_fixed_mode->hdisplay)
return MODE_PANEL;
if (mode->vdisplay > intel_dvo->panel_fixed_mode->vdisplay)
return MODE_PANEL;
}
return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode);
}
static bool intel_dvo_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
/* If we have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode,
* with the panel scaling set up to source from the H/VDisplay
* of the original mode.
*/
if (intel_dvo->panel_fixed_mode != NULL) {
#define C(x) adjusted_mode->x = intel_dvo->panel_fixed_mode->x
C(hdisplay);
C(hsync_start);
C(hsync_end);
C(htotal);
C(vdisplay);
C(vsync_start);
C(vsync_end);
C(vtotal);
C(clock);
#undef C
drm_mode_set_crtcinfo(adjusted_mode, 0);
}
return true;
}
static void intel_dvo_pre_enable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
int pipe = crtc->pipe;
u32 dvo_val;
u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg;
switch (dvo_reg) {
case DVOA:
default:
dvo_srcdim_reg = DVOA_SRCDIM;
break;
case DVOB:
dvo_srcdim_reg = DVOB_SRCDIM;
break;
case DVOC:
dvo_srcdim_reg = DVOC_SRCDIM;
break;
}
/* Save the data order, since I don't know what it should be set to. */
dvo_val = I915_READ(dvo_reg) &
(DVO_PRESERVE_MASK | DVO_DATA_ORDER_GBRG);
dvo_val |= DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE |
DVO_BLANK_ACTIVE_HIGH;
if (pipe == 1)
dvo_val |= DVO_PIPE_B_SELECT;
dvo_val |= DVO_PIPE_STALL;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
dvo_val |= DVO_HSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
dvo_val |= DVO_VSYNC_ACTIVE_HIGH;
/*I915_WRITE(DVOB_SRCDIM,
(adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
(adjusted_mode->VDisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/
I915_WRITE(dvo_srcdim_reg,
(adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
(adjusted_mode->vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));
/*I915_WRITE(DVOB, dvo_val);*/
I915_WRITE(dvo_reg, dvo_val);
}
/**
* Detect the output connection on our DVO device.
*
* Unimplemented.
*/
static enum drm_connector_status
intel_dvo_detect(struct drm_connector *connector, bool force)
{
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
}
static int intel_dvo_get_modes(struct drm_connector *connector)
{
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
/* We should probably have an i2c driver get_modes function for those
* devices which will have a fixed set of modes determined by the chip
* (TV-out, for example), but for now with just TMDS and LVDS,
* that's not the case.
*/
intel_ddc_get_modes(connector,
intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPC));
if (!list_empty(&connector->probed_modes))
return 1;
if (intel_dvo->panel_fixed_mode != NULL) {
struct drm_display_mode *mode;
mode = drm_mode_duplicate(connector->dev, intel_dvo->panel_fixed_mode);
if (mode) {
drm_mode_probed_add(connector, mode);
return 1;
}
}
return 0;
}
static void intel_dvo_destroy(struct drm_connector *connector)
{
drm_connector_cleanup(connector);
kfree(connector);
}
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
.dpms = intel_dvo_dpms,
.detect = intel_dvo_detect,
.destroy = intel_dvo_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
};
static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
.mode_valid = intel_dvo_mode_valid,
.get_modes = intel_dvo_get_modes,
.best_encoder = intel_best_encoder,
};
static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
{
struct intel_dvo *intel_dvo = enc_to_dvo(to_intel_encoder(encoder));
if (intel_dvo->dev.dev_ops->destroy)
intel_dvo->dev.dev_ops->destroy(&intel_dvo->dev);
kfree(intel_dvo->panel_fixed_mode);
intel_encoder_destroy(encoder);
}
static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
.destroy = intel_dvo_enc_destroy,
};
/**
* Attempts to get a fixed panel timing for LVDS (currently only the i830).
*
* Other chips with DVO LVDS will need to extend this to deal with the LVDS
* chip being on DVOB/C and having multiple pipes.
*/
static struct drm_display_mode *
intel_dvo_get_current_mode(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg);
struct drm_display_mode *mode = NULL;
/* If the DVO port is active, that'll be the LVDS, so we can pull out
* its timings to get how the BIOS set up the panel.
*/
if (dvo_val & DVO_ENABLE) {
struct drm_crtc *crtc;
int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0;
crtc = intel_get_crtc_for_pipe(dev, pipe);
if (crtc) {
mode = intel_crtc_mode_get(dev, crtc);
if (mode) {
mode->type |= DRM_MODE_TYPE_PREFERRED;
if (dvo_val & DVO_HSYNC_ACTIVE_HIGH)
mode->flags |= DRM_MODE_FLAG_PHSYNC;
if (dvo_val & DVO_VSYNC_ACTIVE_HIGH)
mode->flags |= DRM_MODE_FLAG_PVSYNC;
}
}
}
return mode;
}
void intel_dvo_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
struct intel_dvo *intel_dvo;
struct intel_connector *intel_connector;
int i;
int encoder_type = DRM_MODE_ENCODER_NONE;
intel_dvo = kzalloc(sizeof(*intel_dvo), GFP_KERNEL);
if (!intel_dvo)
return;
intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_dvo);
return;
}
intel_encoder = &intel_dvo->base;
drm_encoder_init(dev, &intel_encoder->base,
&intel_dvo_enc_funcs, encoder_type);
intel_encoder->disable = intel_disable_dvo;
intel_encoder->enable = intel_enable_dvo;
intel_encoder->get_hw_state = intel_dvo_get_hw_state;
intel_encoder->get_config = intel_dvo_get_config;
intel_encoder->compute_config = intel_dvo_compute_config;
intel_encoder->pre_enable = intel_dvo_pre_enable;
intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
intel_connector->unregister = intel_connector_unregister;
/* Now, try to find a controller */
for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
struct drm_connector *connector = &intel_connector->base;
const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
struct i2c_adapter *i2c;
int gpio;
bool dvoinit;
/* Allow the I2C driver info to specify the GPIO to be used in
* special cases, but otherwise default to what's defined
* in the spec.
*/
if (intel_gmbus_is_port_valid(dvo->gpio))
gpio = dvo->gpio;
else if (dvo->type == INTEL_DVO_CHIP_LVDS)
gpio = GMBUS_PORT_SSC;
else
gpio = GMBUS_PORT_DPB;
/* Set up the I2C bus necessary for the chip we're probing.
* It appears that everything is on GPIOE except for panels
* on i830 laptops, which are on GPIOB (DVOA).
*/
i2c = intel_gmbus_get_adapter(dev_priv, gpio);
intel_dvo->dev = *dvo;
/* GMBUS NAK handling seems to be unstable, hence let the
* transmitter detection run in bit banging mode for now.
*/
intel_gmbus_force_bit(i2c, true);
dvoinit = dvo->dev_ops->init(&intel_dvo->dev, i2c);
intel_gmbus_force_bit(i2c, false);
if (!dvoinit)
continue;
intel_encoder->type = INTEL_OUTPUT_DVO;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
switch (dvo->type) {
case INTEL_DVO_CHIP_TMDS:
intel_encoder->cloneable = (1 << INTEL_OUTPUT_ANALOG) |
(1 << INTEL_OUTPUT_DVO);
drm_connector_init(dev, connector,
&intel_dvo_connector_funcs,
DRM_MODE_CONNECTOR_DVII);
encoder_type = DRM_MODE_ENCODER_TMDS;
break;
case INTEL_DVO_CHIP_LVDS:
intel_encoder->cloneable = 0;
drm_connector_init(dev, connector,
&intel_dvo_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
encoder_type = DRM_MODE_ENCODER_LVDS;
break;
}
drm_connector_helper_add(connector,
&intel_dvo_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
intel_connector_attach_encoder(intel_connector, intel_encoder);
if (dvo->type == INTEL_DVO_CHIP_LVDS) {
/* For our LVDS chipsets, we should hopefully be able
* to dig the fixed panel mode out of the BIOS data.
* However, it's in a different format from the BIOS
* data on chipsets with integrated LVDS (stored in AIM
* headers, likely), so for now, just get the current
* mode being output through DVO.
*/
intel_dvo->panel_fixed_mode =
intel_dvo_get_current_mode(connector);
intel_dvo->panel_wants_dither = true;
}
drm_connector_register(connector);
return;
}
drm_encoder_cleanup(&intel_encoder->base);
kfree(intel_dvo);
kfree(intel_connector);
}

View file

@@ -0,0 +1,764 @@
/*
* Copyright © 2007 David Airlie
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* David Airlie
*/
#include <linux/async.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/console.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/sysrq.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/vga_switcheroo.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
static int intel_fbdev_set_par(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct intel_fbdev *ifbdev =
container_of(fb_helper, struct intel_fbdev, helper);
int ret;
ret = drm_fb_helper_set_par(info);
if (ret == 0) {
/*
* FIXME: fbdev presumes that all callbacks also work from
* atomic contexts and relies on that for emergency oops
* printing. KMS totally doesn't do that and the locking here is
* by far not the only place this goes wrong. Ignore this for
* now until we solve this for real.
*/
mutex_lock(&fb_helper->dev->struct_mutex);
ret = i915_gem_object_set_to_gtt_domain(ifbdev->fb->obj,
true);
mutex_unlock(&fb_helper->dev->struct_mutex);
}
return ret;
}
static struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = intel_fbdev_set_par,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
.fb_debug_enter = drm_fb_helper_debug_enter,
.fb_debug_leave = drm_fb_helper_debug_leave,
};
static int intelfb_alloc(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct intel_fbdev *ifbdev =
container_of(helper, struct intel_fbdev, helper);
struct drm_framebuffer *fb;
struct drm_device *dev = helper->dev;
struct drm_mode_fb_cmd2 mode_cmd = {};
struct drm_i915_gem_object *obj;
int size, ret;
/* we don't do packed 24bpp */
if (sizes->surface_bpp == 24)
sizes->surface_bpp = 32;
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
DIV_ROUND_UP(sizes->surface_bpp, 8), 64);
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
size = mode_cmd.pitches[0] * mode_cmd.height;
size = PAGE_ALIGN(size);
obj = i915_gem_object_create_stolen(dev, size);
if (obj == NULL)
obj = i915_gem_alloc_object(dev, size);
if (!obj) {
DRM_ERROR("failed to allocate framebuffer\n");
ret = -ENOMEM;
goto out;
}
/* Flush everything out, we'll be doing GTT only from now on */
ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
if (ret) {
DRM_ERROR("failed to pin obj: %d\n", ret);
goto out_unref;
}
fb = __intel_framebuffer_create(dev, &mode_cmd, obj);
if (IS_ERR(fb)) {
ret = PTR_ERR(fb);
goto out_unpin;
}
ifbdev->fb = to_intel_framebuffer(fb);
return 0;
out_unpin:
i915_gem_object_ggtt_unpin(obj);
out_unref:
drm_gem_object_unreference(&obj->base);
out:
return ret;
}
static int intelfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct intel_fbdev *ifbdev =
container_of(helper, struct intel_fbdev, helper);
struct intel_framebuffer *intel_fb = ifbdev->fb;
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct fb_info *info;
struct drm_framebuffer *fb;
struct drm_i915_gem_object *obj;
int size, ret;
bool prealloc = false;
mutex_lock(&dev->struct_mutex);
if (intel_fb &&
(sizes->fb_width > intel_fb->base.width ||
sizes->fb_height > intel_fb->base.height)) {
DRM_DEBUG_KMS("BIOS fb too small (%dx%d), we require (%dx%d),"
" releasing it\n",
intel_fb->base.width, intel_fb->base.height,
sizes->fb_width, sizes->fb_height);
drm_framebuffer_unreference(&intel_fb->base);
intel_fb = ifbdev->fb = NULL;
}
if (!intel_fb || WARN_ON(!intel_fb->obj)) {
DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
ret = intelfb_alloc(helper, sizes);
if (ret)
goto out_unlock;
intel_fb = ifbdev->fb;
} else {
DRM_DEBUG_KMS("re-using BIOS fb\n");
prealloc = true;
sizes->fb_width = intel_fb->base.width;
sizes->fb_height = intel_fb->base.height;
}
obj = intel_fb->obj;
size = obj->base.size;
info = framebuffer_alloc(0, &dev->pdev->dev);
if (!info) {
ret = -ENOMEM;
goto out_unpin;
}
info->par = helper;
fb = &ifbdev->fb->base;
ifbdev->helper.fb = fb;
ifbdev->helper.fbdev = info;
strcpy(info->fix.id, "inteldrmfb");
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &intelfb_ops;
ret = fb_alloc_cmap(&info->cmap, 256, 0);
if (ret) {
ret = -ENOMEM;
goto out_unpin;
}
/* setup aperture base/size for vesafb takeover */
info->apertures = alloc_apertures(1);
if (!info->apertures) {
ret = -ENOMEM;
goto out_unpin;
}
info->apertures->ranges[0].base = dev->mode_config.fb_base;
info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
info->fix.smem_len = size;
info->screen_base =
ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
size);
if (!info->screen_base) {
ret = -ENOSPC;
goto out_unpin;
}
info->screen_size = size;
/* This driver doesn't need a VT switch to restore the mode on resume */
info->skip_vt_switch = true;
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
/* If the object is shmemfs backed, it will have given us zeroed pages.
* If the object is stolen however, it will be full of whatever
* garbage was left in there.
*/
if (ifbdev->fb->obj->stolen && !prealloc)
memset_io(info->screen_base, 0, info->screen_size);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n",
fb->width, fb->height,
i915_gem_obj_ggtt_offset(obj), obj);
mutex_unlock(&dev->struct_mutex);
vga_switcheroo_client_fb_set(dev->pdev, info);
return 0;
out_unpin:
i915_gem_object_ggtt_unpin(obj);
drm_gem_object_unreference(&obj->base);
out_unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
/** Sets the color ramps on behalf of RandR */
static void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
intel_crtc->lut_r[regno] = red >> 8;
intel_crtc->lut_g[regno] = green >> 8;
intel_crtc->lut_b[regno] = blue >> 8;
}
static void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
*red = intel_crtc->lut_r[regno] << 8;
*green = intel_crtc->lut_g[regno] << 8;
*blue = intel_crtc->lut_b[regno] << 8;
}
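/*
 * Worked example of the truncation above: a 16-bit component of 0xabcd
 * is stored in the 8-bit LUT as 0xab and reads back as 0xab00, so the
 * low byte is lost on the round trip.
 */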
static struct drm_fb_helper_crtc *
intel_fb_helper_crtc(struct drm_fb_helper *fb_helper, struct drm_crtc *crtc)
{
int i;
for (i = 0; i < fb_helper->crtc_count; i++)
if (fb_helper->crtc_info[i].mode_set.crtc == crtc)
return &fb_helper->crtc_info[i];
return NULL;
}
/*
* Try to read the BIOS display configuration and use it for the initial
* fb configuration.
*
* The BIOS or boot loader will generally create an initial display
* configuration for us that includes some set of active pipes and displays.
* This routine tries to figure out which pipes and connectors are active
* and stuffs them into the crtcs and modes array given to us by the
* drm_fb_helper code.
*
* The overall sequence is:
* intel_fbdev_init - from driver load
* intel_fbdev_init_bios - initialize the intel_fbdev using BIOS data
* drm_fb_helper_init - build fb helper structs
* drm_fb_helper_single_add_all_connectors - more fb helper structs
* intel_fbdev_initial_config - apply the config
* drm_fb_helper_initial_config - call ->probe then register_framebuffer()
* drm_setup_crtcs - build crtc config for fbdev
* intel_fb_initial_config - find active connectors etc
* drm_fb_helper_single_fb_probe - set up fbdev
* intelfb_create - re-use or alloc fb, build out fbdev structs
*
* Note that we don't make special consideration whether we could actually
* switch to the selected modes without a full modeset. E.g. when the display
* is in VGA mode we need to recalculate watermarks and set a new high-res
* framebuffer anyway.
*/
static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_crtc **crtcs,
struct drm_display_mode **modes,
bool *enabled, int width, int height)
{
struct drm_device *dev = fb_helper->dev;
int i, j;
bool *save_enabled;
bool fallback = true;
int num_connectors_enabled = 0;
int num_connectors_detected = 0;
save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool),
GFP_KERNEL);
if (!save_enabled)
return false;
memcpy(save_enabled, enabled, dev->mode_config.num_connector);
for (i = 0; i < fb_helper->connector_count; i++) {
struct drm_fb_helper_connector *fb_conn;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_fb_helper_crtc *new_crtc;
fb_conn = fb_helper->connector_info[i];
connector = fb_conn->connector;
if (connector->status == connector_status_connected)
num_connectors_detected++;
if (!enabled[i]) {
DRM_DEBUG_KMS("connector %s not enabled, skipping\n",
connector->name);
continue;
}
if (connector->force == DRM_FORCE_OFF) {
DRM_DEBUG_KMS("connector %s is disabled by user, skipping\n",
connector->name);
enabled[i] = false;
continue;
}
encoder = connector->encoder;
if (!encoder || WARN_ON(!encoder->crtc)) {
if (connector->force > DRM_FORCE_OFF)
goto bail;
DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
connector->name);
enabled[i] = false;
continue;
}
num_connectors_enabled++;
new_crtc = intel_fb_helper_crtc(fb_helper, encoder->crtc);
/*
* Make sure we're not trying to drive multiple connectors
* with a single CRTC, since our cloning support may not
* match the BIOS.
*/
for (j = 0; j < fb_helper->connector_count; j++) {
if (crtcs[j] == new_crtc) {
DRM_DEBUG_KMS("fallback: cloned configuration\n");
goto bail;
}
}
DRM_DEBUG_KMS("looking for cmdline mode on connector %s\n",
connector->name);
/* go for command line mode first */
modes[i] = drm_pick_cmdline_mode(fb_conn, width, height);
/* try for preferred next */
if (!modes[i]) {
DRM_DEBUG_KMS("looking for preferred mode on connector %s\n",
connector->name);
modes[i] = drm_has_preferred_mode(fb_conn, width,
height);
}
/* No preferred mode marked by the EDID? Are there any modes? */
if (!modes[i] && !list_empty(&connector->modes)) {
DRM_DEBUG_KMS("using first mode listed on connector %s\n",
connector->name);
modes[i] = list_first_entry(&connector->modes,
struct drm_display_mode,
head);
}
/* last resort: use current mode */
if (!modes[i]) {
/*
* IMPORTANT: We want to use the adjusted mode (i.e.
* after the panel fitter upscaling) as the initial
* config, not the input mode, which is what crtc->mode
* usually contains. But since our current fastboot
* code puts a mode derived from the post-pfit timings
* into crtc->mode this works out correctly. We don't
* use hwmode anywhere right now, so use it for this
* since the fb helper layer wants a pointer to
* something we own.
*/
DRM_DEBUG_KMS("looking for current mode on connector %s\n",
connector->name);
intel_mode_from_pipe_config(&encoder->crtc->hwmode,
&to_intel_crtc(encoder->crtc)->config);
modes[i] = &encoder->crtc->hwmode;
}
crtcs[i] = new_crtc;
DRM_DEBUG_KMS("connector %s on pipe %c [CRTC:%d]: %dx%d%s\n",
connector->name,
pipe_name(to_intel_crtc(encoder->crtc)->pipe),
encoder->crtc->base.id,
modes[i]->hdisplay, modes[i]->vdisplay,
modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :"");
fallback = false;
}
/*
* If the BIOS didn't enable everything it could, fall back to the fbdev
* helper library's behaviour of lighting up as much as possible.
*/
if (num_connectors_enabled != num_connectors_detected &&
num_connectors_enabled < INTEL_INFO(dev)->num_pipes) {
DRM_DEBUG_KMS("fallback: Not all outputs enabled\n");
DRM_DEBUG_KMS("Enabled: %i, detected: %i\n", num_connectors_enabled,
num_connectors_detected);
fallback = true;
}
if (fallback) {
bail:
DRM_DEBUG_KMS("Not using firmware configuration\n");
memcpy(enabled, save_enabled, dev->mode_config.num_connector);
kfree(save_enabled);
return false;
}
kfree(save_enabled);
return true;
}
static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
.initial_config = intel_fb_initial_config,
.gamma_set = intel_crtc_fb_gamma_set,
.gamma_get = intel_crtc_fb_gamma_get,
.fb_probe = intelfb_create,
};
static void intel_fbdev_destroy(struct drm_device *dev,
struct intel_fbdev *ifbdev)
{
if (ifbdev->helper.fbdev) {
struct fb_info *info = ifbdev->helper.fbdev;
unregister_framebuffer(info);
iounmap(info->screen_base);
if (info->cmap.len)
fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
}
drm_fb_helper_fini(&ifbdev->helper);
drm_framebuffer_unregister_private(&ifbdev->fb->base);
drm_framebuffer_remove(&ifbdev->fb->base);
}
/*
* Build an intel_fbdev struct using a BIOS allocated framebuffer, if possible.
* The core display code will have read out the current plane configuration,
* so we use that to figure out if there's an object for us to use as the
* fb, and if so, we re-use it for the fbdev configuration.
*
* Note we only support a single fb shared across pipes for boot (mostly for
* fbcon), so we just find the biggest and use that.
*/
static bool intel_fbdev_init_bios(struct drm_device *dev,
struct intel_fbdev *ifbdev)
{
struct intel_framebuffer *fb = NULL;
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
struct intel_plane_config *plane_config = NULL;
unsigned int max_size = 0;
if (!i915.fastboot)
return false;
/* Find the largest fb */
for_each_crtc(dev, crtc) {
intel_crtc = to_intel_crtc(crtc);
if (!intel_crtc->active || !crtc->primary->fb) {
DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n",
pipe_name(intel_crtc->pipe));
continue;
}
if (intel_crtc->plane_config.size > max_size) {
DRM_DEBUG_KMS("found possible fb from plane %c\n",
pipe_name(intel_crtc->pipe));
plane_config = &intel_crtc->plane_config;
fb = to_intel_framebuffer(crtc->primary->fb);
max_size = plane_config->size;
}
}
if (!fb) {
DRM_DEBUG_KMS("no active fbs found, not using BIOS config\n");
goto out;
}
/* Now make sure all the pipes will fit into it */
for_each_crtc(dev, crtc) {
unsigned int cur_size;
intel_crtc = to_intel_crtc(crtc);
if (!intel_crtc->active) {
DRM_DEBUG_KMS("pipe %c not active, skipping\n",
pipe_name(intel_crtc->pipe));
continue;
}
DRM_DEBUG_KMS("checking plane %c for BIOS fb\n",
pipe_name(intel_crtc->pipe));
/*
* See if the plane fb we found above will fit on this
* pipe. Note we need to use the selected fb's pitch and bpp
* rather than the current pipe's, since they differ.
*/
cur_size = intel_crtc->config.adjusted_mode.crtc_hdisplay;
cur_size = cur_size * fb->base.bits_per_pixel / 8;
if (fb->base.pitches[0] < cur_size) {
DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
pipe_name(intel_crtc->pipe),
cur_size, fb->base.pitches[0]);
plane_config = NULL;
fb = NULL;
break;
}
cur_size = intel_crtc->config.adjusted_mode.crtc_vdisplay;
cur_size = ALIGN(cur_size, plane_config->tiled ? (IS_GEN2(dev) ? 16 : 8) : 1);
cur_size *= fb->base.pitches[0];
DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
pipe_name(intel_crtc->pipe),
intel_crtc->config.adjusted_mode.crtc_hdisplay,
intel_crtc->config.adjusted_mode.crtc_vdisplay,
fb->base.bits_per_pixel,
cur_size);
if (cur_size > max_size) {
DRM_DEBUG_KMS("fb not big enough for plane %c (%d vs %d)\n",
pipe_name(intel_crtc->pipe),
cur_size, max_size);
plane_config = NULL;
fb = NULL;
break;
}
DRM_DEBUG_KMS("fb big enough for plane %c (%d >= %d)\n",
pipe_name(intel_crtc->pipe),
max_size, cur_size);
}
if (!fb) {
DRM_DEBUG_KMS("BIOS fb not suitable for all pipes, not using\n");
goto out;
}
ifbdev->preferred_bpp = fb->base.bits_per_pixel;
ifbdev->fb = fb;
drm_framebuffer_reference(&ifbdev->fb->base);
/* Final pass to check if any active pipes don't have fbs */
for_each_crtc(dev, crtc) {
intel_crtc = to_intel_crtc(crtc);
if (!intel_crtc->active)
continue;
WARN(!crtc->primary->fb,
"re-used BIOS config but lost an fb on crtc %d\n",
crtc->base.id);
}
DRM_DEBUG_KMS("using BIOS fb for initial console\n");
return true;
out:
return false;
}
static void intel_fbdev_suspend_worker(struct work_struct *work)
{
intel_fbdev_set_suspend(container_of(work,
struct drm_i915_private,
fbdev_suspend_work)->dev,
FBINFO_STATE_RUNNING,
true);
}
int intel_fbdev_init(struct drm_device *dev)
{
struct intel_fbdev *ifbdev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
if (WARN_ON(INTEL_INFO(dev)->num_pipes == 0))
return -ENODEV;
ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
if (ifbdev == NULL)
return -ENOMEM;
drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs);
if (!intel_fbdev_init_bios(dev, ifbdev))
ifbdev->preferred_bpp = 32;
ret = drm_fb_helper_init(dev, &ifbdev->helper,
INTEL_INFO(dev)->num_pipes, 4);
if (ret) {
kfree(ifbdev);
return ret;
}
dev_priv->fbdev = ifbdev;
INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker);
drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
return 0;
}
void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
{
struct drm_i915_private *dev_priv = data;
struct intel_fbdev *ifbdev = dev_priv->fbdev;
/* Due to peculiar init order wrt to hpd handling this is separate. */
drm_fb_helper_initial_config(&ifbdev->helper, ifbdev->preferred_bpp);
}
void intel_fbdev_fini(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (!dev_priv->fbdev)
return;
flush_work(&dev_priv->fbdev_suspend_work);
async_synchronize_full();
intel_fbdev_destroy(dev, dev_priv->fbdev);
kfree(dev_priv->fbdev);
dev_priv->fbdev = NULL;
}
void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_fbdev *ifbdev = dev_priv->fbdev;
struct fb_info *info;
if (!ifbdev)
return;
info = ifbdev->helper.fbdev;
if (synchronous) {
/* Flush any pending work to turn the console on, and then
* wait to turn it off. It must be synchronous as we are
* about to suspend or unload the driver.
*
* Note that from within the work-handler, we cannot flush
* ourselves, so only flush outstanding work upon suspend!
*/
if (state != FBINFO_STATE_RUNNING)
flush_work(&dev_priv->fbdev_suspend_work);
console_lock();
} else {
/*
* The console lock can be pretty contended on resume due
* to all the printk activity. Try to keep it out of the hot
* path of resume if possible.
*/
WARN_ON(state != FBINFO_STATE_RUNNING);
if (!console_trylock()) {
/* Don't block our own workqueue as this can
* be run in parallel with other i915.ko tasks.
*/
schedule_work(&dev_priv->fbdev_suspend_work);
return;
}
}
/* On resume from hibernation: If the object is shmemfs backed, it has
* been restored from swap. If the object is stolen however, it will be
* full of whatever garbage was left in there.
*/
if (state == FBINFO_STATE_RUNNING && ifbdev->fb->obj->stolen)
memset_io(info->screen_base, 0, info->screen_size);
fb_set_suspend(info, state);
console_unlock();
}
void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->fbdev)
drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
}
void intel_fbdev_restore_mode(struct drm_device *dev)
{
int ret;
struct drm_i915_private *dev_priv = dev->dev_private;
if (!dev_priv->fbdev)
return;
ret = drm_fb_helper_restore_fbdev_mode_unlocked(&dev_priv->fbdev->helper);
if (ret)
DRM_DEBUG("failed to restore crtc mode\n");
}

File diff suppressed because it is too large

View file

@@ -0,0 +1,657 @@
/*
* Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
* Copyright © 2006-2008,2010 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
* Chris Wilson <chris@chris-wilson.co.uk>
*/
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/export.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
struct gmbus_port {
const char *name;
int reg;
};
static const struct gmbus_port gmbus_ports[] = {
{ "ssc", GPIOB },
{ "vga", GPIOA },
{ "panel", GPIOC },
{ "dpc", GPIOD },
{ "dpb", GPIOE },
{ "dpd", GPIOF },
};
/* Intel GPIO access functions */
#define I2C_RISEFALL_TIME 10
static inline struct intel_gmbus *
to_intel_gmbus(struct i2c_adapter *i2c)
{
return container_of(i2c, struct intel_gmbus, adapter);
}
void
intel_i2c_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0);
}
static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
{
u32 val;
/* When using bit banging for I2C, this bit needs to be set to 1 */
if (!IS_PINEVIEW(dev_priv->dev))
return;
val = I915_READ(DSPCLK_GATE_D);
if (enable)
val |= DPCUNIT_CLOCK_GATE_DISABLE;
else
val &= ~DPCUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, val);
}
static u32 get_reserved(struct intel_gmbus *bus)
{
struct drm_i915_private *dev_priv = bus->dev_priv;
struct drm_device *dev = dev_priv->dev;
u32 reserved = 0;
/* On most chips, these bits must be preserved in software. */
if (!IS_I830(dev) && !IS_845G(dev))
reserved = I915_READ_NOTRACE(bus->gpio_reg) &
(GPIO_DATA_PULLUP_DISABLE |
GPIO_CLOCK_PULLUP_DISABLE);
return reserved;
}
static int get_clock(void *data)
{
struct intel_gmbus *bus = data;
struct drm_i915_private *dev_priv = bus->dev_priv;
u32 reserved = get_reserved(bus);
I915_WRITE_NOTRACE(bus->gpio_reg, reserved | GPIO_CLOCK_DIR_MASK);
I915_WRITE_NOTRACE(bus->gpio_reg, reserved);
return (I915_READ_NOTRACE(bus->gpio_reg) & GPIO_CLOCK_VAL_IN) != 0;
}
static int get_data(void *data)
{
struct intel_gmbus *bus = data;
struct drm_i915_private *dev_priv = bus->dev_priv;
u32 reserved = get_reserved(bus);
I915_WRITE_NOTRACE(bus->gpio_reg, reserved | GPIO_DATA_DIR_MASK);
I915_WRITE_NOTRACE(bus->gpio_reg, reserved);
return (I915_READ_NOTRACE(bus->gpio_reg) & GPIO_DATA_VAL_IN) != 0;
}
static void set_clock(void *data, int state_high)
{
struct intel_gmbus *bus = data;
struct drm_i915_private *dev_priv = bus->dev_priv;
u32 reserved = get_reserved(bus);
u32 clock_bits;
if (state_high)
clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
else
clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
GPIO_CLOCK_VAL_MASK;
I915_WRITE_NOTRACE(bus->gpio_reg, reserved | clock_bits);
POSTING_READ(bus->gpio_reg);
}
static void set_data(void *data, int state_high)
{
struct intel_gmbus *bus = data;
struct drm_i915_private *dev_priv = bus->dev_priv;
u32 reserved = get_reserved(bus);
u32 data_bits;
if (state_high)
data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
else
data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
GPIO_DATA_VAL_MASK;
I915_WRITE_NOTRACE(bus->gpio_reg, reserved | data_bits);
POSTING_READ(bus->gpio_reg);
}
static int
intel_gpio_pre_xfer(struct i2c_adapter *adapter)
{
struct intel_gmbus *bus = container_of(adapter,
struct intel_gmbus,
adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
intel_i2c_reset(dev_priv->dev);
intel_i2c_quirk_set(dev_priv, true);
set_data(bus, 1);
set_clock(bus, 1);
udelay(I2C_RISEFALL_TIME);
return 0;
}
static void
intel_gpio_post_xfer(struct i2c_adapter *adapter)
{
struct intel_gmbus *bus = container_of(adapter,
struct intel_gmbus,
adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
set_data(bus, 1);
set_clock(bus, 1);
intel_i2c_quirk_set(dev_priv, false);
}
static void
intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
{
struct drm_i915_private *dev_priv = bus->dev_priv;
struct i2c_algo_bit_data *algo;
algo = &bus->bit_algo;
/* -1 to map pin pair to gmbus index */
bus->gpio_reg = dev_priv->gpio_mmio_base + gmbus_ports[pin - 1].reg;
bus->adapter.algo_data = algo;
algo->setsda = set_data;
algo->setscl = set_clock;
algo->getsda = get_data;
algo->getscl = get_clock;
algo->pre_xfer = intel_gpio_pre_xfer;
algo->post_xfer = intel_gpio_post_xfer;
algo->udelay = I2C_RISEFALL_TIME;
algo->timeout = usecs_to_jiffies(2200);
algo->data = bus;
}
static int
gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
u32 gmbus2_status,
u32 gmbus4_irq_en)
{
int i;
int reg_offset = dev_priv->gpio_mmio_base;
u32 gmbus2 = 0;
DEFINE_WAIT(wait);
if (!HAS_GMBUS_IRQ(dev_priv->dev))
gmbus4_irq_en = 0;
/* Important: The hw handles only the first bit, so set only one! Since
* we also need to check for NAKs besides the hw ready/idle signal, we
* need to wake up periodically and check that ourselves. */
I915_WRITE(GMBUS4 + reg_offset, gmbus4_irq_en);
for (i = 0; i < msecs_to_jiffies_timeout(50); i++) {
prepare_to_wait(&dev_priv->gmbus_wait_queue, &wait,
TASK_UNINTERRUPTIBLE);
gmbus2 = I915_READ_NOTRACE(GMBUS2 + reg_offset);
if (gmbus2 & (GMBUS_SATOER | gmbus2_status))
break;
schedule_timeout(1);
}
finish_wait(&dev_priv->gmbus_wait_queue, &wait);
I915_WRITE(GMBUS4 + reg_offset, 0);
if (gmbus2 & GMBUS_SATOER)
return -ENXIO;
if (gmbus2 & gmbus2_status)
return 0;
return -ETIMEDOUT;
}
static int
gmbus_wait_idle(struct drm_i915_private *dev_priv)
{
int ret;
int reg_offset = dev_priv->gpio_mmio_base;
#define C ((I915_READ_NOTRACE(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0)
if (!HAS_GMBUS_IRQ(dev_priv->dev))
return wait_for(C, 10);
/* Important: The hw handles only the first bit, so set only one! */
I915_WRITE(GMBUS4 + reg_offset, GMBUS_IDLE_EN);
ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
msecs_to_jiffies_timeout(10));
I915_WRITE(GMBUS4 + reg_offset, 0);
if (ret)
return 0;
else
return -ETIMEDOUT;
#undef C
}
static int
gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
unsigned short addr, u8 *buf, unsigned int len,
u32 gmbus1_index)
{
int reg_offset = dev_priv->gpio_mmio_base;
I915_WRITE(GMBUS1 + reg_offset,
gmbus1_index |
GMBUS_CYCLE_WAIT |
(len << GMBUS_BYTE_COUNT_SHIFT) |
(addr << GMBUS_SLAVE_ADDR_SHIFT) |
GMBUS_SLAVE_READ | GMBUS_SW_RDY);
while (len) {
int ret;
u32 val, loop = 0;
ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
GMBUS_HW_RDY_EN);
if (ret)
return ret;
val = I915_READ(GMBUS3 + reg_offset);
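/* GMBUS3 delivers up to four data bytes per ready cycle, LSB first */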
do {
*buf++ = val & 0xff;
val >>= 8;
} while (--len && ++loop < 4);
}
return 0;
}
static int
gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
u32 gmbus1_index)
{
u8 *buf = msg->buf;
unsigned int rx_size = msg->len;
unsigned int len;
int ret;
do {
len = min(rx_size, GMBUS_BYTE_COUNT_MAX);
ret = gmbus_xfer_read_chunk(dev_priv, msg->addr,
buf, len, gmbus1_index);
if (ret)
return ret;
rx_size -= len;
buf += len;
} while (rx_size != 0);
return 0;
}
static int
gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
unsigned short addr, u8 *buf, unsigned int len)
{
int reg_offset = dev_priv->gpio_mmio_base;
unsigned int chunk_size = len;
u32 val, loop;
val = loop = 0;
while (len && loop < 4) {
val |= *buf++ << (8 * loop++);
len -= 1;
}
I915_WRITE(GMBUS3 + reg_offset, val);
I915_WRITE(GMBUS1 + reg_offset,
GMBUS_CYCLE_WAIT |
(chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
(addr << GMBUS_SLAVE_ADDR_SHIFT) |
GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
while (len) {
int ret;
val = loop = 0;
do {
val |= *buf++ << (8 * loop);
} while (--len && ++loop < 4);
I915_WRITE(GMBUS3 + reg_offset, val);
ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
GMBUS_HW_RDY_EN);
if (ret)
return ret;
}
return 0;
}
static int
gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
{
u8 *buf = msg->buf;
unsigned int tx_size = msg->len;
unsigned int len;
int ret;
do {
len = min(tx_size, GMBUS_BYTE_COUNT_MAX);
ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len);
if (ret)
return ret;
buf += len;
tx_size -= len;
} while (tx_size != 0);
return 0;
}
/*
* The gmbus controller can combine a 1 or 2 byte write with a read that
* immediately follows it by using an "INDEX" cycle.
*/
static bool
gmbus_is_index_read(struct i2c_msg *msgs, int i, int num)
{
return (i + 1 < num &&
!(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 &&
(msgs[i + 1].flags & I2C_M_RD));
}
static int
gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
{
int reg_offset = dev_priv->gpio_mmio_base;
u32 gmbus1_index = 0;
u32 gmbus5 = 0;
int ret;
if (msgs[0].len == 2)
gmbus5 = GMBUS_2BYTE_INDEX_EN |
msgs[0].buf[1] | (msgs[0].buf[0] << 8);
if (msgs[0].len == 1)
gmbus1_index = GMBUS_CYCLE_INDEX |
(msgs[0].buf[0] << GMBUS_SLAVE_INDEX_SHIFT);
/* GMBUS5 holds 16-bit index */
if (gmbus5)
I915_WRITE(GMBUS5 + reg_offset, gmbus5);
ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index);
/* Clear GMBUS5 after each index transfer */
if (gmbus5)
I915_WRITE(GMBUS5 + reg_offset, 0);
return ret;
}
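/*
 * Illustrative sketch only (not part of the driver): an EDID-style
 * register read expressed as the write+read msg pair that
 * gmbus_is_index_read() recognises, so gmbus_xfer() services it with a
 * single INDEX cycle. The 0x50 address and single-byte offset are
 * assumptions for the example.
 */
static inline int example_gmbus_index_read(struct i2c_adapter *adapter,
					   u8 *buf, u16 len)
{
	u8 offset = 0;
	struct i2c_msg msgs[] = {
		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = len, .buf = buf },
	};

	return i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs));
}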
static int
gmbus_xfer(struct i2c_adapter *adapter,
struct i2c_msg *msgs,
int num)
{
struct intel_gmbus *bus = container_of(adapter,
struct intel_gmbus,
adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
int i, reg_offset;
int ret = 0;
intel_aux_display_runtime_get(dev_priv);
mutex_lock(&dev_priv->gmbus_mutex);
if (bus->force_bit) {
ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
goto out;
}
reg_offset = dev_priv->gpio_mmio_base;
I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
for (i = 0; i < num; i++) {
if (gmbus_is_index_read(msgs, i, num)) {
ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
i += 1; /* set i to the index of the read xfer */
} else if (msgs[i].flags & I2C_M_RD) {
ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
} else {
ret = gmbus_xfer_write(dev_priv, &msgs[i]);
}
if (ret == -ETIMEDOUT)
goto timeout;
if (ret == -ENXIO)
goto clear_err;
ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_WAIT_PHASE,
GMBUS_HW_WAIT_EN);
if (ret == -ENXIO)
goto clear_err;
if (ret)
goto timeout;
}
/* Generate a STOP condition on the bus. Note that gmbus can't generate
* a STOP on the very first cycle. To simplify the code we
* unconditionally generate the STOP condition with an additional gmbus
* cycle. */
I915_WRITE(GMBUS1 + reg_offset, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
/* Mark the GMBUS interface as disabled after waiting for idle.
* We will re-enable it at the start of the next xfer,
* till then let it sleep.
*/
if (gmbus_wait_idle(dev_priv)) {
DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n",
adapter->name);
ret = -ETIMEDOUT;
}
I915_WRITE(GMBUS0 + reg_offset, 0);
ret = ret ?: i;
goto out;
clear_err:
/*
* Wait for bus to IDLE before clearing NAK.
* If we clear the NAK while bus is still active, then it will stay
* active and the next transaction may fail.
*
* If no ACK is received during the address phase of a transaction, the
* adapter must report -ENXIO. It is not clear what to return if no ACK
* is received at other times. But we have to be careful to not return
* spurious -ENXIO because that will prevent i2c and drm edid functions
* from retrying. So return -ENXIO only when gmbus properly quiesces -
* timing out seems to happen when there _is_ a ddc chip present, but
* it's slow to respond and only answers on the 2nd retry.
*/
ret = -ENXIO;
if (gmbus_wait_idle(dev_priv)) {
DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
adapter->name);
ret = -ETIMEDOUT;
}
/* Toggle the Software Clear Interrupt bit. This has the effect
* of resetting the GMBUS controller and so clearing the
* BUS_ERROR raised by the slave's NAK.
*/
I915_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
I915_WRITE(GMBUS1 + reg_offset, 0);
I915_WRITE(GMBUS0 + reg_offset, 0);
DRM_DEBUG_KMS("GMBUS [%s] NAK for addr: %04x %c(%d)\n",
adapter->name, msgs[i].addr,
(msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
goto out;
timeout:
DRM_INFO("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
bus->adapter.name, bus->reg0 & 0xff);
I915_WRITE(GMBUS0 + reg_offset, 0);
/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
bus->force_bit = 1;
ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
out:
mutex_unlock(&dev_priv->gmbus_mutex);
intel_aux_display_runtime_put(dev_priv);
return ret;
}
static u32 gmbus_func(struct i2c_adapter *adapter)
{
return i2c_bit_algo.functionality(adapter) &
(I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
/* I2C_FUNC_10BIT_ADDR | */
I2C_FUNC_SMBUS_READ_BLOCK_DATA |
I2C_FUNC_SMBUS_BLOCK_PROC_CALL);
}
static const struct i2c_algorithm gmbus_algorithm = {
.master_xfer = gmbus_xfer,
.functionality = gmbus_func
};
/**
* intel_setup_gmbus - instantiate all Intel i2c GMBuses
* @dev: DRM device
*/
int intel_setup_gmbus(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret, i;
if (HAS_PCH_NOP(dev))
return 0;
else if (HAS_PCH_SPLIT(dev))
dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA;
else if (IS_VALLEYVIEW(dev))
dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE;
else
dev_priv->gpio_mmio_base = 0;
mutex_init(&dev_priv->gmbus_mutex);
init_waitqueue_head(&dev_priv->gmbus_wait_queue);
for (i = 0; i < GMBUS_NUM_PORTS; i++) {
struct intel_gmbus *bus = &dev_priv->gmbus[i];
u32 port = i + 1; /* +1 to map gmbus index to pin pair */
bus->adapter.owner = THIS_MODULE;
bus->adapter.class = I2C_CLASS_DDC;
snprintf(bus->adapter.name,
sizeof(bus->adapter.name),
"i915 gmbus %s",
gmbus_ports[i].name);
bus->adapter.dev.parent = &dev->pdev->dev;
bus->dev_priv = dev_priv;
bus->adapter.algo = &gmbus_algorithm;
/* By default use a conservative clock rate */
bus->reg0 = port | GMBUS_RATE_100KHZ;
/* gmbus seems to be broken on i830 */
if (IS_I830(dev))
bus->force_bit = 1;
intel_gpio_setup(bus, port);
ret = i2c_add_adapter(&bus->adapter);
if (ret)
goto err;
}
intel_i2c_reset(dev_priv->dev);
return 0;
err:
/* Tear down every adapter registered so far, including index 0 */
while (i--) {
struct intel_gmbus *bus = &dev_priv->gmbus[i];
i2c_del_adapter(&bus->adapter);
}
return ret;
}
struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *dev_priv,
unsigned port)
{
WARN_ON(!intel_gmbus_is_port_valid(port));
/* -1 to map pin pair to gmbus index */
return (intel_gmbus_is_port_valid(port)) ?
&dev_priv->gmbus[port - 1].adapter : NULL;
}
void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | speed;
}
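/*
 * Illustrative usage (assumed caller): pick the faster standard rate on
 * a bus known to tolerate it. GMBUS_RATE_400KHZ is one of the rate
 * encodings that fits the speed field masked above.
 *
 *	intel_gmbus_set_speed(adapter, GMBUS_RATE_400KHZ);
 */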
void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
bus->force_bit += force_bit ? 1 : -1;
DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n",
force_bit ? "en" : "dis", adapter->name,
bus->force_bit);
}
void intel_teardown_gmbus(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
for (i = 0; i < GMBUS_NUM_PORTS; i++) {
struct intel_gmbus *bus = &dev_priv->gmbus[i];
i2c_del_adapter(&bus->adapter);
}
}

File diff suppressed because it is too large

View file

@@ -0,0 +1,114 @@
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _INTEL_LRC_H_
#define _INTEL_LRC_H_
/* Execlists regs */
#define RING_ELSP(ring) ((ring)->mmio_base+0x230)
#define RING_EXECLIST_STATUS(ring) ((ring)->mmio_base+0x234)
#define RING_CONTEXT_CONTROL(ring) ((ring)->mmio_base+0x244)
#define RING_CONTEXT_STATUS_BUF(ring) ((ring)->mmio_base+0x370)
#define RING_CONTEXT_STATUS_PTR(ring) ((ring)->mmio_base+0x3a0)
/* Logical Rings */
void intel_logical_ring_stop(struct intel_engine_cs *ring);
void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
int intel_logical_rings_init(struct drm_device *dev);
int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf);
void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf);
/**
* intel_logical_ring_advance() - advance the ringbuffer tail
* @ringbuf: Ringbuffer to advance.
*
* The tail is only updated in our logical ringbuffer struct.
*/
static inline void intel_logical_ring_advance(struct intel_ringbuffer *ringbuf)
{
ringbuf->tail &= ringbuf->size - 1;
}
/**
* intel_logical_ring_emit() - write a DWORD to the ringbuffer.
* @ringbuf: Ringbuffer to write to.
* @data: DWORD to write.
*/
static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
u32 data)
{
iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
ringbuf->tail += 4;
}
int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords);
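/*
 * Illustrative usage sketch (assumed caller, not part of this header):
 * reserve space, write DWORDs, then advance the logical ring tail.
 * MI_NOOP is assumed to come from i915_reg.h via i915_drv.h.
 */
static inline int example_emit_noops(struct intel_ringbuffer *ringbuf)
{
	int ret = intel_logical_ring_begin(ringbuf, 2);

	if (ret)
		return ret;
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_advance(ringbuf);
	return 0;
}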
/* Logical Ring Contexts */
int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
struct intel_context *ctx);
void intel_lr_context_free(struct intel_context *ctx);
int intel_lr_context_deferred_create(struct intel_context *ctx,
struct intel_engine_cs *ring);
/* Execlists */
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
struct intel_engine_cs *ring,
struct intel_context *ctx,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas,
struct drm_i915_gem_object *batch_obj,
u64 exec_start, u32 flags);
u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
/**
* struct intel_ctx_submit_request - queued context submission request
* @ctx: Context to submit to the ELSP.
* @ring: Engine to submit it to.
* @tail: how far in the context's ringbuffer this request goes to.
* @execlist_link: link in the submission queue.
* @work: workqueue for processing this request in a bottom half.
* @elsp_submitted: no. of times this request has been sent to the ELSP.
*
* The ELSP only accepts two elements at a time, so we queue context/tail
* pairs on a given queue (ring->execlist_queue) until the hardware is
* available. The queue serves a double purpose: we also use it to keep track
* of the up to 2 contexts currently in the hardware (usually one in execution
* and the other queued up by the GPU): We only remove elements from the head
* of the queue when the hardware informs us that an element has been
* completed.
*
* All accesses to the queue are mediated by a spinlock (ring->execlist_lock).
*/
struct intel_ctx_submit_request {
struct intel_context *ctx;
struct intel_engine_cs *ring;
u32 tail;
struct list_head execlist_link;
struct work_struct work;
int elsp_submitted;
};
void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring);
#endif /* _INTEL_LRC_H_ */

File diff suppressed because it is too large

View file

@@ -0,0 +1,128 @@
/*
* Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
* Copyright (c) 2007, 2010 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/fb.h>
#include <drm/drm_edid.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "i915_drv.h"
/**
* intel_connector_update_modes - update connector from edid
* @connector: DRM connector device to use
* @edid: previously read EDID information
*/
int intel_connector_update_modes(struct drm_connector *connector,
struct edid *edid)
{
int ret;
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
drm_edid_to_eld(connector, edid);
return ret;
}
/**
* intel_ddc_get_modes - get modelist from monitor
* @connector: DRM connector device to use
* @adapter: i2c adapter
*
* Fetch the EDID information from @connector using the DDC bus.
*/
int intel_ddc_get_modes(struct drm_connector *connector,
struct i2c_adapter *adapter)
{
struct edid *edid;
int ret;
edid = drm_get_edid(connector, adapter);
if (!edid)
return 0;
ret = intel_connector_update_modes(connector, edid);
kfree(edid);
return ret;
}
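/*
 * Illustrative sketch (assumed caller): a connector ->get_modes() hook
 * typically just forwards to intel_ddc_get_modes() with the GMBUS
 * adapter its DDC lines are wired to; GMBUS_PORT_DPC is an assumption
 * for the example.
 */
static int example_connector_get_modes(struct drm_connector *connector)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;

	return intel_ddc_get_modes(connector,
			intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPC));
}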
static const struct drm_prop_enum_list force_audio_names[] = {
{ HDMI_AUDIO_OFF_DVI, "force-dvi" },
{ HDMI_AUDIO_OFF, "off" },
{ HDMI_AUDIO_AUTO, "auto" },
{ HDMI_AUDIO_ON, "on" },
};
void
intel_attach_force_audio_property(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_property *prop;
prop = dev_priv->force_audio_property;
if (prop == NULL) {
prop = drm_property_create_enum(dev, 0,
"audio",
force_audio_names,
ARRAY_SIZE(force_audio_names));
if (prop == NULL)
return;
dev_priv->force_audio_property = prop;
}
drm_object_attach_property(&connector->base, prop, 0);
}
static const struct drm_prop_enum_list broadcast_rgb_names[] = {
{ INTEL_BROADCAST_RGB_AUTO, "Automatic" },
{ INTEL_BROADCAST_RGB_FULL, "Full" },
{ INTEL_BROADCAST_RGB_LIMITED, "Limited 16:235" },
};
void
intel_attach_broadcast_rgb_property(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_property *prop;
prop = dev_priv->broadcast_rgb_property;
if (prop == NULL) {
prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
"Broadcast RGB",
broadcast_rgb_names,
ARRAY_SIZE(broadcast_rgb_names));
if (prop == NULL)
return;
dev_priv->broadcast_rgb_property = prop;
}
drm_object_attach_property(&connector->base, prop, 0);
}

View file

@@ -0,0 +1,917 @@
/*
* Copyright 2008 Intel Corporation <hong.liu@intel.com>
* Copyright 2008 Red Hat <mjg@redhat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/acpi.h>
#include <acpi/video.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_drv.h"
#define PCI_ASLE 0xe4
#define PCI_ASLS 0xfc
#define PCI_SWSCI 0xe8
#define PCI_SWSCI_SCISEL (1 << 15)
#define PCI_SWSCI_GSSCIE (1 << 0)
#define OPREGION_HEADER_OFFSET 0
#define OPREGION_ACPI_OFFSET 0x100
#define ACPI_CLID 0x01ac /* current lid state indicator */
#define ACPI_CDCK 0x01b0 /* current docking state indicator */
#define OPREGION_SWSCI_OFFSET 0x200
#define OPREGION_ASLE_OFFSET 0x300
#define OPREGION_VBT_OFFSET 0x400
#define OPREGION_SIGNATURE "IntelGraphicsMem"
#define MBOX_ACPI (1<<0)
#define MBOX_SWSCI (1<<1)
#define MBOX_ASLE (1<<2)
struct opregion_header {
u8 signature[16];
u32 size;
u32 opregion_ver;
u8 bios_ver[32];
u8 vbios_ver[16];
u8 driver_ver[16];
u32 mboxes;
u8 reserved[164];
} __packed;
/* OpRegion mailbox #1: public ACPI methods */
struct opregion_acpi {
u32 drdy; /* driver readiness */
u32 csts; /* notification status */
u32 cevt; /* current event */
u8 rsvd1[20];
u32 didl[8]; /* supported display devices ID list */
u32 cpdl[8]; /* currently presented display list */
u32 cadl[8]; /* currently active display list */
u32 nadl[8]; /* next active devices list */
u32 aslp; /* ASL sleep time-out */
u32 tidx; /* toggle table index */
u32 chpd; /* current hotplug enable indicator */
u32 clid; /* current lid state */
u32 cdck; /* current docking state */
u32 sxsw; /* Sx state resume */
u32 evts; /* ASL supported events */
u32 cnot; /* current OS notification */
u32 nrdy; /* driver status */
u8 rsvd2[60];
} __packed;
/* OpRegion mailbox #2: SWSCI */
struct opregion_swsci {
u32 scic; /* SWSCI command|status|data */
u32 parm; /* command parameters */
u32 dslp; /* driver sleep time-out */
u8 rsvd[244];
} __packed;
/* OpRegion mailbox #3: ASLE */
struct opregion_asle {
u32 ardy; /* driver readiness */
u32 aslc; /* ASLE interrupt command */
u32 tche; /* technology enabled indicator */
u32 alsi; /* current ALS illuminance reading */
u32 bclp; /* backlight brightness to set */
u32 pfit; /* panel fitting state */
u32 cblv; /* current brightness level */
u16 bclm[20]; /* backlight level duty cycle mapping table */
u32 cpfm; /* current panel fitting mode */
u32 epfm; /* enabled panel fitting modes */
u8 plut[74]; /* panel LUT and identifier */
u32 pfmb; /* PWM freq and min brightness */
u32 cddv; /* color correction default values */
u32 pcft; /* power conservation features */
u32 srot; /* supported rotation angles */
u32 iuer; /* IUER events */
u8 rsvd[86];
} __packed;
/* Driver readiness indicator */
#define ASLE_ARDY_READY (1 << 0)
#define ASLE_ARDY_NOT_READY (0 << 0)
/* ASLE Interrupt Command (ASLC) bits */
#define ASLC_SET_ALS_ILLUM (1 << 0)
#define ASLC_SET_BACKLIGHT (1 << 1)
#define ASLC_SET_PFIT (1 << 2)
#define ASLC_SET_PWM_FREQ (1 << 3)
#define ASLC_SUPPORTED_ROTATION_ANGLES (1 << 4)
#define ASLC_BUTTON_ARRAY (1 << 5)
#define ASLC_CONVERTIBLE_INDICATOR (1 << 6)
#define ASLC_DOCKING_INDICATOR (1 << 7)
#define ASLC_ISCT_STATE_CHANGE (1 << 8)
#define ASLC_REQ_MSK 0x1ff
/* response bits */
#define ASLC_ALS_ILLUM_FAILED (1 << 10)
#define ASLC_BACKLIGHT_FAILED (1 << 12)
#define ASLC_PFIT_FAILED (1 << 14)
#define ASLC_PWM_FREQ_FAILED (1 << 16)
#define ASLC_ROTATION_ANGLES_FAILED (1 << 18)
#define ASLC_BUTTON_ARRAY_FAILED (1 << 20)
#define ASLC_CONVERTIBLE_FAILED (1 << 22)
#define ASLC_DOCKING_FAILED (1 << 24)
#define ASLC_ISCT_STATE_FAILED (1 << 26)
/* Technology enabled indicator */
#define ASLE_TCHE_ALS_EN (1 << 0)
#define ASLE_TCHE_BLC_EN (1 << 1)
#define ASLE_TCHE_PFIT_EN (1 << 2)
#define ASLE_TCHE_PFMB_EN (1 << 3)
/* ASLE backlight brightness to set */
#define ASLE_BCLP_VALID (1<<31)
#define ASLE_BCLP_MSK (~(1<<31))
/* ASLE panel fitting request */
#define ASLE_PFIT_VALID (1<<31)
#define ASLE_PFIT_CENTER (1<<0)
#define ASLE_PFIT_STRETCH_TEXT (1<<1)
#define ASLE_PFIT_STRETCH_GFX (1<<2)
/* PWM frequency and minimum brightness */
#define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
#define ASLE_PFMB_PWM_MASK (0x7ffffe00)
#define ASLE_PFMB_PWM_VALID (1<<31)
#define ASLE_CBLV_VALID (1<<31)
/* IUER */
#define ASLE_IUER_DOCKING (1 << 7)
#define ASLE_IUER_CONVERTIBLE (1 << 6)
#define ASLE_IUER_ROTATION_LOCK_BTN (1 << 4)
#define ASLE_IUER_VOLUME_DOWN_BTN (1 << 3)
#define ASLE_IUER_VOLUME_UP_BTN (1 << 2)
#define ASLE_IUER_WINDOWS_BTN (1 << 1)
#define ASLE_IUER_POWER_BTN (1 << 0)
/* Software System Control Interrupt (SWSCI) */
#define SWSCI_SCIC_INDICATOR (1 << 0)
#define SWSCI_SCIC_MAIN_FUNCTION_SHIFT 1
#define SWSCI_SCIC_MAIN_FUNCTION_MASK (0xf << 1)
#define SWSCI_SCIC_SUB_FUNCTION_SHIFT 8
#define SWSCI_SCIC_SUB_FUNCTION_MASK (0xff << 8)
#define SWSCI_SCIC_EXIT_PARAMETER_SHIFT 8
#define SWSCI_SCIC_EXIT_PARAMETER_MASK (0xff << 8)
#define SWSCI_SCIC_EXIT_STATUS_SHIFT 5
#define SWSCI_SCIC_EXIT_STATUS_MASK (7 << 5)
#define SWSCI_SCIC_EXIT_STATUS_SUCCESS 1
#define SWSCI_FUNCTION_CODE(main, sub) \
((main) << SWSCI_SCIC_MAIN_FUNCTION_SHIFT | \
(sub) << SWSCI_SCIC_SUB_FUNCTION_SHIFT)
/* SWSCI: Get BIOS Data (GBDA) */
#define SWSCI_GBDA 4
#define SWSCI_GBDA_SUPPORTED_CALLS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 0)
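/*
 * Worked encoding (illustrative, not in the original source):
 * SWSCI_GBDA_SUPPORTED_CALLS above is SWSCI_FUNCTION_CODE(4, 0),
 * i.e. (4 << 1) | (0 << 8) == 0x08, so the main function lands in
 * bits 4:1 and the sub-function in bits 15:8 of the SCIC register.
 */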
#define SWSCI_GBDA_REQUESTED_CALLBACKS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 1)
#define SWSCI_GBDA_BOOT_DISPLAY_PREF SWSCI_FUNCTION_CODE(SWSCI_GBDA, 4)
#define SWSCI_GBDA_PANEL_DETAILS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 5)
#define SWSCI_GBDA_TV_STANDARD SWSCI_FUNCTION_CODE(SWSCI_GBDA, 6)
#define SWSCI_GBDA_INTERNAL_GRAPHICS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 7)
#define SWSCI_GBDA_SPREAD_SPECTRUM SWSCI_FUNCTION_CODE(SWSCI_GBDA, 10)
/* SWSCI: System BIOS Callbacks (SBCB) */
#define SWSCI_SBCB 6
#define SWSCI_SBCB_SUPPORTED_CALLBACKS SWSCI_FUNCTION_CODE(SWSCI_SBCB, 0)
#define SWSCI_SBCB_INIT_COMPLETION SWSCI_FUNCTION_CODE(SWSCI_SBCB, 1)
#define SWSCI_SBCB_PRE_HIRES_SET_MODE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 3)
#define SWSCI_SBCB_POST_HIRES_SET_MODE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 4)
#define SWSCI_SBCB_DISPLAY_SWITCH SWSCI_FUNCTION_CODE(SWSCI_SBCB, 5)
#define SWSCI_SBCB_SET_TV_FORMAT SWSCI_FUNCTION_CODE(SWSCI_SBCB, 6)
#define SWSCI_SBCB_ADAPTER_POWER_STATE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 7)
#define SWSCI_SBCB_DISPLAY_POWER_STATE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 8)
#define SWSCI_SBCB_SET_BOOT_DISPLAY SWSCI_FUNCTION_CODE(SWSCI_SBCB, 9)
#define SWSCI_SBCB_SET_PANEL_DETAILS SWSCI_FUNCTION_CODE(SWSCI_SBCB, 10)
#define SWSCI_SBCB_SET_INTERNAL_GFX SWSCI_FUNCTION_CODE(SWSCI_SBCB, 11)
#define SWSCI_SBCB_POST_HIRES_TO_DOS_FS SWSCI_FUNCTION_CODE(SWSCI_SBCB, 16)
#define SWSCI_SBCB_SUSPEND_RESUME SWSCI_FUNCTION_CODE(SWSCI_SBCB, 17)
#define SWSCI_SBCB_SET_SPREAD_SPECTRUM SWSCI_FUNCTION_CODE(SWSCI_SBCB, 18)
#define SWSCI_SBCB_POST_VBE_PM SWSCI_FUNCTION_CODE(SWSCI_SBCB, 19)
#define SWSCI_SBCB_ENABLE_DISABLE_AUDIO SWSCI_FUNCTION_CODE(SWSCI_SBCB, 21)
#define ACPI_OTHER_OUTPUT (0<<8)
#define ACPI_VGA_OUTPUT (1<<8)
#define ACPI_TV_OUTPUT (2<<8)
#define ACPI_DIGITAL_OUTPUT (3<<8)
#define ACPI_LVDS_OUTPUT (4<<8)
#define MAX_DSLP 1500
#ifdef CONFIG_ACPI
static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct opregion_swsci __iomem *swsci = dev_priv->opregion.swsci;
u32 main_function, sub_function, scic;
u16 pci_swsci;
u32 dslp;
if (!swsci)
return -ENODEV;
main_function = (function & SWSCI_SCIC_MAIN_FUNCTION_MASK) >>
SWSCI_SCIC_MAIN_FUNCTION_SHIFT;
sub_function = (function & SWSCI_SCIC_SUB_FUNCTION_MASK) >>
SWSCI_SCIC_SUB_FUNCTION_SHIFT;
/* Check if we can call the function. See swsci_setup for details. */
if (main_function == SWSCI_SBCB) {
if ((dev_priv->opregion.swsci_sbcb_sub_functions &
(1 << sub_function)) == 0)
return -EINVAL;
} else if (main_function == SWSCI_GBDA) {
if ((dev_priv->opregion.swsci_gbda_sub_functions &
(1 << sub_function)) == 0)
return -EINVAL;
}
/* Driver sleep timeout in ms. */
dslp = ioread32(&swsci->dslp);
if (!dslp) {
/* The spec says 2ms should be the default, but it's too small
* for some machines. */
dslp = 50;
} else if (dslp > MAX_DSLP) {
/* Hey bios, trust must be earned. */
DRM_INFO_ONCE("ACPI BIOS requests an excessive sleep of %u ms, "
"using %u ms instead\n", dslp, MAX_DSLP);
dslp = MAX_DSLP;
}
/* The spec tells us to do this, but we are the only user... */
scic = ioread32(&swsci->scic);
if (scic & SWSCI_SCIC_INDICATOR) {
DRM_DEBUG_DRIVER("SWSCI request already in progress\n");
return -EBUSY;
}
scic = function | SWSCI_SCIC_INDICATOR;
iowrite32(parm, &swsci->parm);
iowrite32(scic, &swsci->scic);
/* Ensure SCI event is selected and event trigger is cleared. */
pci_read_config_word(dev->pdev, PCI_SWSCI, &pci_swsci);
if (!(pci_swsci & PCI_SWSCI_SCISEL) || (pci_swsci & PCI_SWSCI_GSSCIE)) {
pci_swsci |= PCI_SWSCI_SCISEL;
pci_swsci &= ~PCI_SWSCI_GSSCIE;
pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);
}
/* Use event trigger to tell bios to check the mail. */
pci_swsci |= PCI_SWSCI_GSSCIE;
pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);
/* Poll for the result. */
#define C (((scic = ioread32(&swsci->scic)) & SWSCI_SCIC_INDICATOR) == 0)
if (wait_for(C, dslp)) {
DRM_DEBUG_DRIVER("SWSCI request timed out\n");
return -ETIMEDOUT;
}
scic = (scic & SWSCI_SCIC_EXIT_STATUS_MASK) >>
SWSCI_SCIC_EXIT_STATUS_SHIFT;
/* Note: scic == 0 is an error! */
if (scic != SWSCI_SCIC_EXIT_STATUS_SUCCESS) {
DRM_DEBUG_DRIVER("SWSCI request error %u\n", scic);
return -EIO;
}
if (parm_out)
*parm_out = ioread32(&swsci->parm);
return 0;
#undef C
}
#define DISPLAY_TYPE_CRT 0
#define DISPLAY_TYPE_TV 1
#define DISPLAY_TYPE_EXTERNAL_FLAT_PANEL 2
#define DISPLAY_TYPE_INTERNAL_FLAT_PANEL 3
int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
bool enable)
{
struct drm_device *dev = intel_encoder->base.dev;
u32 parm = 0;
u32 type = 0;
u32 port;
/* don't care about old stuff for now */
if (!HAS_DDI(dev))
return 0;
port = intel_ddi_get_encoder_port(intel_encoder);
if (port == PORT_E) {
port = 0;
} else {
parm |= 1 << port;
port++;
}
if (!enable)
parm |= 4 << 8;
switch (intel_encoder->type) {
case INTEL_OUTPUT_ANALOG:
type = DISPLAY_TYPE_CRT;
break;
case INTEL_OUTPUT_UNKNOWN:
case INTEL_OUTPUT_DISPLAYPORT:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_DP_MST:
type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL;
break;
case INTEL_OUTPUT_EDP:
type = DISPLAY_TYPE_INTERNAL_FLAT_PANEL;
break;
default:
WARN_ONCE(1, "unsupported intel_encoder type %d\n",
intel_encoder->type);
return -EINVAL;
}
parm |= type << (16 + port * 3);
return swsci(dev, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL);
}
static const struct {
pci_power_t pci_power_state;
u32 parm;
} power_state_map[] = {
{ PCI_D0, 0x00 },
{ PCI_D1, 0x01 },
{ PCI_D2, 0x02 },
{ PCI_D3hot, 0x04 },
{ PCI_D3cold, 0x04 },
};
int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
{
int i;
if (!HAS_DDI(dev))
return 0;
for (i = 0; i < ARRAY_SIZE(power_state_map); i++) {
if (state == power_state_map[i].pci_power_state)
return swsci(dev, SWSCI_SBCB_ADAPTER_POWER_STATE,
power_state_map[i].parm, NULL);
}
return -EINVAL;
}
/*
* If the vendor backlight interface is not in use and ACPI backlight interface
* is broken, do not bother processing backlight change requests from firmware.
*/
static bool should_ignore_backlight_request(void)
{
return acpi_video_backlight_support() &&
!acpi_video_verify_backlight_support();
}
static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_connector *intel_connector;
struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
if (should_ignore_backlight_request()) {
DRM_DEBUG_KMS("opregion backlight request ignored\n");
return 0;
}
if (!(bclp & ASLE_BCLP_VALID))
return ASLC_BACKLIGHT_FAILED;
bclp &= ASLE_BCLP_MSK;
if (bclp > 255)
return ASLC_BACKLIGHT_FAILED;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
/*
* Update backlight on all connectors that support backlight (usually
* only one).
*/
DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head)
intel_panel_set_backlight_acpi(intel_connector, bclp, 255);
iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
return 0;
}
static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
{
/* alsi is the current ALS reading in lux. 0 indicates below sensor
range, 0xffff indicates above sensor range. 1-0xfffe are valid */
DRM_DEBUG_DRIVER("Illum is not supported\n");
return ASLC_ALS_ILLUM_FAILED;
}
static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
{
DRM_DEBUG_DRIVER("PWM freq is not supported\n");
return ASLC_PWM_FREQ_FAILED;
}
static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
{
/* Panel fitting is currently controlled by the X code, so this is a
noop until modesetting support works fully */
DRM_DEBUG_DRIVER("Pfit is not supported\n");
return ASLC_PFIT_FAILED;
}
static u32 asle_set_supported_rotation_angles(struct drm_device *dev, u32 srot)
{
DRM_DEBUG_DRIVER("SROT is not supported\n");
return ASLC_ROTATION_ANGLES_FAILED;
}
static u32 asle_set_button_array(struct drm_device *dev, u32 iuer)
{
if (!iuer)
DRM_DEBUG_DRIVER("Button array event is not supported (nothing)\n");
if (iuer & ASLE_IUER_ROTATION_LOCK_BTN)
DRM_DEBUG_DRIVER("Button array event is not supported (rotation lock)\n");
if (iuer & ASLE_IUER_VOLUME_DOWN_BTN)
DRM_DEBUG_DRIVER("Button array event is not supported (volume down)\n");
if (iuer & ASLE_IUER_VOLUME_UP_BTN)
DRM_DEBUG_DRIVER("Button array event is not supported (volume up)\n");
if (iuer & ASLE_IUER_WINDOWS_BTN)
DRM_DEBUG_DRIVER("Button array event is not supported (windows)\n");
if (iuer & ASLE_IUER_POWER_BTN)
DRM_DEBUG_DRIVER("Button array event is not supported (power)\n");
return ASLC_BUTTON_ARRAY_FAILED;
}
static u32 asle_set_convertible(struct drm_device *dev, u32 iuer)
{
if (iuer & ASLE_IUER_CONVERTIBLE)
DRM_DEBUG_DRIVER("Convertible is not supported (clamshell)\n");
else
DRM_DEBUG_DRIVER("Convertible is not supported (slate)\n");
return ASLC_CONVERTIBLE_FAILED;
}
static u32 asle_set_docking(struct drm_device *dev, u32 iuer)
{
if (iuer & ASLE_IUER_DOCKING)
DRM_DEBUG_DRIVER("Docking is not supported (docked)\n");
else
DRM_DEBUG_DRIVER("Docking is not supported (undocked)\n");
return ASLC_DOCKING_FAILED;
}
static u32 asle_isct_state(struct drm_device *dev)
{
DRM_DEBUG_DRIVER("ISCT is not supported\n");
return ASLC_ISCT_STATE_FAILED;
}
static void asle_work(struct work_struct *work)
{
struct intel_opregion *opregion =
container_of(work, struct intel_opregion, asle_work);
struct drm_i915_private *dev_priv =
container_of(opregion, struct drm_i915_private, opregion);
struct drm_device *dev = dev_priv->dev;
struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
u32 aslc_stat = 0;
u32 aslc_req;
if (!asle)
return;
aslc_req = ioread32(&asle->aslc);
if (!(aslc_req & ASLC_REQ_MSK)) {
DRM_DEBUG_DRIVER("No request on ASLC interrupt 0x%08x\n",
aslc_req);
return;
}
if (aslc_req & ASLC_SET_ALS_ILLUM)
aslc_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi));
if (aslc_req & ASLC_SET_BACKLIGHT)
aslc_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
if (aslc_req & ASLC_SET_PFIT)
aslc_stat |= asle_set_pfit(dev, ioread32(&asle->pfit));
if (aslc_req & ASLC_SET_PWM_FREQ)
aslc_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb));
if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES)
aslc_stat |= asle_set_supported_rotation_angles(dev,
ioread32(&asle->srot));
if (aslc_req & ASLC_BUTTON_ARRAY)
aslc_stat |= asle_set_button_array(dev, ioread32(&asle->iuer));
if (aslc_req & ASLC_CONVERTIBLE_INDICATOR)
aslc_stat |= asle_set_convertible(dev, ioread32(&asle->iuer));
if (aslc_req & ASLC_DOCKING_INDICATOR)
aslc_stat |= asle_set_docking(dev, ioread32(&asle->iuer));
if (aslc_req & ASLC_ISCT_STATE_CHANGE)
aslc_stat |= asle_isct_state(dev);
iowrite32(aslc_stat, &asle->aslc);
}
void intel_opregion_asle_intr(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->opregion.asle)
schedule_work(&dev_priv->opregion.asle_work);
}
#define ACPI_EV_DISPLAY_SWITCH (1<<0)
#define ACPI_EV_LID (1<<1)
#define ACPI_EV_DOCK (1<<2)
static struct intel_opregion *system_opregion;
static int intel_opregion_video_event(struct notifier_block *nb,
unsigned long val, void *data)
{
/* The only video events relevant to opregion are 0x80. These indicate
either a docking event, lid switch or display switch request. In
Linux, these are handled by the dock, button and video drivers.
*/
struct opregion_acpi __iomem *acpi;
struct acpi_bus_event *event = data;
int ret = NOTIFY_OK;
if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
return NOTIFY_DONE;
if (!system_opregion)
return NOTIFY_DONE;
acpi = system_opregion->acpi;
if (event->type == 0x80 &&
(ioread32(&acpi->cevt) & 1) == 0)
ret = NOTIFY_BAD;
iowrite32(0, &acpi->csts);
return ret;
}
static struct notifier_block intel_opregion_notifier = {
.notifier_call = intel_opregion_video_event,
};
/*
* Initialise the DIDL field in opregion. This passes a list of devices to
* the firmware. Values are defined by section B.4.2 of the ACPI specification
* (version 3)
*/
static void intel_didl_outputs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
struct drm_connector *connector;
acpi_handle handle;
struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
unsigned long long device_id;
acpi_status status;
u32 temp;
int i = 0;
handle = ACPI_HANDLE(&dev->pdev->dev);
if (!handle || acpi_bus_get_device(handle, &acpi_dev))
return;
if (acpi_is_video_device(handle))
acpi_video_bus = acpi_dev;
else {
list_for_each_entry(acpi_cdev, &acpi_dev->children, node) {
if (acpi_is_video_device(acpi_cdev->handle)) {
acpi_video_bus = acpi_cdev;
break;
}
}
}
if (!acpi_video_bus) {
pr_warn("No ACPI video bus found\n");
return;
}
list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
if (i >= 8) {
dev_dbg(&dev->pdev->dev,
"More than 8 outputs detected via ACPI\n");
return;
}
status =
acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
NULL, &device_id);
if (ACPI_SUCCESS(status)) {
if (!device_id)
goto blind_set;
iowrite32((u32)(device_id & 0x0f0f),
&opregion->acpi->didl[i]);
i++;
}
}
end:
/* If fewer than 8 outputs, the list must be null terminated */
if (i < 8)
iowrite32(0, &opregion->acpi->didl[i]);
return;
blind_set:
i = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
int output_type = ACPI_OTHER_OUTPUT;
if (i >= 8) {
dev_dbg(&dev->pdev->dev,
"More than 8 outputs in connector list\n");
return;
}
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_VGA:
case DRM_MODE_CONNECTOR_DVIA:
output_type = ACPI_VGA_OUTPUT;
break;
case DRM_MODE_CONNECTOR_Composite:
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Component:
case DRM_MODE_CONNECTOR_9PinDIN:
output_type = ACPI_TV_OUTPUT;
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_HDMIB:
output_type = ACPI_DIGITAL_OUTPUT;
break;
case DRM_MODE_CONNECTOR_LVDS:
output_type = ACPI_LVDS_OUTPUT;
break;
}
temp = ioread32(&opregion->acpi->didl[i]);
iowrite32(temp | (1<<31) | output_type | i,
&opregion->acpi->didl[i]);
i++;
}
goto end;
}
static void intel_setup_cadls(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
int i = 0;
u32 disp_id;
/* Initialize the CADL field by duplicating the DIDL values.
* Technically, this is not always correct, as display outputs may exist
* but not be active. This initialization is necessary for some Clevo
* laptops that check this field before processing the brightness and
* display switching hotkeys. Just like DIDL, CADL is NULL-terminated if
* there are fewer than eight devices. */
do {
disp_id = ioread32(&opregion->acpi->didl[i]);
iowrite32(disp_id, &opregion->acpi->cadl[i]);
} while (++i < 8 && disp_id != 0);
}
void intel_opregion_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
if (!opregion->header)
return;
if (opregion->acpi) {
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_didl_outputs(dev);
intel_setup_cadls(dev);
}
/* Notify BIOS we are ready to handle ACPI video ext notifs.
* Right now, all the events are handled by the ACPI video module.
* We don't actually need to do anything with them. */
iowrite32(0, &opregion->acpi->csts);
iowrite32(1, &opregion->acpi->drdy);
system_opregion = opregion;
register_acpi_notifier(&intel_opregion_notifier);
}
if (opregion->asle) {
iowrite32(ASLE_TCHE_BLC_EN, &opregion->asle->tche);
iowrite32(ASLE_ARDY_READY, &opregion->asle->ardy);
}
}
void intel_opregion_fini(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
if (!opregion->header)
return;
if (opregion->asle)
iowrite32(ASLE_ARDY_NOT_READY, &opregion->asle->ardy);
cancel_work_sync(&dev_priv->opregion.asle_work);
if (opregion->acpi) {
iowrite32(0, &opregion->acpi->drdy);
system_opregion = NULL;
unregister_acpi_notifier(&intel_opregion_notifier);
}
/* just clear all opregion memory pointers now */
iounmap(opregion->header);
opregion->header = NULL;
opregion->acpi = NULL;
opregion->swsci = NULL;
opregion->asle = NULL;
opregion->vbt = NULL;
opregion->lid_state = NULL;
}
static void swsci_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
bool requested_callbacks = false;
u32 tmp;
/* Sub-function code 0 is okay, let's allow them. */
opregion->swsci_gbda_sub_functions = 1;
opregion->swsci_sbcb_sub_functions = 1;
/* We use GBDA to ask for supported GBDA calls. */
if (swsci(dev, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) {
/* make the bits match the sub-function codes */
tmp <<= 1;
opregion->swsci_gbda_sub_functions |= tmp;
}
/*
* We also use GBDA to ask for _requested_ SBCB callbacks. The driver
* must not call interfaces that are not specifically requested by the
* bios.
*/
if (swsci(dev, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) {
/* here, the bits already match sub-function codes */
opregion->swsci_sbcb_sub_functions |= tmp;
requested_callbacks = true;
}
/*
* But we use SBCB to ask for _supported_ SBCB calls. This does not mean
* the callback is _requested_. But we still can't call interfaces that
* are not requested.
*/
if (swsci(dev, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) {
/* make the bits match the sub-function codes */
u32 low = tmp & 0x7ff;
u32 high = tmp & ~0xfff; /* bit 11 is reserved */
tmp = (high << 4) | (low << 1) | 1;
/* best guess what to do with supported wrt requested */
if (requested_callbacks) {
u32 req = opregion->swsci_sbcb_sub_functions;
if ((req & tmp) != req)
DRM_DEBUG_DRIVER("SWSCI BIOS requested (%08x) SBCB callbacks that are not supported (%08x)\n", req, tmp);
/* XXX: for now, trust the requested callbacks */
/* opregion->swsci_sbcb_sub_functions &= tmp; */
} else {
opregion->swsci_sbcb_sub_functions |= tmp;
}
}
DRM_DEBUG_DRIVER("SWSCI GBDA callbacks %08x, SBCB callbacks %08x\n",
opregion->swsci_gbda_sub_functions,
opregion->swsci_sbcb_sub_functions);
}
#else /* CONFIG_ACPI */
static inline void swsci_setup(struct drm_device *dev) {}
#endif /* CONFIG_ACPI */
int intel_opregion_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
void __iomem *base;
u32 asls, mboxes;
char buf[sizeof(OPREGION_SIGNATURE)];
int err = 0;
pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
if (asls == 0) {
DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
return -ENOTSUPP;
}
#ifdef CONFIG_ACPI
INIT_WORK(&opregion->asle_work, asle_work);
#endif
base = acpi_os_ioremap(asls, OPREGION_SIZE);
if (!base)
return -ENOMEM;
memcpy_fromio(buf, base, sizeof(buf));
if (memcmp(buf, OPREGION_SIGNATURE, 16)) {
DRM_DEBUG_DRIVER("opregion signature mismatch\n");
err = -EINVAL;
goto err_out;
}
opregion->header = base;
opregion->vbt = base + OPREGION_VBT_OFFSET;
opregion->lid_state = base + ACPI_CLID;
mboxes = ioread32(&opregion->header->mboxes);
if (mboxes & MBOX_ACPI) {
DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
opregion->acpi = base + OPREGION_ACPI_OFFSET;
}
if (mboxes & MBOX_SWSCI) {
DRM_DEBUG_DRIVER("SWSCI supported\n");
opregion->swsci = base + OPREGION_SWSCI_OFFSET;
swsci_setup(dev);
}
if (mboxes & MBOX_ASLE) {
DRM_DEBUG_DRIVER("ASLE supported\n");
opregion->asle = base + OPREGION_ASLE_OFFSET;
iowrite32(ASLE_ARDY_NOT_READY, &opregion->asle->ardy);
}
return 0;
err_out:
iounmap(base);
return err;
}

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@@ -0,0 +1,40 @@
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _INTEL_RENDERSTATE_H
#define _INTEL_RENDERSTATE_H
#include "i915_drv.h"
extern const struct intel_renderstate_rodata gen6_null_state;
extern const struct intel_renderstate_rodata gen7_null_state;
extern const struct intel_renderstate_rodata gen8_null_state;
#define RO_RENDERSTATE(_g) \
const struct intel_renderstate_rodata gen ## _g ## _null_state = { \
.reloc = gen ## _g ## _null_state_relocs, \
.batch = gen ## _g ## _null_state_batch, \
.batch_items = sizeof(gen ## _g ## _null_state_batch)/4, \
}
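/*
 * Illustrative expansion (not in the original header): RO_RENDERSTATE(6)
 * defines gen6_null_state with .reloc pointing at gen6_null_state_relocs,
 * .batch at gen6_null_state_batch, and .batch_items counted in dwords
 * (sizeof(gen6_null_state_batch) / 4).
 */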
#endif /* _INTEL_RENDERSTATE_H */

View file

@@ -0,0 +1,290 @@
#include "intel_renderstate.h"
static const u32 gen6_null_state_relocs[] = {
0x00000020,
0x00000024,
0x0000002c,
0x000001e0,
0x000001e4,
-1,
};
static const u32 gen6_null_state_batch[] = {
0x69040000,
0x790d0001,
0x00000000,
0x00000000,
0x78180000,
0x00000001,
0x61010008,
0x00000000,
0x00000001, /* reloc */
0x00000001, /* reloc */
0x00000000,
0x00000001, /* reloc */
0x00000000,
0x00000001,
0x00000000,
0x00000001,
0x61020000,
0x00000000,
0x78050001,
0x00000018,
0x00000000,
0x780d1002,
0x00000000,
0x00000000,
0x00000420,
0x78150003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78100004,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78160003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78110005,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78120002,
0x00000000,
0x00000000,
0x00000000,
0x78170003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x79050005,
0xe0040000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x79100000,
0x00000000,
0x79000002,
0xffffffff,
0x00000000,
0x00000000,
0x780e0002,
0x00000441,
0x00000401,
0x00000401,
0x78021002,
0x00000000,
0x00000000,
0x00000400,
0x78130012,
0x00400810,
0x00000000,
0x20000000,
0x04000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78140007,
0x00000280,
0x08080000,
0x00000000,
0x00060000,
0x4e080002,
0x00100400,
0x00000000,
0x00000000,
0x78090005,
0x02000000,
0x22220000,
0x02f60000,
0x11330000,
0x02850004,
0x11220000,
0x78011002,
0x00000000,
0x00000000,
0x00000200,
0x78080003,
0x00002000,
0x00000448, /* reloc */
0x00000448, /* reloc */
0x00000000,
0x05000000, /* cmds end */
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000220, /* state start */
0x00000240,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x0060005a,
0x204077be,
0x000000c0,
0x008d0040,
0x0060005a,
0x206077be,
0x000000c0,
0x008d0080,
0x0060005a,
0x208077be,
0x000000d0,
0x008d0040,
0x0060005a,
0x20a077be,
0x000000d0,
0x008d0080,
0x00000201,
0x20080061,
0x00000000,
0x00000000,
0x00600001,
0x20200022,
0x008d0000,
0x00000000,
0x02800031,
0x21c01cc9,
0x00000020,
0x0a8a0001,
0x00600001,
0x204003be,
0x008d01c0,
0x00000000,
0x00600001,
0x206003be,
0x008d01e0,
0x00000000,
0x00600001,
0x208003be,
0x008d0200,
0x00000000,
0x00600001,
0x20a003be,
0x008d0220,
0x00000000,
0x00600001,
0x20c003be,
0x008d0240,
0x00000000,
0x00600001,
0x20e003be,
0x008d0260,
0x00000000,
0x00600001,
0x210003be,
0x008d0280,
0x00000000,
0x00600001,
0x212003be,
0x008d02a0,
0x00000000,
0x05800031,
0x24001cc8,
0x00000040,
0x90019000,
0x0000007e,
0x00000000,
0x00000000,
0x00000000,
0x0000007e,
0x00000000,
0x00000000,
0x00000000,
0x0000007e,
0x00000000,
0x00000000,
0x00000000,
0x0000007e,
0x00000000,
0x00000000,
0x00000000,
0x0000007e,
0x00000000,
0x00000000,
0x00000000,
0x0000007e,
0x00000000,
0x00000000,
0x00000000,
0x0000007e,
0x00000000,
0x00000000,
0x00000000,
0x0000007e,
0x00000000,
0x00000000,
0x00000000,
0x30000000,
0x00000124,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0xf99a130c,
0x799a130c,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x80000031,
0x00000003,
0x00000000, /* state end */
};
RO_RENDERSTATE(6);

View file

@@ -0,0 +1,254 @@
#include "intel_renderstate.h"
static const u32 gen7_null_state_relocs[] = {
0x0000000c,
0x00000010,
0x00000018,
0x000001ec,
-1,
};
static const u32 gen7_null_state_batch[] = {
0x69040000,
0x61010008,
0x00000000,
0x00000001, /* reloc */
0x00000001, /* reloc */
0x00000000,
0x00000001, /* reloc */
0x00000000,
0x00000001,
0x00000000,
0x00000001,
0x790d0002,
0x00000000,
0x00000000,
0x00000000,
0x78180000,
0x00000001,
0x79160000,
0x00000008,
0x78300000,
0x02010040,
0x78310000,
0x04000000,
0x78320000,
0x04000000,
0x78330000,
0x02000000,
0x78100004,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x781b0005,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x781c0002,
0x00000000,
0x00000000,
0x00000000,
0x781d0004,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78110005,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78120002,
0x00000000,
0x00000000,
0x00000000,
0x78210000,
0x00000000,
0x78130005,
0x00000000,
0x20000000,
0x04000000,
0x00000000,
0x00000000,
0x00000000,
0x78140001,
0x20000800,
0x00000000,
0x781e0001,
0x00000000,
0x00000000,
0x78050005,
0xe0040000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78040001,
0x00000000,
0x00000000,
0x78240000,
0x00000240,
0x78230000,
0x00000260,
0x782f0000,
0x00000280,
0x781f000c,
0x00400810,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78200006,
0x000002c0,
0x08080000,
0x00000000,
0x28000402,
0x00060000,
0x00000000,
0x00000000,
0x78090005,
0x02000000,
0x22220000,
0x02f60000,
0x11230000,
0x02f60004,
0x11230000,
0x78080003,
0x00006008,
0x00000340, /* reloc */
0xffffffff,
0x00000000,
0x782a0000,
0x00000360,
0x79000002,
0xffffffff,
0x00000000,
0x00000000,
0x7b000005,
0x0000000f,
0x00000003,
0x00000000,
0x00000001,
0x00000000,
0x00000000,
0x05000000, /* cmds end */
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000031, /* state start */
0x00000003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0xf99a130c,
0x799a130c,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000492,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x0080005a,
0x2e2077bd,
0x000000c0,
0x008d0040,
0x0080005a,
0x2e6077bd,
0x000000d0,
0x008d0040,
0x02800031,
0x21801fa9,
0x008d0e20,
0x08840001,
0x00800001,
0x2e2003bd,
0x008d0180,
0x00000000,
0x00800001,
0x2e6003bd,
0x008d01c0,
0x00000000,
0x00800001,
0x2ea003bd,
0x008d0200,
0x00000000,
0x00800001,
0x2ee003bd,
0x008d0240,
0x00000000,
0x05800031,
0x20001fa8,
0x008d0e20,
0x90031000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000380,
0x000003a0,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000, /* state end */
};
RO_RENDERSTATE(7);

View file

@@ -0,0 +1,480 @@
#include "intel_renderstate.h"
static const u32 gen8_null_state_relocs[] = {
0x00000048,
0x00000050,
0x00000060,
0x000003ec,
-1,
};
static const u32 gen8_null_state_batch[] = {
0x69040000,
0x61020001,
0x00000000,
0x00000000,
0x79120000,
0x00000000,
0x79130000,
0x00000000,
0x79140000,
0x00000000,
0x79150000,
0x00000000,
0x79160000,
0x00000000,
0x6101000e,
0x00000001,
0x00000000,
0x00000001,
0x00000001, /* reloc */
0x00000000,
0x00000001, /* reloc */
0x00000000,
0x00000000,
0x00000000,
0x00000001, /* reloc */
0x00000000,
0xfffff001,
0x00001001,
0xfffff001,
0x00001001,
0x78230000,
0x000006e0,
0x78210000,
0x00000700,
0x78300000,
0x08010040,
0x78330000,
0x08000000,
0x78310000,
0x08000000,
0x78320000,
0x08000000,
0x78240000,
0x00000641,
0x780e0000,
0x00000601,
0x780d0000,
0x00000000,
0x78180000,
0x00000001,
0x78520003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78190009,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x781b0007,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78270000,
0x00000000,
0x782c0000,
0x00000000,
0x781c0002,
0x00000000,
0x00000000,
0x00000000,
0x78160009,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78110008,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78290000,
0x00000000,
0x782e0000,
0x00000000,
0x781a0009,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x781d0007,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78280000,
0x00000000,
0x782d0000,
0x00000000,
0x78260000,
0x00000000,
0x782b0000,
0x00000000,
0x78150009,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78100007,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x781e0003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78120002,
0x00000000,
0x00000000,
0x00000000,
0x781f0002,
0x30400820,
0x00000000,
0x00000000,
0x78510009,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78500003,
0x00210000,
0x00000000,
0x00000000,
0x00000000,
0x78130002,
0x00000000,
0x00000000,
0x00000000,
0x782a0000,
0x00000480,
0x782f0000,
0x00000540,
0x78140000,
0x00000800,
0x78170009,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x7820000a,
0x00000580,
0x00000000,
0x08080000,
0x00000000,
0x00000000,
0x1f000002,
0x00060000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x784d0000,
0x40000000,
0x784f0000,
0x80000100,
0x780f0000,
0x00000740,
0x78050006,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78070003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78060003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78040001,
0x00000000,
0x00000001,
0x79000002,
0xffffffff,
0x00000000,
0x00000000,
0x78080003,
0x00006000,
0x000005e0, /* reloc */
0x00000000,
0x00000000,
0x78090005,
0x02000000,
0x22220000,
0x02f60000,
0x11230000,
0x02850004,
0x11230000,
0x784b0000,
0x0000000f,
0x78490001,
0x00000000,
0x00000000,
0x7b000005,
0x00000000,
0x00000003,
0x00000000,
0x00000001,
0x00000000,
0x00000000,
0x05000000, /* cmds end */
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x000004c0, /* state start */
0x00000500,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000092,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x0060005a,
0x21403ae8,
0x3a0000c0,
0x008d0040,
0x0060005a,
0x21603ae8,
0x3a0000c0,
0x008d0080,
0x0060005a,
0x21803ae8,
0x3a0000d0,
0x008d0040,
0x0060005a,
0x21a03ae8,
0x3a0000d0,
0x008d0080,
0x02800031,
0x2e0022e8,
0x0e000140,
0x08840001,
0x05800031,
0x200022e0,
0x0e000e00,
0x90031000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0xf99a130c,
0x799a130c,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x3f800000,
0x00000000,
0x3f800000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000, /* state end */
};
RO_RENDERSTATE(8);

File diff suppressed because it is too large

View file

@@ -0,0 +1,447 @@
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_
#include <linux/hashtable.h>
#define I915_CMD_HASH_ORDER 9
/* Early gen2 devices have a cacheline of just 32 bytes; using 64 is overkill
* but keeps the logic simple. Indeed, the whole purpose of this macro is just
* to give some indication as to some of the magic values used in the various
* workarounds!
*/
#define CACHELINE_BYTES 64
/*
* Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
* Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
* Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
*
* "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
* cacheline, the Head Pointer must not be greater than the Tail
* Pointer."
*/
#define I915_RING_FREE_SPACE 64
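/*
 * A minimal sketch (added for illustration, not part of the original
 * header) of how the reservation above keeps head and tail from
 * colliding on one cacheline; __intel_ring_space(), declared further
 * down, is the driver's real implementation of this check.
 */
static inline int __ring_space_sketch(int head, int tail, int size)
{
	int space = head - (tail + I915_RING_FREE_SPACE);

	if (space < 0)
		space += size;	/* tail has wrapped past head */
	return space;
}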
struct intel_hw_status_page {
u32 *page_addr;
unsigned int gfx_addr;
struct drm_i915_gem_object *obj;
};
#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
* do the writes, and that must have qword-aligned offsets, simply pretend it's 8 bytes.
*/
#define i915_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
(i915_semaphore_seqno_size * (to)))
#define GEN8_WAIT_OFFSET(__ring, from) \
(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
(i915_semaphore_seqno_size * (__ring)->id))
#define GEN8_RING_SEMAPHORE_INIT do { \
if (!dev_priv->semaphore_obj) { \
break; \
} \
ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
} while(0)
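/*
 * Worked example (illustrative): with I915_NUM_RINGS == 5 and
 * i915_semaphore_seqno_size == 8, the render ring (id 0) signalling
 * VCS (id 1) gets GEN8_SIGNAL_OFFSET == base + (0 * 5 * 8) + (8 * 1),
 * i.e. semaphore page offset 0x08, matching the signal table in
 * struct intel_engine_cs below.
 */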
enum intel_ring_hangcheck_action {
HANGCHECK_IDLE = 0,
HANGCHECK_WAIT,
HANGCHECK_ACTIVE,
HANGCHECK_ACTIVE_LOOP,
HANGCHECK_KICK,
HANGCHECK_HUNG,
};
#define HANGCHECK_SCORE_RING_HUNG 31
struct intel_ring_hangcheck {
u64 acthd;
u64 max_acthd;
u32 seqno;
int score;
enum intel_ring_hangcheck_action action;
int deadlock;
};
struct intel_ringbuffer {
struct drm_i915_gem_object *obj;
void __iomem *virtual_start;
struct intel_engine_cs *ring;
/*
* FIXME: This backpointer is an artifact of the history of how the
* execlist patches came into being. It will get removed once the basic
* code has landed.
*/
struct intel_context *FIXME_lrc_ctx;
u32 head;
u32 tail;
int space;
int size;
int effective_size;
/** We track the position of the requests in the ring buffer, and
* when each is retired we increment last_retired_head as the GPU
* must have finished processing the request and so we know we
* can advance the ringbuffer up to that position.
*
* last_retired_head is set to -1 after the value is consumed so
* we can detect new retirements.
*/
u32 last_retired_head;
};
struct intel_engine_cs {
const char *name;
enum intel_ring_id {
RCS = 0x0,
VCS,
BCS,
VECS,
VCS2
} id;
#define I915_NUM_RINGS 5
#define LAST_USER_RING (VECS + 1)
u32 mmio_base;
struct drm_device *dev;
struct intel_ringbuffer *buffer;
struct intel_hw_status_page status_page;
unsigned irq_refcount; /* protected by dev_priv->irq_lock */
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
u32 trace_irq_seqno;
bool __must_check (*irq_get)(struct intel_engine_cs *ring);
void (*irq_put)(struct intel_engine_cs *ring);
int (*init)(struct intel_engine_cs *ring);
int (*init_context)(struct intel_engine_cs *ring);
void (*write_tail)(struct intel_engine_cs *ring,
u32 value);
int __must_check (*flush)(struct intel_engine_cs *ring,
u32 invalidate_domains,
u32 flush_domains);
int (*add_request)(struct intel_engine_cs *ring);
/* Some chipsets are not quite as coherent as advertised and need
* an expensive kick to force a true read of the up-to-date seqno.
* However, the up-to-date seqno is not always required and the last
* seen value is good enough. Note that the seqno will always be
* monotonic, even if not coherent.
*/
u32 (*get_seqno)(struct intel_engine_cs *ring,
bool lazy_coherency);
void (*set_seqno)(struct intel_engine_cs *ring,
u32 seqno);
int (*dispatch_execbuffer)(struct intel_engine_cs *ring,
u64 offset, u32 length,
unsigned flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
void (*cleanup)(struct intel_engine_cs *ring);
/* GEN8 signal/wait table - never trust comments!
* signal to signal to signal to signal to signal to
* RCS VCS BCS VECS VCS2
* --------------------------------------------------------------------
* RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
* |-------------------------------------------------------------------
* VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
* |-------------------------------------------------------------------
* BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
* |-------------------------------------------------------------------
* VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP (0x90) | VCS2 (0x98) |
* |-------------------------------------------------------------------
* VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP (0xc0) |
* |-------------------------------------------------------------------
*
* Generalization:
* f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
* i.e. transpose of g(x, y)
*
* sync from sync from sync from sync from sync from
* RCS VCS BCS VECS VCS2
* --------------------------------------------------------------------
* RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
* |-------------------------------------------------------------------
* VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
* |-------------------------------------------------------------------
* BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
* |-------------------------------------------------------------------
* VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP (0x90) | VCS2 (0xb8) |
* |-------------------------------------------------------------------
* VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP (0xc0) |
* |-------------------------------------------------------------------
*
* Generalization:
* g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
* i.e. transpose of f(x, y)
*/
struct {
u32 sync_seqno[I915_NUM_RINGS-1];
union {
struct {
/* our mbox written by others */
u32 wait[I915_NUM_RINGS];
/* mboxes this ring signals to */
u32 signal[I915_NUM_RINGS];
} mbox;
u64 signal_ggtt[I915_NUM_RINGS];
};
/* AKA wait() */
int (*sync_to)(struct intel_engine_cs *ring,
struct intel_engine_cs *to,
u32 seqno);
int (*signal)(struct intel_engine_cs *signaller,
/* num_dwords needed by caller */
unsigned int num_dwords);
} semaphore;
/* Execlists */
spinlock_t execlist_lock;
struct list_head execlist_queue;
u8 next_context_status_buffer;
u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
int (*emit_request)(struct intel_ringbuffer *ringbuf);
int (*emit_flush)(struct intel_ringbuffer *ringbuf,
u32 invalidate_domains,
u32 flush_domains);
int (*emit_bb_start)(struct intel_ringbuffer *ringbuf,
u64 offset, unsigned flags);
/**
* List of objects currently involved in rendering from the
* ringbuffer.
*
* Includes buffers having the contents of their GPU caches
* flushed, not necessarily primitives. last_rendering_seqno
* represents when the rendering involved will be completed.
*
* A reference is held on the buffer while on this list.
*/
struct list_head active_list;
/**
* List of breadcrumbs associated with GPU requests currently
* outstanding.
*/
struct list_head request_list;
/**
* Do we have some not yet emitted requests outstanding?
*/
struct drm_i915_gem_request *preallocated_lazy_request;
u32 outstanding_lazy_seqno;
bool gpu_caches_dirty;
bool fbc_dirty;
wait_queue_head_t irq_queue;
struct intel_context *default_context;
struct intel_context *last_context;
struct intel_ring_hangcheck hangcheck;
struct {
struct drm_i915_gem_object *obj;
u32 gtt_offset;
volatile u32 *cpu_page;
} scratch;
bool needs_cmd_parser;
/*
* Table of commands the command parser needs to know about
* for this ring.
*/
DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);
/*
* Table of registers allowed in commands that read/write registers.
*/
const u32 *reg_table;
int reg_count;
/*
* Table of registers allowed in commands that read/write registers, but
* only from the DRM master.
*/
const u32 *master_reg_table;
int master_reg_count;
/*
* Returns the bitmask for the length field of the specified command.
* Return 0 for an unrecognized/invalid command.
*
* If the command parser finds an entry for a command in the ring's
* cmd_tables, it gets the command's length based on the table entry.
* If not, it calls this function to determine the per-ring length field
* encoding for the command (i.e. certain opcode ranges use certain bits
* to encode the command length in the header).
*/
u32 (*get_cmd_length_mask)(u32 cmd_header);
};
bool intel_ring_initialized(struct intel_engine_cs *ring);
static inline unsigned
intel_ring_flag(struct intel_engine_cs *ring)
{
return 1 << ring->id;
}
static inline u32
intel_ring_sync_index(struct intel_engine_cs *ring,
struct intel_engine_cs *other)
{
int idx;
/*
* rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
* vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
* bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
* vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
* vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
*/
idx = (other - ring) - 1;
if (idx < 0)
idx += I915_NUM_RINGS;
return idx;
}
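/*
 * Worked check (illustrative, assuming the engines sit consecutively
 * in dev_priv->ring[]): for ring == vcs and other == rcs the pointer
 * difference gives idx = (0 - 1) - 1 = -2, then -2 + I915_NUM_RINGS = 3,
 * matching "vcs -> 3 = rcs" in the comment above.
 */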
static inline u32
intel_read_status_page(struct intel_engine_cs *ring,
int reg)
{
/* Ensure that the compiler doesn't optimize away the load. */
barrier();
return ring->status_page.page_addr[reg];
}
static inline void
intel_write_status_page(struct intel_engine_cs *ring,
int reg, u32 value)
{
ring->status_page.page_addr[reg] = value;
}
/**
* Reads a dword out of the status page, which is written to from the command
* queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
* MI_STORE_DATA_IMM.
*
* The following dwords have a reserved meaning:
* 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
* 0x04: ring 0 head pointer
* 0x05: ring 1 head pointer (915-class)
* 0x06: ring 2 head pointer (915-class)
* 0x10-0x1b: Context status DWords (GM45)
* 0x1f: Last written status offset. (GM45)
*
* The area from dword 0x20 to 0x3ff is available for driver usage.
*/
#define I915_GEM_HWS_INDEX 0x20
#define I915_GEM_HWS_SCRATCH_INDEX 0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
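/*
 * Illustrative read of the breadcrumb slot (this assumes the driver's
 * convention of writing the last completed seqno at I915_GEM_HWS_INDEX):
 *
 *	u32 seqno = intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 */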
void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
int intel_alloc_ringbuffer_obj(struct drm_device *dev,
struct intel_ringbuffer *ringbuf);
void intel_stop_ring_buffer(struct intel_engine_cs *ring);
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
static inline void intel_ring_emit(struct intel_engine_cs *ring,
u32 data)
{
struct intel_ringbuffer *ringbuf = ring->buffer;
iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
ringbuf->tail += 4;
}
static inline void intel_ring_advance(struct intel_engine_cs *ring)
{
struct intel_ringbuffer *ringbuf = ring->buffer;
ringbuf->tail &= ringbuf->size - 1;
}
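/*
 * Typical usage, as a sketch (error paths trimmed; MI_NOOP comes from
 * the register definitions elsewhere in the driver):
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 */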
int __intel_ring_space(int head, int tail, int size);
int intel_ring_space(struct intel_ringbuffer *ringbuf);
bool intel_ring_stopped(struct intel_engine_cs *ring);
void __intel_ring_advance(struct intel_engine_cs *ring);
int __must_check intel_ring_idle(struct intel_engine_cs *ring);
void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);
void intel_fini_pipe_control(struct intel_engine_cs *ring);
int intel_init_pipe_control(struct intel_engine_cs *ring);
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_bsd2_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);
u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
void intel_ring_setup_status_page(struct intel_engine_cs *ring);
static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
return ringbuf->tail;
}
static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)
{
BUG_ON(ring->outstanding_lazy_seqno == 0);
return ring->outstanding_lazy_seqno;
}
static inline void i915_trace_irq_get(struct intel_engine_cs *ring, u32 seqno)
{
if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
ring->trace_irq_seqno = seqno;
}
/* DRI warts */
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
#endif /* _INTEL_RINGBUFFER_H_ */

File diff suppressed because it is too large

View file

@@ -0,0 +1,730 @@
/*
* Copyright © 2006-2007 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*/
/**
* @file SDVO command definitions and structures.
*/
#define SDVO_OUTPUT_FIRST (0)
#define SDVO_OUTPUT_TMDS0 (1 << 0)
#define SDVO_OUTPUT_RGB0 (1 << 1)
#define SDVO_OUTPUT_CVBS0 (1 << 2)
#define SDVO_OUTPUT_SVID0 (1 << 3)
#define SDVO_OUTPUT_YPRPB0 (1 << 4)
#define SDVO_OUTPUT_SCART0 (1 << 5)
#define SDVO_OUTPUT_LVDS0 (1 << 6)
#define SDVO_OUTPUT_TMDS1 (1 << 8)
#define SDVO_OUTPUT_RGB1 (1 << 9)
#define SDVO_OUTPUT_CVBS1 (1 << 10)
#define SDVO_OUTPUT_SVID1 (1 << 11)
#define SDVO_OUTPUT_YPRPB1 (1 << 12)
#define SDVO_OUTPUT_SCART1 (1 << 13)
#define SDVO_OUTPUT_LVDS1 (1 << 14)
#define SDVO_OUTPUT_LAST (14)
struct intel_sdvo_caps {
u8 vendor_id;
u8 device_id;
u8 device_rev_id;
u8 sdvo_version_major;
u8 sdvo_version_minor;
unsigned int sdvo_inputs_mask:2;
unsigned int smooth_scaling:1;
unsigned int sharp_scaling:1;
unsigned int up_scaling:1;
unsigned int down_scaling:1;
unsigned int stall_support:1;
unsigned int pad:1;
u16 output_flags;
} __packed;
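/*
 * Example (sketch): output_flags is tested against the SDVO_OUTPUT_*
 * bits defined above, e.g. to check for a first TMDS (DVI/HDMI)
 * channel:
 *
 *	if (caps->output_flags & SDVO_OUTPUT_TMDS0)
 *		has_tmds = true;
 */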
/* Note: SDVO detailed timing flags match EDID misc flags. */
#define DTD_FLAG_HSYNC_POSITIVE (1 << 1)
#define DTD_FLAG_VSYNC_POSITIVE (1 << 2)
#define DTD_FLAG_INTERLACE (1 << 7)
/** This matches the EDID DTD structure, more or less */
struct intel_sdvo_dtd {
struct {
u16 clock; /**< pixel clock, in 10kHz units */
u8 h_active; /**< lower 8 bits (pixels) */
u8 h_blank; /**< lower 8 bits (pixels) */
u8 h_high; /**< upper 4 bits each h_active, h_blank */
u8 v_active; /**< lower 8 bits (lines) */
u8 v_blank; /**< lower 8 bits (lines) */
u8 v_high; /**< upper 4 bits each v_active, v_blank */
} part1;
struct {
u8 h_sync_off; /**< lower 8 bits, from hblank start */
u8 h_sync_width; /**< lower 8 bits (pixels) */
/** lower 4 bits each vsync offset, vsync width */
u8 v_sync_off_width;
/**
* 2 high bits of hsync offset, 2 high bits of hsync width,
* bits 4-5 of vsync offset, and 2 high bits of vsync width.
*/
u8 sync_off_width_high;
u8 dtd_flags;
u8 sdvo_flags;
/** bits 6-7 of vsync offset, stored at bits 6-7 */
u8 v_sync_off_high;
u8 reserved;
} part2;
} __packed;
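/*
 * Decoding sketch (assuming the field layout in the comments above):
 * each 12-bit timing value is split into a low byte plus a high
 * nibble, so the active width and horizontal blanking are reassembled
 * as
 *
 *	hdisplay = dtd->part1.h_active |
 *		   (((dtd->part1.h_high >> 4) & 0xf) << 8);
 *	hblank   = dtd->part1.h_blank |
 *		   ((dtd->part1.h_high & 0xf) << 8);
 */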
struct intel_sdvo_pixel_clock_range {
u16 min; /**< pixel clock, in 10kHz units */
u16 max; /**< pixel clock, in 10kHz units */
} __packed;
struct intel_sdvo_preferred_input_timing_args {
u16 clock;
u16 width;
u16 height;
u8 interlace:1;
u8 scaled:1;
u8 pad:6;
} __packed;
/* I2C registers for SDVO */
#define SDVO_I2C_ARG_0 0x07
#define SDVO_I2C_ARG_1 0x06
#define SDVO_I2C_ARG_2 0x05
#define SDVO_I2C_ARG_3 0x04
#define SDVO_I2C_ARG_4 0x03
#define SDVO_I2C_ARG_5 0x02
#define SDVO_I2C_ARG_6 0x01
#define SDVO_I2C_ARG_7 0x00
#define SDVO_I2C_OPCODE 0x08
#define SDVO_I2C_CMD_STATUS 0x09
#define SDVO_I2C_RETURN_0 0x0a
#define SDVO_I2C_RETURN_1 0x0b
#define SDVO_I2C_RETURN_2 0x0c
#define SDVO_I2C_RETURN_3 0x0d
#define SDVO_I2C_RETURN_4 0x0e
#define SDVO_I2C_RETURN_5 0x0f
#define SDVO_I2C_RETURN_6 0x10
#define SDVO_I2C_RETURN_7 0x11
#define SDVO_I2C_VENDOR_BEGIN 0x20
/* Status results */
#define SDVO_CMD_STATUS_POWER_ON 0x0
#define SDVO_CMD_STATUS_SUCCESS 0x1
#define SDVO_CMD_STATUS_NOTSUPP 0x2
#define SDVO_CMD_STATUS_INVALID_ARG 0x3
#define SDVO_CMD_STATUS_PENDING 0x4
#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED 0x5
#define SDVO_CMD_STATUS_SCALING_NOT_SUPP 0x6
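/*
 * Command protocol sketch (the real implementation lives in
 * intel_sdvo.c; sdvo_write()/sdvo_read() below are hypothetical
 * one-byte I2C register accessors): write up to 8 argument bytes into
 * SDVO_I2C_ARG_0..7, write the opcode, poll SDVO_I2C_CMD_STATUS until
 * it leaves SDVO_CMD_STATUS_PENDING, then on SDVO_CMD_STATUS_SUCCESS
 * read the reply back from SDVO_I2C_RETURN_0..7.
 *
 *	for (i = 0; i < args_len; i++)
 *		sdvo_write(SDVO_I2C_ARG_0 - i, args[i]);
 *	sdvo_write(SDVO_I2C_OPCODE, cmd);
 *	do
 *		status = sdvo_read(SDVO_I2C_CMD_STATUS);
 *	while (status == SDVO_CMD_STATUS_PENDING);
 *	if (status == SDVO_CMD_STATUS_SUCCESS)
 *		for (i = 0; i < reply_len; i++)
 *			reply[i] = sdvo_read(SDVO_I2C_RETURN_0 + i);
 */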
/* SDVO commands, argument/result registers */
#define SDVO_CMD_RESET 0x01
/** Returns a struct intel_sdvo_caps */
#define SDVO_CMD_GET_DEVICE_CAPS 0x02
#define SDVO_CMD_GET_FIRMWARE_REV 0x86
# define SDVO_DEVICE_FIRMWARE_MINOR SDVO_I2C_RETURN_0
# define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1
# define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2
/**
* Reports which inputs are trained (managed to sync).
*
* Devices must have trained within 2 vsyncs of a mode change.
*/
#define SDVO_CMD_GET_TRAINED_INPUTS 0x03
struct intel_sdvo_get_trained_inputs_response {
unsigned int input0_trained:1;
unsigned int input1_trained:1;
unsigned int pad:6;
} __packed;
/** Returns a struct intel_sdvo_output_flags of active outputs. */
#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04
/**
* Sets the current set of active outputs.
*
* Takes a struct intel_sdvo_output_flags. Must be preceded by a SET_IN_OUT_MAP
* on multi-output devices.
*/
#define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05
/**
* Returns the current mapping of SDVO inputs to outputs on the device.
*
* Returns two struct intel_sdvo_output_flags structures.
*/
#define SDVO_CMD_GET_IN_OUT_MAP 0x06
struct intel_sdvo_in_out_map {
u16 in0, in1;
};
/**
* Sets the current mapping of SDVO inputs to outputs on the device.
*
* Takes two struct intel_sdvo_output_flags structures.
*/
#define SDVO_CMD_SET_IN_OUT_MAP 0x07
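/*
 * Sketch (intel_sdvo_set_value() as used by the driver; sdvo and the
 * error handling are elided): route input 0 to the first TMDS output
 * before activating it with SET_ACTIVE_OUTPUTS.
 *
 *	struct intel_sdvo_in_out_map map = {
 *		.in0 = SDVO_OUTPUT_TMDS0,
 *		.in1 = 0,
 *	};
 *	intel_sdvo_set_value(sdvo, SDVO_CMD_SET_IN_OUT_MAP,
 *			     &map, sizeof(map));
 */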
/**
* Returns a struct intel_sdvo_output_flags of attached displays.
*/
#define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b
/**
* Returns a struct intel_sdvo_output_flags of displays supporting hot plugging.
*/
#define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c
/**
* Takes a struct intel_sdvo_output_flags.
*/
#define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d
/**
* Returns a struct intel_sdvo_output_flags of displays with hot plug
* interrupts enabled.
*/
#define SDVO_CMD_GET_ACTIVE_HOT_PLUG 0x0e
#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f
struct intel_sdvo_get_interrupt_event_source_response {
u16 interrupt_status;
unsigned int ambient_light_interrupt:1;
unsigned int hdmi_audio_encrypt_change:1;
unsigned int pad:6;
} __packed;
/**
* Selects which input is affected by future input commands.
*
* Commands affected include SET_INPUT_TIMINGS_PART[12],
* GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
* GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
*/
#define SDVO_CMD_SET_TARGET_INPUT 0x10
struct intel_sdvo_set_target_input_args {
unsigned int target_1:1;
unsigned int pad:7;
} __packed;
/**
* Takes a struct intel_sdvo_output_flags of which outputs are targeted by
* future output commands.
*
* Affected commands include SET_OUTPUT_TIMINGS_PART[12],
* GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
*/
#define SDVO_CMD_SET_TARGET_OUTPUT 0x11
#define SDVO_CMD_GET_INPUT_TIMINGS_PART1 0x12
#define SDVO_CMD_GET_INPUT_TIMINGS_PART2 0x13
#define SDVO_CMD_SET_INPUT_TIMINGS_PART1 0x14
#define SDVO_CMD_SET_INPUT_TIMINGS_PART2 0x15
#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1 0x16
#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2 0x17
#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1 0x18
#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2 0x19
/* Part 1 */
# define SDVO_DTD_CLOCK_LOW SDVO_I2C_ARG_0
# define SDVO_DTD_CLOCK_HIGH SDVO_I2C_ARG_1
# define SDVO_DTD_H_ACTIVE SDVO_I2C_ARG_2
# define SDVO_DTD_H_BLANK SDVO_I2C_ARG_3
# define SDVO_DTD_H_HIGH SDVO_I2C_ARG_4
# define SDVO_DTD_V_ACTIVE SDVO_I2C_ARG_5
# define SDVO_DTD_V_BLANK SDVO_I2C_ARG_6
# define SDVO_DTD_V_HIGH SDVO_I2C_ARG_7
/* Part 2 */
# define SDVO_DTD_HSYNC_OFF SDVO_I2C_ARG_0
# define SDVO_DTD_HSYNC_WIDTH SDVO_I2C_ARG_1
# define SDVO_DTD_VSYNC_OFF_WIDTH SDVO_I2C_ARG_2
# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH SDVO_I2C_ARG_3
# define SDVO_DTD_DTD_FLAGS SDVO_I2C_ARG_4
# define SDVO_DTD_DTD_FLAG_INTERLACED (1 << 7)
# define SDVO_DTD_DTD_FLAG_STEREO_MASK (3 << 5)
# define SDVO_DTD_DTD_FLAG_INPUT_MASK (3 << 3)
# define SDVO_DTD_DTD_FLAG_SYNC_MASK (3 << 1)
# define SDVO_DTD_SDVO_FLAGS SDVO_I2C_ARG_5
# define SDVO_DTD_SDVO_FLAG_STALL (1 << 7)
# define SDVO_DTD_SDVO_FLAG_CENTERED (0 << 6)
# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT (1 << 6)
# define SDVO_DTD_SDVO_FLAG_SCALING_MASK (3 << 4)
# define SDVO_DTD_SDVO_FLAG_SCALING_NONE (0 << 4)
# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP (1 << 4)
# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4)
# define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6
/**
* Generates a DTD based on the given width, height, and flags.
*
* This will be supported by any device supporting scaling or interlaced
* modes.
*/
#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING 0x1a
# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW SDVO_I2C_ARG_0
# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH SDVO_I2C_ARG_1
# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW SDVO_I2C_ARG_2
# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH SDVO_I2C_ARG_3
# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW SDVO_I2C_ARG_4
# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH SDVO_I2C_ARG_5
# define SDVO_PREFERRED_INPUT_TIMING_FLAGS SDVO_I2C_ARG_6
# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED (1 << 0)
# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED (1 << 1)
#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b
#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c
/** Returns a struct intel_sdvo_pixel_clock_range */
#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d
/** Returns a struct intel_sdvo_pixel_clock_range */
#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e
/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f
/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
#define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20
/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
#define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21
# define SDVO_CLOCK_RATE_MULT_1X (1 << 0)
# define SDVO_CLOCK_RATE_MULT_2X (1 << 1)
# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
/** 6 bytes of bit flags for TV formats shared by all TV format functions */
struct intel_sdvo_tv_format {
unsigned int ntsc_m:1;
unsigned int ntsc_j:1;
unsigned int ntsc_443:1;
unsigned int pal_b:1;
unsigned int pal_d:1;
unsigned int pal_g:1;
unsigned int pal_h:1;
unsigned int pal_i:1;
unsigned int pal_m:1;
unsigned int pal_n:1;
unsigned int pal_nc:1;
unsigned int pal_60:1;
unsigned int secam_b:1;
unsigned int secam_d:1;
unsigned int secam_g:1;
unsigned int secam_k:1;
unsigned int secam_k1:1;
unsigned int secam_l:1;
unsigned int secam_60:1;
unsigned int hdtv_std_smpte_240m_1080i_59:1;
unsigned int hdtv_std_smpte_240m_1080i_60:1;
unsigned int hdtv_std_smpte_260m_1080i_59:1;
unsigned int hdtv_std_smpte_260m_1080i_60:1;
unsigned int hdtv_std_smpte_274m_1080i_50:1;
unsigned int hdtv_std_smpte_274m_1080i_59:1;
unsigned int hdtv_std_smpte_274m_1080i_60:1;
unsigned int hdtv_std_smpte_274m_1080p_23:1;
unsigned int hdtv_std_smpte_274m_1080p_24:1;
unsigned int hdtv_std_smpte_274m_1080p_25:1;
unsigned int hdtv_std_smpte_274m_1080p_29:1;
unsigned int hdtv_std_smpte_274m_1080p_30:1;
unsigned int hdtv_std_smpte_274m_1080p_50:1;
unsigned int hdtv_std_smpte_274m_1080p_59:1;
unsigned int hdtv_std_smpte_274m_1080p_60:1;
unsigned int hdtv_std_smpte_295m_1080i_50:1;
unsigned int hdtv_std_smpte_295m_1080p_50:1;
unsigned int hdtv_std_smpte_296m_720p_59:1;
unsigned int hdtv_std_smpte_296m_720p_60:1;
unsigned int hdtv_std_smpte_296m_720p_50:1;
unsigned int hdtv_std_smpte_293m_480p_59:1;
unsigned int hdtv_std_smpte_170m_480i_59:1;
unsigned int hdtv_std_iturbt601_576i_50:1;
unsigned int hdtv_std_iturbt601_576p_50:1;
unsigned int hdtv_std_eia_7702a_480i_60:1;
unsigned int hdtv_std_eia_7702a_480p_60:1;
unsigned int pad:3;
} __packed;
#define SDVO_CMD_GET_TV_FORMAT 0x28
#define SDVO_CMD_SET_TV_FORMAT 0x29
/** Returns the resolutions that can be used with the given TV format */
#define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT 0x83
struct intel_sdvo_sdtv_resolution_request {
unsigned int ntsc_m:1;
unsigned int ntsc_j:1;
unsigned int ntsc_443:1;
unsigned int pal_b:1;
unsigned int pal_d:1;
unsigned int pal_g:1;
unsigned int pal_h:1;
unsigned int pal_i:1;
unsigned int pal_m:1;
unsigned int pal_n:1;
unsigned int pal_nc:1;
unsigned int pal_60:1;
unsigned int secam_b:1;
unsigned int secam_d:1;
unsigned int secam_g:1;
unsigned int secam_k:1;
unsigned int secam_k1:1;
unsigned int secam_l:1;
unsigned int secam_60:1;
unsigned int pad:5;
} __packed;
struct intel_sdvo_sdtv_resolution_reply {
unsigned int res_320x200:1;
unsigned int res_320x240:1;
unsigned int res_400x300:1;
unsigned int res_640x350:1;
unsigned int res_640x400:1;
unsigned int res_640x480:1;
unsigned int res_704x480:1;
unsigned int res_704x576:1;
unsigned int res_720x350:1;
unsigned int res_720x400:1;
unsigned int res_720x480:1;
unsigned int res_720x540:1;
unsigned int res_720x576:1;
unsigned int res_768x576:1;
unsigned int res_800x600:1;
unsigned int res_832x624:1;
unsigned int res_920x766:1;
unsigned int res_1024x768:1;
unsigned int res_1280x1024:1;
unsigned int pad:5;
} __packed;
/* Get the supported resolutions with square pixel aspect ratio that can be
 scaled for the requested HDTV format */
#define SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT 0x85
struct intel_sdvo_hdtv_resolution_request {
unsigned int hdtv_std_smpte_240m_1080i_59:1;
unsigned int hdtv_std_smpte_240m_1080i_60:1;
unsigned int hdtv_std_smpte_260m_1080i_59:1;
unsigned int hdtv_std_smpte_260m_1080i_60:1;
unsigned int hdtv_std_smpte_274m_1080i_50:1;
unsigned int hdtv_std_smpte_274m_1080i_59:1;
unsigned int hdtv_std_smpte_274m_1080i_60:1;
unsigned int hdtv_std_smpte_274m_1080p_23:1;
unsigned int hdtv_std_smpte_274m_1080p_24:1;
unsigned int hdtv_std_smpte_274m_1080p_25:1;
unsigned int hdtv_std_smpte_274m_1080p_29:1;
unsigned int hdtv_std_smpte_274m_1080p_30:1;
unsigned int hdtv_std_smpte_274m_1080p_50:1;
unsigned int hdtv_std_smpte_274m_1080p_59:1;
unsigned int hdtv_std_smpte_274m_1080p_60:1;
unsigned int hdtv_std_smpte_295m_1080i_50:1;
unsigned int hdtv_std_smpte_295m_1080p_50:1;
unsigned int hdtv_std_smpte_296m_720p_59:1;
unsigned int hdtv_std_smpte_296m_720p_60:1;
unsigned int hdtv_std_smpte_296m_720p_50:1;
unsigned int hdtv_std_smpte_293m_480p_59:1;
unsigned int hdtv_std_smpte_170m_480i_59:1;
unsigned int hdtv_std_iturbt601_576i_50:1;
unsigned int hdtv_std_iturbt601_576p_50:1;
unsigned int hdtv_std_eia_7702a_480i_60:1;
unsigned int hdtv_std_eia_7702a_480p_60:1;
unsigned int pad:6;
} __packed;
struct intel_sdvo_hdtv_resolution_reply {
unsigned int res_640x480:1;
unsigned int res_800x600:1;
unsigned int res_1024x768:1;
unsigned int res_1280x960:1;
unsigned int res_1400x1050:1;
unsigned int res_1600x1200:1;
unsigned int res_1920x1440:1;
unsigned int res_2048x1536:1;
unsigned int res_2560x1920:1;
unsigned int res_3200x2400:1;
unsigned int res_3840x2880:1;
unsigned int pad1:5;
unsigned int res_848x480:1;
unsigned int res_1064x600:1;
unsigned int res_1280x720:1;
unsigned int res_1360x768:1;
unsigned int res_1704x960:1;
unsigned int res_1864x1050:1;
unsigned int res_1920x1080:1;
unsigned int res_2128x1200:1;
unsigned int res_2560x1400:1;
unsigned int res_2728x1536:1;
unsigned int res_3408x1920:1;
unsigned int res_4264x2400:1;
unsigned int res_5120x2880:1;
unsigned int pad2:3;
unsigned int res_768x480:1;
unsigned int res_960x600:1;
unsigned int res_1152x720:1;
unsigned int res_1124x768:1;
unsigned int res_1536x960:1;
unsigned int res_1680x1050:1;
unsigned int res_1728x1080:1;
unsigned int res_1920x1200:1;
unsigned int res_2304x1440:1;
unsigned int res_2456x1536:1;
unsigned int res_3072x1920:1;
unsigned int res_3840x2400:1;
unsigned int res_4608x2880:1;
unsigned int pad3:3;
unsigned int res_1280x1024:1;
unsigned int pad4:7;
unsigned int res_1280x768:1;
unsigned int pad5:7;
} __packed;
/* Get supported power states; returns info for the encoder and monitor and
 relies on the last SetTargetInput and SetTargetOutput calls */
#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a
/* Get power state; returns info for the encoder and monitor and relies on
 the last SetTargetInput and SetTargetOutput calls */
#define SDVO_CMD_GET_POWER_STATE 0x2b
#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b
#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c
# define SDVO_ENCODER_STATE_ON (1 << 0)
# define SDVO_ENCODER_STATE_STANDBY (1 << 1)
# define SDVO_ENCODER_STATE_SUSPEND (1 << 2)
# define SDVO_ENCODER_STATE_OFF (1 << 3)
# define SDVO_MONITOR_STATE_ON (1 << 4)
# define SDVO_MONITOR_STATE_STANDBY (1 << 5)
# define SDVO_MONITOR_STATE_SUSPEND (1 << 6)
# define SDVO_MONITOR_STATE_OFF (1 << 7)
#define SDVO_CMD_GET_MAX_PANEL_POWER_SEQUENCING 0x2d
#define SDVO_CMD_GET_PANEL_POWER_SEQUENCING 0x2e
#define SDVO_CMD_SET_PANEL_POWER_SEQUENCING 0x2f
/**
* The panel power sequencing parameters are in units of milliseconds.
* The high fields are bits 8:9 of the 10-bit values.
*/
struct sdvo_panel_power_sequencing {
u8 t0;
u8 t1;
u8 t2;
u8 t3;
u8 t4;
unsigned int t0_high:2;
unsigned int t1_high:2;
unsigned int t2_high:2;
unsigned int t3_high:2;
unsigned int t4_high:2;
unsigned int pad:6;
} __packed;
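/*
 * Example: each delay is 10 bits wide, so the full value in
 * milliseconds is reassembled from the low byte plus the 2-bit high
 * field:
 *
 *	u16 t0_ms = pps->t0 | (pps->t0_high << 8);
 */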
#define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL 0x30
struct sdvo_max_backlight_reply {
u8 max_value;
u8 default_value;
} __packed;
#define SDVO_CMD_GET_BACKLIGHT_LEVEL 0x31
#define SDVO_CMD_SET_BACKLIGHT_LEVEL 0x32
#define SDVO_CMD_GET_AMBIENT_LIGHT 0x33
struct sdvo_get_ambient_light_reply {
u16 trip_low;
u16 trip_high;
u16 value;
} __packed;
#define SDVO_CMD_SET_AMBIENT_LIGHT 0x34
struct sdvo_set_ambient_light_reply {
u16 trip_low;
u16 trip_high;
unsigned int enable:1;
unsigned int pad:7;
} __packed;
/* Set display power state */
#define SDVO_CMD_SET_DISPLAY_POWER_STATE 0x7d
# define SDVO_DISPLAY_STATE_ON (1 << 0)
# define SDVO_DISPLAY_STATE_STANDBY (1 << 1)
# define SDVO_DISPLAY_STATE_SUSPEND (1 << 2)
# define SDVO_DISPLAY_STATE_OFF (1 << 3)
#define SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS 0x84
struct intel_sdvo_enhancements_reply {
unsigned int flicker_filter:1;
unsigned int flicker_filter_adaptive:1;
unsigned int flicker_filter_2d:1;
unsigned int saturation:1;
unsigned int hue:1;
unsigned int brightness:1;
unsigned int contrast:1;
unsigned int overscan_h:1;
unsigned int overscan_v:1;
unsigned int hpos:1;
unsigned int vpos:1;
unsigned int sharpness:1;
unsigned int dot_crawl:1;
unsigned int dither:1;
unsigned int tv_chroma_filter:1;
unsigned int tv_luma_filter:1;
} __packed;
/* Picture enhancement limits below are dependent on the current TV format,
* and thus need to be queried and set after it.
*/
#define SDVO_CMD_GET_MAX_FLICKER_FILTER 0x4d
#define SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE 0x7b
#define SDVO_CMD_GET_MAX_FLICKER_FILTER_2D 0x52
#define SDVO_CMD_GET_MAX_SATURATION 0x55
#define SDVO_CMD_GET_MAX_HUE 0x58
#define SDVO_CMD_GET_MAX_BRIGHTNESS 0x5b
#define SDVO_CMD_GET_MAX_CONTRAST 0x5e
#define SDVO_CMD_GET_MAX_OVERSCAN_H 0x61
#define SDVO_CMD_GET_MAX_OVERSCAN_V 0x64
#define SDVO_CMD_GET_MAX_HPOS 0x67
#define SDVO_CMD_GET_MAX_VPOS 0x6a
#define SDVO_CMD_GET_MAX_SHARPNESS 0x6d
#define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER 0x74
#define SDVO_CMD_GET_MAX_TV_LUMA_FILTER 0x77
struct intel_sdvo_enhancement_limits_reply {
u16 max_value;
u16 default_value;
} __packed;
#define SDVO_CMD_GET_LVDS_PANEL_INFORMATION 0x7f
#define SDVO_CMD_SET_LVDS_PANEL_INFORMATION 0x80
# define SDVO_LVDS_COLOR_DEPTH_18 (0 << 0)
# define SDVO_LVDS_COLOR_DEPTH_24 (1 << 0)
# define SDVO_LVDS_CONNECTOR_SPWG (0 << 2)
# define SDVO_LVDS_CONNECTOR_OPENLDI (1 << 2)
# define SDVO_LVDS_SINGLE_CHANNEL (0 << 4)
# define SDVO_LVDS_DUAL_CHANNEL (1 << 4)
#define SDVO_CMD_GET_FLICKER_FILTER 0x4e
#define SDVO_CMD_SET_FLICKER_FILTER 0x4f
#define SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE 0x50
#define SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE 0x51
#define SDVO_CMD_GET_FLICKER_FILTER_2D 0x53
#define SDVO_CMD_SET_FLICKER_FILTER_2D 0x54
#define SDVO_CMD_GET_SATURATION 0x56
#define SDVO_CMD_SET_SATURATION 0x57
#define SDVO_CMD_GET_HUE 0x59
#define SDVO_CMD_SET_HUE 0x5a
#define SDVO_CMD_GET_BRIGHTNESS 0x5c
#define SDVO_CMD_SET_BRIGHTNESS 0x5d
#define SDVO_CMD_GET_CONTRAST 0x5f
#define SDVO_CMD_SET_CONTRAST 0x60
#define SDVO_CMD_GET_OVERSCAN_H 0x62
#define SDVO_CMD_SET_OVERSCAN_H 0x63
#define SDVO_CMD_GET_OVERSCAN_V 0x65
#define SDVO_CMD_SET_OVERSCAN_V 0x66
#define SDVO_CMD_GET_HPOS 0x68
#define SDVO_CMD_SET_HPOS 0x69
#define SDVO_CMD_GET_VPOS 0x6b
#define SDVO_CMD_SET_VPOS 0x6c
#define SDVO_CMD_GET_SHARPNESS 0x6e
#define SDVO_CMD_SET_SHARPNESS 0x6f
#define SDVO_CMD_GET_TV_CHROMA_FILTER 0x75
#define SDVO_CMD_SET_TV_CHROMA_FILTER 0x76
#define SDVO_CMD_GET_TV_LUMA_FILTER 0x78
#define SDVO_CMD_SET_TV_LUMA_FILTER 0x79
struct intel_sdvo_enhancements_arg {
u16 value;
} __packed;
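/*
 * Usage sketch (intel_sdvo_get_value()/intel_sdvo_set_value() helpers
 * from intel_sdvo.c; error handling elided): query the limit first and
 * program a value within it, since the limits depend on the current TV
 * format.
 *
 *	struct intel_sdvo_enhancement_limits_reply limits;
 *	struct intel_sdvo_enhancements_arg arg;
 *
 *	intel_sdvo_get_value(sdvo, SDVO_CMD_GET_MAX_BRIGHTNESS,
 *			     &limits, sizeof(limits));
 *	arg.value = min_t(u16, wanted, limits.max_value);
 *	intel_sdvo_set_value(sdvo, SDVO_CMD_SET_BRIGHTNESS,
 *			     &arg, sizeof(arg));
 */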
#define SDVO_CMD_GET_DOT_CRAWL 0x70
#define SDVO_CMD_SET_DOT_CRAWL 0x71
# define SDVO_DOT_CRAWL_ON (1 << 0)
# define SDVO_DOT_CRAWL_DEFAULT_ON (1 << 1)
#define SDVO_CMD_GET_DITHER 0x72
#define SDVO_CMD_SET_DITHER 0x73
# define SDVO_DITHER_ON (1 << 0)
# define SDVO_DITHER_DEFAULT_ON (1 << 1)
#define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a
# define SDVO_CONTROL_BUS_PROM (1 << 0)
# define SDVO_CONTROL_BUS_DDC1 (1 << 1)
# define SDVO_CONTROL_BUS_DDC2 (1 << 2)
# define SDVO_CONTROL_BUS_DDC3 (1 << 3)
/* HDMI op codes */
#define SDVO_CMD_GET_SUPP_ENCODE 0x9d
#define SDVO_CMD_GET_ENCODE 0x9e
#define SDVO_CMD_SET_ENCODE 0x9f
#define SDVO_ENCODE_DVI 0x0
#define SDVO_ENCODE_HDMI 0x1
#define SDVO_CMD_SET_PIXEL_REPLI 0x8b
#define SDVO_CMD_GET_PIXEL_REPLI 0x8c
#define SDVO_CMD_GET_COLORIMETRY_CAP 0x8d
#define SDVO_CMD_SET_COLORIMETRY 0x8e
#define SDVO_COLORIMETRY_RGB256 0x0
#define SDVO_COLORIMETRY_RGB220 0x1
#define SDVO_COLORIMETRY_YCrCb422 0x3
#define SDVO_COLORIMETRY_YCrCb444 0x4
#define SDVO_CMD_GET_COLORIMETRY 0x8f
#define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90
#define SDVO_CMD_SET_AUDIO_STAT 0x91
#define SDVO_CMD_GET_AUDIO_STAT 0x92
#define SDVO_CMD_SET_HBUF_INDEX 0x93
#define SDVO_HBUF_INDEX_ELD 0
#define SDVO_HBUF_INDEX_AVI_IF 1
#define SDVO_CMD_GET_HBUF_INDEX 0x94
#define SDVO_CMD_GET_HBUF_INFO 0x95
#define SDVO_CMD_SET_HBUF_AV_SPLIT 0x96
#define SDVO_CMD_GET_HBUF_AV_SPLIT 0x97
#define SDVO_CMD_SET_HBUF_DATA 0x98
#define SDVO_CMD_GET_HBUF_DATA 0x99
#define SDVO_CMD_SET_HBUF_TXRATE 0x9a
#define SDVO_CMD_GET_HBUF_TXRATE 0x9b
#define SDVO_HBUF_TX_DISABLED (0 << 6)
#define SDVO_HBUF_TX_ONCE (2 << 6)
#define SDVO_HBUF_TX_VSYNC (3 << 6)
#define SDVO_CMD_GET_AUDIO_TX_INFO 0x9c
#define SDVO_NEED_TO_STALL (1 << 7)
struct intel_sdvo_encode {
u8 dvi_rev;
u8 hdmi_rev;
} __packed;
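/*
 * HDMI infoframe write sequence (sketch; set_value() stands in for the
 * driver's intel_sdvo_set_value()): select a buffer with
 * SET_HBUF_INDEX, stream the payload in 8-byte chunks with
 * SET_HBUF_DATA, then enable transmission with SET_HBUF_TXRATE.
 *
 *	set_value(SDVO_CMD_SET_HBUF_INDEX, SDVO_HBUF_INDEX_AVI_IF);
 *	for (i = 0; i < len; i += 8)
 *		set_value(SDVO_CMD_SET_HBUF_DATA, &data[i], 8);
 *	set_value(SDVO_CMD_SET_HBUF_TXRATE, SDVO_HBUF_TX_VSYNC);
 */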

View file

@ -0,0 +1,282 @@
/*
* Copyright © 2013 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include "i915_drv.h"
#include "intel_drv.h"
/*
* IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
* VLV_VLV2_PUNIT_HAS_0.8.docx
*/
/* Standard MMIO read, non-posted */
#define SB_MRD_NP 0x00
/* Standard MMIO write, non-posted */
#define SB_MWR_NP 0x01
/* Private register read, double-word addressing, non-posted */
#define SB_CRRDDA_NP 0x06
/* Private register write, double-word addressing, non-posted */
#define SB_CRWRDA_NP 0x07
static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
u32 port, u32 opcode, u32 addr, u32 *val)
{
u32 cmd, be = 0xf, bar = 0;
bool is_read = (opcode == SB_MRD_NP || opcode == SB_CRRDDA_NP);
cmd = (devfn << IOSF_DEVFN_SHIFT) | (opcode << IOSF_OPCODE_SHIFT) |
(port << IOSF_PORT_SHIFT) | (be << IOSF_BYTE_ENABLES_SHIFT) |
(bar << IOSF_BAR_SHIFT);
WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) {
DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n",
is_read ? "read" : "write");
return -EAGAIN;
}
I915_WRITE(VLV_IOSF_ADDR, addr);
if (!is_read)
I915_WRITE(VLV_IOSF_DATA, *val);
I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) {
DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n",
is_read ? "read" : "write");
return -ETIMEDOUT;
}
if (is_read)
*val = I915_READ(VLV_IOSF_DATA);
I915_WRITE(VLV_IOSF_DATA, 0);
return 0;
}
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr)
{
u32 val = 0;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
mutex_lock(&dev_priv->dpio_lock);
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
SB_CRRDDA_NP, addr, &val);
mutex_unlock(&dev_priv->dpio_lock);
return val;
}
void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val)
{
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
mutex_lock(&dev_priv->dpio_lock);
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
SB_CRWRDA_NP, addr, &val);
mutex_unlock(&dev_priv->dpio_lock);
}
u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
SB_CRRDDA_NP, reg, &val);
return val;
}
void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
SB_CRWRDA_NP, reg, &val);
}
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
{
u32 val = 0;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
mutex_lock(&dev_priv->dpio_lock);
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_NC,
SB_CRRDDA_NP, addr, &val);
mutex_unlock(&dev_priv->dpio_lock);
return val;
}
u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC,
SB_CRRDDA_NP, reg, &val);
return val;
}
void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC,
SB_CRWRDA_NP, reg, &val);
}
u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
SB_CRRDDA_NP, reg, &val);
return val;
}
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
SB_CRWRDA_NP, reg, &val);
}
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
SB_CRRDDA_NP, reg, &val);
return val;
}
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
SB_CRWRDA_NP, reg, &val);
}
u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE,
SB_CRRDDA_NP, reg, &val);
return val;
}
void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE,
SB_CRWRDA_NP, reg, &val);
}
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
{
u32 val = 0;
vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
SB_MRD_NP, reg, &val);
/*
* FIXME: There might be some registers where all 1's is a valid value,
* so ideally we should check the register offset instead...
*/
WARN(val == 0xffffffff, "DPIO read pipe %c reg 0x%x == 0x%x\n",
pipe_name(pipe), reg, val);
return val;
}
void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val)
{
vlv_sideband_rw(dev_priv, DPIO_DEVFN, DPIO_PHY_IOSF_PORT(DPIO_PHY(pipe)),
SB_MWR_NP, reg, &val);
}
/* SBI access */
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
enum intel_sbi_destination destination)
{
u32 value = 0;
WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
return 0;
}
I915_WRITE(SBI_ADDR, (reg << 16));
if (destination == SBI_ICLK)
value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
else
value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
return 0;
}
return I915_READ(SBI_DATA);
}
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
enum intel_sbi_destination destination)
{
u32 tmp;
WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
return;
}
I915_WRITE(SBI_ADDR, (reg << 16));
I915_WRITE(SBI_DATA, value);
if (destination == SBI_ICLK)
tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
else
tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
return;
}
}
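/*
 * Usage sketch: SBI accesses are read-modify-write under dpio_lock.
 * SBI_SSCDIVINTPHASE6 and SBI_SSCDIVINTPHASE_PROPAGATE are register
 * and bit names used elsewhere in the driver, shown here only for
 * illustration.
 *
 *	mutex_lock(&dev_priv->dpio_lock);
 *	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
 *	tmp |= SBI_SSCDIVINTPHASE_PROPAGATE;
 *	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, tmp, SBI_ICLK);
 *	mutex_unlock(&dev_priv->dpio_lock);
 */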
u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRRDDA_NP,
reg, &val);
return val;
}
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRWRDA_NP,
reg, &val);
}

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large