From 4c96aecda6044fd5abf093a29896103c333a9726 Mon Sep 17 00:00:00 2001 From: Stephan Raue Date: Fri, 12 Nov 2010 23:14:20 +0100 Subject: [PATCH] linux: add multiversion support, add linux-2.6.37-rc1 as an second version Signed-off-by: Stephan Raue --- packages/linux/meta | 5 + .../linux-2.6.37-rc1-000_crosscompile.patch | 22 + ...nux-2.6.37-rc1-002_bash_only_feature.patch | 15 + .../linux-2.6.37-rc1-003-no_dev_console.patch | 20 + ...rc1-004_lower_undefined_mode_timeout.patch | 24 + ...-2.6.37-rc1-005_kconfig_no_timestamp.patch | 13 + .../linux-2.6.37-rc1-006_enable_utf8.patch | 25 + .../linux-2.6.37-rc1-007_die_floppy_die.patch | 30 + ...008-hda_intel_prealloc_4mb_dmabuffer.patch | 47 + ...009_disable_i8042_check_on_apple_mac.patch | 59 + ....6.37-rc1-050_add_appleir_usb_driver.patch | 702 + ...6.37-rc1-052-aureal_remote_quirk-0.1.patch | 113 + ...c1-110-drm_nouveau_upstream-20101111.patch | 11929 ++++++++++++++++ projects/ATV/options | 3 + projects/ION/options | 3 + projects/generic/options | 5 +- projects/intel/options | 3 + 17 files changed, 13017 insertions(+), 1 deletion(-) create mode 100644 packages/linux/patches/linux-2.6.37-rc1-000_crosscompile.patch create mode 100644 packages/linux/patches/linux-2.6.37-rc1-002_bash_only_feature.patch create mode 100644 packages/linux/patches/linux-2.6.37-rc1-003-no_dev_console.patch create mode 100644 packages/linux/patches/linux-2.6.37-rc1-004_lower_undefined_mode_timeout.patch create mode 100644 packages/linux/patches/linux-2.6.37-rc1-005_kconfig_no_timestamp.patch create mode 100644 packages/linux/patches/linux-2.6.37-rc1-006_enable_utf8.patch create mode 100644 packages/linux/patches/linux-2.6.37-rc1-007_die_floppy_die.patch create mode 100644 packages/linux/patches/linux-2.6.37-rc1-008-hda_intel_prealloc_4mb_dmabuffer.patch create mode 100644 packages/linux/patches/linux-2.6.37-rc1-009_disable_i8042_check_on_apple_mac.patch create mode 100644 packages/linux/patches/linux-2.6.37-rc1-050_add_appleir_usb_driver.patch 
create mode 100644 packages/linux/patches/linux-2.6.37-rc1-052-aureal_remote_quirk-0.1.patch create mode 100644 packages/linux/patches/linux-2.6.37-rc1-110-drm_nouveau_upstream-20101111.patch diff --git a/packages/linux/meta b/packages/linux/meta index 1f1a43e872..22e58e429f 100644 --- a/packages/linux/meta +++ b/packages/linux/meta @@ -12,3 +12,8 @@ PKG_SECTION="linux" PKG_SHORTDESC="linux26: The Linux kernel 2.6 precompiled kernel binary image and modules" PKG_LONGDESC="This package contains a precompiled kernel image and the modules." PKG_IS_ADDON="no" + +if [ "$LINUX_NEXT" = "yes" ]; then + PKG_VERSION="2.6.37-rc1" + PKG_URL="http://www.kernel.org/pub/linux/kernel/v2.6/testing/$PKG_NAME-$PKG_VERSION.tar.bz2" +fi diff --git a/packages/linux/patches/linux-2.6.37-rc1-000_crosscompile.patch b/packages/linux/patches/linux-2.6.37-rc1-000_crosscompile.patch new file mode 100644 index 0000000000..b4fc575828 --- /dev/null +++ b/packages/linux/patches/linux-2.6.37-rc1-000_crosscompile.patch @@ -0,0 +1,22 @@ +--- linux-2.6.24-rc2.orig/arch/x86/boot/tools/build.c 2007-10-06 12:26:14.000000000 +0200 ++++ linux-2.6.24-rc2/arch/x86/boot/tools/build.c 2007-10-06 12:27:36.000000000 +0200 +@@ -29,7 +29,6 @@ + #include + #include + #include +-#include + #include + #include + #include +@@ -42,6 +41,11 @@ + #define DEFAULT_MAJOR_ROOT 0 + #define DEFAULT_MINOR_ROOT 0 + ++#undef major ++#define major(dev) ((int)(((dev) >> 8) & 0xff)) ++#undef minor ++#define minor(dev) ((int)((dev) & 0xff)) ++ + /* Minimal number of setup sectors */ + #define SETUP_SECT_MIN 5 + #define SETUP_SECT_MAX 64 diff --git a/packages/linux/patches/linux-2.6.37-rc1-002_bash_only_feature.patch b/packages/linux/patches/linux-2.6.37-rc1-002_bash_only_feature.patch new file mode 100644 index 0000000000..a1028d15aa --- /dev/null +++ b/packages/linux/patches/linux-2.6.37-rc1-002_bash_only_feature.patch @@ -0,0 +1,15 @@ +Index: linux-2.6.16/scripts/gen_initramfs_list.sh 
+=================================================================== +--- linux-2.6.16.orig/scripts/gen_initramfs_list.sh 2006-03-20 18:41:34.000000000 +0100 ++++ linux-2.6.16/scripts/gen_initramfs_list.sh 2006-03-20 18:42:40.000000000 +0100 +@@ -56,9 +56,7 @@ + + parse() { + local location="$1" +- local name="${location/${srcdir}//}" +- # change '//' into '/' +- name="${name//\/\///}" ++ local name="$(echo "$location" | sed -e 's%$srcdir%%' -e 's%//*%/%g')" + local mode="$2" + local uid="$3" + local gid="$4" diff --git a/packages/linux/patches/linux-2.6.37-rc1-003-no_dev_console.patch b/packages/linux/patches/linux-2.6.37-rc1-003-no_dev_console.patch new file mode 100644 index 0000000000..9b5e51437d --- /dev/null +++ b/packages/linux/patches/linux-2.6.37-rc1-003-no_dev_console.patch @@ -0,0 +1,20 @@ +diff -Naur linux-2.6.34-rc7/init/main.c linux-2.6.34-rc7.patch/init/main.c +--- linux-2.6.34-rc7/init/main.c 2010-05-10 03:36:28.000000000 +0200 ++++ linux-2.6.34-rc7.patch/init/main.c 2010-05-15 12:28:34.767241760 +0200 +@@ -886,8 +886,14 @@ + do_basic_setup(); + + /* Open the /dev/console on the rootfs, this should never fail */ +- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0) +- printk(KERN_WARNING "Warning: unable to open an initial console.\n"); ++ char *console = "/dev_console"; ++ ++ if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0) { ++ sys_mknod(console, S_IFCHR|0600, (TTYAUX_MAJOR<<8)|1); ++ if (sys_open(console, O_RDWR, 0) < 0) ++ printk(KERN_WARNING "Warning: unable to open an initial console.\n"); ++ sys_unlink(console); ++ } + + (void) sys_dup(0); + (void) sys_dup(0); diff --git a/packages/linux/patches/linux-2.6.37-rc1-004_lower_undefined_mode_timeout.patch b/packages/linux/patches/linux-2.6.37-rc1-004_lower_undefined_mode_timeout.patch new file mode 100644 index 0000000000..a0aca61d23 --- /dev/null +++ b/packages/linux/patches/linux-2.6.37-rc1-004_lower_undefined_mode_timeout.patch @@ -0,0 +1,24 @@ +diff -Naur 
linux-2.6.23-rc9.orig/arch/i386/boot/tty.c linux-2.6.23-rc9/arch/i386/boot/tty.c +--- linux-2.6.23-rc9.orig/arch/x86/boot/tty.c 2007-10-06 12:26:14.000000000 +0200 ++++ linux-2.6.23-rc9/arch/x86/boot/tty.c 2007-10-06 12:37:47.000000000 +0200 +@@ -92,7 +92,7 @@ + + int getchar_timeout(void) + { +- int cnt = 30; ++ int cnt = 3; + int t0, t1; + + t0 = gettime(); +diff -Naur linux-2.6.23-rc9.orig/arch/i386/boot/video.c linux-2.6.23-rc9/arch/i386/boot/video.c +--- linux-2.6.23-rc9.orig/arch/x86/boot/video.c 2007-10-06 12:26:14.000000000 +0200 ++++ linux-2.6.23-rc9/arch/x86/boot/video.c 2007-10-06 12:36:05.000000000 +0200 +@@ -329,7 +329,7 @@ + unsigned int sel; + + puts("Press to see video modes available, " +- " to continue, or wait 30 sec\n"); ++ " to continue, or wait 3 sec\n"); + + kbd_flush(); + while (1) { diff --git a/packages/linux/patches/linux-2.6.37-rc1-005_kconfig_no_timestamp.patch b/packages/linux/patches/linux-2.6.37-rc1-005_kconfig_no_timestamp.patch new file mode 100644 index 0000000000..332e553831 --- /dev/null +++ b/packages/linux/patches/linux-2.6.37-rc1-005_kconfig_no_timestamp.patch @@ -0,0 +1,13 @@ +Index: linux-2.6.16/scripts/kconfig/confdata.c +=================================================================== +--- linux-2.6.16.orig/scripts/kconfig/confdata.c 2006-03-20 06:53:29.000000000 +0100 ++++ linux-2.6.16/scripts/kconfig/confdata.c 2006-03-20 18:47:06.000000000 +0100 +@@ -340,7 +340,7 @@ + int type, l; + const char *str; + time_t now; +- int use_timestamp = 1; ++ int use_timestamp = 0; + char *env; + + dirname[0] = 0; diff --git a/packages/linux/patches/linux-2.6.37-rc1-006_enable_utf8.patch b/packages/linux/patches/linux-2.6.37-rc1-006_enable_utf8.patch new file mode 100644 index 0000000000..bee1cf3da8 --- /dev/null +++ b/packages/linux/patches/linux-2.6.37-rc1-006_enable_utf8.patch @@ -0,0 +1,25 @@ +diff -Naur linux-2.6.31-rc4.orig/fs/fat/inode.c linux-2.6.31-rc4/fs/fat/inode.c +--- linux-2.6.31-rc4.orig/fs/fat/inode.c 2009-07-25 
12:47:41.000000000 +0200 ++++ linux-2.6.31-rc4/fs/fat/inode.c 2009-07-25 13:38:18.000000000 +0200 +@@ -979,7 +979,8 @@ + } + opts->name_check = 'n'; + opts->quiet = opts->showexec = opts->sys_immutable = opts->dotsOK = 0; +- opts->utf8 = opts->unicode_xlate = 0; ++ opts->utf8 = 1; ++ opts->unicode_xlate = 0; + opts->numtail = 1; + opts->usefree = opts->nocase = 0; + opts->tz_utc = 0; +diff -Naur linux-2.6.31-rc4.orig/fs/isofs/inode.c linux-2.6.31-rc4/fs/isofs/inode.c +--- linux-2.6.31-rc4.orig/fs/isofs/inode.c 2009-07-25 12:47:41.000000000 +0200 ++++ linux-2.6.31-rc4/fs/isofs/inode.c 2009-07-25 13:38:49.000000000 +0200 +@@ -377,7 +377,7 @@ + popt->gid = 0; + popt->uid = 0; + popt->iocharset = NULL; +- popt->utf8 = 0; ++ popt->utf8 = 1; + popt->overriderockperm = 0; + popt->session=-1; + popt->sbsector=-1; diff --git a/packages/linux/patches/linux-2.6.37-rc1-007_die_floppy_die.patch b/packages/linux/patches/linux-2.6.37-rc1-007_die_floppy_die.patch new file mode 100644 index 0000000000..76db312182 --- /dev/null +++ b/packages/linux/patches/linux-2.6.37-rc1-007_die_floppy_die.patch @@ -0,0 +1,30 @@ +From 4ff58b642f80dedb20533978123d89b5ac9b1ed5 Mon Sep 17 00:00:00 2001 +From: Kyle McMartin +Date: Tue, 30 Mar 2010 00:04:29 -0400 +Subject: die-floppy-die + +Kill the floppy.ko pnp modalias. We were surviving just fine without +autoloading floppy drivers, tyvm. + +Please feel free to register all complaints in the wastepaper bin. 
+--- + drivers/block/floppy.c | 3 +-- + 1 files changed, 1 insertions(+), 2 deletions(-) + +diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c +index 90c4038..f4a0b90 100644 +--- a/drivers/block/floppy.c ++++ b/drivers/block/floppy.c +@@ -4619,8 +4619,7 @@ static const struct pnp_device_id floppy_pnpids[] = { + {"PNP0700", 0}, + {} + }; +- +-MODULE_DEVICE_TABLE(pnp, floppy_pnpids); ++/* MODULE_DEVICE_TABLE(pnp, floppy_pnpids); */ + + #else + +-- +1.7.0.1 + diff --git a/packages/linux/patches/linux-2.6.37-rc1-008-hda_intel_prealloc_4mb_dmabuffer.patch b/packages/linux/patches/linux-2.6.37-rc1-008-hda_intel_prealloc_4mb_dmabuffer.patch new file mode 100644 index 0000000000..36e6aca4fa --- /dev/null +++ b/packages/linux/patches/linux-2.6.37-rc1-008-hda_intel_prealloc_4mb_dmabuffer.patch @@ -0,0 +1,47 @@ +From c69fcbd1f60b0842f7c1ad2c95692ffd19c4932b Mon Sep 17 00:00:00 2001 +From: Kyle McMartin +Date: Mon, 29 Mar 2010 23:56:08 -0400 +Subject: hda_intel-prealloc-4mb-dmabuffer + +--- + sound/pci/hda/hda_intel.c | 14 +++++++++++++- + 1 files changed, 13 insertions(+), 1 deletions(-) + +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c +index 4bb9067..37db515 100644 +--- a/sound/pci/hda/hda_intel.c ++++ b/sound/pci/hda/hda_intel.c +@@ -1986,6 +1986,7 @@ azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec, + struct azx_pcm *apcm; + int pcm_dev = cpcm->device; + int s, err; ++ size_t prealloc_min = 64*1024; /* 64KB */ + + if (pcm_dev >= HDA_MAX_PCMS) { + snd_printk(KERN_ERR SFX "Invalid PCM device number %d\n", +@@ -2019,10 +2020,21 @@ azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec, + if (cpcm->stream[s].substreams) + snd_pcm_set_ops(pcm, s, &azx_pcm_ops); + } ++ + /* buffer pre-allocation */ ++ ++ /* subtle, don't allocate a big buffer for modems... ++ * also, don't just test 32BIT_MASK, since azx supports ++ * 64-bit DMA in some cases. 
++ */ ++ /* lennart wants a 2.2MB buffer for 2sec of 48khz */ ++ if (pcm->dev_class == SNDRV_PCM_CLASS_GENERIC && ++ chip->pci->dma_mask >= DMA_32BIT_MASK) ++ prealloc_min = 4 * 1024 * 1024; /* 4MB */ ++ + snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG, + snd_dma_pci_data(chip->pci), +- 1024 * 64, 32 * 1024 * 1024); ++ prealloc_min, 32 * 1024 * 1024); + return 0; + } + +-- +1.7.0.1 + diff --git a/packages/linux/patches/linux-2.6.37-rc1-009_disable_i8042_check_on_apple_mac.patch b/packages/linux/patches/linux-2.6.37-rc1-009_disable_i8042_check_on_apple_mac.patch new file mode 100644 index 0000000000..f99d0f900c --- /dev/null +++ b/packages/linux/patches/linux-2.6.37-rc1-009_disable_i8042_check_on_apple_mac.patch @@ -0,0 +1,59 @@ +From 2a79554c864ac58fa2ad982f0fcee2cc2aa33eb5 Mon Sep 17 00:00:00 2001 +From: Bastien Nocera +Date: Thu, 20 May 2010 10:30:31 -0400 +Subject: Disable i8042 checks on Intel Apple Macs + +As those computers never had any i8042 controllers, and the +current lookup code could potentially lock up/hang/wait for +timeout for long periods of time. 
+ +Fixes intermittent hangs on boot on a MacbookAir1,1 + +Signed-off-by: Bastien Nocera +--- + drivers/input/serio/i8042.c | 22 ++++++++++++++++++++++ + 1 files changed, 22 insertions(+), 0 deletions(-) + +diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c +index 6440a8f..4d7cf98 100644 +--- a/drivers/input/serio/i8042.c ++++ b/drivers/input/serio/i8042.c +@@ -1451,6 +1451,22 @@ static struct platform_driver i8042_driver = { + .shutdown = i8042_shutdown, + }; + ++#ifdef CONFIG_DMI ++static struct dmi_system_id __initdata dmi_system_table[] = { ++ { ++ .matches = { ++ DMI_MATCH(DMI_BIOS_VENDOR, "Apple Computer, Inc.") ++ }, ++ }, ++ { ++ .matches = { ++ DMI_MATCH(DMI_BIOS_VENDOR, "Apple Inc.") ++ }, ++ }, ++ {} ++}; ++#endif /*CONFIG_DMI*/ ++ + static int __init i8042_init(void) + { + struct platform_device *pdev; +@@ -1458,6 +1474,12 @@ static int __init i8042_init(void) + + dbg_init(); + ++#ifdef CONFIG_DMI ++ /* Intel Apple Macs never have an i8042 controller */ ++ if (dmi_check_system(dmi_system_table) > 0) ++ return -ENODEV; ++#endif /*CONFIG_DMI*/ ++ + err = i8042_platform_init(); + if (err) + return err; +-- +1.7.0.1 + diff --git a/packages/linux/patches/linux-2.6.37-rc1-050_add_appleir_usb_driver.patch b/packages/linux/patches/linux-2.6.37-rc1-050_add_appleir_usb_driver.patch new file mode 100644 index 0000000000..61edb8061e --- /dev/null +++ b/packages/linux/patches/linux-2.6.37-rc1-050_add_appleir_usb_driver.patch @@ -0,0 +1,702 @@ +From e11e9e78799a7641fe0dc5289f35f2604a4b71a3 Mon Sep 17 00:00:00 2001 +From: Bastien Nocera +Date: Sun, 17 Jan 2010 00:40:15 +0000 +Subject: [PATCH] Input: add appleir USB driver + +This driver was originally written by James McKenzie, updated by +Greg Kroah-Hartman, further updated by myself, with suspend support +added. + +More recent versions of the IR receiver are also supported through +a patch by Alex Karpenko. 
The patch also adds support for the 2nd +and 5th generation of the controller, and the menu key on newer +brushed metal remotes. + +Tested on a MacbookAir1,1 + +Signed-off-by: Bastien Nocera +--- + Documentation/input/appleir.txt | 46 ++++ + drivers/hid/hid-apple.c | 4 - + drivers/hid/hid-core.c | 7 +- + drivers/hid/hid-ids.h | 5 +- + drivers/input/misc/Kconfig | 13 + + drivers/input/misc/Makefile | 1 + + drivers/input/misc/appleir.c | 519 +++++++++++++++++++++++++++++++++++++++ + 7 files changed, 588 insertions(+), 7 deletions(-) + create mode 100644 Documentation/input/appleir.txt + create mode 100644 drivers/input/misc/appleir.c + +diff --git a/Documentation/input/appleir.txt b/Documentation/input/appleir.txt +new file mode 100644 +index 0000000..db637fb +--- /dev/null ++++ b/Documentation/input/appleir.txt +@@ -0,0 +1,46 @@ ++Apple IR receiver Driver (appleir) ++---------------------------------- ++ Copyright (C) 2009 Bastien Nocera ++ ++The appleir driver is a kernel input driver to handle Apple's IR ++receivers (and associated remotes) in the kernel. ++ ++The driver is an input driver which only handles "official" remotes ++as built and sold by Apple. ++ ++Authors ++------- ++ ++James McKenzie (original driver) ++Alex Karpenko (05ac:8242 support) ++Greg Kroah-Hartman (cleanups and original submission) ++Bastien Nocera (further cleanups, brushed metal "enter" ++button support and suspend support) ++ ++Supported hardware ++------------------ ++ ++- All Apple laptops and desktops from 2005 onwards, except: ++ - the unibody Macbook (2009) ++ - Mac Pro (all versions) ++- Apple TV (all revisions prior to September 2010) ++ ++The remote will only support the 6 (old white) or 7 (brushed metal) buttons ++of the remotes as sold by Apple. See the next section if you want to use ++other remotes or want to use lirc with the device instead of the kernel driver. 
++ ++Using lirc (native) instead of the kernel driver ++------------------------------------------------ ++ ++First, you will need to disable the kernel driver for the receiver. ++ ++This can be achieved by passing quirks to the usbhid driver. ++The quirk line would be: ++usbhid.quirks=0x05ac:0x8242:0x40000010 ++ ++With 0x05ac being the vendor ID (Apple, you shouldn't need to change this) ++With 0x8242 being the product ID (check the output of lsusb for your hardware) ++And 0x10 being "HID_QUIRK_HIDDEV_FORCE" and 0x40000000 being "HID_QUIRK_NO_IGNORE" ++ ++This should force the creation of a hiddev device for the receiver, and ++make it usable under lirc. +diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c +index bba05d0..0059d5a 100644 +--- a/drivers/hid/hid-apple.c ++++ b/drivers/hid/hid-apple.c +@@ -361,10 +361,6 @@ static void apple_remove(struct hid_device *hdev) + } + + static const struct hid_device_id apple_devices[] = { +- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ATV_IRCONTROL), +- .driver_data = APPLE_HIDDEV | APPLE_IGNORE_HIDINPUT }, +- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4), +- .driver_data = APPLE_HIDDEV | APPLE_IGNORE_HIDINPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE), + .driver_data = APPLE_MIGHTYMOUSE | APPLE_INVERT_HWHEEL }, + +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c +index baa25ad..abc5bd7 100644 +--- a/drivers/hid/hid-core.c ++++ b/drivers/hid/hid-core.c +@@ -1244,8 +1244,6 @@ static const struct hid_device_id hid_blacklist[] = { + #if defined(CONFIG_HID_ACRUX_FF) || defined(CONFIG_HID_ACRUX_FF_MODULE) + { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) }, + #endif +- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ATV_IRCONTROL) }, +- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, 
USB_DEVICE_ID_APPLE_MAGICMOUSE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) }, +@@ -1577,6 +1575,11 @@ static const struct hid_device_id hid_ignore_list[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) }, + { HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL2) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL3) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) }, ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL5) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)}, + { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)}, + { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) }, +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h +index 11af537..360a5ca 100644 +--- a/drivers/hid/hid-ids.h ++++ b/drivers/hid/hid-ids.h +@@ -100,8 +100,11 @@ + #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b + #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a + #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b +-#define USB_DEVICE_ID_APPLE_ATV_IRCONTROL 0x8241 ++#define USB_DEVICE_ID_APPLE_IRCONTROL 0x8240 ++#define USB_DEVICE_ID_APPLE_IRCONTROL2 0x1440 ++#define USB_DEVICE_ID_APPLE_IRCONTROL3 0x8241 + #define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242 ++#define USB_DEVICE_ID_APPLE_IRCONTROL5 0x8243 + + #define USB_VENDOR_ID_ASUS 0x0486 + #define USB_DEVICE_ID_ASUS_T91MT 0x0185 +diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig +index 60de906..2f2f2e7 100644 +--- a/drivers/input/misc/Kconfig ++++ b/drivers/input/misc/Kconfig +@@ -209,6 +209,19 @@ config INPUT_KEYSPAN_REMOTE + To compile this driver as a module, choose M here: the module will 
+ be called keyspan_remote. + ++config INPUT_APPLEIR ++ tristate "Apple infrared receiver (built in)" ++ depends on USB_ARCH_HAS_HCD ++ select USB ++ help ++ Say Y here if you want to use a Apple infrared remote control. All ++ the Apple computers from 2005 onwards include such a port, except ++ the unibody Macbook (2009), and Mac Pros. This receiver is also ++ used in the Apple TV set-top box prior to the 2010 model. ++ ++ To compile this driver as a module, choose M here: the module will ++ be called appleir. ++ + config INPUT_POWERMATE + tristate "Griffin PowerMate and Contour Jog support" + depends on USB_ARCH_HAS_HCD +diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile +index 1fe1f6c..d5ef2b9 100644 +--- a/drivers/input/misc/Makefile ++++ b/drivers/input/misc/Makefile +@@ -13,6 +13,7 @@ obj-$(CONFIG_INPUT_ADXL34X) += adxl34x.o + obj-$(CONFIG_INPUT_ADXL34X_I2C) += adxl34x-i2c.o + obj-$(CONFIG_INPUT_ADXL34X_SPI) += adxl34x-spi.o + obj-$(CONFIG_INPUT_APANEL) += apanel.o ++obj-$(CONFIG_INPUT_APPLEIR) += appleir.o + obj-$(CONFIG_INPUT_ATI_REMOTE) += ati_remote.o + obj-$(CONFIG_INPUT_ATI_REMOTE2) += ati_remote2.o + obj-$(CONFIG_INPUT_ATLAS_BTNS) += atlas_btns.o +diff --git a/drivers/input/misc/appleir.c b/drivers/input/misc/appleir.c +new file mode 100644 +index 0000000..3817a3c +--- /dev/null ++++ b/drivers/input/misc/appleir.c +@@ -0,0 +1,519 @@ ++/* ++ * appleir: USB driver for the apple ir device ++ * ++ * Original driver written by James McKenzie ++ * Ported to recent 2.6 kernel versions by Greg Kroah-Hartman ++ * ++ * Copyright (C) 2006 James McKenzie ++ * Copyright (C) 2008 Greg Kroah-Hartman ++ * Copyright (C) 2008 Novell Inc. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the Free ++ * Software Foundation, version 2. 
++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define DRIVER_VERSION "v1.2" ++#define DRIVER_AUTHOR "James McKenzie" ++#define DRIVER_DESC "Apple infrared receiver driver" ++#define DRIVER_LICENSE "GPL" ++ ++MODULE_AUTHOR(DRIVER_AUTHOR); ++MODULE_DESCRIPTION(DRIVER_DESC); ++MODULE_LICENSE(DRIVER_LICENSE); ++ ++#define USB_VENDOR_ID_APPLE 0x05ac ++#define USB_DEVICE_ID_APPLE_IRCONTROL 0x8240 ++#define USB_DEVICE_ID_APPLE_IRCONTROL2 0x1440 ++#define USB_DEVICE_ID_APPLE_IRCONTROL3 0x8241 ++#define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242 ++#define USB_DEVICE_ID_APPLE_IRCONTROL5 0x8243 ++ ++#define URB_SIZE 32 ++ ++#define MAX_KEYS 9 ++#define MAX_KEYS_MASK (MAX_KEYS - 1) ++ ++#define dbginfo(dev, format, arg...) do { if (debug) dev_info(dev , format , ## arg); } while (0) ++ ++static int debug; ++module_param(debug, int, 0644); ++MODULE_PARM_DESC(debug, "Enable extra debug messages and information"); ++ ++/* I have two devices both of which report the following */ ++/* 25 87 ee 83 0a + */ ++/* 25 87 ee 83 0c - */ ++/* 25 87 ee 83 09 << */ ++/* 25 87 ee 83 06 >> */ ++/* 25 87 ee 83 05 >" */ ++/* 25 87 ee 83 03 menu */ ++/* 26 00 00 00 00 for key repeat*/ ++ ++/* Thomas Glanzmann reports the following responses */ ++/* 25 87 ee ca 0b + */ ++/* 25 87 ee ca 0d - */ ++/* 25 87 ee ca 08 << */ ++/* 25 87 ee ca 07 >> */ ++/* 25 87 ee ca 04 >" */ ++/* 25 87 ee ca 02 menu */ ++/* 26 00 00 00 00 for key repeat*/ ++/* He also observes the following event sometimes */ ++/* sent after a key is release, which I interpret */ ++/* as a flat battery message */ ++/* 25 87 e0 ca 06 flat battery */ ++ ++/* Alexandre Karpenko reports the following responses for Device ID 0x8242 */ ++/* 25 87 ee 47 0b + */ ++/* 25 87 ee 47 0d - */ ++/* 25 87 ee 47 08 << */ ++/* 25 87 ee 47 07 >> */ ++/* 25 87 ee 47 04 >" */ ++/* 25 87 ee 47 02 menu */ ++/* 26 87 ee 47 ** for key repeat (** is the code of the key being held) */ 
++ ++/* Bastien Nocera's "new" remote */ ++/* 25 87 ee 91 5f followed by ++ * 25 87 ee 91 05 gives you >" ++ * ++ * 25 87 ee 91 5c followed by ++ * 25 87 ee 91 05 gives you the middle button */ ++ ++static const unsigned short appleir_key_table[] = { ++ KEY_RESERVED, ++ KEY_MENU, ++ KEY_PLAYPAUSE, ++ KEY_FORWARD, ++ KEY_BACK, ++ KEY_VOLUMEUP, ++ KEY_VOLUMEDOWN, ++ KEY_ENTER, ++ KEY_RESERVED, ++}; ++ ++struct appleir { ++ struct input_dev *input_dev; ++ unsigned short keymap[ARRAY_SIZE(appleir_key_table)]; ++ u8 *data; ++ dma_addr_t dma_buf; ++ struct usb_device *usbdev; ++ unsigned int flags; ++ struct urb *urb; ++ struct timer_list key_up_timer; ++ int current_key; ++ int prev_key_idx; ++ char phys[32]; ++}; ++ ++static DEFINE_MUTEX(appleir_mutex); ++ ++enum { ++ APPLEIR_OPENED = 0x1, ++ APPLEIR_SUSPENDED = 0x2, ++}; ++ ++static struct usb_device_id appleir_ids[] = { ++ { USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) }, ++ { USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL2) }, ++ { USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL3) }, ++ { USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) }, ++ { USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL5) }, ++ {} ++}; ++MODULE_DEVICE_TABLE(usb, appleir_ids); ++ ++static void dump_packet(struct appleir *appleir, char *msg, u8 *data, int len) ++{ ++ int i; ++ ++ printk(KERN_ERR "appleir: %s (%d bytes)", msg, len); ++ ++ for (i = 0; i < len; ++i) ++ printk(" %02x", data[i]); ++ printk(" (should be command %d)\n", (data[4] >> 1) & MAX_KEYS_MASK); ++} ++ ++static int get_key(int data) ++{ ++ switch (data) { ++ case 0x02: ++ case 0x03: ++ /* menu */ ++ return 1; ++ case 0x04: ++ case 0x05: ++ /* >" */ ++ return 2; ++ case 0x06: ++ case 0x07: ++ /* >> */ ++ return 3; ++ case 0x08: ++ case 0x09: ++ /* << */ ++ return 4; ++ case 0x0a: ++ case 0x0b: ++ /* + */ ++ return 5; ++ case 0x0c: ++ case 0x0d: ++ /* - */ ++ return 6; ++ case 0x5c: ++ /* Middle button, on 
newer remotes, ++ * part of a 2 packet-command */ ++ return -7; ++ default: ++ return -1; ++ } ++} ++ ++static void key_up(struct appleir *appleir, int key) ++{ ++ dbginfo(&appleir->input_dev->dev, "key %d up\n", key); ++ input_report_key(appleir->input_dev, key, 0); ++ input_sync(appleir->input_dev); ++} ++ ++static void key_down(struct appleir *appleir, int key) ++{ ++ dbginfo(&appleir->input_dev->dev, "key %d down\n", key); ++ input_report_key(appleir->input_dev, key, 1); ++ input_sync(appleir->input_dev); ++} ++ ++static void battery_flat(struct appleir *appleir) ++{ ++ dev_err(&appleir->input_dev->dev, "possible flat battery?\n"); ++} ++ ++static void key_up_tick(unsigned long data) ++{ ++ struct appleir *appleir = (struct appleir *)data; ++ ++ if (appleir->current_key) { ++ key_up(appleir, appleir->current_key); ++ appleir->current_key = 0; ++ } ++} ++ ++static void new_data(struct appleir *appleir, u8 *data, int len) ++{ ++ static const u8 keydown[] = { 0x25, 0x87, 0xee }; ++ static const u8 keyrepeat[] = { 0x26, }; ++ static const u8 flatbattery[] = { 0x25, 0x87, 0xe0 }; ++ ++ if (debug) ++ dump_packet(appleir, "received", data, len); ++ ++ if (len != 5) ++ return; ++ ++ if (!memcmp(data, keydown, sizeof(keydown))) { ++ int index; ++ ++ /* If we already have a key down, take it up before marking ++ this one down */ ++ if (appleir->current_key) ++ key_up(appleir, appleir->current_key); ++ ++ /* Handle dual packet commands */ ++ if (appleir->prev_key_idx > 0) ++ index = appleir->prev_key_idx; ++ else ++ index = get_key(data[4]); ++ ++ if (index > 0) { ++ appleir->current_key = appleir->keymap[index]; ++ ++ key_down(appleir, appleir->current_key); ++ /* Remote doesn't do key up, either pull them up, in the test ++ above, or here set a timer which pulls them up after 1/8 s */ ++ mod_timer(&appleir->key_up_timer, jiffies + HZ / 8); ++ appleir->prev_key_idx = 0; ++ return; ++ } else if (index == -7) { ++ /* Remember key for next packet */ ++ appleir->prev_key_idx 
= 0 - index; ++ return; ++ } ++ } ++ ++ appleir->prev_key_idx = 0; ++ ++ if (!memcmp(data, keyrepeat, sizeof(keyrepeat))) { ++ key_down(appleir, appleir->current_key); ++ /* Remote doesn't do key up, either pull them up, in the test ++ above, or here set a timer which pulls them up after 1/8 s */ ++ mod_timer(&appleir->key_up_timer, jiffies + HZ / 8); ++ return; ++ } ++ ++ if (!memcmp(data, flatbattery, sizeof(flatbattery))) { ++ battery_flat(appleir); ++ /* Fall through */ ++ } ++ ++ dump_packet(appleir, "unknown packet", data, len); ++} ++ ++static void appleir_urb(struct urb *urb) ++{ ++ struct appleir *appleir = urb->context; ++ int status = urb->status; ++ int retval; ++ ++ switch (status) { ++ case 0: ++ new_data(appleir, urb->transfer_buffer, urb->actual_length); ++ break; ++ case -ECONNRESET: ++ case -ENOENT: ++ case -ESHUTDOWN: ++ /* This urb is terminated, clean up */ ++ dbginfo(&appleir->input_dev->dev, "%s - urb shutting down with status: %d", __func__, ++ urb->status); ++ return; ++ default: ++ dbginfo(&appleir->input_dev->dev, "%s - nonzero urb status received: %d", __func__, ++ urb->status); ++ } ++ ++ retval = usb_submit_urb(urb, GFP_ATOMIC); ++ if (retval) ++ err("%s - usb_submit_urb failed with result %d", __func__, ++ retval); ++} ++ ++static int appleir_open(struct input_dev *dev) ++{ ++ struct appleir *appleir = input_get_drvdata(dev); ++ struct usb_interface *intf = usb_ifnum_to_if(appleir->usbdev, 0); ++ int r; ++ ++ r = usb_autopm_get_interface(intf); ++ if (r) { ++ dev_err(&intf->dev, ++ "%s(): usb_autopm_get_interface() = %d\n", __func__, r); ++ return r; ++ } ++ ++ mutex_lock(&appleir_mutex); ++ ++ if (usb_submit_urb(appleir->urb, GFP_ATOMIC)) { ++ r = -EIO; ++ goto fail; ++ } ++ ++ appleir->flags |= APPLEIR_OPENED; ++ ++ mutex_unlock(&appleir_mutex); ++ ++ usb_autopm_put_interface(intf); ++ ++ return 0; ++fail: ++ mutex_unlock(&appleir_mutex); ++ usb_autopm_put_interface(intf); ++ return r; ++} ++ ++static void appleir_close(struct 
input_dev *dev) ++{ ++ struct appleir *appleir = input_get_drvdata(dev); ++ ++ mutex_lock(&appleir_mutex); ++ ++ if (!(appleir->flags & APPLEIR_SUSPENDED)) { ++ usb_kill_urb(appleir->urb); ++ del_timer_sync(&appleir->key_up_timer); ++ } ++ ++ appleir->flags &= ~APPLEIR_OPENED; ++ ++ mutex_unlock(&appleir_mutex); ++} ++ ++static int appleir_probe(struct usb_interface *intf, ++ const struct usb_device_id *id) ++{ ++ struct usb_device *dev = interface_to_usbdev(intf); ++ struct usb_endpoint_descriptor *endpoint; ++ struct appleir *appleir = NULL; ++ struct input_dev *input_dev; ++ int retval = -ENOMEM; ++ int i; ++ ++ appleir = kzalloc(sizeof(struct appleir), GFP_KERNEL); ++ if (!appleir) ++ goto allocfail; ++ ++ appleir->data = usb_alloc_coherent(dev, URB_SIZE, GFP_KERNEL, ++ &appleir->dma_buf); ++ if (!appleir->data) ++ goto usbfail; ++ ++ appleir->urb = usb_alloc_urb(0, GFP_KERNEL); ++ if (!appleir->urb) ++ goto urbfail; ++ ++ appleir->usbdev = dev; ++ ++ input_dev = input_allocate_device(); ++ if (!input_dev) ++ goto inputfail; ++ ++ appleir->input_dev = input_dev; ++ ++ usb_make_path(dev, appleir->phys, sizeof(appleir->phys)); ++ strlcpy(appleir->phys, "/input0", sizeof(appleir->phys)); ++ ++ input_dev->name = "Apple Infrared Remote Controller"; ++ input_dev->phys = appleir->phys; ++ usb_to_input_id(dev, &input_dev->id); ++ input_dev->dev.parent = &intf->dev; ++ input_dev->keycode = appleir->keymap; ++ input_dev->keycodesize = sizeof(unsigned short); ++ input_dev->keycodemax = ARRAY_SIZE(appleir->keymap); ++ ++ input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_REP); ++ ++ memcpy(appleir->keymap, appleir_key_table, sizeof(appleir->keymap)); ++ for (i = 0; i < ARRAY_SIZE(appleir_key_table); i++) ++ set_bit(appleir->keymap[i], input_dev->keybit); ++ clear_bit(KEY_RESERVED, input_dev->keybit); ++ ++ input_set_drvdata(input_dev, appleir); ++ input_dev->open = appleir_open; ++ input_dev->close = appleir_close; ++ ++ endpoint = &intf->cur_altsetting->endpoint[0].desc; ++ ++ 
usb_fill_int_urb(appleir->urb, dev, ++ usb_rcvintpipe(dev, endpoint->bEndpointAddress), ++ appleir->data, 8, ++ appleir_urb, appleir, endpoint->bInterval); ++ ++ appleir->urb->transfer_dma = appleir->dma_buf; ++ appleir->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; ++ ++ setup_timer(&appleir->key_up_timer, ++ key_up_tick, (unsigned long) appleir); ++ ++ retval = input_register_device(appleir->input_dev); ++ if (retval) ++ goto inputfail; ++ ++ usb_set_intfdata(intf, appleir); ++ ++ return 0; ++ ++inputfail: ++ input_free_device(appleir->input_dev); ++ ++urbfail: ++ usb_free_urb(appleir->urb); ++ ++usbfail: ++ usb_free_coherent(dev, URB_SIZE, appleir->data, ++ appleir->dma_buf); ++ ++allocfail: ++ kfree(appleir); ++ ++ return retval; ++} ++ ++static void appleir_disconnect(struct usb_interface *intf) ++{ ++ struct appleir *appleir = usb_get_intfdata(intf); ++ ++ usb_set_intfdata(intf, NULL); ++ input_unregister_device(appleir->input_dev); ++ usb_free_urb(appleir->urb); ++ usb_free_coherent(interface_to_usbdev(intf), URB_SIZE, ++ appleir->data, appleir->dma_buf); ++ kfree(appleir); ++} ++ ++static int appleir_suspend(struct usb_interface *interface, ++ pm_message_t message) ++{ ++ struct appleir *appleir = usb_get_intfdata(interface); ++ ++ mutex_lock(&appleir_mutex); ++ if (appleir->flags & APPLEIR_OPENED) ++ usb_kill_urb(appleir->urb); ++ ++ appleir->flags |= APPLEIR_SUSPENDED; ++ ++ mutex_unlock(&appleir_mutex); ++ ++ return 0; ++} ++ ++static int appleir_resume(struct usb_interface *interface) ++{ ++ struct appleir *appleir; ++ int r = 0; ++ ++ appleir = usb_get_intfdata(interface); ++ ++ mutex_lock(&appleir_mutex); ++ if (appleir->flags & APPLEIR_OPENED) { ++ struct usb_endpoint_descriptor *endpoint; ++ ++ endpoint = &interface->cur_altsetting->endpoint[0].desc; ++ usb_fill_int_urb(appleir->urb, appleir->usbdev, ++ usb_rcvintpipe(appleir->usbdev, endpoint->bEndpointAddress), ++ appleir->data, 8, ++ appleir_urb, appleir, endpoint->bInterval); ++ 
appleir->urb->transfer_dma = appleir->dma_buf; ++ appleir->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; ++ ++ /* And reset the USB device */ ++ if (usb_submit_urb(appleir->urb, GFP_ATOMIC)) ++ r = -EIO; ++ } ++ ++ appleir->flags &= ~APPLEIR_SUSPENDED; ++ ++ mutex_unlock(&appleir_mutex); ++ ++ return r; ++} ++ ++static struct usb_driver appleir_driver = { ++ .name = "appleir", ++ .probe = appleir_probe, ++ .disconnect = appleir_disconnect, ++ .suspend = appleir_suspend, ++ .resume = appleir_resume, ++ .reset_resume = appleir_resume, ++ .id_table = appleir_ids, ++}; ++ ++static int __init appleir_init(void) ++{ ++ return usb_register(&appleir_driver); ++} ++ ++static void __exit appleir_exit(void) ++{ ++ usb_deregister(&appleir_driver); ++} ++ ++module_init(appleir_init); ++module_exit(appleir_exit); +-- +1.7.2.2 + diff --git a/packages/linux/patches/linux-2.6.37-rc1-052-aureal_remote_quirk-0.1.patch b/packages/linux/patches/linux-2.6.37-rc1-052-aureal_remote_quirk-0.1.patch new file mode 100644 index 0000000000..2cf266aac9 --- /dev/null +++ b/packages/linux/patches/linux-2.6.37-rc1-052-aureal_remote_quirk-0.1.patch @@ -0,0 +1,113 @@ +diff -Naur linux-2.6.36-rc6/drivers/hid/hid-aureal.c linux-2.6.36-rc6.patch/drivers/hid/hid-aureal.c +--- linux-2.6.36-rc6/drivers/hid/hid-aureal.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-2.6.36-rc6.patch/drivers/hid/hid-aureal.c 2010-10-05 02:28:50.925421529 +0200 +@@ -0,0 +1,60 @@ ++/* ++ * HID driver for some sunplus "special" devices ++ * ++ * Copyright (c) 1999 Andreas Gal ++ * Copyright (c) 2000-2005 Vojtech Pavlik ++ * Copyright (c) 2005 Michael Haboustak for Concept2, Inc ++ * Copyright (c) 2006-2007 Jiri Kosina ++ * Copyright (c) 2007 Paul Walmsley ++ * Copyright (c) 2008 Jiri Slaby ++ * Copyright (c) 2010 Franco Catrin ++ */ ++ ++/* ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the Free ++ * Software Foundation; 
either version 2 of the License, or (at your option) ++ * any later version. ++ */ ++ ++#include ++#include ++#include ++ ++#include "hid-ids.h" ++ ++static void aureal_report_fixup(struct hid_device *hdev, __u8 *rdesc, ++ unsigned int rsize) ++{ ++ if (rsize >= 54 && rdesc[52] == 0x25 && rdesc[53] == 0x01) { ++ dev_info(&hdev->dev, "fixing Aureal Cy se W-01RN USB_V3.1 " ++ "report descriptor. Keyboard Logical Maximum = 101\n"); ++ rdesc[53] = 0x65; ++ } ++} ++ ++static const struct hid_device_id aureal_devices[] = { ++ { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) }, ++ { } ++}; ++MODULE_DEVICE_TABLE(hid, aureal_devices); ++ ++static struct hid_driver aureal_driver = { ++ .name = "aureal", ++ .id_table = aureal_devices, ++ .report_fixup = aureal_report_fixup, ++}; ++ ++static int __init aureal_init(void) ++{ ++ return hid_register_driver(&aureal_driver); ++} ++ ++static void __exit aureal_exit(void) ++{ ++ hid_unregister_driver(&aureal_driver); ++} ++ ++module_init(aureal_init); ++module_exit(aureal_exit); ++MODULE_LICENSE("GPL"); +diff -Naur linux-2.6.36-rc6/drivers/hid/hid-ids.h linux-2.6.36-rc6.patch/drivers/hid/hid-ids.h +--- linux-2.6.36-rc6/drivers/hid/hid-ids.h 2010-09-29 03:01:22.000000000 +0200 ++++ linux-2.6.36-rc6.patch/drivers/hid/hid-ids.h 2010-10-05 02:30:00.651266940 +0200 +@@ -6,6 +6,7 @@ + * Copyright (c) 2005 Michael Haboustak for Concept2, Inc + * Copyright (c) 2006-2007 Jiri Kosina + * Copyright (c) 2007 Paul Walmsley ++ * Copyright (c) 2010 Franco Catrin + */ + + /* +@@ -316,6 +317,9 @@ + #define USB_DEVICE_ID_KYE_ERGO_525V 0x0087 + #define USB_DEVICE_ID_KYE_GPEN_560 0x5003 + ++#define USB_VENDOR_ID_AUREAL 0x0755 ++#define USB_DEVICE_ID_AUREAL_W01RN 0x2626 ++ + #define USB_VENDOR_ID_LABTEC 0x1020 + #define USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD 0x0006 + +diff -Naur linux-2.6.36-rc6/drivers/hid/Kconfig linux-2.6.36-rc6.patch/drivers/hid/Kconfig +--- linux-2.6.36-rc6/drivers/hid/Kconfig 2010-09-29 03:01:22.000000000 +0200 
++++ linux-2.6.36-rc6.patch/drivers/hid/Kconfig 2010-10-05 02:28:50.936421663 +0200 +@@ -87,6 +87,13 @@ + Say Y here if you want support for keyboards of Apple iBooks, PowerBooks, + MacBooks, MacBook Pros and Apple Aluminum. + ++config HID_AUREAL ++ tristate "Aureal" if EMBEDDED ++ depends on USB_HID ++ default !EMBEDDED ++ ---help--- ++ Support for Aureal Cy se W-01RN Remote Controller ++ + config HID_BELKIN + tristate "Belkin" if EMBEDDED + depends on USB_HID +diff -Naur linux-2.6.36-rc6/drivers/hid/Makefile linux-2.6.36-rc6.patch/drivers/hid/Makefile +--- linux-2.6.36-rc6/drivers/hid/Makefile 2010-09-29 03:01:22.000000000 +0200 ++++ linux-2.6.36-rc6.patch/drivers/hid/Makefile 2010-10-05 02:28:50.938421687 +0200 +@@ -26,6 +26,7 @@ + obj-$(CONFIG_HID_A4TECH) += hid-a4tech.o + obj-$(CONFIG_HID_ACRUX_FF) += hid-axff.o + obj-$(CONFIG_HID_APPLE) += hid-apple.o ++obj-$(CONFIG_HID_AUREAL) += hid-aureal.o + obj-$(CONFIG_HID_BELKIN) += hid-belkin.o + obj-$(CONFIG_HID_CANDO) += hid-cando.o + obj-$(CONFIG_HID_CHERRY) += hid-cherry.o diff --git a/packages/linux/patches/linux-2.6.37-rc1-110-drm_nouveau_upstream-20101111.patch b/packages/linux/patches/linux-2.6.37-rc1-110-drm_nouveau_upstream-20101111.patch new file mode 100644 index 0000000000..8a8e34765b --- /dev/null +++ b/packages/linux/patches/linux-2.6.37-rc1-110-drm_nouveau_upstream-20101111.patch @@ -0,0 +1,11929 @@ +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/Makefile linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/Makefile +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/Makefile 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/Makefile 2010-11-12 06:18:06.000000000 +0100 +@@ -5,7 +5,7 @@ + ccflags-y := -Iinclude/drm + nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ + nouveau_object.o nouveau_irq.o nouveau_notifier.o \ +- nouveau_sgdma.o nouveau_dma.o \ ++ nouveau_sgdma.o nouveau_dma.o nouveau_util.o \ + nouveau_bo.o nouveau_fence.o 
nouveau_gem.o nouveau_ttm.o \ + nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \ + nouveau_display.o nouveau_connector.o nouveau_fbcon.o \ +@@ -18,8 +18,9 @@ + nv04_graph.o nv10_graph.o nv20_graph.o \ + nv40_graph.o nv50_graph.o nvc0_graph.o \ + nv40_grctx.o nv50_grctx.o \ ++ nv84_crypt.o \ + nv04_instmem.o nv50_instmem.o nvc0_instmem.o \ +- nv50_crtc.o nv50_dac.o nv50_sor.o \ ++ nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \ + nv50_cursor.o nv50_display.o nv50_fbcon.o \ + nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \ + nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \ +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_backlight.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_backlight.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_backlight.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_backlight.c 2010-11-12 06:18:06.000000000 +0100 +@@ -31,6 +31,7 @@ + */ + + #include ++#include + + #include "drmP.h" + #include "nouveau_drv.h" +@@ -136,6 +137,14 @@ + { + struct drm_nouveau_private *dev_priv = dev->dev_private; + ++#ifdef CONFIG_ACPI ++ if (acpi_video_backlight_support()) { ++ NV_INFO(dev, "ACPI backlight interface available, " ++ "not registering our own\n"); ++ return 0; ++ } ++#endif ++ + switch (dev_priv->card_type) { + case NV_40: + return nouveau_nv40_backlight_init(dev); +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_bios.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_bios.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_bios.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_bios.c 2010-11-12 06:18:06.000000000 +0100 +@@ -6039,7 +6039,6 @@ + if (type != cte->type) + NV_WARN(dev, " -> type 0x%02x\n", cte->type); + } +- + } + } + +@@ -6829,7 +6828,7 @@ + struct drm_nouveau_private *dev_priv = dev->dev_private; + unsigned htotal; + +- if (dev_priv->chipset >= NV_50) 
{ ++ if (dev_priv->card_type >= NV_50) { + if (NVReadVgaCrtc(dev, 0, 0x00) == 0 && + NVReadVgaCrtc(dev, 0, 0x1a) == 0) + return false; +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_bo.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_bo.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_bo.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_bo.c 2010-11-12 06:18:06.000000000 +0100 +@@ -46,9 +46,7 @@ + if (unlikely(nvbo->gem)) + DRM_ERROR("bo %p still attached to GEM object\n", bo); + +- if (nvbo->tile) +- nv10_mem_expire_tiling(dev, nvbo->tile, NULL); +- ++ nv10_mem_put_tile_region(dev, nvbo->tile, NULL); + kfree(nvbo); + } + +@@ -143,8 +141,10 @@ + nvbo->no_vm = no_vm; + nvbo->tile_mode = tile_mode; + nvbo->tile_flags = tile_flags; ++ nvbo->bo.bdev = &dev_priv->ttm.bdev; + +- nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size); ++ nouveau_bo_fixup_align(dev, tile_mode, nouveau_bo_tile_layout(nvbo), ++ &align, &size); + align >>= PAGE_SHIFT; + + nouveau_bo_placement_set(nvbo, flags, 0); +@@ -176,6 +176,31 @@ + pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags; + } + ++static void ++set_placement_range(struct nouveau_bo *nvbo, uint32_t type) ++{ ++ struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); ++ ++ if (dev_priv->card_type == NV_10 && ++ nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM)) { ++ /* ++ * Make sure that the color and depth buffers are handled ++ * by independent memory controller units. Up to a 9x ++ * speed up when alpha-blending and depth-test are enabled ++ * at the same time. 
++ */ ++ int vram_pages = dev_priv->vram_size >> PAGE_SHIFT; ++ ++ if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) { ++ nvbo->placement.fpfn = vram_pages / 2; ++ nvbo->placement.lpfn = ~0; ++ } else { ++ nvbo->placement.fpfn = 0; ++ nvbo->placement.lpfn = vram_pages / 2; ++ } ++ } ++} ++ + void + nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy) + { +@@ -190,6 +215,8 @@ + pl->busy_placement = nvbo->busy_placements; + set_placement_list(nvbo->busy_placements, &pl->num_busy_placement, + type | busy, flags); ++ ++ set_placement_range(nvbo, type); + } + + int +@@ -456,16 +483,9 @@ + if (ret) + return ret; + +- if (nvbo->channel) { +- ret = nouveau_fence_sync(fence, nvbo->channel); +- if (ret) +- goto out; +- } +- + ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict, + no_wait_reserve, no_wait_gpu, new_mem); +-out: +- nouveau_fence_unref((void *)&fence); ++ nouveau_fence_unref(&fence); + return ret; + } + +@@ -525,7 +545,8 @@ + stride = 16 * 4; + height = amount / stride; + +- if (new_mem->mem_type == TTM_PL_VRAM && nvbo->tile_flags) { ++ if (new_mem->mem_type == TTM_PL_VRAM && ++ nouveau_bo_tile_layout(nvbo)) { + ret = RING_SPACE(chan, 8); + if (ret) + return ret; +@@ -546,7 +567,8 @@ + BEGIN_RING(chan, NvSubM2MF, 0x0200, 1); + OUT_RING (chan, 1); + } +- if (old_mem->mem_type == TTM_PL_VRAM && nvbo->tile_flags) { ++ if (old_mem->mem_type == TTM_PL_VRAM && ++ nouveau_bo_tile_layout(nvbo)) { + ret = RING_SPACE(chan, 8); + if (ret) + return ret; +@@ -652,17 +674,24 @@ + int ret; + + chan = nvbo->channel; +- if (!chan || nvbo->no_vm) ++ if (!chan || nvbo->no_vm) { + chan = dev_priv->channel; ++ mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX); ++ } + + if (dev_priv->card_type < NV_50) + ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem); + else + ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem); +- if (ret) +- return ret; ++ if (ret == 0) { ++ ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict, ++ no_wait_reserve, ++ 
no_wait_gpu, new_mem); ++ } + +- return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem); ++ if (chan == dev_priv->channel) ++ mutex_unlock(&chan->mutex); ++ return ret; + } + + static int +@@ -753,14 +782,16 @@ + if (dev_priv->card_type == NV_50) { + ret = nv50_mem_vm_bind_linear(dev, + offset + dev_priv->vm_vram_base, +- new_mem->size, nvbo->tile_flags, ++ new_mem->size, ++ nouveau_bo_tile_layout(nvbo), + offset); + if (ret) + return ret; + + } else if (dev_priv->card_type >= NV_10) { + *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size, +- nvbo->tile_mode); ++ nvbo->tile_mode, ++ nvbo->tile_flags); + } + + return 0; +@@ -776,9 +807,7 @@ + + if (dev_priv->card_type >= NV_10 && + dev_priv->card_type < NV_50) { +- if (*old_tile) +- nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj); +- ++ nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj); + *old_tile = new_tile; + } + } +@@ -894,7 +923,8 @@ + * nothing to do here. + */ + if (bo->mem.mem_type != TTM_PL_VRAM) { +- if (dev_priv->card_type < NV_50 || !nvbo->tile_flags) ++ if (dev_priv->card_type < NV_50 || ++ !nouveau_bo_tile_layout(nvbo)) + return 0; + } + +@@ -909,6 +939,22 @@ + return ttm_bo_validate(bo, &nvbo->placement, false, true, false); + } + ++void ++nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence) ++{ ++ struct nouveau_fence *old_fence; ++ ++ if (likely(fence)) ++ nouveau_fence_ref(fence); ++ ++ spin_lock(&nvbo->bo.lock); ++ old_fence = nvbo->bo.sync_obj; ++ nvbo->bo.sync_obj = fence; ++ spin_unlock(&nvbo->bo.lock); ++ ++ nouveau_fence_unref(&old_fence); ++} ++ + struct ttm_bo_driver nouveau_bo_driver = { + .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry, + .invalidate_caches = nouveau_bo_invalidate_caches, +@@ -916,11 +962,11 @@ + .evict_flags = nouveau_bo_evict_flags, + .move = nouveau_bo_move, + .verify_access = nouveau_bo_verify_access, +- .sync_obj_signaled = nouveau_fence_signalled, +- .sync_obj_wait = 
nouveau_fence_wait, +- .sync_obj_flush = nouveau_fence_flush, +- .sync_obj_unref = nouveau_fence_unref, +- .sync_obj_ref = nouveau_fence_ref, ++ .sync_obj_signaled = __nouveau_fence_signalled, ++ .sync_obj_wait = __nouveau_fence_wait, ++ .sync_obj_flush = __nouveau_fence_flush, ++ .sync_obj_unref = __nouveau_fence_unref, ++ .sync_obj_ref = __nouveau_fence_ref, + .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify, + .io_mem_reserve = &nouveau_ttm_io_mem_reserve, + .io_mem_free = &nouveau_ttm_io_mem_free, +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_channel.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_channel.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_channel.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_channel.c 2010-11-12 06:18:06.000000000 +0100 +@@ -107,53 +107,56 @@ + int + nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, + struct drm_file *file_priv, +- uint32_t vram_handle, uint32_t tt_handle) ++ uint32_t vram_handle, uint32_t gart_handle) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; + struct nouveau_channel *chan; +- int channel, user; +- int ret; ++ unsigned long flags; ++ int user, ret; + +- /* +- * Alright, here is the full story +- * Nvidia cards have multiple hw fifo contexts (praise them for that, +- * no complicated crash-prone context switches) +- * We allocate a new context for each app and let it write to it +- * directly (woo, full userspace command submission !) 
+- * When there are no more contexts, you lost +- */ +- for (channel = 0; channel < pfifo->channels; channel++) { +- if (dev_priv->fifos[channel] == NULL) ++ /* allocate and lock channel structure */ ++ chan = kzalloc(sizeof(*chan), GFP_KERNEL); ++ if (!chan) ++ return -ENOMEM; ++ chan->dev = dev; ++ chan->file_priv = file_priv; ++ chan->vram_handle = vram_handle; ++ chan->gart_handle = gart_handle; ++ ++ kref_init(&chan->ref); ++ atomic_set(&chan->users, 1); ++ mutex_init(&chan->mutex); ++ mutex_lock(&chan->mutex); ++ ++ /* allocate hw channel id */ ++ spin_lock_irqsave(&dev_priv->channels.lock, flags); ++ for (chan->id = 0; chan->id < pfifo->channels; chan->id++) { ++ if (!dev_priv->channels.ptr[chan->id]) { ++ nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]); + break; ++ } + } ++ spin_unlock_irqrestore(&dev_priv->channels.lock, flags); + +- /* no more fifos. you lost. */ +- if (channel == pfifo->channels) +- return -EINVAL; ++ if (chan->id == pfifo->channels) { ++ mutex_unlock(&chan->mutex); ++ kfree(chan); ++ return -ENODEV; ++ } + +- dev_priv->fifos[channel] = kzalloc(sizeof(struct nouveau_channel), +- GFP_KERNEL); +- if (!dev_priv->fifos[channel]) +- return -ENOMEM; +- chan = dev_priv->fifos[channel]; ++ NV_DEBUG(dev, "initialising channel %d\n", chan->id); + INIT_LIST_HEAD(&chan->nvsw.vbl_wait); ++ INIT_LIST_HEAD(&chan->nvsw.flip); + INIT_LIST_HEAD(&chan->fence.pending); +- chan->dev = dev; +- chan->id = channel; +- chan->file_priv = file_priv; +- chan->vram_handle = vram_handle; +- chan->gart_handle = tt_handle; +- +- NV_INFO(dev, "Allocating FIFO number %d\n", channel); + + /* Allocate DMA push buffer */ + chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev); + if (!chan->pushbuf_bo) { + ret = -ENOMEM; + NV_ERROR(dev, "pushbuf %d\n", ret); +- nouveau_channel_free(chan); ++ nouveau_channel_put(&chan); + return ret; + } + +@@ -161,18 +164,18 @@ + + /* Locate channel's user control regs */ + if (dev_priv->card_type < NV_40) +- user = 
NV03_USER(channel); ++ user = NV03_USER(chan->id); + else + if (dev_priv->card_type < NV_50) +- user = NV40_USER(channel); ++ user = NV40_USER(chan->id); + else +- user = NV50_USER(channel); ++ user = NV50_USER(chan->id); + + chan->user = ioremap(pci_resource_start(dev->pdev, 0) + user, + PAGE_SIZE); + if (!chan->user) { + NV_ERROR(dev, "ioremap of regs failed.\n"); +- nouveau_channel_free(chan); ++ nouveau_channel_put(&chan); + return -ENOMEM; + } + chan->user_put = 0x40; +@@ -182,15 +185,15 @@ + ret = nouveau_notifier_init_channel(chan); + if (ret) { + NV_ERROR(dev, "ntfy %d\n", ret); +- nouveau_channel_free(chan); ++ nouveau_channel_put(&chan); + return ret; + } + + /* Setup channel's default objects */ +- ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle); ++ ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle); + if (ret) { + NV_ERROR(dev, "gpuobj %d\n", ret); +- nouveau_channel_free(chan); ++ nouveau_channel_put(&chan); + return ret; + } + +@@ -198,7 +201,7 @@ + ret = nouveau_channel_pushbuf_ctxdma_init(chan); + if (ret) { + NV_ERROR(dev, "pbctxdma %d\n", ret); +- nouveau_channel_free(chan); ++ nouveau_channel_put(&chan); + return ret; + } + +@@ -206,16 +209,18 @@ + pfifo->reassign(dev, false); + + /* Create a graphics context for new channel */ +- ret = pgraph->create_context(chan); +- if (ret) { +- nouveau_channel_free(chan); +- return ret; ++ if (dev_priv->card_type < NV_50) { ++ ret = pgraph->create_context(chan); ++ if (ret) { ++ nouveau_channel_put(&chan); ++ return ret; ++ } + } + + /* Construct inital RAMFC for new channel */ + ret = pfifo->create_context(chan); + if (ret) { +- nouveau_channel_free(chan); ++ nouveau_channel_put(&chan); + return ret; + } + +@@ -225,83 +230,121 @@ + if (!ret) + ret = nouveau_fence_channel_init(chan); + if (ret) { +- nouveau_channel_free(chan); ++ nouveau_channel_put(&chan); + return ret; + } + + nouveau_debugfs_channel_init(chan); + +- NV_INFO(dev, "%s: initialised FIFO %d\n", __func__, 
channel); ++ NV_DEBUG(dev, "channel %d initialised\n", chan->id); + *chan_ret = chan; + return 0; + } + +-/* stops a fifo */ ++struct nouveau_channel * ++nouveau_channel_get_unlocked(struct nouveau_channel *ref) ++{ ++ struct nouveau_channel *chan = NULL; ++ ++ if (likely(ref && atomic_inc_not_zero(&ref->users))) ++ nouveau_channel_ref(ref, &chan); ++ ++ return chan; ++} ++ ++struct nouveau_channel * ++nouveau_channel_get(struct drm_device *dev, struct drm_file *file_priv, int id) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_channel *chan; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&dev_priv->channels.lock, flags); ++ chan = nouveau_channel_get_unlocked(dev_priv->channels.ptr[id]); ++ spin_unlock_irqrestore(&dev_priv->channels.lock, flags); ++ ++ if (unlikely(!chan)) ++ return ERR_PTR(-EINVAL); ++ ++ if (unlikely(file_priv && chan->file_priv != file_priv)) { ++ nouveau_channel_put_unlocked(&chan); ++ return ERR_PTR(-EINVAL); ++ } ++ ++ mutex_lock(&chan->mutex); ++ return chan; ++} ++ + void +-nouveau_channel_free(struct nouveau_channel *chan) ++nouveau_channel_put_unlocked(struct nouveau_channel **pchan) + { ++ struct nouveau_channel *chan = *pchan; + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; ++ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; ++ struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt; + unsigned long flags; + int ret; + +- NV_INFO(dev, "%s: freeing fifo %d\n", __func__, chan->id); ++ /* decrement the refcount, and we're done if there's still refs */ ++ if (likely(!atomic_dec_and_test(&chan->users))) { ++ nouveau_channel_ref(NULL, pchan); ++ return; ++ } + ++ /* noone wants the channel anymore */ ++ NV_DEBUG(dev, "freeing channel %d\n", chan->id); + nouveau_debugfs_channel_fini(chan); + +- /* Give 
outstanding push buffers a chance to complete */ ++ /* give it chance to idle */ + nouveau_fence_update(chan); + if (chan->fence.sequence != chan->fence.sequence_ack) { + struct nouveau_fence *fence = NULL; + + ret = nouveau_fence_new(chan, &fence, true); + if (ret == 0) { +- ret = nouveau_fence_wait(fence, NULL, false, false); +- nouveau_fence_unref((void *)&fence); ++ ret = nouveau_fence_wait(fence, false, false); ++ nouveau_fence_unref(&fence); + } + + if (ret) + NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id); + } + +- /* Ensure all outstanding fences are signaled. They should be if the ++ /* ensure all outstanding fences are signaled. they should be if the + * above attempts at idling were OK, but if we failed this'll tell TTM + * we're done with the buffers. + */ + nouveau_fence_channel_fini(chan); + +- /* This will prevent pfifo from switching channels. */ ++ /* boot it off the hardware */ + pfifo->reassign(dev, false); + +- /* We want to give pgraph a chance to idle and get rid of all potential +- * errors. We need to do this before the lock, otherwise the irq handler +- * is unable to process them. ++ /* We want to give pgraph a chance to idle and get rid of all ++ * potential errors. We need to do this without the context ++ * switch lock held, otherwise the irq handler is unable to ++ * process them. 
+ */ + if (pgraph->channel(dev) == chan) + nouveau_wait_for_idle(dev); + +- spin_lock_irqsave(&dev_priv->context_switch_lock, flags); +- +- pgraph->fifo_access(dev, false); +- if (pgraph->channel(dev) == chan) +- pgraph->unload_context(dev); +- pgraph->destroy_context(chan); +- pgraph->fifo_access(dev, true); +- +- if (pfifo->channel_id(dev) == chan->id) { +- pfifo->disable(dev); +- pfifo->unload_context(dev); +- pfifo->enable(dev); +- } ++ /* destroy the engine specific contexts */ + pfifo->destroy_context(chan); ++ pgraph->destroy_context(chan); ++ if (pcrypt->destroy_context) ++ pcrypt->destroy_context(chan); + + pfifo->reassign(dev, true); + +- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); ++ /* aside from its resources, the channel should now be dead, ++ * remove it from the channel list ++ */ ++ spin_lock_irqsave(&dev_priv->channels.lock, flags); ++ nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]); ++ spin_unlock_irqrestore(&dev_priv->channels.lock, flags); + +- /* Release the channel's resources */ ++ /* destroy any resources the channel owned */ + nouveau_gpuobj_ref(NULL, &chan->pushbuf); + if (chan->pushbuf_bo) { + nouveau_bo_unmap(chan->pushbuf_bo); +@@ -310,44 +353,62 @@ + } + nouveau_gpuobj_channel_takedown(chan); + nouveau_notifier_takedown_channel(chan); ++ ++ nouveau_channel_ref(NULL, pchan); ++} ++ ++void ++nouveau_channel_put(struct nouveau_channel **pchan) ++{ ++ mutex_unlock(&(*pchan)->mutex); ++ nouveau_channel_put_unlocked(pchan); ++} ++ ++static void ++nouveau_channel_del(struct kref *ref) ++{ ++ struct nouveau_channel *chan = ++ container_of(ref, struct nouveau_channel, ref); ++ + if (chan->user) + iounmap(chan->user); + +- dev_priv->fifos[chan->id] = NULL; + kfree(chan); + } + ++void ++nouveau_channel_ref(struct nouveau_channel *chan, ++ struct nouveau_channel **pchan) ++{ ++ if (chan) ++ kref_get(&chan->ref); ++ ++ if (*pchan) ++ kref_put(&(*pchan)->ref, nouveau_channel_del); ++ ++ *pchan = chan; ++} ++ + /* 
cleans up all the fifos from file_priv */ + void + nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_engine *engine = &dev_priv->engine; ++ struct nouveau_channel *chan; + int i; + + NV_DEBUG(dev, "clearing FIFO enables from file_priv\n"); + for (i = 0; i < engine->fifo.channels; i++) { +- struct nouveau_channel *chan = dev_priv->fifos[i]; ++ chan = nouveau_channel_get(dev, file_priv, i); ++ if (IS_ERR(chan)) ++ continue; + +- if (chan && chan->file_priv == file_priv) +- nouveau_channel_free(chan); ++ atomic_dec(&chan->users); ++ nouveau_channel_put(&chan); + } + } + +-int +-nouveau_channel_owner(struct drm_device *dev, struct drm_file *file_priv, +- int channel) +-{ +- struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nouveau_engine *engine = &dev_priv->engine; +- +- if (channel >= engine->fifo.channels) +- return 0; +- if (dev_priv->fifos[channel] == NULL) +- return 0; +- +- return (dev_priv->fifos[channel]->file_priv == file_priv); +-} + + /*********************************** + * ioctls wrapping the functions +@@ -395,24 +456,26 @@ + /* Named memory object area */ + ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem, + &init->notifier_handle); +- if (ret) { +- nouveau_channel_free(chan); +- return ret; +- } + +- return 0; ++ if (ret == 0) ++ atomic_inc(&chan->users); /* userspace reference */ ++ nouveau_channel_put(&chan); ++ return ret; + } + + static int + nouveau_ioctl_fifo_free(struct drm_device *dev, void *data, + struct drm_file *file_priv) + { +- struct drm_nouveau_channel_free *cfree = data; ++ struct drm_nouveau_channel_free *req = data; + struct nouveau_channel *chan; + +- NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan); ++ chan = nouveau_channel_get(dev, file_priv, req->channel); ++ if (IS_ERR(chan)) ++ return PTR_ERR(chan); + +- nouveau_channel_free(chan); ++ atomic_dec(&chan->users); ++ 
nouveau_channel_put(&chan); + return 0; + } + +@@ -421,18 +484,18 @@ + ***********************************/ + + struct drm_ioctl_desc nouveau_ioctls[] = { +- DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH), +- DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), +- DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH), +- DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH), +- DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH), +- DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH), +- DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH), +- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH), +- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH), +- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH), +- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH), +- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH), ++ DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH), ++ DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), ++ DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_UNLOCKED|DRM_AUTH), ++ DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_UNLOCKED|DRM_AUTH), ++ DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH), ++ DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_UNLOCKED|DRM_AUTH), ++ DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH), ++ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH), ++ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH), ++ 
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH), ++ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH), ++ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH), + }; + + int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls); +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_connector.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_connector.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_connector.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_connector.c 2010-11-12 06:18:06.000000000 +0100 +@@ -37,6 +37,8 @@ + #include "nouveau_connector.h" + #include "nouveau_hw.h" + ++static void nouveau_connector_hotplug(void *, int); ++ + static struct nouveau_encoder * + find_encoder_by_type(struct drm_connector *connector, int type) + { +@@ -94,22 +96,30 @@ + } + + static void +-nouveau_connector_destroy(struct drm_connector *drm_connector) ++nouveau_connector_destroy(struct drm_connector *connector) + { +- struct nouveau_connector *nv_connector = +- nouveau_connector(drm_connector); ++ struct nouveau_connector *nv_connector = nouveau_connector(connector); ++ struct drm_nouveau_private *dev_priv; ++ struct nouveau_gpio_engine *pgpio; + struct drm_device *dev; + + if (!nv_connector) + return; + + dev = nv_connector->base.dev; ++ dev_priv = dev->dev_private; + NV_DEBUG_KMS(dev, "\n"); + ++ pgpio = &dev_priv->engine.gpio; ++ if (pgpio->irq_unregister) { ++ pgpio->irq_unregister(dev, nv_connector->dcb->gpio_tag, ++ nouveau_connector_hotplug, connector); ++ } ++ + kfree(nv_connector->edid); +- drm_sysfs_connector_remove(drm_connector); +- drm_connector_cleanup(drm_connector); +- kfree(drm_connector); ++ drm_sysfs_connector_remove(connector); ++ drm_connector_cleanup(connector); ++ kfree(connector); + } + + static struct nouveau_i2c_chan * +@@ -281,7 +291,7 @@ + nv_encoder = 
find_encoder_by_type(connector, OUTPUT_ANALOG); + if (!nv_encoder && !nouveau_tv_disable) + nv_encoder = find_encoder_by_type(connector, OUTPUT_TV); +- if (nv_encoder) { ++ if (nv_encoder && force) { + struct drm_encoder *encoder = to_drm_encoder(nv_encoder); + struct drm_encoder_helper_funcs *helper = + encoder->helper_private; +@@ -641,11 +651,28 @@ + return ret; + } + ++static unsigned ++get_tmds_link_bandwidth(struct drm_connector *connector) ++{ ++ struct nouveau_connector *nv_connector = nouveau_connector(connector); ++ struct drm_nouveau_private *dev_priv = connector->dev->dev_private; ++ struct dcb_entry *dcb = nv_connector->detected_encoder->dcb; ++ ++ if (dcb->location != DCB_LOC_ON_CHIP || ++ dev_priv->chipset >= 0x46) ++ return 165000; ++ else if (dev_priv->chipset >= 0x40) ++ return 155000; ++ else if (dev_priv->chipset >= 0x18) ++ return 135000; ++ else ++ return 112000; ++} ++ + static int + nouveau_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) + { +- struct drm_nouveau_private *dev_priv = connector->dev->dev_private; + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder; + struct drm_encoder *encoder = to_drm_encoder(nv_encoder); +@@ -663,11 +690,9 @@ + max_clock = 400000; + break; + case OUTPUT_TMDS: +- if ((dev_priv->card_type >= NV_50 && !nouveau_duallink) || +- !nv_encoder->dcb->duallink_possible) +- max_clock = 165000; +- else +- max_clock = 330000; ++ max_clock = get_tmds_link_bandwidth(connector); ++ if (nouveau_duallink && nv_encoder->dcb->duallink_possible) ++ max_clock *= 2; + break; + case OUTPUT_ANALOG: + max_clock = nv_encoder->dcb->crtconf.maxfreq; +@@ -709,44 +734,6 @@ + return NULL; + } + +-void +-nouveau_connector_set_polling(struct drm_connector *connector) +-{ +- struct drm_device *dev = connector->dev; +- struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct drm_crtc *crtc; +- bool 
spare_crtc = false; +- +- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) +- spare_crtc |= !crtc->enabled; +- +- connector->polled = 0; +- +- switch (connector->connector_type) { +- case DRM_MODE_CONNECTOR_VGA: +- case DRM_MODE_CONNECTOR_TV: +- if (dev_priv->card_type >= NV_50 || +- (nv_gf4_disp_arch(dev) && spare_crtc)) +- connector->polled = DRM_CONNECTOR_POLL_CONNECT; +- break; +- +- case DRM_MODE_CONNECTOR_DVII: +- case DRM_MODE_CONNECTOR_DVID: +- case DRM_MODE_CONNECTOR_HDMIA: +- case DRM_MODE_CONNECTOR_DisplayPort: +- case DRM_MODE_CONNECTOR_eDP: +- if (dev_priv->card_type >= NV_50) +- connector->polled = DRM_CONNECTOR_POLL_HPD; +- else if (connector->connector_type == DRM_MODE_CONNECTOR_DVID || +- spare_crtc) +- connector->polled = DRM_CONNECTOR_POLL_CONNECT; +- break; +- +- default: +- break; +- } +-} +- + static const struct drm_connector_helper_funcs + nouveau_connector_helper_funcs = { + .get_modes = nouveau_connector_get_modes, +@@ -783,6 +770,7 @@ + { + const struct drm_connector_funcs *funcs = &nouveau_connector_funcs; + struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; + struct nouveau_connector *nv_connector = NULL; + struct dcb_connector_table_entry *dcb = NULL; + struct drm_connector *connector; +@@ -872,6 +860,7 @@ + dev->mode_config.scaling_mode_property, + nv_connector->scaling_mode); + } ++ connector->polled = DRM_CONNECTOR_POLL_CONNECT; + /* fall-through */ + case DCB_CONNECTOR_TV_0: + case DCB_CONNECTOR_TV_1: +@@ -888,10 +877,20 @@ + dev->mode_config.dithering_mode_property, + nv_connector->use_dithering ? 
+ DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF); ++ ++ if (dcb->type != DCB_CONNECTOR_LVDS) { ++ if (dev_priv->card_type >= NV_50) ++ connector->polled = DRM_CONNECTOR_POLL_HPD; ++ else ++ connector->polled = DRM_CONNECTOR_POLL_CONNECT; ++ } + break; + } + +- nouveau_connector_set_polling(connector); ++ if (pgpio->irq_register) { ++ pgpio->irq_register(dev, nv_connector->dcb->gpio_tag, ++ nouveau_connector_hotplug, connector); ++ } + + drm_sysfs_connector_add(connector); + dcb->drm = connector; +@@ -903,3 +902,29 @@ + return ERR_PTR(ret); + + } ++ ++static void ++nouveau_connector_hotplug(void *data, int plugged) ++{ ++ struct drm_connector *connector = data; ++ struct drm_device *dev = connector->dev; ++ ++ NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un", ++ drm_get_connector_name(connector)); ++ ++ if (connector->encoder && connector->encoder->crtc && ++ connector->encoder->crtc->enabled) { ++ struct nouveau_encoder *nv_encoder = nouveau_encoder(connector->encoder); ++ struct drm_encoder_helper_funcs *helper = ++ connector->encoder->helper_private; ++ ++ if (nv_encoder->dcb->type == OUTPUT_DP) { ++ if (plugged) ++ helper->dpms(connector->encoder, DRM_MODE_DPMS_ON); ++ else ++ helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF); ++ } ++ } ++ ++ drm_helper_hpd_irq_event(dev); ++} +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_connector.h linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_connector.h +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_connector.h 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_connector.h 2010-11-12 06:18:06.000000000 +0100 +@@ -52,9 +52,6 @@ + struct drm_connector * + nouveau_connector_create(struct drm_device *, int index); + +-void +-nouveau_connector_set_polling(struct drm_connector *); +- + int + nouveau_connector_bpp(struct drm_connector *); + +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_display.c 
linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_display.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_display.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_display.c 2010-11-12 06:18:06.000000000 +0100 +@@ -29,6 +29,9 @@ + #include "nouveau_drv.h" + #include "nouveau_fb.h" + #include "nouveau_fbcon.h" ++#include "nouveau_hw.h" ++#include "nouveau_crtc.h" ++#include "nouveau_dma.h" + + static void + nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb) +@@ -104,3 +107,207 @@ + .output_poll_changed = nouveau_fbcon_output_poll_changed, + }; + ++int ++nouveau_vblank_enable(struct drm_device *dev, int crtc) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ if (dev_priv->card_type >= NV_50) ++ nv_mask(dev, NV50_PDISPLAY_INTR_EN_1, 0, ++ NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc)); ++ else ++ NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0, ++ NV_PCRTC_INTR_0_VBLANK); ++ ++ return 0; ++} ++ ++void ++nouveau_vblank_disable(struct drm_device *dev, int crtc) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ if (dev_priv->card_type >= NV_50) ++ nv_mask(dev, NV50_PDISPLAY_INTR_EN_1, ++ NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc), 0); ++ else ++ NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0, 0); ++} ++ ++static int ++nouveau_page_flip_reserve(struct nouveau_bo *old_bo, ++ struct nouveau_bo *new_bo) ++{ ++ int ret; ++ ++ ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM); ++ if (ret) ++ return ret; ++ ++ ret = ttm_bo_reserve(&new_bo->bo, false, false, false, 0); ++ if (ret) ++ goto fail; ++ ++ ret = ttm_bo_reserve(&old_bo->bo, false, false, false, 0); ++ if (ret) ++ goto fail_unreserve; ++ ++ return 0; ++ ++fail_unreserve: ++ ttm_bo_unreserve(&new_bo->bo); ++fail: ++ nouveau_bo_unpin(new_bo); ++ return ret; ++} ++ ++static void ++nouveau_page_flip_unreserve(struct nouveau_bo *old_bo, ++ struct nouveau_bo *new_bo, ++ struct nouveau_fence *fence) ++{ ++ 
nouveau_bo_fence(new_bo, fence); ++ ttm_bo_unreserve(&new_bo->bo); ++ ++ nouveau_bo_fence(old_bo, fence); ++ ttm_bo_unreserve(&old_bo->bo); ++ ++ nouveau_bo_unpin(old_bo); ++} ++ ++static int ++nouveau_page_flip_emit(struct nouveau_channel *chan, ++ struct nouveau_bo *old_bo, ++ struct nouveau_bo *new_bo, ++ struct nouveau_page_flip_state *s, ++ struct nouveau_fence **pfence) ++{ ++ struct drm_device *dev = chan->dev; ++ unsigned long flags; ++ int ret; ++ ++ /* Queue it to the pending list */ ++ spin_lock_irqsave(&dev->event_lock, flags); ++ list_add_tail(&s->head, &chan->nvsw.flip); ++ spin_unlock_irqrestore(&dev->event_lock, flags); ++ ++ /* Synchronize with the old framebuffer */ ++ ret = nouveau_fence_sync(old_bo->bo.sync_obj, chan); ++ if (ret) ++ goto fail; ++ ++ /* Emit the pageflip */ ++ ret = RING_SPACE(chan, 2); ++ if (ret) ++ goto fail; ++ ++ BEGIN_RING(chan, NvSubSw, NV_SW_PAGE_FLIP, 1); ++ OUT_RING(chan, 0); ++ FIRE_RING(chan); ++ ++ ret = nouveau_fence_new(chan, pfence, true); ++ if (ret) ++ goto fail; ++ ++ return 0; ++fail: ++ spin_lock_irqsave(&dev->event_lock, flags); ++ list_del(&s->head); ++ spin_unlock_irqrestore(&dev->event_lock, flags); ++ return ret; ++} ++ ++int ++nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, ++ struct drm_pending_vblank_event *event) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->fb)->nvbo; ++ struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo; ++ struct nouveau_page_flip_state *s; ++ struct nouveau_channel *chan; ++ struct nouveau_fence *fence; ++ int ret; ++ ++ if (dev_priv->engine.graph.accel_blocked) ++ return -ENODEV; ++ ++ s = kzalloc(sizeof(*s), GFP_KERNEL); ++ if (!s) ++ return -ENOMEM; ++ ++ /* Don't let the buffers go away while we flip */ ++ ret = nouveau_page_flip_reserve(old_bo, new_bo); ++ if (ret) ++ goto fail_free; ++ ++ /* Initialize a page flip struct */ ++ 
*s = (struct nouveau_page_flip_state) ++ { { }, s->event, nouveau_crtc(crtc)->index, ++ fb->bits_per_pixel, fb->pitch, crtc->x, crtc->y, ++ new_bo->bo.offset }; ++ ++ /* Choose the channel the flip will be handled in */ ++ chan = nouveau_fence_channel(new_bo->bo.sync_obj); ++ if (!chan) ++ chan = nouveau_channel_get_unlocked(dev_priv->channel); ++ mutex_lock(&chan->mutex); ++ ++ /* Emit a page flip */ ++ ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); ++ nouveau_channel_put(&chan); ++ if (ret) ++ goto fail_unreserve; ++ ++ /* Update the crtc struct and cleanup */ ++ crtc->fb = fb; ++ ++ nouveau_page_flip_unreserve(old_bo, new_bo, fence); ++ nouveau_fence_unref(&fence); ++ return 0; ++ ++fail_unreserve: ++ nouveau_page_flip_unreserve(old_bo, new_bo, NULL); ++fail_free: ++ kfree(s); ++ return ret; ++} ++ ++int ++nouveau_finish_page_flip(struct nouveau_channel *chan, ++ struct nouveau_page_flip_state *ps) ++{ ++ struct drm_device *dev = chan->dev; ++ struct nouveau_page_flip_state *s; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&dev->event_lock, flags); ++ ++ if (list_empty(&chan->nvsw.flip)) { ++ NV_ERROR(dev, "Unexpected pageflip in channel %d.\n", chan->id); ++ spin_unlock_irqrestore(&dev->event_lock, flags); ++ return -EINVAL; ++ } ++ ++ s = list_first_entry(&chan->nvsw.flip, ++ struct nouveau_page_flip_state, head); ++ if (s->event) { ++ struct drm_pending_vblank_event *e = s->event; ++ struct timeval now; ++ ++ do_gettimeofday(&now); ++ e->event.sequence = 0; ++ e->event.tv_sec = now.tv_sec; ++ e->event.tv_usec = now.tv_usec; ++ list_add_tail(&e->base.link, &e->base.file_priv->event_list); ++ wake_up_interruptible(&e->base.file_priv->event_wait); ++ } ++ ++ list_del(&s->head); ++ *ps = *s; ++ kfree(s); ++ ++ spin_unlock_irqrestore(&dev->event_lock, flags); ++ return 0; ++} +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_dp.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_dp.c +--- 
linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_dp.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_dp.c 2010-11-12 06:18:06.000000000 +0100 +@@ -279,7 +279,7 @@ + struct bit_displayport_encoder_table *dpe; + int dpe_headerlen; + uint8_t config[4], status[3]; +- bool cr_done, cr_max_vs, eq_done; ++ bool cr_done, cr_max_vs, eq_done, hpd_state; + int ret = 0, i, tries, voltage; + + NV_DEBUG_KMS(dev, "link training!!\n"); +@@ -297,7 +297,7 @@ + /* disable hotplug detect, this flips around on some panels during + * link training. + */ +- pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false); ++ hpd_state = pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false); + + if (dpe->script0) { + NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or); +@@ -439,7 +439,7 @@ + } + + /* re-enable hotplug detect */ +- pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, true); ++ pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, hpd_state); + + return eq_done; + } +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_drv.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_drv.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_drv.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_drv.c 2010-11-12 06:18:06.000000000 +0100 +@@ -115,6 +115,10 @@ + int nouveau_perflvl_wr; + module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400); + ++MODULE_PARM_DESC(msi, "Enable MSI (default: off)\n"); ++int nouveau_msi; ++module_param_named(msi, nouveau_msi, int, 0400); ++ + int nouveau_fbpercrtc; + #if 0 + module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400); +@@ -195,15 +199,14 @@ + for (i = 0; i < pfifo->channels; i++) { + struct nouveau_fence *fence = NULL; + +- chan = dev_priv->fifos[i]; +- if (!chan || (dev_priv->card_type >= NV_50 && +- chan == dev_priv->fifos[0])) ++ chan = dev_priv->channels.ptr[i]; ++ if (!chan || !chan->pushbuf_bo) + 
continue; + + ret = nouveau_fence_new(chan, &fence, true); + if (ret == 0) { +- ret = nouveau_fence_wait(fence, NULL, false, false); +- nouveau_fence_unref((void *)&fence); ++ ret = nouveau_fence_wait(fence, false, false); ++ nouveau_fence_unref(&fence); + } + + if (ret) { +@@ -219,17 +222,17 @@ + pfifo->unload_context(dev); + pgraph->unload_context(dev); + +- NV_INFO(dev, "Suspending GPU objects...\n"); +- ret = nouveau_gpuobj_suspend(dev); ++ ret = pinstmem->suspend(dev); + if (ret) { + NV_ERROR(dev, "... failed: %d\n", ret); + goto out_abort; + } + +- ret = pinstmem->suspend(dev); ++ NV_INFO(dev, "Suspending GPU objects...\n"); ++ ret = nouveau_gpuobj_suspend(dev); + if (ret) { + NV_ERROR(dev, "... failed: %d\n", ret); +- nouveau_gpuobj_suspend_cleanup(dev); ++ pinstmem->resume(dev); + goto out_abort; + } + +@@ -294,17 +297,18 @@ + } + } + ++ NV_INFO(dev, "Restoring GPU objects...\n"); ++ nouveau_gpuobj_resume(dev); ++ + NV_INFO(dev, "Reinitialising engines...\n"); + engine->instmem.resume(dev); + engine->mc.init(dev); + engine->timer.init(dev); + engine->fb.init(dev); + engine->graph.init(dev); ++ engine->crypt.init(dev); + engine->fifo.init(dev); + +- NV_INFO(dev, "Restoring GPU objects...\n"); +- nouveau_gpuobj_resume(dev); +- + nouveau_irq_postinstall(dev); + + /* Re-write SKIPS, they'll have been lost over the suspend */ +@@ -313,7 +317,7 @@ + int j; + + for (i = 0; i < dev_priv->engine.fifo.channels; i++) { +- chan = dev_priv->fifos[i]; ++ chan = dev_priv->channels.ptr[i]; + if (!chan || !chan->pushbuf_bo) + continue; + +@@ -393,6 +397,9 @@ + .irq_postinstall = nouveau_irq_postinstall, + .irq_uninstall = nouveau_irq_uninstall, + .irq_handler = nouveau_irq_handler, ++ .get_vblank_counter = drm_vblank_count, ++ .enable_vblank = nouveau_vblank_enable, ++ .disable_vblank = nouveau_vblank_disable, + .reclaim_buffers = drm_core_reclaim_buffers, + .ioctls = nouveau_ioctls, + .fops = { +@@ -403,6 +410,7 @@ + .mmap = nouveau_ttm_mmap, + .poll = drm_poll, + .fasync 
= drm_fasync, ++ .read = drm_read, + #if defined(CONFIG_COMPAT) + .compat_ioctl = nouveau_compat_ioctl, + #endif +@@ -448,6 +456,12 @@ + if (!nouveau_modeset) + return 0; + ++#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) ++ request_module("fbcon"); ++#elif !defined(CONFIG_FRAMEBUFFER_CONSOLE) ++ printk(KERN_INFO "CONFIG_FRAMEBUFFER_CONSOLE was not enabled. You won't get any console output.\n"); ++#endif ++ + nouveau_register_dsm_handler(); + return drm_init(&driver); + } +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_drv.h linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_drv.h +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_drv.h 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_drv.h 2010-11-12 06:18:06.000000000 +0100 +@@ -54,6 +54,7 @@ + #include "nouveau_drm.h" + #include "nouveau_reg.h" + #include "nouveau_bios.h" ++#include "nouveau_util.h" + struct nouveau_grctx; + + #define MAX_NUM_DCB_ENTRIES 16 +@@ -66,10 +67,13 @@ + #define NV50_VM_VRAM_NR (NV50_VM_MAX_VRAM / NV50_VM_BLOCK) + + struct nouveau_tile_reg { +- struct nouveau_fence *fence; +- uint32_t addr; +- uint32_t size; + bool used; ++ uint32_t addr; ++ uint32_t limit; ++ uint32_t pitch; ++ uint32_t zcomp; ++ struct drm_mm_node *tag_mem; ++ struct nouveau_fence *fence; + }; + + struct nouveau_bo { +@@ -96,10 +100,12 @@ + struct nouveau_tile_reg *tile; + + struct drm_gem_object *gem; +- struct drm_file *cpu_filp; + int pin_refcnt; + }; + ++#define nouveau_bo_tile_layout(nvbo) \ ++ ((nvbo)->tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) ++ + static inline struct nouveau_bo * + nouveau_bo(struct ttm_buffer_object *bo) + { +@@ -130,20 +136,26 @@ + + #define NVOBJ_ENGINE_SW 0 + #define NVOBJ_ENGINE_GR 1 +-#define NVOBJ_ENGINE_DISPLAY 2 ++#define NVOBJ_ENGINE_PPP 2 ++#define NVOBJ_ENGINE_COPY 3 ++#define NVOBJ_ENGINE_VP 4 ++#define NVOBJ_ENGINE_CRYPT 5 ++#define NVOBJ_ENGINE_BSP 6 ++#define NVOBJ_ENGINE_DISPLAY 0xcafe0001 + #define 
NVOBJ_ENGINE_INT 0xdeadbeef + + #define NVOBJ_FLAG_ZERO_ALLOC (1 << 1) + #define NVOBJ_FLAG_ZERO_FREE (1 << 2) ++ ++#define NVOBJ_CINST_GLOBAL 0xdeadbeef ++ + struct nouveau_gpuobj { + struct drm_device *dev; + struct kref refcount; + struct list_head list; + +- struct drm_mm_node *im_pramin; +- struct nouveau_bo *im_backing; +- uint32_t *im_backing_suspend; +- int im_bound; ++ void *node; ++ u32 *suspend; + + uint32_t flags; + +@@ -159,10 +171,29 @@ + void *priv; + }; + ++struct nouveau_page_flip_state { ++ struct list_head head; ++ struct drm_pending_vblank_event *event; ++ int crtc, bpp, pitch, x, y; ++ uint64_t offset; ++}; ++ ++enum nouveau_channel_mutex_class { ++ NOUVEAU_UCHANNEL_MUTEX, ++ NOUVEAU_KCHANNEL_MUTEX ++}; ++ + struct nouveau_channel { + struct drm_device *dev; + int id; + ++ /* references to the channel data structure */ ++ struct kref ref; ++ /* users of the hardware channel resources, the hardware ++ * context will be kicked off when it reaches zero. */ ++ atomic_t users; ++ struct mutex mutex; ++ + /* owner of this fifo */ + struct drm_file *file_priv; + /* mapping of the fifo itself */ +@@ -199,6 +230,7 @@ + /* PGRAPH context */ + /* XXX may be merge 2 pointers as private data ??? 
*/ + struct nouveau_gpuobj *ramin_grctx; ++ struct nouveau_gpuobj *crypt_ctx; + void *pgraph_ctx; + + /* NV50 VM */ +@@ -235,9 +267,11 @@ + + struct { + struct nouveau_gpuobj *vblsem; ++ uint32_t vblsem_head; + uint32_t vblsem_offset; + uint32_t vblsem_rval; + struct list_head vbl_wait; ++ struct list_head flip; + } nvsw; + + struct { +@@ -255,11 +289,11 @@ + int (*suspend)(struct drm_device *dev); + void (*resume)(struct drm_device *dev); + +- int (*populate)(struct drm_device *, struct nouveau_gpuobj *, +- uint32_t *size); +- void (*clear)(struct drm_device *, struct nouveau_gpuobj *); +- int (*bind)(struct drm_device *, struct nouveau_gpuobj *); +- int (*unbind)(struct drm_device *, struct nouveau_gpuobj *); ++ int (*get)(struct nouveau_gpuobj *, u32 size, u32 align); ++ void (*put)(struct nouveau_gpuobj *); ++ int (*map)(struct nouveau_gpuobj *); ++ void (*unmap)(struct nouveau_gpuobj *); ++ + void (*flush)(struct drm_device *); + }; + +@@ -276,12 +310,16 @@ + + struct nouveau_fb_engine { + int num_tiles; ++ struct drm_mm tag_heap; + + int (*init)(struct drm_device *dev); + void (*takedown)(struct drm_device *dev); + +- void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr, +- uint32_t size, uint32_t pitch); ++ void (*init_tile_region)(struct drm_device *dev, int i, ++ uint32_t addr, uint32_t size, ++ uint32_t pitch, uint32_t flags); ++ void (*set_tile_region)(struct drm_device *dev, int i); ++ void (*free_tile_region)(struct drm_device *dev, int i); + }; + + struct nouveau_fifo_engine { +@@ -304,23 +342,12 @@ + void (*destroy_context)(struct nouveau_channel *); + int (*load_context)(struct nouveau_channel *); + int (*unload_context)(struct drm_device *); +-}; +- +-struct nouveau_pgraph_object_method { +- int id; +- int (*exec)(struct nouveau_channel *chan, int grclass, int mthd, +- uint32_t data); +-}; +- +-struct nouveau_pgraph_object_class { +- int id; +- bool software; +- struct nouveau_pgraph_object_method *methods; ++ void 
(*tlb_flush)(struct drm_device *dev); + }; + + struct nouveau_pgraph_engine { +- struct nouveau_pgraph_object_class *grclass; + bool accel_blocked; ++ bool registered; + int grctx_size; + + /* NV2x/NV3x context table (0x400780) */ +@@ -336,9 +363,9 @@ + void (*destroy_context)(struct nouveau_channel *); + int (*load_context)(struct nouveau_channel *); + int (*unload_context)(struct drm_device *); ++ void (*tlb_flush)(struct drm_device *dev); + +- void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr, +- uint32_t size, uint32_t pitch); ++ void (*set_tile_region)(struct drm_device *dev, int i); + }; + + struct nouveau_display_engine { +@@ -350,13 +377,19 @@ + }; + + struct nouveau_gpio_engine { ++ void *priv; ++ + int (*init)(struct drm_device *); + void (*takedown)(struct drm_device *); + + int (*get)(struct drm_device *, enum dcb_gpio_tag); + int (*set)(struct drm_device *, enum dcb_gpio_tag, int state); + +- void (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on); ++ int (*irq_register)(struct drm_device *, enum dcb_gpio_tag, ++ void (*)(void *, int), void *); ++ void (*irq_unregister)(struct drm_device *, enum dcb_gpio_tag, ++ void (*)(void *, int), void *); ++ bool (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on); + }; + + struct nouveau_pm_voltage_level { +@@ -432,6 +465,7 @@ + struct nouveau_pm_level *cur; + + struct device *hwmon; ++ struct notifier_block acpi_nb; + + int (*clock_get)(struct drm_device *, u32 id); + void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *, +@@ -444,6 +478,16 @@ + int (*temp_get)(struct drm_device *); + }; + ++struct nouveau_crypt_engine { ++ bool registered; ++ ++ int (*init)(struct drm_device *); ++ void (*takedown)(struct drm_device *); ++ int (*create_context)(struct nouveau_channel *); ++ void (*destroy_context)(struct nouveau_channel *); ++ void (*tlb_flush)(struct drm_device *dev); ++}; ++ + struct nouveau_engine { + struct nouveau_instmem_engine instmem; + struct 
nouveau_mc_engine mc; +@@ -454,6 +498,7 @@ + struct nouveau_display_engine display; + struct nouveau_gpio_engine gpio; + struct nouveau_pm_engine pm; ++ struct nouveau_crypt_engine crypt; + }; + + struct nouveau_pll_vals { +@@ -485,13 +530,13 @@ + }; + + struct nv04_crtc_reg { +- unsigned char MiscOutReg; /* */ ++ unsigned char MiscOutReg; + uint8_t CRTC[0xa0]; + uint8_t CR58[0x10]; + uint8_t Sequencer[5]; + uint8_t Graphics[9]; + uint8_t Attribute[21]; +- unsigned char DAC[768]; /* Internal Colorlookuptable */ ++ unsigned char DAC[768]; + + /* PCRTC regs */ + uint32_t fb_start; +@@ -539,43 +584,9 @@ + }; + + struct nv04_mode_state { +- uint32_t bpp; +- uint32_t width; +- uint32_t height; +- uint32_t interlace; +- uint32_t repaint0; +- uint32_t repaint1; +- uint32_t screen; +- uint32_t scale; +- uint32_t dither; +- uint32_t extra; +- uint32_t fifo; +- uint32_t pixel; +- uint32_t horiz; +- int arbitration0; +- int arbitration1; +- uint32_t pll; +- uint32_t pllB; +- uint32_t vpll; +- uint32_t vpll2; +- uint32_t vpllB; +- uint32_t vpll2B; ++ struct nv04_crtc_reg crtc_reg[2]; + uint32_t pllsel; + uint32_t sel_clk; +- uint32_t general; +- uint32_t crtcOwner; +- uint32_t head; +- uint32_t head2; +- uint32_t cursorConfig; +- uint32_t cursor0; +- uint32_t cursor1; +- uint32_t cursor2; +- uint32_t timingH; +- uint32_t timingV; +- uint32_t displayV; +- uint32_t crtcSync; +- +- struct nv04_crtc_reg crtc_reg[2]; + }; + + enum nouveau_card_type { +@@ -606,12 +617,15 @@ + bool ramin_available; + struct drm_mm ramin_heap; + struct list_head gpuobj_list; ++ struct list_head classes; + + struct nouveau_bo *vga_ram; + ++ /* interrupt handling */ ++ void (*irq_handler[32])(struct drm_device *); ++ bool msi_enabled; + struct workqueue_struct *wq; + struct work_struct irq_work; +- struct work_struct hpd_work; + + struct list_head vbl_waiting; + +@@ -628,8 +642,10 @@ + struct nouveau_bo *bo; + } fence; + +- int fifo_alloc_count; +- struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR]; 
++ struct { ++ spinlock_t lock; ++ struct nouveau_channel *ptr[NOUVEAU_MAX_CHANNEL_NR]; ++ } channels; + + struct nouveau_engine engine; + struct nouveau_channel *channel; +@@ -660,7 +676,10 @@ + } gart_info; + + /* nv10-nv40 tiling regions */ +- struct nouveau_tile_reg tile[NOUVEAU_MAX_TILE_NR]; ++ struct { ++ struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR]; ++ spinlock_t lock; ++ } tile; + + /* VRAM/fb configuration */ + uint64_t vram_size; +@@ -697,6 +716,7 @@ + struct backlight_device *backlight; + + struct nouveau_channel *evo; ++ u32 evo_alloc; + struct { + struct dcb_entry *dcb; + u16 script; +@@ -742,16 +762,6 @@ + return 0; + } + +-#define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id, cl, ch) do { \ +- struct drm_nouveau_private *nv = dev->dev_private; \ +- if (!nouveau_channel_owner(dev, (cl), (id))) { \ +- NV_ERROR(dev, "pid %d doesn't own channel %d\n", \ +- DRM_CURRENTPID, (id)); \ +- return -EPERM; \ +- } \ +- (ch) = nv->fifos[(id)]; \ +-} while (0) +- + /* nouveau_drv.c */ + extern int nouveau_agpmode; + extern int nouveau_duallink; +@@ -771,6 +781,7 @@ + extern int nouveau_override_conntype; + extern char *nouveau_perflvl; + extern int nouveau_perflvl_wr; ++extern int nouveau_msi; + + extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state); + extern int nouveau_pci_resume(struct pci_dev *pdev); +@@ -798,13 +809,12 @@ + extern int nouveau_mem_init_agp(struct drm_device *); + extern int nouveau_mem_reset_agp(struct drm_device *); + extern void nouveau_mem_close(struct drm_device *); +-extern struct nouveau_tile_reg *nv10_mem_set_tiling(struct drm_device *dev, +- uint32_t addr, +- uint32_t size, +- uint32_t pitch); +-extern void nv10_mem_expire_tiling(struct drm_device *dev, +- struct nouveau_tile_reg *tile, +- struct nouveau_fence *fence); ++extern struct nouveau_tile_reg *nv10_mem_set_tiling( ++ struct drm_device *dev, uint32_t addr, uint32_t size, ++ uint32_t pitch, uint32_t flags); ++extern void nv10_mem_put_tile_region(struct 
drm_device *dev, ++ struct nouveau_tile_reg *tile, ++ struct nouveau_fence *fence); + extern int nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt, + uint32_t size, uint32_t flags, + uint64_t phys); +@@ -826,21 +836,43 @@ + extern struct drm_ioctl_desc nouveau_ioctls[]; + extern int nouveau_max_ioctl; + extern void nouveau_channel_cleanup(struct drm_device *, struct drm_file *); +-extern int nouveau_channel_owner(struct drm_device *, struct drm_file *, +- int channel); + extern int nouveau_channel_alloc(struct drm_device *dev, + struct nouveau_channel **chan, + struct drm_file *file_priv, + uint32_t fb_ctxdma, uint32_t tt_ctxdma); +-extern void nouveau_channel_free(struct nouveau_channel *); ++extern struct nouveau_channel * ++nouveau_channel_get_unlocked(struct nouveau_channel *); ++extern struct nouveau_channel * ++nouveau_channel_get(struct drm_device *, struct drm_file *, int id); ++extern void nouveau_channel_put_unlocked(struct nouveau_channel **); ++extern void nouveau_channel_put(struct nouveau_channel **); ++extern void nouveau_channel_ref(struct nouveau_channel *chan, ++ struct nouveau_channel **pchan); + + /* nouveau_object.c */ ++#define NVOBJ_CLASS(d,c,e) do { \ ++ int ret = nouveau_gpuobj_class_new((d), (c), NVOBJ_ENGINE_##e); \ ++ if (ret) \ ++ return ret; \ ++} while(0) ++ ++#define NVOBJ_MTHD(d,c,m,e) do { \ ++ int ret = nouveau_gpuobj_mthd_new((d), (c), (m), (e)); \ ++ if (ret) \ ++ return ret; \ ++} while(0) ++ + extern int nouveau_gpuobj_early_init(struct drm_device *); + extern int nouveau_gpuobj_init(struct drm_device *); + extern void nouveau_gpuobj_takedown(struct drm_device *); + extern int nouveau_gpuobj_suspend(struct drm_device *dev); +-extern void nouveau_gpuobj_suspend_cleanup(struct drm_device *dev); + extern void nouveau_gpuobj_resume(struct drm_device *dev); ++extern int nouveau_gpuobj_class_new(struct drm_device *, u32 class, u32 eng); ++extern int nouveau_gpuobj_mthd_new(struct drm_device *, u32 class, u32 mthd, ++ int 
(*exec)(struct nouveau_channel *, ++ u32 class, u32 mthd, u32 data)); ++extern int nouveau_gpuobj_mthd_call(struct nouveau_channel *, u32, u32, u32); ++extern int nouveau_gpuobj_mthd_call2(struct drm_device *, int, u32, u32, u32); + extern int nouveau_gpuobj_channel_init(struct nouveau_channel *, + uint32_t vram_h, uint32_t tt_h); + extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *); +@@ -861,15 +893,18 @@ + uint32_t *o_ret); + extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class, + struct nouveau_gpuobj **); +-extern int nouveau_gpuobj_sw_new(struct nouveau_channel *, int class, +- struct nouveau_gpuobj **); + extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data, + struct drm_file *); + extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data, + struct drm_file *); + + /* nouveau_irq.c */ ++extern int nouveau_irq_init(struct drm_device *); ++extern void nouveau_irq_fini(struct drm_device *); + extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS); ++extern void nouveau_irq_register(struct drm_device *, int status_bit, ++ void (*)(struct drm_device *)); ++extern void nouveau_irq_unregister(struct drm_device *, int status_bit); + extern void nouveau_irq_preinstall(struct drm_device *); + extern int nouveau_irq_postinstall(struct drm_device *); + extern void nouveau_irq_uninstall(struct drm_device *); +@@ -989,18 +1024,25 @@ + /* nv10_fb.c */ + extern int nv10_fb_init(struct drm_device *); + extern void nv10_fb_takedown(struct drm_device *); +-extern void nv10_fb_set_region_tiling(struct drm_device *, int, uint32_t, +- uint32_t, uint32_t); ++extern void nv10_fb_init_tile_region(struct drm_device *dev, int i, ++ uint32_t addr, uint32_t size, ++ uint32_t pitch, uint32_t flags); ++extern void nv10_fb_set_tile_region(struct drm_device *dev, int i); ++extern void nv10_fb_free_tile_region(struct drm_device *dev, int i); + + /* nv30_fb.c */ + extern int nv30_fb_init(struct drm_device *); + extern void 
nv30_fb_takedown(struct drm_device *); ++extern void nv30_fb_init_tile_region(struct drm_device *dev, int i, ++ uint32_t addr, uint32_t size, ++ uint32_t pitch, uint32_t flags); ++extern void nv30_fb_free_tile_region(struct drm_device *dev, int i); + + /* nv40_fb.c */ + extern int nv40_fb_init(struct drm_device *); + extern void nv40_fb_takedown(struct drm_device *); +-extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t, +- uint32_t, uint32_t); ++extern void nv40_fb_set_tile_region(struct drm_device *dev, int i); ++ + /* nv50_fb.c */ + extern int nv50_fb_init(struct drm_device *); + extern void nv50_fb_takedown(struct drm_device *); +@@ -1012,6 +1054,7 @@ + + /* nv04_fifo.c */ + extern int nv04_fifo_init(struct drm_device *); ++extern void nv04_fifo_fini(struct drm_device *); + extern void nv04_fifo_disable(struct drm_device *); + extern void nv04_fifo_enable(struct drm_device *); + extern bool nv04_fifo_reassign(struct drm_device *, bool); +@@ -1021,19 +1064,18 @@ + extern void nv04_fifo_destroy_context(struct nouveau_channel *); + extern int nv04_fifo_load_context(struct nouveau_channel *); + extern int nv04_fifo_unload_context(struct drm_device *); ++extern void nv04_fifo_isr(struct drm_device *); + + /* nv10_fifo.c */ + extern int nv10_fifo_init(struct drm_device *); + extern int nv10_fifo_channel_id(struct drm_device *); + extern int nv10_fifo_create_context(struct nouveau_channel *); +-extern void nv10_fifo_destroy_context(struct nouveau_channel *); + extern int nv10_fifo_load_context(struct nouveau_channel *); + extern int nv10_fifo_unload_context(struct drm_device *); + + /* nv40_fifo.c */ + extern int nv40_fifo_init(struct drm_device *); + extern int nv40_fifo_create_context(struct nouveau_channel *); +-extern void nv40_fifo_destroy_context(struct nouveau_channel *); + extern int nv40_fifo_load_context(struct nouveau_channel *); + extern int nv40_fifo_unload_context(struct drm_device *); + +@@ -1045,6 +1087,7 @@ + extern void 
nv50_fifo_destroy_context(struct nouveau_channel *); + extern int nv50_fifo_load_context(struct nouveau_channel *); + extern int nv50_fifo_unload_context(struct drm_device *); ++extern void nv50_fifo_tlb_flush(struct drm_device *dev); + + /* nvc0_fifo.c */ + extern int nvc0_fifo_init(struct drm_device *); +@@ -1060,7 +1103,6 @@ + extern int nvc0_fifo_unload_context(struct drm_device *); + + /* nv04_graph.c */ +-extern struct nouveau_pgraph_object_class nv04_graph_grclass[]; + extern int nv04_graph_init(struct drm_device *); + extern void nv04_graph_takedown(struct drm_device *); + extern void nv04_graph_fifo_access(struct drm_device *, bool); +@@ -1069,10 +1111,11 @@ + extern void nv04_graph_destroy_context(struct nouveau_channel *); + extern int nv04_graph_load_context(struct nouveau_channel *); + extern int nv04_graph_unload_context(struct drm_device *); +-extern void nv04_graph_context_switch(struct drm_device *); ++extern int nv04_graph_mthd_page_flip(struct nouveau_channel *chan, ++ u32 class, u32 mthd, u32 data); ++extern struct nouveau_bitfield nv04_graph_nsource[]; + + /* nv10_graph.c */ +-extern struct nouveau_pgraph_object_class nv10_graph_grclass[]; + extern int nv10_graph_init(struct drm_device *); + extern void nv10_graph_takedown(struct drm_device *); + extern struct nouveau_channel *nv10_graph_channel(struct drm_device *); +@@ -1080,13 +1123,11 @@ + extern void nv10_graph_destroy_context(struct nouveau_channel *); + extern int nv10_graph_load_context(struct nouveau_channel *); + extern int nv10_graph_unload_context(struct drm_device *); +-extern void nv10_graph_context_switch(struct drm_device *); +-extern void nv10_graph_set_region_tiling(struct drm_device *, int, uint32_t, +- uint32_t, uint32_t); ++extern void nv10_graph_set_tile_region(struct drm_device *dev, int i); ++extern struct nouveau_bitfield nv10_graph_intr[]; ++extern struct nouveau_bitfield nv10_graph_nstatus[]; + + /* nv20_graph.c */ +-extern struct nouveau_pgraph_object_class 
nv20_graph_grclass[]; +-extern struct nouveau_pgraph_object_class nv30_graph_grclass[]; + extern int nv20_graph_create_context(struct nouveau_channel *); + extern void nv20_graph_destroy_context(struct nouveau_channel *); + extern int nv20_graph_load_context(struct nouveau_channel *); +@@ -1094,11 +1135,9 @@ + extern int nv20_graph_init(struct drm_device *); + extern void nv20_graph_takedown(struct drm_device *); + extern int nv30_graph_init(struct drm_device *); +-extern void nv20_graph_set_region_tiling(struct drm_device *, int, uint32_t, +- uint32_t, uint32_t); ++extern void nv20_graph_set_tile_region(struct drm_device *dev, int i); + + /* nv40_graph.c */ +-extern struct nouveau_pgraph_object_class nv40_graph_grclass[]; + extern int nv40_graph_init(struct drm_device *); + extern void nv40_graph_takedown(struct drm_device *); + extern struct nouveau_channel *nv40_graph_channel(struct drm_device *); +@@ -1107,11 +1146,9 @@ + extern int nv40_graph_load_context(struct nouveau_channel *); + extern int nv40_graph_unload_context(struct drm_device *); + extern void nv40_grctx_init(struct nouveau_grctx *); +-extern void nv40_graph_set_region_tiling(struct drm_device *, int, uint32_t, +- uint32_t, uint32_t); ++extern void nv40_graph_set_tile_region(struct drm_device *dev, int i); + + /* nv50_graph.c */ +-extern struct nouveau_pgraph_object_class nv50_graph_grclass[]; + extern int nv50_graph_init(struct drm_device *); + extern void nv50_graph_takedown(struct drm_device *); + extern void nv50_graph_fifo_access(struct drm_device *, bool); +@@ -1120,8 +1157,9 @@ + extern void nv50_graph_destroy_context(struct nouveau_channel *); + extern int nv50_graph_load_context(struct nouveau_channel *); + extern int nv50_graph_unload_context(struct drm_device *); +-extern void nv50_graph_context_switch(struct drm_device *); + extern int nv50_grctx_init(struct nouveau_grctx *); ++extern void nv50_graph_tlb_flush(struct drm_device *dev); ++extern void nv86_graph_tlb_flush(struct drm_device 
*dev); + + /* nvc0_graph.c */ + extern int nvc0_graph_init(struct drm_device *); +@@ -1133,16 +1171,22 @@ + extern int nvc0_graph_load_context(struct nouveau_channel *); + extern int nvc0_graph_unload_context(struct drm_device *); + ++/* nv84_crypt.c */ ++extern int nv84_crypt_init(struct drm_device *dev); ++extern void nv84_crypt_fini(struct drm_device *dev); ++extern int nv84_crypt_create_context(struct nouveau_channel *); ++extern void nv84_crypt_destroy_context(struct nouveau_channel *); ++extern void nv84_crypt_tlb_flush(struct drm_device *dev); ++ + /* nv04_instmem.c */ + extern int nv04_instmem_init(struct drm_device *); + extern void nv04_instmem_takedown(struct drm_device *); + extern int nv04_instmem_suspend(struct drm_device *); + extern void nv04_instmem_resume(struct drm_device *); +-extern int nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *, +- uint32_t *size); +-extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *); +-extern int nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *); +-extern int nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *); ++extern int nv04_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align); ++extern void nv04_instmem_put(struct nouveau_gpuobj *); ++extern int nv04_instmem_map(struct nouveau_gpuobj *); ++extern void nv04_instmem_unmap(struct nouveau_gpuobj *); + extern void nv04_instmem_flush(struct drm_device *); + + /* nv50_instmem.c */ +@@ -1150,11 +1194,10 @@ + extern void nv50_instmem_takedown(struct drm_device *); + extern int nv50_instmem_suspend(struct drm_device *); + extern void nv50_instmem_resume(struct drm_device *); +-extern int nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *, +- uint32_t *size); +-extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *); +-extern int nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *); +-extern int nv50_instmem_unbind(struct drm_device *, struct 
nouveau_gpuobj *); ++extern int nv50_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align); ++extern void nv50_instmem_put(struct nouveau_gpuobj *); ++extern int nv50_instmem_map(struct nouveau_gpuobj *); ++extern void nv50_instmem_unmap(struct nouveau_gpuobj *); + extern void nv50_instmem_flush(struct drm_device *); + extern void nv84_instmem_flush(struct drm_device *); + extern void nv50_vm_flush(struct drm_device *, int engine); +@@ -1164,11 +1207,10 @@ + extern void nvc0_instmem_takedown(struct drm_device *); + extern int nvc0_instmem_suspend(struct drm_device *); + extern void nvc0_instmem_resume(struct drm_device *); +-extern int nvc0_instmem_populate(struct drm_device *, struct nouveau_gpuobj *, +- uint32_t *size); +-extern void nvc0_instmem_clear(struct drm_device *, struct nouveau_gpuobj *); +-extern int nvc0_instmem_bind(struct drm_device *, struct nouveau_gpuobj *); +-extern int nvc0_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *); ++extern int nvc0_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align); ++extern void nvc0_instmem_put(struct nouveau_gpuobj *); ++extern int nvc0_instmem_map(struct nouveau_gpuobj *); ++extern void nvc0_instmem_unmap(struct nouveau_gpuobj *); + extern void nvc0_instmem_flush(struct drm_device *); + + /* nv04_mc.c */ +@@ -1239,7 +1281,7 @@ + extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val); + extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index); + extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val); +-extern int nouveau_bo_sync_gpu(struct nouveau_bo *, struct nouveau_channel *); ++extern void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *); + + /* nouveau_fence.c */ + struct nouveau_fence; +@@ -1255,12 +1297,35 @@ + void (*work)(void *priv, bool signalled), + void *priv); + struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *); +-extern bool nouveau_fence_signalled(void *obj, void *arg); +-extern int 
nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr); ++ ++extern bool __nouveau_fence_signalled(void *obj, void *arg); ++extern int __nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr); ++extern int __nouveau_fence_flush(void *obj, void *arg); ++extern void __nouveau_fence_unref(void **obj); ++extern void *__nouveau_fence_ref(void *obj); ++ ++static inline bool nouveau_fence_signalled(struct nouveau_fence *obj) ++{ ++ return __nouveau_fence_signalled(obj, NULL); ++} ++static inline int ++nouveau_fence_wait(struct nouveau_fence *obj, bool lazy, bool intr) ++{ ++ return __nouveau_fence_wait(obj, NULL, lazy, intr); ++} + extern int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *); +-extern int nouveau_fence_flush(void *obj, void *arg); +-extern void nouveau_fence_unref(void **obj); +-extern void *nouveau_fence_ref(void *obj); ++static inline int nouveau_fence_flush(struct nouveau_fence *obj) ++{ ++ return __nouveau_fence_flush(obj, NULL); ++} ++static inline void nouveau_fence_unref(struct nouveau_fence **obj) ++{ ++ __nouveau_fence_unref((void **)obj); ++} ++static inline struct nouveau_fence *nouveau_fence_ref(struct nouveau_fence *obj) ++{ ++ return __nouveau_fence_ref(obj); ++} + + /* nouveau_gem.c */ + extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *, +@@ -1280,15 +1345,28 @@ + extern int nouveau_gem_ioctl_info(struct drm_device *, void *, + struct drm_file *); + ++/* nouveau_display.c */ ++int nouveau_vblank_enable(struct drm_device *dev, int crtc); ++void nouveau_vblank_disable(struct drm_device *dev, int crtc); ++int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, ++ struct drm_pending_vblank_event *event); ++int nouveau_finish_page_flip(struct nouveau_channel *, ++ struct nouveau_page_flip_state *); ++ + /* nv10_gpio.c */ + int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); + int nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); + 
+ /* nv50_gpio.c */ + int nv50_gpio_init(struct drm_device *dev); ++void nv50_gpio_fini(struct drm_device *dev); + int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); + int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); +-void nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on); ++int nv50_gpio_irq_register(struct drm_device *, enum dcb_gpio_tag, ++ void (*)(void *, int), void *); ++void nv50_gpio_irq_unregister(struct drm_device *, enum dcb_gpio_tag, ++ void (*)(void *, int), void *); ++bool nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on); + + /* nv50_calc. */ + int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk, +@@ -1478,5 +1556,6 @@ + #define NV_SW_VBLSEM_OFFSET 0x00000400 + #define NV_SW_VBLSEM_RELEASE_VALUE 0x00000404 + #define NV_SW_VBLSEM_RELEASE 0x00000408 ++#define NV_SW_PAGE_FLIP 0x00000500 + + #endif /* __NOUVEAU_DRV_H__ */ +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_fbcon.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_fbcon.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_fbcon.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_fbcon.c 2010-11-12 06:18:06.000000000 +0100 +@@ -49,6 +49,96 @@ + #include "nouveau_fbcon.h" + #include "nouveau_dma.h" + ++static void ++nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) ++{ ++ struct nouveau_fbdev *nfbdev = info->par; ++ struct drm_device *dev = nfbdev->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int ret; ++ ++ if (info->state != FBINFO_STATE_RUNNING) ++ return; ++ ++ ret = -ENODEV; ++ if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && ++ mutex_trylock(&dev_priv->channel->mutex)) { ++ if (dev_priv->card_type < NV_50) ++ ret = nv04_fbcon_fillrect(info, rect); ++ else ++ if (dev_priv->card_type < NV_C0) ++ ret = nv50_fbcon_fillrect(info, rect); ++ 
mutex_unlock(&dev_priv->channel->mutex); ++ } ++ ++ if (ret == 0) ++ return; ++ ++ if (ret != -ENODEV) ++ nouveau_fbcon_gpu_lockup(info); ++ cfb_fillrect(info, rect); ++} ++ ++static void ++nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image) ++{ ++ struct nouveau_fbdev *nfbdev = info->par; ++ struct drm_device *dev = nfbdev->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int ret; ++ ++ if (info->state != FBINFO_STATE_RUNNING) ++ return; ++ ++ ret = -ENODEV; ++ if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && ++ mutex_trylock(&dev_priv->channel->mutex)) { ++ if (dev_priv->card_type < NV_50) ++ ret = nv04_fbcon_copyarea(info, image); ++ else ++ if (dev_priv->card_type < NV_C0) ++ ret = nv50_fbcon_copyarea(info, image); ++ mutex_unlock(&dev_priv->channel->mutex); ++ } ++ ++ if (ret == 0) ++ return; ++ ++ if (ret != -ENODEV) ++ nouveau_fbcon_gpu_lockup(info); ++ cfb_copyarea(info, image); ++} ++ ++static void ++nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) ++{ ++ struct nouveau_fbdev *nfbdev = info->par; ++ struct drm_device *dev = nfbdev->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int ret; ++ ++ if (info->state != FBINFO_STATE_RUNNING) ++ return; ++ ++ ret = -ENODEV; ++ if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && ++ mutex_trylock(&dev_priv->channel->mutex)) { ++ if (dev_priv->card_type < NV_50) ++ ret = nv04_fbcon_imageblit(info, image); ++ else ++ if (dev_priv->card_type < NV_C0) ++ ret = nv50_fbcon_imageblit(info, image); ++ mutex_unlock(&dev_priv->channel->mutex); ++ } ++ ++ if (ret == 0) ++ return; ++ ++ if (ret != -ENODEV) ++ nouveau_fbcon_gpu_lockup(info); ++ cfb_imageblit(info, image); ++} ++ + static int + nouveau_fbcon_sync(struct fb_info *info) + { +@@ -58,12 +148,17 @@ + struct nouveau_channel *chan = dev_priv->channel; + int ret, i; + +- if (!chan || !chan->accel_done || ++ if (!chan || !chan->accel_done || 
in_interrupt() || + info->state != FBINFO_STATE_RUNNING || + info->flags & FBINFO_HWACCEL_DISABLED) + return 0; + +- if (RING_SPACE(chan, 4)) { ++ if (!mutex_trylock(&chan->mutex)) ++ return 0; ++ ++ ret = RING_SPACE(chan, 4); ++ if (ret) { ++ mutex_unlock(&chan->mutex); + nouveau_fbcon_gpu_lockup(info); + return 0; + } +@@ -74,6 +169,7 @@ + OUT_RING(chan, 0); + nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff); + FIRE_RING(chan); ++ mutex_unlock(&chan->mutex); + + ret = -EBUSY; + for (i = 0; i < 100000; i++) { +@@ -97,24 +193,9 @@ + .owner = THIS_MODULE, + .fb_check_var = drm_fb_helper_check_var, + .fb_set_par = drm_fb_helper_set_par, +- .fb_fillrect = cfb_fillrect, +- .fb_copyarea = cfb_copyarea, +- .fb_imageblit = cfb_imageblit, +- .fb_sync = nouveau_fbcon_sync, +- .fb_pan_display = drm_fb_helper_pan_display, +- .fb_blank = drm_fb_helper_blank, +- .fb_setcmap = drm_fb_helper_setcmap, +- .fb_debug_enter = drm_fb_helper_debug_enter, +- .fb_debug_leave = drm_fb_helper_debug_leave, +-}; +- +-static struct fb_ops nv04_fbcon_ops = { +- .owner = THIS_MODULE, +- .fb_check_var = drm_fb_helper_check_var, +- .fb_set_par = drm_fb_helper_set_par, +- .fb_fillrect = nv04_fbcon_fillrect, +- .fb_copyarea = nv04_fbcon_copyarea, +- .fb_imageblit = nv04_fbcon_imageblit, ++ .fb_fillrect = nouveau_fbcon_fillrect, ++ .fb_copyarea = nouveau_fbcon_copyarea, ++ .fb_imageblit = nouveau_fbcon_imageblit, + .fb_sync = nouveau_fbcon_sync, + .fb_pan_display = drm_fb_helper_pan_display, + .fb_blank = drm_fb_helper_blank, +@@ -123,14 +204,13 @@ + .fb_debug_leave = drm_fb_helper_debug_leave, + }; + +-static struct fb_ops nv50_fbcon_ops = { ++static struct fb_ops nouveau_fbcon_sw_ops = { + .owner = THIS_MODULE, + .fb_check_var = drm_fb_helper_check_var, + .fb_set_par = drm_fb_helper_set_par, +- .fb_fillrect = nv50_fbcon_fillrect, +- .fb_copyarea = nv50_fbcon_copyarea, +- .fb_imageblit = nv50_fbcon_imageblit, +- .fb_sync = nouveau_fbcon_sync, ++ .fb_fillrect = cfb_fillrect, ++ 
.fb_copyarea = cfb_copyarea, ++ .fb_imageblit = cfb_imageblit, + .fb_pan_display = drm_fb_helper_pan_display, + .fb_blank = drm_fb_helper_blank, + .fb_setcmap = drm_fb_helper_setcmap, +@@ -257,7 +337,7 @@ + FBINFO_HWACCEL_FILLRECT | + FBINFO_HWACCEL_IMAGEBLIT; + info->flags |= FBINFO_CAN_FORCE_OUTPUT; +- info->fbops = &nouveau_fbcon_ops; ++ info->fbops = &nouveau_fbcon_sw_ops; + info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset - + dev_priv->vm_vram_base; + info->fix.smem_len = size; +@@ -285,19 +365,18 @@ + info->pixmap.flags = FB_PIXMAP_SYSTEM; + info->pixmap.scan_align = 1; + ++ mutex_unlock(&dev->struct_mutex); ++ + if (dev_priv->channel && !nouveau_nofbaccel) { +- switch (dev_priv->card_type) { +- case NV_C0: +- break; +- case NV_50: +- nv50_fbcon_accel_init(info); +- info->fbops = &nv50_fbcon_ops; +- break; +- default: +- nv04_fbcon_accel_init(info); +- info->fbops = &nv04_fbcon_ops; +- break; +- }; ++ ret = -ENODEV; ++ if (dev_priv->card_type < NV_50) ++ ret = nv04_fbcon_accel_init(info); ++ else ++ if (dev_priv->card_type < NV_C0) ++ ret = nv50_fbcon_accel_init(info); ++ ++ if (ret == 0) ++ info->fbops = &nouveau_fbcon_ops; + } + + nouveau_fbcon_zfill(dev, nfbdev); +@@ -308,7 +387,6 @@ + nouveau_fb->base.height, + nvbo->bo.offset, nvbo); + +- mutex_unlock(&dev->struct_mutex); + vga_switcheroo_client_fb_set(dev->pdev, info); + return 0; + +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_fbcon.h linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_fbcon.h +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_fbcon.h 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_fbcon.h 2010-11-12 06:18:06.000000000 +0100 +@@ -40,13 +40,13 @@ + + void nouveau_fbcon_restore(void); + +-void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region); +-void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect); +-void nv04_fbcon_imageblit(struct fb_info *info, 
const struct fb_image *image); ++int nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region); ++int nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect); ++int nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image); + int nv04_fbcon_accel_init(struct fb_info *info); +-void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect); +-void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region); +-void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image); ++int nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect); ++int nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region); ++int nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image); + int nv50_fbcon_accel_init(struct fb_info *info); + + void nouveau_fbcon_gpu_lockup(struct fb_info *info); +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_fence.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_fence.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_fence.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_fence.c 2010-11-12 06:18:06.000000000 +0100 +@@ -64,6 +64,7 @@ + struct nouveau_fence *fence = + container_of(ref, struct nouveau_fence, refcount); + ++ nouveau_channel_ref(NULL, &fence->channel); + kfree(fence); + } + +@@ -113,13 +114,13 @@ + if (!fence) + return -ENOMEM; + kref_init(&fence->refcount); +- fence->channel = chan; ++ nouveau_channel_ref(chan, &fence->channel); + + if (emit) + ret = nouveau_fence_emit(fence); + + if (ret) +- nouveau_fence_unref((void *)&fence); ++ nouveau_fence_unref(&fence); + *pfence = fence; + return ret; + } +@@ -127,7 +128,7 @@ + struct nouveau_channel * + nouveau_fence_channel(struct nouveau_fence *fence) + { +- return fence ? fence->channel : NULL; ++ return fence ? 
nouveau_channel_get_unlocked(fence->channel) : NULL; + } + + int +@@ -182,7 +183,7 @@ + } + + void +-nouveau_fence_unref(void **sync_obj) ++__nouveau_fence_unref(void **sync_obj) + { + struct nouveau_fence *fence = nouveau_fence(*sync_obj); + +@@ -192,7 +193,7 @@ + } + + void * +-nouveau_fence_ref(void *sync_obj) ++__nouveau_fence_ref(void *sync_obj) + { + struct nouveau_fence *fence = nouveau_fence(sync_obj); + +@@ -201,7 +202,7 @@ + } + + bool +-nouveau_fence_signalled(void *sync_obj, void *sync_arg) ++__nouveau_fence_signalled(void *sync_obj, void *sync_arg) + { + struct nouveau_fence *fence = nouveau_fence(sync_obj); + struct nouveau_channel *chan = fence->channel; +@@ -214,13 +215,13 @@ + } + + int +-nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr) ++__nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr) + { + unsigned long timeout = jiffies + (3 * DRM_HZ); + int ret = 0; + + while (1) { +- if (nouveau_fence_signalled(sync_obj, sync_arg)) ++ if (__nouveau_fence_signalled(sync_obj, sync_arg)) + break; + + if (time_after_eq(jiffies, timeout)) { +@@ -249,6 +250,7 @@ + { + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_semaphore *sema; ++ int ret; + + if (!USE_SEMA(dev)) + return NULL; +@@ -257,10 +259,14 @@ + if (!sema) + goto fail; + ++ ret = drm_mm_pre_get(&dev_priv->fence.heap); ++ if (ret) ++ goto fail; ++ + spin_lock(&dev_priv->fence.lock); + sema->mem = drm_mm_search_free(&dev_priv->fence.heap, 4, 0, 0); + if (sema->mem) +- sema->mem = drm_mm_get_block(sema->mem, 4, 0); ++ sema->mem = drm_mm_get_block_atomic(sema->mem, 4, 0); + spin_unlock(&dev_priv->fence.lock); + + if (!sema->mem) +@@ -363,7 +369,7 @@ + + kref_get(&sema->ref); + nouveau_fence_work(fence, semaphore_work, sema); +- nouveau_fence_unref((void *)&fence); ++ nouveau_fence_unref(&fence); + + return 0; + } +@@ -375,33 +381,49 @@ + struct nouveau_channel *chan = nouveau_fence_channel(fence); + struct drm_device *dev = 
wchan->dev; + struct nouveau_semaphore *sema; +- int ret; ++ int ret = 0; + +- if (likely(!fence || chan == wchan || +- nouveau_fence_signalled(fence, NULL))) +- return 0; ++ if (likely(!chan || chan == wchan || ++ nouveau_fence_signalled(fence))) ++ goto out; + + sema = alloc_semaphore(dev); + if (!sema) { + /* Early card or broken userspace, fall back to + * software sync. */ +- return nouveau_fence_wait(fence, NULL, false, false); ++ ret = nouveau_fence_wait(fence, true, false); ++ goto out; ++ } ++ ++ /* try to take chan's mutex, if we can't take it right away ++ * we have to fallback to software sync to prevent locking ++ * order issues ++ */ ++ if (!mutex_trylock(&chan->mutex)) { ++ ret = nouveau_fence_wait(fence, true, false); ++ goto out_unref; + } + + /* Make wchan wait until it gets signalled */ + ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema); + if (ret) +- goto out; ++ goto out_unlock; + + /* Signal the semaphore from chan */ + ret = emit_semaphore(chan, NV_SW_SEMAPHORE_RELEASE, sema); +-out: ++ ++out_unlock: ++ mutex_unlock(&chan->mutex); ++out_unref: + kref_put(&sema->ref, free_semaphore); ++out: ++ if (chan) ++ nouveau_channel_put_unlocked(&chan); + return ret; + } + + int +-nouveau_fence_flush(void *sync_obj, void *sync_arg) ++__nouveau_fence_flush(void *sync_obj, void *sync_arg) + { + return 0; + } +@@ -415,7 +437,7 @@ + int ret; + + /* Create an NV_SW object for various sync purposes */ +- ret = nouveau_gpuobj_sw_new(chan, NV_SW, &obj); ++ ret = nouveau_gpuobj_gr_new(chan, NV_SW, &obj); + if (ret) + return ret; + +@@ -468,6 +490,8 @@ + { + struct nouveau_fence *tmp, *fence; + ++ spin_lock(&chan->fence.lock); ++ + list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) { + fence->signalled = true; + list_del(&fence->entry); +@@ -477,6 +501,8 @@ + + kref_put(&fence->refcount, nouveau_fence_del); + } ++ ++ spin_unlock(&chan->fence.lock); + } + + int +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_gem.c 
linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_gem.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_gem.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_gem.c 2010-11-12 06:18:06.000000000 +0100 +@@ -48,9 +48,6 @@ + return; + nvbo->gem = NULL; + +- if (unlikely(nvbo->cpu_filp)) +- ttm_bo_synccpu_write_release(bo); +- + if (unlikely(nvbo->pin_refcnt)) { + nvbo->pin_refcnt = 1; + nouveau_bo_unpin(nvbo); +@@ -107,23 +104,29 @@ + } + + static bool +-nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags) { +- switch (tile_flags) { +- case 0x0000: +- case 0x1800: +- case 0x2800: +- case 0x4800: +- case 0x7000: +- case 0x7400: +- case 0x7a00: +- case 0xe000: +- break; +- default: +- NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags); +- return false; ++nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ if (dev_priv->card_type >= NV_50) { ++ switch (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) { ++ case 0x0000: ++ case 0x1800: ++ case 0x2800: ++ case 0x4800: ++ case 0x7000: ++ case 0x7400: ++ case 0x7a00: ++ case 0xe000: ++ return true; ++ } ++ } else { ++ if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK)) ++ return true; + } + +- return true; ++ NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags); ++ return false; + } + + int +@@ -140,11 +143,6 @@ + if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL)) + dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping; + +- if (req->channel_hint) { +- NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel_hint, +- file_priv, chan); +- } +- + if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM) + flags |= TTM_PL_FLAG_VRAM; + if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART) +@@ -155,10 +153,18 @@ + if (!nouveau_gem_tile_flags_valid(dev, req->info.tile_flags)) + return -EINVAL; + ++ if (req->channel_hint) { ++ chan = nouveau_channel_get(dev, file_priv, 
req->channel_hint); ++ if (IS_ERR(chan)) ++ return PTR_ERR(chan); ++ } ++ + ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags, + req->info.tile_mode, req->info.tile_flags, false, + (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE), + &nvbo); ++ if (chan) ++ nouveau_channel_put(&chan); + if (ret) + return ret; + +@@ -225,15 +231,8 @@ + + list_for_each_safe(entry, tmp, list) { + nvbo = list_entry(entry, struct nouveau_bo, entry); +- if (likely(fence)) { +- struct nouveau_fence *prev_fence; + +- spin_lock(&nvbo->bo.lock); +- prev_fence = nvbo->bo.sync_obj; +- nvbo->bo.sync_obj = nouveau_fence_ref(fence); +- spin_unlock(&nvbo->bo.lock); +- nouveau_fence_unref((void *)&prev_fence); +- } ++ nouveau_bo_fence(nvbo, fence); + + if (unlikely(nvbo->validate_mapped)) { + ttm_bo_kunmap(&nvbo->kmap); +@@ -293,14 +292,15 @@ + return -EINVAL; + } + +- ret = ttm_bo_reserve(&nvbo->bo, false, false, true, sequence); ++ ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence); + if (ret) { + validate_fini(op, NULL); +- if (ret == -EAGAIN) +- ret = ttm_bo_wait_unreserved(&nvbo->bo, false); ++ if (unlikely(ret == -EAGAIN)) ++ ret = ttm_bo_wait_unreserved(&nvbo->bo, true); + drm_gem_object_unreference_unlocked(gem); +- if (ret) { +- NV_ERROR(dev, "fail reserve\n"); ++ if (unlikely(ret)) { ++ if (ret != -ERESTARTSYS) ++ NV_ERROR(dev, "fail reserve\n"); + return ret; + } + goto retry; +@@ -325,25 +325,6 @@ + validate_fini(op, NULL); + return -EINVAL; + } +- +- if (unlikely(atomic_read(&nvbo->bo.cpu_writers) > 0)) { +- validate_fini(op, NULL); +- +- if (nvbo->cpu_filp == file_priv) { +- NV_ERROR(dev, "bo %p mapped by process trying " +- "to validate it!\n", nvbo); +- return -EINVAL; +- } +- +- mutex_unlock(&drm_global_mutex); +- ret = ttm_bo_wait_cpu(&nvbo->bo, false); +- mutex_lock(&drm_global_mutex); +- if (ret) { +- NV_ERROR(dev, "fail wait_cpu\n"); +- return ret; +- } +- goto retry; +- } + } + + return 0; +@@ -378,10 +359,11 @@ + + nvbo->channel = (b->read_domains & 
(1 << 31)) ? NULL : chan; + ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, +- false, false, false); ++ true, false, false); + nvbo->channel = NULL; + if (unlikely(ret)) { +- NV_ERROR(dev, "fail ttm_validate\n"); ++ if (ret != -ERESTARTSYS) ++ NV_ERROR(dev, "fail ttm_validate\n"); + return ret; + } + +@@ -433,13 +415,15 @@ + + ret = validate_init(chan, file_priv, pbbo, nr_buffers, op); + if (unlikely(ret)) { +- NV_ERROR(dev, "validate_init\n"); ++ if (ret != -ERESTARTSYS) ++ NV_ERROR(dev, "validate_init\n"); + return ret; + } + + ret = validate_list(chan, &op->vram_list, pbbo, user_buffers); + if (unlikely(ret < 0)) { +- NV_ERROR(dev, "validate vram_list\n"); ++ if (ret != -ERESTARTSYS) ++ NV_ERROR(dev, "validate vram_list\n"); + validate_fini(op, NULL); + return ret; + } +@@ -447,7 +431,8 @@ + + ret = validate_list(chan, &op->gart_list, pbbo, user_buffers); + if (unlikely(ret < 0)) { +- NV_ERROR(dev, "validate gart_list\n"); ++ if (ret != -ERESTARTSYS) ++ NV_ERROR(dev, "validate gart_list\n"); + validate_fini(op, NULL); + return ret; + } +@@ -455,7 +440,8 @@ + + ret = validate_list(chan, &op->both_list, pbbo, user_buffers); + if (unlikely(ret < 0)) { +- NV_ERROR(dev, "validate both_list\n"); ++ if (ret != -ERESTARTSYS) ++ NV_ERROR(dev, "validate both_list\n"); + validate_fini(op, NULL); + return ret; + } +@@ -579,7 +565,9 @@ + struct nouveau_fence *fence = NULL; + int i, j, ret = 0, do_reloc = 0; + +- NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan); ++ chan = nouveau_channel_get(dev, file_priv, req->channel); ++ if (IS_ERR(chan)) ++ return PTR_ERR(chan); + + req->vram_available = dev_priv->fb_aper_free; + req->gart_available = dev_priv->gart_info.aper_free; +@@ -589,28 +577,34 @@ + if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) { + NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n", + req->nr_push, NOUVEAU_GEM_MAX_PUSH); ++ nouveau_channel_put(&chan); + return -EINVAL; + } + + if (unlikely(req->nr_buffers > 
NOUVEAU_GEM_MAX_BUFFERS)) { + NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n", + req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS); ++ nouveau_channel_put(&chan); + return -EINVAL; + } + + if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) { + NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n", + req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS); ++ nouveau_channel_put(&chan); + return -EINVAL; + } + + push = u_memcpya(req->push, req->nr_push, sizeof(*push)); +- if (IS_ERR(push)) ++ if (IS_ERR(push)) { ++ nouveau_channel_put(&chan); + return PTR_ERR(push); ++ } + + bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo)); + if (IS_ERR(bo)) { + kfree(push); ++ nouveau_channel_put(&chan); + return PTR_ERR(bo); + } + +@@ -633,7 +627,8 @@ + ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers, + req->nr_buffers, &op, &do_reloc); + if (ret) { +- NV_ERROR(dev, "validate: %d\n", ret); ++ if (ret != -ERESTARTSYS) ++ NV_ERROR(dev, "validate: %d\n", ret); + goto out; + } + +@@ -726,7 +721,7 @@ + + out: + validate_fini(&op, fence); +- nouveau_fence_unref((void**)&fence); ++ nouveau_fence_unref(&fence); + kfree(bo); + kfree(push); + +@@ -744,6 +739,7 @@ + req->suffix1 = 0x00000000; + } + ++ nouveau_channel_put(&chan); + return ret; + } + +@@ -775,26 +771,9 @@ + return -ENOENT; + nvbo = nouveau_gem_object(gem); + +- if (nvbo->cpu_filp) { +- if (nvbo->cpu_filp == file_priv) +- goto out; +- +- ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait); +- if (ret) +- goto out; +- } +- +- if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) { +- spin_lock(&nvbo->bo.lock); +- ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait); +- spin_unlock(&nvbo->bo.lock); +- } else { +- ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait); +- if (ret == 0) +- nvbo->cpu_filp = file_priv; +- } +- +-out: ++ spin_lock(&nvbo->bo.lock); ++ ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait); ++ spin_unlock(&nvbo->bo.lock); + drm_gem_object_unreference_unlocked(gem); + return ret; + } +@@ -803,26 
+782,7 @@ + nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data, + struct drm_file *file_priv) + { +- struct drm_nouveau_gem_cpu_prep *req = data; +- struct drm_gem_object *gem; +- struct nouveau_bo *nvbo; +- int ret = -EINVAL; +- +- gem = drm_gem_object_lookup(dev, file_priv, req->handle); +- if (!gem) +- return -ENOENT; +- nvbo = nouveau_gem_object(gem); +- +- if (nvbo->cpu_filp != file_priv) +- goto out; +- nvbo->cpu_filp = NULL; +- +- ttm_bo_synccpu_write_release(&nvbo->bo); +- ret = 0; +- +-out: +- drm_gem_object_unreference_unlocked(gem); +- return ret; ++ return 0; + } + + int +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_hw.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_hw.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_hw.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_hw.c 2010-11-12 06:18:06.000000000 +0100 +@@ -519,11 +519,11 @@ + + struct pll_lims pll_lim; + struct nouveau_pll_vals pv; +- uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF; ++ enum pll_types pll = head ? 
PLL_VPLL1 : PLL_VPLL0; + +- if (get_pll_limits(dev, pllreg, &pll_lim)) ++ if (get_pll_limits(dev, pll, &pll_lim)) + return; +- nouveau_hw_get_pllvals(dev, pllreg, &pv); ++ nouveau_hw_get_pllvals(dev, pll, &pv); + + if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m && + pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n && +@@ -536,7 +536,7 @@ + pv.M1 = pll_lim.vco1.max_m; + pv.N1 = pll_lim.vco1.min_n; + pv.log2P = pll_lim.max_usable_log2p; +- nouveau_hw_setpll(dev, pllreg, &pv); ++ nouveau_hw_setpll(dev, pll_lim.reg, &pv); + } + + /* +@@ -953,7 +953,7 @@ + NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850); + + reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900); +- if (regp->crtc_cfg == NV_PCRTC_CONFIG_START_ADDRESS_HSYNC) ++ if (regp->crtc_cfg == NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC) + NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 | 0x10000); + else + NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 & ~0x10000); +@@ -1017,8 +1017,9 @@ + + NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start); + +- /* Setting 1 on this value gives you interrupts for every vblank period. */ +- NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0, 0); ++ /* Enable vblank interrupts. */ ++ NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0, ++ (dev->vblank_enabled[head] ? 
1 : 0)); + NVWriteCRTC(dev, head, NV_PCRTC_INTR_0, NV_PCRTC_INTR_0_VBLANK); + } + +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_hw.h linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_hw.h +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_hw.h 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_hw.h 2010-11-12 06:18:06.000000000 +0100 +@@ -416,6 +416,25 @@ + } + + static inline void ++nv_set_crtc_base(struct drm_device *dev, int head, uint32_t offset) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ NVWriteCRTC(dev, head, NV_PCRTC_START, offset); ++ ++ if (dev_priv->card_type == NV_04) { ++ /* ++ * Hilarious, the 24th bit doesn't want to stick to ++ * PCRTC_START... ++ */ ++ int cre_heb = NVReadVgaCrtc(dev, head, NV_CIO_CRE_HEB__INDEX); ++ ++ NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HEB__INDEX, ++ (cre_heb & ~0x40) | ((offset >> 18) & 0x40)); ++ } ++} ++ ++static inline void + nv_show_cursor(struct drm_device *dev, int head, bool show) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_i2c.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_i2c.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_i2c.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_i2c.c 2010-11-12 06:18:06.000000000 +0100 +@@ -256,7 +256,7 @@ + if (index >= DCB_MAX_NUM_I2C_ENTRIES) + return NULL; + +- if (dev_priv->chipset >= NV_50 && (i2c->entry & 0x00000100)) { ++ if (dev_priv->card_type >= NV_50 && (i2c->entry & 0x00000100)) { + uint32_t reg = 0xe500, val; + + if (i2c->port_type == 6) { +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_irq.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_irq.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_irq.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_irq.c 
2010-11-12 06:18:06.000000000 +0100 +@@ -36,11 +36,7 @@ + #include "nouveau_drv.h" + #include "nouveau_reg.h" + #include "nouveau_ramht.h" +-#include +- +-/* needed for hotplug irq */ +-#include "nouveau_connector.h" +-#include "nv50_display.h" ++#include "nouveau_util.h" + + void + nouveau_irq_preinstall(struct drm_device *dev) +@@ -50,18 +46,19 @@ + /* Master disable */ + nv_wr32(dev, NV03_PMC_INTR_EN_0, 0); + +- if (dev_priv->card_type >= NV_50) { +- INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh); +- INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh); +- INIT_LIST_HEAD(&dev_priv->vbl_waiting); +- } ++ INIT_LIST_HEAD(&dev_priv->vbl_waiting); + } + + int + nouveau_irq_postinstall(struct drm_device *dev) + { ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ + /* Master enable */ + nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE); ++ if (dev_priv->msi_enabled) ++ nv_wr08(dev, 0x00088068, 0xff); ++ + return 0; + } + +@@ -72,1182 +69,83 @@ + nv_wr32(dev, NV03_PMC_INTR_EN_0, 0); + } + +-static int +-nouveau_call_method(struct nouveau_channel *chan, int class, int mthd, int data) +-{ +- struct drm_nouveau_private *dev_priv = chan->dev->dev_private; +- struct nouveau_pgraph_object_method *grm; +- struct nouveau_pgraph_object_class *grc; +- +- grc = dev_priv->engine.graph.grclass; +- while (grc->id) { +- if (grc->id == class) +- break; +- grc++; +- } +- +- if (grc->id != class || !grc->methods) +- return -ENOENT; +- +- grm = grc->methods; +- while (grm->id) { +- if (grm->id == mthd) +- return grm->exec(chan, class, mthd, data); +- grm++; +- } +- +- return -ENOENT; +-} +- +-static bool +-nouveau_fifo_swmthd(struct nouveau_channel *chan, uint32_t addr, uint32_t data) +-{ +- struct drm_device *dev = chan->dev; +- const int subc = (addr >> 13) & 0x7; +- const int mthd = addr & 0x1ffc; +- +- if (mthd == 0x0000) { +- struct nouveau_gpuobj *gpuobj; +- +- gpuobj = nouveau_ramht_find(chan, data); +- if (!gpuobj) +- return false; 
+- +- if (gpuobj->engine != NVOBJ_ENGINE_SW) +- return false; +- +- chan->sw_subchannel[subc] = gpuobj->class; +- nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev, +- NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4)); +- return true; +- } +- +- /* hw object */ +- if (nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE) & (1 << (subc*4))) +- return false; +- +- if (nouveau_call_method(chan, chan->sw_subchannel[subc], mthd, data)) +- return false; +- +- return true; +-} +- +-static void +-nouveau_fifo_irq_handler(struct drm_device *dev) +-{ +- struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nouveau_engine *engine = &dev_priv->engine; +- uint32_t status, reassign; +- int cnt = 0; +- +- reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1; +- while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) { +- struct nouveau_channel *chan = NULL; +- uint32_t chid, get; +- +- nv_wr32(dev, NV03_PFIFO_CACHES, 0); +- +- chid = engine->fifo.channel_id(dev); +- if (chid >= 0 && chid < engine->fifo.channels) +- chan = dev_priv->fifos[chid]; +- get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET); +- +- if (status & NV_PFIFO_INTR_CACHE_ERROR) { +- uint32_t mthd, data; +- int ptr; +- +- /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before +- * wrapping on my G80 chips, but CACHE1 isn't big +- * enough for this much data.. Tests show that it +- * wraps around to the start at GET=0x800.. No clue +- * as to why.. 
+- */ +- ptr = (get & 0x7ff) >> 2; +- +- if (dev_priv->card_type < NV_40) { +- mthd = nv_rd32(dev, +- NV04_PFIFO_CACHE1_METHOD(ptr)); +- data = nv_rd32(dev, +- NV04_PFIFO_CACHE1_DATA(ptr)); +- } else { +- mthd = nv_rd32(dev, +- NV40_PFIFO_CACHE1_METHOD(ptr)); +- data = nv_rd32(dev, +- NV40_PFIFO_CACHE1_DATA(ptr)); +- } +- +- if (!chan || !nouveau_fifo_swmthd(chan, mthd, data)) { +- NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d " +- "Mthd 0x%04x Data 0x%08x\n", +- chid, (mthd >> 13) & 7, mthd & 0x1ffc, +- data); +- } +- +- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0); +- nv_wr32(dev, NV03_PFIFO_INTR_0, +- NV_PFIFO_INTR_CACHE_ERROR); +- +- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, +- nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1); +- nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4); +- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, +- nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1); +- nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0); +- +- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, +- nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1); +- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1); +- +- status &= ~NV_PFIFO_INTR_CACHE_ERROR; +- } +- +- if (status & NV_PFIFO_INTR_DMA_PUSHER) { +- u32 get = nv_rd32(dev, 0x003244); +- u32 put = nv_rd32(dev, 0x003240); +- u32 push = nv_rd32(dev, 0x003220); +- u32 state = nv_rd32(dev, 0x003228); +- +- if (dev_priv->card_type == NV_50) { +- u32 ho_get = nv_rd32(dev, 0x003328); +- u32 ho_put = nv_rd32(dev, 0x003320); +- u32 ib_get = nv_rd32(dev, 0x003334); +- u32 ib_put = nv_rd32(dev, 0x003330); +- +- NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x " +- "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x " +- "State 0x%08x Push 0x%08x\n", +- chid, ho_get, get, ho_put, put, ib_get, ib_put, +- state, push); +- +- /* METHOD_COUNT, in DMA_STATE on earlier chipsets */ +- nv_wr32(dev, 0x003364, 0x00000000); +- if (get != put || ho_get != ho_put) { +- nv_wr32(dev, 0x003244, put); +- nv_wr32(dev, 0x003328, ho_put); +- } else +- if (ib_get != ib_put) { +- nv_wr32(dev, 0x003334, ib_put); +- } +- } 
else { +- NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x " +- "Put 0x%08x State 0x%08x Push 0x%08x\n", +- chid, get, put, state, push); +- +- if (get != put) +- nv_wr32(dev, 0x003244, put); +- } +- +- nv_wr32(dev, 0x003228, 0x00000000); +- nv_wr32(dev, 0x003220, 0x00000001); +- nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER); +- status &= ~NV_PFIFO_INTR_DMA_PUSHER; +- } +- +- if (status & NV_PFIFO_INTR_SEMAPHORE) { +- uint32_t sem; +- +- status &= ~NV_PFIFO_INTR_SEMAPHORE; +- nv_wr32(dev, NV03_PFIFO_INTR_0, +- NV_PFIFO_INTR_SEMAPHORE); +- +- sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE); +- nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1); +- +- nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4); +- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1); +- } +- +- if (dev_priv->card_type == NV_50) { +- if (status & 0x00000010) { +- nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT"); +- status &= ~0x00000010; +- nv_wr32(dev, 0x002100, 0x00000010); +- } +- } +- +- if (status) { +- NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n", +- status, chid); +- nv_wr32(dev, NV03_PFIFO_INTR_0, status); +- status = 0; +- } +- +- nv_wr32(dev, NV03_PFIFO_CACHES, reassign); +- } +- +- if (status) { +- NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt); +- nv_wr32(dev, 0x2140, 0); +- nv_wr32(dev, 0x140, 0); +- } +- +- nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING); +-} +- +-struct nouveau_bitfield_names { +- uint32_t mask; +- const char *name; +-}; +- +-static struct nouveau_bitfield_names nstatus_names[] = +-{ +- { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" }, +- { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" }, +- { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" }, +- { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" } +-}; +- +-static struct nouveau_bitfield_names nstatus_names_nv10[] = +-{ +- { NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" }, +- { NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" }, +- { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" 
}, +- { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" } +-}; +- +-static struct nouveau_bitfield_names nsource_names[] = +-{ +- { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" }, +- { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" }, +- { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" }, +- { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" }, +- { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" }, +- { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" }, +- { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" }, +- { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" }, +- { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" }, +- { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" }, +- { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" }, +- { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" }, +- { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" }, +- { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" }, +- { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" }, +- { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" }, +- { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" }, +- { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" }, +- { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" }, +-}; +- +-static void +-nouveau_print_bitfield_names_(uint32_t value, +- const struct nouveau_bitfield_names *namelist, +- const int namelist_len) +-{ +- /* +- * Caller must have already printed the KERN_* log level for us. +- * Also the caller is responsible for adding the newline. 
+- */ +- int i; +- for (i = 0; i < namelist_len; ++i) { +- uint32_t mask = namelist[i].mask; +- if (value & mask) { +- printk(" %s", namelist[i].name); +- value &= ~mask; +- } +- } +- if (value) +- printk(" (unknown bits 0x%08x)", value); +-} +-#define nouveau_print_bitfield_names(val, namelist) \ +- nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist)) +- +-struct nouveau_enum_names { +- uint32_t value; +- const char *name; +-}; +- +-static void +-nouveau_print_enum_names_(uint32_t value, +- const struct nouveau_enum_names *namelist, +- const int namelist_len) +-{ +- /* +- * Caller must have already printed the KERN_* log level for us. +- * Also the caller is responsible for adding the newline. +- */ +- int i; +- for (i = 0; i < namelist_len; ++i) { +- if (value == namelist[i].value) { +- printk("%s", namelist[i].name); +- return; +- } +- } +- printk("unknown value 0x%08x", value); +-} +-#define nouveau_print_enum_names(val, namelist) \ +- nouveau_print_enum_names_((val), (namelist), ARRAY_SIZE(namelist)) +- +-static int +-nouveau_graph_chid_from_grctx(struct drm_device *dev) ++irqreturn_t ++nouveau_irq_handler(DRM_IRQ_ARGS) + { ++ struct drm_device *dev = (struct drm_device *)arg; + struct drm_nouveau_private *dev_priv = dev->dev_private; +- uint32_t inst; ++ unsigned long flags; ++ u32 stat; + int i; + +- if (dev_priv->card_type < NV_40) +- return dev_priv->engine.fifo.channels; +- else +- if (dev_priv->card_type < NV_50) { +- inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 4; +- +- for (i = 0; i < dev_priv->engine.fifo.channels; i++) { +- struct nouveau_channel *chan = dev_priv->fifos[i]; +- +- if (!chan || !chan->ramin_grctx) +- continue; +- +- if (inst == chan->ramin_grctx->pinst) +- break; +- } +- } else { +- inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 12; +- +- for (i = 0; i < dev_priv->engine.fifo.channels; i++) { +- struct nouveau_channel *chan = dev_priv->fifos[i]; +- +- if (!chan || !chan->ramin) +- continue; +- +- if (inst == 
chan->ramin->vinst) +- break; +- } +- } +- +- +- return i; +-} +- +-static int +-nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret) +-{ +- struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nouveau_engine *engine = &dev_priv->engine; +- int channel; +- +- if (dev_priv->card_type < NV_10) +- channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf; +- else +- if (dev_priv->card_type < NV_40) +- channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f; +- else +- channel = nouveau_graph_chid_from_grctx(dev); +- +- if (channel >= engine->fifo.channels || !dev_priv->fifos[channel]) { +- NV_ERROR(dev, "AIII, invalid/inactive channel id %d\n", channel); +- return -EINVAL; +- } +- +- *channel_ret = channel; +- return 0; +-} +- +-struct nouveau_pgraph_trap { +- int channel; +- int class; +- int subc, mthd, size; +- uint32_t data, data2; +- uint32_t nsource, nstatus; +-}; +- +-static void +-nouveau_graph_trap_info(struct drm_device *dev, +- struct nouveau_pgraph_trap *trap) +-{ +- struct drm_nouveau_private *dev_priv = dev->dev_private; +- uint32_t address; +- +- trap->nsource = trap->nstatus = 0; +- if (dev_priv->card_type < NV_50) { +- trap->nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); +- trap->nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS); +- } +- +- if (nouveau_graph_trapped_channel(dev, &trap->channel)) +- trap->channel = -1; +- address = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR); +- +- trap->mthd = address & 0x1FFC; +- trap->data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA); +- if (dev_priv->card_type < NV_10) { +- trap->subc = (address >> 13) & 0x7; +- } else { +- trap->subc = (address >> 16) & 0x7; +- trap->data2 = nv_rd32(dev, NV10_PGRAPH_TRAPPED_DATA_HIGH); +- } +- +- if (dev_priv->card_type < NV_10) +- trap->class = nv_rd32(dev, 0x400180 + trap->subc*4) & 0xFF; +- else if (dev_priv->card_type < NV_40) +- trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFF; +- else if (dev_priv->card_type < NV_50) +- trap->class = 
nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFFF; +- else +- trap->class = nv_rd32(dev, 0x400814); +-} +- +-static void +-nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id, +- struct nouveau_pgraph_trap *trap) +-{ +- struct drm_nouveau_private *dev_priv = dev->dev_private; +- uint32_t nsource = trap->nsource, nstatus = trap->nstatus; +- +- if (dev_priv->card_type < NV_50) { +- NV_INFO(dev, "%s - nSource:", id); +- nouveau_print_bitfield_names(nsource, nsource_names); +- printk(", nStatus:"); +- if (dev_priv->card_type < NV_10) +- nouveau_print_bitfield_names(nstatus, nstatus_names); +- else +- nouveau_print_bitfield_names(nstatus, nstatus_names_nv10); +- printk("\n"); +- } +- +- NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x " +- "Data 0x%08x:0x%08x\n", +- id, trap->channel, trap->subc, +- trap->class, trap->mthd, +- trap->data2, trap->data); +-} +- +-static int +-nouveau_pgraph_intr_swmthd(struct drm_device *dev, +- struct nouveau_pgraph_trap *trap) +-{ +- struct drm_nouveau_private *dev_priv = dev->dev_private; +- +- if (trap->channel < 0 || +- trap->channel >= dev_priv->engine.fifo.channels || +- !dev_priv->fifos[trap->channel]) +- return -ENODEV; +- +- return nouveau_call_method(dev_priv->fifos[trap->channel], +- trap->class, trap->mthd, trap->data); +-} +- +-static inline void +-nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource) +-{ +- struct nouveau_pgraph_trap trap; +- int unhandled = 0; ++ stat = nv_rd32(dev, NV03_PMC_INTR_0); ++ if (!stat) ++ return IRQ_NONE; + +- nouveau_graph_trap_info(dev, &trap); ++ spin_lock_irqsave(&dev_priv->context_switch_lock, flags); ++ for (i = 0; i < 32 && stat; i++) { ++ if (!(stat & (1 << i)) || !dev_priv->irq_handler[i]) ++ continue; + +- if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { +- if (nouveau_pgraph_intr_swmthd(dev, &trap)) +- unhandled = 1; +- } else { +- unhandled = 1; ++ dev_priv->irq_handler[i](dev); ++ stat &= ~(1 << i); + } + +- if (unhandled) +- 
nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap); +-} +- +-static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20); +- +-static int nouveau_ratelimit(void) +-{ +- return __ratelimit(&nouveau_ratelimit_state); +-} +- +- +-static inline void +-nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource) +-{ +- struct nouveau_pgraph_trap trap; +- int unhandled = 0; +- +- nouveau_graph_trap_info(dev, &trap); +- trap.nsource = nsource; +- +- if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { +- if (nouveau_pgraph_intr_swmthd(dev, &trap)) +- unhandled = 1; +- } else if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) { +- uint32_t v = nv_rd32(dev, 0x402000); +- nv_wr32(dev, 0x402000, v); +- +- /* dump the error anyway for now: it's useful for +- Gallium development */ +- unhandled = 1; +- } else { +- unhandled = 1; +- } ++ if (dev_priv->msi_enabled) ++ nv_wr08(dev, 0x00088068, 0xff); ++ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); + +- if (unhandled && nouveau_ratelimit()) +- nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap); ++ if (stat && nouveau_ratelimit()) ++ NV_ERROR(dev, "PMC - unhandled INTR 0x%08x\n", stat); ++ return IRQ_HANDLED; + } + +-static inline void +-nouveau_pgraph_intr_context_switch(struct drm_device *dev) ++int ++nouveau_irq_init(struct drm_device *dev) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nouveau_engine *engine = &dev_priv->engine; +- uint32_t chid; +- +- chid = engine->fifo.channel_id(dev); +- NV_DEBUG(dev, "PGRAPH context switch interrupt channel %x\n", chid); +- +- switch (dev_priv->card_type) { +- case NV_04: +- nv04_graph_context_switch(dev); +- break; +- case NV_10: +- nv10_graph_context_switch(dev); +- break; +- default: +- NV_ERROR(dev, "Context switch not implemented\n"); +- break; +- } +-} +- +-static void +-nouveau_pgraph_irq_handler(struct drm_device *dev) +-{ +- uint32_t status; +- +- while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) { +- uint32_t 
nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); +- +- if (status & NV_PGRAPH_INTR_NOTIFY) { +- nouveau_pgraph_intr_notify(dev, nsource); +- +- status &= ~NV_PGRAPH_INTR_NOTIFY; +- nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY); +- } +- +- if (status & NV_PGRAPH_INTR_ERROR) { +- nouveau_pgraph_intr_error(dev, nsource); +- +- status &= ~NV_PGRAPH_INTR_ERROR; +- nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR); +- } +- +- if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) { +- status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; +- nv_wr32(dev, NV03_PGRAPH_INTR, +- NV_PGRAPH_INTR_CONTEXT_SWITCH); ++ int ret; + +- nouveau_pgraph_intr_context_switch(dev); ++ if (nouveau_msi != 0 && dev_priv->card_type >= NV_50) { ++ ret = pci_enable_msi(dev->pdev); ++ if (ret == 0) { ++ NV_INFO(dev, "enabled MSI\n"); ++ dev_priv->msi_enabled = true; + } +- +- if (status) { +- NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status); +- nv_wr32(dev, NV03_PGRAPH_INTR, status); +- } +- +- if ((nv_rd32(dev, NV04_PGRAPH_FIFO) & (1 << 0)) == 0) +- nv_wr32(dev, NV04_PGRAPH_FIFO, 1); + } + +- nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); ++ return drm_irq_install(dev); + } + +-static struct nouveau_enum_names nv50_mp_exec_error_names[] = +-{ +- { 3, "STACK_UNDERFLOW" }, +- { 4, "QUADON_ACTIVE" }, +- { 8, "TIMEOUT" }, +- { 0x10, "INVALID_OPCODE" }, +- { 0x40, "BREAKPOINT" }, +-}; +- +-static void +-nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display) +-{ +- struct drm_nouveau_private *dev_priv = dev->dev_private; +- uint32_t units = nv_rd32(dev, 0x1540); +- uint32_t addr, mp10, status, pc, oplow, ophigh; +- int i; +- int mps = 0; +- for (i = 0; i < 4; i++) { +- if (!(units & 1 << (i+24))) +- continue; +- if (dev_priv->chipset < 0xa0) +- addr = 0x408200 + (tpid << 12) + (i << 7); +- else +- addr = 0x408100 + (tpid << 11) + (i << 7); +- mp10 = nv_rd32(dev, addr + 0x10); +- status = nv_rd32(dev, addr + 0x14); +- if (!status) +- continue; +- if (display) { +- nv_rd32(dev, addr + 
0x20); +- pc = nv_rd32(dev, addr + 0x24); +- oplow = nv_rd32(dev, addr + 0x70); +- ophigh= nv_rd32(dev, addr + 0x74); +- NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - " +- "TP %d MP %d: ", tpid, i); +- nouveau_print_enum_names(status, +- nv50_mp_exec_error_names); +- printk(" at %06x warp %d, opcode %08x %08x\n", +- pc&0xffffff, pc >> 24, +- oplow, ophigh); +- } +- nv_wr32(dev, addr + 0x10, mp10); +- nv_wr32(dev, addr + 0x14, 0); +- mps++; +- } +- if (!mps && display) +- NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: " +- "No MPs claiming errors?\n", tpid); +-} +- +-static void +-nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old, +- uint32_t ustatus_new, int display, const char *name) ++void ++nouveau_irq_fini(struct drm_device *dev) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; +- int tps = 0; +- uint32_t units = nv_rd32(dev, 0x1540); +- int i, r; +- uint32_t ustatus_addr, ustatus; +- for (i = 0; i < 16; i++) { +- if (!(units & (1 << i))) +- continue; +- if (dev_priv->chipset < 0xa0) +- ustatus_addr = ustatus_old + (i << 12); +- else +- ustatus_addr = ustatus_new + (i << 11); +- ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff; +- if (!ustatus) +- continue; +- tps++; +- switch (type) { +- case 6: /* texture error... 
unknown for now */ +- nv50_fb_vm_trap(dev, display, name); +- if (display) { +- NV_ERROR(dev, "magic set %d:\n", i); +- for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4) +- NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, +- nv_rd32(dev, r)); +- } +- break; +- case 7: /* MP error */ +- if (ustatus & 0x00010000) { +- nv50_pgraph_mp_trap(dev, i, display); +- ustatus &= ~0x00010000; +- } +- break; +- case 8: /* TPDMA error */ +- { +- uint32_t e0c = nv_rd32(dev, ustatus_addr + 4); +- uint32_t e10 = nv_rd32(dev, ustatus_addr + 8); +- uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc); +- uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10); +- uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14); +- uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18); +- uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c); +- nv50_fb_vm_trap(dev, display, name); +- /* 2d engine destination */ +- if (ustatus & 0x00000010) { +- if (display) { +- NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n", +- i, e14, e10); +- NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", +- i, e0c, e18, e1c, e20, e24); +- } +- ustatus &= ~0x00000010; +- } +- /* Render target */ +- if (ustatus & 0x00000040) { +- if (display) { +- NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n", +- i, e14, e10); +- NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", +- i, e0c, e18, e1c, e20, e24); +- } +- ustatus &= ~0x00000040; +- } +- /* CUDA memory: l[], g[] or stack. */ +- if (ustatus & 0x00000080) { +- if (display) { +- if (e18 & 0x80000000) { +- /* g[] read fault? */ +- NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n", +- i, e14, e10 | ((e18 >> 24) & 0x1f)); +- e18 &= ~0x1f000000; +- } else if (e18 & 0xc) { +- /* g[] write fault? 
*/ +- NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n", +- i, e14, e10 | ((e18 >> 7) & 0x1f)); +- e18 &= ~0x00000f80; +- } else { +- NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n", +- i, e14, e10); +- } +- NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", +- i, e0c, e18, e1c, e20, e24); +- } +- ustatus &= ~0x00000080; +- } +- } +- break; +- } +- if (ustatus) { +- if (display) +- NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus); +- } +- nv_wr32(dev, ustatus_addr, 0xc0000000); +- } +- +- if (!tps && display) +- NV_INFO(dev, "%s - No TPs claiming errors?\n", name); +-} +- +-static void +-nv50_pgraph_trap_handler(struct drm_device *dev) +-{ +- struct nouveau_pgraph_trap trap; +- uint32_t status = nv_rd32(dev, 0x400108); +- uint32_t ustatus; +- int display = nouveau_ratelimit(); +- +- +- if (!status && display) { +- nouveau_graph_trap_info(dev, &trap); +- nouveau_graph_dump_trap_info(dev, "PGRAPH_TRAP", &trap); +- NV_INFO(dev, "PGRAPH_TRAP - no units reporting traps?\n"); +- } +- +- /* DISPATCH: Relays commands to other units and handles NOTIFY, +- * COND, QUERY. If you get a trap from it, the command is still stuck +- * in DISPATCH and you need to do something about it. */ +- if (status & 0x001) { +- ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff; +- if (!ustatus && display) { +- NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n"); +- } +- +- /* Known to be triggered by screwed up NOTIFY and COND... 
*/ +- if (ustatus & 0x00000001) { +- nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT"); +- nv_wr32(dev, 0x400500, 0); +- if (nv_rd32(dev, 0x400808) & 0x80000000) { +- if (display) { +- if (nouveau_graph_trapped_channel(dev, &trap.channel)) +- trap.channel = -1; +- trap.class = nv_rd32(dev, 0x400814); +- trap.mthd = nv_rd32(dev, 0x400808) & 0x1ffc; +- trap.subc = (nv_rd32(dev, 0x400808) >> 16) & 0x7; +- trap.data = nv_rd32(dev, 0x40080c); +- trap.data2 = nv_rd32(dev, 0x400810); +- nouveau_graph_dump_trap_info(dev, +- "PGRAPH_TRAP_DISPATCH_FAULT", &trap); +- NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400808: %08x\n", nv_rd32(dev, 0x400808)); +- NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400848: %08x\n", nv_rd32(dev, 0x400848)); +- } +- nv_wr32(dev, 0x400808, 0); +- } else if (display) { +- NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - No stuck command?\n"); +- } +- nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3); +- nv_wr32(dev, 0x400848, 0); +- ustatus &= ~0x00000001; +- } +- if (ustatus & 0x00000002) { +- nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY"); +- nv_wr32(dev, 0x400500, 0); +- if (nv_rd32(dev, 0x40084c) & 0x80000000) { +- if (display) { +- if (nouveau_graph_trapped_channel(dev, &trap.channel)) +- trap.channel = -1; +- trap.class = nv_rd32(dev, 0x400814); +- trap.mthd = nv_rd32(dev, 0x40084c) & 0x1ffc; +- trap.subc = (nv_rd32(dev, 0x40084c) >> 16) & 0x7; +- trap.data = nv_rd32(dev, 0x40085c); +- trap.data2 = 0; +- nouveau_graph_dump_trap_info(dev, +- "PGRAPH_TRAP_DISPATCH_QUERY", &trap); +- NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - 40084c: %08x\n", nv_rd32(dev, 0x40084c)); +- } +- nv_wr32(dev, 0x40084c, 0); +- } else if (display) { +- NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - No stuck command?\n"); +- } +- ustatus &= ~0x00000002; +- } +- if (ustatus && display) +- NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - Unhandled ustatus 0x%08x\n", ustatus); +- nv_wr32(dev, 0x400804, 0xc0000000); +- nv_wr32(dev, 0x400108, 0x001); +- status &= ~0x001; +- 
} +- +- /* TRAPs other than dispatch use the "normal" trap regs. */ +- if (status && display) { +- nouveau_graph_trap_info(dev, &trap); +- nouveau_graph_dump_trap_info(dev, +- "PGRAPH_TRAP", &trap); +- } +- +- /* M2MF: Memory to memory copy engine. */ +- if (status & 0x002) { +- ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff; +- if (!ustatus && display) { +- NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n"); +- } +- if (ustatus & 0x00000001) { +- nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY"); +- ustatus &= ~0x00000001; +- } +- if (ustatus & 0x00000002) { +- nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN"); +- ustatus &= ~0x00000002; +- } +- if (ustatus & 0x00000004) { +- nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT"); +- ustatus &= ~0x00000004; +- } +- NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n", +- nv_rd32(dev, 0x406804), +- nv_rd32(dev, 0x406808), +- nv_rd32(dev, 0x40680c), +- nv_rd32(dev, 0x406810)); +- if (ustatus && display) +- NV_INFO(dev, "PGRAPH_TRAP_M2MF - Unhandled ustatus 0x%08x\n", ustatus); +- /* No sane way found yet -- just reset the bugger. */ +- nv_wr32(dev, 0x400040, 2); +- nv_wr32(dev, 0x400040, 0); +- nv_wr32(dev, 0x406800, 0xc0000000); +- nv_wr32(dev, 0x400108, 0x002); +- status &= ~0x002; +- } +- +- /* VFETCH: Fetches data from vertex buffers. 
*/ +- if (status & 0x004) { +- ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff; +- if (!ustatus && display) { +- NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n"); +- } +- if (ustatus & 0x00000001) { +- nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT"); +- NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n", +- nv_rd32(dev, 0x400c00), +- nv_rd32(dev, 0x400c08), +- nv_rd32(dev, 0x400c0c), +- nv_rd32(dev, 0x400c10)); +- ustatus &= ~0x00000001; +- } +- if (ustatus && display) +- NV_INFO(dev, "PGRAPH_TRAP_VFETCH - Unhandled ustatus 0x%08x\n", ustatus); +- nv_wr32(dev, 0x400c04, 0xc0000000); +- nv_wr32(dev, 0x400108, 0x004); +- status &= ~0x004; +- } +- +- /* STRMOUT: DirectX streamout / OpenGL transform feedback. */ +- if (status & 0x008) { +- ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff; +- if (!ustatus && display) { +- NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n"); +- } +- if (ustatus & 0x00000001) { +- nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT"); +- NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n", +- nv_rd32(dev, 0x401804), +- nv_rd32(dev, 0x401808), +- nv_rd32(dev, 0x40180c), +- nv_rd32(dev, 0x401810)); +- ustatus &= ~0x00000001; +- } +- if (ustatus && display) +- NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - Unhandled ustatus 0x%08x\n", ustatus); +- /* No sane way found yet -- just reset the bugger. */ +- nv_wr32(dev, 0x400040, 0x80); +- nv_wr32(dev, 0x400040, 0); +- nv_wr32(dev, 0x401800, 0xc0000000); +- nv_wr32(dev, 0x400108, 0x008); +- status &= ~0x008; +- } +- +- /* CCACHE: Handles code and c[] caches and fills them. 
*/ +- if (status & 0x010) { +- ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff; +- if (!ustatus && display) { +- NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n"); +- } +- if (ustatus & 0x00000001) { +- nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT"); +- NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n", +- nv_rd32(dev, 0x405800), +- nv_rd32(dev, 0x405804), +- nv_rd32(dev, 0x405808), +- nv_rd32(dev, 0x40580c), +- nv_rd32(dev, 0x405810), +- nv_rd32(dev, 0x405814), +- nv_rd32(dev, 0x40581c)); +- ustatus &= ~0x00000001; +- } +- if (ustatus && display) +- NV_INFO(dev, "PGRAPH_TRAP_CCACHE - Unhandled ustatus 0x%08x\n", ustatus); +- nv_wr32(dev, 0x405018, 0xc0000000); +- nv_wr32(dev, 0x400108, 0x010); +- status &= ~0x010; +- } +- +- /* Unknown, not seen yet... 0x402000 is the only trap status reg +- * remaining, so try to handle it anyway. Perhaps related to that +- * unknown DMA slot on tesla? */ +- if (status & 0x20) { +- nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04"); +- ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff; +- if (display) +- NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus); +- nv_wr32(dev, 0x402000, 0xc0000000); +- /* no status modifiction on purpose */ +- } +- +- /* TEXTURE: CUDA texturing units */ +- if (status & 0x040) { +- nv50_pgraph_tp_trap (dev, 6, 0x408900, 0x408600, display, +- "PGRAPH_TRAP_TEXTURE"); +- nv_wr32(dev, 0x400108, 0x040); +- status &= ~0x040; +- } +- +- /* MP: CUDA execution engines. */ +- if (status & 0x080) { +- nv50_pgraph_tp_trap (dev, 7, 0x408314, 0x40831c, display, +- "PGRAPH_TRAP_MP"); +- nv_wr32(dev, 0x400108, 0x080); +- status &= ~0x080; +- } +- +- /* TPDMA: Handles TP-initiated uncached memory accesses: +- * l[], g[], stack, 2d surfaces, render targets. 
*/ +- if (status & 0x100) { +- nv50_pgraph_tp_trap (dev, 8, 0x408e08, 0x408708, display, +- "PGRAPH_TRAP_TPDMA"); +- nv_wr32(dev, 0x400108, 0x100); +- status &= ~0x100; +- } + +- if (status) { +- if (display) +- NV_INFO(dev, "PGRAPH_TRAP - Unknown trap 0x%08x\n", +- status); +- nv_wr32(dev, 0x400108, status); +- } ++ drm_irq_uninstall(dev); ++ if (dev_priv->msi_enabled) ++ pci_disable_msi(dev->pdev); + } + +-/* There must be a *lot* of these. Will take some time to gather them up. */ +-static struct nouveau_enum_names nv50_data_error_names[] = +-{ +- { 4, "INVALID_VALUE" }, +- { 5, "INVALID_ENUM" }, +- { 8, "INVALID_OBJECT" }, +- { 0xc, "INVALID_BITFIELD" }, +- { 0x28, "MP_NO_REG_SPACE" }, +- { 0x2b, "MP_BLOCK_SIZE_MISMATCH" }, +-}; +- +-static void +-nv50_pgraph_irq_handler(struct drm_device *dev) +-{ +- struct nouveau_pgraph_trap trap; +- int unhandled = 0; +- uint32_t status; +- +- while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) { +- /* NOTIFY: You've set a NOTIFY an a command and it's done. */ +- if (status & 0x00000001) { +- nouveau_graph_trap_info(dev, &trap); +- if (nouveau_ratelimit()) +- nouveau_graph_dump_trap_info(dev, +- "PGRAPH_NOTIFY", &trap); +- status &= ~0x00000001; +- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001); +- } +- +- /* COMPUTE_QUERY: Purpose and exact cause unknown, happens +- * when you write 0x200 to 0x50c0 method 0x31c. */ +- if (status & 0x00000002) { +- nouveau_graph_trap_info(dev, &trap); +- if (nouveau_ratelimit()) +- nouveau_graph_dump_trap_info(dev, +- "PGRAPH_COMPUTE_QUERY", &trap); +- status &= ~0x00000002; +- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000002); +- } +- +- /* Unknown, never seen: 0x4 */ +- +- /* ILLEGAL_MTHD: You used a wrong method for this class. 
*/ +- if (status & 0x00000010) { +- nouveau_graph_trap_info(dev, &trap); +- if (nouveau_pgraph_intr_swmthd(dev, &trap)) +- unhandled = 1; +- if (unhandled && nouveau_ratelimit()) +- nouveau_graph_dump_trap_info(dev, +- "PGRAPH_ILLEGAL_MTHD", &trap); +- status &= ~0x00000010; +- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010); +- } +- +- /* ILLEGAL_CLASS: You used a wrong class. */ +- if (status & 0x00000020) { +- nouveau_graph_trap_info(dev, &trap); +- if (nouveau_ratelimit()) +- nouveau_graph_dump_trap_info(dev, +- "PGRAPH_ILLEGAL_CLASS", &trap); +- status &= ~0x00000020; +- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000020); +- } +- +- /* DOUBLE_NOTIFY: You tried to set a NOTIFY on another NOTIFY. */ +- if (status & 0x00000040) { +- nouveau_graph_trap_info(dev, &trap); +- if (nouveau_ratelimit()) +- nouveau_graph_dump_trap_info(dev, +- "PGRAPH_DOUBLE_NOTIFY", &trap); +- status &= ~0x00000040; +- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000040); +- } +- +- /* CONTEXT_SWITCH: PGRAPH needs us to load a new context */ +- if (status & 0x00001000) { +- nv_wr32(dev, 0x400500, 0x00000000); +- nv_wr32(dev, NV03_PGRAPH_INTR, +- NV_PGRAPH_INTR_CONTEXT_SWITCH); +- nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev, +- NV40_PGRAPH_INTR_EN) & +- ~NV_PGRAPH_INTR_CONTEXT_SWITCH); +- nv_wr32(dev, 0x400500, 0x00010001); +- +- nv50_graph_context_switch(dev); +- +- status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; +- } +- +- /* BUFFER_NOTIFY: Your m2mf transfer finished */ +- if (status & 0x00010000) { +- nouveau_graph_trap_info(dev, &trap); +- if (nouveau_ratelimit()) +- nouveau_graph_dump_trap_info(dev, +- "PGRAPH_BUFFER_NOTIFY", &trap); +- status &= ~0x00010000; +- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00010000); +- } +- +- /* DATA_ERROR: Invalid value for this method, or invalid +- * state in current PGRAPH context for this operation */ +- if (status & 0x00100000) { +- nouveau_graph_trap_info(dev, &trap); +- if (nouveau_ratelimit()) { +- nouveau_graph_dump_trap_info(dev, +- "PGRAPH_DATA_ERROR", &trap); +- NV_INFO 
(dev, "PGRAPH_DATA_ERROR - "); +- nouveau_print_enum_names(nv_rd32(dev, 0x400110), +- nv50_data_error_names); +- printk("\n"); +- } +- status &= ~0x00100000; +- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000); +- } +- +- /* TRAP: Something bad happened in the middle of command +- * execution. Has a billion types, subtypes, and even +- * subsubtypes. */ +- if (status & 0x00200000) { +- nv50_pgraph_trap_handler(dev); +- status &= ~0x00200000; +- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000); +- } +- +- /* Unknown, never seen: 0x00400000 */ +- +- /* SINGLE_STEP: Happens on every method if you turned on +- * single stepping in 40008c */ +- if (status & 0x01000000) { +- nouveau_graph_trap_info(dev, &trap); +- if (nouveau_ratelimit()) +- nouveau_graph_dump_trap_info(dev, +- "PGRAPH_SINGLE_STEP", &trap); +- status &= ~0x01000000; +- nv_wr32(dev, NV03_PGRAPH_INTR, 0x01000000); +- } +- +- /* 0x02000000 happens when you pause a ctxprog... +- * but the only way this can happen that I know is by +- * poking the relevant MMIO register, and we don't +- * do that. 
*/ +- +- if (status) { +- NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", +- status); +- nv_wr32(dev, NV03_PGRAPH_INTR, status); +- } +- +- { +- const int isb = (1 << 16) | (1 << 0); +- +- if ((nv_rd32(dev, 0x400500) & isb) != isb) +- nv_wr32(dev, 0x400500, +- nv_rd32(dev, 0x400500) | isb); +- } +- } +- +- nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); +- if (nv_rd32(dev, 0x400824) & (1 << 31)) +- nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); +-} +- +-static void +-nouveau_crtc_irq_handler(struct drm_device *dev, int crtc) ++void ++nouveau_irq_register(struct drm_device *dev, int status_bit, ++ void (*handler)(struct drm_device *)) + { +- if (crtc & 1) +- nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK); ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ unsigned long flags; + +- if (crtc & 2) +- nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK); ++ spin_lock_irqsave(&dev_priv->context_switch_lock, flags); ++ dev_priv->irq_handler[status_bit] = handler; ++ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); + } + +-irqreturn_t +-nouveau_irq_handler(DRM_IRQ_ARGS) ++void ++nouveau_irq_unregister(struct drm_device *dev, int status_bit) + { +- struct drm_device *dev = (struct drm_device *)arg; + struct drm_nouveau_private *dev_priv = dev->dev_private; +- uint32_t status; + unsigned long flags; + +- status = nv_rd32(dev, NV03_PMC_INTR_0); +- if (!status) +- return IRQ_NONE; +- + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); +- +- if (status & NV_PMC_INTR_0_PFIFO_PENDING) { +- nouveau_fifo_irq_handler(dev); +- status &= ~NV_PMC_INTR_0_PFIFO_PENDING; +- } +- +- if (status & NV_PMC_INTR_0_PGRAPH_PENDING) { +- if (dev_priv->card_type >= NV_50) +- nv50_pgraph_irq_handler(dev); +- else +- nouveau_pgraph_irq_handler(dev); +- +- status &= ~NV_PMC_INTR_0_PGRAPH_PENDING; +- } +- +- if (status & NV_PMC_INTR_0_CRTCn_PENDING) { +- nouveau_crtc_irq_handler(dev, (status>>24)&3); +- status &= 
~NV_PMC_INTR_0_CRTCn_PENDING; +- } +- +- if (status & (NV_PMC_INTR_0_NV50_DISPLAY_PENDING | +- NV_PMC_INTR_0_NV50_I2C_PENDING)) { +- nv50_display_irq_handler(dev); +- status &= ~(NV_PMC_INTR_0_NV50_DISPLAY_PENDING | +- NV_PMC_INTR_0_NV50_I2C_PENDING); +- } +- +- if (status) +- NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status); +- ++ dev_priv->irq_handler[status_bit] = NULL; + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); +- +- return IRQ_HANDLED; + } +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_mem.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_mem.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_mem.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_mem.c 2010-11-12 06:18:06.000000000 +0100 +@@ -33,92 +33,113 @@ + #include "drmP.h" + #include "drm.h" + #include "drm_sarea.h" +-#include "nouveau_drv.h" + +-#define MIN(a,b) a < b ? a : b ++#include "nouveau_drv.h" ++#include "nouveau_pm.h" + + /* + * NV10-NV40 tiling helpers + */ + + static void +-nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, +- uint32_t size, uint32_t pitch) ++nv10_mem_update_tile_region(struct drm_device *dev, ++ struct nouveau_tile_reg *tile, uint32_t addr, ++ uint32_t size, uint32_t pitch, uint32_t flags) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; + struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; +- struct nouveau_tile_reg *tile = &dev_priv->tile[i]; ++ int i = tile - dev_priv->tile.reg; ++ unsigned long save; ++ ++ nouveau_fence_unref(&tile->fence); ++ ++ if (tile->pitch) ++ pfb->free_tile_region(dev, i); + +- tile->addr = addr; +- tile->size = size; +- tile->used = !!pitch; +- nouveau_fence_unref((void **)&tile->fence); ++ if (pitch) ++ pfb->init_tile_region(dev, i, addr, size, pitch, flags); + ++ 
spin_lock_irqsave(&dev_priv->context_switch_lock, save); + pfifo->reassign(dev, false); + pfifo->cache_pull(dev, false); + + nouveau_wait_for_idle(dev); + +- pgraph->set_region_tiling(dev, i, addr, size, pitch); +- pfb->set_region_tiling(dev, i, addr, size, pitch); ++ pfb->set_tile_region(dev, i); ++ pgraph->set_tile_region(dev, i); + + pfifo->cache_pull(dev, true); + pfifo->reassign(dev, true); ++ spin_unlock_irqrestore(&dev_priv->context_switch_lock, save); + } + +-struct nouveau_tile_reg * +-nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size, +- uint32_t pitch) ++static struct nouveau_tile_reg * ++nv10_mem_get_tile_region(struct drm_device *dev, int i) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; +- struct nouveau_tile_reg *found = NULL; +- unsigned long i, flags; ++ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; + +- spin_lock_irqsave(&dev_priv->context_switch_lock, flags); ++ spin_lock(&dev_priv->tile.lock); + +- for (i = 0; i < pfb->num_tiles; i++) { +- struct nouveau_tile_reg *tile = &dev_priv->tile[i]; +- +- if (tile->used) +- /* Tile region in use. */ +- continue; ++ if (!tile->used && ++ (!tile->fence || nouveau_fence_signalled(tile->fence))) ++ tile->used = true; ++ else ++ tile = NULL; + +- if (tile->fence && +- !nouveau_fence_signalled(tile->fence, NULL)) +- /* Pending tile region. */ +- continue; ++ spin_unlock(&dev_priv->tile.lock); ++ return tile; ++} + +- if (max(tile->addr, addr) < +- min(tile->addr + tile->size, addr + size)) +- /* Kill an intersecting tile region. */ +- nv10_mem_set_region_tiling(dev, i, 0, 0, 0); ++void ++nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile, ++ struct nouveau_fence *fence) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; + +- if (pitch && !found) { +- /* Free tile region. 
*/ +- nv10_mem_set_region_tiling(dev, i, addr, size, pitch); +- found = tile; ++ if (tile) { ++ spin_lock(&dev_priv->tile.lock); ++ if (fence) { ++ /* Mark it as pending. */ ++ tile->fence = fence; ++ nouveau_fence_ref(fence); + } +- } + +- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); +- +- return found; ++ tile->used = false; ++ spin_unlock(&dev_priv->tile.lock); ++ } + } + +-void +-nv10_mem_expire_tiling(struct drm_device *dev, struct nouveau_tile_reg *tile, +- struct nouveau_fence *fence) ++struct nouveau_tile_reg * ++nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size, ++ uint32_t pitch, uint32_t flags) + { +- if (fence) { +- /* Mark it as pending. */ +- tile->fence = fence; +- nouveau_fence_ref(fence); ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; ++ struct nouveau_tile_reg *tile, *found = NULL; ++ int i; ++ ++ for (i = 0; i < pfb->num_tiles; i++) { ++ tile = nv10_mem_get_tile_region(dev, i); ++ ++ if (pitch && !found) { ++ found = tile; ++ continue; ++ ++ } else if (tile && tile->pitch) { ++ /* Kill an unused tile region. 
*/ ++ nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0); ++ } ++ ++ nv10_mem_put_tile_region(dev, tile, NULL); + } + +- tile->used = false; ++ if (found) ++ nv10_mem_update_tile_region(dev, found, addr, size, ++ pitch, flags); ++ return found; + } + + /* +@@ -177,9 +198,9 @@ + } + dev_priv->engine.instmem.flush(dev); + +- nv50_vm_flush(dev, 5); +- nv50_vm_flush(dev, 0); +- nv50_vm_flush(dev, 4); ++ dev_priv->engine.instmem.flush(dev); ++ dev_priv->engine.fifo.tlb_flush(dev); ++ dev_priv->engine.graph.tlb_flush(dev); + nv50_vm_flush(dev, 6); + return 0; + } +@@ -209,11 +230,10 @@ + pte++; + } + } +- dev_priv->engine.instmem.flush(dev); + +- nv50_vm_flush(dev, 5); +- nv50_vm_flush(dev, 0); +- nv50_vm_flush(dev, 4); ++ dev_priv->engine.instmem.flush(dev); ++ dev_priv->engine.fifo.tlb_flush(dev); ++ dev_priv->engine.graph.tlb_flush(dev); + nv50_vm_flush(dev, 6); + } + +@@ -653,6 +673,7 @@ + void + nouveau_mem_timing_init(struct drm_device *dev) + { ++ /* cards < NVC0 only */ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + struct nouveau_pm_memtimings *memtimings = &pm->memtimings; +@@ -719,14 +740,14 @@ + tUNK_19 = 1; + tUNK_20 = 0; + tUNK_21 = 0; +- switch (MIN(recordlen,21)) { +- case 21: ++ switch (min(recordlen, 22)) { ++ case 22: + tUNK_21 = entry[21]; +- case 20: ++ case 21: + tUNK_20 = entry[20]; +- case 19: ++ case 20: + tUNK_19 = entry[19]; +- case 18: ++ case 19: + tUNK_18 = entry[18]; + default: + tUNK_0 = entry[0]; +@@ -756,24 +777,30 @@ + timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10); + if(recordlen > 19) { + timing->reg_100228 += (tUNK_19 - 1) << 24; +- } else { ++ }/* I cannot back-up this else-statement right now ++ else { + timing->reg_100228 += tUNK_12 << 24; +- } ++ }*/ + + /* XXX: reg_10022c */ ++ timing->reg_10022c = tUNK_2 - 1; + + timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 | + tUNK_13 << 8 | tUNK_13); + + /* XXX: +6? 
*/ + timing->reg_100234 = (tRAS << 24 | (tUNK_19 + 6) << 8 | tRC); +- if(tUNK_10 > tUNK_11) { +- timing->reg_100234 += tUNK_10 << 16; +- } else { +- timing->reg_100234 += tUNK_11 << 16; ++ timing->reg_100234 += max(tUNK_10,tUNK_11) << 16; ++ ++ /* XXX; reg_100238, reg_10023c ++ * reg: 0x00?????? ++ * reg_10023c: ++ * 0 for pre-NV50 cards ++ * 0x????0202 for NV50+ cards (empirical evidence) */ ++ if(dev_priv->card_type >= NV_50) { ++ timing->reg_10023c = 0x202; + } + +- /* XXX; reg_100238, reg_10023c */ + NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i, + timing->reg_100220, timing->reg_100224, + timing->reg_100228, timing->reg_10022c); +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_notifier.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_notifier.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_notifier.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_notifier.c 2010-11-12 06:18:06.000000000 +0100 +@@ -185,11 +185,11 @@ + struct nouveau_channel *chan; + int ret; + +- NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan); ++ chan = nouveau_channel_get(dev, file_priv, na->channel); ++ if (IS_ERR(chan)) ++ return PTR_ERR(chan); + + ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset); +- if (ret) +- return ret; +- +- return 0; ++ nouveau_channel_put(&chan); ++ return ret; + } +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_object.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_object.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_object.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_object.c 2010-11-12 06:18:06.000000000 +0100 +@@ -36,6 +36,101 @@ + #include "nouveau_drm.h" + #include "nouveau_ramht.h" + ++struct nouveau_gpuobj_method { ++ struct list_head head; ++ u32 mthd; ++ int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data); ++}; ++ ++struct 
nouveau_gpuobj_class { ++ struct list_head head; ++ struct list_head methods; ++ u32 id; ++ u32 engine; ++}; ++ ++int ++nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj_class *oc; ++ ++ oc = kzalloc(sizeof(*oc), GFP_KERNEL); ++ if (!oc) ++ return -ENOMEM; ++ ++ INIT_LIST_HEAD(&oc->methods); ++ oc->id = class; ++ oc->engine = engine; ++ list_add(&oc->head, &dev_priv->classes); ++ return 0; ++} ++ ++int ++nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd, ++ int (*exec)(struct nouveau_channel *, u32, u32, u32)) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj_method *om; ++ struct nouveau_gpuobj_class *oc; ++ ++ list_for_each_entry(oc, &dev_priv->classes, head) { ++ if (oc->id == class) ++ goto found; ++ } ++ ++ return -EINVAL; ++ ++found: ++ om = kzalloc(sizeof(*om), GFP_KERNEL); ++ if (!om) ++ return -ENOMEM; ++ ++ om->mthd = mthd; ++ om->exec = exec; ++ list_add(&om->head, &oc->methods); ++ return 0; ++} ++ ++int ++nouveau_gpuobj_mthd_call(struct nouveau_channel *chan, ++ u32 class, u32 mthd, u32 data) ++{ ++ struct drm_nouveau_private *dev_priv = chan->dev->dev_private; ++ struct nouveau_gpuobj_method *om; ++ struct nouveau_gpuobj_class *oc; ++ ++ list_for_each_entry(oc, &dev_priv->classes, head) { ++ if (oc->id != class) ++ continue; ++ ++ list_for_each_entry(om, &oc->methods, head) { ++ if (om->mthd == mthd) ++ return om->exec(chan, class, mthd, data); ++ } ++ } ++ ++ return -ENOENT; ++} ++ ++int ++nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid, ++ u32 class, u32 mthd, u32 data) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_channel *chan = NULL; ++ unsigned long flags; ++ int ret = -EINVAL; ++ ++ spin_lock_irqsave(&dev_priv->channels.lock, flags); ++ if (chid > 0 && chid < dev_priv->engine.fifo.channels) ++ chan = dev_priv->channels.ptr[chid]; ++ 
if (chan) ++ ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data); ++ spin_unlock_irqrestore(&dev_priv->channels.lock, flags); ++ return ret; ++} ++ + /* NVidia uses context objects to drive drawing operations. + + Context objects can be selected into 8 subchannels in the FIFO, +@@ -73,17 +168,14 @@ + struct nouveau_gpuobj **gpuobj_ret) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nouveau_engine *engine = &dev_priv->engine; ++ struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; + struct nouveau_gpuobj *gpuobj; + struct drm_mm_node *ramin = NULL; +- int ret; ++ int ret, i; + + NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n", + chan ? chan->id : -1, size, align, flags); + +- if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL) +- return -EINVAL; +- + gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); + if (!gpuobj) + return -ENOMEM; +@@ -98,88 +190,45 @@ + spin_unlock(&dev_priv->ramin_lock); + + if (chan) { +- NV_DEBUG(dev, "channel heap\n"); +- + ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0); + if (ramin) + ramin = drm_mm_get_block(ramin, size, align); +- + if (!ramin) { + nouveau_gpuobj_ref(NULL, &gpuobj); + return -ENOMEM; + } +- } else { +- NV_DEBUG(dev, "global heap\n"); +- +- /* allocate backing pages, sets vinst */ +- ret = engine->instmem.populate(dev, gpuobj, &size); +- if (ret) { +- nouveau_gpuobj_ref(NULL, &gpuobj); +- return ret; +- } +- +- /* try and get aperture space */ +- do { +- if (drm_mm_pre_get(&dev_priv->ramin_heap)) +- return -ENOMEM; + +- spin_lock(&dev_priv->ramin_lock); +- ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, +- align, 0); +- if (ramin == NULL) { +- spin_unlock(&dev_priv->ramin_lock); +- nouveau_gpuobj_ref(NULL, &gpuobj); +- return ret; +- } +- +- ramin = drm_mm_get_block_atomic(ramin, size, align); +- spin_unlock(&dev_priv->ramin_lock); +- } while (ramin == NULL); ++ gpuobj->pinst = chan->ramin->pinst; ++ if (gpuobj->pinst != ~0) ++ gpuobj->pinst += 
ramin->start; + +- /* on nv50 it's ok to fail, we have a fallback path */ +- if (!ramin && dev_priv->card_type < NV_50) { +- nouveau_gpuobj_ref(NULL, &gpuobj); +- return -ENOMEM; +- } +- } ++ if (dev_priv->card_type < NV_50) ++ gpuobj->cinst = gpuobj->pinst; ++ else ++ gpuobj->cinst = ramin->start; + +- /* if we got a chunk of the aperture, map pages into it */ +- gpuobj->im_pramin = ramin; +- if (!chan && gpuobj->im_pramin && dev_priv->ramin_available) { +- ret = engine->instmem.bind(dev, gpuobj); ++ gpuobj->vinst = ramin->start + chan->ramin->vinst; ++ gpuobj->node = ramin; ++ } else { ++ ret = instmem->get(gpuobj, size, align); + if (ret) { + nouveau_gpuobj_ref(NULL, &gpuobj); + return ret; + } +- } + +- /* calculate the various different addresses for the object */ +- if (chan) { +- gpuobj->pinst = chan->ramin->pinst; +- if (gpuobj->pinst != ~0) +- gpuobj->pinst += gpuobj->im_pramin->start; +- +- if (dev_priv->card_type < NV_50) { +- gpuobj->cinst = gpuobj->pinst; +- } else { +- gpuobj->cinst = gpuobj->im_pramin->start; +- gpuobj->vinst = gpuobj->im_pramin->start + +- chan->ramin->vinst; +- } +- } else { +- if (gpuobj->im_pramin) +- gpuobj->pinst = gpuobj->im_pramin->start; +- else ++ ret = -ENOSYS; ++ if (dev_priv->ramin_available) ++ ret = instmem->map(gpuobj); ++ if (ret) + gpuobj->pinst = ~0; +- gpuobj->cinst = 0xdeadbeef; ++ ++ gpuobj->cinst = NVOBJ_CINST_GLOBAL; + } + + if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { +- int i; +- + for (i = 0; i < gpuobj->size; i += 4) + nv_wo32(gpuobj, i, 0); +- engine->instmem.flush(dev); ++ instmem->flush(dev); + } + + +@@ -195,6 +244,7 @@ + NV_DEBUG(dev, "\n"); + + INIT_LIST_HEAD(&dev_priv->gpuobj_list); ++ INIT_LIST_HEAD(&dev_priv->classes); + spin_lock_init(&dev_priv->ramin_lock); + dev_priv->ramin_base = ~0; + +@@ -205,13 +255,23 @@ + nouveau_gpuobj_takedown(struct drm_device *dev) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj_method *om, *tm; ++ struct nouveau_gpuobj_class 
*oc, *tc; + + NV_DEBUG(dev, "\n"); + ++ list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) { ++ list_for_each_entry_safe(om, tm, &oc->methods, head) { ++ list_del(&om->head); ++ kfree(om); ++ } ++ list_del(&oc->head); ++ kfree(oc); ++ } ++ + BUG_ON(!list_empty(&dev_priv->gpuobj_list)); + } + +- + static void + nouveau_gpuobj_del(struct kref *ref) + { +@@ -219,26 +279,34 @@ + container_of(ref, struct nouveau_gpuobj, refcount); + struct drm_device *dev = gpuobj->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nouveau_engine *engine = &dev_priv->engine; ++ struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; + int i; + + NV_DEBUG(dev, "gpuobj %p\n", gpuobj); + +- if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) { ++ if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) { + for (i = 0; i < gpuobj->size; i += 4) + nv_wo32(gpuobj, i, 0); +- engine->instmem.flush(dev); ++ instmem->flush(dev); + } + + if (gpuobj->dtor) + gpuobj->dtor(dev, gpuobj); + +- if (gpuobj->im_backing) +- engine->instmem.clear(dev, gpuobj); ++ if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) { ++ if (gpuobj->node) { ++ instmem->unmap(gpuobj); ++ instmem->put(gpuobj); ++ } ++ } else { ++ if (gpuobj->node) { ++ spin_lock(&dev_priv->ramin_lock); ++ drm_mm_put_block(gpuobj->node); ++ spin_unlock(&dev_priv->ramin_lock); ++ } ++ } + + spin_lock(&dev_priv->ramin_lock); +- if (gpuobj->im_pramin) +- drm_mm_put_block(gpuobj->im_pramin); + list_del(&gpuobj->list); + spin_unlock(&dev_priv->ramin_lock); + +@@ -278,7 +346,7 @@ + kref_init(&gpuobj->refcount); + gpuobj->size = size; + gpuobj->pinst = pinst; +- gpuobj->cinst = 0xdeadbeef; ++ gpuobj->cinst = NVOBJ_CINST_GLOBAL; + gpuobj->vinst = vinst; + + if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { +@@ -495,23 +563,86 @@ + entry[5]: + set to 0? 
+ */ ++static int ++nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, ++ struct nouveau_gpuobj **gpuobj_ret) ++{ ++ struct drm_nouveau_private *dev_priv; ++ struct nouveau_gpuobj *gpuobj; ++ ++ if (!chan || !gpuobj_ret || *gpuobj_ret != NULL) ++ return -EINVAL; ++ dev_priv = chan->dev->dev_private; ++ ++ gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); ++ if (!gpuobj) ++ return -ENOMEM; ++ gpuobj->dev = chan->dev; ++ gpuobj->engine = NVOBJ_ENGINE_SW; ++ gpuobj->class = class; ++ kref_init(&gpuobj->refcount); ++ gpuobj->cinst = 0x40; ++ ++ spin_lock(&dev_priv->ramin_lock); ++ list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); ++ spin_unlock(&dev_priv->ramin_lock); ++ *gpuobj_ret = gpuobj; ++ return 0; ++} ++ + int + nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class, + struct nouveau_gpuobj **gpuobj) + { ++ struct drm_nouveau_private *dev_priv = chan->dev->dev_private; + struct drm_device *dev = chan->dev; +- struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj_class *oc; + int ret; + + NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class); + ++ list_for_each_entry(oc, &dev_priv->classes, head) { ++ if (oc->id == class) ++ goto found; ++ } ++ ++ NV_ERROR(dev, "illegal object class: 0x%x\n", class); ++ return -EINVAL; ++ ++found: ++ if (oc->engine == NVOBJ_ENGINE_SW) ++ return nouveau_gpuobj_sw_new(chan, class, gpuobj); ++ ++ switch (oc->engine) { ++ case NVOBJ_ENGINE_GR: ++ if (dev_priv->card_type >= NV_50 && !chan->ramin_grctx) { ++ struct nouveau_pgraph_engine *pgraph = ++ &dev_priv->engine.graph; ++ ++ ret = pgraph->create_context(chan); ++ if (ret) ++ return ret; ++ } ++ break; ++ case NVOBJ_ENGINE_CRYPT: ++ if (!chan->crypt_ctx) { ++ struct nouveau_crypt_engine *pcrypt = ++ &dev_priv->engine.crypt; ++ ++ ret = pcrypt->create_context(chan); ++ if (ret) ++ return ret; ++ } ++ break; ++ } ++ + ret = nouveau_gpuobj_new(dev, chan, + nouveau_gpuobj_class_instmem_size(dev, class), + 16, + NVOBJ_FLAG_ZERO_ALLOC | 
NVOBJ_FLAG_ZERO_FREE, + gpuobj); + if (ret) { +- NV_ERROR(dev, "Error creating gpuobj: %d\n", ret); ++ NV_ERROR(dev, "error creating gpuobj: %d\n", ret); + return ret; + } + +@@ -541,35 +672,8 @@ + } + dev_priv->engine.instmem.flush(dev); + +- (*gpuobj)->engine = NVOBJ_ENGINE_GR; +- (*gpuobj)->class = class; +- return 0; +-} +- +-int +-nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, +- struct nouveau_gpuobj **gpuobj_ret) +-{ +- struct drm_nouveau_private *dev_priv; +- struct nouveau_gpuobj *gpuobj; +- +- if (!chan || !gpuobj_ret || *gpuobj_ret != NULL) +- return -EINVAL; +- dev_priv = chan->dev->dev_private; +- +- gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); +- if (!gpuobj) +- return -ENOMEM; +- gpuobj->dev = chan->dev; +- gpuobj->engine = NVOBJ_ENGINE_SW; +- gpuobj->class = class; +- kref_init(&gpuobj->refcount); +- gpuobj->cinst = 0x40; +- +- spin_lock(&dev_priv->ramin_lock); +- list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); +- spin_unlock(&dev_priv->ramin_lock); +- *gpuobj_ret = gpuobj; ++ (*gpuobj)->engine = oc->engine; ++ (*gpuobj)->class = oc->id; + return 0; + } + +@@ -585,7 +689,7 @@ + NV_DEBUG(dev, "ch%d\n", chan->id); + + /* Base amount for object storage (4KiB enough?) 
*/ +- size = 0x1000; ++ size = 0x2000; + base = 0; + + /* PGRAPH context */ +@@ -791,119 +895,69 @@ + struct nouveau_gpuobj *gpuobj; + int i; + +- if (dev_priv->card_type < NV_50) { +- dev_priv->susres.ramin_copy = vmalloc(dev_priv->ramin_rsvd_vram); +- if (!dev_priv->susres.ramin_copy) +- return -ENOMEM; +- +- for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4) +- dev_priv->susres.ramin_copy[i/4] = nv_ri32(dev, i); +- return 0; +- } +- + list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { +- if (!gpuobj->im_backing) ++ if (gpuobj->cinst != NVOBJ_CINST_GLOBAL) + continue; + +- gpuobj->im_backing_suspend = vmalloc(gpuobj->size); +- if (!gpuobj->im_backing_suspend) { ++ gpuobj->suspend = vmalloc(gpuobj->size); ++ if (!gpuobj->suspend) { + nouveau_gpuobj_resume(dev); + return -ENOMEM; + } + + for (i = 0; i < gpuobj->size; i += 4) +- gpuobj->im_backing_suspend[i/4] = nv_ro32(gpuobj, i); ++ gpuobj->suspend[i/4] = nv_ro32(gpuobj, i); + } + + return 0; + } + + void +-nouveau_gpuobj_suspend_cleanup(struct drm_device *dev) +-{ +- struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nouveau_gpuobj *gpuobj; +- +- if (dev_priv->card_type < NV_50) { +- vfree(dev_priv->susres.ramin_copy); +- dev_priv->susres.ramin_copy = NULL; +- return; +- } +- +- list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { +- if (!gpuobj->im_backing_suspend) +- continue; +- +- vfree(gpuobj->im_backing_suspend); +- gpuobj->im_backing_suspend = NULL; +- } +-} +- +-void + nouveau_gpuobj_resume(struct drm_device *dev) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *gpuobj; + int i; + +- if (dev_priv->card_type < NV_50) { +- for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4) +- nv_wi32(dev, i, dev_priv->susres.ramin_copy[i/4]); +- nouveau_gpuobj_suspend_cleanup(dev); +- return; +- } +- + list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { +- if (!gpuobj->im_backing_suspend) ++ if (!gpuobj->suspend) + continue; + + for (i = 0; i < 
gpuobj->size; i += 4) +- nv_wo32(gpuobj, i, gpuobj->im_backing_suspend[i/4]); +- dev_priv->engine.instmem.flush(dev); ++ nv_wo32(gpuobj, i, gpuobj->suspend[i/4]); ++ ++ vfree(gpuobj->suspend); ++ gpuobj->suspend = NULL; + } + +- nouveau_gpuobj_suspend_cleanup(dev); ++ dev_priv->engine.instmem.flush(dev); + } + + int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, + struct drm_file *file_priv) + { +- struct drm_nouveau_private *dev_priv = dev->dev_private; + struct drm_nouveau_grobj_alloc *init = data; +- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; +- struct nouveau_pgraph_object_class *grc; + struct nouveau_gpuobj *gr = NULL; + struct nouveau_channel *chan; + int ret; + +- NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan); +- + if (init->handle == ~0) + return -EINVAL; + +- grc = pgraph->grclass; +- while (grc->id) { +- if (grc->id == init->class) +- break; +- grc++; +- } ++ chan = nouveau_channel_get(dev, file_priv, init->channel); ++ if (IS_ERR(chan)) ++ return PTR_ERR(chan); + +- if (!grc->id) { +- NV_ERROR(dev, "Illegal object class: 0x%x\n", init->class); +- return -EPERM; ++ if (nouveau_ramht_find(chan, init->handle)) { ++ ret = -EEXIST; ++ goto out; + } + +- if (nouveau_ramht_find(chan, init->handle)) +- return -EEXIST; +- +- if (!grc->software) +- ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr); +- else +- ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr); ++ ret = nouveau_gpuobj_gr_new(chan, init->class, &gr); + if (ret) { + NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n", + ret, init->channel, init->handle); +- return ret; ++ goto out; + } + + ret = nouveau_ramht_insert(chan, init->handle, gr); +@@ -911,27 +965,27 @@ + if (ret) { + NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n", + ret, init->channel, init->handle); +- return ret; + } + +- return 0; ++out: ++ nouveau_channel_put(&chan); ++ return ret; + } + + int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data, + struct 
drm_file *file_priv) + { + struct drm_nouveau_gpuobj_free *objfree = data; +- struct nouveau_gpuobj *gpuobj; + struct nouveau_channel *chan; ++ int ret; + +- NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan); +- +- gpuobj = nouveau_ramht_find(chan, objfree->handle); +- if (!gpuobj) +- return -ENOENT; ++ chan = nouveau_channel_get(dev, file_priv, objfree->channel); ++ if (IS_ERR(chan)) ++ return PTR_ERR(chan); + +- nouveau_ramht_remove(chan, objfree->handle); +- return 0; ++ ret = nouveau_ramht_remove(chan, objfree->handle); ++ nouveau_channel_put(&chan); ++ return ret; + } + + u32 +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_pm.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_pm.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_pm.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_pm.c 2010-11-12 06:18:06.000000000 +0100 +@@ -27,6 +27,10 @@ + #include "nouveau_drv.h" + #include "nouveau_pm.h" + ++#ifdef CONFIG_ACPI ++#include ++#endif ++#include + #include + #include + +@@ -284,6 +288,7 @@ + } + } + ++#ifdef CONFIG_HWMON + static ssize_t + nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf) + { +@@ -395,10 +400,12 @@ + static const struct attribute_group hwmon_attrgroup = { + .attrs = hwmon_attributes, + }; ++#endif + + static int + nouveau_hwmon_init(struct drm_device *dev) + { ++#ifdef CONFIG_HWMON + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + struct device *hwmon_dev; +@@ -425,13 +432,14 @@ + } + + pm->hwmon = hwmon_dev; +- ++#endif + return 0; + } + + static void + nouveau_hwmon_fini(struct drm_device *dev) + { ++#ifdef CONFIG_HWMON + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pm_engine *pm = &dev_priv->engine.pm; + +@@ -439,7 +447,27 @@ + sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup); + hwmon_device_unregister(pm->hwmon); 
+ } ++#endif ++} ++ ++#ifdef CONFIG_ACPI ++static int ++nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data) ++{ ++ struct drm_nouveau_private *dev_priv = ++ container_of(nb, struct drm_nouveau_private, engine.pm.acpi_nb); ++ struct drm_device *dev = dev_priv->dev; ++ struct acpi_bus_event *entry = (struct acpi_bus_event *)data; ++ ++ if (strcmp(entry->device_class, "ac_adapter") == 0) { ++ bool ac = power_supply_is_system_supplied(); ++ ++ NV_DEBUG(dev, "power supply changed: %s\n", ac ? "AC" : "DC"); ++ } ++ ++ return NOTIFY_OK; + } ++#endif + + int + nouveau_pm_init(struct drm_device *dev) +@@ -480,6 +508,10 @@ + + nouveau_sysfs_init(dev); + nouveau_hwmon_init(dev); ++#ifdef CONFIG_ACPI ++ pm->acpi_nb.notifier_call = nouveau_pm_acpi_event; ++ register_acpi_notifier(&pm->acpi_nb); ++#endif + + return 0; + } +@@ -498,6 +530,9 @@ + nouveau_perf_fini(dev); + nouveau_volt_fini(dev); + ++#ifdef CONFIG_ACPI ++ unregister_acpi_notifier(&pm->acpi_nb); ++#endif + nouveau_hwmon_fini(dev); + nouveau_sysfs_fini(dev); + } +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_ramht.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_ramht.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_ramht.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_ramht.c 2010-11-12 06:18:06.000000000 +0100 +@@ -114,7 +114,7 @@ + (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); + } else { + if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) { +- ctx = (gpuobj->cinst << 10) | 2; ++ ctx = (gpuobj->cinst << 10) | chan->id; + } else { + ctx = (gpuobj->cinst >> 4) | + ((gpuobj->engine << +@@ -153,26 +153,42 @@ + return -ENOMEM; + } + ++static struct nouveau_ramht_entry * ++nouveau_ramht_remove_entry(struct nouveau_channel *chan, u32 handle) ++{ ++ struct nouveau_ramht *ramht = chan ? 
chan->ramht : NULL; ++ struct nouveau_ramht_entry *entry; ++ unsigned long flags; ++ ++ if (!ramht) ++ return NULL; ++ ++ spin_lock_irqsave(&ramht->lock, flags); ++ list_for_each_entry(entry, &ramht->entries, head) { ++ if (entry->channel == chan && ++ (!handle || entry->handle == handle)) { ++ list_del(&entry->head); ++ spin_unlock_irqrestore(&ramht->lock, flags); ++ ++ return entry; ++ } ++ } ++ spin_unlock_irqrestore(&ramht->lock, flags); ++ ++ return NULL; ++} ++ + static void +-nouveau_ramht_remove_locked(struct nouveau_channel *chan, u32 handle) ++nouveau_ramht_remove_hash(struct nouveau_channel *chan, u32 handle) + { + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; + struct nouveau_gpuobj *ramht = chan->ramht->gpuobj; +- struct nouveau_ramht_entry *entry, *tmp; ++ unsigned long flags; + u32 co, ho; + +- list_for_each_entry_safe(entry, tmp, &chan->ramht->entries, head) { +- if (entry->channel != chan || entry->handle != handle) +- continue; +- +- nouveau_gpuobj_ref(NULL, &entry->gpuobj); +- list_del(&entry->head); +- kfree(entry); +- break; +- } +- ++ spin_lock_irqsave(&chan->ramht->lock, flags); + co = ho = nouveau_ramht_hash_handle(chan, handle); + do { + if (nouveau_ramht_entry_valid(dev, ramht, co) && +@@ -184,7 +200,7 @@ + nv_wo32(ramht, co + 0, 0x00000000); + nv_wo32(ramht, co + 4, 0x00000000); + instmem->flush(dev); +- return; ++ goto out; + } + + co += 8; +@@ -194,17 +210,23 @@ + + NV_ERROR(dev, "RAMHT entry not found. 
ch=%d, handle=0x%08x\n", + chan->id, handle); ++out: ++ spin_unlock_irqrestore(&chan->ramht->lock, flags); + } + +-void ++int + nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle) + { +- struct nouveau_ramht *ramht = chan->ramht; +- unsigned long flags; ++ struct nouveau_ramht_entry *entry; + +- spin_lock_irqsave(&ramht->lock, flags); +- nouveau_ramht_remove_locked(chan, handle); +- spin_unlock_irqrestore(&ramht->lock, flags); ++ entry = nouveau_ramht_remove_entry(chan, handle); ++ if (!entry) ++ return -ENOENT; ++ ++ nouveau_ramht_remove_hash(chan, entry->handle); ++ nouveau_gpuobj_ref(NULL, &entry->gpuobj); ++ kfree(entry); ++ return 0; + } + + struct nouveau_gpuobj * +@@ -265,23 +287,19 @@ + nouveau_ramht_ref(struct nouveau_ramht *ref, struct nouveau_ramht **ptr, + struct nouveau_channel *chan) + { +- struct nouveau_ramht_entry *entry, *tmp; ++ struct nouveau_ramht_entry *entry; + struct nouveau_ramht *ramht; +- unsigned long flags; + + if (ref) + kref_get(&ref->refcount); + + ramht = *ptr; + if (ramht) { +- spin_lock_irqsave(&ramht->lock, flags); +- list_for_each_entry_safe(entry, tmp, &ramht->entries, head) { +- if (entry->channel != chan) +- continue; +- +- nouveau_ramht_remove_locked(chan, entry->handle); ++ while ((entry = nouveau_ramht_remove_entry(chan, 0))) { ++ nouveau_ramht_remove_hash(chan, entry->handle); ++ nouveau_gpuobj_ref(NULL, &entry->gpuobj); ++ kfree(entry); + } +- spin_unlock_irqrestore(&ramht->lock, flags); + + kref_put(&ramht->refcount, nouveau_ramht_del); + } +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_ramht.h linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_ramht.h +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_ramht.h 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_ramht.h 2010-11-12 06:18:06.000000000 +0100 +@@ -48,7 +48,7 @@ + + extern int nouveau_ramht_insert(struct nouveau_channel *, u32 handle, + struct nouveau_gpuobj *); +-extern void 
nouveau_ramht_remove(struct nouveau_channel *, u32 handle); ++extern int nouveau_ramht_remove(struct nouveau_channel *, u32 handle); + extern struct nouveau_gpuobj * + nouveau_ramht_find(struct nouveau_channel *chan, u32 handle); + +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_reg.h linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_reg.h +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_reg.h 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_reg.h 2010-11-12 06:18:06.000000000 +0100 +@@ -45,6 +45,11 @@ + # define NV04_PFB_REF_CMD_REFRESH (1 << 0) + #define NV04_PFB_PRE 0x001002d4 + # define NV04_PFB_PRE_CMD_PRECHARGE (1 << 0) ++#define NV20_PFB_ZCOMP(i) (0x00100300 + 4*(i)) ++# define NV20_PFB_ZCOMP_MODE_32 (4 << 24) ++# define NV20_PFB_ZCOMP_EN (1 << 31) ++# define NV25_PFB_ZCOMP_MODE_16 (1 << 20) ++# define NV25_PFB_ZCOMP_MODE_32 (2 << 20) + #define NV10_PFB_CLOSE_PAGE2 0x0010033c + #define NV04_PFB_SCRAMBLE(i) (0x00100400 + 4 * (i)) + #define NV40_PFB_TILE(i) (0x00100600 + (i*16)) +@@ -332,6 +337,7 @@ + #define NV04_PGRAPH_BSWIZZLE5 0x004006A0 + #define NV03_PGRAPH_STATUS 0x004006B0 + #define NV04_PGRAPH_STATUS 0x00400700 ++# define NV40_PGRAPH_STATUS_SYNC_STALL 0x00004000 + #define NV04_PGRAPH_TRAPPED_ADDR 0x00400704 + #define NV04_PGRAPH_TRAPPED_DATA 0x00400708 + #define NV04_PGRAPH_SURFACE 0x0040070C +@@ -378,6 +384,7 @@ + #define NV20_PGRAPH_TLIMIT(i) (0x00400904 + (i*16)) + #define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16)) + #define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16)) ++#define NV20_PGRAPH_ZCOMP(i) (0x00400980 + 4*(i)) + #define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16)) + #define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16)) + #define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16)) +@@ -714,31 +721,32 @@ + #define NV50_PDISPLAY_INTR_1_CLK_UNK10 0x00000010 + #define NV50_PDISPLAY_INTR_1_CLK_UNK20 0x00000020 + #define NV50_PDISPLAY_INTR_1_CLK_UNK40 0x00000040 +-#define 
NV50_PDISPLAY_INTR_EN 0x0061002c +-#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC 0x0000000c +-#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(n) (1 << ((n) + 2)) +-#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_0 0x00000004 +-#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_1 0x00000008 +-#define NV50_PDISPLAY_INTR_EN_CLK_UNK10 0x00000010 +-#define NV50_PDISPLAY_INTR_EN_CLK_UNK20 0x00000020 +-#define NV50_PDISPLAY_INTR_EN_CLK_UNK40 0x00000040 ++#define NV50_PDISPLAY_INTR_EN_0 0x00610028 ++#define NV50_PDISPLAY_INTR_EN_1 0x0061002c ++#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC 0x0000000c ++#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(n) (1 << ((n) + 2)) ++#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_0 0x00000004 ++#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_1 0x00000008 ++#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 0x00000010 ++#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 0x00000020 ++#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK40 0x00000040 + #define NV50_PDISPLAY_UNK30_CTRL 0x00610030 + #define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK0 0x00000200 + #define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK1 0x00000400 + #define NV50_PDISPLAY_UNK30_CTRL_PENDING 0x80000000 +-#define NV50_PDISPLAY_TRAPPED_ADDR 0x00610080 +-#define NV50_PDISPLAY_TRAPPED_DATA 0x00610084 +-#define NV50_PDISPLAY_CHANNEL_STAT(i) ((i) * 0x10 + 0x00610200) +-#define NV50_PDISPLAY_CHANNEL_STAT_DMA 0x00000010 +-#define NV50_PDISPLAY_CHANNEL_STAT_DMA_DISABLED 0x00000000 +-#define NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED 0x00000010 +-#define NV50_PDISPLAY_CHANNEL_DMA_CB(i) ((i) * 0x10 + 0x00610204) +-#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION 0x00000002 +-#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM 0x00000000 +-#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_SYSTEM 0x00000002 +-#define NV50_PDISPLAY_CHANNEL_DMA_CB_VALID 0x00000001 +-#define NV50_PDISPLAY_CHANNEL_UNK2(i) ((i) * 0x10 + 0x00610208) +-#define NV50_PDISPLAY_CHANNEL_UNK3(i) ((i) * 0x10 + 0x0061020c) ++#define NV50_PDISPLAY_TRAPPED_ADDR(i) ((i) * 0x08 + 0x00610080) ++#define 
NV50_PDISPLAY_TRAPPED_DATA(i) ((i) * 0x08 + 0x00610084) ++#define NV50_PDISPLAY_EVO_CTRL(i) ((i) * 0x10 + 0x00610200) ++#define NV50_PDISPLAY_EVO_CTRL_DMA 0x00000010 ++#define NV50_PDISPLAY_EVO_CTRL_DMA_DISABLED 0x00000000 ++#define NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED 0x00000010 ++#define NV50_PDISPLAY_EVO_DMA_CB(i) ((i) * 0x10 + 0x00610204) ++#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION 0x00000002 ++#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM 0x00000000 ++#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION_SYSTEM 0x00000002 ++#define NV50_PDISPLAY_EVO_DMA_CB_VALID 0x00000001 ++#define NV50_PDISPLAY_EVO_UNK2(i) ((i) * 0x10 + 0x00610208) ++#define NV50_PDISPLAY_EVO_HASH_TAG(i) ((i) * 0x10 + 0x0061020c) + + #define NV50_PDISPLAY_CURSOR 0x00610270 + #define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i) ((i) * 0x10 + 0x00610270) +@@ -746,15 +754,11 @@ + #define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS 0x00030000 + #define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE 0x00010000 + +-#define NV50_PDISPLAY_CTRL_STATE 0x00610300 +-#define NV50_PDISPLAY_CTRL_STATE_PENDING 0x80000000 +-#define NV50_PDISPLAY_CTRL_STATE_METHOD 0x00001ffc +-#define NV50_PDISPLAY_CTRL_STATE_ENABLE 0x00000001 +-#define NV50_PDISPLAY_CTRL_VAL 0x00610304 +-#define NV50_PDISPLAY_UNK_380 0x00610380 +-#define NV50_PDISPLAY_RAM_AMOUNT 0x00610384 +-#define NV50_PDISPLAY_UNK_388 0x00610388 +-#define NV50_PDISPLAY_UNK_38C 0x0061038c ++#define NV50_PDISPLAY_PIO_CTRL 0x00610300 ++#define NV50_PDISPLAY_PIO_CTRL_PENDING 0x80000000 ++#define NV50_PDISPLAY_PIO_CTRL_MTHD 0x00001ffc ++#define NV50_PDISPLAY_PIO_CTRL_ENABLED 0x00000001 ++#define NV50_PDISPLAY_PIO_DATA 0x00610304 + + #define NV50_PDISPLAY_CRTC_P(i, r) ((i) * 0x540 + NV50_PDISPLAY_CRTC_##r) + #define NV50_PDISPLAY_CRTC_C(i, r) (4 + (i) * 0x540 + NV50_PDISPLAY_CRTC_##r) +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_sgdma.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_sgdma.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_sgdma.c 
2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_sgdma.c 2010-11-12 06:18:06.000000000 +0100 +@@ -120,8 +120,8 @@ + dev_priv->engine.instmem.flush(nvbe->dev); + + if (dev_priv->card_type == NV_50) { +- nv50_vm_flush(dev, 5); /* PGRAPH */ +- nv50_vm_flush(dev, 0); /* PFIFO */ ++ dev_priv->engine.fifo.tlb_flush(dev); ++ dev_priv->engine.graph.tlb_flush(dev); + } + + nvbe->bound = true; +@@ -162,8 +162,8 @@ + dev_priv->engine.instmem.flush(nvbe->dev); + + if (dev_priv->card_type == NV_50) { +- nv50_vm_flush(dev, 5); +- nv50_vm_flush(dev, 0); ++ dev_priv->engine.fifo.tlb_flush(dev); ++ dev_priv->engine.graph.tlb_flush(dev); + } + + nvbe->bound = false; +@@ -224,7 +224,11 @@ + int i, ret; + + if (dev_priv->card_type < NV_50) { +- aper_size = (64 * 1024 * 1024); ++ if(dev_priv->ramin_rsvd_vram < 2 * 1024 * 1024) ++ aper_size = 64 * 1024 * 1024; ++ else ++ aper_size = 512 * 1024 * 1024; ++ + obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4; + obj_size += 8; /* ctxdma header */ + } else { +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_state.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_state.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_state.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_state.c 2010-11-12 06:18:06.000000000 +0100 +@@ -53,10 +53,10 @@ + engine->instmem.takedown = nv04_instmem_takedown; + engine->instmem.suspend = nv04_instmem_suspend; + engine->instmem.resume = nv04_instmem_resume; +- engine->instmem.populate = nv04_instmem_populate; +- engine->instmem.clear = nv04_instmem_clear; +- engine->instmem.bind = nv04_instmem_bind; +- engine->instmem.unbind = nv04_instmem_unbind; ++ engine->instmem.get = nv04_instmem_get; ++ engine->instmem.put = nv04_instmem_put; ++ engine->instmem.map = nv04_instmem_map; ++ engine->instmem.unmap = nv04_instmem_unmap; + engine->instmem.flush = nv04_instmem_flush; + engine->mc.init = 
nv04_mc_init; + engine->mc.takedown = nv04_mc_takedown; +@@ -65,7 +65,6 @@ + engine->timer.takedown = nv04_timer_takedown; + engine->fb.init = nv04_fb_init; + engine->fb.takedown = nv04_fb_takedown; +- engine->graph.grclass = nv04_graph_grclass; + engine->graph.init = nv04_graph_init; + engine->graph.takedown = nv04_graph_takedown; + engine->graph.fifo_access = nv04_graph_fifo_access; +@@ -76,7 +75,7 @@ + engine->graph.unload_context = nv04_graph_unload_context; + engine->fifo.channels = 16; + engine->fifo.init = nv04_fifo_init; +- engine->fifo.takedown = nouveau_stub_takedown; ++ engine->fifo.takedown = nv04_fifo_fini; + engine->fifo.disable = nv04_fifo_disable; + engine->fifo.enable = nv04_fifo_enable; + engine->fifo.reassign = nv04_fifo_reassign; +@@ -99,16 +98,18 @@ + engine->pm.clock_get = nv04_pm_clock_get; + engine->pm.clock_pre = nv04_pm_clock_pre; + engine->pm.clock_set = nv04_pm_clock_set; ++ engine->crypt.init = nouveau_stub_init; ++ engine->crypt.takedown = nouveau_stub_takedown; + break; + case 0x10: + engine->instmem.init = nv04_instmem_init; + engine->instmem.takedown = nv04_instmem_takedown; + engine->instmem.suspend = nv04_instmem_suspend; + engine->instmem.resume = nv04_instmem_resume; +- engine->instmem.populate = nv04_instmem_populate; +- engine->instmem.clear = nv04_instmem_clear; +- engine->instmem.bind = nv04_instmem_bind; +- engine->instmem.unbind = nv04_instmem_unbind; ++ engine->instmem.get = nv04_instmem_get; ++ engine->instmem.put = nv04_instmem_put; ++ engine->instmem.map = nv04_instmem_map; ++ engine->instmem.unmap = nv04_instmem_unmap; + engine->instmem.flush = nv04_instmem_flush; + engine->mc.init = nv04_mc_init; + engine->mc.takedown = nv04_mc_takedown; +@@ -117,8 +118,9 @@ + engine->timer.takedown = nv04_timer_takedown; + engine->fb.init = nv10_fb_init; + engine->fb.takedown = nv10_fb_takedown; +- engine->fb.set_region_tiling = nv10_fb_set_region_tiling; +- engine->graph.grclass = nv10_graph_grclass; ++ engine->fb.init_tile_region 
= nv10_fb_init_tile_region; ++ engine->fb.set_tile_region = nv10_fb_set_tile_region; ++ engine->fb.free_tile_region = nv10_fb_free_tile_region; + engine->graph.init = nv10_graph_init; + engine->graph.takedown = nv10_graph_takedown; + engine->graph.channel = nv10_graph_channel; +@@ -127,17 +129,17 @@ + engine->graph.fifo_access = nv04_graph_fifo_access; + engine->graph.load_context = nv10_graph_load_context; + engine->graph.unload_context = nv10_graph_unload_context; +- engine->graph.set_region_tiling = nv10_graph_set_region_tiling; ++ engine->graph.set_tile_region = nv10_graph_set_tile_region; + engine->fifo.channels = 32; + engine->fifo.init = nv10_fifo_init; +- engine->fifo.takedown = nouveau_stub_takedown; ++ engine->fifo.takedown = nv04_fifo_fini; + engine->fifo.disable = nv04_fifo_disable; + engine->fifo.enable = nv04_fifo_enable; + engine->fifo.reassign = nv04_fifo_reassign; + engine->fifo.cache_pull = nv04_fifo_cache_pull; + engine->fifo.channel_id = nv10_fifo_channel_id; + engine->fifo.create_context = nv10_fifo_create_context; +- engine->fifo.destroy_context = nv10_fifo_destroy_context; ++ engine->fifo.destroy_context = nv04_fifo_destroy_context; + engine->fifo.load_context = nv10_fifo_load_context; + engine->fifo.unload_context = nv10_fifo_unload_context; + engine->display.early_init = nv04_display_early_init; +@@ -153,16 +155,18 @@ + engine->pm.clock_get = nv04_pm_clock_get; + engine->pm.clock_pre = nv04_pm_clock_pre; + engine->pm.clock_set = nv04_pm_clock_set; ++ engine->crypt.init = nouveau_stub_init; ++ engine->crypt.takedown = nouveau_stub_takedown; + break; + case 0x20: + engine->instmem.init = nv04_instmem_init; + engine->instmem.takedown = nv04_instmem_takedown; + engine->instmem.suspend = nv04_instmem_suspend; + engine->instmem.resume = nv04_instmem_resume; +- engine->instmem.populate = nv04_instmem_populate; +- engine->instmem.clear = nv04_instmem_clear; +- engine->instmem.bind = nv04_instmem_bind; +- engine->instmem.unbind = 
nv04_instmem_unbind; ++ engine->instmem.get = nv04_instmem_get; ++ engine->instmem.put = nv04_instmem_put; ++ engine->instmem.map = nv04_instmem_map; ++ engine->instmem.unmap = nv04_instmem_unmap; + engine->instmem.flush = nv04_instmem_flush; + engine->mc.init = nv04_mc_init; + engine->mc.takedown = nv04_mc_takedown; +@@ -171,8 +175,9 @@ + engine->timer.takedown = nv04_timer_takedown; + engine->fb.init = nv10_fb_init; + engine->fb.takedown = nv10_fb_takedown; +- engine->fb.set_region_tiling = nv10_fb_set_region_tiling; +- engine->graph.grclass = nv20_graph_grclass; ++ engine->fb.init_tile_region = nv10_fb_init_tile_region; ++ engine->fb.set_tile_region = nv10_fb_set_tile_region; ++ engine->fb.free_tile_region = nv10_fb_free_tile_region; + engine->graph.init = nv20_graph_init; + engine->graph.takedown = nv20_graph_takedown; + engine->graph.channel = nv10_graph_channel; +@@ -181,17 +186,17 @@ + engine->graph.fifo_access = nv04_graph_fifo_access; + engine->graph.load_context = nv20_graph_load_context; + engine->graph.unload_context = nv20_graph_unload_context; +- engine->graph.set_region_tiling = nv20_graph_set_region_tiling; ++ engine->graph.set_tile_region = nv20_graph_set_tile_region; + engine->fifo.channels = 32; + engine->fifo.init = nv10_fifo_init; +- engine->fifo.takedown = nouveau_stub_takedown; ++ engine->fifo.takedown = nv04_fifo_fini; + engine->fifo.disable = nv04_fifo_disable; + engine->fifo.enable = nv04_fifo_enable; + engine->fifo.reassign = nv04_fifo_reassign; + engine->fifo.cache_pull = nv04_fifo_cache_pull; + engine->fifo.channel_id = nv10_fifo_channel_id; + engine->fifo.create_context = nv10_fifo_create_context; +- engine->fifo.destroy_context = nv10_fifo_destroy_context; ++ engine->fifo.destroy_context = nv04_fifo_destroy_context; + engine->fifo.load_context = nv10_fifo_load_context; + engine->fifo.unload_context = nv10_fifo_unload_context; + engine->display.early_init = nv04_display_early_init; +@@ -207,16 +212,18 @@ + engine->pm.clock_get = 
nv04_pm_clock_get; + engine->pm.clock_pre = nv04_pm_clock_pre; + engine->pm.clock_set = nv04_pm_clock_set; ++ engine->crypt.init = nouveau_stub_init; ++ engine->crypt.takedown = nouveau_stub_takedown; + break; + case 0x30: + engine->instmem.init = nv04_instmem_init; + engine->instmem.takedown = nv04_instmem_takedown; + engine->instmem.suspend = nv04_instmem_suspend; + engine->instmem.resume = nv04_instmem_resume; +- engine->instmem.populate = nv04_instmem_populate; +- engine->instmem.clear = nv04_instmem_clear; +- engine->instmem.bind = nv04_instmem_bind; +- engine->instmem.unbind = nv04_instmem_unbind; ++ engine->instmem.get = nv04_instmem_get; ++ engine->instmem.put = nv04_instmem_put; ++ engine->instmem.map = nv04_instmem_map; ++ engine->instmem.unmap = nv04_instmem_unmap; + engine->instmem.flush = nv04_instmem_flush; + engine->mc.init = nv04_mc_init; + engine->mc.takedown = nv04_mc_takedown; +@@ -225,8 +232,9 @@ + engine->timer.takedown = nv04_timer_takedown; + engine->fb.init = nv30_fb_init; + engine->fb.takedown = nv30_fb_takedown; +- engine->fb.set_region_tiling = nv10_fb_set_region_tiling; +- engine->graph.grclass = nv30_graph_grclass; ++ engine->fb.init_tile_region = nv30_fb_init_tile_region; ++ engine->fb.set_tile_region = nv10_fb_set_tile_region; ++ engine->fb.free_tile_region = nv30_fb_free_tile_region; + engine->graph.init = nv30_graph_init; + engine->graph.takedown = nv20_graph_takedown; + engine->graph.fifo_access = nv04_graph_fifo_access; +@@ -235,17 +243,17 @@ + engine->graph.destroy_context = nv20_graph_destroy_context; + engine->graph.load_context = nv20_graph_load_context; + engine->graph.unload_context = nv20_graph_unload_context; +- engine->graph.set_region_tiling = nv20_graph_set_region_tiling; ++ engine->graph.set_tile_region = nv20_graph_set_tile_region; + engine->fifo.channels = 32; + engine->fifo.init = nv10_fifo_init; +- engine->fifo.takedown = nouveau_stub_takedown; ++ engine->fifo.takedown = nv04_fifo_fini; + engine->fifo.disable = 
nv04_fifo_disable; + engine->fifo.enable = nv04_fifo_enable; + engine->fifo.reassign = nv04_fifo_reassign; + engine->fifo.cache_pull = nv04_fifo_cache_pull; + engine->fifo.channel_id = nv10_fifo_channel_id; + engine->fifo.create_context = nv10_fifo_create_context; +- engine->fifo.destroy_context = nv10_fifo_destroy_context; ++ engine->fifo.destroy_context = nv04_fifo_destroy_context; + engine->fifo.load_context = nv10_fifo_load_context; + engine->fifo.unload_context = nv10_fifo_unload_context; + engine->display.early_init = nv04_display_early_init; +@@ -263,6 +271,8 @@ + engine->pm.clock_set = nv04_pm_clock_set; + engine->pm.voltage_get = nouveau_voltage_gpio_get; + engine->pm.voltage_set = nouveau_voltage_gpio_set; ++ engine->crypt.init = nouveau_stub_init; ++ engine->crypt.takedown = nouveau_stub_takedown; + break; + case 0x40: + case 0x60: +@@ -270,10 +280,10 @@ + engine->instmem.takedown = nv04_instmem_takedown; + engine->instmem.suspend = nv04_instmem_suspend; + engine->instmem.resume = nv04_instmem_resume; +- engine->instmem.populate = nv04_instmem_populate; +- engine->instmem.clear = nv04_instmem_clear; +- engine->instmem.bind = nv04_instmem_bind; +- engine->instmem.unbind = nv04_instmem_unbind; ++ engine->instmem.get = nv04_instmem_get; ++ engine->instmem.put = nv04_instmem_put; ++ engine->instmem.map = nv04_instmem_map; ++ engine->instmem.unmap = nv04_instmem_unmap; + engine->instmem.flush = nv04_instmem_flush; + engine->mc.init = nv40_mc_init; + engine->mc.takedown = nv40_mc_takedown; +@@ -282,8 +292,9 @@ + engine->timer.takedown = nv04_timer_takedown; + engine->fb.init = nv40_fb_init; + engine->fb.takedown = nv40_fb_takedown; +- engine->fb.set_region_tiling = nv40_fb_set_region_tiling; +- engine->graph.grclass = nv40_graph_grclass; ++ engine->fb.init_tile_region = nv30_fb_init_tile_region; ++ engine->fb.set_tile_region = nv40_fb_set_tile_region; ++ engine->fb.free_tile_region = nv30_fb_free_tile_region; + engine->graph.init = nv40_graph_init; + 
engine->graph.takedown = nv40_graph_takedown; + engine->graph.fifo_access = nv04_graph_fifo_access; +@@ -292,17 +303,17 @@ + engine->graph.destroy_context = nv40_graph_destroy_context; + engine->graph.load_context = nv40_graph_load_context; + engine->graph.unload_context = nv40_graph_unload_context; +- engine->graph.set_region_tiling = nv40_graph_set_region_tiling; ++ engine->graph.set_tile_region = nv40_graph_set_tile_region; + engine->fifo.channels = 32; + engine->fifo.init = nv40_fifo_init; +- engine->fifo.takedown = nouveau_stub_takedown; ++ engine->fifo.takedown = nv04_fifo_fini; + engine->fifo.disable = nv04_fifo_disable; + engine->fifo.enable = nv04_fifo_enable; + engine->fifo.reassign = nv04_fifo_reassign; + engine->fifo.cache_pull = nv04_fifo_cache_pull; + engine->fifo.channel_id = nv10_fifo_channel_id; + engine->fifo.create_context = nv40_fifo_create_context; +- engine->fifo.destroy_context = nv40_fifo_destroy_context; ++ engine->fifo.destroy_context = nv04_fifo_destroy_context; + engine->fifo.load_context = nv40_fifo_load_context; + engine->fifo.unload_context = nv40_fifo_unload_context; + engine->display.early_init = nv04_display_early_init; +@@ -321,6 +332,8 @@ + engine->pm.voltage_get = nouveau_voltage_gpio_get; + engine->pm.voltage_set = nouveau_voltage_gpio_set; + engine->pm.temp_get = nv40_temp_get; ++ engine->crypt.init = nouveau_stub_init; ++ engine->crypt.takedown = nouveau_stub_takedown; + break; + case 0x50: + case 0x80: /* gotta love NVIDIA's consistency.. 
*/ +@@ -330,10 +343,10 @@ + engine->instmem.takedown = nv50_instmem_takedown; + engine->instmem.suspend = nv50_instmem_suspend; + engine->instmem.resume = nv50_instmem_resume; +- engine->instmem.populate = nv50_instmem_populate; +- engine->instmem.clear = nv50_instmem_clear; +- engine->instmem.bind = nv50_instmem_bind; +- engine->instmem.unbind = nv50_instmem_unbind; ++ engine->instmem.get = nv50_instmem_get; ++ engine->instmem.put = nv50_instmem_put; ++ engine->instmem.map = nv50_instmem_map; ++ engine->instmem.unmap = nv50_instmem_unmap; + if (dev_priv->chipset == 0x50) + engine->instmem.flush = nv50_instmem_flush; + else +@@ -345,7 +358,6 @@ + engine->timer.takedown = nv04_timer_takedown; + engine->fb.init = nv50_fb_init; + engine->fb.takedown = nv50_fb_takedown; +- engine->graph.grclass = nv50_graph_grclass; + engine->graph.init = nv50_graph_init; + engine->graph.takedown = nv50_graph_takedown; + engine->graph.fifo_access = nv50_graph_fifo_access; +@@ -354,6 +366,15 @@ + engine->graph.destroy_context = nv50_graph_destroy_context; + engine->graph.load_context = nv50_graph_load_context; + engine->graph.unload_context = nv50_graph_unload_context; ++ if (dev_priv->chipset != 0x86) ++ engine->graph.tlb_flush = nv50_graph_tlb_flush; ++ else { ++ /* from what i can see nvidia do this on every ++ * pre-NVA3 board except NVAC, but, we've only ++ * ever seen problems on NV86 ++ */ ++ engine->graph.tlb_flush = nv86_graph_tlb_flush; ++ } + engine->fifo.channels = 128; + engine->fifo.init = nv50_fifo_init; + engine->fifo.takedown = nv50_fifo_takedown; +@@ -365,30 +386,39 @@ + engine->fifo.destroy_context = nv50_fifo_destroy_context; + engine->fifo.load_context = nv50_fifo_load_context; + engine->fifo.unload_context = nv50_fifo_unload_context; ++ engine->fifo.tlb_flush = nv50_fifo_tlb_flush; + engine->display.early_init = nv50_display_early_init; + engine->display.late_takedown = nv50_display_late_takedown; + engine->display.create = nv50_display_create; + 
engine->display.init = nv50_display_init; + engine->display.destroy = nv50_display_destroy; + engine->gpio.init = nv50_gpio_init; +- engine->gpio.takedown = nouveau_stub_takedown; ++ engine->gpio.takedown = nv50_gpio_fini; + engine->gpio.get = nv50_gpio_get; + engine->gpio.set = nv50_gpio_set; ++ engine->gpio.irq_register = nv50_gpio_irq_register; ++ engine->gpio.irq_unregister = nv50_gpio_irq_unregister; + engine->gpio.irq_enable = nv50_gpio_irq_enable; + switch (dev_priv->chipset) { +- case 0xa3: +- case 0xa5: +- case 0xa8: +- case 0xaf: +- engine->pm.clock_get = nva3_pm_clock_get; +- engine->pm.clock_pre = nva3_pm_clock_pre; +- engine->pm.clock_set = nva3_pm_clock_set; +- break; +- default: ++ case 0x84: ++ case 0x86: ++ case 0x92: ++ case 0x94: ++ case 0x96: ++ case 0x98: ++ case 0xa0: ++ case 0xaa: ++ case 0xac: ++ case 0x50: + engine->pm.clock_get = nv50_pm_clock_get; + engine->pm.clock_pre = nv50_pm_clock_pre; + engine->pm.clock_set = nv50_pm_clock_set; + break; ++ default: ++ engine->pm.clock_get = nva3_pm_clock_get; ++ engine->pm.clock_pre = nva3_pm_clock_pre; ++ engine->pm.clock_set = nva3_pm_clock_set; ++ break; + } + engine->pm.voltage_get = nouveau_voltage_gpio_get; + engine->pm.voltage_set = nouveau_voltage_gpio_set; +@@ -396,16 +426,33 @@ + engine->pm.temp_get = nv84_temp_get; + else + engine->pm.temp_get = nv40_temp_get; ++ switch (dev_priv->chipset) { ++ case 0x84: ++ case 0x86: ++ case 0x92: ++ case 0x94: ++ case 0x96: ++ case 0xa0: ++ engine->crypt.init = nv84_crypt_init; ++ engine->crypt.takedown = nv84_crypt_fini; ++ engine->crypt.create_context = nv84_crypt_create_context; ++ engine->crypt.destroy_context = nv84_crypt_destroy_context; ++ break; ++ default: ++ engine->crypt.init = nouveau_stub_init; ++ engine->crypt.takedown = nouveau_stub_takedown; ++ break; ++ } + break; + case 0xC0: + engine->instmem.init = nvc0_instmem_init; + engine->instmem.takedown = nvc0_instmem_takedown; + engine->instmem.suspend = nvc0_instmem_suspend; + 
engine->instmem.resume = nvc0_instmem_resume; +- engine->instmem.populate = nvc0_instmem_populate; +- engine->instmem.clear = nvc0_instmem_clear; +- engine->instmem.bind = nvc0_instmem_bind; +- engine->instmem.unbind = nvc0_instmem_unbind; ++ engine->instmem.get = nvc0_instmem_get; ++ engine->instmem.put = nvc0_instmem_put; ++ engine->instmem.map = nvc0_instmem_map; ++ engine->instmem.unmap = nvc0_instmem_unmap; + engine->instmem.flush = nvc0_instmem_flush; + engine->mc.init = nv50_mc_init; + engine->mc.takedown = nv50_mc_takedown; +@@ -414,7 +461,6 @@ + engine->timer.takedown = nv04_timer_takedown; + engine->fb.init = nvc0_fb_init; + engine->fb.takedown = nvc0_fb_takedown; +- engine->graph.grclass = NULL; //nvc0_graph_grclass; + engine->graph.init = nvc0_graph_init; + engine->graph.takedown = nvc0_graph_takedown; + engine->graph.fifo_access = nvc0_graph_fifo_access; +@@ -443,7 +489,11 @@ + engine->gpio.takedown = nouveau_stub_takedown; + engine->gpio.get = nv50_gpio_get; + engine->gpio.set = nv50_gpio_set; ++ engine->gpio.irq_register = nv50_gpio_irq_register; ++ engine->gpio.irq_unregister = nv50_gpio_irq_unregister; + engine->gpio.irq_enable = nv50_gpio_irq_enable; ++ engine->crypt.init = nouveau_stub_init; ++ engine->crypt.takedown = nouveau_stub_takedown; + break; + default: + NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset); +@@ -506,11 +556,11 @@ + if (ret) + goto out_err; + ++ mutex_unlock(&dev_priv->channel->mutex); + return 0; + + out_err: +- nouveau_channel_free(dev_priv->channel); +- dev_priv->channel = NULL; ++ nouveau_channel_put(&dev_priv->channel); + return ret; + } + +@@ -557,6 +607,8 @@ + if (ret) + goto out; + engine = &dev_priv->engine; ++ spin_lock_init(&dev_priv->channels.lock); ++ spin_lock_init(&dev_priv->tile.lock); + spin_lock_init(&dev_priv->context_switch_lock); + + /* Make the CRTCs and I2C buses accessible */ +@@ -615,26 +667,28 @@ + if (ret) + goto out_fb; + ++ /* PCRYPT */ ++ ret = engine->crypt.init(dev); ++ if (ret) ++ goto 
out_graph; ++ + /* PFIFO */ + ret = engine->fifo.init(dev); + if (ret) +- goto out_graph; ++ goto out_crypt; + } + + ret = engine->display.create(dev); + if (ret) + goto out_fifo; + +- /* this call irq_preinstall, register irq handler and +- * call irq_postinstall +- */ +- ret = drm_irq_install(dev); ++ ret = drm_vblank_init(dev, nv_two_heads(dev) ? 2 : 1); + if (ret) +- goto out_display; ++ goto out_vblank; + +- ret = drm_vblank_init(dev, 0); ++ ret = nouveau_irq_init(dev); + if (ret) +- goto out_irq; ++ goto out_vblank; + + /* what about PVIDEO/PCRTC/PRAMDAC etc? */ + +@@ -659,12 +713,16 @@ + out_fence: + nouveau_fence_fini(dev); + out_irq: +- drm_irq_uninstall(dev); +-out_display: ++ nouveau_irq_fini(dev); ++out_vblank: ++ drm_vblank_cleanup(dev); + engine->display.destroy(dev); + out_fifo: + if (!nouveau_noaccel) + engine->fifo.takedown(dev); ++out_crypt: ++ if (!nouveau_noaccel) ++ engine->crypt.takedown(dev); + out_graph: + if (!nouveau_noaccel) + engine->graph.takedown(dev); +@@ -703,12 +761,12 @@ + + if (!engine->graph.accel_blocked) { + nouveau_fence_fini(dev); +- nouveau_channel_free(dev_priv->channel); +- dev_priv->channel = NULL; ++ nouveau_channel_put_unlocked(&dev_priv->channel); + } + + if (!nouveau_noaccel) { + engine->fifo.takedown(dev); ++ engine->crypt.takedown(dev); + engine->graph.takedown(dev); + } + engine->fb.takedown(dev); +@@ -727,7 +785,8 @@ + nouveau_gpuobj_takedown(dev); + nouveau_mem_vram_fini(dev); + +- drm_irq_uninstall(dev); ++ nouveau_irq_fini(dev); ++ drm_vblank_cleanup(dev); + + nouveau_pm_fini(dev); + nouveau_bios_takedown(dev); +@@ -1041,6 +1100,12 @@ + case NOUVEAU_GETPARAM_PTIMER_TIME: + getparam->value = dev_priv->engine.timer.read(dev); + break; ++ case NOUVEAU_GETPARAM_HAS_BO_USAGE: ++ getparam->value = 1; ++ break; ++ case NOUVEAU_GETPARAM_HAS_PAGEFLIP: ++ getparam->value = (dev_priv->card_type < NV_50); ++ break; + case NOUVEAU_GETPARAM_GRAPH_UNITS: + /* NV40 and NV50 versions are quite different, but register + * 
address is the same. User is supposed to know the card +@@ -1051,7 +1116,7 @@ + } + /* FALLTHRU */ + default: +- NV_ERROR(dev, "unknown parameter %lld\n", getparam->param); ++ NV_DEBUG(dev, "unknown parameter %lld\n", getparam->param); + return -EINVAL; + } + +@@ -1066,7 +1131,7 @@ + + switch (setparam->param) { + default: +- NV_ERROR(dev, "unknown parameter %lld\n", setparam->param); ++ NV_DEBUG(dev, "unknown parameter %lld\n", setparam->param); + return -EINVAL; + } + +@@ -1092,7 +1157,13 @@ + /* Waits for PGRAPH to go completely idle */ + bool nouveau_wait_for_idle(struct drm_device *dev) + { +- if (!nv_wait(dev, NV04_PGRAPH_STATUS, 0xffffffff, 0x00000000)) { ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t mask = ~0; ++ ++ if (dev_priv->card_type == NV_40) ++ mask &= ~NV40_PGRAPH_STATUS_SYNC_STALL; ++ ++ if (!nv_wait(dev, NV04_PGRAPH_STATUS, mask, 0)) { + NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n", + nv_rd32(dev, NV04_PGRAPH_STATUS)); + return false; +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_temp.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_temp.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_temp.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_temp.c 2010-11-12 06:18:06.000000000 +0100 +@@ -191,7 +191,7 @@ + int offset = sensor->offset_mult / sensor->offset_div; + int core_temp; + +- if (dev_priv->chipset >= 0x50) { ++ if (dev_priv->card_type >= NV_50) { + core_temp = nv_rd32(dev, 0x20008); + } else { + core_temp = nv_rd32(dev, 0x0015b4) & 0x1fff; +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_util.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_util.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_util.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_util.c 2010-11-12 06:18:06.000000000 +0100 +@@ -0,0 +1,69 @@ ++/* ++ * Copyright (C) 2010 Nouveau 
Project ++ * ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ */ ++ ++#include ++ ++#include "nouveau_util.h" ++ ++static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20); ++ ++void ++nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value) ++{ ++ while (bf->name) { ++ if (value & bf->mask) { ++ printk(" %s", bf->name); ++ value &= ~bf->mask; ++ } ++ ++ bf++; ++ } ++ ++ if (value) ++ printk(" (unknown bits 0x%08x)", value); ++} ++ ++void ++nouveau_enum_print(const struct nouveau_enum *en, u32 value) ++{ ++ while (en->name) { ++ if (value == en->value) { ++ printk("%s", en->name); ++ return; ++ } ++ ++ en++; ++ } ++ ++ printk("(unknown enum 0x%08x)", value); ++} ++ ++int ++nouveau_ratelimit(void) ++{ ++ return __ratelimit(&nouveau_ratelimit_state); ++} +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_util.h linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_util.h +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nouveau_util.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nouveau_util.h 2010-11-12 06:18:06.000000000 +0100 +@@ -0,0 +1,45 @@ ++/* ++ * Copyright (C) 2010 Nouveau Project ++ * ++ * All Rights Reserved. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining ++ * a copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sublicense, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial ++ * portions of the Software. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE ++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION ++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION ++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ * ++ */ ++ ++#ifndef __NOUVEAU_UTIL_H__ ++#define __NOUVEAU_UTIL_H__ ++ ++struct nouveau_bitfield { ++ u32 mask; ++ const char *name; ++}; ++ ++struct nouveau_enum { ++ u32 value; ++ const char *name; ++}; ++ ++void nouveau_bitfield_print(const struct nouveau_bitfield *, u32 value); ++void nouveau_enum_print(const struct nouveau_enum *, u32 value); ++int nouveau_ratelimit(void); ++ ++#endif +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv04_crtc.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv04_crtc.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv04_crtc.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv04_crtc.c 2010-11-12 06:18:06.000000000 +0100 +@@ -158,7 +158,6 @@ + { + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct drm_device *dev = crtc->dev; +- struct drm_connector *connector; + unsigned char seq1 = 0, crtc17 = 0; + unsigned char crtc1A; + +@@ -213,10 +212,6 @@ + NVVgaSeqReset(dev, nv_crtc->index, false); + + NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RPC1_INDEX, crtc1A); +- +- /* Update connector polling modes */ +- list_for_each_entry(connector, &dev->mode_config.connector_list, head) +- nouveau_connector_set_polling(connector); + } + + static bool +@@ -556,7 +551,10 @@ + if (dev_priv->card_type >= NV_30) + regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT); + +- regp->crtc_cfg = NV_PCRTC_CONFIG_START_ADDRESS_HSYNC; ++ if (dev_priv->card_type >= NV_10) ++ regp->crtc_cfg = 
NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC; ++ else ++ regp->crtc_cfg = NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC; + + /* Some misc regs */ + if (dev_priv->card_type == NV_40) { +@@ -674,6 +672,7 @@ + if (nv_two_heads(dev)) + NVSetOwner(dev, nv_crtc->index); + ++ drm_vblank_pre_modeset(dev, nv_crtc->index); + funcs->dpms(crtc, DRM_MODE_DPMS_OFF); + + NVBlankScreen(dev, nv_crtc->index, true); +@@ -706,6 +705,7 @@ + #endif + + funcs->dpms(crtc, DRM_MODE_DPMS_ON); ++ drm_vblank_post_modeset(dev, nv_crtc->index); + } + + static void nv_crtc_destroy(struct drm_crtc *crtc) +@@ -831,7 +831,7 @@ + /* Update the framebuffer location. */ + regp->fb_start = nv_crtc->fb.offset & ~3; + regp->fb_start += (y * drm_fb->pitch) + (x * drm_fb->bits_per_pixel / 8); +- NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_START, regp->fb_start); ++ nv_set_crtc_base(dev, nv_crtc->index, regp->fb_start); + + /* Update the arbitration parameters. */ + nouveau_calc_arb(dev, crtc->mode.clock, drm_fb->bits_per_pixel, +@@ -991,6 +991,7 @@ + .cursor_move = nv04_crtc_cursor_move, + .gamma_set = nv_crtc_gamma_set, + .set_config = drm_crtc_helper_set_config, ++ .page_flip = nouveau_crtc_page_flip, + .destroy = nv_crtc_destroy, + }; + +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv04_dfp.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv04_dfp.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv04_dfp.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv04_dfp.c 2010-11-12 06:18:06.000000000 +0100 +@@ -185,14 +185,15 @@ + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder); + +- /* For internal panels and gpu scaling on DVI we need the native mode */ +- if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) { +- if (!nv_connector->native_mode) +- return false; ++ if (!nv_connector->native_mode || ++ nv_connector->scaling_mode == DRM_MODE_SCALE_NONE || ++ mode->hdisplay > 
nv_connector->native_mode->hdisplay || ++ mode->vdisplay > nv_connector->native_mode->vdisplay) { ++ nv_encoder->mode = *adjusted_mode; ++ ++ } else { + nv_encoder->mode = *nv_connector->native_mode; + adjusted_mode->clock = nv_connector->native_mode->clock; +- } else { +- nv_encoder->mode = *adjusted_mode; + } + + return true; +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv04_display.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv04_display.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv04_display.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv04_display.c 2010-11-12 06:18:06.000000000 +0100 +@@ -32,6 +32,9 @@ + #include "nouveau_encoder.h" + #include "nouveau_connector.h" + ++static void nv04_vblank_crtc0_isr(struct drm_device *); ++static void nv04_vblank_crtc1_isr(struct drm_device *); ++ + static void + nv04_display_store_initial_head_owner(struct drm_device *dev) + { +@@ -197,6 +200,8 @@ + func->save(encoder); + } + ++ nouveau_irq_register(dev, 24, nv04_vblank_crtc0_isr); ++ nouveau_irq_register(dev, 25, nv04_vblank_crtc1_isr); + return 0; + } + +@@ -208,6 +213,9 @@ + + NV_DEBUG_KMS(dev, "\n"); + ++ nouveau_irq_unregister(dev, 24); ++ nouveau_irq_unregister(dev, 25); ++ + /* Turn every CRTC off. 
*/ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + struct drm_mode_set modeset = { +@@ -258,3 +266,16 @@ + return 0; + } + ++static void ++nv04_vblank_crtc0_isr(struct drm_device *dev) ++{ ++ nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK); ++ drm_handle_vblank(dev, 0); ++} ++ ++static void ++nv04_vblank_crtc1_isr(struct drm_device *dev) ++{ ++ nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK); ++ drm_handle_vblank(dev, 1); ++} +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv04_fbcon.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv04_fbcon.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv04_fbcon.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv04_fbcon.c 2010-11-12 06:18:06.000000000 +0100 +@@ -28,52 +28,39 @@ + #include "nouveau_ramht.h" + #include "nouveau_fbcon.h" + +-void ++int + nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) + { + struct nouveau_fbdev *nfbdev = info->par; + struct drm_device *dev = nfbdev->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan = dev_priv->channel; ++ int ret; + +- if (info->state != FBINFO_STATE_RUNNING) +- return; +- +- if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 4)) { +- nouveau_fbcon_gpu_lockup(info); +- } +- +- if (info->flags & FBINFO_HWACCEL_DISABLED) { +- cfb_copyarea(info, region); +- return; +- } ++ ret = RING_SPACE(chan, 4); ++ if (ret) ++ return ret; + + BEGIN_RING(chan, NvSubImageBlit, 0x0300, 3); + OUT_RING(chan, (region->sy << 16) | region->sx); + OUT_RING(chan, (region->dy << 16) | region->dx); + OUT_RING(chan, (region->height << 16) | region->width); + FIRE_RING(chan); ++ return 0; + } + +-void ++int + nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) + { + struct nouveau_fbdev *nfbdev = info->par; + struct drm_device *dev = nfbdev->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct 
nouveau_channel *chan = dev_priv->channel; ++ int ret; + +- if (info->state != FBINFO_STATE_RUNNING) +- return; +- +- if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 7)) { +- nouveau_fbcon_gpu_lockup(info); +- } +- +- if (info->flags & FBINFO_HWACCEL_DISABLED) { +- cfb_fillrect(info, rect); +- return; +- } ++ ret = RING_SPACE(chan, 7); ++ if (ret) ++ return ret; + + BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1); + OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3); +@@ -87,9 +74,10 @@ + OUT_RING(chan, (rect->dx << 16) | rect->dy); + OUT_RING(chan, (rect->width << 16) | rect->height); + FIRE_RING(chan); ++ return 0; + } + +-void ++int + nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) + { + struct nouveau_fbdev *nfbdev = info->par; +@@ -101,23 +89,14 @@ + uint32_t dsize; + uint32_t width; + uint32_t *data = (uint32_t *)image->data; ++ int ret; + +- if (info->state != FBINFO_STATE_RUNNING) +- return; +- +- if (image->depth != 1) { +- cfb_imageblit(info, image); +- return; +- } +- +- if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 8)) { +- nouveau_fbcon_gpu_lockup(info); +- } ++ if (image->depth != 1) ++ return -ENODEV; + +- if (info->flags & FBINFO_HWACCEL_DISABLED) { +- cfb_imageblit(info, image); +- return; +- } ++ ret = RING_SPACE(chan, 8); ++ if (ret) ++ return ret; + + width = ALIGN(image->width, 8); + dsize = ALIGN(width * image->height, 32) >> 5; +@@ -144,11 +123,9 @@ + while (dsize) { + int iter_len = dsize > 128 ? 
128 : dsize; + +- if (RING_SPACE(chan, iter_len + 1)) { +- nouveau_fbcon_gpu_lockup(info); +- cfb_imageblit(info, image); +- return; +- } ++ ret = RING_SPACE(chan, iter_len + 1); ++ if (ret) ++ return ret; + + BEGIN_RING(chan, NvSubGdiRect, 0x0c00, iter_len); + OUT_RINGp(chan, data, iter_len); +@@ -157,6 +134,7 @@ + } + + FIRE_RING(chan); ++ return 0; + } + + static int +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv04_fifo.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv04_fifo.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv04_fifo.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv04_fifo.c 2010-11-12 06:18:06.000000000 +0100 +@@ -28,6 +28,7 @@ + #include "drm.h" + #include "nouveau_drv.h" + #include "nouveau_ramht.h" ++#include "nouveau_util.h" + + #define NV04_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV04_RAMFC__SIZE)) + #define NV04_RAMFC__SIZE 32 +@@ -151,10 +152,27 @@ + nv04_fifo_destroy_context(struct nouveau_channel *chan) + { + struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; ++ unsigned long flags; + +- nv_wr32(dev, NV04_PFIFO_MODE, +- nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id)); ++ spin_lock_irqsave(&dev_priv->context_switch_lock, flags); ++ pfifo->reassign(dev, false); ++ ++ /* Unload the context if it's the currently active one */ ++ if (pfifo->channel_id(dev) == chan->id) { ++ pfifo->disable(dev); ++ pfifo->unload_context(dev); ++ pfifo->enable(dev); ++ } + ++ /* Keep it from being rescheduled */ ++ nv_mask(dev, NV04_PFIFO_MODE, 1 << chan->id, 0); ++ ++ pfifo->reassign(dev, true); ++ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); ++ ++ /* Free the channel resources */ + nouveau_gpuobj_ref(NULL, &chan->ramfc); + } + +@@ -208,7 +226,7 @@ + if (chid < 0 || chid >= dev_priv->engine.fifo.channels) + return 0; + +- chan = dev_priv->fifos[chid]; ++ chan = 
dev_priv->channels.ptr[chid]; + if (!chan) { + NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid); + return -EINVAL; +@@ -267,6 +285,7 @@ + static void + nv04_fifo_init_intr(struct drm_device *dev) + { ++ nouveau_irq_register(dev, 8, nv04_fifo_isr); + nv_wr32(dev, 0x002100, 0xffffffff); + nv_wr32(dev, 0x002140, 0xffffffff); + } +@@ -289,7 +308,7 @@ + pfifo->reassign(dev, true); + + for (i = 0; i < dev_priv->engine.fifo.channels; i++) { +- if (dev_priv->fifos[i]) { ++ if (dev_priv->channels.ptr[i]) { + uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE); + nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i)); + } +@@ -298,3 +317,207 @@ + return 0; + } + ++void ++nv04_fifo_fini(struct drm_device *dev) ++{ ++ nv_wr32(dev, 0x2140, 0x00000000); ++ nouveau_irq_unregister(dev, 8); ++} ++ ++static bool ++nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_channel *chan = NULL; ++ struct nouveau_gpuobj *obj; ++ unsigned long flags; ++ const int subc = (addr >> 13) & 0x7; ++ const int mthd = addr & 0x1ffc; ++ bool handled = false; ++ u32 engine; ++ ++ spin_lock_irqsave(&dev_priv->channels.lock, flags); ++ if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels)) ++ chan = dev_priv->channels.ptr[chid]; ++ if (unlikely(!chan)) ++ goto out; ++ ++ switch (mthd) { ++ case 0x0000: /* bind object to subchannel */ ++ obj = nouveau_ramht_find(chan, data); ++ if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW)) ++ break; ++ ++ chan->sw_subchannel[subc] = obj->class; ++ engine = 0x0000000f << (subc * 4); ++ ++ nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000); ++ handled = true; ++ break; ++ default: ++ engine = nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE); ++ if (unlikely(((engine >> (subc * 4)) & 0xf) != 0)) ++ break; ++ ++ if (!nouveau_gpuobj_mthd_call(chan, chan->sw_subchannel[subc], ++ mthd, data)) ++ handled = true; ++ break; ++ } ++ ++out: ++ 
spin_unlock_irqrestore(&dev_priv->channels.lock, flags); ++ return handled; ++} ++ ++void ++nv04_fifo_isr(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_engine *engine = &dev_priv->engine; ++ uint32_t status, reassign; ++ int cnt = 0; ++ ++ reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1; ++ while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) { ++ uint32_t chid, get; ++ ++ nv_wr32(dev, NV03_PFIFO_CACHES, 0); ++ ++ chid = engine->fifo.channel_id(dev); ++ get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET); ++ ++ if (status & NV_PFIFO_INTR_CACHE_ERROR) { ++ uint32_t mthd, data; ++ int ptr; ++ ++ /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before ++ * wrapping on my G80 chips, but CACHE1 isn't big ++ * enough for this much data.. Tests show that it ++ * wraps around to the start at GET=0x800.. No clue ++ * as to why.. ++ */ ++ ptr = (get & 0x7ff) >> 2; ++ ++ if (dev_priv->card_type < NV_40) { ++ mthd = nv_rd32(dev, ++ NV04_PFIFO_CACHE1_METHOD(ptr)); ++ data = nv_rd32(dev, ++ NV04_PFIFO_CACHE1_DATA(ptr)); ++ } else { ++ mthd = nv_rd32(dev, ++ NV40_PFIFO_CACHE1_METHOD(ptr)); ++ data = nv_rd32(dev, ++ NV40_PFIFO_CACHE1_DATA(ptr)); ++ } ++ ++ if (!nouveau_fifo_swmthd(dev, chid, mthd, data)) { ++ NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d " ++ "Mthd 0x%04x Data 0x%08x\n", ++ chid, (mthd >> 13) & 7, mthd & 0x1ffc, ++ data); ++ } ++ ++ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0); ++ nv_wr32(dev, NV03_PFIFO_INTR_0, ++ NV_PFIFO_INTR_CACHE_ERROR); ++ ++ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, ++ nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1); ++ nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4); ++ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, ++ nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1); ++ nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0); ++ ++ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, ++ nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1); ++ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1); ++ ++ status &= ~NV_PFIFO_INTR_CACHE_ERROR; ++ } ++ ++ if 
(status & NV_PFIFO_INTR_DMA_PUSHER) { ++ u32 dma_get = nv_rd32(dev, 0x003244); ++ u32 dma_put = nv_rd32(dev, 0x003240); ++ u32 push = nv_rd32(dev, 0x003220); ++ u32 state = nv_rd32(dev, 0x003228); ++ ++ if (dev_priv->card_type == NV_50) { ++ u32 ho_get = nv_rd32(dev, 0x003328); ++ u32 ho_put = nv_rd32(dev, 0x003320); ++ u32 ib_get = nv_rd32(dev, 0x003334); ++ u32 ib_put = nv_rd32(dev, 0x003330); ++ ++ if (nouveau_ratelimit()) ++ NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x " ++ "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x " ++ "State 0x%08x Push 0x%08x\n", ++ chid, ho_get, dma_get, ho_put, ++ dma_put, ib_get, ib_put, state, ++ push); ++ ++ /* METHOD_COUNT, in DMA_STATE on earlier chipsets */ ++ nv_wr32(dev, 0x003364, 0x00000000); ++ if (dma_get != dma_put || ho_get != ho_put) { ++ nv_wr32(dev, 0x003244, dma_put); ++ nv_wr32(dev, 0x003328, ho_put); ++ } else ++ if (ib_get != ib_put) { ++ nv_wr32(dev, 0x003334, ib_put); ++ } ++ } else { ++ NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x " ++ "Put 0x%08x State 0x%08x Push 0x%08x\n", ++ chid, dma_get, dma_put, state, push); ++ ++ if (dma_get != dma_put) ++ nv_wr32(dev, 0x003244, dma_put); ++ } ++ ++ nv_wr32(dev, 0x003228, 0x00000000); ++ nv_wr32(dev, 0x003220, 0x00000001); ++ nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER); ++ status &= ~NV_PFIFO_INTR_DMA_PUSHER; ++ } ++ ++ if (status & NV_PFIFO_INTR_SEMAPHORE) { ++ uint32_t sem; ++ ++ status &= ~NV_PFIFO_INTR_SEMAPHORE; ++ nv_wr32(dev, NV03_PFIFO_INTR_0, ++ NV_PFIFO_INTR_SEMAPHORE); ++ ++ sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE); ++ nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1); ++ ++ nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4); ++ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1); ++ } ++ ++ if (dev_priv->card_type == NV_50) { ++ if (status & 0x00000010) { ++ nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT"); ++ status &= ~0x00000010; ++ nv_wr32(dev, 0x002100, 0x00000010); ++ } ++ } ++ ++ if (status) { ++ if (nouveau_ratelimit()) ++ NV_INFO(dev, "PFIFO_INTR 
0x%08x - Ch %d\n", ++ status, chid); ++ nv_wr32(dev, NV03_PFIFO_INTR_0, status); ++ status = 0; ++ } ++ ++ nv_wr32(dev, NV03_PFIFO_CACHES, reassign); ++ } ++ ++ if (status) { ++ NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt); ++ nv_wr32(dev, 0x2140, 0); ++ nv_wr32(dev, 0x140, 0); ++ } ++ ++ nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING); ++} +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv04_graph.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv04_graph.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv04_graph.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv04_graph.c 2010-11-12 06:18:06.000000000 +0100 +@@ -26,6 +26,11 @@ + #include "drm.h" + #include "nouveau_drm.h" + #include "nouveau_drv.h" ++#include "nouveau_hw.h" ++#include "nouveau_util.h" ++ ++static int nv04_graph_register(struct drm_device *dev); ++static void nv04_graph_isr(struct drm_device *dev); + + static uint32_t nv04_graph_ctx_regs[] = { + 0x0040053c, +@@ -357,10 +362,10 @@ + if (chid >= dev_priv->engine.fifo.channels) + return NULL; + +- return dev_priv->fifos[chid]; ++ return dev_priv->channels.ptr[chid]; + } + +-void ++static void + nv04_graph_context_switch(struct drm_device *dev) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; +@@ -376,7 +381,7 @@ + + /* Load context for next channel */ + chid = dev_priv->engine.fifo.channel_id(dev); +- chan = dev_priv->fifos[chid]; ++ chan = dev_priv->channels.ptr[chid]; + if (chan) + nv04_graph_load_context(chan); + +@@ -412,10 +417,25 @@ + + void nv04_graph_destroy_context(struct nouveau_channel *chan) + { ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + struct graph_state *pgraph_ctx = chan->pgraph_ctx; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&dev_priv->context_switch_lock, flags); ++ pgraph->fifo_access(dev, false); ++ ++ /* 
Unload the context if it's the currently active one */ ++ if (pgraph->channel(dev) == chan) ++ pgraph->unload_context(dev); + ++ /* Free the context resources */ + kfree(pgraph_ctx); + chan->pgraph_ctx = NULL; ++ ++ pgraph->fifo_access(dev, true); ++ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); + } + + int nv04_graph_load_context(struct nouveau_channel *chan) +@@ -468,13 +488,19 @@ + { + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t tmp; ++ int ret; + + nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & + ~NV_PMC_ENABLE_PGRAPH); + nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | + NV_PMC_ENABLE_PGRAPH); + ++ ret = nv04_graph_register(dev); ++ if (ret) ++ return ret; ++ + /* Enable PGRAPH interrupts */ ++ nouveau_irq_register(dev, 12, nv04_graph_isr); + nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF); + nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); + +@@ -510,6 +536,8 @@ + + void nv04_graph_takedown(struct drm_device *dev) + { ++ nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000); ++ nouveau_irq_unregister(dev, 12); + } + + void +@@ -524,13 +552,27 @@ + } + + static int +-nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass, +- int mthd, uint32_t data) ++nv04_graph_mthd_set_ref(struct nouveau_channel *chan, ++ u32 class, u32 mthd, u32 data) + { + atomic_set(&chan->fence.last_sequence_irq, data); + return 0; + } + ++int ++nv04_graph_mthd_page_flip(struct nouveau_channel *chan, ++ u32 class, u32 mthd, u32 data) ++{ ++ struct drm_device *dev = chan->dev; ++ struct nouveau_page_flip_state s; ++ ++ if (!nouveau_finish_page_flip(chan, &s)) ++ nv_set_crtc_base(dev, s.crtc, ++ s.offset + s.y * s.pitch + s.x * s.bpp / 8); ++ ++ return 0; ++} ++ + /* + * Software methods, why they are needed, and how they all work: + * +@@ -606,12 +648,12 @@ + */ + + static void +-nv04_graph_set_ctx1(struct nouveau_channel *chan, uint32_t mask, uint32_t value) ++nv04_graph_set_ctx1(struct nouveau_channel *chan, u32 mask, u32 
value) + { + struct drm_device *dev = chan->dev; +- uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4; ++ u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4; + int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7; +- uint32_t tmp; ++ u32 tmp; + + tmp = nv_ri32(dev, instance); + tmp &= ~mask; +@@ -623,11 +665,11 @@ + } + + static void +-nv04_graph_set_ctx_val(struct nouveau_channel *chan, uint32_t mask, uint32_t value) ++nv04_graph_set_ctx_val(struct nouveau_channel *chan, u32 mask, u32 value) + { + struct drm_device *dev = chan->dev; +- uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4; +- uint32_t tmp, ctx1; ++ u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4; ++ u32 tmp, ctx1; + int class, op, valid = 1; + + ctx1 = nv_ri32(dev, instance); +@@ -672,13 +714,13 @@ + } + + static int +-nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass, +- int mthd, uint32_t data) ++nv04_graph_mthd_set_operation(struct nouveau_channel *chan, ++ u32 class, u32 mthd, u32 data) + { + if (data > 5) + return 1; + /* Old versions of the objects only accept first three operations. 
*/ +- if (data > 2 && grclass < 0x40) ++ if (data > 2 && class < 0x40) + return 1; + nv04_graph_set_ctx1(chan, 0x00038000, data << 15); + /* changing operation changes set of objects needed for validation */ +@@ -687,8 +729,8 @@ + } + + static int +-nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, int grclass, +- int mthd, uint32_t data) ++nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, ++ u32 class, u32 mthd, u32 data) + { + uint32_t min = data & 0xffff, max; + uint32_t w = data >> 16; +@@ -706,8 +748,8 @@ + } + + static int +-nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, int grclass, +- int mthd, uint32_t data) ++nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, ++ u32 class, u32 mthd, u32 data) + { + uint32_t min = data & 0xffff, max; + uint32_t w = data >> 16; +@@ -725,8 +767,8 @@ + } + + static int +-nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, int grclass, +- int mthd, uint32_t data) ++nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, ++ u32 class, u32 mthd, u32 data) + { + switch (nv_ri32(chan->dev, data << 4) & 0xff) { + case 0x30: +@@ -742,8 +784,8 @@ + } + + static int +-nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, int grclass, +- int mthd, uint32_t data) ++nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, ++ u32 class, u32 mthd, u32 data) + { + switch (nv_ri32(chan->dev, data << 4) & 0xff) { + case 0x30: +@@ -763,8 +805,8 @@ + } + + static int +-nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, int grclass, +- int mthd, uint32_t data) ++nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, ++ u32 class, u32 mthd, u32 data) + { + switch (nv_ri32(chan->dev, data << 4) & 0xff) { + case 0x30: +@@ -778,8 +820,8 @@ + } + + static int +-nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, int grclass, +- int mthd, uint32_t data) ++nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, ++ u32 class, u32 mthd, u32 data) + { + 
switch (nv_ri32(chan->dev, data << 4) & 0xff) { + case 0x30: +@@ -793,8 +835,8 @@ + } + + static int +-nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, int grclass, +- int mthd, uint32_t data) ++nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, ++ u32 class, u32 mthd, u32 data) + { + switch (nv_ri32(chan->dev, data << 4) & 0xff) { + case 0x30: +@@ -808,8 +850,8 @@ + } + + static int +-nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, int grclass, +- int mthd, uint32_t data) ++nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, ++ u32 class, u32 mthd, u32 data) + { + switch (nv_ri32(chan->dev, data << 4) & 0xff) { + case 0x30: +@@ -823,8 +865,8 @@ + } + + static int +-nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, int grclass, +- int mthd, uint32_t data) ++nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, ++ u32 class, u32 mthd, u32 data) + { + switch (nv_ri32(chan->dev, data << 4) & 0xff) { + case 0x30: +@@ -838,8 +880,8 @@ + } + + static int +-nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, int grclass, +- int mthd, uint32_t data) ++nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, ++ u32 class, u32 mthd, u32 data) + { + switch (nv_ri32(chan->dev, data << 4) & 0xff) { + case 0x30: +@@ -853,8 +895,8 @@ + } + + static int +-nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, int grclass, +- int mthd, uint32_t data) ++nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, ++ u32 class, u32 mthd, u32 data) + { + switch (nv_ri32(chan->dev, data << 4) & 0xff) { + case 0x30: +@@ -868,8 +910,8 @@ + } + + static int +-nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, int grclass, +- int mthd, uint32_t data) ++nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, ++ u32 class, u32 mthd, u32 data) + { + switch (nv_ri32(chan->dev, data << 4) & 0xff) { + case 0x30: +@@ -883,8 +925,8 @@ + } + + static int +-nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, int grclass, +- 
int mthd, uint32_t data) ++nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, ++ u32 class, u32 mthd, u32 data) + { + switch (nv_ri32(chan->dev, data << 4) & 0xff) { + case 0x30: +@@ -898,8 +940,8 @@ + } + + static int +-nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, int grclass, +- int mthd, uint32_t data) ++nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, ++ u32 class, u32 mthd, u32 data) + { + switch (nv_ri32(chan->dev, data << 4) & 0xff) { + case 0x30: +@@ -913,8 +955,8 @@ + } + + static int +-nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, int grclass, +- int mthd, uint32_t data) ++nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, ++ u32 class, u32 mthd, u32 data) + { + switch (nv_ri32(chan->dev, data << 4) & 0xff) { + case 0x30: +@@ -930,194 +972,346 @@ + return 1; + } + +-static struct nouveau_pgraph_object_method nv04_graph_mthds_sw[] = { +- { 0x0150, nv04_graph_mthd_set_ref }, +- {} +-}; +- +-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_gdirect[] = { +- { 0x0184, nv04_graph_mthd_bind_nv01_patt }, +- { 0x0188, nv04_graph_mthd_bind_rop }, +- { 0x018c, nv04_graph_mthd_bind_beta1 }, +- { 0x0190, nv04_graph_mthd_bind_surf_dst }, +- { 0x02fc, nv04_graph_mthd_set_operation }, +- {}, +-}; +- +-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_gdirect[] = { +- { 0x0188, nv04_graph_mthd_bind_nv04_patt }, +- { 0x018c, nv04_graph_mthd_bind_rop }, +- { 0x0190, nv04_graph_mthd_bind_beta1 }, +- { 0x0194, nv04_graph_mthd_bind_beta4 }, +- { 0x0198, nv04_graph_mthd_bind_surf2d }, +- { 0x02fc, nv04_graph_mthd_set_operation }, +- {}, +-}; +- +-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_imageblit[] = { +- { 0x0184, nv04_graph_mthd_bind_chroma }, +- { 0x0188, nv04_graph_mthd_bind_clip }, +- { 0x018c, nv04_graph_mthd_bind_nv01_patt }, +- { 0x0190, nv04_graph_mthd_bind_rop }, +- { 0x0194, nv04_graph_mthd_bind_beta1 }, +- { 0x0198, nv04_graph_mthd_bind_surf_dst }, +- { 0x019c, 
nv04_graph_mthd_bind_surf_src }, +- { 0x02fc, nv04_graph_mthd_set_operation }, +- {}, +-}; +- +-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_imageblit_ifc[] = { +- { 0x0184, nv04_graph_mthd_bind_chroma }, +- { 0x0188, nv04_graph_mthd_bind_clip }, +- { 0x018c, nv04_graph_mthd_bind_nv04_patt }, +- { 0x0190, nv04_graph_mthd_bind_rop }, +- { 0x0194, nv04_graph_mthd_bind_beta1 }, +- { 0x0198, nv04_graph_mthd_bind_beta4 }, +- { 0x019c, nv04_graph_mthd_bind_surf2d }, +- { 0x02fc, nv04_graph_mthd_set_operation }, +- {}, +-}; +- +-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_iifc[] = { +- { 0x0188, nv04_graph_mthd_bind_chroma }, +- { 0x018c, nv04_graph_mthd_bind_clip }, +- { 0x0190, nv04_graph_mthd_bind_nv04_patt }, +- { 0x0194, nv04_graph_mthd_bind_rop }, +- { 0x0198, nv04_graph_mthd_bind_beta1 }, +- { 0x019c, nv04_graph_mthd_bind_beta4 }, +- { 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf }, +- { 0x03e4, nv04_graph_mthd_set_operation }, +- {}, +-}; ++static int ++nv04_graph_register(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; + +-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_ifc[] = { +- { 0x0184, nv04_graph_mthd_bind_chroma }, +- { 0x0188, nv04_graph_mthd_bind_clip }, +- { 0x018c, nv04_graph_mthd_bind_nv01_patt }, +- { 0x0190, nv04_graph_mthd_bind_rop }, +- { 0x0194, nv04_graph_mthd_bind_beta1 }, +- { 0x0198, nv04_graph_mthd_bind_surf_dst }, +- { 0x02fc, nv04_graph_mthd_set_operation }, +- {}, +-}; ++ if (dev_priv->engine.graph.registered) ++ return 0; + +-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifc[] = { +- { 0x0184, nv04_graph_mthd_bind_chroma }, +- { 0x0188, nv04_graph_mthd_bind_nv01_patt }, +- { 0x018c, nv04_graph_mthd_bind_rop }, +- { 0x0190, nv04_graph_mthd_bind_beta1 }, +- { 0x0194, nv04_graph_mthd_bind_surf_dst }, +- { 0x02fc, nv04_graph_mthd_set_operation }, +- {}, +-}; ++ /* dvd subpicture */ ++ NVOBJ_CLASS(dev, 0x0038, GR); + +-static 
struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifc[] = { +- { 0x0184, nv04_graph_mthd_bind_chroma }, +- { 0x0188, nv04_graph_mthd_bind_nv04_patt }, +- { 0x018c, nv04_graph_mthd_bind_rop }, +- { 0x0190, nv04_graph_mthd_bind_beta1 }, +- { 0x0194, nv04_graph_mthd_bind_beta4 }, +- { 0x0198, nv04_graph_mthd_bind_surf2d }, +- { 0x02fc, nv04_graph_mthd_set_operation }, +- {}, +-}; ++ /* m2mf */ ++ NVOBJ_CLASS(dev, 0x0039, GR); + +-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifm[] = { +- { 0x0188, nv04_graph_mthd_bind_nv01_patt }, +- { 0x018c, nv04_graph_mthd_bind_rop }, +- { 0x0190, nv04_graph_mthd_bind_beta1 }, +- { 0x0194, nv04_graph_mthd_bind_surf_dst }, +- { 0x0304, nv04_graph_mthd_set_operation }, +- {}, +-}; ++ /* nv03 gdirect */ ++ NVOBJ_CLASS(dev, 0x004b, GR); ++ NVOBJ_MTHD (dev, 0x004b, 0x0184, nv04_graph_mthd_bind_nv01_patt); ++ NVOBJ_MTHD (dev, 0x004b, 0x0188, nv04_graph_mthd_bind_rop); ++ NVOBJ_MTHD (dev, 0x004b, 0x018c, nv04_graph_mthd_bind_beta1); ++ NVOBJ_MTHD (dev, 0x004b, 0x0190, nv04_graph_mthd_bind_surf_dst); ++ NVOBJ_MTHD (dev, 0x004b, 0x02fc, nv04_graph_mthd_set_operation); ++ ++ /* nv04 gdirect */ ++ NVOBJ_CLASS(dev, 0x004a, GR); ++ NVOBJ_MTHD (dev, 0x004a, 0x0188, nv04_graph_mthd_bind_nv04_patt); ++ NVOBJ_MTHD (dev, 0x004a, 0x018c, nv04_graph_mthd_bind_rop); ++ NVOBJ_MTHD (dev, 0x004a, 0x0190, nv04_graph_mthd_bind_beta1); ++ NVOBJ_MTHD (dev, 0x004a, 0x0194, nv04_graph_mthd_bind_beta4); ++ NVOBJ_MTHD (dev, 0x004a, 0x0198, nv04_graph_mthd_bind_surf2d); ++ NVOBJ_MTHD (dev, 0x004a, 0x02fc, nv04_graph_mthd_set_operation); ++ ++ /* nv01 imageblit */ ++ NVOBJ_CLASS(dev, 0x001f, GR); ++ NVOBJ_MTHD (dev, 0x001f, 0x0184, nv04_graph_mthd_bind_chroma); ++ NVOBJ_MTHD (dev, 0x001f, 0x0188, nv04_graph_mthd_bind_clip); ++ NVOBJ_MTHD (dev, 0x001f, 0x018c, nv04_graph_mthd_bind_nv01_patt); ++ NVOBJ_MTHD (dev, 0x001f, 0x0190, nv04_graph_mthd_bind_rop); ++ NVOBJ_MTHD (dev, 0x001f, 0x0194, nv04_graph_mthd_bind_beta1); ++ NVOBJ_MTHD (dev, 
0x001f, 0x0198, nv04_graph_mthd_bind_surf_dst); ++ NVOBJ_MTHD (dev, 0x001f, 0x019c, nv04_graph_mthd_bind_surf_src); ++ NVOBJ_MTHD (dev, 0x001f, 0x02fc, nv04_graph_mthd_set_operation); ++ ++ /* nv04 imageblit */ ++ NVOBJ_CLASS(dev, 0x005f, GR); ++ NVOBJ_MTHD (dev, 0x005f, 0x0184, nv04_graph_mthd_bind_chroma); ++ NVOBJ_MTHD (dev, 0x005f, 0x0188, nv04_graph_mthd_bind_clip); ++ NVOBJ_MTHD (dev, 0x005f, 0x018c, nv04_graph_mthd_bind_nv04_patt); ++ NVOBJ_MTHD (dev, 0x005f, 0x0190, nv04_graph_mthd_bind_rop); ++ NVOBJ_MTHD (dev, 0x005f, 0x0194, nv04_graph_mthd_bind_beta1); ++ NVOBJ_MTHD (dev, 0x005f, 0x0198, nv04_graph_mthd_bind_beta4); ++ NVOBJ_MTHD (dev, 0x005f, 0x019c, nv04_graph_mthd_bind_surf2d); ++ NVOBJ_MTHD (dev, 0x005f, 0x02fc, nv04_graph_mthd_set_operation); ++ ++ /* nv04 iifc */ ++ NVOBJ_CLASS(dev, 0x0060, GR); ++ NVOBJ_MTHD (dev, 0x0060, 0x0188, nv04_graph_mthd_bind_chroma); ++ NVOBJ_MTHD (dev, 0x0060, 0x018c, nv04_graph_mthd_bind_clip); ++ NVOBJ_MTHD (dev, 0x0060, 0x0190, nv04_graph_mthd_bind_nv04_patt); ++ NVOBJ_MTHD (dev, 0x0060, 0x0194, nv04_graph_mthd_bind_rop); ++ NVOBJ_MTHD (dev, 0x0060, 0x0198, nv04_graph_mthd_bind_beta1); ++ NVOBJ_MTHD (dev, 0x0060, 0x019c, nv04_graph_mthd_bind_beta4); ++ NVOBJ_MTHD (dev, 0x0060, 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf); ++ NVOBJ_MTHD (dev, 0x0060, 0x03e4, nv04_graph_mthd_set_operation); ++ ++ /* nv05 iifc */ ++ NVOBJ_CLASS(dev, 0x0064, GR); ++ ++ /* nv01 ifc */ ++ NVOBJ_CLASS(dev, 0x0021, GR); ++ NVOBJ_MTHD (dev, 0x0021, 0x0184, nv04_graph_mthd_bind_chroma); ++ NVOBJ_MTHD (dev, 0x0021, 0x0188, nv04_graph_mthd_bind_clip); ++ NVOBJ_MTHD (dev, 0x0021, 0x018c, nv04_graph_mthd_bind_nv01_patt); ++ NVOBJ_MTHD (dev, 0x0021, 0x0190, nv04_graph_mthd_bind_rop); ++ NVOBJ_MTHD (dev, 0x0021, 0x0194, nv04_graph_mthd_bind_beta1); ++ NVOBJ_MTHD (dev, 0x0021, 0x0198, nv04_graph_mthd_bind_surf_dst); ++ NVOBJ_MTHD (dev, 0x0021, 0x02fc, nv04_graph_mthd_set_operation); ++ ++ /* nv04 ifc */ ++ NVOBJ_CLASS(dev, 0x0061, GR); ++ NVOBJ_MTHD 
(dev, 0x0061, 0x0184, nv04_graph_mthd_bind_chroma); ++ NVOBJ_MTHD (dev, 0x0061, 0x0188, nv04_graph_mthd_bind_clip); ++ NVOBJ_MTHD (dev, 0x0061, 0x018c, nv04_graph_mthd_bind_nv04_patt); ++ NVOBJ_MTHD (dev, 0x0061, 0x0190, nv04_graph_mthd_bind_rop); ++ NVOBJ_MTHD (dev, 0x0061, 0x0194, nv04_graph_mthd_bind_beta1); ++ NVOBJ_MTHD (dev, 0x0061, 0x0198, nv04_graph_mthd_bind_beta4); ++ NVOBJ_MTHD (dev, 0x0061, 0x019c, nv04_graph_mthd_bind_surf2d); ++ NVOBJ_MTHD (dev, 0x0061, 0x02fc, nv04_graph_mthd_set_operation); ++ ++ /* nv05 ifc */ ++ NVOBJ_CLASS(dev, 0x0065, GR); ++ ++ /* nv03 sifc */ ++ NVOBJ_CLASS(dev, 0x0036, GR); ++ NVOBJ_MTHD (dev, 0x0036, 0x0184, nv04_graph_mthd_bind_chroma); ++ NVOBJ_MTHD (dev, 0x0036, 0x0188, nv04_graph_mthd_bind_nv01_patt); ++ NVOBJ_MTHD (dev, 0x0036, 0x018c, nv04_graph_mthd_bind_rop); ++ NVOBJ_MTHD (dev, 0x0036, 0x0190, nv04_graph_mthd_bind_beta1); ++ NVOBJ_MTHD (dev, 0x0036, 0x0194, nv04_graph_mthd_bind_surf_dst); ++ NVOBJ_MTHD (dev, 0x0036, 0x02fc, nv04_graph_mthd_set_operation); ++ ++ /* nv04 sifc */ ++ NVOBJ_CLASS(dev, 0x0076, GR); ++ NVOBJ_MTHD (dev, 0x0076, 0x0184, nv04_graph_mthd_bind_chroma); ++ NVOBJ_MTHD (dev, 0x0076, 0x0188, nv04_graph_mthd_bind_nv04_patt); ++ NVOBJ_MTHD (dev, 0x0076, 0x018c, nv04_graph_mthd_bind_rop); ++ NVOBJ_MTHD (dev, 0x0076, 0x0190, nv04_graph_mthd_bind_beta1); ++ NVOBJ_MTHD (dev, 0x0076, 0x0194, nv04_graph_mthd_bind_beta4); ++ NVOBJ_MTHD (dev, 0x0076, 0x0198, nv04_graph_mthd_bind_surf2d); ++ NVOBJ_MTHD (dev, 0x0076, 0x02fc, nv04_graph_mthd_set_operation); ++ ++ /* nv05 sifc */ ++ NVOBJ_CLASS(dev, 0x0066, GR); ++ ++ /* nv03 sifm */ ++ NVOBJ_CLASS(dev, 0x0037, GR); ++ NVOBJ_MTHD (dev, 0x0037, 0x0188, nv04_graph_mthd_bind_nv01_patt); ++ NVOBJ_MTHD (dev, 0x0037, 0x018c, nv04_graph_mthd_bind_rop); ++ NVOBJ_MTHD (dev, 0x0037, 0x0190, nv04_graph_mthd_bind_beta1); ++ NVOBJ_MTHD (dev, 0x0037, 0x0194, nv04_graph_mthd_bind_surf_dst); ++ NVOBJ_MTHD (dev, 0x0037, 0x0304, nv04_graph_mthd_set_operation); ++ ++ /* nv04 sifm 
*/ ++ NVOBJ_CLASS(dev, 0x0077, GR); ++ NVOBJ_MTHD (dev, 0x0077, 0x0188, nv04_graph_mthd_bind_nv04_patt); ++ NVOBJ_MTHD (dev, 0x0077, 0x018c, nv04_graph_mthd_bind_rop); ++ NVOBJ_MTHD (dev, 0x0077, 0x0190, nv04_graph_mthd_bind_beta1); ++ NVOBJ_MTHD (dev, 0x0077, 0x0194, nv04_graph_mthd_bind_beta4); ++ NVOBJ_MTHD (dev, 0x0077, 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf); ++ NVOBJ_MTHD (dev, 0x0077, 0x0304, nv04_graph_mthd_set_operation); ++ ++ /* null */ ++ NVOBJ_CLASS(dev, 0x0030, GR); ++ ++ /* surf2d */ ++ NVOBJ_CLASS(dev, 0x0042, GR); ++ ++ /* rop */ ++ NVOBJ_CLASS(dev, 0x0043, GR); ++ ++ /* beta1 */ ++ NVOBJ_CLASS(dev, 0x0012, GR); ++ ++ /* beta4 */ ++ NVOBJ_CLASS(dev, 0x0072, GR); ++ ++ /* cliprect */ ++ NVOBJ_CLASS(dev, 0x0019, GR); ++ ++ /* nv01 pattern */ ++ NVOBJ_CLASS(dev, 0x0018, GR); ++ ++ /* nv04 pattern */ ++ NVOBJ_CLASS(dev, 0x0044, GR); ++ ++ /* swzsurf */ ++ NVOBJ_CLASS(dev, 0x0052, GR); ++ ++ /* surf3d */ ++ NVOBJ_CLASS(dev, 0x0053, GR); ++ NVOBJ_MTHD (dev, 0x0053, 0x02f8, nv04_graph_mthd_surf3d_clip_h); ++ NVOBJ_MTHD (dev, 0x0053, 0x02fc, nv04_graph_mthd_surf3d_clip_v); ++ ++ /* nv03 tex_tri */ ++ NVOBJ_CLASS(dev, 0x0048, GR); ++ NVOBJ_MTHD (dev, 0x0048, 0x0188, nv04_graph_mthd_bind_clip); ++ NVOBJ_MTHD (dev, 0x0048, 0x018c, nv04_graph_mthd_bind_surf_color); ++ NVOBJ_MTHD (dev, 0x0048, 0x0190, nv04_graph_mthd_bind_surf_zeta); ++ ++ /* tex_tri */ ++ NVOBJ_CLASS(dev, 0x0054, GR); ++ ++ /* multitex_tri */ ++ NVOBJ_CLASS(dev, 0x0055, GR); ++ ++ /* nv01 chroma */ ++ NVOBJ_CLASS(dev, 0x0017, GR); ++ ++ /* nv04 chroma */ ++ NVOBJ_CLASS(dev, 0x0057, GR); ++ ++ /* surf_dst */ ++ NVOBJ_CLASS(dev, 0x0058, GR); ++ ++ /* surf_src */ ++ NVOBJ_CLASS(dev, 0x0059, GR); ++ ++ /* surf_color */ ++ NVOBJ_CLASS(dev, 0x005a, GR); ++ ++ /* surf_zeta */ ++ NVOBJ_CLASS(dev, 0x005b, GR); ++ ++ /* nv01 line */ ++ NVOBJ_CLASS(dev, 0x001c, GR); ++ NVOBJ_MTHD (dev, 0x001c, 0x0184, nv04_graph_mthd_bind_clip); ++ NVOBJ_MTHD (dev, 0x001c, 0x0188, nv04_graph_mthd_bind_nv01_patt); ++ 
NVOBJ_MTHD (dev, 0x001c, 0x018c, nv04_graph_mthd_bind_rop); ++ NVOBJ_MTHD (dev, 0x001c, 0x0190, nv04_graph_mthd_bind_beta1); ++ NVOBJ_MTHD (dev, 0x001c, 0x0194, nv04_graph_mthd_bind_surf_dst); ++ NVOBJ_MTHD (dev, 0x001c, 0x02fc, nv04_graph_mthd_set_operation); ++ ++ /* nv04 line */ ++ NVOBJ_CLASS(dev, 0x005c, GR); ++ NVOBJ_MTHD (dev, 0x005c, 0x0184, nv04_graph_mthd_bind_clip); ++ NVOBJ_MTHD (dev, 0x005c, 0x0188, nv04_graph_mthd_bind_nv04_patt); ++ NVOBJ_MTHD (dev, 0x005c, 0x018c, nv04_graph_mthd_bind_rop); ++ NVOBJ_MTHD (dev, 0x005c, 0x0190, nv04_graph_mthd_bind_beta1); ++ NVOBJ_MTHD (dev, 0x005c, 0x0194, nv04_graph_mthd_bind_beta4); ++ NVOBJ_MTHD (dev, 0x005c, 0x0198, nv04_graph_mthd_bind_surf2d); ++ NVOBJ_MTHD (dev, 0x005c, 0x02fc, nv04_graph_mthd_set_operation); ++ ++ /* nv01 tri */ ++ NVOBJ_CLASS(dev, 0x001d, GR); ++ NVOBJ_MTHD (dev, 0x001d, 0x0184, nv04_graph_mthd_bind_clip); ++ NVOBJ_MTHD (dev, 0x001d, 0x0188, nv04_graph_mthd_bind_nv01_patt); ++ NVOBJ_MTHD (dev, 0x001d, 0x018c, nv04_graph_mthd_bind_rop); ++ NVOBJ_MTHD (dev, 0x001d, 0x0190, nv04_graph_mthd_bind_beta1); ++ NVOBJ_MTHD (dev, 0x001d, 0x0194, nv04_graph_mthd_bind_surf_dst); ++ NVOBJ_MTHD (dev, 0x001d, 0x02fc, nv04_graph_mthd_set_operation); ++ ++ /* nv04 tri */ ++ NVOBJ_CLASS(dev, 0x005d, GR); ++ NVOBJ_MTHD (dev, 0x005d, 0x0184, nv04_graph_mthd_bind_clip); ++ NVOBJ_MTHD (dev, 0x005d, 0x0188, nv04_graph_mthd_bind_nv04_patt); ++ NVOBJ_MTHD (dev, 0x005d, 0x018c, nv04_graph_mthd_bind_rop); ++ NVOBJ_MTHD (dev, 0x005d, 0x0190, nv04_graph_mthd_bind_beta1); ++ NVOBJ_MTHD (dev, 0x005d, 0x0194, nv04_graph_mthd_bind_beta4); ++ NVOBJ_MTHD (dev, 0x005d, 0x0198, nv04_graph_mthd_bind_surf2d); ++ NVOBJ_MTHD (dev, 0x005d, 0x02fc, nv04_graph_mthd_set_operation); ++ ++ /* nv01 rect */ ++ NVOBJ_CLASS(dev, 0x001e, GR); ++ NVOBJ_MTHD (dev, 0x001e, 0x0184, nv04_graph_mthd_bind_clip); ++ NVOBJ_MTHD (dev, 0x001e, 0x0188, nv04_graph_mthd_bind_nv01_patt); ++ NVOBJ_MTHD (dev, 0x001e, 0x018c, nv04_graph_mthd_bind_rop); ++ 
NVOBJ_MTHD (dev, 0x001e, 0x0190, nv04_graph_mthd_bind_beta1); ++ NVOBJ_MTHD (dev, 0x001e, 0x0194, nv04_graph_mthd_bind_surf_dst); ++ NVOBJ_MTHD (dev, 0x001e, 0x02fc, nv04_graph_mthd_set_operation); ++ ++ /* nv04 rect */ ++ NVOBJ_CLASS(dev, 0x005e, GR); ++ NVOBJ_MTHD (dev, 0x005e, 0x0184, nv04_graph_mthd_bind_clip); ++ NVOBJ_MTHD (dev, 0x005e, 0x0188, nv04_graph_mthd_bind_nv04_patt); ++ NVOBJ_MTHD (dev, 0x005e, 0x018c, nv04_graph_mthd_bind_rop); ++ NVOBJ_MTHD (dev, 0x005e, 0x0190, nv04_graph_mthd_bind_beta1); ++ NVOBJ_MTHD (dev, 0x005e, 0x0194, nv04_graph_mthd_bind_beta4); ++ NVOBJ_MTHD (dev, 0x005e, 0x0198, nv04_graph_mthd_bind_surf2d); ++ NVOBJ_MTHD (dev, 0x005e, 0x02fc, nv04_graph_mthd_set_operation); ++ ++ /* nvsw */ ++ NVOBJ_CLASS(dev, 0x506e, SW); ++ NVOBJ_MTHD (dev, 0x506e, 0x0150, nv04_graph_mthd_set_ref); ++ NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip); + +-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifm[] = { +- { 0x0188, nv04_graph_mthd_bind_nv04_patt }, +- { 0x018c, nv04_graph_mthd_bind_rop }, +- { 0x0190, nv04_graph_mthd_bind_beta1 }, +- { 0x0194, nv04_graph_mthd_bind_beta4 }, +- { 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf }, +- { 0x0304, nv04_graph_mthd_set_operation }, +- {}, +-}; +- +-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_shape[] = { +- { 0x0184, nv04_graph_mthd_bind_clip }, +- { 0x0188, nv04_graph_mthd_bind_nv01_patt }, +- { 0x018c, nv04_graph_mthd_bind_rop }, +- { 0x0190, nv04_graph_mthd_bind_beta1 }, +- { 0x0194, nv04_graph_mthd_bind_surf_dst }, +- { 0x02fc, nv04_graph_mthd_set_operation }, +- {}, +-}; +- +-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_shape[] = { +- { 0x0184, nv04_graph_mthd_bind_clip }, +- { 0x0188, nv04_graph_mthd_bind_nv04_patt }, +- { 0x018c, nv04_graph_mthd_bind_rop }, +- { 0x0190, nv04_graph_mthd_bind_beta1 }, +- { 0x0194, nv04_graph_mthd_bind_beta4 }, +- { 0x0198, nv04_graph_mthd_bind_surf2d }, +- { 0x02fc, nv04_graph_mthd_set_operation }, 
+- {}, ++ dev_priv->engine.graph.registered = true; ++ return 0; + }; + +-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_tex_tri[] = { +- { 0x0188, nv04_graph_mthd_bind_clip }, +- { 0x018c, nv04_graph_mthd_bind_surf_color }, +- { 0x0190, nv04_graph_mthd_bind_surf_zeta }, +- {}, ++static struct nouveau_bitfield nv04_graph_intr[] = { ++ { NV_PGRAPH_INTR_NOTIFY, "NOTIFY" }, ++ {} + }; + +-static struct nouveau_pgraph_object_method nv04_graph_mthds_surf3d[] = { +- { 0x02f8, nv04_graph_mthd_surf3d_clip_h }, +- { 0x02fc, nv04_graph_mthd_surf3d_clip_v }, +- {}, ++static struct nouveau_bitfield nv04_graph_nstatus[] = ++{ ++ { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" }, ++ { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" }, ++ { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" }, ++ { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }, ++ {} + }; + +-struct nouveau_pgraph_object_class nv04_graph_grclass[] = { +- { 0x0038, false, NULL }, /* dvd subpicture */ +- { 0x0039, false, NULL }, /* m2mf */ +- { 0x004b, false, nv04_graph_mthds_nv03_gdirect }, /* nv03 gdirect */ +- { 0x004a, false, nv04_graph_mthds_nv04_gdirect }, /* nv04 gdirect */ +- { 0x001f, false, nv04_graph_mthds_nv01_imageblit }, /* nv01 imageblit */ +- { 0x005f, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 imageblit */ +- { 0x0060, false, nv04_graph_mthds_nv04_iifc }, /* nv04 iifc */ +- { 0x0064, false, NULL }, /* nv05 iifc */ +- { 0x0021, false, nv04_graph_mthds_nv01_ifc }, /* nv01 ifc */ +- { 0x0061, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 ifc */ +- { 0x0065, false, NULL }, /* nv05 ifc */ +- { 0x0036, false, nv04_graph_mthds_nv03_sifc }, /* nv03 sifc */ +- { 0x0076, false, nv04_graph_mthds_nv04_sifc }, /* nv04 sifc */ +- { 0x0066, false, NULL }, /* nv05 sifc */ +- { 0x0037, false, nv04_graph_mthds_nv03_sifm }, /* nv03 sifm */ +- { 0x0077, false, nv04_graph_mthds_nv04_sifm }, /* nv04 sifm */ +- { 0x0030, false, NULL }, /* null */ +- { 0x0042, false, 
NULL }, /* surf2d */ +- { 0x0043, false, NULL }, /* rop */ +- { 0x0012, false, NULL }, /* beta1 */ +- { 0x0072, false, NULL }, /* beta4 */ +- { 0x0019, false, NULL }, /* cliprect */ +- { 0x0018, false, NULL }, /* nv01 pattern */ +- { 0x0044, false, NULL }, /* nv04 pattern */ +- { 0x0052, false, NULL }, /* swzsurf */ +- { 0x0053, false, nv04_graph_mthds_surf3d }, /* surf3d */ +- { 0x0048, false, nv04_graph_mthds_nv03_tex_tri }, /* nv03 tex_tri */ +- { 0x0054, false, NULL }, /* tex_tri */ +- { 0x0055, false, NULL }, /* multitex_tri */ +- { 0x0017, false, NULL }, /* nv01 chroma */ +- { 0x0057, false, NULL }, /* nv04 chroma */ +- { 0x0058, false, NULL }, /* surf_dst */ +- { 0x0059, false, NULL }, /* surf_src */ +- { 0x005a, false, NULL }, /* surf_color */ +- { 0x005b, false, NULL }, /* surf_zeta */ +- { 0x001c, false, nv04_graph_mthds_nv01_shape }, /* nv01 line */ +- { 0x005c, false, nv04_graph_mthds_nv04_shape }, /* nv04 line */ +- { 0x001d, false, nv04_graph_mthds_nv01_shape }, /* nv01 tri */ +- { 0x005d, false, nv04_graph_mthds_nv04_shape }, /* nv04 tri */ +- { 0x001e, false, nv04_graph_mthds_nv01_shape }, /* nv01 rect */ +- { 0x005e, false, nv04_graph_mthds_nv04_shape }, /* nv04 rect */ +- { 0x506e, true, nv04_graph_mthds_sw }, ++struct nouveau_bitfield nv04_graph_nsource[] = ++{ ++ { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" }, ++ { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" }, ++ { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" }, ++ { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" }, ++ { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" }, ++ { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" }, ++ { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" }, ++ { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" }, ++ { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" }, ++ { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" }, ++ { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" }, ++ { 
NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" }, ++ { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" }, ++ { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" }, ++ { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" }, ++ { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" }, ++ { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" }, ++ { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" }, ++ { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" }, + {} + }; + ++static void ++nv04_graph_isr(struct drm_device *dev) ++{ ++ u32 stat; ++ ++ while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) { ++ u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); ++ u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS); ++ u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR); ++ u32 chid = (addr & 0x0f000000) >> 24; ++ u32 subc = (addr & 0x0000e000) >> 13; ++ u32 mthd = (addr & 0x00001ffc); ++ u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA); ++ u32 class = nv_rd32(dev, 0x400180 + subc * 4) & 0xff; ++ u32 show = stat; ++ ++ if (stat & NV_PGRAPH_INTR_NOTIFY) { ++ if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { ++ if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) ++ show &= ~NV_PGRAPH_INTR_NOTIFY; ++ } ++ } ++ ++ if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) { ++ nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH); ++ stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; ++ show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; ++ nv04_graph_context_switch(dev); ++ } ++ ++ nv_wr32(dev, NV03_PGRAPH_INTR, stat); ++ nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001); ++ ++ if (show && nouveau_ratelimit()) { ++ NV_INFO(dev, "PGRAPH -"); ++ nouveau_bitfield_print(nv04_graph_intr, show); ++ printk(" nsource:"); ++ nouveau_bitfield_print(nv04_graph_nsource, nsource); ++ printk(" nstatus:"); ++ nouveau_bitfield_print(nv04_graph_nstatus, nstatus); ++ printk("\n"); ++ NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x " ++ "mthd 0x%04x data 0x%08x\n", ++ chid, subc, class, mthd, data); ++ } ++ } ++} +diff -Naur 
linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv04_instmem.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv04_instmem.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv04_instmem.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv04_instmem.c 2010-11-12 06:18:06.000000000 +0100 +@@ -98,42 +98,66 @@ + } + + int +-nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, +- uint32_t *sz) ++nv04_instmem_suspend(struct drm_device *dev) + { + return 0; + } + + void +-nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) ++nv04_instmem_resume(struct drm_device *dev) + { + } + + int +-nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) ++nv04_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align) + { +- return 0; +-} ++ struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; ++ struct drm_mm_node *ramin = NULL; + +-int +-nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) +-{ ++ do { ++ if (drm_mm_pre_get(&dev_priv->ramin_heap)) ++ return -ENOMEM; ++ ++ spin_lock(&dev_priv->ramin_lock); ++ ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0); ++ if (ramin == NULL) { ++ spin_unlock(&dev_priv->ramin_lock); ++ return -ENOMEM; ++ } ++ ++ ramin = drm_mm_get_block_atomic(ramin, size, align); ++ spin_unlock(&dev_priv->ramin_lock); ++ } while (ramin == NULL); ++ ++ gpuobj->node = ramin; ++ gpuobj->vinst = ramin->start; + return 0; + } + + void +-nv04_instmem_flush(struct drm_device *dev) ++nv04_instmem_put(struct nouveau_gpuobj *gpuobj) + { ++ struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; ++ ++ spin_lock(&dev_priv->ramin_lock); ++ drm_mm_put_block(gpuobj->node); ++ gpuobj->node = NULL; ++ spin_unlock(&dev_priv->ramin_lock); + } + + int +-nv04_instmem_suspend(struct drm_device *dev) ++nv04_instmem_map(struct nouveau_gpuobj *gpuobj) + { ++ gpuobj->pinst = gpuobj->vinst; + return 0; + } + + void 
+-nv04_instmem_resume(struct drm_device *dev) ++nv04_instmem_unmap(struct nouveau_gpuobj *gpuobj) + { + } + ++void ++nv04_instmem_flush(struct drm_device *dev) ++{ ++} +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv04_pm.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv04_pm.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv04_pm.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv04_pm.c 2010-11-12 06:18:06.000000000 +0100 +@@ -76,6 +76,15 @@ + reg += 4; + + nouveau_hw_setpll(dev, reg, &state->calc); ++ ++ if (dev_priv->card_type < NV_30 && reg == NV_PRAMDAC_MPLL_COEFF) { ++ if (dev_priv->card_type == NV_20) ++ nv_mask(dev, 0x1002c4, 0, 1 << 20); ++ ++ /* Reset the DLLs */ ++ nv_mask(dev, 0x1002c0, 0, 1 << 8); ++ } ++ + kfree(state); + } + +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv10_fb.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv10_fb.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv10_fb.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv10_fb.c 2010-11-12 06:18:06.000000000 +0100 +@@ -3,23 +3,109 @@ + #include "nouveau_drv.h" + #include "nouveau_drm.h" + ++static struct drm_mm_node * ++nv20_fb_alloc_tag(struct drm_device *dev, uint32_t size) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; ++ struct drm_mm_node *mem; ++ int ret; ++ ++ ret = drm_mm_pre_get(&pfb->tag_heap); ++ if (ret) ++ return NULL; ++ ++ spin_lock(&dev_priv->tile.lock); ++ mem = drm_mm_search_free(&pfb->tag_heap, size, 0, 0); ++ if (mem) ++ mem = drm_mm_get_block_atomic(mem, size, 0); ++ spin_unlock(&dev_priv->tile.lock); ++ ++ return mem; ++} ++ ++static void ++nv20_fb_free_tag(struct drm_device *dev, struct drm_mm_node *mem) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ spin_lock(&dev_priv->tile.lock); ++ drm_mm_put_block(mem); ++ spin_unlock(&dev_priv->tile.lock); ++} ++ ++void 
++nv10_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr, ++ uint32_t size, uint32_t pitch, uint32_t flags) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; ++ int bpp = (flags & NOUVEAU_GEM_TILE_32BPP ? 32 : 16); ++ ++ tile->addr = addr; ++ tile->limit = max(1u, addr + size) - 1; ++ tile->pitch = pitch; ++ ++ if (dev_priv->card_type == NV_20) { ++ if (flags & NOUVEAU_GEM_TILE_ZETA) { ++ /* ++ * Allocate some of the on-die tag memory, ++ * used to store Z compression meta-data (most ++ * likely just a bitmap determining if a given ++ * tile is compressed or not). ++ */ ++ tile->tag_mem = nv20_fb_alloc_tag(dev, size / 256); ++ ++ if (tile->tag_mem) { ++ /* Enable Z compression */ ++ if (dev_priv->chipset >= 0x25) ++ tile->zcomp = tile->tag_mem->start | ++ (bpp == 16 ? ++ NV25_PFB_ZCOMP_MODE_16 : ++ NV25_PFB_ZCOMP_MODE_32); ++ else ++ tile->zcomp = tile->tag_mem->start | ++ NV20_PFB_ZCOMP_EN | ++ (bpp == 16 ? 
0 : ++ NV20_PFB_ZCOMP_MODE_32); ++ } ++ ++ tile->addr |= 3; ++ } else { ++ tile->addr |= 1; ++ } ++ ++ } else { ++ tile->addr |= 1 << 31; ++ } ++} ++ + void +-nv10_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, +- uint32_t size, uint32_t pitch) ++nv10_fb_free_tile_region(struct drm_device *dev, int i) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; +- uint32_t limit = max(1u, addr + size) - 1; ++ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; + +- if (pitch) { +- if (dev_priv->card_type >= NV_20) +- addr |= 1; +- else +- addr |= 1 << 31; ++ if (tile->tag_mem) { ++ nv20_fb_free_tag(dev, tile->tag_mem); ++ tile->tag_mem = NULL; + } + +- nv_wr32(dev, NV10_PFB_TLIMIT(i), limit); +- nv_wr32(dev, NV10_PFB_TSIZE(i), pitch); +- nv_wr32(dev, NV10_PFB_TILE(i), addr); ++ tile->addr = tile->limit = tile->pitch = tile->zcomp = 0; ++} ++ ++void ++nv10_fb_set_tile_region(struct drm_device *dev, int i) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; ++ ++ nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit); ++ nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch); ++ nv_wr32(dev, NV10_PFB_TILE(i), tile->addr); ++ ++ if (dev_priv->card_type == NV_20) ++ nv_wr32(dev, NV20_PFB_ZCOMP(i), tile->zcomp); + } + + int +@@ -31,9 +117,14 @@ + + pfb->num_tiles = NV10_PFB_TILE__SIZE; + ++ if (dev_priv->card_type == NV_20) ++ drm_mm_init(&pfb->tag_heap, 0, ++ (dev_priv->chipset >= 0x25 ? ++ 64 * 1024 : 32 * 1024)); ++ + /* Turn all the tiling regions off. 
*/ + for (i = 0; i < pfb->num_tiles; i++) +- pfb->set_region_tiling(dev, i, 0, 0, 0); ++ pfb->set_tile_region(dev, i); + + return 0; + } +@@ -41,4 +132,13 @@ + void + nv10_fb_takedown(struct drm_device *dev) + { ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; ++ int i; ++ ++ for (i = 0; i < pfb->num_tiles; i++) ++ pfb->free_tile_region(dev, i); ++ ++ if (dev_priv->card_type == NV_20) ++ drm_mm_takedown(&pfb->tag_heap); + } +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv10_fifo.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv10_fifo.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv10_fifo.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv10_fifo.c 2010-11-12 06:18:06.000000000 +0100 +@@ -73,17 +73,6 @@ + return 0; + } + +-void +-nv10_fifo_destroy_context(struct nouveau_channel *chan) +-{ +- struct drm_device *dev = chan->dev; +- +- nv_wr32(dev, NV04_PFIFO_MODE, +- nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id)); +- +- nouveau_gpuobj_ref(NULL, &chan->ramfc); +-} +- + static void + nv10_fifo_do_load_context(struct drm_device *dev, int chid) + { +@@ -219,6 +208,7 @@ + static void + nv10_fifo_init_intr(struct drm_device *dev) + { ++ nouveau_irq_register(dev, 8, nv04_fifo_isr); + nv_wr32(dev, 0x002100, 0xffffffff); + nv_wr32(dev, 0x002140, 0xffffffff); + } +@@ -241,7 +231,7 @@ + pfifo->reassign(dev, true); + + for (i = 0; i < dev_priv->engine.fifo.channels; i++) { +- if (dev_priv->fifos[i]) { ++ if (dev_priv->channels.ptr[i]) { + uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE); + nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i)); + } +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv10_graph.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv10_graph.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv10_graph.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv10_graph.c 2010-11-12 06:18:06.000000000 
+0100 +@@ -26,6 +26,10 @@ + #include "drm.h" + #include "nouveau_drm.h" + #include "nouveau_drv.h" ++#include "nouveau_util.h" ++ ++static int nv10_graph_register(struct drm_device *); ++static void nv10_graph_isr(struct drm_device *); + + #define NV10_FIFO_NUMBER 32 + +@@ -786,7 +790,7 @@ + return 0; + } + +-void ++static void + nv10_graph_context_switch(struct drm_device *dev) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; +@@ -802,7 +806,7 @@ + + /* Load context for next channel */ + chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f; +- chan = dev_priv->fifos[chid]; ++ chan = dev_priv->channels.ptr[chid]; + if (chan && chan->pgraph_ctx) + nv10_graph_load_context(chan); + +@@ -833,7 +837,7 @@ + if (chid >= dev_priv->engine.fifo.channels) + return NULL; + +- return dev_priv->fifos[chid]; ++ return dev_priv->channels.ptr[chid]; + } + + int nv10_graph_create_context(struct nouveau_channel *chan) +@@ -875,37 +879,54 @@ + + void nv10_graph_destroy_context(struct nouveau_channel *chan) + { ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + struct graph_state *pgraph_ctx = chan->pgraph_ctx; ++ unsigned long flags; + ++ spin_lock_irqsave(&dev_priv->context_switch_lock, flags); ++ pgraph->fifo_access(dev, false); ++ ++ /* Unload the context if it's the currently active one */ ++ if (pgraph->channel(dev) == chan) ++ pgraph->unload_context(dev); ++ ++ /* Free the context resources */ + kfree(pgraph_ctx); + chan->pgraph_ctx = NULL; ++ ++ pgraph->fifo_access(dev, true); ++ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); + } + + void +-nv10_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, +- uint32_t size, uint32_t pitch) ++nv10_graph_set_tile_region(struct drm_device *dev, int i) + { +- uint32_t limit = max(1u, addr + size) - 1; +- +- if (pitch) +- addr |= 1 << 31; ++ struct drm_nouveau_private 
*dev_priv = dev->dev_private; ++ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; + +- nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), limit); +- nv_wr32(dev, NV10_PGRAPH_TSIZE(i), pitch); +- nv_wr32(dev, NV10_PGRAPH_TILE(i), addr); ++ nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), tile->limit); ++ nv_wr32(dev, NV10_PGRAPH_TSIZE(i), tile->pitch); ++ nv_wr32(dev, NV10_PGRAPH_TILE(i), tile->addr); + } + + int nv10_graph_init(struct drm_device *dev) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t tmp; +- int i; ++ int ret, i; + + nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & + ~NV_PMC_ENABLE_PGRAPH); + nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | + NV_PMC_ENABLE_PGRAPH); + ++ ret = nv10_graph_register(dev); ++ if (ret) ++ return ret; ++ ++ nouveau_irq_register(dev, 12, nv10_graph_isr); + nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); + nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); + +@@ -928,7 +949,7 @@ + + /* Turn all the tiling regions off. */ + for (i = 0; i < NV10_PFB_TILE__SIZE; i++) +- nv10_graph_set_region_tiling(dev, i, 0, 0, 0); ++ nv10_graph_set_tile_region(dev, i); + + nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000); + nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000); +@@ -948,11 +969,13 @@ + + void nv10_graph_takedown(struct drm_device *dev) + { ++ nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000); ++ nouveau_irq_unregister(dev, 12); + } + + static int +-nv17_graph_mthd_lma_window(struct nouveau_channel *chan, int grclass, +- int mthd, uint32_t data) ++nv17_graph_mthd_lma_window(struct nouveau_channel *chan, ++ u32 class, u32 mthd, u32 data) + { + struct drm_device *dev = chan->dev; + struct graph_state *ctx = chan->pgraph_ctx; +@@ -1031,8 +1054,8 @@ + } + + static int +-nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, int grclass, +- int mthd, uint32_t data) ++nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, ++ u32 class, u32 mthd, u32 data) + { + struct drm_device *dev = chan->dev; + struct 
drm_nouveau_private *dev_priv = dev->dev_private; +@@ -1050,35 +1073,115 @@ + return 0; + } + +-static struct nouveau_pgraph_object_method nv17_graph_celsius_mthds[] = { +- { 0x1638, nv17_graph_mthd_lma_window }, +- { 0x163c, nv17_graph_mthd_lma_window }, +- { 0x1640, nv17_graph_mthd_lma_window }, +- { 0x1644, nv17_graph_mthd_lma_window }, +- { 0x1658, nv17_graph_mthd_lma_enable }, ++static int ++nv10_graph_register(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ if (dev_priv->engine.graph.registered) ++ return 0; ++ ++ NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */ ++ NVOBJ_CLASS(dev, 0x0030, GR); /* null */ ++ NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */ ++ NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */ ++ NVOBJ_CLASS(dev, 0x005f, GR); /* imageblit */ ++ NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */ ++ NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */ ++ NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */ ++ NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */ ++ NVOBJ_CLASS(dev, 0x0043, GR); /* rop */ ++ NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */ ++ NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */ ++ NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */ ++ NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */ ++ NVOBJ_CLASS(dev, 0x0052, GR); /* swzsurf */ ++ NVOBJ_CLASS(dev, 0x0093, GR); /* surf3d */ ++ NVOBJ_CLASS(dev, 0x0094, GR); /* tex_tri */ ++ NVOBJ_CLASS(dev, 0x0095, GR); /* multitex_tri */ ++ ++ /* celcius */ ++ if (dev_priv->chipset <= 0x10) { ++ NVOBJ_CLASS(dev, 0x0056, GR); ++ } else ++ if (dev_priv->chipset < 0x17 || dev_priv->chipset == 0x1a) { ++ NVOBJ_CLASS(dev, 0x0096, GR); ++ } else { ++ NVOBJ_CLASS(dev, 0x0099, GR); ++ NVOBJ_MTHD (dev, 0x0099, 0x1638, nv17_graph_mthd_lma_window); ++ NVOBJ_MTHD (dev, 0x0099, 0x163c, nv17_graph_mthd_lma_window); ++ NVOBJ_MTHD (dev, 0x0099, 0x1640, nv17_graph_mthd_lma_window); ++ NVOBJ_MTHD (dev, 0x0099, 0x1644, nv17_graph_mthd_lma_window); ++ NVOBJ_MTHD (dev, 0x0099, 0x1658, nv17_graph_mthd_lma_enable); ++ } ++ ++ /* nvsw */ ++ 
NVOBJ_CLASS(dev, 0x506e, SW); ++ NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip); ++ ++ dev_priv->engine.graph.registered = true; ++ return 0; ++} ++ ++struct nouveau_bitfield nv10_graph_intr[] = { ++ { NV_PGRAPH_INTR_NOTIFY, "NOTIFY" }, ++ { NV_PGRAPH_INTR_ERROR, "ERROR" }, + {} + }; + +-struct nouveau_pgraph_object_class nv10_graph_grclass[] = { +- { 0x0030, false, NULL }, /* null */ +- { 0x0039, false, NULL }, /* m2mf */ +- { 0x004a, false, NULL }, /* gdirect */ +- { 0x005f, false, NULL }, /* imageblit */ +- { 0x009f, false, NULL }, /* imageblit (nv12) */ +- { 0x008a, false, NULL }, /* ifc */ +- { 0x0089, false, NULL }, /* sifm */ +- { 0x0062, false, NULL }, /* surf2d */ +- { 0x0043, false, NULL }, /* rop */ +- { 0x0012, false, NULL }, /* beta1 */ +- { 0x0072, false, NULL }, /* beta4 */ +- { 0x0019, false, NULL }, /* cliprect */ +- { 0x0044, false, NULL }, /* pattern */ +- { 0x0052, false, NULL }, /* swzsurf */ +- { 0x0093, false, NULL }, /* surf3d */ +- { 0x0094, false, NULL }, /* tex_tri */ +- { 0x0095, false, NULL }, /* multitex_tri */ +- { 0x0056, false, NULL }, /* celcius (nv10) */ +- { 0x0096, false, NULL }, /* celcius (nv11) */ +- { 0x0099, false, nv17_graph_celsius_mthds }, /* celcius (nv17) */ ++struct nouveau_bitfield nv10_graph_nstatus[] = ++{ ++ { NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" }, ++ { NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" }, ++ { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" }, ++ { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }, + {} + }; ++ ++static void ++nv10_graph_isr(struct drm_device *dev) ++{ ++ u32 stat; ++ ++ while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) { ++ u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); ++ u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS); ++ u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR); ++ u32 chid = (addr & 0x01f00000) >> 20; ++ u32 subc = (addr & 0x00070000) >> 16; ++ u32 mthd = (addr & 0x00001ffc); ++ u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA); 
++ u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff; ++ u32 show = stat; ++ ++ if (stat & NV_PGRAPH_INTR_ERROR) { ++ if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { ++ if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) ++ show &= ~NV_PGRAPH_INTR_ERROR; ++ } ++ } ++ ++ if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) { ++ nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH); ++ stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; ++ show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; ++ nv10_graph_context_switch(dev); ++ } ++ ++ nv_wr32(dev, NV03_PGRAPH_INTR, stat); ++ nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001); ++ ++ if (show && nouveau_ratelimit()) { ++ NV_INFO(dev, "PGRAPH -"); ++ nouveau_bitfield_print(nv10_graph_intr, show); ++ printk(" nsource:"); ++ nouveau_bitfield_print(nv04_graph_nsource, nsource); ++ printk(" nstatus:"); ++ nouveau_bitfield_print(nv10_graph_nstatus, nstatus); ++ printk("\n"); ++ NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x " ++ "mthd 0x%04x data 0x%08x\n", ++ chid, subc, class, mthd, data); ++ } ++ } ++} +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv20_graph.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv20_graph.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv20_graph.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv20_graph.c 2010-11-12 06:18:06.000000000 +0100 +@@ -32,6 +32,10 @@ + #define NV34_GRCTX_SIZE (18140) + #define NV35_36_GRCTX_SIZE (22396) + ++static int nv20_graph_register(struct drm_device *); ++static int nv30_graph_register(struct drm_device *); ++static void nv20_graph_isr(struct drm_device *); ++ + static void + nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) + { +@@ -425,9 +429,21 @@ + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; ++ unsigned long flags; + +- nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); ++ 
spin_lock_irqsave(&dev_priv->context_switch_lock, flags); ++ pgraph->fifo_access(dev, false); ++ ++ /* Unload the context if it's the currently active one */ ++ if (pgraph->channel(dev) == chan) ++ pgraph->unload_context(dev); ++ ++ pgraph->fifo_access(dev, true); ++ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); ++ ++ /* Free the context resources */ + nv_wo32(pgraph->ctx_table, chan->id * 4, 0); ++ nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); + } + + int +@@ -496,24 +512,27 @@ + } + + void +-nv20_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, +- uint32_t size, uint32_t pitch) ++nv20_graph_set_tile_region(struct drm_device *dev, int i) + { +- uint32_t limit = max(1u, addr + size) - 1; +- +- if (pitch) +- addr |= 1; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; + +- nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit); +- nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch); +- nv_wr32(dev, NV20_PGRAPH_TILE(i), addr); ++ nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit); ++ nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch); ++ nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr); + + nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i); +- nv_wr32(dev, NV10_PGRAPH_RDI_DATA, limit); ++ nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->limit); + nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i); +- nv_wr32(dev, NV10_PGRAPH_RDI_DATA, pitch); ++ nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->pitch); + nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i); +- nv_wr32(dev, NV10_PGRAPH_RDI_DATA, addr); ++ nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->addr); ++ ++ if (dev_priv->card_type == NV_20) { ++ nv_wr32(dev, NV20_PGRAPH_ZCOMP(i), tile->zcomp); ++ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i); ++ nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->zcomp); ++ } + } + + int +@@ -560,6 +579,13 @@ + + nv20_graph_rdi(dev); + ++ ret = nv20_graph_register(dev); ++ if (ret) { ++ nouveau_gpuobj_ref(NULL, 
&pgraph->ctx_table); ++ return ret; ++ } ++ ++ nouveau_irq_register(dev, 12, nv20_graph_isr); + nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); + nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); + +@@ -571,16 +597,17 @@ + nv_wr32(dev, 0x40009C , 0x00000040); + + if (dev_priv->chipset >= 0x25) { +- nv_wr32(dev, 0x400890, 0x00080000); ++ nv_wr32(dev, 0x400890, 0x00a8cfff); + nv_wr32(dev, 0x400610, 0x304B1FB6); +- nv_wr32(dev, 0x400B80, 0x18B82880); ++ nv_wr32(dev, 0x400B80, 0x1cbd3883); + nv_wr32(dev, 0x400B84, 0x44000000); + nv_wr32(dev, 0x400098, 0x40000080); + nv_wr32(dev, 0x400B88, 0x000000ff); ++ + } else { +- nv_wr32(dev, 0x400880, 0x00080000); /* 0x0008c7df */ ++ nv_wr32(dev, 0x400880, 0x0008c7df); + nv_wr32(dev, 0x400094, 0x00000005); +- nv_wr32(dev, 0x400B80, 0x45CAA208); /* 0x45eae20e */ ++ nv_wr32(dev, 0x400B80, 0x45eae20e); + nv_wr32(dev, 0x400B84, 0x24000000); + nv_wr32(dev, 0x400098, 0x00000040); + nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00038); +@@ -591,14 +618,8 @@ + + /* Turn all the tiling regions off. 
*/ + for (i = 0; i < NV10_PFB_TILE__SIZE; i++) +- nv20_graph_set_region_tiling(dev, i, 0, 0, 0); ++ nv20_graph_set_tile_region(dev, i); + +- for (i = 0; i < 8; i++) { +- nv_wr32(dev, 0x400980 + i * 4, nv_rd32(dev, 0x100300 + i * 4)); +- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0090 + i * 4); +- nv_wr32(dev, NV10_PGRAPH_RDI_DATA, +- nv_rd32(dev, 0x100300 + i * 4)); +- } + nv_wr32(dev, 0x4009a0, nv_rd32(dev, 0x100324)); + nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA000C); + nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, 0x100324)); +@@ -642,6 +663,9 @@ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + ++ nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000); ++ nouveau_irq_unregister(dev, 12); ++ + nouveau_gpuobj_ref(NULL, &pgraph->ctx_table); + } + +@@ -684,9 +708,16 @@ + return ret; + } + ++ ret = nv30_graph_register(dev); ++ if (ret) { ++ nouveau_gpuobj_ref(NULL, &pgraph->ctx_table); ++ return ret; ++ } ++ + nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, + pgraph->ctx_table->pinst >> 4); + ++ nouveau_irq_register(dev, 12, nv20_graph_isr); + nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); + nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); + +@@ -724,7 +755,7 @@ + + /* Turn all the tiling regions off. 
*/ + for (i = 0; i < NV10_PFB_TILE__SIZE; i++) +- nv20_graph_set_region_tiling(dev, i, 0, 0, 0); ++ nv20_graph_set_tile_region(dev, i); + + nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100); + nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF); +@@ -744,46 +775,125 @@ + return 0; + } + +-struct nouveau_pgraph_object_class nv20_graph_grclass[] = { +- { 0x0030, false, NULL }, /* null */ +- { 0x0039, false, NULL }, /* m2mf */ +- { 0x004a, false, NULL }, /* gdirect */ +- { 0x009f, false, NULL }, /* imageblit (nv12) */ +- { 0x008a, false, NULL }, /* ifc */ +- { 0x0089, false, NULL }, /* sifm */ +- { 0x0062, false, NULL }, /* surf2d */ +- { 0x0043, false, NULL }, /* rop */ +- { 0x0012, false, NULL }, /* beta1 */ +- { 0x0072, false, NULL }, /* beta4 */ +- { 0x0019, false, NULL }, /* cliprect */ +- { 0x0044, false, NULL }, /* pattern */ +- { 0x009e, false, NULL }, /* swzsurf */ +- { 0x0096, false, NULL }, /* celcius */ +- { 0x0097, false, NULL }, /* kelvin (nv20) */ +- { 0x0597, false, NULL }, /* kelvin (nv25) */ +- {} +-}; +- +-struct nouveau_pgraph_object_class nv30_graph_grclass[] = { +- { 0x0030, false, NULL }, /* null */ +- { 0x0039, false, NULL }, /* m2mf */ +- { 0x004a, false, NULL }, /* gdirect */ +- { 0x009f, false, NULL }, /* imageblit (nv12) */ +- { 0x008a, false, NULL }, /* ifc */ +- { 0x038a, false, NULL }, /* ifc (nv30) */ +- { 0x0089, false, NULL }, /* sifm */ +- { 0x0389, false, NULL }, /* sifm (nv30) */ +- { 0x0062, false, NULL }, /* surf2d */ +- { 0x0362, false, NULL }, /* surf2d (nv30) */ +- { 0x0043, false, NULL }, /* rop */ +- { 0x0012, false, NULL }, /* beta1 */ +- { 0x0072, false, NULL }, /* beta4 */ +- { 0x0019, false, NULL }, /* cliprect */ +- { 0x0044, false, NULL }, /* pattern */ +- { 0x039e, false, NULL }, /* swzsurf */ +- { 0x0397, false, NULL }, /* rankine (nv30) */ +- { 0x0497, false, NULL }, /* rankine (nv35) */ +- { 0x0697, false, NULL }, /* rankine (nv34) */ +- {} +-}; ++static int ++nv20_graph_register(struct drm_device *dev) ++{ ++ struct 
drm_nouveau_private *dev_priv = dev->dev_private; + ++ if (dev_priv->engine.graph.registered) ++ return 0; ++ ++ NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */ ++ NVOBJ_CLASS(dev, 0x0030, GR); /* null */ ++ NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */ ++ NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */ ++ NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */ ++ NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */ ++ NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */ ++ NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */ ++ NVOBJ_CLASS(dev, 0x0043, GR); /* rop */ ++ NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */ ++ NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */ ++ NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */ ++ NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */ ++ NVOBJ_CLASS(dev, 0x009e, GR); /* swzsurf */ ++ NVOBJ_CLASS(dev, 0x0096, GR); /* celcius */ ++ ++ /* kelvin */ ++ if (dev_priv->chipset < 0x25) ++ NVOBJ_CLASS(dev, 0x0097, GR); ++ else ++ NVOBJ_CLASS(dev, 0x0597, GR); ++ ++ /* nvsw */ ++ NVOBJ_CLASS(dev, 0x506e, SW); ++ NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip); ++ ++ dev_priv->engine.graph.registered = true; ++ return 0; ++} ++ ++static int ++nv30_graph_register(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ if (dev_priv->engine.graph.registered) ++ return 0; ++ ++ NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */ ++ NVOBJ_CLASS(dev, 0x0030, GR); /* null */ ++ NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */ ++ NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */ ++ NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */ ++ NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */ ++ NVOBJ_CLASS(dev, 0x038a, GR); /* ifc (nv30) */ ++ NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */ ++ NVOBJ_CLASS(dev, 0x0389, GR); /* sifm (nv30) */ ++ NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */ ++ NVOBJ_CLASS(dev, 0x0362, GR); /* surf2d (nv30) */ ++ NVOBJ_CLASS(dev, 0x0043, GR); /* rop */ ++ NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */ ++ NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */ ++ NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */ ++ 
NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */ ++ NVOBJ_CLASS(dev, 0x039e, GR); /* swzsurf */ ++ ++ /* rankine */ ++ if (0x00000003 & (1 << (dev_priv->chipset & 0x0f))) ++ NVOBJ_CLASS(dev, 0x0397, GR); ++ else ++ if (0x00000010 & (1 << (dev_priv->chipset & 0x0f))) ++ NVOBJ_CLASS(dev, 0x0697, GR); ++ else ++ if (0x000001e0 & (1 << (dev_priv->chipset & 0x0f))) ++ NVOBJ_CLASS(dev, 0x0497, GR); ++ ++ /* nvsw */ ++ NVOBJ_CLASS(dev, 0x506e, SW); ++ NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip); ++ ++ dev_priv->engine.graph.registered = true; ++ return 0; ++} ++ ++static void ++nv20_graph_isr(struct drm_device *dev) ++{ ++ u32 stat; ++ ++ while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) { ++ u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); ++ u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS); ++ u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR); ++ u32 chid = (addr & 0x01f00000) >> 20; ++ u32 subc = (addr & 0x00070000) >> 16; ++ u32 mthd = (addr & 0x00001ffc); ++ u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA); ++ u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff; ++ u32 show = stat; ++ ++ if (stat & NV_PGRAPH_INTR_ERROR) { ++ if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { ++ if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) ++ show &= ~NV_PGRAPH_INTR_ERROR; ++ } ++ } ++ ++ nv_wr32(dev, NV03_PGRAPH_INTR, stat); ++ nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001); ++ ++ if (show && nouveau_ratelimit()) { ++ NV_INFO(dev, "PGRAPH -"); ++ nouveau_bitfield_print(nv10_graph_intr, show); ++ printk(" nsource:"); ++ nouveau_bitfield_print(nv04_graph_nsource, nsource); ++ printk(" nstatus:"); ++ nouveau_bitfield_print(nv10_graph_nstatus, nstatus); ++ printk("\n"); ++ NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x " ++ "mthd 0x%04x data 0x%08x\n", ++ chid, subc, class, mthd, data); ++ } ++ } ++} +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv30_fb.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv30_fb.c +--- 
linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv30_fb.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv30_fb.c 2010-11-12 06:18:06.000000000 +0100 +@@ -29,6 +29,27 @@ + #include "nouveau_drv.h" + #include "nouveau_drm.h" + ++void ++nv30_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr, ++ uint32_t size, uint32_t pitch, uint32_t flags) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; ++ ++ tile->addr = addr | 1; ++ tile->limit = max(1u, addr + size) - 1; ++ tile->pitch = pitch; ++} ++ ++void ++nv30_fb_free_tile_region(struct drm_device *dev, int i) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; ++ ++ tile->addr = tile->limit = tile->pitch = 0; ++} ++ + static int + calc_bias(struct drm_device *dev, int k, int i, int j) + { +@@ -65,7 +86,7 @@ + + /* Turn all the tiling regions off. */ + for (i = 0; i < pfb->num_tiles; i++) +- pfb->set_region_tiling(dev, i, 0, 0, 0); ++ pfb->set_tile_region(dev, i); + + /* Init the memory timing regs at 0x10037c/0x1003ac */ + if (dev_priv->chipset == 0x30 || +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv40_fb.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv40_fb.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv40_fb.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv40_fb.c 2010-11-12 06:18:06.000000000 +0100 +@@ -4,26 +4,22 @@ + #include "nouveau_drm.h" + + void +-nv40_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, +- uint32_t size, uint32_t pitch) ++nv40_fb_set_tile_region(struct drm_device *dev, int i) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; +- uint32_t limit = max(1u, addr + size) - 1; +- +- if (pitch) +- addr |= 1; ++ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; + + switch (dev_priv->chipset) { + case 0x40: +- 
nv_wr32(dev, NV10_PFB_TLIMIT(i), limit); +- nv_wr32(dev, NV10_PFB_TSIZE(i), pitch); +- nv_wr32(dev, NV10_PFB_TILE(i), addr); ++ nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit); ++ nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch); ++ nv_wr32(dev, NV10_PFB_TILE(i), tile->addr); + break; + + default: +- nv_wr32(dev, NV40_PFB_TLIMIT(i), limit); +- nv_wr32(dev, NV40_PFB_TSIZE(i), pitch); +- nv_wr32(dev, NV40_PFB_TILE(i), addr); ++ nv_wr32(dev, NV40_PFB_TLIMIT(i), tile->limit); ++ nv_wr32(dev, NV40_PFB_TSIZE(i), tile->pitch); ++ nv_wr32(dev, NV40_PFB_TILE(i), tile->addr); + break; + } + } +@@ -64,7 +60,7 @@ + + /* Turn all the tiling regions off. */ + for (i = 0; i < pfb->num_tiles; i++) +- pfb->set_region_tiling(dev, i, 0, 0, 0); ++ pfb->set_tile_region(dev, i); + + return 0; + } +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv40_fifo.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv40_fifo.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv40_fifo.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv40_fifo.c 2010-11-12 06:18:06.000000000 +0100 +@@ -70,17 +70,6 @@ + return 0; + } + +-void +-nv40_fifo_destroy_context(struct nouveau_channel *chan) +-{ +- struct drm_device *dev = chan->dev; +- +- nv_wr32(dev, NV04_PFIFO_MODE, +- nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id)); +- +- nouveau_gpuobj_ref(NULL, &chan->ramfc); +-} +- + static void + nv40_fifo_do_load_context(struct drm_device *dev, int chid) + { +@@ -279,6 +268,7 @@ + static void + nv40_fifo_init_intr(struct drm_device *dev) + { ++ nouveau_irq_register(dev, 8, nv04_fifo_isr); + nv_wr32(dev, 0x002100, 0xffffffff); + nv_wr32(dev, 0x002140, 0xffffffff); + } +@@ -301,7 +291,7 @@ + pfifo->reassign(dev, true); + + for (i = 0; i < dev_priv->engine.fifo.channels; i++) { +- if (dev_priv->fifos[i]) { ++ if (dev_priv->channels.ptr[i]) { + uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE); + nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i)); + } +diff -Naur 
linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv40_graph.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv40_graph.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv40_graph.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv40_graph.c 2010-11-12 06:18:06.000000000 +0100 +@@ -29,6 +29,9 @@ + #include "nouveau_drv.h" + #include "nouveau_grctx.h" + ++static int nv40_graph_register(struct drm_device *); ++static void nv40_graph_isr(struct drm_device *); ++ + struct nouveau_channel * + nv40_graph_channel(struct drm_device *dev) + { +@@ -42,7 +45,7 @@ + inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4; + + for (i = 0; i < dev_priv->engine.fifo.channels; i++) { +- struct nouveau_channel *chan = dev_priv->fifos[i]; ++ struct nouveau_channel *chan = dev_priv->channels.ptr[i]; + + if (chan && chan->ramin_grctx && + chan->ramin_grctx->pinst == inst) +@@ -79,6 +82,22 @@ + void + nv40_graph_destroy_context(struct nouveau_channel *chan) + { ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&dev_priv->context_switch_lock, flags); ++ pgraph->fifo_access(dev, false); ++ ++ /* Unload the context if it's the currently active one */ ++ if (pgraph->channel(dev) == chan) ++ pgraph->unload_context(dev); ++ ++ pgraph->fifo_access(dev, true); ++ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); ++ ++ /* Free the context resources */ + nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); + } + +@@ -174,43 +193,39 @@ + } + + void +-nv40_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, +- uint32_t size, uint32_t pitch) ++nv40_graph_set_tile_region(struct drm_device *dev, int i) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; +- uint32_t limit = max(1u, addr + size) - 1; +- +- if (pitch) +- addr |= 1; ++ struct nouveau_tile_reg *tile = 
&dev_priv->tile.reg[i]; + + switch (dev_priv->chipset) { + case 0x44: + case 0x4a: + case 0x4e: +- nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch); +- nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit); +- nv_wr32(dev, NV20_PGRAPH_TILE(i), addr); ++ nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch); ++ nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit); ++ nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr); + break; + + case 0x46: + case 0x47: + case 0x49: + case 0x4b: +- nv_wr32(dev, NV47_PGRAPH_TSIZE(i), pitch); +- nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), limit); +- nv_wr32(dev, NV47_PGRAPH_TILE(i), addr); +- nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch); +- nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit); +- nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr); ++ nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch); ++ nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit); ++ nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr); ++ nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch); ++ nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit); ++ nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr); + break; + + default: +- nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch); +- nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit); +- nv_wr32(dev, NV20_PGRAPH_TILE(i), addr); +- nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch); +- nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit); +- nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr); ++ nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch); ++ nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit); ++ nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr); ++ nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch); ++ nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit); ++ nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr); + break; + } + } +@@ -232,7 +247,7 @@ + struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; + struct nouveau_grctx ctx = {}; + uint32_t vramsz, *cp; +- int i, j; ++ int ret, i, j; + + nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & + ~NV_PMC_ENABLE_PGRAPH); +@@ -256,9 +271,14 @@ + + kfree(cp); + ++ ret = 
nv40_graph_register(dev); ++ if (ret) ++ return ret; ++ + /* No context present currently */ + nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000); + ++ nouveau_irq_register(dev, 12, nv40_graph_isr); + nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); + nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF); + +@@ -347,7 +367,7 @@ + + /* Turn all the tiling regions off. */ + for (i = 0; i < pfb->num_tiles; i++) +- nv40_graph_set_region_tiling(dev, i, 0, 0, 0); ++ nv40_graph_set_tile_region(dev, i); + + /* begin RAM config */ + vramsz = pci_resource_len(dev->pdev, 0) - 1; +@@ -390,26 +410,111 @@ + + void nv40_graph_takedown(struct drm_device *dev) + { ++ nouveau_irq_unregister(dev, 12); ++} ++ ++static int ++nv40_graph_register(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ if (dev_priv->engine.graph.registered) ++ return 0; ++ ++ NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */ ++ NVOBJ_CLASS(dev, 0x0030, GR); /* null */ ++ NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */ ++ NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */ ++ NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */ ++ NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */ ++ NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */ ++ NVOBJ_CLASS(dev, 0x3089, GR); /* sifm (nv40) */ ++ NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */ ++ NVOBJ_CLASS(dev, 0x3062, GR); /* surf2d (nv40) */ ++ NVOBJ_CLASS(dev, 0x0043, GR); /* rop */ ++ NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */ ++ NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */ ++ NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */ ++ NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */ ++ NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */ ++ ++ /* curie */ ++ if (dev_priv->chipset >= 0x60 || ++ 0x00005450 & (1 << (dev_priv->chipset & 0x0f))) ++ NVOBJ_CLASS(dev, 0x4497, GR); ++ else ++ NVOBJ_CLASS(dev, 0x4097, GR); ++ ++ /* nvsw */ ++ NVOBJ_CLASS(dev, 0x506e, SW); ++ NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip); ++ ++ dev_priv->engine.graph.registered = true; ++ return 0; ++} ++ ++static int 
++nv40_graph_isr_chid(struct drm_device *dev, u32 inst) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_channel *chan; ++ unsigned long flags; ++ int i; ++ ++ spin_lock_irqsave(&dev_priv->channels.lock, flags); ++ for (i = 0; i < dev_priv->engine.fifo.channels; i++) { ++ chan = dev_priv->channels.ptr[i]; ++ if (!chan || !chan->ramin_grctx) ++ continue; ++ ++ if (inst == chan->ramin_grctx->pinst) ++ break; ++ } ++ spin_unlock_irqrestore(&dev_priv->channels.lock, flags); ++ return i; + } + +-struct nouveau_pgraph_object_class nv40_graph_grclass[] = { +- { 0x0030, false, NULL }, /* null */ +- { 0x0039, false, NULL }, /* m2mf */ +- { 0x004a, false, NULL }, /* gdirect */ +- { 0x009f, false, NULL }, /* imageblit (nv12) */ +- { 0x008a, false, NULL }, /* ifc */ +- { 0x0089, false, NULL }, /* sifm */ +- { 0x3089, false, NULL }, /* sifm (nv40) */ +- { 0x0062, false, NULL }, /* surf2d */ +- { 0x3062, false, NULL }, /* surf2d (nv40) */ +- { 0x0043, false, NULL }, /* rop */ +- { 0x0012, false, NULL }, /* beta1 */ +- { 0x0072, false, NULL }, /* beta4 */ +- { 0x0019, false, NULL }, /* cliprect */ +- { 0x0044, false, NULL }, /* pattern */ +- { 0x309e, false, NULL }, /* swzsurf */ +- { 0x4097, false, NULL }, /* curie (nv40) */ +- { 0x4497, false, NULL }, /* curie (nv44) */ +- {} +-}; ++static void ++nv40_graph_isr(struct drm_device *dev) ++{ ++ u32 stat; ++ ++ while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) { ++ u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); ++ u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS); ++ u32 inst = (nv_rd32(dev, 0x40032c) & 0x000fffff) << 4; ++ u32 chid = nv40_graph_isr_chid(dev, inst); ++ u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR); ++ u32 subc = (addr & 0x00070000) >> 16; ++ u32 mthd = (addr & 0x00001ffc); ++ u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA); ++ u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xffff; ++ u32 show = stat; ++ ++ if (stat & NV_PGRAPH_INTR_ERROR) { ++ if (nsource & 
NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { ++ if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) ++ show &= ~NV_PGRAPH_INTR_ERROR; ++ } else ++ if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) { ++ nv_mask(dev, 0x402000, 0, 0); ++ } ++ } ++ ++ nv_wr32(dev, NV03_PGRAPH_INTR, stat); ++ nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001); + ++ if (show && nouveau_ratelimit()) { ++ NV_INFO(dev, "PGRAPH -"); ++ nouveau_bitfield_print(nv10_graph_intr, show); ++ printk(" nsource:"); ++ nouveau_bitfield_print(nv04_graph_nsource, nsource); ++ printk(" nstatus:"); ++ nouveau_bitfield_print(nv10_graph_nstatus, nstatus); ++ printk("\n"); ++ NV_INFO(dev, "PGRAPH - ch %d (0x%08x) subc %d " ++ "class 0x%04x mthd 0x%04x data 0x%08x\n", ++ chid, inst, subc, class, mthd, data); ++ } ++ } ++} +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv50_calc.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv50_calc.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv50_calc.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv50_calc.c 2010-11-12 06:18:06.000000000 +0100 +@@ -51,24 +51,28 @@ + int *N, int *fN, int *M, int *P) + { + fixed20_12 fb_div, a, b; ++ u32 refclk = pll->refclk / 10; ++ u32 max_vco_freq = pll->vco1.maxfreq / 10; ++ u32 max_vco_inputfreq = pll->vco1.max_inputfreq / 10; ++ clk /= 10; + +- *P = pll->vco1.maxfreq / clk; ++ *P = max_vco_freq / clk; + if (*P > pll->max_p) + *P = pll->max_p; + if (*P < pll->min_p) + *P = pll->min_p; + +- /* *M = ceil(refclk / pll->vco.max_inputfreq); */ +- a.full = dfixed_const(pll->refclk); +- b.full = dfixed_const(pll->vco1.max_inputfreq); ++ /* *M = floor((refclk + max_vco_inputfreq) / max_vco_inputfreq); */ ++ a.full = dfixed_const(refclk + max_vco_inputfreq); ++ b.full = dfixed_const(max_vco_inputfreq); + a.full = dfixed_div(a, b); +- a.full = dfixed_ceil(a); ++ a.full = dfixed_floor(a); + *M = dfixed_trunc(a); + + /* fb_div = (vco * *M) / refclk; */ + fb_div.full = dfixed_const(clk * *P); + 
fb_div.full = dfixed_mul(fb_div, a); +- a.full = dfixed_const(pll->refclk); ++ a.full = dfixed_const(refclk); + fb_div.full = dfixed_div(fb_div, a); + + /* *N = floor(fb_div); */ +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv50_crtc.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv50_crtc.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv50_crtc.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv50_crtc.c 2010-11-12 06:18:06.000000000 +0100 +@@ -437,6 +437,7 @@ + .cursor_move = nv50_crtc_cursor_move, + .gamma_set = nv50_crtc_gamma_set, + .set_config = drm_crtc_helper_set_config, ++ .page_flip = nouveau_crtc_page_flip, + .destroy = nv50_crtc_destroy, + }; + +@@ -453,6 +454,7 @@ + + NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); + ++ drm_vblank_pre_modeset(dev, nv_crtc->index); + nv50_crtc_blank(nv_crtc, true); + } + +@@ -468,6 +470,7 @@ + NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); + + nv50_crtc_blank(nv_crtc, false); ++ drm_vblank_post_modeset(dev, nv_crtc->index); + + ret = RING_SPACE(evo, 2); + if (ret) { +@@ -546,7 +549,7 @@ + } + + nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base; +- nv_crtc->fb.tile_flags = fb->nvbo->tile_flags; ++ nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo); + nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8; + if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) { + ret = RING_SPACE(evo, 2); +@@ -578,7 +581,7 @@ + fb->nvbo->tile_mode); + } + if (dev_priv->chipset == 0x50) +- OUT_RING(evo, (fb->nvbo->tile_flags << 8) | format); ++ OUT_RING(evo, (nv_crtc->fb.tile_flags << 8) | format); + else + OUT_RING(evo, format); + +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv50_display.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv50_display.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv50_display.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv50_display.c 2010-11-12 06:18:06.000000000 +0100 +@@ 
-33,6 +33,8 @@ + #include "nouveau_ramht.h" + #include "drm_crtc_helper.h" + ++static void nv50_display_isr(struct drm_device *); ++ + static inline int + nv50_sor_nr(struct drm_device *dev) + { +@@ -46,159 +48,6 @@ + return 4; + } + +-static void +-nv50_evo_channel_del(struct nouveau_channel **pchan) +-{ +- struct nouveau_channel *chan = *pchan; +- +- if (!chan) +- return; +- *pchan = NULL; +- +- nouveau_gpuobj_channel_takedown(chan); +- nouveau_bo_unmap(chan->pushbuf_bo); +- nouveau_bo_ref(NULL, &chan->pushbuf_bo); +- +- if (chan->user) +- iounmap(chan->user); +- +- kfree(chan); +-} +- +-static int +-nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name, +- uint32_t tile_flags, uint32_t magic_flags, +- uint32_t offset, uint32_t limit) +-{ +- struct drm_nouveau_private *dev_priv = evo->dev->dev_private; +- struct drm_device *dev = evo->dev; +- struct nouveau_gpuobj *obj = NULL; +- int ret; +- +- ret = nouveau_gpuobj_new(dev, evo, 6*4, 32, 0, &obj); +- if (ret) +- return ret; +- obj->engine = NVOBJ_ENGINE_DISPLAY; +- +- nv_wo32(obj, 0, (tile_flags << 22) | (magic_flags << 16) | class); +- nv_wo32(obj, 4, limit); +- nv_wo32(obj, 8, offset); +- nv_wo32(obj, 12, 0x00000000); +- nv_wo32(obj, 16, 0x00000000); +- if (dev_priv->card_type < NV_C0) +- nv_wo32(obj, 20, 0x00010000); +- else +- nv_wo32(obj, 20, 0x00020000); +- dev_priv->engine.instmem.flush(dev); +- +- ret = nouveau_ramht_insert(evo, name, obj); +- nouveau_gpuobj_ref(NULL, &obj); +- if (ret) { +- return ret; +- } +- +- return 0; +-} +- +-static int +-nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan) +-{ +- struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nouveau_gpuobj *ramht = NULL; +- struct nouveau_channel *chan; +- int ret; +- +- chan = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL); +- if (!chan) +- return -ENOMEM; +- *pchan = chan; +- +- chan->id = -1; +- chan->dev = dev; +- chan->user_get = 4; +- chan->user_put = 0; +- +- ret = 
nouveau_gpuobj_new(dev, NULL, 32768, 0x1000, +- NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin); +- if (ret) { +- NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret); +- nv50_evo_channel_del(pchan); +- return ret; +- } +- +- ret = drm_mm_init(&chan->ramin_heap, 0, 32768); +- if (ret) { +- NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret); +- nv50_evo_channel_del(pchan); +- return ret; +- } +- +- ret = nouveau_gpuobj_new(dev, chan, 4096, 16, 0, &ramht); +- if (ret) { +- NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret); +- nv50_evo_channel_del(pchan); +- return ret; +- } +- +- ret = nouveau_ramht_new(dev, ramht, &chan->ramht); +- nouveau_gpuobj_ref(NULL, &ramht); +- if (ret) { +- nv50_evo_channel_del(pchan); +- return ret; +- } +- +- if (dev_priv->chipset != 0x50) { +- ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB16, 0x70, 0x19, +- 0, 0xffffffff); +- if (ret) { +- nv50_evo_channel_del(pchan); +- return ret; +- } +- +- +- ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB32, 0x7a, 0x19, +- 0, 0xffffffff); +- if (ret) { +- nv50_evo_channel_del(pchan); +- return ret; +- } +- } +- +- ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19, +- 0, dev_priv->vram_size); +- if (ret) { +- nv50_evo_channel_del(pchan); +- return ret; +- } +- +- ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, +- false, true, &chan->pushbuf_bo); +- if (ret == 0) +- ret = nouveau_bo_pin(chan->pushbuf_bo, TTM_PL_FLAG_VRAM); +- if (ret) { +- NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret); +- nv50_evo_channel_del(pchan); +- return ret; +- } +- +- ret = nouveau_bo_map(chan->pushbuf_bo); +- if (ret) { +- NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret); +- nv50_evo_channel_del(pchan); +- return ret; +- } +- +- chan->user = ioremap(pci_resource_start(dev->pdev, 0) + +- NV50_PDISPLAY_USER(0), PAGE_SIZE); +- if (!chan->user) { +- NV_ERROR(dev, "Error mapping EVO control regs.\n"); +- nv50_evo_channel_del(pchan); +- return -ENOMEM; +- } +- +- 
return 0; +-} +- + int + nv50_display_early_init(struct drm_device *dev) + { +@@ -214,17 +63,16 @@ + nv50_display_init(struct drm_device *dev) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; + struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; +- struct nouveau_channel *evo = dev_priv->evo; + struct drm_connector *connector; +- uint32_t val, ram_amount; +- uint64_t start; ++ struct nouveau_channel *evo; + int ret, i; ++ u32 val; + + NV_DEBUG_KMS(dev, "\n"); + + nv_wr32(dev, 0x00610184, nv_rd32(dev, 0x00614004)); ++ + /* + * I think the 0x006101XX range is some kind of main control area + * that enables things. +@@ -240,16 +88,19 @@ + val = nv_rd32(dev, 0x0061610c + (i * 0x800)); + nv_wr32(dev, 0x0061019c + (i * 0x10), val); + } ++ + /* DAC */ + for (i = 0; i < 3; i++) { + val = nv_rd32(dev, 0x0061a000 + (i * 0x800)); + nv_wr32(dev, 0x006101d0 + (i * 0x04), val); + } ++ + /* SOR */ + for (i = 0; i < nv50_sor_nr(dev); i++) { + val = nv_rd32(dev, 0x0061c000 + (i * 0x800)); + nv_wr32(dev, 0x006101e0 + (i * 0x04), val); + } ++ + /* EXT */ + for (i = 0; i < 3; i++) { + val = nv_rd32(dev, 0x0061e000 + (i * 0x800)); +@@ -262,17 +113,6 @@ + nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001); + } + +- /* This used to be in crtc unblank, but seems out of place there. */ +- nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0); +- /* RAM is clamped to 256 MiB. */ +- ram_amount = dev_priv->vram_size; +- NV_DEBUG_KMS(dev, "ram_amount %d\n", ram_amount); +- if (ram_amount > 256*1024*1024) +- ram_amount = 256*1024*1024; +- nv_wr32(dev, NV50_PDISPLAY_RAM_AMOUNT, ram_amount - 1); +- nv_wr32(dev, NV50_PDISPLAY_UNK_388, 0x150000); +- nv_wr32(dev, NV50_PDISPLAY_UNK_38C, 0); +- + /* The precise purpose is unknown, i suspect it has something to do + * with text mode. 
+ */ +@@ -287,37 +127,6 @@ + } + } + +- /* taken from nv bug #12637, attempts to un-wedge the hw if it's +- * stuck in some unspecified state +- */ +- start = ptimer->read(dev); +- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x2b00); +- while ((val = nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0))) & 0x1e0000) { +- if ((val & 0x9f0000) == 0x20000) +- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), +- val | 0x800000); +- +- if ((val & 0x3f0000) == 0x30000) +- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), +- val | 0x200000); +- +- if (ptimer->read(dev) - start > 1000000000ULL) { +- NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) != 0\n"); +- NV_ERROR(dev, "0x610200 = 0x%08x\n", val); +- return -EBUSY; +- } +- } +- +- nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, NV50_PDISPLAY_CTRL_STATE_ENABLE); +- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1000b03); +- if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0), +- 0x40000000, 0x40000000)) { +- NV_ERROR(dev, "timeout: (0x610200 & 0x40000000) == 0x40000000\n"); +- NV_ERROR(dev, "0x610200 = 0x%08x\n", +- nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0))); +- return -EBUSY; +- } +- + for (i = 0; i < 2; i++) { + nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000); + if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), +@@ -341,39 +150,31 @@ + } + } + +- nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9); ++ nv_wr32(dev, NV50_PDISPLAY_PIO_CTRL, 0x00000000); ++ nv_mask(dev, NV50_PDISPLAY_INTR_0, 0x00000000, 0x00000000); ++ nv_wr32(dev, NV50_PDISPLAY_INTR_EN_0, 0x00000000); ++ nv_mask(dev, NV50_PDISPLAY_INTR_1, 0x00000000, 0x00000000); ++ nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1, ++ NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 | ++ NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 | ++ NV50_PDISPLAY_INTR_EN_1_CLK_UNK40); + +- /* initialise fifo */ +- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_DMA_CB(0), +- ((evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT) >> 8) | +- NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM | +- NV50_PDISPLAY_CHANNEL_DMA_CB_VALID); +- nv_wr32(dev, 
NV50_PDISPLAY_CHANNEL_UNK2(0), 0x00010000); +- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK3(0), 0x00000002); +- if (!nv_wait(dev, 0x610200, 0x80000000, 0x00000000)) { +- NV_ERROR(dev, "timeout: (0x610200 & 0x80000000) == 0\n"); +- NV_ERROR(dev, "0x610200 = 0x%08x\n", nv_rd32(dev, 0x610200)); +- return -EBUSY; +- } +- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), +- (nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)) & ~0x00000003) | +- NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED); +- nv_wr32(dev, NV50_PDISPLAY_USER_PUT(0), 0); +- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x01000003 | +- NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED); +- nv_wr32(dev, 0x610300, nv_rd32(dev, 0x610300) & ~1); +- +- evo->dma.max = (4096/4) - 2; +- evo->dma.put = 0; +- evo->dma.cur = evo->dma.put; +- evo->dma.free = evo->dma.max - evo->dma.cur; ++ /* enable hotplug interrupts */ ++ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { ++ struct nouveau_connector *conn = nouveau_connector(connector); ++ ++ if (conn->dcb->gpio_tag == 0xff) ++ continue; ++ ++ pgpio->irq_enable(dev, conn->dcb->gpio_tag, true); ++ } + +- ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS); ++ ret = nv50_evo_init(dev); + if (ret) + return ret; ++ evo = dev_priv->evo; + +- for (i = 0; i < NOUVEAU_DMA_SKIPS; i++) +- OUT_RING(evo, 0); ++ nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9); + + ret = RING_SPACE(evo, 11); + if (ret) +@@ -393,21 +194,6 @@ + if (!nv_wait(dev, 0x640004, 0xffffffff, evo->dma.put << 2)) + NV_ERROR(dev, "evo pushbuf stalled\n"); + +- /* enable clock change interrupts. 
*/ +- nv_wr32(dev, 0x610028, 0x00010001); +- nv_wr32(dev, NV50_PDISPLAY_INTR_EN, (NV50_PDISPLAY_INTR_EN_CLK_UNK10 | +- NV50_PDISPLAY_INTR_EN_CLK_UNK20 | +- NV50_PDISPLAY_INTR_EN_CLK_UNK40)); +- +- /* enable hotplug interrupts */ +- list_for_each_entry(connector, &dev->mode_config.connector_list, head) { +- struct nouveau_connector *conn = nouveau_connector(connector); +- +- if (conn->dcb->gpio_tag == 0xff) +- continue; +- +- pgpio->irq_enable(dev, conn->dcb->gpio_tag, true); +- } + + return 0; + } +@@ -452,13 +238,7 @@ + } + } + +- nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0); +- nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, 0); +- if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1e0000, 0)) { +- NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) == 0\n"); +- NV_ERROR(dev, "0x610200 = 0x%08x\n", +- nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0))); +- } ++ nv50_evo_fini(dev); + + for (i = 0; i < 3; i++) { + if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i), +@@ -470,7 +250,7 @@ + } + + /* disable interrupts. 
*/ +- nv_wr32(dev, NV50_PDISPLAY_INTR_EN, 0x00000000); ++ nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1, 0x00000000); + + /* disable hotplug interrupts */ + nv_wr32(dev, 0xe054, 0xffffffff); +@@ -508,13 +288,6 @@ + + dev->mode_config.fb_base = dev_priv->fb_phys; + +- /* Create EVO channel */ +- ret = nv50_evo_channel_new(dev, &dev_priv->evo); +- if (ret) { +- NV_ERROR(dev, "Error creating EVO channel: %d\n", ret); +- return ret; +- } +- + /* Create CRTC objects */ + for (i = 0; i < 2; i++) + nv50_crtc_create(dev, i); +@@ -557,6 +330,9 @@ + } + } + ++ INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh); ++ nouveau_irq_register(dev, 26, nv50_display_isr); ++ + ret = nv50_display_init(dev); + if (ret) { + nv50_display_destroy(dev); +@@ -569,14 +345,12 @@ + void + nv50_display_destroy(struct drm_device *dev) + { +- struct drm_nouveau_private *dev_priv = dev->dev_private; +- + NV_DEBUG_KMS(dev, "\n"); + + drm_mode_config_cleanup(dev); + + nv50_display_disable(dev); +- nv50_evo_channel_del(&dev_priv->evo); ++ nouveau_irq_unregister(dev, 26); + } + + static u16 +@@ -660,32 +434,32 @@ + nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nouveau_channel *chan; +- struct list_head *entry, *tmp; ++ struct nouveau_channel *chan, *tmp; + +- list_for_each_safe(entry, tmp, &dev_priv->vbl_waiting) { +- chan = list_entry(entry, struct nouveau_channel, nvsw.vbl_wait); ++ list_for_each_entry_safe(chan, tmp, &dev_priv->vbl_waiting, ++ nvsw.vbl_wait) { ++ if (chan->nvsw.vblsem_head != crtc) ++ continue; + + nouveau_bo_wr32(chan->notifier_bo, chan->nvsw.vblsem_offset, + chan->nvsw.vblsem_rval); + list_del(&chan->nvsw.vbl_wait); ++ drm_vblank_put(dev, crtc); + } ++ ++ drm_handle_vblank(dev, crtc); + } + + static void + nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr) + { +- intr &= NV50_PDISPLAY_INTR_1_VBLANK_CRTC; +- + if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0) + 
nv50_display_vblank_crtc_handler(dev, 0); + + if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1) + nv50_display_vblank_crtc_handler(dev, 1); + +- nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev, +- NV50_PDISPLAY_INTR_EN) & ~intr); +- nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr); ++ nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_VBLANK_CRTC); + } + + static void +@@ -1011,91 +785,31 @@ + static void + nv50_display_error_handler(struct drm_device *dev) + { +- uint32_t addr, data; +- +- nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000); +- addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR); +- data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA); +- +- NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x (0x%04x 0x%02x)\n", +- 0, addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf); +- +- nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR, 0x90000000); +-} +- +-void +-nv50_display_irq_hotplug_bh(struct work_struct *work) +-{ +- struct drm_nouveau_private *dev_priv = +- container_of(work, struct drm_nouveau_private, hpd_work); +- struct drm_device *dev = dev_priv->dev; +- struct drm_connector *connector; +- const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; +- uint32_t unplug_mask, plug_mask, change_mask; +- uint32_t hpd0, hpd1 = 0; +- +- hpd0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050); +- if (dev_priv->chipset >= 0x90) +- hpd1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070); +- +- plug_mask = (hpd0 & 0x0000ffff) | (hpd1 << 16); +- unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000); +- change_mask = plug_mask | unplug_mask; +- +- list_for_each_entry(connector, &dev->mode_config.connector_list, head) { +- struct drm_encoder_helper_funcs *helper; +- struct nouveau_connector *nv_connector = +- nouveau_connector(connector); +- struct nouveau_encoder *nv_encoder; +- struct dcb_gpio_entry *gpio; +- uint32_t reg; +- bool plugged; ++ u32 channels = (nv_rd32(dev, NV50_PDISPLAY_INTR_0) & 0x001f0000) >> 16; ++ u32 addr, data; ++ int chid; ++ ++ for (chid = 0; chid < 5; chid++) { ++ if 
(!(channels & (1 << chid))) ++ continue; ++ ++ nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000 << chid); ++ addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR(chid)); ++ data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA(chid)); ++ NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x " ++ "(0x%04x 0x%02x)\n", chid, ++ addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf); + +- if (!nv_connector->dcb) +- continue; +- +- gpio = nouveau_bios_gpio_entry(dev, nv_connector->dcb->gpio_tag); +- if (!gpio || !(change_mask & (1 << gpio->line))) +- continue; +- +- reg = nv_rd32(dev, gpio_reg[gpio->line >> 3]); +- plugged = !!(reg & (4 << ((gpio->line & 7) << 2))); +- NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un", +- drm_get_connector_name(connector)) ; +- +- if (!connector->encoder || !connector->encoder->crtc || +- !connector->encoder->crtc->enabled) +- continue; +- nv_encoder = nouveau_encoder(connector->encoder); +- helper = connector->encoder->helper_private; +- +- if (nv_encoder->dcb->type != OUTPUT_DP) +- continue; +- +- if (plugged) +- helper->dpms(connector->encoder, DRM_MODE_DPMS_ON); +- else +- helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF); ++ nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR(chid), 0x90000000); + } +- +- nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054)); +- if (dev_priv->chipset >= 0x90) +- nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074)); +- +- drm_helper_hpd_irq_event(dev); + } + +-void +-nv50_display_irq_handler(struct drm_device *dev) ++static void ++nv50_display_isr(struct drm_device *dev) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t delayed = 0; + +- if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) { +- if (!work_pending(&dev_priv->hpd_work)) +- queue_work(dev_priv->wq, &dev_priv->hpd_work); +- } +- + while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) { + uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0); + uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1); +@@ -1106,9 +820,9 @@ + if (!intr0 && !(intr1 & 
~delayed)) + break; + +- if (intr0 & 0x00010000) { ++ if (intr0 & 0x001f0000) { + nv50_display_error_handler(dev); +- intr0 &= ~0x00010000; ++ intr0 &= ~0x001f0000; + } + + if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) { +@@ -1139,4 +853,3 @@ + } + } + } +- +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv50_display.h linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv50_display.h +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv50_display.h 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv50_display.h 2010-11-12 06:18:06.000000000 +0100 +@@ -35,9 +35,7 @@ + #include "nouveau_crtc.h" + #include "nv50_evo.h" + +-void nv50_display_irq_handler(struct drm_device *dev); + void nv50_display_irq_handler_bh(struct work_struct *work); +-void nv50_display_irq_hotplug_bh(struct work_struct *work); + int nv50_display_early_init(struct drm_device *dev); + void nv50_display_late_takedown(struct drm_device *dev); + int nv50_display_create(struct drm_device *dev); +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv50_evo.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv50_evo.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv50_evo.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv50_evo.c 2010-11-12 06:18:06.000000000 +0100 +@@ -0,0 +1,318 @@ ++/* ++ * Copyright 2010 Red Hat Inc. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. ++ * ++ * Authors: Ben Skeggs ++ */ ++ ++#include "drmP.h" ++ ++#include "nouveau_drv.h" ++#include "nouveau_dma.h" ++#include "nouveau_ramht.h" ++ ++static void ++nv50_evo_channel_del(struct nouveau_channel **pevo) ++{ ++ struct drm_nouveau_private *dev_priv; ++ struct nouveau_channel *evo = *pevo; ++ ++ if (!evo) ++ return; ++ *pevo = NULL; ++ ++ dev_priv = evo->dev->dev_private; ++ dev_priv->evo_alloc &= ~(1 << evo->id); ++ ++ nouveau_gpuobj_channel_takedown(evo); ++ nouveau_bo_unmap(evo->pushbuf_bo); ++ nouveau_bo_ref(NULL, &evo->pushbuf_bo); ++ ++ if (evo->user) ++ iounmap(evo->user); ++ ++ kfree(evo); ++} ++ ++int ++nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 class, u32 name, ++ u32 tile_flags, u32 magic_flags, u32 offset, u32 limit) ++{ ++ struct drm_nouveau_private *dev_priv = evo->dev->dev_private; ++ struct drm_device *dev = evo->dev; ++ struct nouveau_gpuobj *obj = NULL; ++ int ret; ++ ++ ret = nouveau_gpuobj_new(dev, dev_priv->evo, 6*4, 32, 0, &obj); ++ if (ret) ++ return ret; ++ obj->engine = NVOBJ_ENGINE_DISPLAY; ++ ++ nv_wo32(obj, 0, (tile_flags << 22) | (magic_flags << 16) | class); ++ nv_wo32(obj, 4, limit); ++ nv_wo32(obj, 8, offset); ++ nv_wo32(obj, 12, 0x00000000); ++ nv_wo32(obj, 16, 0x00000000); ++ if (dev_priv->card_type < NV_C0) ++ nv_wo32(obj, 20, 0x00010000); ++ else ++ nv_wo32(obj, 20, 0x00020000); ++ dev_priv->engine.instmem.flush(dev); ++ ++ ret = nouveau_ramht_insert(evo, name, obj); ++ nouveau_gpuobj_ref(NULL, &obj); ++ if (ret) { 
++ return ret; ++ } ++ ++ return 0; ++} ++ ++static int ++nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pevo) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_channel *evo; ++ int ret; ++ ++ evo = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL); ++ if (!evo) ++ return -ENOMEM; ++ *pevo = evo; ++ ++ for (evo->id = 0; evo->id < 5; evo->id++) { ++ if (dev_priv->evo_alloc & (1 << evo->id)) ++ continue; ++ ++ dev_priv->evo_alloc |= (1 << evo->id); ++ break; ++ } ++ ++ if (evo->id == 5) { ++ kfree(evo); ++ return -ENODEV; ++ } ++ ++ evo->dev = dev; ++ evo->user_get = 4; ++ evo->user_put = 0; ++ ++ ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, ++ false, true, &evo->pushbuf_bo); ++ if (ret == 0) ++ ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM); ++ if (ret) { ++ NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret); ++ nv50_evo_channel_del(pevo); ++ return ret; ++ } ++ ++ ret = nouveau_bo_map(evo->pushbuf_bo); ++ if (ret) { ++ NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret); ++ nv50_evo_channel_del(pevo); ++ return ret; ++ } ++ ++ evo->user = ioremap(pci_resource_start(dev->pdev, 0) + ++ NV50_PDISPLAY_USER(evo->id), PAGE_SIZE); ++ if (!evo->user) { ++ NV_ERROR(dev, "Error mapping EVO control regs.\n"); ++ nv50_evo_channel_del(pevo); ++ return -ENOMEM; ++ } ++ ++ /* bind primary evo channel's ramht to the channel */ ++ if (dev_priv->evo && evo != dev_priv->evo) ++ nouveau_ramht_ref(dev_priv->evo->ramht, &evo->ramht, NULL); ++ ++ return 0; ++} ++ ++static int ++nv50_evo_channel_init(struct nouveau_channel *evo) ++{ ++ struct drm_device *dev = evo->dev; ++ int id = evo->id, ret, i; ++ u64 pushbuf = evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT; ++ u32 tmp; ++ ++ tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)); ++ if ((tmp & 0x009f0000) == 0x00020000) ++ nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000); ++ ++ tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)); ++ 
if ((tmp & 0x003f0000) == 0x00030000) ++ nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000); ++ ++ /* initialise fifo */ ++ nv_wr32(dev, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 | ++ NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM | ++ NV50_PDISPLAY_EVO_DMA_CB_VALID); ++ nv_wr32(dev, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000); ++ nv_wr32(dev, NV50_PDISPLAY_EVO_HASH_TAG(id), id); ++ nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA, ++ NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED); ++ ++ nv_wr32(dev, NV50_PDISPLAY_USER_PUT(id), 0x00000000); ++ nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 | ++ NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED); ++ if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) { ++ NV_ERROR(dev, "EvoCh %d init timeout: 0x%08x\n", id, ++ nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id))); ++ return -EBUSY; ++ } ++ ++ /* enable error reporting on the channel */ ++ nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << id); ++ ++ evo->dma.max = (4096/4) - 2; ++ evo->dma.put = 0; ++ evo->dma.cur = evo->dma.put; ++ evo->dma.free = evo->dma.max - evo->dma.cur; ++ ++ ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS); ++ if (ret) ++ return ret; ++ ++ for (i = 0; i < NOUVEAU_DMA_SKIPS; i++) ++ OUT_RING(evo, 0); ++ ++ return 0; ++} ++ ++static void ++nv50_evo_channel_fini(struct nouveau_channel *evo) ++{ ++ struct drm_device *dev = evo->dev; ++ int id = evo->id; ++ ++ nv_mask(dev, 0x610028, 0x00010001 << id, 0x00000000); ++ nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000); ++ nv_wr32(dev, NV50_PDISPLAY_INTR_0, (1 << id)); ++ nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000); ++ if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) { ++ NV_ERROR(dev, "EvoCh %d takedown timeout: 0x%08x\n", id, ++ nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id))); ++ } ++} ++ ++static int ++nv50_evo_create(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *ramht = NULL; ++ struct 
nouveau_channel *evo; ++ int ret; ++ ++ /* create primary evo channel, the one we use for modesetting ++ * purporses ++ */ ++ ret = nv50_evo_channel_new(dev, &dev_priv->evo); ++ if (ret) ++ return ret; ++ evo = dev_priv->evo; ++ ++ /* setup object management on it, any other evo channel will ++ * use this also as there's no per-channel support on the ++ * hardware ++ */ ++ ret = nouveau_gpuobj_new(dev, NULL, 32768, 65536, ++ NVOBJ_FLAG_ZERO_ALLOC, &evo->ramin); ++ if (ret) { ++ NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret); ++ nv50_evo_channel_del(&dev_priv->evo); ++ return ret; ++ } ++ ++ ret = drm_mm_init(&evo->ramin_heap, 0, 32768); ++ if (ret) { ++ NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret); ++ nv50_evo_channel_del(&dev_priv->evo); ++ return ret; ++ } ++ ++ ret = nouveau_gpuobj_new(dev, evo, 4096, 16, 0, &ramht); ++ if (ret) { ++ NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret); ++ nv50_evo_channel_del(&dev_priv->evo); ++ return ret; ++ } ++ ++ ret = nouveau_ramht_new(dev, ramht, &evo->ramht); ++ nouveau_gpuobj_ref(NULL, &ramht); ++ if (ret) { ++ nv50_evo_channel_del(&dev_priv->evo); ++ return ret; ++ } ++ ++ /* create some default objects for the scanout memtypes we support */ ++ if (dev_priv->chipset != 0x50) { ++ ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB16, 0x70, 0x19, ++ 0, 0xffffffff); ++ if (ret) { ++ nv50_evo_channel_del(&dev_priv->evo); ++ return ret; ++ } ++ ++ ++ ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0x7a, 0x19, ++ 0, 0xffffffff); ++ if (ret) { ++ nv50_evo_channel_del(&dev_priv->evo); ++ return ret; ++ } ++ } ++ ++ ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19, ++ 0, dev_priv->vram_size); ++ if (ret) { ++ nv50_evo_channel_del(&dev_priv->evo); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++int ++nv50_evo_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ int ret; ++ ++ if (!dev_priv->evo) { ++ ret = nv50_evo_create(dev); ++ if (ret) ++ return 
ret; ++ } ++ ++ return nv50_evo_channel_init(dev_priv->evo); ++} ++ ++void ++nv50_evo_fini(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ if (dev_priv->evo) { ++ nv50_evo_channel_fini(dev_priv->evo); ++ nv50_evo_channel_del(&dev_priv->evo); ++ } ++} +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv50_evo.h linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv50_evo.h +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv50_evo.h 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv50_evo.h 2010-11-12 06:18:06.000000000 +0100 +@@ -24,6 +24,15 @@ + * + */ + ++#ifndef __NV50_EVO_H__ ++#define __NV50_EVO_H__ ++ ++int nv50_evo_init(struct drm_device *dev); ++void nv50_evo_fini(struct drm_device *dev); ++int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 class, u32 name, ++ u32 tile_flags, u32 magic_flags, ++ u32 offset, u32 limit); ++ + #define NV50_EVO_UPDATE 0x00000080 + #define NV50_EVO_UNK84 0x00000084 + #define NV50_EVO_UNK84_NOTIFY 0x40000000 +@@ -111,3 +120,4 @@ + #define NV50_EVO_CRTC_SCALE_RES1 0x000008d8 + #define NV50_EVO_CRTC_SCALE_RES2 0x000008dc + ++#endif +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv50_fb.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv50_fb.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv50_fb.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv50_fb.c 2010-11-12 06:18:06.000000000 +0100 +@@ -42,6 +42,7 @@ + nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; ++ unsigned long flags; + u32 trap[6], idx, chinst; + int i, ch; + +@@ -60,8 +61,10 @@ + return; + + chinst = (trap[2] << 16) | trap[1]; ++ ++ spin_lock_irqsave(&dev_priv->channels.lock, flags); + for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) { +- struct nouveau_channel *chan = dev_priv->fifos[ch]; ++ struct nouveau_channel *chan = 
dev_priv->channels.ptr[ch]; + + if (!chan || !chan->ramin) + continue; +@@ -69,6 +72,7 @@ + if (chinst == chan->ramin->vinst >> 12) + break; + } ++ spin_unlock_irqrestore(&dev_priv->channels.lock, flags); + + NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x " + "channel %d (0x%08x)\n", +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv50_fbcon.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv50_fbcon.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv50_fbcon.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv50_fbcon.c 2010-11-12 06:18:06.000000000 +0100 +@@ -4,26 +4,18 @@ + #include "nouveau_ramht.h" + #include "nouveau_fbcon.h" + +-void ++int + nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) + { + struct nouveau_fbdev *nfbdev = info->par; + struct drm_device *dev = nfbdev->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan = dev_priv->channel; ++ int ret; + +- if (info->state != FBINFO_STATE_RUNNING) +- return; +- +- if (!(info->flags & FBINFO_HWACCEL_DISABLED) && +- RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11)) { +- nouveau_fbcon_gpu_lockup(info); +- } +- +- if (info->flags & FBINFO_HWACCEL_DISABLED) { +- cfb_fillrect(info, rect); +- return; +- } ++ ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 
7 : 11); ++ if (ret) ++ return ret; + + if (rect->rop != ROP_COPY) { + BEGIN_RING(chan, NvSub2D, 0x02ac, 1); +@@ -45,27 +37,21 @@ + OUT_RING(chan, 3); + } + FIRE_RING(chan); ++ return 0; + } + +-void ++int + nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) + { + struct nouveau_fbdev *nfbdev = info->par; + struct drm_device *dev = nfbdev->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan = dev_priv->channel; ++ int ret; + +- if (info->state != FBINFO_STATE_RUNNING) +- return; +- +- if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 12)) { +- nouveau_fbcon_gpu_lockup(info); +- } +- +- if (info->flags & FBINFO_HWACCEL_DISABLED) { +- cfb_copyarea(info, region); +- return; +- } ++ ret = RING_SPACE(chan, 12); ++ if (ret) ++ return ret; + + BEGIN_RING(chan, NvSub2D, 0x0110, 1); + OUT_RING(chan, 0); +@@ -80,9 +66,10 @@ + OUT_RING(chan, 0); + OUT_RING(chan, region->sy); + FIRE_RING(chan); ++ return 0; + } + +-void ++int + nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) + { + struct nouveau_fbdev *nfbdev = info->par; +@@ -92,23 +79,14 @@ + uint32_t width, dwords, *data = (uint32_t *)image->data; + uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); + uint32_t *palette = info->pseudo_palette; ++ int ret; + +- if (info->state != FBINFO_STATE_RUNNING) +- return; +- +- if (image->depth != 1) { +- cfb_imageblit(info, image); +- return; +- } +- +- if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 11)) { +- nouveau_fbcon_gpu_lockup(info); +- } ++ if (image->depth != 1) ++ return -ENODEV; + +- if (info->flags & FBINFO_HWACCEL_DISABLED) { +- cfb_imageblit(info, image); +- return; +- } ++ ret = RING_SPACE(chan, 11); ++ if (ret) ++ return ret; + + width = ALIGN(image->width, 32); + dwords = (width * image->height) >> 5; +@@ -134,11 +112,9 @@ + while (dwords) { + int push = dwords > 2047 ? 
2047 : dwords; + +- if (RING_SPACE(chan, push + 1)) { +- nouveau_fbcon_gpu_lockup(info); +- cfb_imageblit(info, image); +- return; +- } ++ ret = RING_SPACE(chan, push + 1); ++ if (ret) ++ return ret; + + dwords -= push; + +@@ -148,6 +124,7 @@ + } + + FIRE_RING(chan); ++ return 0; + } + + int +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv50_fifo.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv50_fifo.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv50_fifo.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv50_fifo.c 2010-11-12 06:18:06.000000000 +0100 +@@ -44,7 +44,8 @@ + + /* We never schedule channel 0 or 127 */ + for (i = 1, nr = 0; i < 127; i++) { +- if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc) { ++ if (dev_priv->channels.ptr[i] && ++ dev_priv->channels.ptr[i]->ramfc) { + nv_wo32(cur, (nr * 4), i); + nr++; + } +@@ -60,7 +61,7 @@ + nv50_fifo_channel_enable(struct drm_device *dev, int channel) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nouveau_channel *chan = dev_priv->fifos[channel]; ++ struct nouveau_channel *chan = dev_priv->channels.ptr[channel]; + uint32_t inst; + + NV_DEBUG(dev, "ch%d\n", channel); +@@ -105,6 +106,7 @@ + { + NV_DEBUG(dev, "\n"); + ++ nouveau_irq_register(dev, 8, nv04_fifo_isr); + nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF); + nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF); + } +@@ -118,7 +120,7 @@ + NV_DEBUG(dev, "\n"); + + for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) { +- if (dev_priv->fifos[i]) ++ if (dev_priv->channels.ptr[i]) + nv50_fifo_channel_enable(dev, i); + else + nv50_fifo_channel_disable(dev, i); +@@ -206,6 +208,9 @@ + if (!pfifo->playlist[0]) + return; + ++ nv_wr32(dev, 0x2140, 0x00000000); ++ nouveau_irq_unregister(dev, 8); ++ + nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]); + nouveau_gpuobj_ref(NULL, &pfifo->playlist[1]); + } +@@ -291,10 +296,23 @@ + nv50_fifo_destroy_context(struct nouveau_channel *chan) + { + struct drm_device 
*dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; + struct nouveau_gpuobj *ramfc = NULL; ++ unsigned long flags; + + NV_DEBUG(dev, "ch%d\n", chan->id); + ++ spin_lock_irqsave(&dev_priv->context_switch_lock, flags); ++ pfifo->reassign(dev, false); ++ ++ /* Unload the context if it's the currently active one */ ++ if (pfifo->channel_id(dev) == chan->id) { ++ pfifo->disable(dev); ++ pfifo->unload_context(dev); ++ pfifo->enable(dev); ++ } ++ + /* This will ensure the channel is seen as disabled. */ + nouveau_gpuobj_ref(chan->ramfc, &ramfc); + nouveau_gpuobj_ref(NULL, &chan->ramfc); +@@ -305,6 +323,10 @@ + nv50_fifo_channel_disable(dev, 127); + nv50_fifo_playlist_update(dev); + ++ pfifo->reassign(dev, true); ++ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); ++ ++ /* Free the channel resources */ + nouveau_gpuobj_ref(NULL, &ramfc); + nouveau_gpuobj_ref(NULL, &chan->cache); + } +@@ -392,7 +414,7 @@ + if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1) + return 0; + +- chan = dev_priv->fifos[chid]; ++ chan = dev_priv->channels.ptr[chid]; + if (!chan) { + NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid); + return -EINVAL; +@@ -464,3 +486,8 @@ + return 0; + } + ++void ++nv50_fifo_tlb_flush(struct drm_device *dev) ++{ ++ nv50_vm_flush(dev, 5); ++} +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv50_gpio.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv50_gpio.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv50_gpio.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv50_gpio.c 2010-11-12 06:18:06.000000000 +0100 +@@ -26,6 +26,28 @@ + #include "nouveau_drv.h" + #include "nouveau_hw.h" + ++#include "nv50_display.h" ++ ++static void nv50_gpio_isr(struct drm_device *dev); ++static void nv50_gpio_isr_bh(struct work_struct *work); ++ ++struct nv50_gpio_priv { ++ struct list_head handlers; ++ spinlock_t lock; 
++}; ++ ++struct nv50_gpio_handler { ++ struct drm_device *dev; ++ struct list_head head; ++ struct work_struct work; ++ bool inhibit; ++ ++ struct dcb_gpio_entry *gpio; ++ ++ void (*handler)(void *data, int state); ++ void *data; ++}; ++ + static int + nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift) + { +@@ -75,29 +97,123 @@ + return 0; + } + ++int ++nv50_gpio_irq_register(struct drm_device *dev, enum dcb_gpio_tag tag, ++ void (*handler)(void *, int), void *data) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; ++ struct nv50_gpio_priv *priv = pgpio->priv; ++ struct nv50_gpio_handler *gpioh; ++ struct dcb_gpio_entry *gpio; ++ unsigned long flags; ++ ++ gpio = nouveau_bios_gpio_entry(dev, tag); ++ if (!gpio) ++ return -ENOENT; ++ ++ gpioh = kzalloc(sizeof(*gpioh), GFP_KERNEL); ++ if (!gpioh) ++ return -ENOMEM; ++ ++ INIT_WORK(&gpioh->work, nv50_gpio_isr_bh); ++ gpioh->dev = dev; ++ gpioh->gpio = gpio; ++ gpioh->handler = handler; ++ gpioh->data = data; ++ ++ spin_lock_irqsave(&priv->lock, flags); ++ list_add(&gpioh->head, &priv->handlers); ++ spin_unlock_irqrestore(&priv->lock, flags); ++ return 0; ++} ++ + void +-nv50_gpio_irq_enable(struct drm_device *dev, enum dcb_gpio_tag tag, bool on) ++nv50_gpio_irq_unregister(struct drm_device *dev, enum dcb_gpio_tag tag, ++ void (*handler)(void *, int), void *data) + { ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; ++ struct nv50_gpio_priv *priv = pgpio->priv; ++ struct nv50_gpio_handler *gpioh, *tmp; + struct dcb_gpio_entry *gpio; +- u32 reg, mask; ++ unsigned long flags; + + gpio = nouveau_bios_gpio_entry(dev, tag); +- if (!gpio) { +- NV_ERROR(dev, "gpio tag 0x%02x not found\n", tag); ++ if (!gpio) + return; ++ ++ spin_lock_irqsave(&priv->lock, flags); ++ list_for_each_entry_safe(gpioh, tmp, &priv->handlers, head) { ++ if (gpioh->gpio != 
gpio || ++ gpioh->handler != handler || ++ gpioh->data != data) ++ continue; ++ list_del(&gpioh->head); ++ kfree(gpioh); + } ++ spin_unlock_irqrestore(&priv->lock, flags); ++} ++ ++bool ++nv50_gpio_irq_enable(struct drm_device *dev, enum dcb_gpio_tag tag, bool on) ++{ ++ struct dcb_gpio_entry *gpio; ++ u32 reg, mask; ++ ++ gpio = nouveau_bios_gpio_entry(dev, tag); ++ if (!gpio) ++ return false; + + reg = gpio->line < 16 ? 0xe050 : 0xe070; + mask = 0x00010001 << (gpio->line & 0xf); + + nv_wr32(dev, reg + 4, mask); +- nv_mask(dev, reg + 0, mask, on ? mask : 0); ++ reg = nv_mask(dev, reg + 0, mask, on ? mask : 0); ++ return (reg & mask) == mask; ++} ++ ++static int ++nv50_gpio_create(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; ++ struct nv50_gpio_priv *priv; ++ ++ priv = kzalloc(sizeof(*priv), GFP_KERNEL); ++ if (!priv) ++ return -ENOMEM; ++ ++ INIT_LIST_HEAD(&priv->handlers); ++ spin_lock_init(&priv->lock); ++ pgpio->priv = priv; ++ return 0; ++} ++ ++static void ++nv50_gpio_destroy(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; ++ ++ kfree(pgpio->priv); ++ pgpio->priv = NULL; + } + + int + nv50_gpio_init(struct drm_device *dev) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; ++ struct nv50_gpio_priv *priv; ++ int ret; ++ ++ if (!pgpio->priv) { ++ ret = nv50_gpio_create(dev); ++ if (ret) ++ return ret; ++ } ++ priv = pgpio->priv; + + /* disable, and ack any pending gpio interrupts */ + nv_wr32(dev, 0xe050, 0x00000000); +@@ -107,5 +223,77 @@ + nv_wr32(dev, 0xe074, 0xffffffff); + } + ++ nouveau_irq_register(dev, 21, nv50_gpio_isr); + return 0; + } ++ ++void ++nv50_gpio_fini(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ nv_wr32(dev, 
0xe050, 0x00000000); ++ if (dev_priv->chipset >= 0x90) ++ nv_wr32(dev, 0xe070, 0x00000000); ++ nouveau_irq_unregister(dev, 21); ++ ++ nv50_gpio_destroy(dev); ++} ++ ++static void ++nv50_gpio_isr_bh(struct work_struct *work) ++{ ++ struct nv50_gpio_handler *gpioh = ++ container_of(work, struct nv50_gpio_handler, work); ++ struct drm_nouveau_private *dev_priv = gpioh->dev->dev_private; ++ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; ++ struct nv50_gpio_priv *priv = pgpio->priv; ++ unsigned long flags; ++ int state; ++ ++ state = pgpio->get(gpioh->dev, gpioh->gpio->tag); ++ if (state < 0) ++ return; ++ ++ gpioh->handler(gpioh->data, state); ++ ++ spin_lock_irqsave(&priv->lock, flags); ++ gpioh->inhibit = false; ++ spin_unlock_irqrestore(&priv->lock, flags); ++} ++ ++static void ++nv50_gpio_isr(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; ++ struct nv50_gpio_priv *priv = pgpio->priv; ++ struct nv50_gpio_handler *gpioh; ++ u32 intr0, intr1 = 0; ++ u32 hi, lo, ch; ++ ++ intr0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050); ++ if (dev_priv->chipset >= 0x90) ++ intr1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070); ++ ++ hi = (intr0 & 0x0000ffff) | (intr1 << 16); ++ lo = (intr0 >> 16) | (intr1 & 0xffff0000); ++ ch = hi | lo; ++ ++ nv_wr32(dev, 0xe054, intr0); ++ if (dev_priv->chipset >= 0x90) ++ nv_wr32(dev, 0xe074, intr1); ++ ++ spin_lock(&priv->lock); ++ list_for_each_entry(gpioh, &priv->handlers, head) { ++ if (!(ch & (1 << gpioh->gpio->line))) ++ continue; ++ ++ if (gpioh->inhibit) ++ continue; ++ gpioh->inhibit = true; ++ ++ queue_work(dev_priv->wq, &gpioh->work); ++ } ++ spin_unlock(&priv->lock); ++} +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv50_graph.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv50_graph.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv50_graph.c 2010-11-01 12:54:12.000000000 +0100 ++++ 
linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv50_graph.c 2010-11-12 06:18:06.000000000 +0100 +@@ -29,6 +29,11 @@ + #include "nouveau_drv.h" + #include "nouveau_ramht.h" + #include "nouveau_grctx.h" ++#include "nouveau_dma.h" ++#include "nv50_evo.h" ++ ++static int nv50_graph_register(struct drm_device *); ++static void nv50_graph_isr(struct drm_device *); + + static void + nv50_graph_init_reset(struct drm_device *dev) +@@ -46,6 +51,7 @@ + { + NV_DEBUG(dev, "\n"); + ++ nouveau_irq_register(dev, 12, nv50_graph_isr); + nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff); + nv_wr32(dev, 0x400138, 0xffffffff); + nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff); +@@ -145,12 +151,15 @@ + nv50_graph_init_reset(dev); + nv50_graph_init_regs__nv(dev); + nv50_graph_init_regs(dev); +- nv50_graph_init_intr(dev); + + ret = nv50_graph_init_ctxctl(dev); + if (ret) + return ret; + ++ ret = nv50_graph_register(dev); ++ if (ret) ++ return ret; ++ nv50_graph_init_intr(dev); + return 0; + } + +@@ -158,6 +167,8 @@ + nv50_graph_takedown(struct drm_device *dev) + { + NV_DEBUG(dev, "\n"); ++ nv_wr32(dev, 0x40013c, 0x00000000); ++ nouveau_irq_unregister(dev, 12); + } + + void +@@ -190,7 +201,7 @@ + inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12; + + for (i = 0; i < dev_priv->engine.fifo.channels; i++) { +- struct nouveau_channel *chan = dev_priv->fifos[i]; ++ struct nouveau_channel *chan = dev_priv->channels.ptr[i]; + + if (chan && chan->ramin && chan->ramin->vinst == inst) + return chan; +@@ -211,7 +222,7 @@ + + NV_DEBUG(dev, "ch%d\n", chan->id); + +- ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0x1000, ++ ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0, + NVOBJ_FLAG_ZERO_ALLOC | + NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx); + if (ret) +@@ -242,17 +253,28 @@ + { + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + int i, hdr = (dev_priv->chipset == 0x50) ? 
0x200 : 0x20; ++ unsigned long flags; + + NV_DEBUG(dev, "ch%d\n", chan->id); + + if (!chan->ramin) + return; + ++ spin_lock_irqsave(&dev_priv->context_switch_lock, flags); ++ pgraph->fifo_access(dev, false); ++ ++ if (pgraph->channel(dev) == chan) ++ pgraph->unload_context(dev); ++ + for (i = hdr; i < hdr + 24; i += 4) + nv_wo32(chan->ramin, i, 0); + dev_priv->engine.instmem.flush(dev); + ++ pgraph->fifo_access(dev, true); ++ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); ++ + nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); + } + +@@ -306,7 +328,7 @@ + return 0; + } + +-void ++static void + nv50_graph_context_switch(struct drm_device *dev) + { + uint32_t inst; +@@ -322,8 +344,8 @@ + } + + static int +-nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass, +- int mthd, uint32_t data) ++nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, ++ u32 class, u32 mthd, u32 data) + { + struct nouveau_gpuobj *gpuobj; + +@@ -340,8 +362,8 @@ + } + + static int +-nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan, int grclass, +- int mthd, uint32_t data) ++nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan, ++ u32 class, u32 mthd, u32 data) + { + if (nouveau_notifier_offset(chan->nvsw.vblsem, &data)) + return -ERANGE; +@@ -351,16 +373,16 @@ + } + + static int +-nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan, int grclass, +- int mthd, uint32_t data) ++nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan, ++ u32 class, u32 mthd, u32 data) + { + chan->nvsw.vblsem_rval = data; + return 0; + } + + static int +-nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan, int grclass, +- int mthd, uint32_t data) ++nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan, ++ u32 class, u32 mthd, u32 data) + { + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; +@@ -368,37 +390,621 @@ + if (!chan->nvsw.vblsem || chan->nvsw.vblsem_offset == ~0 || data > 1) + return 
-EINVAL; + +- if (!(nv_rd32(dev, NV50_PDISPLAY_INTR_EN) & +- NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data))) { +- nv_wr32(dev, NV50_PDISPLAY_INTR_1, +- NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(data)); +- nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev, +- NV50_PDISPLAY_INTR_EN) | +- NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data)); +- } ++ drm_vblank_get(dev, data); + ++ chan->nvsw.vblsem_head = data; + list_add(&chan->nvsw.vbl_wait, &dev_priv->vbl_waiting); ++ ++ return 0; ++} ++ ++static int ++nv50_graph_nvsw_mthd_page_flip(struct nouveau_channel *chan, ++ u32 class, u32 mthd, u32 data) ++{ ++ struct nouveau_page_flip_state s; ++ ++ if (!nouveau_finish_page_flip(chan, &s)) { ++ /* XXX - Do something here */ ++ } ++ ++ return 0; ++} ++ ++static int ++nv50_graph_register(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ ++ if (dev_priv->engine.graph.registered) ++ return 0; ++ ++ NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */ ++ NVOBJ_MTHD (dev, 0x506e, 0x018c, nv50_graph_nvsw_dma_vblsem); ++ NVOBJ_MTHD (dev, 0x506e, 0x0400, nv50_graph_nvsw_vblsem_offset); ++ NVOBJ_MTHD (dev, 0x506e, 0x0404, nv50_graph_nvsw_vblsem_release_val); ++ NVOBJ_MTHD (dev, 0x506e, 0x0408, nv50_graph_nvsw_vblsem_release); ++ NVOBJ_MTHD (dev, 0x506e, 0x0500, nv50_graph_nvsw_mthd_page_flip); ++ ++ NVOBJ_CLASS(dev, 0x0030, GR); /* null */ ++ NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */ ++ NVOBJ_CLASS(dev, 0x502d, GR); /* 2d */ ++ ++ /* tesla */ ++ if (dev_priv->chipset == 0x50) ++ NVOBJ_CLASS(dev, 0x5097, GR); /* tesla (nv50) */ ++ else ++ if (dev_priv->chipset < 0xa0) ++ NVOBJ_CLASS(dev, 0x8297, GR); /* tesla (nv8x/nv9x) */ ++ else { ++ switch (dev_priv->chipset) { ++ case 0xa0: ++ case 0xaa: ++ case 0xac: ++ NVOBJ_CLASS(dev, 0x8397, GR); ++ break; ++ case 0xa3: ++ case 0xa5: ++ case 0xa8: ++ NVOBJ_CLASS(dev, 0x8597, GR); ++ break; ++ case 0xaf: ++ NVOBJ_CLASS(dev, 0x8697, GR); ++ break; ++ } ++ } ++ ++ /* compute */ ++ NVOBJ_CLASS(dev, 0x50c0, GR); ++ if (dev_priv->chipset > 0xa0 
&& ++ dev_priv->chipset != 0xaa && ++ dev_priv->chipset != 0xac) ++ NVOBJ_CLASS(dev, 0x85c0, GR); ++ ++ dev_priv->engine.graph.registered = true; + return 0; + } + +-static struct nouveau_pgraph_object_method nv50_graph_nvsw_methods[] = { +- { 0x018c, nv50_graph_nvsw_dma_vblsem }, +- { 0x0400, nv50_graph_nvsw_vblsem_offset }, +- { 0x0404, nv50_graph_nvsw_vblsem_release_val }, +- { 0x0408, nv50_graph_nvsw_vblsem_release }, ++void ++nv50_graph_tlb_flush(struct drm_device *dev) ++{ ++ nv50_vm_flush(dev, 0); ++} ++ ++void ++nv86_graph_tlb_flush(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; ++ bool idle, timeout = false; ++ unsigned long flags; ++ u64 start; ++ u32 tmp; ++ ++ spin_lock_irqsave(&dev_priv->context_switch_lock, flags); ++ nv_mask(dev, 0x400500, 0x00000001, 0x00000000); ++ ++ start = ptimer->read(dev); ++ do { ++ idle = true; ++ ++ for (tmp = nv_rd32(dev, 0x400380); tmp && idle; tmp >>= 3) { ++ if ((tmp & 7) == 1) ++ idle = false; ++ } ++ ++ for (tmp = nv_rd32(dev, 0x400384); tmp && idle; tmp >>= 3) { ++ if ((tmp & 7) == 1) ++ idle = false; ++ } ++ ++ for (tmp = nv_rd32(dev, 0x400388); tmp && idle; tmp >>= 3) { ++ if ((tmp & 7) == 1) ++ idle = false; ++ } ++ } while (!idle && !(timeout = ptimer->read(dev) - start > 2000000000)); ++ ++ if (timeout) { ++ NV_ERROR(dev, "PGRAPH TLB flush idle timeout fail: " ++ "0x%08x 0x%08x 0x%08x 0x%08x\n", ++ nv_rd32(dev, 0x400700), nv_rd32(dev, 0x400380), ++ nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388)); ++ } ++ ++ nv50_vm_flush(dev, 0); ++ ++ nv_mask(dev, 0x400500, 0x00000001, 0x00000001); ++ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); ++} ++ ++static struct nouveau_enum nv50_mp_exec_error_names[] = ++{ ++ { 3, "STACK_UNDERFLOW" }, ++ { 4, "QUADON_ACTIVE" }, ++ { 8, "TIMEOUT" }, ++ { 0x10, "INVALID_OPCODE" }, ++ { 0x40, "BREAKPOINT" }, ++ {} ++}; ++ ++static struct nouveau_bitfield 
nv50_graph_trap_m2mf[] = { ++ { 0x00000001, "NOTIFY" }, ++ { 0x00000002, "IN" }, ++ { 0x00000004, "OUT" }, ++ {} ++}; ++ ++static struct nouveau_bitfield nv50_graph_trap_vfetch[] = { ++ { 0x00000001, "FAULT" }, ++ {} ++}; ++ ++static struct nouveau_bitfield nv50_graph_trap_strmout[] = { ++ { 0x00000001, "FAULT" }, ++ {} ++}; ++ ++static struct nouveau_bitfield nv50_graph_trap_ccache[] = { ++ { 0x00000001, "FAULT" }, + {} + }; + +-struct nouveau_pgraph_object_class nv50_graph_grclass[] = { +- { 0x506e, true, nv50_graph_nvsw_methods }, /* nvsw */ +- { 0x0030, false, NULL }, /* null */ +- { 0x5039, false, NULL }, /* m2mf */ +- { 0x502d, false, NULL }, /* 2d */ +- { 0x50c0, false, NULL }, /* compute */ +- { 0x85c0, false, NULL }, /* compute (nva3, nva5, nva8) */ +- { 0x5097, false, NULL }, /* tesla (nv50) */ +- { 0x8297, false, NULL }, /* tesla (nv8x/nv9x) */ +- { 0x8397, false, NULL }, /* tesla (nva0, nvaa, nvac) */ +- { 0x8597, false, NULL }, /* tesla (nva3, nva5, nva8) */ ++/* There must be a *lot* of these. Will take some time to gather them up. 
*/ ++static struct nouveau_enum nv50_data_error_names[] = { ++ { 4, "INVALID_VALUE" }, ++ { 5, "INVALID_ENUM" }, ++ { 8, "INVALID_OBJECT" }, ++ { 0xc, "INVALID_BITFIELD" }, ++ { 0x28, "MP_NO_REG_SPACE" }, ++ { 0x2b, "MP_BLOCK_SIZE_MISMATCH" }, + {} + }; ++ ++static struct nouveau_bitfield nv50_graph_intr[] = { ++ { 0x00000001, "NOTIFY" }, ++ { 0x00000002, "COMPUTE_QUERY" }, ++ { 0x00000010, "ILLEGAL_MTHD" }, ++ { 0x00000020, "ILLEGAL_CLASS" }, ++ { 0x00000040, "DOUBLE_NOTIFY" }, ++ { 0x00001000, "CONTEXT_SWITCH" }, ++ { 0x00010000, "BUFFER_NOTIFY" }, ++ { 0x00100000, "DATA_ERROR" }, ++ { 0x00200000, "TRAP" }, ++ { 0x01000000, "SINGLE_STEP" }, ++ {} ++}; ++ ++static void ++nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ uint32_t units = nv_rd32(dev, 0x1540); ++ uint32_t addr, mp10, status, pc, oplow, ophigh; ++ int i; ++ int mps = 0; ++ for (i = 0; i < 4; i++) { ++ if (!(units & 1 << (i+24))) ++ continue; ++ if (dev_priv->chipset < 0xa0) ++ addr = 0x408200 + (tpid << 12) + (i << 7); ++ else ++ addr = 0x408100 + (tpid << 11) + (i << 7); ++ mp10 = nv_rd32(dev, addr + 0x10); ++ status = nv_rd32(dev, addr + 0x14); ++ if (!status) ++ continue; ++ if (display) { ++ nv_rd32(dev, addr + 0x20); ++ pc = nv_rd32(dev, addr + 0x24); ++ oplow = nv_rd32(dev, addr + 0x70); ++ ophigh= nv_rd32(dev, addr + 0x74); ++ NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - " ++ "TP %d MP %d: ", tpid, i); ++ nouveau_enum_print(nv50_mp_exec_error_names, status); ++ printk(" at %06x warp %d, opcode %08x %08x\n", ++ pc&0xffffff, pc >> 24, ++ oplow, ophigh); ++ } ++ nv_wr32(dev, addr + 0x10, mp10); ++ nv_wr32(dev, addr + 0x14, 0); ++ mps++; ++ } ++ if (!mps && display) ++ NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: " ++ "No MPs claiming errors?\n", tpid); ++} ++ ++static void ++nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old, ++ uint32_t ustatus_new, int display, const char *name) ++{ ++ struct 
drm_nouveau_private *dev_priv = dev->dev_private; ++ int tps = 0; ++ uint32_t units = nv_rd32(dev, 0x1540); ++ int i, r; ++ uint32_t ustatus_addr, ustatus; ++ for (i = 0; i < 16; i++) { ++ if (!(units & (1 << i))) ++ continue; ++ if (dev_priv->chipset < 0xa0) ++ ustatus_addr = ustatus_old + (i << 12); ++ else ++ ustatus_addr = ustatus_new + (i << 11); ++ ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff; ++ if (!ustatus) ++ continue; ++ tps++; ++ switch (type) { ++ case 6: /* texture error... unknown for now */ ++ nv50_fb_vm_trap(dev, display, name); ++ if (display) { ++ NV_ERROR(dev, "magic set %d:\n", i); ++ for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4) ++ NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, ++ nv_rd32(dev, r)); ++ } ++ break; ++ case 7: /* MP error */ ++ if (ustatus & 0x00010000) { ++ nv50_pgraph_mp_trap(dev, i, display); ++ ustatus &= ~0x00010000; ++ } ++ break; ++ case 8: /* TPDMA error */ ++ { ++ uint32_t e0c = nv_rd32(dev, ustatus_addr + 4); ++ uint32_t e10 = nv_rd32(dev, ustatus_addr + 8); ++ uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc); ++ uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10); ++ uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14); ++ uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18); ++ uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c); ++ nv50_fb_vm_trap(dev, display, name); ++ /* 2d engine destination */ ++ if (ustatus & 0x00000010) { ++ if (display) { ++ NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n", ++ i, e14, e10); ++ NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", ++ i, e0c, e18, e1c, e20, e24); ++ } ++ ustatus &= ~0x00000010; ++ } ++ /* Render target */ ++ if (ustatus & 0x00000040) { ++ if (display) { ++ NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n", ++ i, e14, e10); ++ NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", ++ i, e0c, e18, e1c, e20, 
e24); ++ } ++ ustatus &= ~0x00000040; ++ } ++ /* CUDA memory: l[], g[] or stack. */ ++ if (ustatus & 0x00000080) { ++ if (display) { ++ if (e18 & 0x80000000) { ++ /* g[] read fault? */ ++ NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n", ++ i, e14, e10 | ((e18 >> 24) & 0x1f)); ++ e18 &= ~0x1f000000; ++ } else if (e18 & 0xc) { ++ /* g[] write fault? */ ++ NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n", ++ i, e14, e10 | ((e18 >> 7) & 0x1f)); ++ e18 &= ~0x00000f80; ++ } else { ++ NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n", ++ i, e14, e10); ++ } ++ NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", ++ i, e0c, e18, e1c, e20, e24); ++ } ++ ustatus &= ~0x00000080; ++ } ++ } ++ break; ++ } ++ if (ustatus) { ++ if (display) ++ NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus); ++ } ++ nv_wr32(dev, ustatus_addr, 0xc0000000); ++ } ++ ++ if (!tps && display) ++ NV_INFO(dev, "%s - No TPs claiming errors?\n", name); ++} ++ ++static int ++nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid) ++{ ++ u32 status = nv_rd32(dev, 0x400108); ++ u32 ustatus; ++ ++ if (!status && display) { ++ NV_INFO(dev, "PGRAPH - TRAP: no units reporting traps?\n"); ++ return 1; ++ } ++ ++ /* DISPATCH: Relays commands to other units and handles NOTIFY, ++ * COND, QUERY. If you get a trap from it, the command is still stuck ++ * in DISPATCH and you need to do something about it. */ ++ if (status & 0x001) { ++ ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff; ++ if (!ustatus && display) { ++ NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n"); ++ } ++ ++ nv_wr32(dev, 0x400500, 0x00000000); ++ ++ /* Known to be triggered by screwed up NOTIFY and COND... 
*/ ++ if (ustatus & 0x00000001) { ++ u32 addr = nv_rd32(dev, 0x400808); ++ u32 subc = (addr & 0x00070000) >> 16; ++ u32 mthd = (addr & 0x00001ffc); ++ u32 datal = nv_rd32(dev, 0x40080c); ++ u32 datah = nv_rd32(dev, 0x400810); ++ u32 class = nv_rd32(dev, 0x400814); ++ u32 r848 = nv_rd32(dev, 0x400848); ++ ++ NV_INFO(dev, "PGRAPH - TRAP DISPATCH_FAULT\n"); ++ if (display && (addr & 0x80000000)) { ++ NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) " ++ "subc %d class 0x%04x mthd 0x%04x " ++ "data 0x%08x%08x " ++ "400808 0x%08x 400848 0x%08x\n", ++ chid, inst, subc, class, mthd, datah, ++ datal, addr, r848); ++ } else ++ if (display) { ++ NV_INFO(dev, "PGRAPH - no stuck command?\n"); ++ } ++ ++ nv_wr32(dev, 0x400808, 0); ++ nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3); ++ nv_wr32(dev, 0x400848, 0); ++ ustatus &= ~0x00000001; ++ } ++ ++ if (ustatus & 0x00000002) { ++ u32 addr = nv_rd32(dev, 0x40084c); ++ u32 subc = (addr & 0x00070000) >> 16; ++ u32 mthd = (addr & 0x00001ffc); ++ u32 data = nv_rd32(dev, 0x40085c); ++ u32 class = nv_rd32(dev, 0x400814); ++ ++ NV_INFO(dev, "PGRAPH - TRAP DISPATCH_QUERY\n"); ++ if (display && (addr & 0x80000000)) { ++ NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) " ++ "subc %d class 0x%04x mthd 0x%04x " ++ "data 0x%08x 40084c 0x%08x\n", ++ chid, inst, subc, class, mthd, ++ data, addr); ++ } else ++ if (display) { ++ NV_INFO(dev, "PGRAPH - no stuck command?\n"); ++ } ++ ++ nv_wr32(dev, 0x40084c, 0); ++ ustatus &= ~0x00000002; ++ } ++ ++ if (ustatus && display) { ++ NV_INFO(dev, "PGRAPH - TRAP_DISPATCH (unknown " ++ "0x%08x)\n", ustatus); ++ } ++ ++ nv_wr32(dev, 0x400804, 0xc0000000); ++ nv_wr32(dev, 0x400108, 0x001); ++ status &= ~0x001; ++ if (!status) ++ return 0; ++ } ++ ++ /* M2MF: Memory to memory copy engine. 
*/ ++ if (status & 0x002) { ++ u32 ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff; ++ if (display) { ++ NV_INFO(dev, "PGRAPH - TRAP_M2MF"); ++ nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus); ++ printk("\n"); ++ NV_INFO(dev, "PGRAPH - TRAP_M2MF %08x %08x %08x %08x\n", ++ nv_rd32(dev, 0x406804), nv_rd32(dev, 0x406808), ++ nv_rd32(dev, 0x40680c), nv_rd32(dev, 0x406810)); ++ ++ } ++ ++ /* No sane way found yet -- just reset the bugger. */ ++ nv_wr32(dev, 0x400040, 2); ++ nv_wr32(dev, 0x400040, 0); ++ nv_wr32(dev, 0x406800, 0xc0000000); ++ nv_wr32(dev, 0x400108, 0x002); ++ status &= ~0x002; ++ } ++ ++ /* VFETCH: Fetches data from vertex buffers. */ ++ if (status & 0x004) { ++ u32 ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff; ++ if (display) { ++ NV_INFO(dev, "PGRAPH - TRAP_VFETCH"); ++ nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus); ++ printk("\n"); ++ NV_INFO(dev, "PGRAPH - TRAP_VFETCH %08x %08x %08x %08x\n", ++ nv_rd32(dev, 0x400c00), nv_rd32(dev, 0x400c08), ++ nv_rd32(dev, 0x400c0c), nv_rd32(dev, 0x400c10)); ++ } ++ ++ nv_wr32(dev, 0x400c04, 0xc0000000); ++ nv_wr32(dev, 0x400108, 0x004); ++ status &= ~0x004; ++ } ++ ++ /* STRMOUT: DirectX streamout / OpenGL transform feedback. */ ++ if (status & 0x008) { ++ ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff; ++ if (display) { ++ NV_INFO(dev, "PGRAPH - TRAP_STRMOUT"); ++ nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus); ++ printk("\n"); ++ NV_INFO(dev, "PGRAPH - TRAP_STRMOUT %08x %08x %08x %08x\n", ++ nv_rd32(dev, 0x401804), nv_rd32(dev, 0x401808), ++ nv_rd32(dev, 0x40180c), nv_rd32(dev, 0x401810)); ++ ++ } ++ ++ /* No sane way found yet -- just reset the bugger. */ ++ nv_wr32(dev, 0x400040, 0x80); ++ nv_wr32(dev, 0x400040, 0); ++ nv_wr32(dev, 0x401800, 0xc0000000); ++ nv_wr32(dev, 0x400108, 0x008); ++ status &= ~0x008; ++ } ++ ++ /* CCACHE: Handles code and c[] caches and fills them. 
*/ ++ if (status & 0x010) { ++ ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff; ++ if (display) { ++ NV_INFO(dev, "PGRAPH - TRAP_CCACHE"); ++ nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus); ++ printk("\n"); ++ NV_INFO(dev, "PGRAPH - TRAP_CCACHE %08x %08x %08x %08x" ++ " %08x %08x %08x\n", ++ nv_rd32(dev, 0x405800), nv_rd32(dev, 0x405804), ++ nv_rd32(dev, 0x405808), nv_rd32(dev, 0x40580c), ++ nv_rd32(dev, 0x405810), nv_rd32(dev, 0x405814), ++ nv_rd32(dev, 0x40581c)); ++ ++ } ++ ++ nv_wr32(dev, 0x405018, 0xc0000000); ++ nv_wr32(dev, 0x400108, 0x010); ++ status &= ~0x010; ++ } ++ ++ /* Unknown, not seen yet... 0x402000 is the only trap status reg ++ * remaining, so try to handle it anyway. Perhaps related to that ++ * unknown DMA slot on tesla? */ ++ if (status & 0x20) { ++ ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff; ++ if (display) ++ NV_INFO(dev, "PGRAPH - TRAP_UNKC04 0x%08x\n", ustatus); ++ nv_wr32(dev, 0x402000, 0xc0000000); ++ /* no status modifiction on purpose */ ++ } ++ ++ /* TEXTURE: CUDA texturing units */ ++ if (status & 0x040) { ++ nv50_pgraph_tp_trap(dev, 6, 0x408900, 0x408600, display, ++ "PGRAPH - TRAP_TEXTURE"); ++ nv_wr32(dev, 0x400108, 0x040); ++ status &= ~0x040; ++ } ++ ++ /* MP: CUDA execution engines. */ ++ if (status & 0x080) { ++ nv50_pgraph_tp_trap(dev, 7, 0x408314, 0x40831c, display, ++ "PGRAPH - TRAP_MP"); ++ nv_wr32(dev, 0x400108, 0x080); ++ status &= ~0x080; ++ } ++ ++ /* TPDMA: Handles TP-initiated uncached memory accesses: ++ * l[], g[], stack, 2d surfaces, render targets. 
*/ ++ if (status & 0x100) { ++ nv50_pgraph_tp_trap(dev, 8, 0x408e08, 0x408708, display, ++ "PGRAPH - TRAP_TPDMA"); ++ nv_wr32(dev, 0x400108, 0x100); ++ status &= ~0x100; ++ } ++ ++ if (status) { ++ if (display) ++ NV_INFO(dev, "PGRAPH - TRAP: unknown 0x%08x\n", status); ++ nv_wr32(dev, 0x400108, status); ++ } ++ ++ return 1; ++} ++ ++static int ++nv50_graph_isr_chid(struct drm_device *dev, u64 inst) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_channel *chan; ++ unsigned long flags; ++ int i; ++ ++ spin_lock_irqsave(&dev_priv->channels.lock, flags); ++ for (i = 0; i < dev_priv->engine.fifo.channels; i++) { ++ chan = dev_priv->channels.ptr[i]; ++ if (!chan || !chan->ramin) ++ continue; ++ ++ if (inst == chan->ramin->vinst) ++ break; ++ } ++ spin_unlock_irqrestore(&dev_priv->channels.lock, flags); ++ return i; ++} ++ ++static void ++nv50_graph_isr(struct drm_device *dev) ++{ ++ u32 stat; ++ ++ while ((stat = nv_rd32(dev, 0x400100))) { ++ u64 inst = (u64)(nv_rd32(dev, 0x40032c) & 0x0fffffff) << 12; ++ u32 chid = nv50_graph_isr_chid(dev, inst); ++ u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR); ++ u32 subc = (addr & 0x00070000) >> 16; ++ u32 mthd = (addr & 0x00001ffc); ++ u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA); ++ u32 class = nv_rd32(dev, 0x400814); ++ u32 show = stat; ++ ++ if (stat & 0x00000010) { ++ if (!nouveau_gpuobj_mthd_call2(dev, chid, class, ++ mthd, data)) ++ show &= ~0x00000010; ++ } ++ ++ if (stat & 0x00001000) { ++ nv_wr32(dev, 0x400500, 0x00000000); ++ nv_wr32(dev, 0x400100, 0x00001000); ++ nv_mask(dev, 0x40013c, 0x00001000, 0x00000000); ++ nv50_graph_context_switch(dev); ++ stat &= ~0x00001000; ++ show &= ~0x00001000; ++ } ++ ++ show = (show && nouveau_ratelimit()) ? 
show : 0; ++ ++ if (show & 0x00100000) { ++ u32 ecode = nv_rd32(dev, 0x400110); ++ NV_INFO(dev, "PGRAPH - DATA_ERROR "); ++ nouveau_enum_print(nv50_data_error_names, ecode); ++ printk("\n"); ++ } ++ ++ if (stat & 0x00200000) { ++ if (!nv50_pgraph_trap_handler(dev, show, inst, chid)) ++ show &= ~0x00200000; ++ } ++ ++ nv_wr32(dev, 0x400100, stat); ++ nv_wr32(dev, 0x400500, 0x00010001); ++ ++ if (show) { ++ NV_INFO(dev, "PGRAPH -"); ++ nouveau_bitfield_print(nv50_graph_intr, show); ++ printk("\n"); ++ NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) subc %d " ++ "class 0x%04x mthd 0x%04x data 0x%08x\n", ++ chid, inst, subc, class, mthd, data); ++ } ++ } ++ ++ if (nv_rd32(dev, 0x400824) & (1 << 31)) ++ nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); ++} +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv50_instmem.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv50_instmem.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv50_instmem.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv50_instmem.c 2010-11-12 06:18:06.000000000 +0100 +@@ -131,10 +131,10 @@ + } + + /* we need a channel to plug into the hw to control the BARs */ +- ret = nv50_channel_new(dev, 128*1024, &dev_priv->fifos[0]); ++ ret = nv50_channel_new(dev, 128*1024, &dev_priv->channels.ptr[0]); + if (ret) + return ret; +- chan = dev_priv->fifos[127] = dev_priv->fifos[0]; ++ chan = dev_priv->channels.ptr[127] = dev_priv->channels.ptr[0]; + + /* allocate page table for PRAMIN BAR */ + ret = nouveau_gpuobj_new(dev, chan, (dev_priv->ramin_size >> 12) * 8, +@@ -157,10 +157,7 @@ + nv_wo32(priv->pramin_bar, 0x10, 0x00000000); + nv_wo32(priv->pramin_bar, 0x14, 0x00000000); + +- /* map channel into PRAMIN, gpuobj didn't do it for us */ +- ret = nv50_instmem_bind(dev, chan->ramin); +- if (ret) +- return ret; ++ nv50_instmem_map(chan->ramin); + + /* poke regs... 
*/ + nv_wr32(dev, 0x001704, 0x00000000 | (chan->ramin->vinst >> 12)); +@@ -240,7 +237,7 @@ + { + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; +- struct nouveau_channel *chan = dev_priv->fifos[0]; ++ struct nouveau_channel *chan = dev_priv->channels.ptr[0]; + int i; + + NV_DEBUG(dev, "\n"); +@@ -264,8 +261,8 @@ + nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]); + dev_priv->vm_vram_pt_nr = 0; + +- nv50_channel_del(&dev_priv->fifos[0]); +- dev_priv->fifos[127] = NULL; ++ nv50_channel_del(&dev_priv->channels.ptr[0]); ++ dev_priv->channels.ptr[127] = NULL; + } + + dev_priv->engine.instmem.priv = NULL; +@@ -276,16 +273,8 @@ + nv50_instmem_suspend(struct drm_device *dev) + { + struct drm_nouveau_private *dev_priv = dev->dev_private; +- struct nouveau_channel *chan = dev_priv->fifos[0]; +- struct nouveau_gpuobj *ramin = chan->ramin; +- int i; + +- ramin->im_backing_suspend = vmalloc(ramin->size); +- if (!ramin->im_backing_suspend) +- return -ENOMEM; +- +- for (i = 0; i < ramin->size; i += 4) +- ramin->im_backing_suspend[i/4] = nv_ri32(dev, i); ++ dev_priv->ramin_available = false; + return 0; + } + +@@ -294,18 +283,9 @@ + { + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; +- struct nouveau_channel *chan = dev_priv->fifos[0]; +- struct nouveau_gpuobj *ramin = chan->ramin; ++ struct nouveau_channel *chan = dev_priv->channels.ptr[0]; + int i; + +- dev_priv->ramin_available = false; +- dev_priv->ramin_base = ~0; +- for (i = 0; i < ramin->size; i += 4) +- nv_wo32(ramin, i, ramin->im_backing_suspend[i/4]); +- dev_priv->ramin_available = true; +- vfree(ramin->im_backing_suspend); +- ramin->im_backing_suspend = NULL; +- + /* Poke the relevant regs, and pray it works :) */ + nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12)); + nv_wr32(dev, NV50_PUNK_UNK1710, 0); +@@ -318,74 +298,95 @@ + + for (i = 0; i < 8; i++) + 
nv_wr32(dev, 0x1900 + (i*4), 0); ++ ++ dev_priv->ramin_available = true; + } + ++struct nv50_gpuobj_node { ++ struct nouveau_bo *vram; ++ struct drm_mm_node *ramin; ++ u32 align; ++}; ++ ++ + int +-nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, +- uint32_t *sz) ++nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align) + { ++ struct drm_device *dev = gpuobj->dev; ++ struct nv50_gpuobj_node *node = NULL; + int ret; + +- if (gpuobj->im_backing) +- return -EINVAL; +- +- *sz = ALIGN(*sz, 4096); +- if (*sz == 0) +- return -EINVAL; ++ node = kzalloc(sizeof(*node), GFP_KERNEL); ++ if (!node) ++ return -ENOMEM; ++ node->align = align; + +- ret = nouveau_bo_new(dev, NULL, *sz, 0, TTM_PL_FLAG_VRAM, 0, 0x0000, +- true, false, &gpuobj->im_backing); ++ ret = nouveau_bo_new(dev, NULL, size, align, TTM_PL_FLAG_VRAM, ++ 0, 0x0000, true, false, &node->vram); + if (ret) { + NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret); + return ret; + } + +- ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM); ++ ret = nouveau_bo_pin(node->vram, TTM_PL_FLAG_VRAM); + if (ret) { + NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret); +- nouveau_bo_ref(NULL, &gpuobj->im_backing); ++ nouveau_bo_ref(NULL, &node->vram); + return ret; + } + +- gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT; ++ gpuobj->vinst = node->vram->bo.mem.start << PAGE_SHIFT; ++ gpuobj->size = node->vram->bo.mem.num_pages << PAGE_SHIFT; ++ gpuobj->node = node; + return 0; + } + + void +-nv50_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) ++nv50_instmem_put(struct nouveau_gpuobj *gpuobj) + { +- struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nv50_gpuobj_node *node; + +- if (gpuobj && gpuobj->im_backing) { +- if (gpuobj->im_bound) +- dev_priv->engine.instmem.unbind(dev, gpuobj); +- nouveau_bo_unpin(gpuobj->im_backing); +- nouveau_bo_ref(NULL, &gpuobj->im_backing); +- gpuobj->im_backing = NULL; +- } ++ 
node = gpuobj->node; ++ gpuobj->node = NULL; ++ ++ nouveau_bo_unpin(node->vram); ++ nouveau_bo_ref(NULL, &node->vram); ++ kfree(node); + } + + int +-nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) ++nv50_instmem_map(struct nouveau_gpuobj *gpuobj) + { +- struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; + struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; +- struct nouveau_gpuobj *pramin_pt = priv->pramin_pt; +- uint32_t pte, pte_end; +- uint64_t vram; +- +- if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound) +- return -EINVAL; ++ struct nv50_gpuobj_node *node = gpuobj->node; ++ struct drm_device *dev = gpuobj->dev; ++ struct drm_mm_node *ramin = NULL; ++ u32 pte, pte_end; ++ u64 vram; ++ ++ do { ++ if (drm_mm_pre_get(&dev_priv->ramin_heap)) ++ return -ENOMEM; ++ ++ spin_lock(&dev_priv->ramin_lock); ++ ramin = drm_mm_search_free(&dev_priv->ramin_heap, gpuobj->size, ++ node->align, 0); ++ if (ramin == NULL) { ++ spin_unlock(&dev_priv->ramin_lock); ++ return -ENOMEM; ++ } + +- NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n", +- gpuobj->im_pramin->start, gpuobj->im_pramin->size); ++ ramin = drm_mm_get_block_atomic(ramin, gpuobj->size, node->align); ++ spin_unlock(&dev_priv->ramin_lock); ++ } while (ramin == NULL); + +- pte = (gpuobj->im_pramin->start >> 12) << 1; +- pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte; ++ pte = (ramin->start >> 12) << 1; ++ pte_end = ((ramin->size >> 12) << 1) + pte; + vram = gpuobj->vinst; + + NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n", +- gpuobj->im_pramin->start, pte, pte_end); ++ ramin->start, pte, pte_end); + NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst); + + vram |= 1; +@@ -395,46 +396,45 @@ + } + + while (pte < pte_end) { +- nv_wo32(pramin_pt, (pte * 4) + 0, lower_32_bits(vram)); +- nv_wo32(pramin_pt, (pte * 4) + 4, upper_32_bits(vram)); ++ nv_wo32(priv->pramin_pt, (pte * 4) + 0, 
lower_32_bits(vram)); ++ nv_wo32(priv->pramin_pt, (pte * 4) + 4, upper_32_bits(vram)); + vram += 0x1000; + pte += 2; + } + dev_priv->engine.instmem.flush(dev); + +- nv50_vm_flush(dev, 4); + nv50_vm_flush(dev, 6); + +- gpuobj->im_bound = 1; ++ node->ramin = ramin; ++ gpuobj->pinst = ramin->start; + return 0; + } + +-int +-nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) ++void ++nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj) + { +- struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; + struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; +- uint32_t pte, pte_end; +- +- if (gpuobj->im_bound == 0) +- return -EINVAL; ++ struct nv50_gpuobj_node *node = gpuobj->node; ++ u32 pte, pte_end; + +- /* can happen during late takedown */ +- if (unlikely(!dev_priv->ramin_available)) +- return 0; ++ if (!node->ramin || !dev_priv->ramin_available) ++ return; + +- pte = (gpuobj->im_pramin->start >> 12) << 1; +- pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte; ++ pte = (node->ramin->start >> 12) << 1; ++ pte_end = ((node->ramin->size >> 12) << 1) + pte; + + while (pte < pte_end) { + nv_wo32(priv->pramin_pt, (pte * 4) + 0, 0x00000000); + nv_wo32(priv->pramin_pt, (pte * 4) + 4, 0x00000000); + pte += 2; + } +- dev_priv->engine.instmem.flush(dev); ++ dev_priv->engine.instmem.flush(gpuobj->dev); + +- gpuobj->im_bound = 0; +- return 0; ++ spin_lock(&dev_priv->ramin_lock); ++ drm_mm_put_block(node->ramin); ++ node->ramin = NULL; ++ spin_unlock(&dev_priv->ramin_lock); + } + + void +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv84_crypt.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv84_crypt.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nv84_crypt.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nv84_crypt.c 2010-11-12 06:18:06.000000000 +0100 +@@ -0,0 +1,137 @@ ++/* ++ * Copyright 2010 Red Hat Inc. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the "Software"), ++ * to deal in the Software without restriction, including without limitation ++ * the rights to use, copy, modify, merge, publish, distribute, sublicense, ++ * and/or sell copies of the Software, and to permit persons to whom the ++ * Software is furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR ++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR ++ * OTHER DEALINGS IN THE SOFTWARE. 
++ * ++ * Authors: Ben Skeggs ++ */ ++ ++#include "drmP.h" ++#include "nouveau_drv.h" ++#include "nouveau_util.h" ++ ++static void nv84_crypt_isr(struct drm_device *); ++ ++int ++nv84_crypt_create_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_gpuobj *ramin = chan->ramin; ++ int ret; ++ ++ NV_DEBUG(dev, "ch%d\n", chan->id); ++ ++ ret = nouveau_gpuobj_new(dev, chan, 256, 0, ++ NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, ++ &chan->crypt_ctx); ++ if (ret) ++ return ret; ++ ++ nv_wo32(ramin, 0xa0, 0x00190000); ++ nv_wo32(ramin, 0xa4, chan->crypt_ctx->vinst + 0xff); ++ nv_wo32(ramin, 0xa8, chan->crypt_ctx->vinst); ++ nv_wo32(ramin, 0xac, 0); ++ nv_wo32(ramin, 0xb0, 0); ++ nv_wo32(ramin, 0xb4, 0); ++ ++ dev_priv->engine.instmem.flush(dev); ++ return 0; ++} ++ ++void ++nv84_crypt_destroy_context(struct nouveau_channel *chan) ++{ ++ struct drm_device *dev = chan->dev; ++ u32 inst; ++ ++ if (!chan->ramin) ++ return; ++ ++ inst = (chan->ramin->vinst >> 12); ++ inst |= 0x80000000; ++ ++ /* mark context as invalid if still on the hardware, not ++ * doing this causes issues the next time PCRYPT is used, ++ * unsurprisingly :) ++ */ ++ nv_wr32(dev, 0x10200c, 0x00000000); ++ if (nv_rd32(dev, 0x102188) == inst) ++ nv_mask(dev, 0x102188, 0x80000000, 0x00000000); ++ if (nv_rd32(dev, 0x10218c) == inst) ++ nv_mask(dev, 0x10218c, 0x80000000, 0x00000000); ++ nv_wr32(dev, 0x10200c, 0x00000010); ++ ++ nouveau_gpuobj_ref(NULL, &chan->crypt_ctx); ++} ++ ++void ++nv84_crypt_tlb_flush(struct drm_device *dev) ++{ ++ nv50_vm_flush(dev, 0x0a); ++} ++ ++int ++nv84_crypt_init(struct drm_device *dev) ++{ ++ struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt; ++ ++ if (!pcrypt->registered) { ++ NVOBJ_CLASS(dev, 0x74c1, CRYPT); ++ pcrypt->registered = true; ++ } ++ ++ nv_mask(dev, 0x000200, 0x00004000, 0x00000000); ++ 
nv_mask(dev, 0x000200, 0x00004000, 0x00004000); ++ ++ nouveau_irq_register(dev, 14, nv84_crypt_isr); ++ nv_wr32(dev, 0x102130, 0xffffffff); ++ nv_wr32(dev, 0x102140, 0xffffffbf); ++ ++ nv_wr32(dev, 0x10200c, 0x00000010); ++ return 0; ++} ++ ++void ++nv84_crypt_fini(struct drm_device *dev) ++{ ++ nv_wr32(dev, 0x102140, 0x00000000); ++ nouveau_irq_unregister(dev, 14); ++} ++ ++static void ++nv84_crypt_isr(struct drm_device *dev) ++{ ++ u32 stat = nv_rd32(dev, 0x102130); ++ u32 mthd = nv_rd32(dev, 0x102190); ++ u32 data = nv_rd32(dev, 0x102194); ++ u32 inst = nv_rd32(dev, 0x102188) & 0x7fffffff; ++ int show = nouveau_ratelimit(); ++ ++ if (show) { ++ NV_INFO(dev, "PCRYPT_INTR: 0x%08x 0x%08x 0x%08x 0x%08x\n", ++ stat, mthd, data, inst); ++ } ++ ++ nv_wr32(dev, 0x102130, stat); ++ nv_wr32(dev, 0x10200c, 0x10); ++ ++ nv50_fb_vm_trap(dev, show, "PCRYPT"); ++} +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nvc0_instmem.c linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nvc0_instmem.c +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nvc0_instmem.c 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nvc0_instmem.c 2010-11-12 06:18:06.000000000 +0100 +@@ -26,67 +26,89 @@ + + #include "nouveau_drv.h" + ++struct nvc0_gpuobj_node { ++ struct nouveau_bo *vram; ++ struct drm_mm_node *ramin; ++ u32 align; ++}; ++ + int +-nvc0_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, +- uint32_t *size) ++nvc0_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align) + { ++ struct drm_device *dev = gpuobj->dev; ++ struct nvc0_gpuobj_node *node = NULL; + int ret; + +- *size = ALIGN(*size, 4096); +- if (*size == 0) +- return -EINVAL; ++ node = kzalloc(sizeof(*node), GFP_KERNEL); ++ if (!node) ++ return -ENOMEM; ++ node->align = align; + +- ret = nouveau_bo_new(dev, NULL, *size, 0, TTM_PL_FLAG_VRAM, 0, 0x0000, +- true, false, &gpuobj->im_backing); ++ ret = nouveau_bo_new(dev, NULL, size, align, TTM_PL_FLAG_VRAM, ++ 0, 0x0000, 
true, false, &node->vram); + if (ret) { + NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret); + return ret; + } + +- ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM); ++ ret = nouveau_bo_pin(node->vram, TTM_PL_FLAG_VRAM); + if (ret) { + NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret); +- nouveau_bo_ref(NULL, &gpuobj->im_backing); ++ nouveau_bo_ref(NULL, &node->vram); + return ret; + } + +- gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT; ++ gpuobj->vinst = node->vram->bo.mem.start << PAGE_SHIFT; ++ gpuobj->size = node->vram->bo.mem.num_pages << PAGE_SHIFT; ++ gpuobj->node = node; + return 0; + } + + void +-nvc0_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) ++nvc0_instmem_put(struct nouveau_gpuobj *gpuobj) + { +- struct drm_nouveau_private *dev_priv = dev->dev_private; ++ struct nvc0_gpuobj_node *node; + +- if (gpuobj && gpuobj->im_backing) { +- if (gpuobj->im_bound) +- dev_priv->engine.instmem.unbind(dev, gpuobj); +- nouveau_bo_unpin(gpuobj->im_backing); +- nouveau_bo_ref(NULL, &gpuobj->im_backing); +- gpuobj->im_backing = NULL; +- } ++ node = gpuobj->node; ++ gpuobj->node = NULL; ++ ++ nouveau_bo_unpin(node->vram); ++ nouveau_bo_ref(NULL, &node->vram); ++ kfree(node); + } + + int +-nvc0_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) ++nvc0_instmem_map(struct nouveau_gpuobj *gpuobj) + { +- struct drm_nouveau_private *dev_priv = dev->dev_private; +- uint32_t pte, pte_end; +- uint64_t vram; +- +- if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound) +- return -EINVAL; +- +- NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n", +- gpuobj->im_pramin->start, gpuobj->im_pramin->size); ++ struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; ++ struct nvc0_gpuobj_node *node = gpuobj->node; ++ struct drm_device *dev = gpuobj->dev; ++ struct drm_mm_node *ramin = NULL; ++ u32 pte, pte_end; ++ u64 vram; ++ ++ do { ++ if (drm_mm_pre_get(&dev_priv->ramin_heap)) ++ return 
-ENOMEM; ++ ++ spin_lock(&dev_priv->ramin_lock); ++ ramin = drm_mm_search_free(&dev_priv->ramin_heap, gpuobj->size, ++ node->align, 0); ++ if (ramin == NULL) { ++ spin_unlock(&dev_priv->ramin_lock); ++ return -ENOMEM; ++ } ++ ++ ramin = drm_mm_get_block_atomic(ramin, gpuobj->size, node->align); ++ spin_unlock(&dev_priv->ramin_lock); ++ } while (ramin == NULL); + +- pte = gpuobj->im_pramin->start >> 12; +- pte_end = (gpuobj->im_pramin->size >> 12) + pte; ++ pte = (ramin->start >> 12) << 1; ++ pte_end = ((ramin->size >> 12) << 1) + pte; + vram = gpuobj->vinst; + + NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n", +- gpuobj->im_pramin->start, pte, pte_end); ++ ramin->start, pte, pte_end); + NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst); + + while (pte < pte_end) { +@@ -103,30 +125,35 @@ + nv_wr32(dev, 0x100cbc, 0x80000005); + } + +- gpuobj->im_bound = 1; ++ node->ramin = ramin; ++ gpuobj->pinst = ramin->start; + return 0; + } + +-int +-nvc0_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) ++void ++nvc0_instmem_unmap(struct nouveau_gpuobj *gpuobj) + { +- struct drm_nouveau_private *dev_priv = dev->dev_private; +- uint32_t pte, pte_end; ++ struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; ++ struct nvc0_gpuobj_node *node = gpuobj->node; ++ u32 pte, pte_end; ++ ++ if (!node->ramin || !dev_priv->ramin_available) ++ return; + +- if (gpuobj->im_bound == 0) +- return -EINVAL; ++ pte = (node->ramin->start >> 12) << 1; ++ pte_end = ((node->ramin->size >> 12) << 1) + pte; + +- pte = gpuobj->im_pramin->start >> 12; +- pte_end = (gpuobj->im_pramin->size >> 12) + pte; + while (pte < pte_end) { +- nv_wr32(dev, 0x702000 + (pte * 8), 0); +- nv_wr32(dev, 0x702004 + (pte * 8), 0); ++ nv_wr32(gpuobj->dev, 0x702000 + (pte * 8), 0); ++ nv_wr32(gpuobj->dev, 0x702004 + (pte * 8), 0); + pte++; + } +- dev_priv->engine.instmem.flush(dev); ++ dev_priv->engine.instmem.flush(gpuobj->dev); + +- gpuobj->im_bound = 0; +- return 0; ++ 
spin_lock(&dev_priv->ramin_lock); ++ drm_mm_put_block(node->ramin); ++ node->ramin = NULL; ++ spin_unlock(&dev_priv->ramin_lock); + } + + void +diff -Naur linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nvreg.h linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nvreg.h +--- linux-2.6.37-rc1/drivers/gpu/drm/nouveau/nvreg.h 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/drivers/gpu/drm/nouveau/nvreg.h 2010-11-12 06:18:06.000000000 +0100 +@@ -153,7 +153,8 @@ + #define NV_PCRTC_START 0x00600800 + #define NV_PCRTC_CONFIG 0x00600804 + # define NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA (1 << 0) +-# define NV_PCRTC_CONFIG_START_ADDRESS_HSYNC (2 << 0) ++# define NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC (4 << 0) ++# define NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC (2 << 0) + #define NV_PCRTC_CURSOR_CONFIG 0x00600810 + # define NV_PCRTC_CURSOR_CONFIG_ENABLE_ENABLE (1 << 0) + # define NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE (1 << 4) +diff -Naur linux-2.6.37-rc1/include/drm/nouveau_drm.h linux-2.6.37-rc1.nouveau/include/drm/nouveau_drm.h +--- linux-2.6.37-rc1/include/drm/nouveau_drm.h 2010-11-01 12:54:12.000000000 +0100 ++++ linux-2.6.37-rc1.nouveau/include/drm/nouveau_drm.h 2010-11-12 06:18:06.000000000 +0100 +@@ -80,6 +80,8 @@ + #define NOUVEAU_GETPARAM_VM_VRAM_BASE 12 + #define NOUVEAU_GETPARAM_GRAPH_UNITS 13 + #define NOUVEAU_GETPARAM_PTIMER_TIME 14 ++#define NOUVEAU_GETPARAM_HAS_BO_USAGE 15 ++#define NOUVEAU_GETPARAM_HAS_PAGEFLIP 16 + struct drm_nouveau_getparam { + uint64_t param; + uint64_t value; +@@ -95,6 +97,12 @@ + #define NOUVEAU_GEM_DOMAIN_GART (1 << 2) + #define NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3) + ++#define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00 ++#define NOUVEAU_GEM_TILE_16BPP 0x00000001 ++#define NOUVEAU_GEM_TILE_32BPP 0x00000002 ++#define NOUVEAU_GEM_TILE_ZETA 0x00000004 ++#define NOUVEAU_GEM_TILE_NONCONTIG 0x00000008 ++ + struct drm_nouveau_gem_info { + uint32_t handle; + uint32_t domain; +@@ -164,7 +172,6 @@ + }; + + #define NOUVEAU_GEM_CPU_PREP_NOWAIT 
0x00000001
+-#define NOUVEAU_GEM_CPU_PREP_NOBLOCK 0x00000002
+#define NOUVEAU_GEM_CPU_PREP_WRITE 0x00000004
+ struct drm_nouveau_gem_cpu_prep {
+ uint32_t handle;
diff --git a/projects/ATV/options b/projects/ATV/options
index ee2a8b8708..ddb04a5319 100644
--- a/projects/ATV/options
+++ b/projects/ATV/options
@@ -134,6 +134,9 @@
# use Mesa-master (latest git) instead latest released version
 MESA_MASTER="yes"

+# use linux-next (latest rc) instead latest released version
+ LINUX_NEXT="yes"
+
# Use VDPAU video acceleration (needs nVidia driver and a supported card)
 VDPAU="no"

diff --git a/projects/ION/options b/projects/ION/options
index 209efb238d..f7d5cd9c35 100644
--- a/projects/ION/options
+++ b/projects/ION/options
@@ -134,6 +134,9 @@
# use Mesa-master (latest git) instead latest released version
 MESA_MASTER="no"

+# use linux-next (latest rc) instead latest released version
+ LINUX_NEXT="no"
+
# Use VDPAU video acceleration (needs nVidia driver and a supported card)
 VDPAU="yes"

diff --git a/projects/generic/options b/projects/generic/options
index fe71dad5e9..efa94a9555 100644
--- a/projects/generic/options
+++ b/projects/generic/options
@@ -134,7 +134,10 @@
# use Mesa-master (latest git) instead latest released version
 MESA_MASTER="no"

+# use linux-next (latest rc) instead latest released version
+ LINUX_NEXT="no"
+
# Use VDPAU video acceleration (needs nVidia driver and a supported card)
 VDPAU="yes"

# Use VAAPI video acceleration (needs intel i965 driver and a supported card)
diff --git a/projects/intel/options b/projects/intel/options
index 8758d44965..01c30a808a 100644
--- a/projects/intel/options
+++ b/projects/intel/options
@@ -134,6 +134,9 @@
# use Mesa-master (latest git) instead latest released version
 MESA_MASTER="no"

+# use linux-next (latest rc) instead latest released version
+ LINUX_NEXT="no"
+
# Use VDPAU video acceleration (needs nVidia driver and a supported card)
VDPAU="no"