author | David Woodhouse <dwmw2@infradead.org> | 2007-07-23 11:20:10 +0200 |
---|---|---|
committer | David Woodhouse <dwmw2@infradead.org> | 2007-07-23 11:20:10 +0200 |
commit | 39fe5434cb9de5da40510028b17b96bc4eb312b3 (patch) | |
tree | 7a02a317b9ad57da51ca99887c119e779ccf3f13 /drivers/edac | |
parent | [JFFS2] Add declaration of jffs2_lzo_{init,exit} to compr.h (diff) | |
parent | Linux 2.6.23-rc1 (diff) | |
download | linux-39fe5434cb9de5da40510028b17b96bc4eb312b3.tar.xz linux-39fe5434cb9de5da40510028b17b96bc4eb312b3.zip |
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'drivers/edac')
-rw-r--r-- | drivers/edac/Kconfig | 65 |
-rw-r--r-- | drivers/edac/Makefile | 17 |
-rw-r--r-- | drivers/edac/amd76x_edac.c | 75 |
-rw-r--r-- | drivers/edac/e752x_edac.c | 320 |
-rw-r--r-- | drivers/edac/e7xxx_edac.c | 125 |
-rw-r--r-- | drivers/edac/edac_core.h (renamed from drivers/edac/edac_mc.h) | 506 |
-rw-r--r-- | drivers/edac/edac_device.c | 746 |
-rw-r--r-- | drivers/edac/edac_device_sysfs.c | 896 |
-rw-r--r-- | drivers/edac/edac_mc.c | 1675 |
-rw-r--r-- | drivers/edac/edac_mc_sysfs.c | 1024 |
-rw-r--r-- | drivers/edac/edac_module.c | 222 |
-rw-r--r-- | drivers/edac/edac_module.h | 77 |
-rw-r--r-- | drivers/edac/edac_pci.c | 433 |
-rw-r--r-- | drivers/edac/edac_pci_sysfs.c | 620 |
-rw-r--r-- | drivers/edac/edac_stub.c | 46 |
-rw-r--r-- | drivers/edac/i3000_edac.c | 506 |
-rw-r--r-- | drivers/edac/i5000_edac.c | 1505 |
-rw-r--r-- | drivers/edac/i82443bxgx_edac.c | 402 |
-rw-r--r-- | drivers/edac/i82860_edac.c | 56 |
-rw-r--r-- | drivers/edac/i82875p_edac.c | 92 |
-rw-r--r-- | drivers/edac/i82975x_edac.c | 666 |
-rw-r--r-- | drivers/edac/pasemi_edac.c | 299 |
-rw-r--r-- | drivers/edac/r82600_edac.c | 77 |
23 files changed, 8645 insertions, 1805 deletions
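The per-driver hunks below all apply the same EDAC core API migration brought in by this merge: edac_mc_alloc() gains a fourth (mc index) argument, edac_mc_add_mc() drops its index argument, drivers now fill in mci->dev_name via pci_name(), optionally register a generic PCI error control with edac_pci_create_generic_ctl(), and export the edac_op_state module parameter. The following is a minimal sketch of that probe/remove pattern, assuming a hypothetical "example" driver; the example_* and EXAMPLE_* names are placeholders, not symbols from this merge, and only the EDAC core calls visible in the diff itself are assumed.

```c
/*
 * Sketch of the probe/remove flow the updated drivers follow after this
 * merge.  All example_* / EXAMPLE_* identifiers are hypothetical; the
 * EDAC core calls are the ones used by the hunks below.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/edac.h>
#include "edac_core.h"

#define EXAMPLE_NR_CSROWS	8
#define EXAMPLE_NR_CHANS	2
#define EDAC_MOD_STR		"example_edac"

static struct edac_pci_ctl_info *example_pci;	/* generic PCI error control */

static void example_check(struct mem_ctl_info *mci)
{
	/* poll chipset error registers, then report via
	 * edac_mc_handle_ce() / edac_mc_handle_ue() as the real drivers do */
}

static int example_probe1(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;

	/* edac_mc_alloc() now takes a fourth argument: the mc index */
	mci = edac_mc_alloc(0, EXAMPLE_NR_CSROWS, EXAMPLE_NR_CHANS, 0);
	if (mci == NULL)
		return -ENOMEM;

	mci->dev = &pdev->dev;
	mci->mod_name = EDAC_MOD_STR;
	mci->ctl_name = "example";
	mci->dev_name = pci_name(pdev);	/* new: drivers fill in dev_name */
	mci->edac_check = example_check;

	/* edac_mc_add_mc() no longer takes an instance index */
	if (edac_mc_add_mc(mci))
		goto fail;

	/* optional generic PCI-bus error reporting control */
	example_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
	if (!example_pci)
		printk(KERN_WARNING
		       "%s(): PCI error report via EDAC not setup\n",
		       __func__);

	return 0;

fail:
	edac_mc_free(mci);
	return -ENODEV;
}

static void example_remove_one(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;

	if (example_pci)
		edac_pci_release_generic_ctl(example_pci);

	mci = edac_mc_del_mc(&pdev->dev);
	if (mci)
		edac_mc_free(mci);
}

/* the updated drivers also export the poll/NMI selection knob */
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
```

The real drivers in this merge (amd76x_edac.c, e752x_edac.c, e7xxx_edac.c below) additionally populate csrow and capability fields (mtype_cap, edac_ctl_cap, edac_cap) before calling edac_mc_add_mc(); the sketch omits that chipset-specific setup.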
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 807c402df049..1724c41d2414 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -3,20 +3,18 @@ # Copyright (c) 2003 Linux Networx # Licensed and distributed under the GPL # -# $Id: Kconfig,v 1.4.2.7 2005/07/08 22:05:38 dsp_llnl Exp $ -# -menu 'EDAC - error detection and reporting (RAS) (EXPERIMENTAL)' +menuconfig EDAC + bool "EDAC - error detection and reporting (EXPERIMENTAL)" depends on HAS_IOMEM - -config EDAC - tristate "EDAC core system error reporting (EXPERIMENTAL)" - depends on X86 && EXPERIMENTAL + depends on EXPERIMENTAL + depends on X86 || MIPS || PPC help EDAC is designed to report errors in the core system. These are low-level errors that are reported in the CPU or - supporting chipset: memory errors, cache errors, PCI errors, - thermal throttling, etc.. If unsure, select 'Y'. + supporting chipset or other subsystems: + memory errors, cache errors, PCI errors, thermal throttling, etc.. + If unsure, select 'Y'. If this code is reporting problems on your system, please see the EDAC project web pages for more information at: @@ -30,13 +28,12 @@ config EDAC There is also a mailing list for the EDAC project, which can be found via the sourceforge page. +if EDAC comment "Reporting subsystems" - depends on EDAC config EDAC_DEBUG bool "Debugging" - depends on EDAC help This turns on debugging information for the entire EDAC sub-system. You can insert module with "debug_level=x", current @@ -45,7 +42,6 @@ config EDAC_DEBUG config EDAC_MM_EDAC tristate "Main Memory EDAC (Error Detection And Correction) reporting" - depends on EDAC default y help Some systems are able to detect and correct errors in main @@ -77,6 +73,14 @@ config EDAC_E752X Support for error detection and correction on the Intel E7520, E7525, E7320 server chipsets. +config EDAC_I82443BXGX + tristate "Intel 82443BX/GX (440BX/GX)" + depends on EDAC_MM_EDAC && PCI && X86_32 + depends on BROKEN + help + Support for error detection and correction on the Intel + 82443BX/GX memory controllers (440BX/GX chipsets). + config EDAC_I82875P tristate "Intel 82875p (D82875P, E7210)" depends on EDAC_MM_EDAC && PCI && X86_32 @@ -84,6 +88,20 @@ config EDAC_I82875P Support for error detection and correction on the Intel DP82785P and E7210 server chipsets. +config EDAC_I82975X + tristate "Intel 82975x (D82975x)" + depends on EDAC_MM_EDAC && PCI && X86 + help + Support for error detection and correction on the Intel + DP82975x server chipsets. + +config EDAC_I3000 + tristate "Intel 3000/3010" + depends on EDAC_MM_EDAC && PCI && X86_32 + help + Support for error detection and correction on the Intel + 3000 and 3010 server chipsets. + config EDAC_I82860 tristate "Intel 82860" depends on EDAC_MM_EDAC && PCI && X86_32 @@ -98,17 +116,20 @@ config EDAC_R82600 Support for error detection and correction on the Radisys 82600 embedded chipset. -choice - prompt "Error detecting method" - depends on EDAC - default EDAC_POLL +config EDAC_I5000 + tristate "Intel Greencreek/Blackford chipset" + depends on EDAC_MM_EDAC && X86 && PCI + help + Support for error detection and correction the Intel + Greekcreek/Blackford chipsets. -config EDAC_POLL - bool "Poll for errors" - depends on EDAC +config EDAC_PASEMI + tristate "PA Semi PWRficient" + depends on EDAC_MM_EDAC && PCI + depends on PPC help - Poll the chipset periodically to detect errors. + Support for error detection and correction on PA Semi + PWRficient. 
-endchoice -endmenu +endif # EDAC diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index 93137fdab4b3..02c09f0ff157 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile @@ -5,14 +5,27 @@ # This file may be distributed under the terms of the # GNU General Public License. # -# $Id: Makefile,v 1.4.2.3 2005/07/08 22:05:38 dsp_llnl Exp $ -obj-$(CONFIG_EDAC_MM_EDAC) += edac_mc.o +obj-$(CONFIG_EDAC) := edac_stub.o +obj-$(CONFIG_EDAC_MM_EDAC) += edac_core.o + +edac_core-objs := edac_mc.o edac_device.o edac_mc_sysfs.o edac_pci_sysfs.o +edac_core-objs += edac_module.o edac_device_sysfs.o + +ifdef CONFIG_PCI +edac_core-objs += edac_pci.o edac_pci_sysfs.o +endif + obj-$(CONFIG_EDAC_AMD76X) += amd76x_edac.o +obj-$(CONFIG_EDAC_I5000) += i5000_edac.o obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o obj-$(CONFIG_EDAC_E752X) += e752x_edac.o +obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o obj-$(CONFIG_EDAC_I82875P) += i82875p_edac.o +obj-$(CONFIG_EDAC_I82975X) += i82975x_edac.o +obj-$(CONFIG_EDAC_I3000) += i3000_edac.o obj-$(CONFIG_EDAC_I82860) += i82860_edac.o obj-$(CONFIG_EDAC_R82600) += r82600_edac.o +obj-$(CONFIG_EDAC_PASEMI) += pasemi_edac.o diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c index f79f6b587bfa..f22075410591 100644 --- a/drivers/edac/amd76x_edac.c +++ b/drivers/edac/amd76x_edac.c @@ -17,9 +17,9 @@ #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/slab.h> -#include "edac_mc.h" +#include "edac_core.h" -#define AMD76X_REVISION " Ver: 2.0.1 " __DATE__ +#define AMD76X_REVISION " Ver: 2.0.2 " __DATE__ #define EDAC_MOD_STR "amd76x_edac" #define amd76x_printk(level, fmt, arg...) \ @@ -86,13 +86,13 @@ struct amd76x_dev_info { static const struct amd76x_dev_info amd76x_devs[] = { [AMD761] = { - .ctl_name = "AMD761" - }, + .ctl_name = "AMD761"}, [AMD762] = { - .ctl_name = "AMD762" - }, + .ctl_name = "AMD762"}, }; +static struct edac_pci_ctl_info *amd76x_pci; + /** * amd76x_get_error_info - fetch error information * @mci: Memory controller @@ -102,21 +102,21 @@ static const struct amd76x_dev_info amd76x_devs[] = { * on the chip so that further errors will be reported */ static void amd76x_get_error_info(struct mem_ctl_info *mci, - struct amd76x_error_info *info) + struct amd76x_error_info *info) { struct pci_dev *pdev; pdev = to_pci_dev(mci->dev); pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, - &info->ecc_mode_status); + &info->ecc_mode_status); if (info->ecc_mode_status & BIT(8)) pci_write_bits32(pdev, AMD76X_ECC_MODE_STATUS, - (u32) BIT(8), (u32) BIT(8)); + (u32) BIT(8), (u32) BIT(8)); if (info->ecc_mode_status & BIT(9)) pci_write_bits32(pdev, AMD76X_ECC_MODE_STATUS, - (u32) BIT(9), (u32) BIT(9)); + (u32) BIT(9), (u32) BIT(9)); } /** @@ -130,7 +130,8 @@ static void amd76x_get_error_info(struct mem_ctl_info *mci, * then attempt to handle and clean up after the error */ static int amd76x_process_error_info(struct mem_ctl_info *mci, - struct amd76x_error_info *info, int handle_errors) + struct amd76x_error_info *info, + int handle_errors) { int error_found; u32 row; @@ -138,7 +139,7 @@ static int amd76x_process_error_info(struct mem_ctl_info *mci, error_found = 0; /* - * Check for an uncorrectable error + * Check for an uncorrectable error */ if (info->ecc_mode_status & BIT(8)) { error_found = 1; @@ -146,12 +147,12 @@ static int amd76x_process_error_info(struct mem_ctl_info *mci, if (handle_errors) { row = (info->ecc_mode_status >> 4) & 0xf; edac_mc_handle_ue(mci, mci->csrows[row].first_page, 0, - row, mci->ctl_name); + row, mci->ctl_name); } } 
/* - * Check for a correctable error + * Check for a correctable error */ if (info->ecc_mode_status & BIT(9)) { error_found = 1; @@ -159,7 +160,7 @@ static int amd76x_process_error_info(struct mem_ctl_info *mci, if (handle_errors) { row = info->ecc_mode_status & 0xf; edac_mc_handle_ce(mci, mci->csrows[row].first_page, 0, - 0, row, 0, mci->ctl_name); + 0, row, 0, mci->ctl_name); } } @@ -182,7 +183,7 @@ static void amd76x_check(struct mem_ctl_info *mci) } static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, - enum edac_type edac_mode) + enum edac_type edac_mode) { struct csrow_info *csrow; u32 mba, mba_base, mba_mask, dms; @@ -193,8 +194,7 @@ static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, /* find the DRAM Chip Select Base address and mask */ pci_read_config_dword(pdev, - AMD76X_MEM_BASE_ADDR + (index * 4), - &mba); + AMD76X_MEM_BASE_ADDR + (index * 4), &mba); if (!(mba & BIT(0))) continue; @@ -238,7 +238,7 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx) debugf0("%s()\n", __func__); pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems); ems_mode = (ems >> 10) & 0x3; - mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS); + mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS, 0); if (mci == NULL) { return -ENOMEM; @@ -249,24 +249,36 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx) mci->mtype_cap = MEM_FLAG_RDDR; mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; mci->edac_cap = ems_mode ? - (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE; + (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE; mci->mod_name = EDAC_MOD_STR; mci->mod_ver = AMD76X_REVISION; mci->ctl_name = amd76x_devs[dev_idx].ctl_name; + mci->dev_name = pci_name(pdev); mci->edac_check = amd76x_check; mci->ctl_page_to_phys = NULL; amd76x_init_csrows(mci, pdev, ems_modes[ems_mode]); - amd76x_get_error_info(mci, &discard); /* clear counters */ + amd76x_get_error_info(mci, &discard); /* clear counters */ /* Here we assume that we will never see multiple instances of this * type of memory controller. The ID is therefore hardcoded to 0. 
*/ - if (edac_mc_add_mc(mci,0)) { + if (edac_mc_add_mc(mci)) { debugf3("%s(): failed edac_mc_add_mc()\n", __func__); goto fail; } + /* allocating generic PCI control info */ + amd76x_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR); + if (!amd76x_pci) { + printk(KERN_WARNING + "%s(): Unable to create PCI control\n", + __func__); + printk(KERN_WARNING + "%s(): PCI error report via EDAC not setup\n", + __func__); + } + /* get this far and it's successful */ debugf3("%s(): success\n", __func__); return 0; @@ -278,7 +290,7 @@ fail: /* returns count (>= 0), or negative on error */ static int __devinit amd76x_init_one(struct pci_dev *pdev, - const struct pci_device_id *ent) + const struct pci_device_id *ent) { debugf0("%s()\n", __func__); @@ -300,6 +312,9 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev) debugf0("%s()\n", __func__); + if (amd76x_pci) + edac_pci_release_generic_ctl(amd76x_pci); + if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL) return; @@ -308,16 +323,14 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev) static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = { { - PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0, - AMD762 - }, + PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0, + AMD762}, { - PCI_VEND_DEV(AMD, FE_GATE_700E), PCI_ANY_ID, PCI_ANY_ID, 0, 0, - AMD761 - }, + PCI_VEND_DEV(AMD, FE_GATE_700E), PCI_ANY_ID, PCI_ANY_ID, 0, 0, + AMD761}, { - 0, - } /* 0 terminated list. */ + 0, + } /* 0 terminated list. */ }; MODULE_DEVICE_TABLE(pci, amd76x_pci_tbl); diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c index 8bcc887692ab..3bba224cb55d 100644 --- a/drivers/edac/e752x_edac.c +++ b/drivers/edac/e752x_edac.c @@ -22,13 +22,16 @@ #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/slab.h> -#include "edac_mc.h" +#include <linux/edac.h> +#include "edac_core.h" -#define E752X_REVISION " Ver: 2.0.1 " __DATE__ +#define E752X_REVISION " Ver: 2.0.2 " __DATE__ #define EDAC_MOD_STR "e752x_edac" static int force_function_unhide; +static struct edac_pci_ctl_info *e752x_pci; + #define e752x_printk(level, fmt, arg...) 
\ edac_printk(level, "e752x", fmt, ##arg) @@ -203,25 +206,22 @@ static const struct e752x_dev_info e752x_devs[] = { [E7520] = { .err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR, .ctl_dev = PCI_DEVICE_ID_INTEL_7520_0, - .ctl_name = "E7520" - }, + .ctl_name = "E7520"}, [E7525] = { .err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR, .ctl_dev = PCI_DEVICE_ID_INTEL_7525_0, - .ctl_name = "E7525" - }, + .ctl_name = "E7525"}, [E7320] = { .err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR, .ctl_dev = PCI_DEVICE_ID_INTEL_7320_0, - .ctl_name = "E7320" - }, + .ctl_name = "E7320"}, }; static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci, - unsigned long page) + unsigned long page) { u32 remap; - struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; + struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info; debugf3("%s()\n", __func__); @@ -241,13 +241,13 @@ static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci, } static void do_process_ce(struct mem_ctl_info *mci, u16 error_one, - u32 sec1_add, u16 sec1_syndrome) + u32 sec1_add, u16 sec1_syndrome) { u32 page; int row; int channel; int i; - struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; + struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info; debugf3("%s()\n", __func__); @@ -261,7 +261,8 @@ static void do_process_ce(struct mem_ctl_info *mci, u16 error_one, e752x_printk(KERN_WARNING, "Test row %d Table %d %d %d %d %d %d %d %d\n", row, pvt->map[0], pvt->map[1], pvt->map[2], pvt->map[3], - pvt->map[4], pvt->map[5], pvt->map[6], pvt->map[7]); + pvt->map[4], pvt->map[5], pvt->map[6], + pvt->map[7]); /* test for channel remapping */ for (i = 0; i < 8; i++) { @@ -275,24 +276,22 @@ static void do_process_ce(struct mem_ctl_info *mci, u16 error_one, row = i; else e752x_mc_printk(mci, KERN_WARNING, - "row %d not found in remap table\n", row); + "row %d not found in remap table\n", + row); } else row = edac_mc_find_csrow_by_page(mci, page); /* 0 = channel A, 1 = channel B */ channel = !(error_one & 1); - if (!pvt->map_type) - row = 7 - row; - /* e752x mc reads 34:6 of the DRAM linear address */ edac_mc_handle_ce(mci, page, offset_in_page(sec1_add << 4), sec1_syndrome, row, channel, "e752x CE"); } static inline void process_ce(struct mem_ctl_info *mci, u16 error_one, - u32 sec1_add, u16 sec1_syndrome, int *error_found, - int handle_error) + u32 sec1_add, u16 sec1_syndrome, int *error_found, + int handle_error) { *error_found = 1; @@ -301,11 +300,11 @@ static inline void process_ce(struct mem_ctl_info *mci, u16 error_one, } static void do_process_ue(struct mem_ctl_info *mci, u16 error_one, - u32 ded_add, u32 scrb_add) + u32 ded_add, u32 scrb_add) { u32 error_2b, block_page; int row; - struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; + struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info; debugf3("%s()\n", __func__); @@ -316,14 +315,14 @@ static void do_process_ue(struct mem_ctl_info *mci, u16 error_one, block_page = error_2b >> (PAGE_SHIFT - 4); row = pvt->mc_symmetric ? - /* chip select are bits 14 & 13 */ + /* chip select are bits 14 & 13 */ ((block_page >> 1) & 3) : edac_mc_find_csrow_by_page(mci, block_page); /* e752x mc reads 34:6 of the DRAM linear address */ edac_mc_handle_ue(mci, block_page, - offset_in_page(error_2b << 4), - row, "e752x UE from Read"); + offset_in_page(error_2b << 4), + row, "e752x UE from Read"); } if (error_one & 0x0404) { error_2b = scrb_add; @@ -332,19 +331,20 @@ static void do_process_ue(struct mem_ctl_info *mci, u16 error_one, block_page = error_2b >> (PAGE_SHIFT - 4); row = pvt->mc_symmetric ? 
- /* chip select are bits 14 & 13 */ + /* chip select are bits 14 & 13 */ ((block_page >> 1) & 3) : edac_mc_find_csrow_by_page(mci, block_page); /* e752x mc reads 34:6 of the DRAM linear address */ edac_mc_handle_ue(mci, block_page, - offset_in_page(error_2b << 4), - row, "e752x UE from Scruber"); + offset_in_page(error_2b << 4), + row, "e752x UE from Scruber"); } } static inline void process_ue(struct mem_ctl_info *mci, u16 error_one, - u32 ded_add, u32 scrb_add, int *error_found, int handle_error) + u32 ded_add, u32 scrb_add, int *error_found, + int handle_error) { *error_found = 1; @@ -353,7 +353,7 @@ static inline void process_ue(struct mem_ctl_info *mci, u16 error_one, } static inline void process_ue_no_info_wr(struct mem_ctl_info *mci, - int *error_found, int handle_error) + int *error_found, int handle_error) { *error_found = 1; @@ -365,24 +365,24 @@ static inline void process_ue_no_info_wr(struct mem_ctl_info *mci, } static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error, - u32 retry_add) + u32 retry_add) { u32 error_1b, page; int row; - struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; + struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info; error_1b = retry_add; - page = error_1b >> (PAGE_SHIFT - 4); /* convert the addr to 4k page */ - row = pvt->mc_symmetric ? - ((page >> 1) & 3) : /* chip select are bits 14 & 13 */ + page = error_1b >> (PAGE_SHIFT - 4); /* convert the addr to 4k page */ + row = pvt->mc_symmetric ? ((page >> 1) & 3) : /* chip select are bits 14 & 13 */ edac_mc_find_csrow_by_page(mci, page); e752x_mc_printk(mci, KERN_WARNING, - "CE page 0x%lx, row %d : Memory read retry\n", - (long unsigned int) page, row); + "CE page 0x%lx, row %d : Memory read retry\n", + (long unsigned int)page, row); } static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error, - u32 retry_add, int *error_found, int handle_error) + u32 retry_add, int *error_found, + int handle_error) { *error_found = 1; @@ -391,7 +391,7 @@ static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error, } static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error, - int *error_found, int handle_error) + int *error_found, int handle_error) { *error_found = 1; @@ -420,7 +420,7 @@ static void do_global_error(int fatal, u32 errors) } static inline void global_error(int fatal, u32 errors, int *error_found, - int handle_error) + int handle_error) { *error_found = 1; @@ -447,7 +447,7 @@ static void do_hub_error(int fatal, u8 errors) } static inline void hub_error(int fatal, u8 errors, int *error_found, - int handle_error) + int handle_error) { *error_found = 1; @@ -505,7 +505,7 @@ static void do_sysbus_error(int fatal, u32 errors) } static inline void sysbus_error(int fatal, u32 errors, int *error_found, - int handle_error) + int handle_error) { *error_found = 1; @@ -514,7 +514,7 @@ static inline void sysbus_error(int fatal, u32 errors, int *error_found, } static void e752x_check_hub_interface(struct e752x_error_info *info, - int *error_found, int handle_error) + int *error_found, int handle_error) { u8 stat8; @@ -522,33 +522,32 @@ static void e752x_check_hub_interface(struct e752x_error_info *info, stat8 = info->hi_ferr; - if(stat8 & 0x7f) { /* Error, so process */ + if (stat8 & 0x7f) { /* Error, so process */ stat8 &= 0x7f; - if(stat8 & 0x2b) + if (stat8 & 0x2b) hub_error(1, stat8 & 0x2b, error_found, handle_error); - if(stat8 & 0x54) + if (stat8 & 0x54) hub_error(0, stat8 & 0x54, error_found, handle_error); } - 
//pci_read_config_byte(dev,E752X_HI_NERR,&stat8); stat8 = info->hi_nerr; - if(stat8 & 0x7f) { /* Error, so process */ + if (stat8 & 0x7f) { /* Error, so process */ stat8 &= 0x7f; if (stat8 & 0x2b) hub_error(1, stat8 & 0x2b, error_found, handle_error); - if(stat8 & 0x54) + if (stat8 & 0x54) hub_error(0, stat8 & 0x54, error_found, handle_error); } } static void e752x_check_sysbus(struct e752x_error_info *info, - int *error_found, int handle_error) + int *error_found, int handle_error) { u32 stat32, error32; @@ -556,47 +555,47 @@ static void e752x_check_sysbus(struct e752x_error_info *info, stat32 = info->sysbus_ferr + (info->sysbus_nerr << 16); if (stat32 == 0) - return; /* no errors */ + return; /* no errors */ error32 = (stat32 >> 16) & 0x3ff; stat32 = stat32 & 0x3ff; - if(stat32 & 0x087) + if (stat32 & 0x087) sysbus_error(1, stat32 & 0x087, error_found, handle_error); - if(stat32 & 0x378) + if (stat32 & 0x378) sysbus_error(0, stat32 & 0x378, error_found, handle_error); - if(error32 & 0x087) + if (error32 & 0x087) sysbus_error(1, error32 & 0x087, error_found, handle_error); - if(error32 & 0x378) + if (error32 & 0x378) sysbus_error(0, error32 & 0x378, error_found, handle_error); } -static void e752x_check_membuf (struct e752x_error_info *info, - int *error_found, int handle_error) +static void e752x_check_membuf(struct e752x_error_info *info, + int *error_found, int handle_error) { u8 stat8; stat8 = info->buf_ferr; - if (stat8 & 0x0f) { /* Error, so process */ + if (stat8 & 0x0f) { /* Error, so process */ stat8 &= 0x0f; membuf_error(stat8, error_found, handle_error); } stat8 = info->buf_nerr; - if (stat8 & 0x0f) { /* Error, so process */ + if (stat8 & 0x0f) { /* Error, so process */ stat8 &= 0x0f; membuf_error(stat8, error_found, handle_error); } } -static void e752x_check_dram (struct mem_ctl_info *mci, - struct e752x_error_info *info, int *error_found, - int handle_error) +static void e752x_check_dram(struct mem_ctl_info *mci, + struct e752x_error_info *info, int *error_found, + int handle_error) { u16 error_one, error_next; @@ -604,55 +603,52 @@ static void e752x_check_dram (struct mem_ctl_info *mci, error_next = info->dram_nerr; /* decode and report errors */ - if(error_one & 0x0101) /* check first error correctable */ + if (error_one & 0x0101) /* check first error correctable */ process_ce(mci, error_one, info->dram_sec1_add, - info->dram_sec1_syndrome, error_found, - handle_error); + info->dram_sec1_syndrome, error_found, handle_error); - if(error_next & 0x0101) /* check next error correctable */ + if (error_next & 0x0101) /* check next error correctable */ process_ce(mci, error_next, info->dram_sec2_add, - info->dram_sec2_syndrome, error_found, - handle_error); + info->dram_sec2_syndrome, error_found, handle_error); - if(error_one & 0x4040) + if (error_one & 0x4040) process_ue_no_info_wr(mci, error_found, handle_error); - if(error_next & 0x4040) + if (error_next & 0x4040) process_ue_no_info_wr(mci, error_found, handle_error); - if(error_one & 0x2020) + if (error_one & 0x2020) process_ded_retry(mci, error_one, info->dram_retr_add, - error_found, handle_error); + error_found, handle_error); - if(error_next & 0x2020) + if (error_next & 0x2020) process_ded_retry(mci, error_next, info->dram_retr_add, - error_found, handle_error); + error_found, handle_error); - if(error_one & 0x0808) - process_threshold_ce(mci, error_one, error_found, - handle_error); + if (error_one & 0x0808) + process_threshold_ce(mci, error_one, error_found, handle_error); - if(error_next & 0x0808) + if (error_next & 
0x0808) process_threshold_ce(mci, error_next, error_found, - handle_error); + handle_error); - if(error_one & 0x0606) + if (error_one & 0x0606) process_ue(mci, error_one, info->dram_ded_add, - info->dram_scrb_add, error_found, handle_error); + info->dram_scrb_add, error_found, handle_error); - if(error_next & 0x0606) + if (error_next & 0x0606) process_ue(mci, error_next, info->dram_ded_add, - info->dram_scrb_add, error_found, handle_error); + info->dram_scrb_add, error_found, handle_error); } -static void e752x_get_error_info (struct mem_ctl_info *mci, - struct e752x_error_info *info) +static void e752x_get_error_info(struct mem_ctl_info *mci, + struct e752x_error_info *info) { struct pci_dev *dev; struct e752x_pvt *pvt; memset(info, 0, sizeof(*info)); - pvt = (struct e752x_pvt *) mci->pvt_info; + pvt = (struct e752x_pvt *)mci->pvt_info; dev = pvt->dev_d0f1; pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global); @@ -661,8 +657,7 @@ static void e752x_get_error_info (struct mem_ctl_info *mci, pci_read_config_word(dev, E752X_SYSBUS_FERR, &info->sysbus_ferr); pci_read_config_byte(dev, E752X_BUF_FERR, &info->buf_ferr); - pci_read_config_word(dev, E752X_DRAM_FERR, - &info->dram_ferr); + pci_read_config_word(dev, E752X_DRAM_FERR, &info->dram_ferr); pci_read_config_dword(dev, E752X_DRAM_SEC1_ADD, &info->dram_sec1_add); pci_read_config_word(dev, E752X_DRAM_SEC1_SYNDROME, @@ -688,7 +683,7 @@ static void e752x_get_error_info (struct mem_ctl_info *mci, if (info->dram_ferr) pci_write_bits16(pvt->bridge_ck, E752X_DRAM_FERR, - info->dram_ferr, info->dram_ferr); + info->dram_ferr, info->dram_ferr); pci_write_config_dword(dev, E752X_FERR_GLOBAL, info->ferr_global); @@ -701,8 +696,7 @@ static void e752x_get_error_info (struct mem_ctl_info *mci, pci_read_config_word(dev, E752X_SYSBUS_NERR, &info->sysbus_nerr); pci_read_config_byte(dev, E752X_BUF_NERR, &info->buf_nerr); - pci_read_config_word(dev, E752X_DRAM_NERR, - &info->dram_nerr); + pci_read_config_word(dev, E752X_DRAM_NERR, &info->dram_nerr); pci_read_config_dword(dev, E752X_DRAM_SEC2_ADD, &info->dram_sec2_add); pci_read_config_word(dev, E752X_DRAM_SEC2_SYNDROME, @@ -722,15 +716,16 @@ static void e752x_get_error_info (struct mem_ctl_info *mci, if (info->dram_nerr) pci_write_bits16(pvt->bridge_ck, E752X_DRAM_NERR, - info->dram_nerr, info->dram_nerr); + info->dram_nerr, info->dram_nerr); pci_write_config_dword(dev, E752X_NERR_GLOBAL, info->nerr_global); } } -static int e752x_process_error_info (struct mem_ctl_info *mci, - struct e752x_error_info *info, int handle_errors) +static int e752x_process_error_info(struct mem_ctl_info *mci, + struct e752x_error_info *info, + int handle_errors) { u32 error32, stat32; int error_found; @@ -776,26 +771,38 @@ static inline int dual_channel_active(u16 ddrcsr) return (((ddrcsr >> 12) & 3) == 3); } +/* Remap csrow index numbers if map_type is "reverse" + */ +static inline int remap_csrow_index(struct mem_ctl_info *mci, int index) +{ + struct e752x_pvt *pvt = mci->pvt_info; + + if (!pvt->map_type) + return (7 - index); + + return (index); +} + static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, - u16 ddrcsr) + u16 ddrcsr) { struct csrow_info *csrow; unsigned long last_cumul_size; int index, mem_dev, drc_chan; - int drc_drbg; /* DRB granularity 0=64mb, 1=128mb */ - int drc_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */ + int drc_drbg; /* DRB granularity 0=64mb, 1=128mb */ + int drc_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */ u8 value; u32 dra, drc, cumul_size; dra = 0; - 
for (index=0; index < 4; index++) { + for (index = 0; index < 4; index++) { u8 dra_reg; - pci_read_config_byte(pdev, E752X_DRA+index, &dra_reg); + pci_read_config_byte(pdev, E752X_DRA + index, &dra_reg); dra |= dra_reg << (index * 8); } pci_read_config_dword(pdev, E752X_DRC, &drc); drc_chan = dual_channel_active(ddrcsr); - drc_drbg = drc_chan + 1; /* 128 in dual mode, 64 in single */ + drc_drbg = drc_chan + 1; /* 128 in dual mode, 64 in single */ drc_ddim = (drc >> 20) & 0x3; /* The dram row boundary (DRB) reg values are boundary address for @@ -806,7 +813,7 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) { /* mem_dev 0=x8, 1=x4 */ mem_dev = (dra >> (index * 4 + 2)) & 0x3; - csrow = &mci->csrows[index]; + csrow = &mci->csrows[remap_csrow_index(mci, index)]; mem_dev = (mem_dev == 2); pci_read_config_byte(pdev, E752X_DRB + index, &value); @@ -843,10 +850,10 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, } static void e752x_init_mem_map_table(struct pci_dev *pdev, - struct e752x_pvt *pvt) + struct e752x_pvt *pvt) { int index; - u8 value, last, row, stat8; + u8 value, last, row; last = 0; row = 0; @@ -858,7 +865,7 @@ static void e752x_init_mem_map_table(struct pci_dev *pdev, /* no dimm in the slot, so flag it as empty */ pvt->map[index] = 0xff; pvt->map[index + 1] = 0xff; - } else { /* there is a dimm in the slot */ + } else { /* there is a dimm in the slot */ pvt->map[index] = row; row++; last = value; @@ -866,31 +873,25 @@ static void e752x_init_mem_map_table(struct pci_dev *pdev, * sided */ pci_read_config_byte(pdev, E752X_DRB + index + 1, - &value); - pvt->map[index + 1] = (value == last) ? - 0xff : /* the dimm is single sided, - so flag as empty */ - row; /* this is a double sided dimm - to save the next row # */ + &value); + + /* the dimm is single sided, so flag as empty */ + /* this is a double sided dimm to save the next row #*/ + pvt->map[index + 1] = (value == last) ? 0xff : row; row++; last = value; } } - - /* set the map type. 1 = normal, 0 = reversed */ - pci_read_config_byte(pdev, E752X_DRM, &stat8); - pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f)); } /* Return 0 on success or 1 on failure. 
*/ static int e752x_get_devs(struct pci_dev *pdev, int dev_idx, - struct e752x_pvt *pvt) + struct e752x_pvt *pvt) { struct pci_dev *dev; pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, - pvt->dev_info->err_dev, - pvt->bridge_ck); + pvt->dev_info->err_dev, pvt->bridge_ck); if (pvt->bridge_ck == NULL) pvt->bridge_ck = pci_scan_single_device(pdev->bus, @@ -898,13 +899,13 @@ static int e752x_get_devs(struct pci_dev *pdev, int dev_idx, if (pvt->bridge_ck == NULL) { e752x_printk(KERN_ERR, "error reporting device not found:" - "vendor %x device 0x%x (broken BIOS?)\n", - PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev); + "vendor %x device 0x%x (broken BIOS?)\n", + PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev); return 1; } dev = pci_get_device(PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].ctl_dev, - NULL); + NULL); if (dev == NULL) goto fail; @@ -942,12 +943,22 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) struct mem_ctl_info *mci; struct e752x_pvt *pvt; u16 ddrcsr; - int drc_chan; /* Number of channels 0=1chan,1=2chan */ + int drc_chan; /* Number of channels 0=1chan,1=2chan */ struct e752x_error_info discard; debugf0("%s(): mci\n", __func__); debugf0("Starting Probe1\n"); + /* make sure error reporting method is sane */ + switch (edac_op_state) { + case EDAC_OPSTATE_POLL: + case EDAC_OPSTATE_NMI: + break; + default: + edac_op_state = EDAC_OPSTATE_POLL; + break; + } + /* check to see if device 0 function 1 is enabled; if it isn't, we * assume the BIOS has reserved it for a reason and is expecting * exclusive access, we take care not to violate that assumption and @@ -966,7 +977,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) /* Dual channel = 1, Single channel = 0 */ drc_chan = dual_channel_active(ddrcsr); - mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1); + mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1, 0); if (mci == NULL) { return -ENOMEM; @@ -975,14 +986,14 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) debugf3("%s(): init mci\n", __func__); mci->mtype_cap = MEM_FLAG_RDDR; mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED | - EDAC_FLAG_S4ECD4ED; + EDAC_FLAG_S4ECD4ED; /* FIXME - what if different memory types are in different csrows? */ mci->mod_name = EDAC_MOD_STR; mci->mod_ver = E752X_REVISION; mci->dev = &pdev->dev; debugf3("%s(): init pvt\n", __func__); - pvt = (struct e752x_pvt *) mci->pvt_info; + pvt = (struct e752x_pvt *)mci->pvt_info; pvt->dev_info = &e752x_devs[dev_idx]; pvt->mc_symmetric = ((ddrcsr & 0x10) != 0); @@ -993,16 +1004,20 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) debugf3("%s(): more mci init\n", __func__); mci->ctl_name = pvt->dev_info->ctl_name; + mci->dev_name = pci_name(pdev); mci->edac_check = e752x_check; mci->ctl_page_to_phys = ctl_page_to_phys; - e752x_init_csrows(mci, pdev, ddrcsr); - e752x_init_mem_map_table(pdev, pvt); - - /* set the map type. 1 = normal, 0 = reversed */ + /* set the map type. 1 = normal, 0 = reversed + * Must be set before e752x_init_csrows in case csrow mapping + * is reversed. 
+ */ pci_read_config_byte(pdev, E752X_DRM, &stat8); pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f)); + e752x_init_csrows(mci, pdev, ddrcsr); + e752x_init_mem_map_table(pdev, pvt); + mci->edac_cap |= EDAC_FLAG_NONE; debugf3("%s(): tolm, remapbase, remaplimit\n", __func__); @@ -1014,19 +1029,29 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) pci_read_config_word(pdev, E752X_REMAPLIMIT, &pci_data); pvt->remaplimit = ((u32) pci_data) << 14; e752x_printk(KERN_INFO, - "tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm, - pvt->remapbase, pvt->remaplimit); + "tolm = %x, remapbase = %x, remaplimit = %x\n", + pvt->tolm, pvt->remapbase, pvt->remaplimit); /* Here we assume that we will never see multiple instances of this * type of memory controller. The ID is therefore hardcoded to 0. */ - if (edac_mc_add_mc(mci,0)) { + if (edac_mc_add_mc(mci)) { debugf3("%s(): failed edac_mc_add_mc()\n", __func__); goto fail; } e752x_init_error_reporting_regs(pvt); - e752x_get_error_info(mci, &discard); /* clear other MCH errors */ + e752x_get_error_info(mci, &discard); /* clear other MCH errors */ + + /* allocating generic PCI control info */ + e752x_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR); + if (!e752x_pci) { + printk(KERN_WARNING + "%s(): Unable to create PCI control\n", __func__); + printk(KERN_WARNING + "%s(): PCI error report via EDAC not setup\n", + __func__); + } /* get this far and it's successful */ debugf3("%s(): success\n", __func__); @@ -1043,12 +1068,12 @@ fail: /* returns count (>= 0), or negative on error */ static int __devinit e752x_init_one(struct pci_dev *pdev, - const struct pci_device_id *ent) + const struct pci_device_id *ent) { debugf0("%s()\n", __func__); /* wake up and enable device */ - if(pci_enable_device(pdev) < 0) + if (pci_enable_device(pdev) < 0) return -EIO; return e752x_probe1(pdev, ent->driver_data); @@ -1061,10 +1086,13 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev) debugf0("%s()\n", __func__); + if (e752x_pci) + edac_pci_release_generic_ctl(e752x_pci); + if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL) return; - pvt = (struct e752x_pvt *) mci->pvt_info; + pvt = (struct e752x_pvt *)mci->pvt_info; pci_dev_put(pvt->dev_d0f0); pci_dev_put(pvt->dev_d0f1); pci_dev_put(pvt->bridge_ck); @@ -1073,20 +1101,17 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev) static const struct pci_device_id e752x_pci_tbl[] __devinitdata = { { - PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, - E7520 - }, + PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, + E7520}, { - PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, - E7525 - }, + PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, + E7525}, { - PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, - E7320 - }, + PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, + E7320}, { - 0, - } /* 0 terminated list. */ + 0, + } /* 0 terminated list. 
*/ }; MODULE_DEVICE_TABLE(pci, e752x_pci_tbl); @@ -1122,5 +1147,6 @@ MODULE_DESCRIPTION("MC support for Intel e752x memory controllers"); module_param(force_function_unhide, int, 0444); MODULE_PARM_DESC(force_function_unhide, "if BIOS sets Dev0:Fun1 up as hidden:" -" 1=force unhide and hope BIOS doesn't fight driver for Dev0:Fun1 access"); - + " 1=force unhide and hope BIOS doesn't fight driver for Dev0:Fun1 access"); +module_param(edac_op_state, int, 0444); +MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c index 310d91b41c96..96ecc4926641 100644 --- a/drivers/edac/e7xxx_edac.c +++ b/drivers/edac/e7xxx_edac.c @@ -27,9 +27,10 @@ #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/slab.h> -#include "edac_mc.h" +#include <linux/edac.h> +#include "edac_core.h" -#define E7XXX_REVISION " Ver: 2.0.1 " __DATE__ +#define E7XXX_REVISION " Ver: 2.0.2 " __DATE__ #define EDAC_MOD_STR "e7xxx_edac" #define e7xxx_printk(level, fmt, arg...) \ @@ -143,23 +144,21 @@ struct e7xxx_error_info { u32 dram_uelog_add; }; +static struct edac_pci_ctl_info *e7xxx_pci; + static const struct e7xxx_dev_info e7xxx_devs[] = { [E7500] = { .err_dev = PCI_DEVICE_ID_INTEL_7500_1_ERR, - .ctl_name = "E7500" - }, + .ctl_name = "E7500"}, [E7501] = { .err_dev = PCI_DEVICE_ID_INTEL_7501_1_ERR, - .ctl_name = "E7501" - }, + .ctl_name = "E7501"}, [E7505] = { .err_dev = PCI_DEVICE_ID_INTEL_7505_1_ERR, - .ctl_name = "E7505" - }, + .ctl_name = "E7505"}, [E7205] = { .err_dev = PCI_DEVICE_ID_INTEL_7205_1_ERR, - .ctl_name = "E7205" - }, + .ctl_name = "E7205"}, }; /* FIXME - is this valid for both SECDED and S4ECD4ED? */ @@ -180,15 +179,15 @@ static inline int e7xxx_find_channel(u16 syndrome) } static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci, - unsigned long page) + unsigned long page) { u32 remap; - struct e7xxx_pvt *pvt = (struct e7xxx_pvt *) mci->pvt_info; + struct e7xxx_pvt *pvt = (struct e7xxx_pvt *)mci->pvt_info; debugf3("%s()\n", __func__); if ((page < pvt->tolm) || - ((page >= 0x100000) && (page < pvt->remapbase))) + ((page >= 0x100000) && (page < pvt->remapbase))) return page; remap = (page - pvt->tolm) + pvt->remapbase; @@ -200,8 +199,7 @@ static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci, return pvt->tolm - 1; } -static void process_ce(struct mem_ctl_info *mci, - struct e7xxx_error_info *info) +static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info) { u32 error_1b, page; u16 syndrome; @@ -212,7 +210,7 @@ static void process_ce(struct mem_ctl_info *mci, /* read the error address */ error_1b = info->dram_celog_add; /* FIXME - should use PAGE_SHIFT */ - page = error_1b >> 6; /* convert the address to 4k page */ + page = error_1b >> 6; /* convert the address to 4k page */ /* read the syndrome */ syndrome = info->dram_celog_syndrome; /* FIXME - check for -1 */ @@ -228,8 +226,7 @@ static void process_ce_no_info(struct mem_ctl_info *mci) edac_mc_handle_ce_no_info(mci, "e7xxx CE log register overflow"); } -static void process_ue(struct mem_ctl_info *mci, - struct e7xxx_error_info *info) +static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info) { u32 error_2b, block_page; int row; @@ -238,7 +235,7 @@ static void process_ue(struct mem_ctl_info *mci, /* read the error address */ error_2b = info->dram_uelog_add; /* FIXME - should use PAGE_SHIFT */ - block_page = error_2b >> 6; /* convert to 4k address */ + block_page = error_2b >> 6; /* convert to 4k address */ 
row = edac_mc_find_csrow_by_page(mci, block_page); edac_mc_handle_ue(mci, block_page, 0, row, "e7xxx UE"); } @@ -249,16 +246,14 @@ static void process_ue_no_info(struct mem_ctl_info *mci) edac_mc_handle_ue_no_info(mci, "e7xxx UE log register overflow"); } -static void e7xxx_get_error_info (struct mem_ctl_info *mci, - struct e7xxx_error_info *info) +static void e7xxx_get_error_info(struct mem_ctl_info *mci, + struct e7xxx_error_info *info) { struct e7xxx_pvt *pvt; - pvt = (struct e7xxx_pvt *) mci->pvt_info; - pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_FERR, - &info->dram_ferr); - pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_NERR, - &info->dram_nerr); + pvt = (struct e7xxx_pvt *)mci->pvt_info; + pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_FERR, &info->dram_ferr); + pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_NERR, &info->dram_nerr); if ((info->dram_ferr & 1) || (info->dram_nerr & 1)) { pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_CELOG_ADD, @@ -279,8 +274,9 @@ static void e7xxx_get_error_info (struct mem_ctl_info *mci, pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03, 0x03); } -static int e7xxx_process_error_info (struct mem_ctl_info *mci, - struct e7xxx_error_info *info, int handle_errors) +static int e7xxx_process_error_info(struct mem_ctl_info *mci, + struct e7xxx_error_info *info, + int handle_errors) { int error_found; @@ -341,7 +337,6 @@ static inline int dual_channel_active(u32 drc, int dev_idx) return (dev_idx == E7501) ? ((drc >> 22) & 0x1) : 1; } - /* Return DRB granularity (0=32mb, 1=64mb). */ static inline int drb_granularity(u32 drc, int dev_idx) { @@ -349,9 +344,8 @@ static inline int drb_granularity(u32 drc, int dev_idx) return (dev_idx == E7501) ? ((drc >> 18) & 0x3) : 1; } - static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, - int dev_idx, u32 drc) + int dev_idx, u32 drc) { unsigned long last_cumul_size; int index; @@ -419,10 +413,21 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) struct e7xxx_error_info discard; debugf0("%s(): mci\n", __func__); + + /* make sure error reporting method is sane */ + switch (edac_op_state) { + case EDAC_OPSTATE_POLL: + case EDAC_OPSTATE_NMI: + break; + default: + edac_op_state = EDAC_OPSTATE_POLL; + break; + } + pci_read_config_dword(pdev, E7XXX_DRC, &drc); drc_chan = dual_channel_active(drc, dev_idx); - mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1); + mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1, 0); if (mci == NULL) return -ENOMEM; @@ -430,17 +435,16 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) debugf3("%s(): init mci\n", __func__); mci->mtype_cap = MEM_FLAG_RDDR; mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED | - EDAC_FLAG_S4ECD4ED; + EDAC_FLAG_S4ECD4ED; /* FIXME - what if different memory types are in different csrows? 
*/ mci->mod_name = EDAC_MOD_STR; mci->mod_ver = E7XXX_REVISION; mci->dev = &pdev->dev; debugf3("%s(): init pvt\n", __func__); - pvt = (struct e7xxx_pvt *) mci->pvt_info; + pvt = (struct e7xxx_pvt *)mci->pvt_info; pvt->dev_info = &e7xxx_devs[dev_idx]; pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, - pvt->dev_info->err_dev, - pvt->bridge_ck); + pvt->dev_info->err_dev, pvt->bridge_ck); if (!pvt->bridge_ck) { e7xxx_printk(KERN_ERR, "error reporting device not found:" @@ -451,6 +455,7 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) debugf3("%s(): more mci init\n", __func__); mci->ctl_name = pvt->dev_info->ctl_name; + mci->dev_name = pci_name(pdev); mci->edac_check = e7xxx_check; mci->ctl_page_to_phys = ctl_page_to_phys; e7xxx_init_csrows(mci, pdev, dev_idx, drc); @@ -473,11 +478,22 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) /* Here we assume that we will never see multiple instances of this * type of memory controller. The ID is therefore hardcoded to 0. */ - if (edac_mc_add_mc(mci,0)) { + if (edac_mc_add_mc(mci)) { debugf3("%s(): failed edac_mc_add_mc()\n", __func__); goto fail1; } + /* allocating generic PCI control info */ + e7xxx_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR); + if (!e7xxx_pci) { + printk(KERN_WARNING + "%s(): Unable to create PCI control\n", + __func__); + printk(KERN_WARNING + "%s(): PCI error report via EDAC not setup\n", + __func__); + } + /* get this far and it's successful */ debugf3("%s(): success\n", __func__); return 0; @@ -493,7 +509,7 @@ fail0: /* returns count (>= 0), or negative on error */ static int __devinit e7xxx_init_one(struct pci_dev *pdev, - const struct pci_device_id *ent) + const struct pci_device_id *ent) { debugf0("%s()\n", __func__); @@ -509,34 +525,33 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev) debugf0("%s()\n", __func__); + if (e7xxx_pci) + edac_pci_release_generic_ctl(e7xxx_pci); + if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL) return; - pvt = (struct e7xxx_pvt *) mci->pvt_info; + pvt = (struct e7xxx_pvt *)mci->pvt_info; pci_dev_put(pvt->bridge_ck); edac_mc_free(mci); } static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = { { - PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, - E7205 - }, + PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, + E7205}, { - PCI_VEND_DEV(INTEL, 7500_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, - E7500 - }, + PCI_VEND_DEV(INTEL, 7500_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, + E7500}, { - PCI_VEND_DEV(INTEL, 7501_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, - E7501 - }, + PCI_VEND_DEV(INTEL, 7501_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, + E7501}, { - PCI_VEND_DEV(INTEL, 7505_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, - E7505 - }, + PCI_VEND_DEV(INTEL, 7505_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, + E7505}, { - 0, - } /* 0 terminated list. */ + 0, + } /* 0 terminated list. 
*/ }; MODULE_DEVICE_TABLE(pci, e7xxx_pci_tbl); @@ -563,5 +578,7 @@ module_exit(e7xxx_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n" - "Based on.work by Dan Hollis et al"); + "Based on.work by Dan Hollis et al"); MODULE_DESCRIPTION("MC support for Intel e7xxx memory controllers"); +module_param(edac_op_state, int, 0444); +MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); diff --git a/drivers/edac/edac_mc.h b/drivers/edac/edac_core.h index 713444cc4105..4e6bad15c4ba 100644 --- a/drivers/edac/edac_mc.h +++ b/drivers/edac/edac_core.h @@ -1,6 +1,7 @@ /* - * MC kernel module - * (C) 2003 Linux Networx (http://lnxi.com) + * Defines, structures, APIs for edac_core module + * + * (C) 2007 Linux Networx (http://lnxi.com) * This file may be distributed under the terms of the * GNU General Public License. * @@ -11,12 +12,13 @@ * NMI handling support added by * Dave Peterson <dsp@llnl.gov> <dave_peterson@pobox.com> * - * $Id: edac_mc.h,v 1.4.2.10 2005/10/05 00:43:44 dsp_llnl Exp $ + * Refactored for multi-source files: + * Doug Thompson <norsk5@xmission.com> * */ -#ifndef _EDAC_MC_H_ -#define _EDAC_MC_H_ +#ifndef _EDAC_CORE_H_ +#define _EDAC_CORE_H_ #include <linux/kernel.h> #include <linux/types.h> @@ -30,9 +32,14 @@ #include <linux/completion.h> #include <linux/kobject.h> #include <linux/platform_device.h> +#include <linux/sysdev.h> +#include <linux/workqueue.h> +#include <linux/version.h> #define EDAC_MC_LABEL_LEN 31 -#define MC_PROC_NAME_MAX_LEN 7 +#define EDAC_DEVICE_NAME_LEN 31 +#define EDAC_ATTRIB_VALUE_LEN 15 +#define MC_PROC_NAME_MAX_LEN 7 #if PAGE_SHIFT < 20 #define PAGES_TO_MiB( pages ) ( ( pages ) >> ( 20 - PAGE_SHIFT ) ) @@ -49,6 +56,14 @@ #define edac_mc_chipset_printk(mci, level, prefix, fmt, arg...) \ printk(level "EDAC " prefix " MC%d: " fmt, mci->mc_idx, ##arg) +/* edac_device printk */ +#define edac_device_printk(ctl, level, fmt, arg...) \ + printk(level "EDAC DEVICE%d: " fmt, ctl->dev_idx, ##arg) + +/* edac_pci printk */ +#define edac_pci_printk(ctl, level, fmt, arg...) \ + printk(level "EDAC PCI%d: " fmt, ctl->pci_idx, ##arg) + /* prefixes for edac_printk() and edac_mc_printk() */ #define EDAC_MC "MC" #define EDAC_PCI "PCI" @@ -60,7 +75,7 @@ extern int edac_debug_level; #define edac_debug_printk(level, fmt, arg...) \ do { \ if (level <= edac_debug_level) \ - edac_printk(KERN_DEBUG, EDAC_DEBUG, fmt, ##arg); \ + edac_printk(KERN_EMERG, EDAC_DEBUG, fmt, ##arg); \ } while(0) #define debugf0( ... ) edac_debug_printk(0, __VA_ARGS__ ) @@ -69,7 +84,7 @@ extern int edac_debug_level; #define debugf3( ... ) edac_debug_printk(3, __VA_ARGS__ ) #define debugf4( ... ) edac_debug_printk(4, __VA_ARGS__ ) -#else /* !CONFIG_EDAC_DEBUG */ +#else /* !CONFIG_EDAC_DEBUG */ #define debugf0( ... ) #define debugf1( ... ) @@ -77,18 +92,14 @@ extern int edac_debug_level; #define debugf3( ... ) #define debugf4( ... 
) -#endif /* !CONFIG_EDAC_DEBUG */ +#endif /* !CONFIG_EDAC_DEBUG */ #define BIT(x) (1 << (x)) #define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, \ PCI_DEVICE_ID_ ## vend ## _ ## dev -#if defined(CONFIG_X86) && defined(CONFIG_PCI) -#define dev_name(dev) pci_name(to_pci_dev(dev)) -#else -#define dev_name(dev) to_platform_device(dev)->name -#endif +#define dev_name(dev) (dev)->dev_name /* memory devices */ enum dev_type { @@ -124,8 +135,9 @@ enum mem_type { MEM_DDR, /* Double data rate SDRAM */ MEM_RDDR, /* Registered Double data rate SDRAM */ MEM_RMBS, /* Rambus DRAM */ - MEM_DDR2, /* DDR2 RAM */ - MEM_FB_DDR2, /* fully buffered DDR2 */ + MEM_DDR2, /* DDR2 RAM */ + MEM_FB_DDR2, /* fully buffered DDR2 */ + MEM_RDDR2, /* Registered DDR2 RAM */ }; #define MEM_FLAG_EMPTY BIT(MEM_EMPTY) @@ -141,6 +153,7 @@ enum mem_type { #define MEM_FLAG_RMBS BIT(MEM_RMBS) #define MEM_FLAG_DDR2 BIT(MEM_DDR2) #define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2) +#define MEM_FLAG_RDDR2 BIT(MEM_RDDR2) /* chipset Error Detection and Correction capabilities and mode */ enum edac_type { @@ -181,16 +194,23 @@ enum scrub_type { }; #define SCRUB_FLAG_SW_PROG BIT(SCRUB_SW_PROG) -#define SCRUB_FLAG_SW_SRC BIT(SCRUB_SW_SRC_CORR) -#define SCRUB_FLAG_SW_PROG_SRC BIT(SCRUB_SW_PROG_SRC_CORR) +#define SCRUB_FLAG_SW_SRC BIT(SCRUB_SW_SRC) +#define SCRUB_FLAG_SW_PROG_SRC BIT(SCRUB_SW_PROG_SRC) #define SCRUB_FLAG_SW_TUN BIT(SCRUB_SW_SCRUB_TUNABLE) #define SCRUB_FLAG_HW_PROG BIT(SCRUB_HW_PROG) -#define SCRUB_FLAG_HW_SRC BIT(SCRUB_HW_SRC_CORR) -#define SCRUB_FLAG_HW_PROG_SRC BIT(SCRUB_HW_PROG_SRC_CORR) +#define SCRUB_FLAG_HW_SRC BIT(SCRUB_HW_SRC) +#define SCRUB_FLAG_HW_PROG_SRC BIT(SCRUB_HW_PROG_SRC) #define SCRUB_FLAG_HW_TUN BIT(SCRUB_HW_TUNABLE) /* FIXME - should have notify capabilities: NMI, LOG, PROC, etc */ +/* EDAC internal operation states */ +#define OP_ALLOC 0x100 +#define OP_RUNNING_POLL 0x201 +#define OP_RUNNING_INTERRUPT 0x202 +#define OP_RUNNING_POLL_INTR 0x203 +#define OP_OFFLINE 0x300 + /* * There are several things to be aware of that aren't at all obvious: * @@ -276,7 +296,7 @@ enum scrub_type { struct channel_info { int chan_idx; /* channel index */ u32 ce_count; /* Correctable Errors for this CHANNEL */ - char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */ + char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */ struct csrow_info *csrow; /* the parent */ }; @@ -297,15 +317,29 @@ struct csrow_info { struct mem_ctl_info *mci; /* the parent */ struct kobject kobj; /* sysfs kobject for this csrow */ - struct completion kobj_complete; - /* FIXME the number of CHANNELs might need to become dynamic */ + /* channel information for this csrow */ u32 nr_channels; struct channel_info *channels; }; +/* mcidev_sysfs_attribute structure + * used for driver sysfs attributes and in mem_ctl_info + * sysfs top level entries + */ +struct mcidev_sysfs_attribute { + struct attribute attr; + ssize_t (*show)(struct mem_ctl_info *,char *); + ssize_t (*store)(struct mem_ctl_info *, const char *,size_t); +}; + +/* MEMORY controller information structure + */ struct mem_ctl_info { - struct list_head link; /* for global list of mem_ctl_info structs */ + struct list_head link; /* for global list of mem_ctl_info structs */ + + struct module *owner; /* Module owner of this control struct */ + unsigned long mtype_cap; /* memory types supported by mc */ unsigned long edac_ctl_cap; /* Mem controller EDAC capabilities */ unsigned long edac_cap; /* configuration capabilities - this is @@ -322,14 +356,15 @@ struct mem_ctl_info { 
/* Translates sdram memory scrub rate given in bytes/sec to the internal representation and configures whatever else needs to be configured. - */ - int (*set_sdram_scrub_rate) (struct mem_ctl_info *mci, u32 *bw); + */ + int (*set_sdram_scrub_rate) (struct mem_ctl_info * mci, u32 * bw); /* Get the current sdram memory scrub rate from the internal representation and converts it to the closest matching bandwith in bytes/sec. - */ - int (*get_sdram_scrub_rate) (struct mem_ctl_info *mci, u32 *bw); + */ + int (*get_sdram_scrub_rate) (struct mem_ctl_info * mci, u32 * bw); + /* pointer to edac checking routine */ void (*edac_check) (struct mem_ctl_info * mci); @@ -340,7 +375,7 @@ struct mem_ctl_info { */ /* FIXME - why not send the phys page to begin with? */ unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci, - unsigned long page); + unsigned long page); int mc_idx; int nr_csrows; struct csrow_info *csrows; @@ -353,6 +388,7 @@ struct mem_ctl_info { const char *mod_name; const char *mod_ver; const char *ctl_name; + const char *dev_name; char proc_name[MC_PROC_NAME_MAX_LEN + 1]; void *pvt_info; u32 ue_noinfo_count; /* Uncorrectable Errors w/o info */ @@ -369,14 +405,327 @@ struct mem_ctl_info { /* edac sysfs device control */ struct kobject edac_mci_kobj; - struct completion kobj_complete; + + /* Additional top controller level attributes, but specified + * by the low level driver. + * + * Set by the low level driver to provide attributes at the + * controller level, same level as 'ue_count' and 'ce_count' above. + * An array of structures, NULL terminated + * + * If attributes are desired, then set to array of attributes + * If no attributes are desired, leave NULL + */ + struct mcidev_sysfs_attribute *mc_driver_sysfs_attributes; + + /* work struct for this MC */ + struct delayed_work work; + + /* the internal state of this controller instance */ + int op_state; +}; + +/* + * The following are the structures to provide for a generic + * or abstract 'edac_device'. This set of structures and the + * code that implements the APIs for the same, provide for + * registering EDAC type devices which are NOT standard memory. + * + * CPU caches (L1 and L2) + * DMA engines + * Core CPU swithces + * Fabric switch units + * PCIe interface controllers + * other EDAC/ECC type devices that can be monitored for + * errors, etc. + * + * It allows for a 2 level set of hiearchry. For example: + * + * cache could be composed of L1, L2 and L3 levels of cache. + * Each CPU core would have its own L1 cache, while sharing + * L2 and maybe L3 caches. + * + * View them arranged, via the sysfs presentation: + * /sys/devices/system/edac/.. + * + * mc/ <existing memory device directory> + * cpu/cpu0/.. <L1 and L2 block directory> + * /L1-cache/ce_count + * /ue_count + * /L2-cache/ce_count + * /ue_count + * cpu/cpu1/.. <L1 and L2 block directory> + * /L1-cache/ce_count + * /ue_count + * /L2-cache/ce_count + * /ue_count + * ... 
+ * + * the L1 and L2 directories would be "edac_device_block's" + */ + +struct edac_device_counter { + u32 ue_count; + u32 ce_count; +}; + +/* forward reference */ +struct edac_device_ctl_info; +struct edac_device_block; + +/* edac_dev_sysfs_attribute structure + * used for driver sysfs attributes in mem_ctl_info + * for extra controls and attributes: + * like high level error Injection controls + */ +struct edac_dev_sysfs_attribute { + struct attribute attr; + ssize_t (*show)(struct edac_device_ctl_info *, char *); + ssize_t (*store)(struct edac_device_ctl_info *, const char *, size_t); +}; + +/* edac_dev_sysfs_block_attribute structure + * + * used in leaf 'block' nodes for adding controls/attributes + * + * each block in each instance of the containing control structure + * can have an array of the following. The show and store functions + * will be filled in with the show/store function in the + * low level driver. + * + * The 'value' field will be the actual value field used for + * counting + */ +struct edac_dev_sysfs_block_attribute { + struct attribute attr; + ssize_t (*show)(struct kobject *, struct attribute *, char *); + ssize_t (*store)(struct kobject *, struct attribute *, + const char *, size_t); + struct edac_device_block *block; + + unsigned int value; +}; + +/* device block control structure */ +struct edac_device_block { + struct edac_device_instance *instance; /* Up Pointer */ + char name[EDAC_DEVICE_NAME_LEN + 1]; + + struct edac_device_counter counters; /* basic UE and CE counters */ + + int nr_attribs; /* how many attributes */ + + /* this block's attributes, could be NULL */ + struct edac_dev_sysfs_block_attribute *block_attributes; + + /* edac sysfs device control */ + struct kobject kobj; +}; + +/* device instance control structure */ +struct edac_device_instance { + struct edac_device_ctl_info *ctl; /* Up pointer */ + char name[EDAC_DEVICE_NAME_LEN + 4]; + + struct edac_device_counter counters; /* instance counters */ + + u32 nr_blocks; /* how many blocks */ + struct edac_device_block *blocks; /* block array */ + + /* edac sysfs device control */ + struct kobject kobj; +}; + + +/* + * Abstract edac_device control info structure + * + */ +struct edac_device_ctl_info { + /* for global list of edac_device_ctl_info structs */ + struct list_head link; + + struct module *owner; /* Module owner of this control struct */ + + int dev_idx; + + /* Per instance controls for this edac_device */ + int log_ue; /* boolean for logging UEs */ + int log_ce; /* boolean for logging CEs */ + int panic_on_ue; /* boolean for panic'ing on an UE */ + unsigned poll_msec; /* number of milliseconds to poll interval */ + unsigned long delay; /* number of jiffies for poll_msec */ + + /* Additional top controller level attributes, but specified + * by the low level driver. + * + * Set by the low level driver to provide attributes at the + * controller level, same level as 'ue_count' and 'ce_count' above. 
+ * An array of structures, NULL terminated + * + * If attributes are desired, then set to array of attributes + * If no attributes are desired, leave NULL + */ + struct edac_dev_sysfs_attribute *sysfs_attributes; + + /* pointer to main 'edac' class in sysfs */ + struct sysdev_class *edac_class; + + /* the internal state of this controller instance */ + int op_state; + /* work struct for this instance */ + struct delayed_work work; + + /* pointer to edac polling checking routine: + * If NOT NULL: points to polling check routine + * If NULL: Then assumes INTERRUPT operation, where + * MC driver will receive events + */ + void (*edac_check) (struct edac_device_ctl_info * edac_dev); + + struct device *dev; /* pointer to device structure */ + + const char *mod_name; /* module name */ + const char *ctl_name; /* edac controller name */ + const char *dev_name; /* pci/platform/etc... name */ + + void *pvt_info; /* pointer to 'private driver' info */ + + unsigned long start_time; /* edac_device load start time (jiffies) */ + + /* these are for safe removal of mc devices from global list while + * NMI handlers may be traversing list + */ + struct rcu_head rcu; + struct completion removal_complete; + + /* sysfs top name under 'edac' directory + * and instance name: + * cpu/cpu0/... + * cpu/cpu1/... + * cpu/cpu2/... + * ... + */ + char name[EDAC_DEVICE_NAME_LEN + 1]; + + /* Number of instances supported on this control structure + * and the array of those instances + */ + u32 nr_instances; + struct edac_device_instance *instances; + + /* Event counters for the this whole EDAC Device */ + struct edac_device_counter counters; + + /* edac sysfs device control for the 'name' + * device this structure controls + */ + struct kobject kobj; }; +/* To get from the instance's wq to the beginning of the ctl structure */ +#define to_edac_mem_ctl_work(w) \ + container_of(w, struct mem_ctl_info, work) + +#define to_edac_device_ctl_work(w) \ + container_of(w,struct edac_device_ctl_info,work) + +/* + * The alloc() and free() functions for the 'edac_device' control info + * structure. A MC driver will allocate one of these for each edac_device + * it is going to control/register with the EDAC CORE. 
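
[Editor's note] The to_edac_mem_ctl_work() and to_edac_device_ctl_work() macros above rely on the kernel's container_of(): given a pointer to the embedded work member, they recover the structure that contains it. Here is a minimal userspace re-implementation of the same idea, with container_of written out by hand and an invented demo_ctl type standing in for the control structure:

#include <stdio.h>
#include <stddef.h>

/* same idea as the kernel macro: member pointer -> containing structure */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_work { int pending; };

struct demo_ctl {
    int dev_idx;
    struct demo_work work;  /* embedded, like 'struct delayed_work work' */
};

#define to_demo_ctl(w) container_of(w, struct demo_ctl, work)

static void demo_work_fn(struct demo_work *w)
{
    /* the callback only receives the work pointer, yet can reach its owner */
    struct demo_ctl *ctl = to_demo_ctl(w);

    printf("work ran for control structure with dev_idx=%d\n", ctl->dev_idx);
}

int main(void)
{
    struct demo_ctl ctl = { .dev_idx = 3 };

    demo_work_fn(&ctl.work);
    return 0;
}
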
+ */ +extern struct edac_device_ctl_info *edac_device_alloc_ctl_info( + unsigned sizeof_private, + char *edac_device_name, unsigned nr_instances, + char *edac_block_name, unsigned nr_blocks, + unsigned offset_value, + struct edac_dev_sysfs_block_attribute *block_attributes, + unsigned nr_attribs, + int device_index); + +/* The offset value can be: + * -1 indicating no offset value + * 0 for zero-based block numbers + * 1 for 1-based block number + * other for other-based block number + */ +#define BLOCK_OFFSET_VALUE_OFF ((unsigned) -1) + +extern void edac_device_free_ctl_info(struct edac_device_ctl_info *ctl_info); + #ifdef CONFIG_PCI +struct edac_pci_counter { + atomic_t pe_count; + atomic_t npe_count; +}; + +/* + * Abstract edac_pci control info structure + * + */ +struct edac_pci_ctl_info { + /* for global list of edac_pci_ctl_info structs */ + struct list_head link; + + int pci_idx; + + struct sysdev_class *edac_class; /* pointer to class */ + + /* the internal state of this controller instance */ + int op_state; + /* work struct for this instance */ + struct delayed_work work; + + /* pointer to edac polling checking routine: + * If NOT NULL: points to polling check routine + * If NULL: Then assumes INTERRUPT operation, where + * MC driver will receive events + */ + void (*edac_check) (struct edac_pci_ctl_info * edac_dev); + + struct device *dev; /* pointer to device structure */ + + const char *mod_name; /* module name */ + const char *ctl_name; /* edac controller name */ + const char *dev_name; /* pci/platform/etc... name */ + + void *pvt_info; /* pointer to 'private driver' info */ + + unsigned long start_time; /* edac_pci load start time (jiffies) */ + + /* these are for safe removal of devices from global list while + * NMI handlers may be traversing list + */ + struct rcu_head rcu; + struct completion complete; + + /* sysfs top name under 'edac' directory + * and instance name: + * cpu/cpu0/... + * cpu/cpu1/... + * cpu/cpu2/... + * ... 
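
[Editor's note] The offset_value argument documented above only influences how block numbers are generated when the allocator later formats the block names. A short userspace illustration of zero-based versus one-based numbering follows; name_blocks() and the "L" base name are invented for the demo and are not part of the EDAC API.

#include <stdio.h>

/* print the names a hypothetical 3-block instance would get */
static void name_blocks(const char *base, unsigned nr_blocks, int offset)
{
    unsigned i;

    for (i = 0; i < nr_blocks; i++)
        printf("  %s%d\n", base, (int)i + offset);
}

int main(void)
{
    printf("offset_value = 0 (zero-based):\n");
    name_blocks("L", 3, 0);     /* L0 L1 L2 */

    printf("offset_value = 1 (one-based):\n");
    name_blocks("L", 3, 1);     /* L1 L2 L3 */
    return 0;
}
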
+ */ + char name[EDAC_DEVICE_NAME_LEN + 1]; + + /* Event counters for the this whole EDAC Device */ + struct edac_pci_counter counters; + + /* edac sysfs device control for the 'name' + * device this structure controls + */ + struct kobject kobj; + struct completion kobj_complete; +}; + +#define to_edac_pci_ctl_work(w) \ + container_of(w, struct edac_pci_ctl_info,work) + /* write all or some bits in a byte-register*/ static inline void pci_write_bits8(struct pci_dev *pdev, int offset, u8 value, - u8 mask) + u8 mask) { if (mask != 0xff) { u8 buf; @@ -392,7 +741,7 @@ static inline void pci_write_bits8(struct pci_dev *pdev, int offset, u8 value, /* write all or some bits in a word-register*/ static inline void pci_write_bits16(struct pci_dev *pdev, int offset, - u16 value, u16 mask) + u16 value, u16 mask) { if (mask != 0xffff) { u16 buf; @@ -408,7 +757,7 @@ static inline void pci_write_bits16(struct pci_dev *pdev, int offset, /* write all or some bits in a dword-register*/ static inline void pci_write_bits32(struct pci_dev *pdev, int offset, - u32 value, u32 mask) + u32 value, u32 mask) { if (mask != 0xffff) { u32 buf; @@ -422,20 +771,16 @@ static inline void pci_write_bits32(struct pci_dev *pdev, int offset, pci_write_config_dword(pdev, offset, value); } -#endif /* CONFIG_PCI */ +#endif /* CONFIG_PCI */ -#ifdef CONFIG_EDAC_DEBUG -void edac_mc_dump_channel(struct channel_info *chan); -void edac_mc_dump_mci(struct mem_ctl_info *mci); -void edac_mc_dump_csrow(struct csrow_info *csrow); -#endif /* CONFIG_EDAC_DEBUG */ - -extern int edac_mc_add_mc(struct mem_ctl_info *mci,int mc_idx); -extern struct mem_ctl_info * edac_mc_del_mc(struct device *dev); +extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows, + unsigned nr_chans, int edac_index); +extern int edac_mc_add_mc(struct mem_ctl_info *mci); +extern void edac_mc_free(struct mem_ctl_info *mci); +extern struct mem_ctl_info *edac_mc_find(int idx); +extern struct mem_ctl_info *edac_mc_del_mc(struct device *dev); extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, - unsigned long page); -extern void edac_mc_scrub_block(unsigned long page, unsigned long offset, - u32 size); + unsigned long page); /* * The no info errors are used when error overflows are reported. @@ -448,34 +793,59 @@ extern void edac_mc_scrub_block(unsigned long page, unsigned long offset, * statement clutter and extra function arguments. 
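
[Editor's note] The pci_write_bitsN() helpers above implement a read-modify-write: when the mask does not cover the whole register, the current value is read, the bits outside the mask are preserved, and only the masked bits take the new value. The same logic applied to a plain in-memory variable, as a userspace sketch with no PCI access (fake_reg and write_bits8 are invented stand-ins):

#include <stdio.h>
#include <stdint.h>

static uint8_t fake_reg = 0xA5;     /* stands in for a PCI config byte */

/* write 'value' into 'fake_reg', but only the bits selected by 'mask' */
static void write_bits8(uint8_t value, uint8_t mask)
{
    if (mask != 0xff) {
        uint8_t buf = fake_reg;     /* like pci_read_config_byte()  */

        value &= mask;              /* keep only the bits we may set */
        buf &= ~mask;               /* preserve the unmasked bits    */
        value |= buf;               /* merge old and new             */
    }
    fake_reg = value;               /* like pci_write_config_byte()  */
}

int main(void)
{
    write_bits8(0xFF, 0x0F);            /* set only the low nibble */
    printf("reg = 0x%02X\n", fake_reg); /* prints 0xAF             */
    return 0;
}
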
*/ extern void edac_mc_handle_ce(struct mem_ctl_info *mci, - unsigned long page_frame_number, unsigned long offset_in_page, - unsigned long syndrome, int row, int channel, - const char *msg); + unsigned long page_frame_number, + unsigned long offset_in_page, + unsigned long syndrome, int row, int channel, + const char *msg); extern void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, - const char *msg); + const char *msg); extern void edac_mc_handle_ue(struct mem_ctl_info *mci, - unsigned long page_frame_number, unsigned long offset_in_page, - int row, const char *msg); + unsigned long page_frame_number, + unsigned long offset_in_page, int row, + const char *msg); extern void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, - const char *msg); -extern void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci, - unsigned int csrow, - unsigned int channel0, - unsigned int channel1, - char *msg); -extern void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci, - unsigned int csrow, - unsigned int channel, - char *msg); + const char *msg); +extern void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci, unsigned int csrow, + unsigned int channel0, unsigned int channel1, + char *msg); +extern void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci, unsigned int csrow, + unsigned int channel, char *msg); /* - * This kmalloc's and initializes all the structures. - * Can't be used if all structures don't have the same lifetime. + * edac_device APIs */ -extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows, - unsigned nr_chans); +extern int edac_device_add_device(struct edac_device_ctl_info *edac_dev); +extern struct edac_device_ctl_info *edac_device_del_device(struct device *dev); +extern void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev, + int inst_nr, int block_nr, const char *msg); +extern void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev, + int inst_nr, int block_nr, const char *msg); -/* Free an mc previously allocated by edac_mc_alloc() */ -extern void edac_mc_free(struct mem_ctl_info *mci); +/* + * edac_pci APIs + */ +extern struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt, + const char *edac_pci_name); + +extern void edac_pci_free_ctl_info(struct edac_pci_ctl_info *pci); + +extern void edac_pci_reset_delay_period(struct edac_pci_ctl_info *pci, + unsigned long value); + +extern int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx); +extern struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev); + +extern struct edac_pci_ctl_info *edac_pci_create_generic_ctl( + struct device *dev, + const char *mod_name); + +extern void edac_pci_release_generic_ctl(struct edac_pci_ctl_info *pci); +extern int edac_pci_create_sysfs(struct edac_pci_ctl_info *pci); +extern void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci); + +/* + * edac misc APIs + */ +extern char *edac_op_state_to_string(int op_state); -#endif /* _EDAC_MC_H_ */ +#endif /* _EDAC_CORE_H_ */ diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c new file mode 100644 index 000000000000..f3690a697cf9 --- /dev/null +++ b/drivers/edac/edac_device.c @@ -0,0 +1,746 @@ + +/* + * edac_device.c + * (C) 2007 www.douglaskthompson.com + * + * This file may be distributed under the terms of the + * GNU General Public License. 
+ * + * Written by Doug Thompson <norsk5@xmission.com> + * + * edac_device API implementation + * 19 Jan 2007 + */ + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/smp.h> +#include <linux/init.h> +#include <linux/sysctl.h> +#include <linux/highmem.h> +#include <linux/timer.h> +#include <linux/slab.h> +#include <linux/jiffies.h> +#include <linux/spinlock.h> +#include <linux/list.h> +#include <linux/sysdev.h> +#include <linux/ctype.h> +#include <linux/workqueue.h> +#include <asm/uaccess.h> +#include <asm/page.h> + +#include "edac_core.h" +#include "edac_module.h" + +/* lock for the list: 'edac_device_list', manipulation of this list + * is protected by the 'device_ctls_mutex' lock + */ +static DEFINE_MUTEX(device_ctls_mutex); +static struct list_head edac_device_list = LIST_HEAD_INIT(edac_device_list); + +#ifdef CONFIG_EDAC_DEBUG +static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev) +{ + debugf3("\tedac_dev = %p dev_idx=%d \n", edac_dev, edac_dev->dev_idx); + debugf4("\tedac_dev->edac_check = %p\n", edac_dev->edac_check); + debugf3("\tdev = %p\n", edac_dev->dev); + debugf3("\tmod_name:ctl_name = %s:%s\n", + edac_dev->mod_name, edac_dev->ctl_name); + debugf3("\tpvt_info = %p\n\n", edac_dev->pvt_info); +} +#endif /* CONFIG_EDAC_DEBUG */ + + +/* + * edac_device_alloc_ctl_info() + * Allocate a new edac device control info structure + * + * The control structure is allocated in complete chunk + * from the OS. It is in turn sub allocated to the + * various objects that compose the struture + * + * The structure has a 'nr_instance' array within itself. + * Each instance represents a major component + * Example: L1 cache and L2 cache are 2 instance components + * + * Within each instance is an array of 'nr_blocks' blockoffsets + */ +struct edac_device_ctl_info *edac_device_alloc_ctl_info( + unsigned sz_private, + char *edac_device_name, unsigned nr_instances, + char *edac_block_name, unsigned nr_blocks, + unsigned offset_value, /* zero, 1, or other based offset */ + struct edac_dev_sysfs_block_attribute *attrib_spec, unsigned nr_attrib, + int device_index) +{ + struct edac_device_ctl_info *dev_ctl; + struct edac_device_instance *dev_inst, *inst; + struct edac_device_block *dev_blk, *blk_p, *blk; + struct edac_dev_sysfs_block_attribute *dev_attrib, *attrib_p, *attrib; + unsigned total_size; + unsigned count; + unsigned instance, block, attr; + void *pvt; + int err; + + debugf4("%s() instances=%d blocks=%d\n", + __func__, nr_instances, nr_blocks); + + /* Calculate the size of memory we need to allocate AND + * determine the offsets of the various item arrays + * (instance,block,attrib) from the start of an allocated structure. + * We want the alignment of each item (instance,block,attrib) + * to be at least as stringent as what the compiler would + * provide if we could simply hardcode everything into a single struct. + */ + dev_ctl = (struct edac_device_ctl_info *)NULL; + + /* Calc the 'end' offset past end of ONE ctl_info structure + * which will become the start of the 'instance' array + */ + dev_inst = edac_align_ptr(&dev_ctl[1], sizeof(*dev_inst)); + + /* Calc the 'end' offset past the instance array within the ctl_info + * which will become the start of the block array + */ + dev_blk = edac_align_ptr(&dev_inst[nr_instances], sizeof(*dev_blk)); + + /* Calc the 'end' offset past the dev_blk array + * which will become the start of the attrib array, if any. 
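
[Editor's note] The comments above describe the allocation strategy used in edac_device_alloc_ctl_info(): the offsets of the instance, block and attribute arrays are first computed against an imaginary structure at address zero, a single allocation of the total size is made, and the zero-based offsets are then turned into real pointers. Below is a stripped-down userspace version of that technique; align_up() is merely a stand-in for what the kernel helper edac_align_ptr() provides, and all demo_* names are invented:

#include <stdio.h>
#include <stdlib.h>

struct demo_ctl  { struct demo_inst *instances; int nr_inst; };
struct demo_inst { struct demo_blk *blocks; int nr_blk; };
struct demo_blk  { unsigned ce_count; };

/* round 'off' up so an object with alignment 'align' may start there */
static size_t align_up(size_t off, size_t align)
{
    return (off + align - 1) & ~(align - 1);
}

static struct demo_ctl *demo_alloc(int nr_inst, int nr_blk)
{
    /* 1) lay everything out against offset 0 */
    size_t inst_off = align_up(sizeof(struct demo_ctl),
                               _Alignof(struct demo_inst));
    size_t blk_off  = align_up(inst_off + nr_inst * sizeof(struct demo_inst),
                               _Alignof(struct demo_blk));
    size_t total    = blk_off +
                      (size_t)nr_inst * nr_blk * sizeof(struct demo_blk);

    /* 2) one allocation covers the whole hierarchy */
    struct demo_ctl *ctl = calloc(1, total);
    if (!ctl)
        return NULL;

    /* 3) rebase the zero-based offsets into the real block of memory */
    ctl->instances = (struct demo_inst *)((char *)ctl + inst_off);
    ctl->nr_inst = nr_inst;
    for (int i = 0; i < nr_inst; i++) {
        ctl->instances[i].blocks =
            (struct demo_blk *)((char *)ctl + blk_off) + i * nr_blk;
        ctl->instances[i].nr_blk = nr_blk;
    }
    return ctl;
}

int main(void)
{
    struct demo_ctl *ctl = demo_alloc(2, 3);

    ctl->instances[1].blocks[2].ce_count = 7;   /* touch the last block */
    printf("ce_count = %u\n", ctl->instances[1].blocks[2].ce_count);
    free(ctl);      /* a single free releases the whole tree */
    return 0;
}
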
+ */ + count = nr_instances * nr_blocks; + dev_attrib = edac_align_ptr(&dev_blk[count], sizeof(*dev_attrib)); + + /* Check for case of when an attribute array is specified */ + if (nr_attrib > 0) { + /* calc how many nr_attrib we need */ + count *= nr_attrib; + + /* Calc the 'end' offset past the attributes array */ + pvt = edac_align_ptr(&dev_attrib[count], sz_private); + } else { + /* no attribute array specificed */ + pvt = edac_align_ptr(dev_attrib, sz_private); + } + + /* 'pvt' now points to where the private data area is. + * At this point 'pvt' (like dev_inst,dev_blk and dev_attrib) + * is baselined at ZERO + */ + total_size = ((unsigned long)pvt) + sz_private; + + /* Allocate the amount of memory for the set of control structures */ + dev_ctl = kzalloc(total_size, GFP_KERNEL); + if (dev_ctl == NULL) + return NULL; + + /* Adjust pointers so they point within the actual memory we + * just allocated rather than an imaginary chunk of memory + * located at address 0. + * 'dev_ctl' points to REAL memory, while the others are + * ZERO based and thus need to be adjusted to point within + * the allocated memory. + */ + dev_inst = (struct edac_device_instance *) + (((char *)dev_ctl) + ((unsigned long)dev_inst)); + dev_blk = (struct edac_device_block *) + (((char *)dev_ctl) + ((unsigned long)dev_blk)); + dev_attrib = (struct edac_dev_sysfs_block_attribute *) + (((char *)dev_ctl) + ((unsigned long)dev_attrib)); + pvt = sz_private ? (((char *)dev_ctl) + ((unsigned long)pvt)) : NULL; + + /* Begin storing the information into the control info structure */ + dev_ctl->dev_idx = device_index; + dev_ctl->nr_instances = nr_instances; + dev_ctl->instances = dev_inst; + dev_ctl->pvt_info = pvt; + + /* Name of this edac device */ + snprintf(dev_ctl->name,sizeof(dev_ctl->name),"%s",edac_device_name); + + debugf4("%s() edac_dev=%p next after end=%p\n", + __func__, dev_ctl, pvt + sz_private ); + + /* Initialize every Instance */ + for (instance = 0; instance < nr_instances; instance++) { + inst = &dev_inst[instance]; + inst->ctl = dev_ctl; + inst->nr_blocks = nr_blocks; + blk_p = &dev_blk[instance * nr_blocks]; + inst->blocks = blk_p; + + /* name of this instance */ + snprintf(inst->name, sizeof(inst->name), + "%s%u", edac_device_name, instance); + + /* Initialize every block in each instance */ + for (block = 0; block < nr_blocks; block++) { + blk = &blk_p[block]; + blk->instance = inst; + snprintf(blk->name, sizeof(blk->name), + "%s%d", edac_block_name, block+offset_value); + + debugf4("%s() instance=%d inst_p=%p block=#%d " + "block_p=%p name='%s'\n", + __func__, instance, inst, block, + blk, blk->name); + + /* if there are NO attributes OR no attribute pointer + * then continue on to next block iteration + */ + if ((nr_attrib == 0) || (attrib_spec == NULL)) + continue; + + /* setup the attribute array for this block */ + blk->nr_attribs = nr_attrib; + attrib_p = &dev_attrib[block*nr_instances*nr_attrib]; + blk->block_attributes = attrib_p; + + debugf4("%s() THIS BLOCK_ATTRIB=%p\n", + __func__, blk->block_attributes); + + /* Initialize every user specified attribute in this + * block with the data the caller passed in + * Each block gets its own copy of pointers, + * and its unique 'value' + */ + for (attr = 0; attr < nr_attrib; attr++) { + attrib = &attrib_p[attr]; + + /* populate the unique per attrib + * with the code pointers and info + */ + attrib->attr = attrib_spec[attr].attr; + attrib->show = attrib_spec[attr].show; + attrib->store = attrib_spec[attr].store; + + attrib->block = blk; /* up link */ 
+ + debugf4("%s() alloc-attrib=%p attrib_name='%s' " + "attrib-spec=%p spec-name=%s\n", + __func__, attrib, attrib->attr.name, + &attrib_spec[attr], + attrib_spec[attr].attr.name + ); + } + } + } + + /* Mark this instance as merely ALLOCATED */ + dev_ctl->op_state = OP_ALLOC; + + /* + * Initialize the 'root' kobj for the edac_device controller + */ + err = edac_device_register_sysfs_main_kobj(dev_ctl); + if (err) { + kfree(dev_ctl); + return NULL; + } + + /* at this point, the root kobj is valid, and in order to + * 'free' the object, then the function: + * edac_device_unregister_sysfs_main_kobj() must be called + * which will perform kobj unregistration and the actual free + * will occur during the kobject callback operation + */ + + return dev_ctl; +} +EXPORT_SYMBOL_GPL(edac_device_alloc_ctl_info); + +/* + * edac_device_free_ctl_info() + * frees the memory allocated by the edac_device_alloc_ctl_info() + * function + */ +void edac_device_free_ctl_info(struct edac_device_ctl_info *ctl_info) +{ + edac_device_unregister_sysfs_main_kobj(ctl_info); +} +EXPORT_SYMBOL_GPL(edac_device_free_ctl_info); + +/* + * find_edac_device_by_dev + * scans the edac_device list for a specific 'struct device *' + * + * lock to be held prior to call: device_ctls_mutex + * + * Return: + * pointer to control structure managing 'dev' + * NULL if not found on list + */ +static struct edac_device_ctl_info *find_edac_device_by_dev(struct device *dev) +{ + struct edac_device_ctl_info *edac_dev; + struct list_head *item; + + debugf0("%s()\n", __func__); + + list_for_each(item, &edac_device_list) { + edac_dev = list_entry(item, struct edac_device_ctl_info, link); + + if (edac_dev->dev == dev) + return edac_dev; + } + + return NULL; +} + +/* + * add_edac_dev_to_global_list + * Before calling this function, caller must + * assign a unique value to edac_dev->dev_idx. + * + * lock to be held prior to call: device_ctls_mutex + * + * Return: + * 0 on success + * 1 on failure. 
+ */ +static int add_edac_dev_to_global_list(struct edac_device_ctl_info *edac_dev) +{ + struct list_head *item, *insert_before; + struct edac_device_ctl_info *rover; + + insert_before = &edac_device_list; + + /* Determine if already on the list */ + rover = find_edac_device_by_dev(edac_dev->dev); + if (unlikely(rover != NULL)) + goto fail0; + + /* Insert in ascending order by 'dev_idx', so find position */ + list_for_each(item, &edac_device_list) { + rover = list_entry(item, struct edac_device_ctl_info, link); + + if (rover->dev_idx >= edac_dev->dev_idx) { + if (unlikely(rover->dev_idx == edac_dev->dev_idx)) + goto fail1; + + insert_before = item; + break; + } + } + + list_add_tail_rcu(&edac_dev->link, insert_before); + return 0; + +fail0: + edac_printk(KERN_WARNING, EDAC_MC, + "%s (%s) %s %s already assigned %d\n", + rover->dev->bus_id, dev_name(rover), + rover->mod_name, rover->ctl_name, rover->dev_idx); + return 1; + +fail1: + edac_printk(KERN_WARNING, EDAC_MC, + "bug in low-level driver: attempt to assign\n" + " duplicate dev_idx %d in %s()\n", rover->dev_idx, + __func__); + return 1; +} + +/* + * complete_edac_device_list_del + * + * callback function when reference count is zero + */ +static void complete_edac_device_list_del(struct rcu_head *head) +{ + struct edac_device_ctl_info *edac_dev; + + edac_dev = container_of(head, struct edac_device_ctl_info, rcu); + INIT_LIST_HEAD(&edac_dev->link); + complete(&edac_dev->removal_complete); +} + +/* + * del_edac_device_from_global_list + * + * remove the RCU, setup for a callback call, + * then wait for the callback to occur + */ +static void del_edac_device_from_global_list(struct edac_device_ctl_info + *edac_device) +{ + list_del_rcu(&edac_device->link); + + init_completion(&edac_device->removal_complete); + call_rcu(&edac_device->rcu, complete_edac_device_list_del); + wait_for_completion(&edac_device->removal_complete); +} + +/** + * edac_device_find + * Search for a edac_device_ctl_info structure whose index is 'idx'. + * + * If found, return a pointer to the structure. + * Else return NULL. + * + * Caller must hold device_ctls_mutex. + */ +struct edac_device_ctl_info *edac_device_find(int idx) +{ + struct list_head *item; + struct edac_device_ctl_info *edac_dev; + + /* Iterate over list, looking for exact match of ID */ + list_for_each(item, &edac_device_list) { + edac_dev = list_entry(item, struct edac_device_ctl_info, link); + + if (edac_dev->dev_idx >= idx) { + if (edac_dev->dev_idx == idx) + return edac_dev; + + /* not on list, so terminate early */ + break; + } + } + + return NULL; +} +EXPORT_SYMBOL_GPL(edac_device_find); + +/* + * edac_device_workq_function + * performs the operation scheduled by a workq request + * + * this workq is embedded within an edac_device_ctl_info + * structure, that needs to be polled for possible error events. + * + * This operation is to acquire the list mutex lock + * (thus preventing insertation or deletion) + * and then call the device's poll function IFF this device is + * running polled and there is a poll function defined. 
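
[Editor's note] add_edac_dev_to_global_list() and edac_device_find() above both exploit the same invariant: the global list stays sorted by dev_idx, so a scan can stop as soon as it meets an index greater than or equal to the one of interest, and a duplicate index is detected on the way in. A compact userspace version of that insert-in-order-with-duplicate-check, using a plain singly linked list instead of the kernel's list_head/RCU machinery (node, add_sorted and find are invented names):

#include <stdio.h>
#include <stdlib.h>

struct node {
    int dev_idx;
    struct node *next;
};

static struct node *head;   /* list kept sorted by dev_idx */

/* insert in ascending order; return -1 if dev_idx is already taken */
static int add_sorted(int dev_idx)
{
    struct node **pos = &head;
    struct node *n;

    while (*pos && (*pos)->dev_idx < dev_idx)
        pos = &(*pos)->next;

    if (*pos && (*pos)->dev_idx == dev_idx)
        return -1;  /* duplicate index: refuse, like the fail1: path above */

    n = malloc(sizeof(*n));
    if (!n)
        return -1;
    n->dev_idx = dev_idx;
    n->next = *pos;
    *pos = n;
    return 0;
}

/* sorted order lets the search terminate early, as edac_device_find() does */
static struct node *find(int dev_idx)
{
    for (struct node *n = head; n; n = n->next) {
        if (n->dev_idx > dev_idx)
            break;
        if (n->dev_idx == dev_idx)
            return n;
    }
    return NULL;
}

int main(void)
{
    add_sorted(2);
    add_sorted(0);
    add_sorted(1);
    printf("duplicate insert of 1 -> %d\n", add_sorted(1));  /* -1 */
    printf("find(2): %s\n", find(2) ? "found" : "missing");
    printf("find(5): %s\n", find(5) ? "found" : "missing");
    return 0;
}
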
+ */ +static void edac_device_workq_function(struct work_struct *work_req) +{ + struct delayed_work *d_work = (struct delayed_work *)work_req; + struct edac_device_ctl_info *edac_dev = to_edac_device_ctl_work(d_work); + + mutex_lock(&device_ctls_mutex); + + /* Only poll controllers that are running polled and have a check */ + if ((edac_dev->op_state == OP_RUNNING_POLL) && + (edac_dev->edac_check != NULL)) { + edac_dev->edac_check(edac_dev); + } + + mutex_unlock(&device_ctls_mutex); + + /* Reschedule the workq for the next time period to start again + * if the number of msec is for 1 sec, then adjust to the next + * whole one second to save timers fireing all over the period + * between integral seconds + */ + if (edac_dev->poll_msec == 1000) + queue_delayed_work(edac_workqueue, &edac_dev->work, + round_jiffies(edac_dev->delay)); + else + queue_delayed_work(edac_workqueue, &edac_dev->work, + edac_dev->delay); +} + +/* + * edac_device_workq_setup + * initialize a workq item for this edac_device instance + * passing in the new delay period in msec + */ +void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev, + unsigned msec) +{ + debugf0("%s()\n", __func__); + + /* take the arg 'msec' and set it into the control structure + * to used in the time period calculation + * then calc the number of jiffies that represents + */ + edac_dev->poll_msec = msec; + edac_dev->delay = msecs_to_jiffies(msec); + + INIT_DELAYED_WORK(&edac_dev->work, edac_device_workq_function); + + /* optimize here for the 1 second case, which will be normal value, to + * fire ON the 1 second time event. This helps reduce all sorts of + * timers firing on sub-second basis, while they are happy + * to fire together on the 1 second exactly + */ + if (edac_dev->poll_msec == 1000) + queue_delayed_work(edac_workqueue, &edac_dev->work, + round_jiffies(edac_dev->delay)); + else + queue_delayed_work(edac_workqueue, &edac_dev->work, + edac_dev->delay); +} + +/* + * edac_device_workq_teardown + * stop the workq processing on this edac_dev + */ +void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev) +{ + int status; + + status = cancel_delayed_work(&edac_dev->work); + if (status == 0) { + /* workq instance might be running, wait for it */ + flush_workqueue(edac_workqueue); + } +} + +/* + * edac_device_reset_delay_period + * + * need to stop any outstanding workq queued up at this time + * because we will be resetting the sleep time. + * Then restart the workq on the new delay + */ +void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev, + unsigned long value) +{ + /* cancel the current workq request, without the mutex lock */ + edac_device_workq_teardown(edac_dev); + + /* acquire the mutex before doing the workq setup */ + mutex_lock(&device_ctls_mutex); + + /* restart the workq request, with new delay value */ + edac_device_workq_setup(edac_dev, value); + + mutex_unlock(&device_ctls_mutex); +} + +/** + * edac_device_add_device: Insert the 'edac_dev' structure into the + * edac_device global list and create sysfs entries associated with + * edac_device structure. + * @edac_device: pointer to the edac_device structure to be added to the list + * 'edac_device' structure. 
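
[Editor's note] The work-queue code above converts the caller's poll interval from milliseconds to jiffies and, for the common 1000 ms case, uses round_jiffies() so that the per-device pollers fire together near whole-second boundaries instead of scattering timers across the second. The rounding idea expressed in userspace terms, with CLOCK_MONOTONIC standing in for jiffies and invented helper names:

#include <stdio.h>
#include <time.h>

#define MSEC_PER_SEC 1000L

/* absolute deadline 'delay_ms' from now, in milliseconds */
static long deadline_ms(long delay_ms)
{
    struct timespec now;

    clock_gettime(CLOCK_MONOTONIC, &now);
    return now.tv_sec * MSEC_PER_SEC + now.tv_nsec / 1000000L + delay_ms;
}

/* round a deadline up to the next whole second, in the spirit of round_jiffies() */
static long round_to_second(long t_ms)
{
    return ((t_ms + MSEC_PER_SEC - 1) / MSEC_PER_SEC) * MSEC_PER_SEC;
}

int main(void)
{
    long poll_msec = 1000;
    long next = deadline_ms(poll_msec);

    /* only the 1 second case is rounded; odd periods are left untouched */
    if (poll_msec == 1000)
        next = round_to_second(next);

    printf("next poll at t=%ld ms (monotonic)\n", next);
    return 0;
}
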
+ * + * Return: + * 0 Success + * !0 Failure + */ +int edac_device_add_device(struct edac_device_ctl_info *edac_dev) +{ + debugf0("%s()\n", __func__); + +#ifdef CONFIG_EDAC_DEBUG + if (edac_debug_level >= 3) + edac_device_dump_device(edac_dev); +#endif + mutex_lock(&device_ctls_mutex); + + if (add_edac_dev_to_global_list(edac_dev)) + goto fail0; + + /* set load time so that error rate can be tracked */ + edac_dev->start_time = jiffies; + + /* create this instance's sysfs entries */ + if (edac_device_create_sysfs(edac_dev)) { + edac_device_printk(edac_dev, KERN_WARNING, + "failed to create sysfs device\n"); + goto fail1; + } + + /* If there IS a check routine, then we are running POLLED */ + if (edac_dev->edac_check != NULL) { + /* This instance is NOW RUNNING */ + edac_dev->op_state = OP_RUNNING_POLL; + + /* + * enable workq processing on this instance, + * default = 1000 msec + */ + edac_device_workq_setup(edac_dev, 1000); + } else { + edac_dev->op_state = OP_RUNNING_INTERRUPT; + } + + /* Report action taken */ + edac_device_printk(edac_dev, KERN_INFO, + "Giving out device to module '%s' controller " + "'%s': DEV '%s' (%s)\n", + edac_dev->mod_name, + edac_dev->ctl_name, + dev_name(edac_dev), + edac_op_state_to_string(edac_dev->op_state)); + + mutex_unlock(&device_ctls_mutex); + return 0; + +fail1: + /* Some error, so remove the entry from the lsit */ + del_edac_device_from_global_list(edac_dev); + +fail0: + mutex_unlock(&device_ctls_mutex); + return 1; +} +EXPORT_SYMBOL_GPL(edac_device_add_device); + +/** + * edac_device_del_device: + * Remove sysfs entries for specified edac_device structure and + * then remove edac_device structure from global list + * + * @pdev: + * Pointer to 'struct device' representing edac_device + * structure to remove. + * + * Return: + * Pointer to removed edac_device structure, + * OR NULL if device not found. 
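
[Editor's note] edac_device_add_device() above follows the usual kernel error-unwind idiom: each setup step that can fail jumps to a label which undoes only the steps already completed, in reverse order, before returning the error. The same shape in a standalone C program; the step/undo functions are invented and purely illustrative:

#include <stdio.h>
#include <stdlib.h>

static int *step_a(void)   { return malloc(sizeof(int)); }
static void undo_a(int *a) { free(a); }

static int step_b(void)    { return -1;  /* pretend this step fails */ }

static int setup(void)
{
    int *a;
    int err;

    a = step_a();
    if (!a) {
        err = -1;
        goto fail0;     /* nothing to undo yet */
    }

    err = step_b();
    if (err)
        goto fail1;     /* undo step_a() only */

    return 0;

fail1:
    undo_a(a);
fail0:
    return err;
}

int main(void)
{
    printf("setup() = %d\n", setup());
    return 0;
}
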
+ */ +struct edac_device_ctl_info *edac_device_del_device(struct device *dev) +{ + struct edac_device_ctl_info *edac_dev; + + debugf0("%s()\n", __func__); + + mutex_lock(&device_ctls_mutex); + + /* Find the structure on the list, if not there, then leave */ + edac_dev = find_edac_device_by_dev(dev); + if (edac_dev == NULL) { + mutex_unlock(&device_ctls_mutex); + return NULL; + } + + /* mark this instance as OFFLINE */ + edac_dev->op_state = OP_OFFLINE; + + /* clear workq processing on this instance */ + edac_device_workq_teardown(edac_dev); + + /* deregister from global list */ + del_edac_device_from_global_list(edac_dev); + + mutex_unlock(&device_ctls_mutex); + + /* Tear down the sysfs entries for this instance */ + edac_device_remove_sysfs(edac_dev); + + edac_printk(KERN_INFO, EDAC_MC, + "Removed device %d for %s %s: DEV %s\n", + edac_dev->dev_idx, + edac_dev->mod_name, edac_dev->ctl_name, dev_name(edac_dev)); + + return edac_dev; +} +EXPORT_SYMBOL_GPL(edac_device_del_device); + +static inline int edac_device_get_log_ce(struct edac_device_ctl_info *edac_dev) +{ + return edac_dev->log_ce; +} + +static inline int edac_device_get_log_ue(struct edac_device_ctl_info *edac_dev) +{ + return edac_dev->log_ue; +} + +static inline int edac_device_get_panic_on_ue(struct edac_device_ctl_info + *edac_dev) +{ + return edac_dev->panic_on_ue; +} + +/* + * edac_device_handle_ce + * perform a common output and handling of an 'edac_dev' CE event + */ +void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev, + int inst_nr, int block_nr, const char *msg) +{ + struct edac_device_instance *instance; + struct edac_device_block *block = NULL; + + if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) { + edac_device_printk(edac_dev, KERN_ERR, + "INTERNAL ERROR: 'instance' out of range " + "(%d >= %d)\n", inst_nr, + edac_dev->nr_instances); + return; + } + + instance = edac_dev->instances + inst_nr; + + if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) { + edac_device_printk(edac_dev, KERN_ERR, + "INTERNAL ERROR: instance %d 'block' " + "out of range (%d >= %d)\n", + inst_nr, block_nr, + instance->nr_blocks); + return; + } + + if (instance->nr_blocks > 0) { + block = instance->blocks + block_nr; + block->counters.ce_count++; + } + + /* Propogate the count up the 'totals' tree */ + instance->counters.ce_count++; + edac_dev->counters.ce_count++; + + if (edac_device_get_log_ce(edac_dev)) + edac_device_printk(edac_dev, KERN_WARNING, + "CE: %s instance: %s block: %s '%s'\n", + edac_dev->ctl_name, instance->name, + block ? 
block->name : "N/A", msg); +} +EXPORT_SYMBOL_GPL(edac_device_handle_ce); + +/* + * edac_device_handle_ue + * perform a common output and handling of an 'edac_dev' UE event + */ +void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev, + int inst_nr, int block_nr, const char *msg) +{ + struct edac_device_instance *instance; + struct edac_device_block *block = NULL; + + if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) { + edac_device_printk(edac_dev, KERN_ERR, + "INTERNAL ERROR: 'instance' out of range " + "(%d >= %d)\n", inst_nr, + edac_dev->nr_instances); + return; + } + + instance = edac_dev->instances + inst_nr; + + if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) { + edac_device_printk(edac_dev, KERN_ERR, + "INTERNAL ERROR: instance %d 'block' " + "out of range (%d >= %d)\n", + inst_nr, block_nr, + instance->nr_blocks); + return; + } + + if (instance->nr_blocks > 0) { + block = instance->blocks + block_nr; + block->counters.ue_count++; + } + + /* Propogate the count up the 'totals' tree */ + instance->counters.ue_count++; + edac_dev->counters.ue_count++; + + if (edac_device_get_log_ue(edac_dev)) + edac_device_printk(edac_dev, KERN_EMERG, + "UE: %s instance: %s block: %s '%s'\n", + edac_dev->ctl_name, instance->name, + block ? block->name : "N/A", msg); + + if (edac_device_get_panic_on_ue(edac_dev)) + panic("EDAC %s: UE instance: %s block %s '%s'\n", + edac_dev->ctl_name, instance->name, + block ? block->name : "N/A", msg); +} +EXPORT_SYMBOL_GPL(edac_device_handle_ue); diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c new file mode 100644 index 000000000000..70b837f23c43 --- /dev/null +++ b/drivers/edac/edac_device_sysfs.c @@ -0,0 +1,896 @@ +/* + * file for managing the edac_device class of devices for EDAC + * + * (C) 2007 SoftwareBitMaker (http://www.softwarebitmaker.com) + * + * This file may be distributed under the terms of the + * GNU General Public License. 
+ * + * Written Doug Thompson <norsk5@xmission.com> + * + */ + +#include <linux/ctype.h> +#include <linux/module.h> + +#include "edac_core.h" +#include "edac_module.h" + +#define EDAC_DEVICE_SYMLINK "device" + +#define to_edacdev(k) container_of(k, struct edac_device_ctl_info, kobj) +#define to_edacdev_attr(a) container_of(a, struct edacdev_attribute, attr) + + +/* + * Set of edac_device_ctl_info attribute store/show functions + */ + +/* 'log_ue' */ +static ssize_t edac_device_ctl_log_ue_show(struct edac_device_ctl_info + *ctl_info, char *data) +{ + return sprintf(data, "%u\n", ctl_info->log_ue); +} + +static ssize_t edac_device_ctl_log_ue_store(struct edac_device_ctl_info + *ctl_info, const char *data, + size_t count) +{ + /* if parameter is zero, turn off flag, if non-zero turn on flag */ + ctl_info->log_ue = (simple_strtoul(data, NULL, 0) != 0); + + return count; +} + +/* 'log_ce' */ +static ssize_t edac_device_ctl_log_ce_show(struct edac_device_ctl_info + *ctl_info, char *data) +{ + return sprintf(data, "%u\n", ctl_info->log_ce); +} + +static ssize_t edac_device_ctl_log_ce_store(struct edac_device_ctl_info + *ctl_info, const char *data, + size_t count) +{ + /* if parameter is zero, turn off flag, if non-zero turn on flag */ + ctl_info->log_ce = (simple_strtoul(data, NULL, 0) != 0); + + return count; +} + +/* 'panic_on_ue' */ +static ssize_t edac_device_ctl_panic_on_ue_show(struct edac_device_ctl_info + *ctl_info, char *data) +{ + return sprintf(data, "%u\n", ctl_info->panic_on_ue); +} + +static ssize_t edac_device_ctl_panic_on_ue_store(struct edac_device_ctl_info + *ctl_info, const char *data, + size_t count) +{ + /* if parameter is zero, turn off flag, if non-zero turn on flag */ + ctl_info->panic_on_ue = (simple_strtoul(data, NULL, 0) != 0); + + return count; +} + +/* 'poll_msec' show and store functions*/ +static ssize_t edac_device_ctl_poll_msec_show(struct edac_device_ctl_info + *ctl_info, char *data) +{ + return sprintf(data, "%u\n", ctl_info->poll_msec); +} + +static ssize_t edac_device_ctl_poll_msec_store(struct edac_device_ctl_info + *ctl_info, const char *data, + size_t count) +{ + unsigned long value; + + /* get the value and enforce that it is non-zero, must be at least + * one millisecond for the delay period, between scans + * Then cancel last outstanding delay for the work request + * and set a new one. 
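
[Editor's note] The store handlers above all reduce the user-supplied string to an integer with simple_strtoul(): the boolean controls treat any non-zero value as "on", and poll_msec feeds the parsed value straight into edac_device_reset_delay_period(). The userspace equivalent with strtoul, using hypothetical flag and period variables:

#include <stdio.h>
#include <stdlib.h>

static int log_ce;                      /* 0 = off, non-zero = on      */
static unsigned long poll_msec = 1000;  /* current polling period (ms) */

/* what a 'log_ce' store handler does with the string written to sysfs */
static void store_log_ce(const char *buf)
{
    log_ce = (strtoul(buf, NULL, 0) != 0);
}

/* what a 'poll_msec' store handler does: parse, then reprogram the timer */
static void store_poll_msec(const char *buf)
{
    poll_msec = strtoul(buf, NULL, 0);
    /* the kernel code would now call edac_device_reset_delay_period() */
}

int main(void)
{
    store_log_ce("0x10");       /* any non-zero value enables */
    store_poll_msec("2000");
    printf("log_ce=%d poll_msec=%lu\n", log_ce, poll_msec);
    return 0;
}
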
+ */ + value = simple_strtoul(data, NULL, 0); + edac_device_reset_delay_period(ctl_info, value); + + return count; +} + +/* edac_device_ctl_info specific attribute structure */ +struct ctl_info_attribute { + struct attribute attr; + ssize_t(*show) (struct edac_device_ctl_info *, char *); + ssize_t(*store) (struct edac_device_ctl_info *, const char *, size_t); +}; + +#define to_ctl_info(k) container_of(k, struct edac_device_ctl_info, kobj) +#define to_ctl_info_attr(a) container_of(a,struct ctl_info_attribute,attr) + +/* Function to 'show' fields from the edac_dev 'ctl_info' structure */ +static ssize_t edac_dev_ctl_info_show(struct kobject *kobj, + struct attribute *attr, char *buffer) +{ + struct edac_device_ctl_info *edac_dev = to_ctl_info(kobj); + struct ctl_info_attribute *ctl_info_attr = to_ctl_info_attr(attr); + + if (ctl_info_attr->show) + return ctl_info_attr->show(edac_dev, buffer); + return -EIO; +} + +/* Function to 'store' fields into the edac_dev 'ctl_info' structure */ +static ssize_t edac_dev_ctl_info_store(struct kobject *kobj, + struct attribute *attr, + const char *buffer, size_t count) +{ + struct edac_device_ctl_info *edac_dev = to_ctl_info(kobj); + struct ctl_info_attribute *ctl_info_attr = to_ctl_info_attr(attr); + + if (ctl_info_attr->store) + return ctl_info_attr->store(edac_dev, buffer, count); + return -EIO; +} + +/* edac_dev file operations for an 'ctl_info' */ +static struct sysfs_ops device_ctl_info_ops = { + .show = edac_dev_ctl_info_show, + .store = edac_dev_ctl_info_store +}; + +#define CTL_INFO_ATTR(_name,_mode,_show,_store) \ +static struct ctl_info_attribute attr_ctl_info_##_name = { \ + .attr = {.name = __stringify(_name), .mode = _mode }, \ + .show = _show, \ + .store = _store, \ +}; + +/* Declare the various ctl_info attributes here and their respective ops */ +CTL_INFO_ATTR(log_ue, S_IRUGO | S_IWUSR, + edac_device_ctl_log_ue_show, edac_device_ctl_log_ue_store); +CTL_INFO_ATTR(log_ce, S_IRUGO | S_IWUSR, + edac_device_ctl_log_ce_show, edac_device_ctl_log_ce_store); +CTL_INFO_ATTR(panic_on_ue, S_IRUGO | S_IWUSR, + edac_device_ctl_panic_on_ue_show, + edac_device_ctl_panic_on_ue_store); +CTL_INFO_ATTR(poll_msec, S_IRUGO | S_IWUSR, + edac_device_ctl_poll_msec_show, edac_device_ctl_poll_msec_store); + +/* Base Attributes of the EDAC_DEVICE ECC object */ +static struct ctl_info_attribute *device_ctrl_attr[] = { + &attr_ctl_info_panic_on_ue, + &attr_ctl_info_log_ue, + &attr_ctl_info_log_ce, + &attr_ctl_info_poll_msec, + NULL, +}; + +/* + * edac_device_ctrl_master_release + * + * called when the reference count for the 'main' kobj + * for a edac_device control struct reaches zero + * + * Reference count model: + * One 'main' kobject for each control structure allocated. + * That main kobj is initially set to one AND + * the reference count for the EDAC 'core' module is + * bumped by one, thus added 'keep in memory' dependency. + * + * Each new internal kobj (in instances and blocks) then + * bumps the 'main' kobject. + * + * When they are released their release functions decrement + * the 'main' kobj. + * + * When the main kobj reaches zero (0) then THIS function + * is called which then decrements the EDAC 'core' module. + * When the module reference count reaches zero then the + * module no longer has dependency on keeping the release + * function code in memory and module can be unloaded. + * + * This will support several control objects as well, each + * with its own 'main' kobj. 
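
[Editor's note] The comment above lays out the reference-count model: every instance and block kobject takes a reference on the control structure's 'main' kobject, and only when that count falls to zero does the release callback run, dropping the module reference and freeing the memory. Stripped of the kobject machinery, this is ordinary reference counting, as in the userspace sketch below (refobj, demo_ctl and the helpers are invented):

#include <stdio.h>
#include <stdlib.h>

struct refobj {
    int refcount;
    void (*release)(struct refobj *obj);    /* runs when count hits 0 */
};

static void ref_get(struct refobj *obj) { obj->refcount++; }

static void ref_put(struct refobj *obj)
{
    if (--obj->refcount == 0)
        obj->release(obj);
}

struct demo_ctl { struct refobj kobj; };    /* kobj is the first member */

/* stands in for edac_device_ctrl_master_release(): free the owner struct */
static void demo_release(struct refobj *obj)
{
    printf("last reference dropped: freeing control structure\n");
    free(obj);  /* kobj sits at offset 0, so this address is the demo_ctl */
}

int main(void)
{
    struct demo_ctl *ctl = malloc(sizeof(*ctl));

    ctl->kobj.refcount = 1;     /* the 'main' reference                */
    ctl->kobj.release = demo_release;

    ref_get(&ctl->kobj);        /* an instance kobject is created      */
    ref_get(&ctl->kobj);        /* a block kobject is created          */

    ref_put(&ctl->kobj);        /* block released                      */
    ref_put(&ctl->kobj);        /* instance released                   */
    ref_put(&ctl->kobj);        /* main kobject unregistered: freed    */
    return 0;
}
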
+ */ +static void edac_device_ctrl_master_release(struct kobject *kobj) +{ + struct edac_device_ctl_info *edac_dev = to_edacdev(kobj); + + debugf4("%s() control index=%d\n", __func__, edac_dev->dev_idx); + + /* decrement the EDAC CORE module ref count */ + module_put(edac_dev->owner); + + /* free the control struct containing the 'main' kobj + * passed in to this routine + */ + kfree(edac_dev); +} + +/* ktype for the main (master) kobject */ +static struct kobj_type ktype_device_ctrl = { + .release = edac_device_ctrl_master_release, + .sysfs_ops = &device_ctl_info_ops, + .default_attrs = (struct attribute **)device_ctrl_attr, +}; + +/* + * edac_device_register_sysfs_main_kobj + * + * perform the high level setup for the new edac_device instance + * + * Return: 0 SUCCESS + * !0 FAILURE + */ +int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev) +{ + struct sysdev_class *edac_class; + int err; + + debugf1("%s()\n", __func__); + + /* get the /sys/devices/system/edac reference */ + edac_class = edac_get_edac_class(); + if (edac_class == NULL) { + debugf1("%s() no edac_class error\n", __func__); + err = -ENODEV; + goto err_out; + } + + /* Point to the 'edac_class' this instance 'reports' to */ + edac_dev->edac_class = edac_class; + + /* Init the devices's kobject */ + memset(&edac_dev->kobj, 0, sizeof(struct kobject)); + edac_dev->kobj.ktype = &ktype_device_ctrl; + + /* set this new device under the edac_class kobject */ + edac_dev->kobj.parent = &edac_class->kset.kobj; + + /* generate sysfs "..../edac/<name>" */ + debugf4("%s() set name of kobject to: %s\n", __func__, edac_dev->name); + err = kobject_set_name(&edac_dev->kobj, "%s", edac_dev->name); + if (err) + goto err_out; + + /* Record which module 'owns' this control structure + * and bump the ref count of the module + */ + edac_dev->owner = THIS_MODULE; + + if (!try_module_get(edac_dev->owner)) { + err = -ENODEV; + goto err_out; + } + + /* register */ + err = kobject_register(&edac_dev->kobj); + if (err) { + debugf1("%s()Failed to register '.../edac/%s'\n", + __func__, edac_dev->name); + goto err_kobj_reg; + } + + /* At this point, to 'free' the control struct, + * edac_device_unregister_sysfs_main_kobj() must be used + */ + + debugf4("%s() Registered '.../edac/%s' kobject\n", + __func__, edac_dev->name); + + return 0; + + /* Error exit stack */ +err_kobj_reg: + module_put(edac_dev->owner); + +err_out: + return err; +} + +/* + * edac_device_unregister_sysfs_main_kobj: + * the '..../edac/<name>' kobject + */ +void edac_device_unregister_sysfs_main_kobj( + struct edac_device_ctl_info *edac_dev) +{ + debugf0("%s()\n", __func__); + debugf4("%s() name of kobject is: %s\n", + __func__, kobject_name(&edac_dev->kobj)); + + /* + * Unregister the edac device's kobject and + * allow for reference count to reach 0 at which point + * the callback will be called to: + * a) module_put() this module + * b) 'kfree' the memory + */ + kobject_unregister(&edac_dev->kobj); +} + +/* edac_dev -> instance information */ + +/* + * Set of low-level instance attribute show functions + */ +static ssize_t instance_ue_count_show(struct edac_device_instance *instance, + char *data) +{ + return sprintf(data, "%u\n", instance->counters.ue_count); +} + +static ssize_t instance_ce_count_show(struct edac_device_instance *instance, + char *data) +{ + return sprintf(data, "%u\n", instance->counters.ce_count); +} + +#define to_instance(k) container_of(k, struct edac_device_instance, kobj) +#define to_instance_attr(a) container_of(a,struct 
instance_attribute,attr) + +/* DEVICE instance kobject release() function */ +static void edac_device_ctrl_instance_release(struct kobject *kobj) +{ + struct edac_device_instance *instance; + + debugf1("%s()\n", __func__); + + /* map from this kobj to the main control struct + * and then dec the main kobj count + */ + instance = to_instance(kobj); + kobject_put(&instance->ctl->kobj); +} + +/* instance specific attribute structure */ +struct instance_attribute { + struct attribute attr; + ssize_t(*show) (struct edac_device_instance *, char *); + ssize_t(*store) (struct edac_device_instance *, const char *, size_t); +}; + +/* Function to 'show' fields from the edac_dev 'instance' structure */ +static ssize_t edac_dev_instance_show(struct kobject *kobj, + struct attribute *attr, char *buffer) +{ + struct edac_device_instance *instance = to_instance(kobj); + struct instance_attribute *instance_attr = to_instance_attr(attr); + + if (instance_attr->show) + return instance_attr->show(instance, buffer); + return -EIO; +} + +/* Function to 'store' fields into the edac_dev 'instance' structure */ +static ssize_t edac_dev_instance_store(struct kobject *kobj, + struct attribute *attr, + const char *buffer, size_t count) +{ + struct edac_device_instance *instance = to_instance(kobj); + struct instance_attribute *instance_attr = to_instance_attr(attr); + + if (instance_attr->store) + return instance_attr->store(instance, buffer, count); + return -EIO; +} + +/* edac_dev file operations for an 'instance' */ +static struct sysfs_ops device_instance_ops = { + .show = edac_dev_instance_show, + .store = edac_dev_instance_store +}; + +#define INSTANCE_ATTR(_name,_mode,_show,_store) \ +static struct instance_attribute attr_instance_##_name = { \ + .attr = {.name = __stringify(_name), .mode = _mode }, \ + .show = _show, \ + .store = _store, \ +}; + +/* + * Define attributes visible for the edac_device instance object + * Each contains a pointer to a show and an optional set + * function pointer that does the low level output/input + */ +INSTANCE_ATTR(ce_count, S_IRUGO, instance_ce_count_show, NULL); +INSTANCE_ATTR(ue_count, S_IRUGO, instance_ue_count_show, NULL); + +/* list of edac_dev 'instance' attributes */ +static struct instance_attribute *device_instance_attr[] = { + &attr_instance_ce_count, + &attr_instance_ue_count, + NULL, +}; + +/* The 'ktype' for each edac_dev 'instance' */ +static struct kobj_type ktype_instance_ctrl = { + .release = edac_device_ctrl_instance_release, + .sysfs_ops = &device_instance_ops, + .default_attrs = (struct attribute **)device_instance_attr, +}; + +/* edac_dev -> instance -> block information */ + +#define to_block(k) container_of(k, struct edac_device_block, kobj) +#define to_block_attr(a) \ + container_of(a, struct edac_dev_sysfs_block_attribute, attr) + +/* + * Set of low-level block attribute show functions + */ +static ssize_t block_ue_count_show(struct kobject *kobj, + struct attribute *attr, char *data) +{ + struct edac_device_block *block = to_block(kobj); + + return sprintf(data, "%u\n", block->counters.ue_count); +} + +static ssize_t block_ce_count_show(struct kobject *kobj, + struct attribute *attr, char *data) +{ + struct edac_device_block *block = to_block(kobj); + + return sprintf(data, "%u\n", block->counters.ce_count); +} + +/* DEVICE block kobject release() function */ +static void edac_device_ctrl_block_release(struct kobject *kobj) +{ + struct edac_device_block *block; + + debugf1("%s()\n", __func__); + + /* get the container of the kobj */ + block = 
to_block(kobj); + + /* map from 'block kobj' to 'block->instance->controller->main_kobj' + * now 'release' the block kobject + */ + kobject_put(&block->instance->ctl->kobj); +} + + +/* Function to 'show' fields from the edac_dev 'block' structure */ +static ssize_t edac_dev_block_show(struct kobject *kobj, + struct attribute *attr, char *buffer) +{ + struct edac_dev_sysfs_block_attribute *block_attr = + to_block_attr(attr); + + if (block_attr->show) + return block_attr->show(kobj, attr, buffer); + return -EIO; +} + +/* Function to 'store' fields into the edac_dev 'block' structure */ +static ssize_t edac_dev_block_store(struct kobject *kobj, + struct attribute *attr, + const char *buffer, size_t count) +{ + struct edac_dev_sysfs_block_attribute *block_attr; + + block_attr = to_block_attr(attr); + + if (block_attr->store) + return block_attr->store(kobj, attr, buffer, count); + return -EIO; +} + +/* edac_dev file operations for a 'block' */ +static struct sysfs_ops device_block_ops = { + .show = edac_dev_block_show, + .store = edac_dev_block_store +}; + +#define BLOCK_ATTR(_name,_mode,_show,_store) \ +static struct edac_dev_sysfs_block_attribute attr_block_##_name = { \ + .attr = {.name = __stringify(_name), .mode = _mode }, \ + .show = _show, \ + .store = _store, \ +}; + +BLOCK_ATTR(ce_count, S_IRUGO, block_ce_count_show, NULL); +BLOCK_ATTR(ue_count, S_IRUGO, block_ue_count_show, NULL); + +/* list of edac_dev 'block' attributes */ +static struct edac_dev_sysfs_block_attribute *device_block_attr[] = { + &attr_block_ce_count, + &attr_block_ue_count, + NULL, +}; + +/* The 'ktype' for each edac_dev 'block' */ +static struct kobj_type ktype_block_ctrl = { + .release = edac_device_ctrl_block_release, + .sysfs_ops = &device_block_ops, + .default_attrs = (struct attribute **)device_block_attr, +}; + +/* block ctor/dtor code */ + +/* + * edac_device_create_block + */ +static int edac_device_create_block(struct edac_device_ctl_info *edac_dev, + struct edac_device_instance *instance, + struct edac_device_block *block) +{ + int i; + int err; + struct edac_dev_sysfs_block_attribute *sysfs_attrib; + struct kobject *main_kobj; + + debugf4("%s() Instance '%s' inst_p=%p block '%s' block_p=%p\n", + __func__, instance->name, instance, block->name, block); + debugf4("%s() block kobj=%p block kobj->parent=%p\n", + __func__, &block->kobj, &block->kobj.parent); + + /* init this block's kobject */ + memset(&block->kobj, 0, sizeof(struct kobject)); + block->kobj.parent = &instance->kobj; + block->kobj.ktype = &ktype_block_ctrl; + + err = kobject_set_name(&block->kobj, "%s", block->name); + if (err) + return err; + + /* bump the main kobject's reference count for this controller + * and this instance is dependant on the main + */ + main_kobj = kobject_get(&edac_dev->kobj); + if (!main_kobj) { + err = -ENODEV; + goto err_out; + } + + /* Add this block's kobject */ + err = kobject_register(&block->kobj); + if (err) { + debugf1("%s() Failed to register instance '%s'\n", + __func__, block->name); + kobject_put(main_kobj); + err = -ENODEV; + goto err_out; + } + + /* If there are driver level block attributes, then added them + * to the block kobject + */ + sysfs_attrib = block->block_attributes; + if (sysfs_attrib && block->nr_attribs) { + for (i = 0; i < block->nr_attribs; i++, sysfs_attrib++) { + + debugf4("%s() creating block attrib='%s' " + "attrib->%p to kobj=%p\n", + __func__, + sysfs_attrib->attr.name, + sysfs_attrib, &block->kobj); + + /* Create each block_attribute file */ + err = 
sysfs_create_file(&block->kobj, + &sysfs_attrib->attr); + if (err) + goto err_on_attrib; + } + } + + return 0; + + /* Error unwind stack */ +err_on_attrib: + kobject_unregister(&block->kobj); + +err_out: + return err; +} + +/* + * edac_device_delete_block(edac_dev,block); + */ +static void edac_device_delete_block(struct edac_device_ctl_info *edac_dev, + struct edac_device_block *block) +{ + struct edac_dev_sysfs_block_attribute *sysfs_attrib; + int i; + + /* if this block has 'attributes' then we need to iterate over the list + * and 'remove' the attributes on this block + */ + sysfs_attrib = block->block_attributes; + if (sysfs_attrib && block->nr_attribs) { + for (i = 0; i < block->nr_attribs; i++, sysfs_attrib++) { + + /* remove each block_attrib file */ + sysfs_remove_file(&block->kobj, + (struct attribute *) sysfs_attrib); + } + } + + /* unregister this block's kobject, SEE: + * edac_device_ctrl_block_release() callback operation + */ + kobject_unregister(&block->kobj); +} + +/* instance ctor/dtor code */ + +/* + * edac_device_create_instance + * create just one instance of an edac_device 'instance' + */ +static int edac_device_create_instance(struct edac_device_ctl_info *edac_dev, + int idx) +{ + int i, j; + int err; + struct edac_device_instance *instance; + struct kobject *main_kobj; + + instance = &edac_dev->instances[idx]; + + /* Init the instance's kobject */ + memset(&instance->kobj, 0, sizeof(struct kobject)); + + /* set this new device under the edac_device main kobject */ + instance->kobj.parent = &edac_dev->kobj; + instance->kobj.ktype = &ktype_instance_ctrl; + instance->ctl = edac_dev; + + err = kobject_set_name(&instance->kobj, "%s", instance->name); + if (err) + goto err_out; + + /* bump the main kobject's reference count for this controller + * and this instance is dependant on the main + */ + main_kobj = kobject_get(&edac_dev->kobj); + if (!main_kobj) { + err = -ENODEV; + goto err_out; + } + + /* Formally register this instance's kobject */ + err = kobject_register(&instance->kobj); + if (err != 0) { + debugf2("%s() Failed to register instance '%s'\n", + __func__, instance->name); + kobject_put(main_kobj); + goto err_out; + } + + debugf4("%s() now register '%d' blocks for instance %d\n", + __func__, instance->nr_blocks, idx); + + /* register all blocks of this instance */ + for (i = 0; i < instance->nr_blocks; i++) { + err = edac_device_create_block(edac_dev, instance, + &instance->blocks[i]); + if (err) { + /* If any fail, remove all previous ones */ + for (j = 0; j < i; j++) + edac_device_delete_block(edac_dev, + &instance->blocks[j]); + goto err_release_instance_kobj; + } + } + + debugf4("%s() Registered instance %d '%s' kobject\n", + __func__, idx, instance->name); + + return 0; + + /* error unwind stack */ +err_release_instance_kobj: + kobject_unregister(&instance->kobj); + +err_out: + return err; +} + +/* + * edac_device_remove_instance + * remove an edac_device instance + */ +static void edac_device_delete_instance(struct edac_device_ctl_info *edac_dev, + int idx) +{ + struct edac_device_instance *instance; + int i; + + instance = &edac_dev->instances[idx]; + + /* unregister all blocks in this instance */ + for (i = 0; i < instance->nr_blocks; i++) + edac_device_delete_block(edac_dev, &instance->blocks[i]); + + /* unregister this instance's kobject, SEE: + * edac_device_ctrl_instance_release() for callback operation + */ + kobject_unregister(&instance->kobj); +} + +/* + * edac_device_create_instances + * create the first level of 'instances' for this device + * 
(ie 'cache' might have 'cache0', 'cache1', 'cache2', etc + */ +static int edac_device_create_instances(struct edac_device_ctl_info *edac_dev) +{ + int i, j; + int err; + + debugf0("%s()\n", __func__); + + /* iterate over creation of the instances */ + for (i = 0; i < edac_dev->nr_instances; i++) { + err = edac_device_create_instance(edac_dev, i); + if (err) { + /* unwind previous instances on error */ + for (j = 0; j < i; j++) + edac_device_delete_instance(edac_dev, j); + return err; + } + } + + return 0; +} + +/* + * edac_device_delete_instances(edac_dev); + * unregister all the kobjects of the instances + */ +static void edac_device_delete_instances(struct edac_device_ctl_info *edac_dev) +{ + int i; + + /* iterate over creation of the instances */ + for (i = 0; i < edac_dev->nr_instances; i++) + edac_device_delete_instance(edac_dev, i); +} + +/* edac_dev sysfs ctor/dtor code */ + +/* + * edac_device_add_main_sysfs_attributes + * add some attributes to this instance's main kobject + */ +static int edac_device_add_main_sysfs_attributes( + struct edac_device_ctl_info *edac_dev) +{ + struct edac_dev_sysfs_attribute *sysfs_attrib; + int err = 0; + + sysfs_attrib = edac_dev->sysfs_attributes; + if (sysfs_attrib) { + /* iterate over the array and create an attribute for each + * entry in the list + */ + while (sysfs_attrib->attr.name != NULL) { + err = sysfs_create_file(&edac_dev->kobj, + (struct attribute*) sysfs_attrib); + if (err) + goto err_out; + + sysfs_attrib++; + } + } + +err_out: + return err; +} + +/* + * edac_device_remove_main_sysfs_attributes + * remove any attributes to this instance's main kobject + */ +static void edac_device_remove_main_sysfs_attributes( + struct edac_device_ctl_info *edac_dev) +{ + struct edac_dev_sysfs_attribute *sysfs_attrib; + + /* if there are main attributes, defined, remove them. First, + * point to the start of the array and iterate over it + * removing each attribute listed from this device's instance's kobject + */ + sysfs_attrib = edac_dev->sysfs_attributes; + if (sysfs_attrib) { + while (sysfs_attrib->attr.name != NULL) { + sysfs_remove_file(&edac_dev->kobj, + (struct attribute *) sysfs_attrib); + sysfs_attrib++; + } + } +} + +/* + * edac_device_create_sysfs() Constructor + * + * accept a created edac_device control structure + * and 'export' it to sysfs. The 'main' kobj should already have been + * created. 'instance' and 'block' kobjects should be registered + * along with any 'block' attributes from the low driver. In addition, + * the main attributes (if any) are connected to the main kobject of + * the control structure. 
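
[Editor's note] edac_device_create_instances() above relies on a construct/rollback discipline: if creating element i fails, elements 0 through i-1 are destroyed before the error is returned, so the caller never sees a half-built tree. The skeleton of that loop as a standalone C program, with invented create/destroy helpers and a deliberately failing third item:

#include <stdio.h>

#define NR_ITEMS 4

/* invented helpers: pretend item 2 cannot be created */
static int create_item(int i)
{
    if (i == 2)
        return -1;
    printf("created %d\n", i);
    return 0;
}

static void destroy_item(int i)
{
    printf("destroyed %d\n", i);
}

/* create all items, or none: roll back everything built so far on failure */
static int create_all(void)
{
    int i, j, err;

    for (i = 0; i < NR_ITEMS; i++) {
        err = create_item(i);
        if (err) {
            for (j = 0; j < i; j++)
                destroy_item(j);
            return err;
        }
    }
    return 0;
}

int main(void)
{
    printf("create_all() = %d\n", create_all());
    return 0;
}
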
+ * + * Return: + * 0 Success + * !0 Failure + */ +int edac_device_create_sysfs(struct edac_device_ctl_info *edac_dev) +{ + int err; + struct kobject *edac_kobj = &edac_dev->kobj; + + debugf0("%s() idx=%d\n", __func__, edac_dev->dev_idx); + + /* go create any main attributes callers wants */ + err = edac_device_add_main_sysfs_attributes(edac_dev); + if (err) { + debugf0("%s() failed to add sysfs attribs\n", __func__); + goto err_out; + } + + /* create a symlink from the edac device + * to the platform 'device' being used for this + */ + err = sysfs_create_link(edac_kobj, + &edac_dev->dev->kobj, EDAC_DEVICE_SYMLINK); + if (err) { + debugf0("%s() sysfs_create_link() returned err= %d\n", + __func__, err); + goto err_remove_main_attribs; + } + + /* Create the first level instance directories + * In turn, the nested blocks beneath the instances will + * be registered as well + */ + err = edac_device_create_instances(edac_dev); + if (err) { + debugf0("%s() edac_device_create_instances() " + "returned err= %d\n", __func__, err); + goto err_remove_link; + } + + + debugf4("%s() create-instances done, idx=%d\n", + __func__, edac_dev->dev_idx); + + return 0; + + /* Error unwind stack */ +err_remove_link: + /* remove the sym link */ + sysfs_remove_link(&edac_dev->kobj, EDAC_DEVICE_SYMLINK); + +err_remove_main_attribs: + edac_device_remove_main_sysfs_attributes(edac_dev); + +err_out: + return err; +} + +/* + * edac_device_remove_sysfs() destructor + * + * given an edac_device struct, tear down the kobject resources + */ +void edac_device_remove_sysfs(struct edac_device_ctl_info *edac_dev) +{ + debugf0("%s()\n", __func__); + + /* remove any main attributes for this device */ + edac_device_remove_main_sysfs_attributes(edac_dev); + + /* remove the device sym link */ + sysfs_remove_link(&edac_dev->kobj, EDAC_DEVICE_SYMLINK); + + /* walk the instance/block kobject tree, deconstructing it */ + edac_device_delete_instances(edac_dev); +} diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 7b622300d0e5..4471be362599 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c @@ -27,1200 +27,20 @@ #include <linux/list.h> #include <linux/sysdev.h> #include <linux/ctype.h> -#include <linux/kthread.h> -#include <linux/freezer.h> +#include <linux/edac.h> #include <asm/uaccess.h> #include <asm/page.h> #include <asm/edac.h> -#include "edac_mc.h" - -#define EDAC_MC_VERSION "Ver: 2.0.1 " __DATE__ - - -#ifdef CONFIG_EDAC_DEBUG -/* Values of 0 to 4 will generate output */ -int edac_debug_level = 1; -EXPORT_SYMBOL_GPL(edac_debug_level); -#endif - -/* EDAC Controls, setable by module parameter, and sysfs */ -static int log_ue = 1; -static int log_ce = 1; -static int panic_on_ue; -static int poll_msec = 1000; +#include "edac_core.h" +#include "edac_module.h" /* lock to memory controller's control array */ -static DECLARE_MUTEX(mem_ctls_mutex); +static DEFINE_MUTEX(mem_ctls_mutex); static struct list_head mc_devices = LIST_HEAD_INIT(mc_devices); -static struct task_struct *edac_thread; - -#ifdef CONFIG_PCI -static int check_pci_parity = 0; /* default YES check PCI parity */ -static int panic_on_pci_parity; /* default no panic on PCI Parity */ -static atomic_t pci_parity_count = ATOMIC_INIT(0); - -static struct kobject edac_pci_kobj; /* /sys/devices/system/edac/pci */ -static struct completion edac_pci_kobj_complete; -#endif /* CONFIG_PCI */ - -/* START sysfs data and methods */ - - -static const char *mem_types[] = { - [MEM_EMPTY] = "Empty", - [MEM_RESERVED] = "Reserved", - [MEM_UNKNOWN] = "Unknown", - 
[MEM_FPM] = "FPM", - [MEM_EDO] = "EDO", - [MEM_BEDO] = "BEDO", - [MEM_SDR] = "Unbuffered-SDR", - [MEM_RDR] = "Registered-SDR", - [MEM_DDR] = "Unbuffered-DDR", - [MEM_RDDR] = "Registered-DDR", - [MEM_RMBS] = "RMBS" -}; - -static const char *dev_types[] = { - [DEV_UNKNOWN] = "Unknown", - [DEV_X1] = "x1", - [DEV_X2] = "x2", - [DEV_X4] = "x4", - [DEV_X8] = "x8", - [DEV_X16] = "x16", - [DEV_X32] = "x32", - [DEV_X64] = "x64" -}; - -static const char *edac_caps[] = { - [EDAC_UNKNOWN] = "Unknown", - [EDAC_NONE] = "None", - [EDAC_RESERVED] = "Reserved", - [EDAC_PARITY] = "PARITY", - [EDAC_EC] = "EC", - [EDAC_SECDED] = "SECDED", - [EDAC_S2ECD2ED] = "S2ECD2ED", - [EDAC_S4ECD4ED] = "S4ECD4ED", - [EDAC_S8ECD8ED] = "S8ECD8ED", - [EDAC_S16ECD16ED] = "S16ECD16ED" -}; - -/* sysfs object: /sys/devices/system/edac */ -static struct sysdev_class edac_class = { - set_kset_name("edac"), -}; - -/* sysfs object: - * /sys/devices/system/edac/mc - */ -static struct kobject edac_memctrl_kobj; - -/* We use these to wait for the reference counts on edac_memctrl_kobj and - * edac_pci_kobj to reach 0. - */ -static struct completion edac_memctrl_kobj_complete; - -/* - * /sys/devices/system/edac/mc; - * data structures and methods - */ -static ssize_t memctrl_int_show(void *ptr, char *buffer) -{ - int *value = (int*) ptr; - return sprintf(buffer, "%u\n", *value); -} - -static ssize_t memctrl_int_store(void *ptr, const char *buffer, size_t count) -{ - int *value = (int*) ptr; - - if (isdigit(*buffer)) - *value = simple_strtoul(buffer, NULL, 0); - - return count; -} - -struct memctrl_dev_attribute { - struct attribute attr; - void *value; - ssize_t (*show)(void *,char *); - ssize_t (*store)(void *, const char *, size_t); -}; - -/* Set of show/store abstract level functions for memory control object */ -static ssize_t memctrl_dev_show(struct kobject *kobj, - struct attribute *attr, char *buffer) -{ - struct memctrl_dev_attribute *memctrl_dev; - memctrl_dev = (struct memctrl_dev_attribute*)attr; - - if (memctrl_dev->show) - return memctrl_dev->show(memctrl_dev->value, buffer); - - return -EIO; -} - -static ssize_t memctrl_dev_store(struct kobject *kobj, struct attribute *attr, - const char *buffer, size_t count) -{ - struct memctrl_dev_attribute *memctrl_dev; - memctrl_dev = (struct memctrl_dev_attribute*)attr; - - if (memctrl_dev->store) - return memctrl_dev->store(memctrl_dev->value, buffer, count); - - return -EIO; -} - -static struct sysfs_ops memctrlfs_ops = { - .show = memctrl_dev_show, - .store = memctrl_dev_store -}; - -#define MEMCTRL_ATTR(_name,_mode,_show,_store) \ -struct memctrl_dev_attribute attr_##_name = { \ - .attr = {.name = __stringify(_name), .mode = _mode }, \ - .value = &_name, \ - .show = _show, \ - .store = _store, \ -}; - -#define MEMCTRL_STRING_ATTR(_name,_data,_mode,_show,_store) \ -struct memctrl_dev_attribute attr_##_name = { \ - .attr = {.name = __stringify(_name), .mode = _mode }, \ - .value = _data, \ - .show = _show, \ - .store = _store, \ -}; - -/* csrow<id> control files */ -MEMCTRL_ATTR(panic_on_ue,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store); -MEMCTRL_ATTR(log_ue,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store); -MEMCTRL_ATTR(log_ce,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store); -MEMCTRL_ATTR(poll_msec,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store); - -/* Base Attributes of the memory ECC object */ -static struct memctrl_dev_attribute *memctrl_attr[] = { - &attr_panic_on_ue, - &attr_log_ue, - &attr_log_ce, - &attr_poll_msec, - NULL, -}; - -/* Main MC kobject release() 
function */ -static void edac_memctrl_master_release(struct kobject *kobj) -{ - debugf1("%s()\n", __func__); - complete(&edac_memctrl_kobj_complete); -} - -static struct kobj_type ktype_memctrl = { - .release = edac_memctrl_master_release, - .sysfs_ops = &memctrlfs_ops, - .default_attrs = (struct attribute **) memctrl_attr, -}; - -/* Initialize the main sysfs entries for edac: - * /sys/devices/system/edac - * - * and children - * - * Return: 0 SUCCESS - * !0 FAILURE - */ -static int edac_sysfs_memctrl_setup(void) -{ - int err = 0; - - debugf1("%s()\n", __func__); - - /* create the /sys/devices/system/edac directory */ - err = sysdev_class_register(&edac_class); - - if (err) { - debugf1("%s() error=%d\n", __func__, err); - return err; - } - - /* Init the MC's kobject */ - memset(&edac_memctrl_kobj, 0, sizeof (edac_memctrl_kobj)); - edac_memctrl_kobj.parent = &edac_class.kset.kobj; - edac_memctrl_kobj.ktype = &ktype_memctrl; - - /* generate sysfs "..../edac/mc" */ - err = kobject_set_name(&edac_memctrl_kobj,"mc"); - - if (err) - goto fail; - - /* FIXME: maybe new sysdev_create_subdir() */ - err = kobject_register(&edac_memctrl_kobj); - - if (err) { - debugf1("Failed to register '.../edac/mc'\n"); - goto fail; - } - - debugf1("Registered '.../edac/mc' kobject\n"); - - return 0; - -fail: - sysdev_class_unregister(&edac_class); - return err; -} - -/* - * MC teardown: - * the '..../edac/mc' kobject followed by '..../edac' itself - */ -static void edac_sysfs_memctrl_teardown(void) -{ - debugf0("MC: " __FILE__ ": %s()\n", __func__); - - /* Unregister the MC's kobject and wait for reference count to reach - * 0. - */ - init_completion(&edac_memctrl_kobj_complete); - kobject_unregister(&edac_memctrl_kobj); - wait_for_completion(&edac_memctrl_kobj_complete); - - /* Unregister the 'edac' object */ - sysdev_class_unregister(&edac_class); -} - -#ifdef CONFIG_PCI -static ssize_t edac_pci_int_show(void *ptr, char *buffer) -{ - int *value = ptr; - return sprintf(buffer,"%d\n",*value); -} - -static ssize_t edac_pci_int_store(void *ptr, const char *buffer, size_t count) -{ - int *value = ptr; - - if (isdigit(*buffer)) - *value = simple_strtoul(buffer,NULL,0); - - return count; -} - -struct edac_pci_dev_attribute { - struct attribute attr; - void *value; - ssize_t (*show)(void *,char *); - ssize_t (*store)(void *, const char *,size_t); -}; - -/* Set of show/store abstract level functions for PCI Parity object */ -static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr, - char *buffer) -{ - struct edac_pci_dev_attribute *edac_pci_dev; - edac_pci_dev= (struct edac_pci_dev_attribute*)attr; - - if (edac_pci_dev->show) - return edac_pci_dev->show(edac_pci_dev->value, buffer); - return -EIO; -} - -static ssize_t edac_pci_dev_store(struct kobject *kobj, - struct attribute *attr, const char *buffer, size_t count) -{ - struct edac_pci_dev_attribute *edac_pci_dev; - edac_pci_dev= (struct edac_pci_dev_attribute*)attr; - - if (edac_pci_dev->show) - return edac_pci_dev->store(edac_pci_dev->value, buffer, count); - return -EIO; -} - -static struct sysfs_ops edac_pci_sysfs_ops = { - .show = edac_pci_dev_show, - .store = edac_pci_dev_store -}; - -#define EDAC_PCI_ATTR(_name,_mode,_show,_store) \ -struct edac_pci_dev_attribute edac_pci_attr_##_name = { \ - .attr = {.name = __stringify(_name), .mode = _mode }, \ - .value = &_name, \ - .show = _show, \ - .store = _store, \ -}; - -#define EDAC_PCI_STRING_ATTR(_name,_data,_mode,_show,_store) \ -struct edac_pci_dev_attribute edac_pci_attr_##_name = { \ - 
.attr = {.name = __stringify(_name), .mode = _mode }, \ - .value = _data, \ - .show = _show, \ - .store = _store, \ -}; - -/* PCI Parity control files */ -EDAC_PCI_ATTR(check_pci_parity, S_IRUGO|S_IWUSR, edac_pci_int_show, - edac_pci_int_store); -EDAC_PCI_ATTR(panic_on_pci_parity, S_IRUGO|S_IWUSR, edac_pci_int_show, - edac_pci_int_store); -EDAC_PCI_ATTR(pci_parity_count, S_IRUGO, edac_pci_int_show, NULL); - -/* Base Attributes of the memory ECC object */ -static struct edac_pci_dev_attribute *edac_pci_attr[] = { - &edac_pci_attr_check_pci_parity, - &edac_pci_attr_panic_on_pci_parity, - &edac_pci_attr_pci_parity_count, - NULL, -}; - -/* No memory to release */ -static void edac_pci_release(struct kobject *kobj) -{ - debugf1("%s()\n", __func__); - complete(&edac_pci_kobj_complete); -} - -static struct kobj_type ktype_edac_pci = { - .release = edac_pci_release, - .sysfs_ops = &edac_pci_sysfs_ops, - .default_attrs = (struct attribute **) edac_pci_attr, -}; - -/** - * edac_sysfs_pci_setup() - * - */ -static int edac_sysfs_pci_setup(void) -{ - int err; - - debugf1("%s()\n", __func__); - - memset(&edac_pci_kobj, 0, sizeof(edac_pci_kobj)); - edac_pci_kobj.parent = &edac_class.kset.kobj; - edac_pci_kobj.ktype = &ktype_edac_pci; - err = kobject_set_name(&edac_pci_kobj, "pci"); - - if (!err) { - /* Instanstiate the csrow object */ - /* FIXME: maybe new sysdev_create_subdir() */ - err = kobject_register(&edac_pci_kobj); - - if (err) - debugf1("Failed to register '.../edac/pci'\n"); - else - debugf1("Registered '.../edac/pci' kobject\n"); - } - - return err; -} - -static void edac_sysfs_pci_teardown(void) -{ - debugf0("%s()\n", __func__); - init_completion(&edac_pci_kobj_complete); - kobject_unregister(&edac_pci_kobj); - wait_for_completion(&edac_pci_kobj_complete); -} - - -static u16 get_pci_parity_status(struct pci_dev *dev, int secondary) -{ - int where; - u16 status; - - where = secondary ? PCI_SEC_STATUS : PCI_STATUS; - pci_read_config_word(dev, where, &status); - - /* If we get back 0xFFFF then we must suspect that the card has been - * pulled but the Linux PCI layer has not yet finished cleaning up. - * We don't want to report on such devices - */ - - if (status == 0xFFFF) { - u32 sanity; - - pci_read_config_dword(dev, 0, &sanity); - - if (sanity == 0xFFFFFFFF) - return 0; - } - - status &= PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR | - PCI_STATUS_PARITY; - - if (status) - /* reset only the bits we are interested in */ - pci_write_config_word(dev, where, status); - - return status; -} - -typedef void (*pci_parity_check_fn_t) (struct pci_dev *dev); - -/* Clear any PCI parity errors logged by this device. 
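/*
 * Aside (editor's illustration, not part of this commit):
 * get_pci_parity_status() above works because the PCI status registers are
 * write-one-to-clear: read the register, mask down to the error bits of
 * interest, and write that mask back to clear exactly those bits while
 * leaving everything else untouched. Reduced to its core below; the
 * function name clear_parity_bits is invented, while the pci_*_config_word
 * helpers and PCI_STATUS_* masks are the standard <linux/pci.h> API.
 */
#include <linux/pci.h>

static u16 clear_parity_bits(struct pci_dev *dev)
{
        u16 status;

        pci_read_config_word(dev, PCI_STATUS, &status);

        /* keep only detected-parity, signaled-SERR and master-parity bits */
        status &= PCI_STATUS_DETECTED_PARITY |
                  PCI_STATUS_SIG_SYSTEM_ERROR |
                  PCI_STATUS_PARITY;

        /* write-one-to-clear: writing the set bits back resets them */
        if (status)
                pci_write_config_word(dev, PCI_STATUS, status);

        return status;
}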
*/ -static void edac_pci_dev_parity_clear(struct pci_dev *dev) -{ - u8 header_type; - - get_pci_parity_status(dev, 0); - - /* read the device TYPE, looking for bridges */ - pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type); - - if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) - get_pci_parity_status(dev, 1); -} - -/* - * PCI Parity polling - * - */ -static void edac_pci_dev_parity_test(struct pci_dev *dev) -{ - u16 status; - u8 header_type; - - /* read the STATUS register on this device - */ - status = get_pci_parity_status(dev, 0); - - debugf2("PCI STATUS= 0x%04x %s\n", status, dev->dev.bus_id ); - - /* check the status reg for errors */ - if (status) { - if (status & (PCI_STATUS_SIG_SYSTEM_ERROR)) - edac_printk(KERN_CRIT, EDAC_PCI, - "Signaled System Error on %s\n", - pci_name(dev)); - - if (status & (PCI_STATUS_PARITY)) { - edac_printk(KERN_CRIT, EDAC_PCI, - "Master Data Parity Error on %s\n", - pci_name(dev)); - - atomic_inc(&pci_parity_count); - } - - if (status & (PCI_STATUS_DETECTED_PARITY)) { - edac_printk(KERN_CRIT, EDAC_PCI, - "Detected Parity Error on %s\n", - pci_name(dev)); - - atomic_inc(&pci_parity_count); - } - } - - /* read the device TYPE, looking for bridges */ - pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type); - - debugf2("PCI HEADER TYPE= 0x%02x %s\n", header_type, dev->dev.bus_id ); - - if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { - /* On bridges, need to examine secondary status register */ - status = get_pci_parity_status(dev, 1); - - debugf2("PCI SEC_STATUS= 0x%04x %s\n", - status, dev->dev.bus_id ); - - /* check the secondary status reg for errors */ - if (status) { - if (status & (PCI_STATUS_SIG_SYSTEM_ERROR)) - edac_printk(KERN_CRIT, EDAC_PCI, "Bridge " - "Signaled System Error on %s\n", - pci_name(dev)); - - if (status & (PCI_STATUS_PARITY)) { - edac_printk(KERN_CRIT, EDAC_PCI, "Bridge " - "Master Data Parity Error on " - "%s\n", pci_name(dev)); - - atomic_inc(&pci_parity_count); - } - - if (status & (PCI_STATUS_DETECTED_PARITY)) { - edac_printk(KERN_CRIT, EDAC_PCI, "Bridge " - "Detected Parity Error on %s\n", - pci_name(dev)); - - atomic_inc(&pci_parity_count); - } - } - } -} - -/* - * pci_dev parity list iterator - * Scan the PCI device list for one iteration, looking for SERRORs - * Master Parity ERRORS or Parity ERRORs on primary or secondary devices - */ -static inline void edac_pci_dev_parity_iterator(pci_parity_check_fn_t fn) -{ - struct pci_dev *dev = NULL; - - /* request for kernel access to the next PCI device, if any, - * and while we are looking at it have its reference count - * bumped until we are done with it - */ - while((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { - fn(dev); - } -} - -static void do_pci_parity_check(void) -{ - unsigned long flags; - int before_count; - - debugf3("%s()\n", __func__); - - if (!check_pci_parity) - return; - - before_count = atomic_read(&pci_parity_count); - - /* scan all PCI devices looking for a Parity Error on devices and - * bridges - */ - local_irq_save(flags); - edac_pci_dev_parity_iterator(edac_pci_dev_parity_test); - local_irq_restore(flags); - - /* Only if operator has selected panic on PCI Error */ - if (panic_on_pci_parity) { - /* If the count is different 'after' from 'before' */ - if (before_count != atomic_read(&pci_parity_count)) - panic("EDAC: PCI Parity Error"); - } -} - -static inline void clear_pci_parity_errors(void) -{ - /* Clear any PCI bus parity errors that devices initially have logged - * in their registers. 
- */ - edac_pci_dev_parity_iterator(edac_pci_dev_parity_clear); -} - -#else /* CONFIG_PCI */ - -/* pre-process these away */ -#define do_pci_parity_check() -#define clear_pci_parity_errors() -#define edac_sysfs_pci_teardown() -#define edac_sysfs_pci_setup() (0) - -#endif /* CONFIG_PCI */ - -/* EDAC sysfs CSROW data structures and methods - */ - -/* Set of more default csrow<id> attribute show/store functions */ -static ssize_t csrow_ue_count_show(struct csrow_info *csrow, char *data, int private) -{ - return sprintf(data,"%u\n", csrow->ue_count); -} - -static ssize_t csrow_ce_count_show(struct csrow_info *csrow, char *data, int private) -{ - return sprintf(data,"%u\n", csrow->ce_count); -} - -static ssize_t csrow_size_show(struct csrow_info *csrow, char *data, int private) -{ - return sprintf(data,"%u\n", PAGES_TO_MiB(csrow->nr_pages)); -} - -static ssize_t csrow_mem_type_show(struct csrow_info *csrow, char *data, int private) -{ - return sprintf(data,"%s\n", mem_types[csrow->mtype]); -} - -static ssize_t csrow_dev_type_show(struct csrow_info *csrow, char *data, int private) -{ - return sprintf(data,"%s\n", dev_types[csrow->dtype]); -} - -static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data, int private) -{ - return sprintf(data,"%s\n", edac_caps[csrow->edac_mode]); -} - -/* show/store functions for DIMM Label attributes */ -static ssize_t channel_dimm_label_show(struct csrow_info *csrow, - char *data, int channel) -{ - return snprintf(data, EDAC_MC_LABEL_LEN,"%s", - csrow->channels[channel].label); -} - -static ssize_t channel_dimm_label_store(struct csrow_info *csrow, - const char *data, - size_t count, - int channel) -{ - ssize_t max_size = 0; - - max_size = min((ssize_t)count,(ssize_t)EDAC_MC_LABEL_LEN-1); - strncpy(csrow->channels[channel].label, data, max_size); - csrow->channels[channel].label[max_size] = '\0'; - - return max_size; -} - -/* show function for dynamic chX_ce_count attribute */ -static ssize_t channel_ce_count_show(struct csrow_info *csrow, - char *data, - int channel) -{ - return sprintf(data, "%u\n", csrow->channels[channel].ce_count); -} - -/* csrow specific attribute structure */ -struct csrowdev_attribute { - struct attribute attr; - ssize_t (*show)(struct csrow_info *,char *,int); - ssize_t (*store)(struct csrow_info *, const char *,size_t,int); - int private; -}; - -#define to_csrow(k) container_of(k, struct csrow_info, kobj) -#define to_csrowdev_attr(a) container_of(a, struct csrowdev_attribute, attr) - -/* Set of show/store higher level functions for default csrow attributes */ -static ssize_t csrowdev_show(struct kobject *kobj, - struct attribute *attr, - char *buffer) -{ - struct csrow_info *csrow = to_csrow(kobj); - struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr); - - if (csrowdev_attr->show) - return csrowdev_attr->show(csrow, - buffer, - csrowdev_attr->private); - return -EIO; -} - -static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr, - const char *buffer, size_t count) -{ - struct csrow_info *csrow = to_csrow(kobj); - struct csrowdev_attribute * csrowdev_attr = to_csrowdev_attr(attr); - - if (csrowdev_attr->store) - return csrowdev_attr->store(csrow, - buffer, - count, - csrowdev_attr->private); - return -EIO; -} - -static struct sysfs_ops csrowfs_ops = { - .show = csrowdev_show, - .store = csrowdev_store -}; - -#define CSROWDEV_ATTR(_name,_mode,_show,_store,_private) \ -struct csrowdev_attribute attr_##_name = { \ - .attr = {.name = __stringify(_name), .mode = _mode }, \ - .show = _show, \ - 
.store = _store, \ - .private = _private, \ -}; - -/* default cwrow<id>/attribute files */ -CSROWDEV_ATTR(size_mb,S_IRUGO,csrow_size_show,NULL,0); -CSROWDEV_ATTR(dev_type,S_IRUGO,csrow_dev_type_show,NULL,0); -CSROWDEV_ATTR(mem_type,S_IRUGO,csrow_mem_type_show,NULL,0); -CSROWDEV_ATTR(edac_mode,S_IRUGO,csrow_edac_mode_show,NULL,0); -CSROWDEV_ATTR(ue_count,S_IRUGO,csrow_ue_count_show,NULL,0); -CSROWDEV_ATTR(ce_count,S_IRUGO,csrow_ce_count_show,NULL,0); - -/* default attributes of the CSROW<id> object */ -static struct csrowdev_attribute *default_csrow_attr[] = { - &attr_dev_type, - &attr_mem_type, - &attr_edac_mode, - &attr_size_mb, - &attr_ue_count, - &attr_ce_count, - NULL, -}; - - -/* possible dynamic channel DIMM Label attribute files */ -CSROWDEV_ATTR(ch0_dimm_label,S_IRUGO|S_IWUSR, - channel_dimm_label_show, - channel_dimm_label_store, - 0 ); -CSROWDEV_ATTR(ch1_dimm_label,S_IRUGO|S_IWUSR, - channel_dimm_label_show, - channel_dimm_label_store, - 1 ); -CSROWDEV_ATTR(ch2_dimm_label,S_IRUGO|S_IWUSR, - channel_dimm_label_show, - channel_dimm_label_store, - 2 ); -CSROWDEV_ATTR(ch3_dimm_label,S_IRUGO|S_IWUSR, - channel_dimm_label_show, - channel_dimm_label_store, - 3 ); -CSROWDEV_ATTR(ch4_dimm_label,S_IRUGO|S_IWUSR, - channel_dimm_label_show, - channel_dimm_label_store, - 4 ); -CSROWDEV_ATTR(ch5_dimm_label,S_IRUGO|S_IWUSR, - channel_dimm_label_show, - channel_dimm_label_store, - 5 ); - -/* Total possible dynamic DIMM Label attribute file table */ -static struct csrowdev_attribute *dynamic_csrow_dimm_attr[] = { - &attr_ch0_dimm_label, - &attr_ch1_dimm_label, - &attr_ch2_dimm_label, - &attr_ch3_dimm_label, - &attr_ch4_dimm_label, - &attr_ch5_dimm_label -}; - -/* possible dynamic channel ce_count attribute files */ -CSROWDEV_ATTR(ch0_ce_count,S_IRUGO|S_IWUSR, - channel_ce_count_show, - NULL, - 0 ); -CSROWDEV_ATTR(ch1_ce_count,S_IRUGO|S_IWUSR, - channel_ce_count_show, - NULL, - 1 ); -CSROWDEV_ATTR(ch2_ce_count,S_IRUGO|S_IWUSR, - channel_ce_count_show, - NULL, - 2 ); -CSROWDEV_ATTR(ch3_ce_count,S_IRUGO|S_IWUSR, - channel_ce_count_show, - NULL, - 3 ); -CSROWDEV_ATTR(ch4_ce_count,S_IRUGO|S_IWUSR, - channel_ce_count_show, - NULL, - 4 ); -CSROWDEV_ATTR(ch5_ce_count,S_IRUGO|S_IWUSR, - channel_ce_count_show, - NULL, - 5 ); - -/* Total possible dynamic ce_count attribute file table */ -static struct csrowdev_attribute *dynamic_csrow_ce_count_attr[] = { - &attr_ch0_ce_count, - &attr_ch1_ce_count, - &attr_ch2_ce_count, - &attr_ch3_ce_count, - &attr_ch4_ce_count, - &attr_ch5_ce_count -}; - - -#define EDAC_NR_CHANNELS 6 - -/* Create dynamic CHANNEL files, indexed by 'chan', under specifed CSROW */ -static int edac_create_channel_files(struct kobject *kobj, int chan) -{ - int err=-ENODEV; - - if (chan >= EDAC_NR_CHANNELS) - return err; - - /* create the DIMM label attribute file */ - err = sysfs_create_file(kobj, - (struct attribute *) dynamic_csrow_dimm_attr[chan]); - - if (!err) { - /* create the CE Count attribute file */ - err = sysfs_create_file(kobj, - (struct attribute *) dynamic_csrow_ce_count_attr[chan]); - } else { - debugf1("%s() dimm labels and ce_count files created", __func__); - } - - return err; -} - -/* No memory to release for this kobj */ -static void edac_csrow_instance_release(struct kobject *kobj) -{ - struct csrow_info *cs; - - cs = container_of(kobj, struct csrow_info, kobj); - complete(&cs->kobj_complete); -} - -/* the kobj_type instance for a CSROW */ -static struct kobj_type ktype_csrow = { - .release = edac_csrow_instance_release, - .sysfs_ops = &csrowfs_ops, - .default_attrs = 
(struct attribute **) default_csrow_attr, -}; - -/* Create a CSROW object under specifed edac_mc_device */ -static int edac_create_csrow_object( - struct kobject *edac_mci_kobj, - struct csrow_info *csrow, - int index) -{ - int err = 0; - int chan; - - memset(&csrow->kobj, 0, sizeof(csrow->kobj)); - - /* generate ..../edac/mc/mc<id>/csrow<index> */ - - csrow->kobj.parent = edac_mci_kobj; - csrow->kobj.ktype = &ktype_csrow; - - /* name this instance of csrow<id> */ - err = kobject_set_name(&csrow->kobj,"csrow%d",index); - if (err) - goto error_exit; - - /* Instanstiate the csrow object */ - err = kobject_register(&csrow->kobj); - if (!err) { - /* Create the dyanmic attribute files on this csrow, - * namely, the DIMM labels and the channel ce_count - */ - for (chan = 0; chan < csrow->nr_channels; chan++) { - err = edac_create_channel_files(&csrow->kobj,chan); - if (err) - break; - } - } - -error_exit: - return err; -} - -/* default sysfs methods and data structures for the main MCI kobject */ - -static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci, - const char *data, size_t count) -{ - int row, chan; - - mci->ue_noinfo_count = 0; - mci->ce_noinfo_count = 0; - mci->ue_count = 0; - mci->ce_count = 0; - - for (row = 0; row < mci->nr_csrows; row++) { - struct csrow_info *ri = &mci->csrows[row]; - - ri->ue_count = 0; - ri->ce_count = 0; - - for (chan = 0; chan < ri->nr_channels; chan++) - ri->channels[chan].ce_count = 0; - } - - mci->start_time = jiffies; - return count; -} - -/* memory scrubbing */ -static ssize_t mci_sdram_scrub_rate_store(struct mem_ctl_info *mci, - const char *data, size_t count) -{ - u32 bandwidth = -1; - - if (mci->set_sdram_scrub_rate) { - - memctrl_int_store(&bandwidth, data, count); - - if (!(*mci->set_sdram_scrub_rate)(mci, &bandwidth)) { - edac_printk(KERN_DEBUG, EDAC_MC, - "Scrub rate set successfully, applied: %d\n", - bandwidth); - } else { - /* FIXME: error codes maybe? */ - edac_printk(KERN_DEBUG, EDAC_MC, - "Scrub rate set FAILED, could not apply: %d\n", - bandwidth); - } - } else { - /* FIXME: produce "not implemented" ERROR for user-side. */ - edac_printk(KERN_WARNING, EDAC_MC, - "Memory scrubbing 'set'control is not implemented!\n"); - } - return count; -} - -static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data) -{ - u32 bandwidth = -1; - - if (mci->get_sdram_scrub_rate) { - if (!(*mci->get_sdram_scrub_rate)(mci, &bandwidth)) { - edac_printk(KERN_DEBUG, EDAC_MC, - "Scrub rate successfully, fetched: %d\n", - bandwidth); - } else { - /* FIXME: error codes maybe? */ - edac_printk(KERN_DEBUG, EDAC_MC, - "Scrub rate fetch FAILED, got: %d\n", - bandwidth); - } - } else { - /* FIXME: produce "not implemented" ERROR for user-side. 
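/*
 * Aside (editor's illustration, not part of this commit): the
 * csrowdev_show()/csrowdev_store() wrappers above are the classic kobject
 * attribute dispatch used throughout this file -- a driver-private
 * attribute type embeds struct attribute, the sysfs_ops handler recovers
 * the object and the attribute with container_of(), then calls a typed
 * callback. A compressed sketch of that pattern; "widget" and its helpers
 * are invented names, not EDAC code.
 */
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/errno.h>

struct widget {
        int value;
        struct kobject kobj;
};

struct widget_attribute {
        struct attribute attr;
        ssize_t (*show)(struct widget *, char *);
};

#define to_widget(k)       container_of(k, struct widget, kobj)
#define to_widget_attr(a)  container_of(a, struct widget_attribute, attr)

static ssize_t widget_attr_show(struct kobject *kobj,
                                struct attribute *attr, char *buf)
{
        struct widget *w = to_widget(kobj);
        struct widget_attribute *wattr = to_widget_attr(attr);

        return wattr->show ? wattr->show(w, buf) : -EIO;
}

static ssize_t value_show(struct widget *w, char *buf)
{
        return sprintf(buf, "%d\n", w->value);
}

static struct widget_attribute attr_value = {
        .attr = { .name = "value", .mode = S_IRUGO },
        .show = value_show,
};

static struct attribute *widget_attrs[] = { &attr_value.attr, NULL };

static struct sysfs_ops widget_sysfs_ops = { .show = widget_attr_show };

static void widget_release(struct kobject *kobj)
{
        kfree(to_widget(kobj));         /* free when the last reference drops */
}

static struct kobj_type widget_ktype = {
        .release       = widget_release,
        .sysfs_ops     = &widget_sysfs_ops,
        .default_attrs = widget_attrs,
};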
*/ - edac_printk(KERN_WARNING, EDAC_MC, - "Memory scrubbing 'get' control is not implemented!\n"); - } - return sprintf(data, "%d\n", bandwidth); -} - -/* default attribute files for the MCI object */ -static ssize_t mci_ue_count_show(struct mem_ctl_info *mci, char *data) -{ - return sprintf(data,"%d\n", mci->ue_count); -} - -static ssize_t mci_ce_count_show(struct mem_ctl_info *mci, char *data) -{ - return sprintf(data,"%d\n", mci->ce_count); -} - -static ssize_t mci_ce_noinfo_show(struct mem_ctl_info *mci, char *data) -{ - return sprintf(data,"%d\n", mci->ce_noinfo_count); -} - -static ssize_t mci_ue_noinfo_show(struct mem_ctl_info *mci, char *data) -{ - return sprintf(data,"%d\n", mci->ue_noinfo_count); -} - -static ssize_t mci_seconds_show(struct mem_ctl_info *mci, char *data) -{ - return sprintf(data,"%ld\n", (jiffies - mci->start_time) / HZ); -} - -static ssize_t mci_ctl_name_show(struct mem_ctl_info *mci, char *data) -{ - return sprintf(data,"%s\n", mci->ctl_name); -} - -static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data) -{ - int total_pages, csrow_idx; - - for (total_pages = csrow_idx = 0; csrow_idx < mci->nr_csrows; - csrow_idx++) { - struct csrow_info *csrow = &mci->csrows[csrow_idx]; - - if (!csrow->nr_pages) - continue; - - total_pages += csrow->nr_pages; - } - - return sprintf(data,"%u\n", PAGES_TO_MiB(total_pages)); -} - -struct mcidev_attribute { - struct attribute attr; - ssize_t (*show)(struct mem_ctl_info *,char *); - ssize_t (*store)(struct mem_ctl_info *, const char *,size_t); -}; - -#define to_mci(k) container_of(k, struct mem_ctl_info, edac_mci_kobj) -#define to_mcidev_attr(a) container_of(a, struct mcidev_attribute, attr) - -/* MCI show/store functions for top most object */ -static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr, - char *buffer) -{ - struct mem_ctl_info *mem_ctl_info = to_mci(kobj); - struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr); - - if (mcidev_attr->show) - return mcidev_attr->show(mem_ctl_info, buffer); - - return -EIO; -} - -static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr, - const char *buffer, size_t count) -{ - struct mem_ctl_info *mem_ctl_info = to_mci(kobj); - struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr); - - if (mcidev_attr->store) - return mcidev_attr->store(mem_ctl_info, buffer, count); - - return -EIO; -} - -static struct sysfs_ops mci_ops = { - .show = mcidev_show, - .store = mcidev_store -}; - -#define MCIDEV_ATTR(_name,_mode,_show,_store) \ -struct mcidev_attribute mci_attr_##_name = { \ - .attr = {.name = __stringify(_name), .mode = _mode }, \ - .show = _show, \ - .store = _store, \ -}; - -/* default Control file */ -MCIDEV_ATTR(reset_counters,S_IWUSR,NULL,mci_reset_counters_store); - -/* default Attribute files */ -MCIDEV_ATTR(mc_name,S_IRUGO,mci_ctl_name_show,NULL); -MCIDEV_ATTR(size_mb,S_IRUGO,mci_size_mb_show,NULL); -MCIDEV_ATTR(seconds_since_reset,S_IRUGO,mci_seconds_show,NULL); -MCIDEV_ATTR(ue_noinfo_count,S_IRUGO,mci_ue_noinfo_show,NULL); -MCIDEV_ATTR(ce_noinfo_count,S_IRUGO,mci_ce_noinfo_show,NULL); -MCIDEV_ATTR(ue_count,S_IRUGO,mci_ue_count_show,NULL); -MCIDEV_ATTR(ce_count,S_IRUGO,mci_ce_count_show,NULL); - -/* memory scrubber attribute file */ -MCIDEV_ATTR(sdram_scrub_rate,S_IRUGO|S_IWUSR,mci_sdram_scrub_rate_show,mci_sdram_scrub_rate_store); - -static struct mcidev_attribute *mci_attr[] = { - &mci_attr_reset_counters, - &mci_attr_mc_name, - &mci_attr_size_mb, - &mci_attr_seconds_since_reset, - &mci_attr_ue_noinfo_count, - 
&mci_attr_ce_noinfo_count, - &mci_attr_ue_count, - &mci_attr_ce_count, - &mci_attr_sdram_scrub_rate, - NULL -}; - -/* - * Release of a MC controlling instance - */ -static void edac_mci_instance_release(struct kobject *kobj) -{ - struct mem_ctl_info *mci; - - mci = to_mci(kobj); - debugf0("%s() idx=%d\n", __func__, mci->mc_idx); - complete(&mci->kobj_complete); -} - -static struct kobj_type ktype_mci = { - .release = edac_mci_instance_release, - .sysfs_ops = &mci_ops, - .default_attrs = (struct attribute **) mci_attr, -}; - - -#define EDAC_DEVICE_SYMLINK "device" - -/* - * Create a new Memory Controller kobject instance, - * mc<id> under the 'mc' directory - * - * Return: - * 0 Success - * !0 Failure - */ -static int edac_create_sysfs_mci_device(struct mem_ctl_info *mci) -{ - int i; - int err; - struct csrow_info *csrow; - struct kobject *edac_mci_kobj=&mci->edac_mci_kobj; - - debugf0("%s() idx=%d\n", __func__, mci->mc_idx); - memset(edac_mci_kobj, 0, sizeof(*edac_mci_kobj)); - - /* set the name of the mc<id> object */ - err = kobject_set_name(edac_mci_kobj,"mc%d",mci->mc_idx); - if (err) - return err; - - /* link to our parent the '..../edac/mc' object */ - edac_mci_kobj->parent = &edac_memctrl_kobj; - edac_mci_kobj->ktype = &ktype_mci; - - /* register the mc<id> kobject */ - err = kobject_register(edac_mci_kobj); - if (err) - return err; - - /* create a symlink for the device */ - err = sysfs_create_link(edac_mci_kobj, &mci->dev->kobj, - EDAC_DEVICE_SYMLINK); - if (err) - goto fail0; - - /* Make directories for each CSROW object - * under the mc<id> kobject - */ - for (i = 0; i < mci->nr_csrows; i++) { - csrow = &mci->csrows[i]; - - /* Only expose populated CSROWs */ - if (csrow->nr_pages > 0) { - err = edac_create_csrow_object(edac_mci_kobj,csrow,i); - if (err) - goto fail1; - } - } - - return 0; - - /* CSROW error: backout what has already been registered, */ -fail1: - for ( i--; i >= 0; i--) { - if (csrow->nr_pages > 0) { - init_completion(&csrow->kobj_complete); - kobject_unregister(&mci->csrows[i].kobj); - wait_for_completion(&csrow->kobj_complete); - } - } - -fail0: - init_completion(&mci->kobj_complete); - kobject_unregister(edac_mci_kobj); - wait_for_completion(&mci->kobj_complete); - return err; -} - -/* - * remove a Memory Controller instance - */ -static void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) -{ - int i; - - debugf0("%s()\n", __func__); - - /* remove all csrow kobjects */ - for (i = 0; i < mci->nr_csrows; i++) { - if (mci->csrows[i].nr_pages > 0) { - init_completion(&mci->csrows[i].kobj_complete); - kobject_unregister(&mci->csrows[i].kobj); - wait_for_completion(&mci->csrows[i].kobj_complete); - } - } - - sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK); - init_completion(&mci->kobj_complete); - kobject_unregister(&mci->edac_mci_kobj); - wait_for_completion(&mci->kobj_complete); -} - -/* END OF sysfs data and methods */ - #ifdef CONFIG_EDAC_DEBUG -void edac_mc_dump_channel(struct channel_info *chan) +static void edac_mc_dump_channel(struct channel_info *chan) { debugf4("\tchannel = %p\n", chan); debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx); @@ -1228,25 +48,21 @@ void edac_mc_dump_channel(struct channel_info *chan) debugf4("\tchannel->label = '%s'\n", chan->label); debugf4("\tchannel->csrow = %p\n\n", chan->csrow); } -EXPORT_SYMBOL_GPL(edac_mc_dump_channel); -void edac_mc_dump_csrow(struct csrow_info *csrow) +static void edac_mc_dump_csrow(struct csrow_info *csrow) { debugf4("\tcsrow = %p\n", csrow); debugf4("\tcsrow->csrow_idx = %d\n", 
csrow->csrow_idx); - debugf4("\tcsrow->first_page = 0x%lx\n", - csrow->first_page); + debugf4("\tcsrow->first_page = 0x%lx\n", csrow->first_page); debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page); debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask); debugf4("\tcsrow->nr_pages = 0x%x\n", csrow->nr_pages); - debugf4("\tcsrow->nr_channels = %d\n", - csrow->nr_channels); + debugf4("\tcsrow->nr_channels = %d\n", csrow->nr_channels); debugf4("\tcsrow->channels = %p\n", csrow->channels); debugf4("\tcsrow->mci = %p\n\n", csrow->mci); } -EXPORT_SYMBOL_GPL(edac_mc_dump_csrow); -void edac_mc_dump_mci(struct mem_ctl_info *mci) +static void edac_mc_dump_mci(struct mem_ctl_info *mci) { debugf3("\tmci = %p\n", mci); debugf3("\tmci->mtype_cap = %lx\n", mci->mtype_cap); @@ -1256,13 +72,11 @@ void edac_mc_dump_mci(struct mem_ctl_info *mci) debugf3("\tmci->nr_csrows = %d, csrows = %p\n", mci->nr_csrows, mci->csrows); debugf3("\tdev = %p\n", mci->dev); - debugf3("\tmod_name:ctl_name = %s:%s\n", - mci->mod_name, mci->ctl_name); + debugf3("\tmod_name:ctl_name = %s:%s\n", mci->mod_name, mci->ctl_name); debugf3("\tpvt_info = %p\n\n", mci->pvt_info); } -EXPORT_SYMBOL_GPL(edac_mc_dump_mci); -#endif /* CONFIG_EDAC_DEBUG */ +#endif /* CONFIG_EDAC_DEBUG */ /* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'. * Adjust 'ptr' so that its alignment is at least as stringent as what the @@ -1271,7 +85,7 @@ EXPORT_SYMBOL_GPL(edac_mc_dump_mci); * If 'size' is a constant, the compiler will optimize this whole function * down to either a no-op or the addition of a constant to the value of 'ptr'. */ -static inline char * align_ptr(void *ptr, unsigned size) +void *edac_align_ptr(void *ptr, unsigned size) { unsigned align, r; @@ -1288,14 +102,14 @@ static inline char * align_ptr(void *ptr, unsigned size) else if (size > sizeof(char)) align = sizeof(short); else - return (char *) ptr; + return (char *)ptr; r = size % align; if (r == 0) - return (char *) ptr; + return (char *)ptr; - return (char *) (((unsigned long) ptr) + align - r); + return (void *)(((unsigned long)ptr) + align - r); } /** @@ -1315,7 +129,7 @@ static inline char * align_ptr(void *ptr, unsigned size) * struct mem_ctl_info pointer */ struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows, - unsigned nr_chans) + unsigned nr_chans, int edac_index) { struct mem_ctl_info *mci; struct csrow_info *csi, *csrow; @@ -1323,30 +137,32 @@ struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows, void *pvt; unsigned size; int row, chn; + int err; /* Figure out the offsets of the various items from the start of an mc * structure. We want the alignment of each item to be at least as * stringent as what the compiler would provide if we could simply * hardcode everything into a single struct. 
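/*
 * Aside (editor's illustration, not part of this commit): edac_mc_alloc()
 * sizes one buffer for the mem_ctl_info header plus its csrow, channel and
 * private-data areas by first laying the pieces out against address zero
 * (edac_align_ptr rounds each offset up), allocating once, and then
 * relocating every precomputed offset into the real buffer. The same trick
 * as a stand-alone user-space program; struct table and table_alloc() are
 * invented names.
 */
#include <stdio.h>
#include <stdlib.h>

struct entry {
        long key;
};

struct table {
        struct entry *entries;          /* points into the same allocation */
        unsigned int nr_entries;
};

/* round 'off' up to the next multiple of 'align' (align is a power of two) */
static size_t align_up(size_t off, size_t align)
{
        return (off + align - 1) & ~(align - 1);
}

static struct table *table_alloc(unsigned int nr_entries)
{
        /* 1) compute the layout: header first, then the aligned array */
        size_t entries_off = align_up(sizeof(struct table),
                                      __alignof__(struct entry));
        size_t total = entries_off + nr_entries * sizeof(struct entry);

        /* 2) one zeroed allocation for everything */
        struct table *t = calloc(1, total);
        if (!t)
                return NULL;

        /* 3) turn the offset into a real pointer inside the buffer */
        t->entries = (struct entry *)((char *)t + entries_off);
        t->nr_entries = nr_entries;
        return t;
}

int main(void)
{
        struct table *t = table_alloc(8);

        if (!t)
                return 1;
        t->entries[7].key = 42;
        printf("%ld\n", t->entries[7].key);
        free(t);
        return 0;
}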
*/ - mci = (struct mem_ctl_info *) 0; - csi = (struct csrow_info *)align_ptr(&mci[1], sizeof(*csi)); - chi = (struct channel_info *) - align_ptr(&csi[nr_csrows], sizeof(*chi)); - pvt = align_ptr(&chi[nr_chans * nr_csrows], sz_pvt); - size = ((unsigned long) pvt) + sz_pvt; - - if ((mci = kmalloc(size, GFP_KERNEL)) == NULL) + mci = (struct mem_ctl_info *)0; + csi = edac_align_ptr(&mci[1], sizeof(*csi)); + chi = edac_align_ptr(&csi[nr_csrows], sizeof(*chi)); + pvt = edac_align_ptr(&chi[nr_chans * nr_csrows], sz_pvt); + size = ((unsigned long)pvt) + sz_pvt; + + mci = kzalloc(size, GFP_KERNEL); + if (mci == NULL) return NULL; /* Adjust pointers so they point within the memory we just allocated * rather than an imaginary chunk of memory located at address 0. */ - csi = (struct csrow_info *) (((char *) mci) + ((unsigned long) csi)); - chi = (struct channel_info *) (((char *) mci) + ((unsigned long) chi)); - pvt = sz_pvt ? (((char *) mci) + ((unsigned long) pvt)) : NULL; + csi = (struct csrow_info *)(((char *)mci) + ((unsigned long)csi)); + chi = (struct channel_info *)(((char *)mci) + ((unsigned long)chi)); + pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL; - memset(mci, 0, size); /* clear all fields */ + /* setup index and various internal pointers */ + mci->mc_idx = edac_index; mci->csrows = csi; mci->pvt_info = pvt; mci->nr_csrows = nr_csrows; @@ -1366,17 +182,35 @@ struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows, } } + mci->op_state = OP_ALLOC; + + /* + * Initialize the 'root' kobj for the edac_mc controller + */ + err = edac_mc_register_sysfs_main_kobj(mci); + if (err) { + kfree(mci); + return NULL; + } + + /* at this point, the root kobj is valid, and in order to + * 'free' the object, then the function: + * edac_mc_unregister_sysfs_main_kobj() must be called + * which will perform kobj unregistration and the actual free + * will occur during the kobject callback operation + */ return mci; } EXPORT_SYMBOL_GPL(edac_mc_alloc); /** - * edac_mc_free: Free a previously allocated 'mci' structure + * edac_mc_free + * 'Free' a previously allocated 'mci' structure * @mci: pointer to a struct mem_ctl_info structure */ void edac_mc_free(struct mem_ctl_info *mci) { - kfree(mci); + edac_mc_unregister_sysfs_main_kobj(mci); } EXPORT_SYMBOL_GPL(edac_mc_free); @@ -1397,18 +231,136 @@ static struct mem_ctl_info *find_mci_by_dev(struct device *dev) return NULL; } +/* + * handler for EDAC to check if NMI type handler has asserted interrupt + */ +static int edac_mc_assert_error_check_and_clear(void) +{ + int old_state; + + if (edac_op_state == EDAC_OPSTATE_POLL) + return 1; + + old_state = edac_err_assert; + edac_err_assert = 0; + + return old_state; +} + +/* + * edac_mc_workq_function + * performs the operation scheduled by a workq request + */ +static void edac_mc_workq_function(struct work_struct *work_req) +{ + struct delayed_work *d_work = (struct delayed_work *)work_req; + struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work); + + mutex_lock(&mem_ctls_mutex); + + /* if this control struct has movd to offline state, we are done */ + if (mci->op_state == OP_OFFLINE) { + mutex_unlock(&mem_ctls_mutex); + return; + } + + /* Only poll controllers that are running polled and have a check */ + if (edac_mc_assert_error_check_and_clear() && (mci->edac_check != NULL)) + mci->edac_check(mci); + + /* + * FIXME: temp place holder for PCI checks, + * goes away when we break out PCI + */ + edac_pci_do_parity_check(); + + mutex_unlock(&mem_ctls_mutex); + + /* Reschedule */ + 
queue_delayed_work(edac_workqueue, &mci->work, + msecs_to_jiffies(edac_mc_get_poll_msec())); +} + +/* + * edac_mc_workq_setup + * initialize a workq item for this mci + * passing in the new delay period in msec + * + * locking model: + * + * called with the mem_ctls_mutex held + */ +static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec) +{ + debugf0("%s()\n", __func__); + + /* if this instance is not in the POLL state, then simply return */ + if (mci->op_state != OP_RUNNING_POLL) + return; + + INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function); + queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec)); +} + +/* + * edac_mc_workq_teardown + * stop the workq processing on this mci + * + * locking model: + * + * called WITHOUT lock held + */ +static void edac_mc_workq_teardown(struct mem_ctl_info *mci) +{ + int status; + + /* if not running POLL, leave now */ + if (mci->op_state == OP_RUNNING_POLL) { + status = cancel_delayed_work(&mci->work); + if (status == 0) { + debugf0("%s() not canceled, flush the queue\n", + __func__); + + /* workq instance might be running, wait for it */ + flush_workqueue(edac_workqueue); + } + } +} + +/* + * edac_reset_delay_period + */ +static void edac_reset_delay_period(struct mem_ctl_info *mci, unsigned long value) +{ + /* cancel the current workq request */ + edac_mc_workq_teardown(mci); + + /* lock the list of devices for the new setup */ + mutex_lock(&mem_ctls_mutex); + + /* restart the workq request, with new delay value */ + edac_mc_workq_setup(mci, value); + + mutex_unlock(&mem_ctls_mutex); +} + /* Return 0 on success, 1 on failure. * Before calling this function, caller must * assign a unique value to mci->mc_idx. + * + * locking model: + * + * called with the mem_ctls_mutex lock held */ -static int add_mc_to_global_list (struct mem_ctl_info *mci) +static int add_mc_to_global_list(struct mem_ctl_info *mci) { struct list_head *item, *insert_before; struct mem_ctl_info *p; insert_before = &mc_devices; - if (unlikely((p = find_mci_by_dev(mci->dev)) != NULL)) + p = find_mci_by_dev(mci->dev); + if (unlikely(p != NULL)) goto fail0; list_for_each(item, &mc_devices) { @@ -1424,18 +376,19 @@ static int add_mc_to_global_list (struct mem_ctl_info *mci) } list_add_tail_rcu(&mci->link, insert_before); + atomic_inc(&edac_handlers); return 0; fail0: edac_printk(KERN_WARNING, EDAC_MC, - "%s (%s) %s %s already assigned %d\n", p->dev->bus_id, - dev_name(p->dev), p->mod_name, p->ctl_name, p->mc_idx); + "%s (%s) %s %s already assigned %d\n", p->dev->bus_id, + dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx); return 1; fail1: edac_printk(KERN_WARNING, EDAC_MC, - "bug in low-level driver: attempt to assign\n" - " duplicate mc_idx %d in %s()\n", p->mc_idx, __func__); + "bug in low-level driver: attempt to assign\n" + " duplicate mc_idx %d in %s()\n", p->mc_idx, __func__); return 1; } @@ -1450,6 +403,7 @@ static void complete_mc_list_del(struct rcu_head *head) static void del_mc_from_global_list(struct mem_ctl_info *mci) { + atomic_dec(&edac_handlers); list_del_rcu(&mci->link); init_completion(&mci->complete); call_rcu(&mci->rcu, complete_mc_list_del); @@ -1457,6 +411,34 @@ static void del_mc_from_global_list(struct mem_ctl_info *mci) } /** + * edac_mc_find: Search for a mem_ctl_info structure whose index is 'idx'. + * + * If found, return a pointer to the structure. + * Else return NULL. + * + * Caller must hold mem_ctls_mutex. 
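/*
 * Aside (editor's illustration, not part of this commit): the polling path
 * above replaces the old global "kedac" kernel thread with a per-controller
 * delayed work item that re-queues itself at the end of each pass. That
 * rearm-from-inside-the-handler shape, reduced to a stand-alone module on
 * the system workqueue (poll_work, poll_fn and POLL_MSEC are invented
 * names; the EDAC code queues on its own edac_workqueue instead):
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define POLL_MSEC 1000

static struct delayed_work poll_work;

static void poll_fn(struct work_struct *work)
{
        /* ... check the hardware here ... */

        /* rearm: schedule ourselves again after the polling interval */
        schedule_delayed_work(&poll_work, msecs_to_jiffies(POLL_MSEC));
}

static int __init poll_init(void)
{
        INIT_DELAYED_WORK(&poll_work, poll_fn);
        schedule_delayed_work(&poll_work, msecs_to_jiffies(POLL_MSEC));
        return 0;
}

static void __exit poll_exit(void)
{
        /* cancel the pending timer; if the handler was already running,
         * wait for it to finish (mirrors edac_mc_workq_teardown above) */
        if (!cancel_delayed_work(&poll_work))
                flush_scheduled_work();
}

module_init(poll_init);
module_exit(poll_exit);
MODULE_LICENSE("GPL");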
+ */ +struct mem_ctl_info *edac_mc_find(int idx) +{ + struct list_head *item; + struct mem_ctl_info *mci; + + list_for_each(item, &mc_devices) { + mci = list_entry(item, struct mem_ctl_info, link); + + if (mci->mc_idx >= idx) { + if (mci->mc_idx == idx) + return mci; + + break; + } + } + + return NULL; +} +EXPORT_SYMBOL(edac_mc_find); + +/** * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and * create sysfs entries associated with mci structure * @mci: pointer to the mci structure to be added to the list @@ -1468,10 +450,10 @@ static void del_mc_from_global_list(struct mem_ctl_info *mci) */ /* FIXME - should a warning be printed if no error detection? correction? */ -int edac_mc_add_mc(struct mem_ctl_info *mci, int mc_idx) +int edac_mc_add_mc(struct mem_ctl_info *mci) { debugf0("%s()\n", __func__); - mci->mc_idx = mc_idx; + #ifdef CONFIG_EDAC_DEBUG if (edac_debug_level >= 3) edac_mc_dump_mci(mci); @@ -1484,12 +466,12 @@ int edac_mc_add_mc(struct mem_ctl_info *mci, int mc_idx) edac_mc_dump_csrow(&mci->csrows[i]); for (j = 0; j < mci->csrows[i].nr_channels; j++) - edac_mc_dump_channel( - &mci->csrows[i].channels[j]); + edac_mc_dump_channel(&mci->csrows[i]. + channels[j]); } } #endif - down(&mem_ctls_mutex); + mutex_lock(&mem_ctls_mutex); if (add_mc_to_global_list(mci)) goto fail0; @@ -1503,18 +485,28 @@ int edac_mc_add_mc(struct mem_ctl_info *mci, int mc_idx) goto fail1; } + /* If there IS a check routine, then we are running POLLED */ + if (mci->edac_check != NULL) { + /* This instance is NOW RUNNING */ + mci->op_state = OP_RUNNING_POLL; + + edac_mc_workq_setup(mci, edac_mc_get_poll_msec()); + } else { + mci->op_state = OP_RUNNING_INTERRUPT; + } + /* Report action taken */ - edac_mc_printk(mci, KERN_INFO, "Giving out device to %s %s: DEV %s\n", - mci->mod_name, mci->ctl_name, dev_name(mci->dev)); + edac_mc_printk(mci, KERN_INFO, "Giving out device to '%s' '%s':" + " DEV %s\n", mci->mod_name, mci->ctl_name, dev_name(mci)); - up(&mem_ctls_mutex); + mutex_unlock(&mem_ctls_mutex); return 0; fail1: del_mc_from_global_list(mci); fail0: - up(&mem_ctls_mutex); + mutex_unlock(&mem_ctls_mutex); return 1; } EXPORT_SYMBOL_GPL(edac_mc_add_mc); @@ -1526,29 +518,41 @@ EXPORT_SYMBOL_GPL(edac_mc_add_mc); * * Return pointer to removed mci structure, or NULL if device not found. 
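/*
 * Aside (editor's illustration, not part of this commit): edac_mc_find()
 * can break out early only because add_mc_to_global_list() keeps mc_devices
 * sorted by mc_idx. The sorted-insert / early-exit-lookup pair on a plain
 * list_head, without the RCU and mutex details (struct node, node_insert()
 * and node_find() are invented names):
 */
#include <linux/kernel.h>
#include <linux/list.h>

struct node {
        int idx;
        struct list_head link;
};

static LIST_HEAD(nodes);

/* insert so the list stays ordered by ->idx */
static void node_insert(struct node *new)
{
        struct node *p;
        struct list_head *insert_before = &nodes;

        list_for_each_entry(p, &nodes, link) {
                if (p->idx > new->idx) {
                        insert_before = &p->link;
                        break;
                }
        }
        /* list_add_tail() links 'new' just before 'insert_before' */
        list_add_tail(&new->link, insert_before);
}

/* ordered lookup: stop as soon as we pass the wanted index */
static struct node *node_find(int idx)
{
        struct node *p;

        list_for_each_entry(p, &nodes, link) {
                if (p->idx == idx)
                        return p;
                if (p->idx > idx)
                        break;
        }
        return NULL;
}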
*/ -struct mem_ctl_info * edac_mc_del_mc(struct device *dev) +struct mem_ctl_info *edac_mc_del_mc(struct device *dev) { struct mem_ctl_info *mci; - debugf0("MC: %s()\n", __func__); - down(&mem_ctls_mutex); + debugf0("%s()\n", __func__); + + mutex_lock(&mem_ctls_mutex); - if ((mci = find_mci_by_dev(dev)) == NULL) { - up(&mem_ctls_mutex); + /* find the requested mci struct in the global list */ + mci = find_mci_by_dev(dev); + if (mci == NULL) { + mutex_unlock(&mem_ctls_mutex); return NULL; } - edac_remove_sysfs_mci_device(mci); + /* marking MCI offline */ + mci->op_state = OP_OFFLINE; + del_mc_from_global_list(mci); - up(&mem_ctls_mutex); + mutex_unlock(&mem_ctls_mutex); + + /* flush workq processes and remove sysfs */ + edac_mc_workq_teardown(mci); + edac_remove_sysfs_mci_device(mci); + edac_printk(KERN_INFO, EDAC_MC, "Removed device %d for %s %s: DEV %s\n", mci->mc_idx, - mci->mod_name, mci->ctl_name, dev_name(mci->dev)); + mci->mod_name, mci->ctl_name, dev_name(mci)); + return mci; } EXPORT_SYMBOL_GPL(edac_mc_del_mc); -void edac_mc_scrub_block(unsigned long page, unsigned long offset, u32 size) +static void edac_mc_scrub_block(unsigned long page, unsigned long offset, + u32 size) { struct page *pg; void *virt_addr; @@ -1557,7 +561,7 @@ void edac_mc_scrub_block(unsigned long page, unsigned long offset, u32 size) debugf3("%s()\n", __func__); /* ECC error page was not in our memory. Ignore it. */ - if(!pfn_valid(page)) + if (!pfn_valid(page)) return; /* Find the actual page structure then map it and fix */ @@ -1577,7 +581,6 @@ void edac_mc_scrub_block(unsigned long page, unsigned long offset, u32 size) if (PageHighMem(pg)) local_irq_restore(flags); } -EXPORT_SYMBOL_GPL(edac_mc_scrub_block); /* FIXME - should return -1 */ int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page) @@ -1611,7 +614,7 @@ int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page) if (row == -1) edac_mc_printk(mci, KERN_ERR, "could not look up page error address %lx\n", - (unsigned long) page); + (unsigned long)page); return row; } @@ -1620,8 +623,9 @@ EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page); /* FIXME - setable log (warning/emerg) levels */ /* FIXME - integrate with evlog: http://evlog.sourceforge.net/ */ void edac_mc_handle_ce(struct mem_ctl_info *mci, - unsigned long page_frame_number, unsigned long offset_in_page, - unsigned long syndrome, int row, int channel, const char *msg) + unsigned long page_frame_number, + unsigned long offset_in_page, unsigned long syndrome, + int row, int channel, const char *msg) { unsigned long remapped_page; @@ -1647,7 +651,7 @@ void edac_mc_handle_ce(struct mem_ctl_info *mci, return; } - if (log_ce) + if (edac_mc_get_log_ce()) /* FIXME - put in DIMM location */ edac_mc_printk(mci, KERN_WARNING, "CE page 0x%lx, offset 0x%lx, grain %d, syndrome " @@ -1671,18 +675,18 @@ void edac_mc_handle_ce(struct mem_ctl_info *mci, * page - which can then be scrubbed. */ remapped_page = mci->ctl_page_to_phys ? 
- mci->ctl_page_to_phys(mci, page_frame_number) : - page_frame_number; + mci->ctl_page_to_phys(mci, page_frame_number) : + page_frame_number; edac_mc_scrub_block(remapped_page, offset_in_page, - mci->csrows[row].grain); + mci->csrows[row].grain); } } EXPORT_SYMBOL_GPL(edac_mc_handle_ce); void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, const char *msg) { - if (log_ce) + if (edac_mc_get_log_ce()) edac_mc_printk(mci, KERN_WARNING, "CE - no information available: %s\n", msg); @@ -1692,8 +696,8 @@ void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, const char *msg) EXPORT_SYMBOL_GPL(edac_mc_handle_ce_no_info); void edac_mc_handle_ue(struct mem_ctl_info *mci, - unsigned long page_frame_number, unsigned long offset_in_page, - int row, const char *msg) + unsigned long page_frame_number, + unsigned long offset_in_page, int row, const char *msg) { int len = EDAC_MC_LABEL_LEN * 4; char labels[len + 1]; @@ -1714,26 +718,26 @@ void edac_mc_handle_ue(struct mem_ctl_info *mci, } chars = snprintf(pos, len + 1, "%s", - mci->csrows[row].channels[0].label); + mci->csrows[row].channels[0].label); len -= chars; pos += chars; for (chan = 1; (chan < mci->csrows[row].nr_channels) && (len > 0); - chan++) { + chan++) { chars = snprintf(pos, len + 1, ":%s", - mci->csrows[row].channels[chan].label); + mci->csrows[row].channels[chan].label); len -= chars; pos += chars; } - if (log_ue) + if (edac_mc_get_log_ue()) edac_mc_printk(mci, KERN_EMERG, "UE page 0x%lx, offset 0x%lx, grain %d, row %d, " "labels \"%s\": %s\n", page_frame_number, - offset_in_page, mci->csrows[row].grain, row, labels, - msg); + offset_in_page, mci->csrows[row].grain, row, + labels, msg); - if (panic_on_ue) + if (edac_mc_get_panic_on_ue()) panic("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, " "row %d, labels \"%s\": %s\n", mci->mc_idx, page_frame_number, offset_in_page, @@ -1746,10 +750,10 @@ EXPORT_SYMBOL_GPL(edac_mc_handle_ue); void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, const char *msg) { - if (panic_on_ue) + if (edac_mc_get_panic_on_ue()) panic("EDAC MC%d: Uncorrected Error", mci->mc_idx); - if (log_ue) + if (edac_mc_get_log_ue()) edac_mc_printk(mci, KERN_WARNING, "UE - no information available: %s\n", msg); mci->ue_noinfo_count++; @@ -1757,16 +761,14 @@ void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, const char *msg) } EXPORT_SYMBOL_GPL(edac_mc_handle_ue_no_info); - /************************************************************* * On Fully Buffered DIMM modules, this help function is * called to process UE events */ void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci, - unsigned int csrow, - unsigned int channela, - unsigned int channelb, - char *msg) + unsigned int csrow, + unsigned int channela, + unsigned int channelb, char *msg) { int len = EDAC_MC_LABEL_LEN * 4; char labels[len + 1]; @@ -1808,20 +810,21 @@ void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci, /* Generate the DIMM labels from the specified channels */ chars = snprintf(pos, len + 1, "%s", mci->csrows[csrow].channels[channela].label); - len -= chars; pos += chars; + len -= chars; + pos += chars; chars = snprintf(pos, len + 1, "-%s", mci->csrows[csrow].channels[channelb].label); - if (log_ue) + if (edac_mc_get_log_ue()) edac_mc_printk(mci, KERN_EMERG, "UE row %d, channel-a= %d channel-b= %d " "labels \"%s\": %s\n", csrow, channela, channelb, labels, msg); - if (panic_on_ue) + if (edac_mc_get_panic_on_ue()) panic("UE row %d, channel-a= %d channel-b= %d " - "labels \"%s\": %s\n", csrow, channela, - channelb, labels, msg); + "labels \"%s\": 
%s\n", csrow, channela, + channelb, labels, msg); } EXPORT_SYMBOL(edac_mc_handle_fbd_ue); @@ -1830,9 +833,7 @@ EXPORT_SYMBOL(edac_mc_handle_fbd_ue); * called to process CE events */ void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci, - unsigned int csrow, - unsigned int channel, - char *msg) + unsigned int csrow, unsigned int channel, char *msg) { /* Ensure boundary values */ @@ -1853,13 +854,12 @@ void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci, return; } - if (log_ce) + if (edac_mc_get_log_ce()) /* FIXME - put in DIMM location */ edac_mc_printk(mci, KERN_WARNING, "CE row %d, channel %d, label \"%s\": %s\n", csrow, channel, - mci->csrows[csrow].channels[channel].label, - msg); + mci->csrows[csrow].channels[channel].label, msg); mci->ce_count++; mci->csrows[csrow].ce_count++; @@ -1867,17 +867,16 @@ void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci, } EXPORT_SYMBOL(edac_mc_handle_fbd_ce); - /* * Iterate over all MC instances and check for ECC, et al, errors */ -static inline void check_mc_devices(void) +void edac_check_mc_devices(void) { struct list_head *item; struct mem_ctl_info *mci; debugf3("%s()\n", __func__); - down(&mem_ctls_mutex); + mutex_lock(&mem_ctls_mutex); list_for_each(item, &mc_devices) { mci = list_entry(item, struct mem_ctl_info, link); @@ -1886,119 +885,5 @@ static inline void check_mc_devices(void) mci->edac_check(mci); } - up(&mem_ctls_mutex); -} - -/* - * Check MC status every poll_msec. - * Check PCI status every poll_msec as well. - * - * This where the work gets done for edac. - * - * SMP safe, doesn't use NMI, and auto-rate-limits. - */ -static void do_edac_check(void) -{ - debugf3("%s()\n", __func__); - check_mc_devices(); - do_pci_parity_check(); -} - -static int edac_kernel_thread(void *arg) -{ - while (!kthread_should_stop()) { - do_edac_check(); - - /* goto sleep for the interval */ - schedule_timeout_interruptible((HZ * poll_msec) / 1000); - try_to_freeze(); - } - - return 0; + mutex_unlock(&mem_ctls_mutex); } - -/* - * edac_mc_init - * module initialization entry point - */ -static int __init edac_mc_init(void) -{ - edac_printk(KERN_INFO, EDAC_MC, EDAC_MC_VERSION "\n"); - - /* - * Harvest and clear any boot/initialization PCI parity errors - * - * FIXME: This only clears errors logged by devices present at time of - * module initialization. We should also do an initial clear - * of each newly hotplugged device. 
- */ - clear_pci_parity_errors(); - - /* Create the MC sysfs entries */ - if (edac_sysfs_memctrl_setup()) { - edac_printk(KERN_ERR, EDAC_MC, - "Error initializing sysfs code\n"); - return -ENODEV; - } - - /* Create the PCI parity sysfs entries */ - if (edac_sysfs_pci_setup()) { - edac_sysfs_memctrl_teardown(); - edac_printk(KERN_ERR, EDAC_MC, - "EDAC PCI: Error initializing sysfs code\n"); - return -ENODEV; - } - - /* create our kernel thread */ - edac_thread = kthread_run(edac_kernel_thread, NULL, "kedac"); - - if (IS_ERR(edac_thread)) { - /* remove the sysfs entries */ - edac_sysfs_memctrl_teardown(); - edac_sysfs_pci_teardown(); - return PTR_ERR(edac_thread); - } - - return 0; -} - -/* - * edac_mc_exit() - * module exit/termination functioni - */ -static void __exit edac_mc_exit(void) -{ - debugf0("%s()\n", __func__); - kthread_stop(edac_thread); - - /* tear down the sysfs device */ - edac_sysfs_memctrl_teardown(); - edac_sysfs_pci_teardown(); -} - -module_init(edac_mc_init); -module_exit(edac_mc_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n" - "Based on work by Dan Hollis et al"); -MODULE_DESCRIPTION("Core library routines for MC reporting"); - -module_param(panic_on_ue, int, 0644); -MODULE_PARM_DESC(panic_on_ue, "Panic on uncorrected error: 0=off 1=on"); -#ifdef CONFIG_PCI -module_param(check_pci_parity, int, 0644); -MODULE_PARM_DESC(check_pci_parity, "Check for PCI bus parity errors: 0=off 1=on"); -module_param(panic_on_pci_parity, int, 0644); -MODULE_PARM_DESC(panic_on_pci_parity, "Panic on PCI Bus Parity error: 0=off 1=on"); -#endif -module_param(log_ue, int, 0644); -MODULE_PARM_DESC(log_ue, "Log uncorrectable error to console: 0=off 1=on"); -module_param(log_ce, int, 0644); -MODULE_PARM_DESC(log_ce, "Log correctable error to console: 0=off 1=on"); -module_param(poll_msec, int, 0644); -MODULE_PARM_DESC(poll_msec, "Polling period in milliseconds"); -#ifdef CONFIG_EDAC_DEBUG -module_param(edac_debug_level, int, 0644); -MODULE_PARM_DESC(edac_debug_level, "Debug level"); -#endif diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c new file mode 100644 index 000000000000..cd090b0677a7 --- /dev/null +++ b/drivers/edac/edac_mc_sysfs.c @@ -0,0 +1,1024 @@ +/* + * edac_mc kernel module + * (C) 2005-2007 Linux Networx (http://lnxi.com) + * + * This file may be distributed under the terms of the + * GNU General Public License. 
+ * + * Written Doug Thompson <norsk5@xmission.com> www.softwarebitmaker.com + * + */ + +#include <linux/ctype.h> +#include <linux/bug.h> + +#include "edac_core.h" +#include "edac_module.h" + + +/* MC EDAC Controls, setable by module parameter, and sysfs */ +static int edac_mc_log_ue = 1; +static int edac_mc_log_ce = 1; +static int edac_mc_panic_on_ue; +static int edac_mc_poll_msec = 1000; + +/* Getter functions for above */ +int edac_mc_get_log_ue(void) +{ + return edac_mc_log_ue; +} + +int edac_mc_get_log_ce(void) +{ + return edac_mc_log_ce; +} + +int edac_mc_get_panic_on_ue(void) +{ + return edac_mc_panic_on_ue; +} + +/* this is temporary */ +int edac_mc_get_poll_msec(void) +{ + return edac_mc_poll_msec; +} + +/* Parameter declarations for above */ +module_param(edac_mc_panic_on_ue, int, 0644); +MODULE_PARM_DESC(edac_mc_panic_on_ue, "Panic on uncorrected error: 0=off 1=on"); +module_param(edac_mc_log_ue, int, 0644); +MODULE_PARM_DESC(edac_mc_log_ue, + "Log uncorrectable error to console: 0=off 1=on"); +module_param(edac_mc_log_ce, int, 0644); +MODULE_PARM_DESC(edac_mc_log_ce, + "Log correctable error to console: 0=off 1=on"); +module_param(edac_mc_poll_msec, int, 0644); +MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds"); + +/* + * various constants for Memory Controllers + */ +static const char *mem_types[] = { + [MEM_EMPTY] = "Empty", + [MEM_RESERVED] = "Reserved", + [MEM_UNKNOWN] = "Unknown", + [MEM_FPM] = "FPM", + [MEM_EDO] = "EDO", + [MEM_BEDO] = "BEDO", + [MEM_SDR] = "Unbuffered-SDR", + [MEM_RDR] = "Registered-SDR", + [MEM_DDR] = "Unbuffered-DDR", + [MEM_RDDR] = "Registered-DDR", + [MEM_RMBS] = "RMBS", + [MEM_DDR2] = "Unbuffered-DDR2", + [MEM_FB_DDR2] = "FullyBuffered-DDR2", + [MEM_RDDR2] = "Registered-DDR2" +}; + +static const char *dev_types[] = { + [DEV_UNKNOWN] = "Unknown", + [DEV_X1] = "x1", + [DEV_X2] = "x2", + [DEV_X4] = "x4", + [DEV_X8] = "x8", + [DEV_X16] = "x16", + [DEV_X32] = "x32", + [DEV_X64] = "x64" +}; + +static const char *edac_caps[] = { + [EDAC_UNKNOWN] = "Unknown", + [EDAC_NONE] = "None", + [EDAC_RESERVED] = "Reserved", + [EDAC_PARITY] = "PARITY", + [EDAC_EC] = "EC", + [EDAC_SECDED] = "SECDED", + [EDAC_S2ECD2ED] = "S2ECD2ED", + [EDAC_S4ECD4ED] = "S4ECD4ED", + [EDAC_S8ECD8ED] = "S8ECD8ED", + [EDAC_S16ECD16ED] = "S16ECD16ED" +}; + + + +/* + * /sys/devices/system/edac/mc; + * data structures and methods + */ +static ssize_t memctrl_int_show(void *ptr, char *buffer) +{ + int *value = (int *)ptr; + return sprintf(buffer, "%u\n", *value); +} + +static ssize_t memctrl_int_store(void *ptr, const char *buffer, size_t count) +{ + int *value = (int *)ptr; + + if (isdigit(*buffer)) + *value = simple_strtoul(buffer, NULL, 0); + + return count; +} + + +/* EDAC sysfs CSROW data structures and methods + */ + +/* Set of more default csrow<id> attribute show/store functions */ +static ssize_t csrow_ue_count_show(struct csrow_info *csrow, char *data, + int private) +{ + return sprintf(data, "%u\n", csrow->ue_count); +} + +static ssize_t csrow_ce_count_show(struct csrow_info *csrow, char *data, + int private) +{ + return sprintf(data, "%u\n", csrow->ce_count); +} + +static ssize_t csrow_size_show(struct csrow_info *csrow, char *data, + int private) +{ + return sprintf(data, "%u\n", PAGES_TO_MiB(csrow->nr_pages)); +} + +static ssize_t csrow_mem_type_show(struct csrow_info *csrow, char *data, + int private) +{ + return sprintf(data, "%s\n", mem_types[csrow->mtype]); +} + +static ssize_t csrow_dev_type_show(struct csrow_info *csrow, char *data, + int private) +{ + 
return sprintf(data, "%s\n", dev_types[csrow->dtype]); +} + +static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data, + int private) +{ + return sprintf(data, "%s\n", edac_caps[csrow->edac_mode]); +} + +/* show/store functions for DIMM Label attributes */ +static ssize_t channel_dimm_label_show(struct csrow_info *csrow, + char *data, int channel) +{ + return snprintf(data, EDAC_MC_LABEL_LEN, "%s", + csrow->channels[channel].label); +} + +static ssize_t channel_dimm_label_store(struct csrow_info *csrow, + const char *data, + size_t count, int channel) +{ + ssize_t max_size = 0; + + max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1); + strncpy(csrow->channels[channel].label, data, max_size); + csrow->channels[channel].label[max_size] = '\0'; + + return max_size; +} + +/* show function for dynamic chX_ce_count attribute */ +static ssize_t channel_ce_count_show(struct csrow_info *csrow, + char *data, int channel) +{ + return sprintf(data, "%u\n", csrow->channels[channel].ce_count); +} + +/* csrow specific attribute structure */ +struct csrowdev_attribute { + struct attribute attr; + ssize_t(*show) (struct csrow_info *, char *, int); + ssize_t(*store) (struct csrow_info *, const char *, size_t, int); + int private; +}; + +#define to_csrow(k) container_of(k, struct csrow_info, kobj) +#define to_csrowdev_attr(a) container_of(a, struct csrowdev_attribute, attr) + +/* Set of show/store higher level functions for default csrow attributes */ +static ssize_t csrowdev_show(struct kobject *kobj, + struct attribute *attr, char *buffer) +{ + struct csrow_info *csrow = to_csrow(kobj); + struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr); + + if (csrowdev_attr->show) + return csrowdev_attr->show(csrow, + buffer, csrowdev_attr->private); + return -EIO; +} + +static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr, + const char *buffer, size_t count) +{ + struct csrow_info *csrow = to_csrow(kobj); + struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr); + + if (csrowdev_attr->store) + return csrowdev_attr->store(csrow, + buffer, + count, csrowdev_attr->private); + return -EIO; +} + +static struct sysfs_ops csrowfs_ops = { + .show = csrowdev_show, + .store = csrowdev_store +}; + +#define CSROWDEV_ATTR(_name,_mode,_show,_store,_private) \ +static struct csrowdev_attribute attr_##_name = { \ + .attr = {.name = __stringify(_name), .mode = _mode }, \ + .show = _show, \ + .store = _store, \ + .private = _private, \ +}; + +/* default cwrow<id>/attribute files */ +CSROWDEV_ATTR(size_mb, S_IRUGO, csrow_size_show, NULL, 0); +CSROWDEV_ATTR(dev_type, S_IRUGO, csrow_dev_type_show, NULL, 0); +CSROWDEV_ATTR(mem_type, S_IRUGO, csrow_mem_type_show, NULL, 0); +CSROWDEV_ATTR(edac_mode, S_IRUGO, csrow_edac_mode_show, NULL, 0); +CSROWDEV_ATTR(ue_count, S_IRUGO, csrow_ue_count_show, NULL, 0); +CSROWDEV_ATTR(ce_count, S_IRUGO, csrow_ce_count_show, NULL, 0); + +/* default attributes of the CSROW<id> object */ +static struct csrowdev_attribute *default_csrow_attr[] = { + &attr_dev_type, + &attr_mem_type, + &attr_edac_mode, + &attr_size_mb, + &attr_ue_count, + &attr_ce_count, + NULL, +}; + +/* possible dynamic channel DIMM Label attribute files */ +CSROWDEV_ATTR(ch0_dimm_label, S_IRUGO | S_IWUSR, + channel_dimm_label_show, channel_dimm_label_store, 0); +CSROWDEV_ATTR(ch1_dimm_label, S_IRUGO | S_IWUSR, + channel_dimm_label_show, channel_dimm_label_store, 1); +CSROWDEV_ATTR(ch2_dimm_label, S_IRUGO | S_IWUSR, + channel_dimm_label_show, channel_dimm_label_store, 
2); +CSROWDEV_ATTR(ch3_dimm_label, S_IRUGO | S_IWUSR, + channel_dimm_label_show, channel_dimm_label_store, 3); +CSROWDEV_ATTR(ch4_dimm_label, S_IRUGO | S_IWUSR, + channel_dimm_label_show, channel_dimm_label_store, 4); +CSROWDEV_ATTR(ch5_dimm_label, S_IRUGO | S_IWUSR, + channel_dimm_label_show, channel_dimm_label_store, 5); + +/* Total possible dynamic DIMM Label attribute file table */ +static struct csrowdev_attribute *dynamic_csrow_dimm_attr[] = { + &attr_ch0_dimm_label, + &attr_ch1_dimm_label, + &attr_ch2_dimm_label, + &attr_ch3_dimm_label, + &attr_ch4_dimm_label, + &attr_ch5_dimm_label +}; + +/* possible dynamic channel ce_count attribute files */ +CSROWDEV_ATTR(ch0_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 0); +CSROWDEV_ATTR(ch1_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 1); +CSROWDEV_ATTR(ch2_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 2); +CSROWDEV_ATTR(ch3_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 3); +CSROWDEV_ATTR(ch4_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 4); +CSROWDEV_ATTR(ch5_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 5); + +/* Total possible dynamic ce_count attribute file table */ +static struct csrowdev_attribute *dynamic_csrow_ce_count_attr[] = { + &attr_ch0_ce_count, + &attr_ch1_ce_count, + &attr_ch2_ce_count, + &attr_ch3_ce_count, + &attr_ch4_ce_count, + &attr_ch5_ce_count +}; + +#define EDAC_NR_CHANNELS 6 + +/* Create dynamic CHANNEL files, indexed by 'chan', under specifed CSROW */ +static int edac_create_channel_files(struct kobject *kobj, int chan) +{ + int err = -ENODEV; + + if (chan >= EDAC_NR_CHANNELS) + return err; + + /* create the DIMM label attribute file */ + err = sysfs_create_file(kobj, + (struct attribute *) + dynamic_csrow_dimm_attr[chan]); + + if (!err) { + /* create the CE Count attribute file */ + err = sysfs_create_file(kobj, + (struct attribute *) + dynamic_csrow_ce_count_attr[chan]); + } else { + debugf1("%s() dimm labels and ce_count files created", + __func__); + } + + return err; +} + +/* No memory to release for this kobj */ +static void edac_csrow_instance_release(struct kobject *kobj) +{ + struct mem_ctl_info *mci; + struct csrow_info *cs; + + debugf1("%s()\n", __func__); + + cs = container_of(kobj, struct csrow_info, kobj); + mci = cs->mci; + + kobject_put(&mci->edac_mci_kobj); +} + +/* the kobj_type instance for a CSROW */ +static struct kobj_type ktype_csrow = { + .release = edac_csrow_instance_release, + .sysfs_ops = &csrowfs_ops, + .default_attrs = (struct attribute **)default_csrow_attr, +}; + +/* Create a CSROW object under specifed edac_mc_device */ +static int edac_create_csrow_object(struct mem_ctl_info *mci, + struct csrow_info *csrow, int index) +{ + struct kobject *kobj_mci = &mci->edac_mci_kobj; + struct kobject *kobj; + int chan; + int err; + + /* generate ..../edac/mc/mc<id>/csrow<index> */ + memset(&csrow->kobj, 0, sizeof(csrow->kobj)); + csrow->mci = mci; /* include container up link */ + csrow->kobj.parent = kobj_mci; + csrow->kobj.ktype = &ktype_csrow; + + /* name this instance of csrow<id> */ + err = kobject_set_name(&csrow->kobj, "csrow%d", index); + if (err) + goto err_out; + + /* bump the mci instance's kobject's ref count */ + kobj = kobject_get(&mci->edac_mci_kobj); + if (!kobj) { + err = -ENODEV; + goto err_out; + } + + /* Instanstiate the csrow object */ + err = kobject_register(&csrow->kobj); + if (err) + goto err_release_top_kobj; + + /* At this point, to release a csrow kobj, one must + * call the kobject_unregister and 
allow that tear down + * to work the releasing + */ + + /* Create the dyanmic attribute files on this csrow, + * namely, the DIMM labels and the channel ce_count + */ + for (chan = 0; chan < csrow->nr_channels; chan++) { + err = edac_create_channel_files(&csrow->kobj, chan); + if (err) { + /* special case the unregister here */ + kobject_unregister(&csrow->kobj); + goto err_out; + } + } + + return 0; + + /* error unwind stack */ +err_release_top_kobj: + kobject_put(&mci->edac_mci_kobj); + +err_out: + return err; +} + +/* default sysfs methods and data structures for the main MCI kobject */ + +static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci, + const char *data, size_t count) +{ + int row, chan; + + mci->ue_noinfo_count = 0; + mci->ce_noinfo_count = 0; + mci->ue_count = 0; + mci->ce_count = 0; + + for (row = 0; row < mci->nr_csrows; row++) { + struct csrow_info *ri = &mci->csrows[row]; + + ri->ue_count = 0; + ri->ce_count = 0; + + for (chan = 0; chan < ri->nr_channels; chan++) + ri->channels[chan].ce_count = 0; + } + + mci->start_time = jiffies; + return count; +} + +/* memory scrubbing */ +static ssize_t mci_sdram_scrub_rate_store(struct mem_ctl_info *mci, + const char *data, size_t count) +{ + u32 bandwidth = -1; + + if (mci->set_sdram_scrub_rate) { + + memctrl_int_store(&bandwidth, data, count); + + if (!(*mci->set_sdram_scrub_rate) (mci, &bandwidth)) { + edac_printk(KERN_DEBUG, EDAC_MC, + "Scrub rate set successfully, applied: %d\n", + bandwidth); + } else { + /* FIXME: error codes maybe? */ + edac_printk(KERN_DEBUG, EDAC_MC, + "Scrub rate set FAILED, could not apply: %d\n", + bandwidth); + } + } else { + /* FIXME: produce "not implemented" ERROR for user-side. */ + edac_printk(KERN_WARNING, EDAC_MC, + "Memory scrubbing 'set'control is not implemented!\n"); + } + return count; +} + +static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data) +{ + u32 bandwidth = -1; + + if (mci->get_sdram_scrub_rate) { + if (!(*mci->get_sdram_scrub_rate) (mci, &bandwidth)) { + edac_printk(KERN_DEBUG, EDAC_MC, + "Scrub rate successfully, fetched: %d\n", + bandwidth); + } else { + /* FIXME: error codes maybe? */ + edac_printk(KERN_DEBUG, EDAC_MC, + "Scrub rate fetch FAILED, got: %d\n", + bandwidth); + } + } else { + /* FIXME: produce "not implemented" ERROR for user-side. 
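The sdram_scrub_rate show/store handlers only provide the sysfs plumbing; the real work is delegated to two optional callbacks on struct mem_ctl_info. Judging from the call sites (both are called with the mci and a u32 pointer, and 0 means success), a memory-controller driver might plug them in roughly as sketched here. my_hw_set_scrub() and my_hw_get_scrub() are stand-ins for real chipset register accesses, not functions from this patch.

static int my_set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
{
	/* program the chipset scrub registers; may round *bw to what
	 * was actually applied, 0 means success (stand-in helper)
	 */
	return my_hw_set_scrub(bw);
}

static int my_get_sdram_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
{
	*bw = my_hw_get_scrub();	/* report the programmed rate (stand-in) */
	return 0;
}

static void my_driver_wire_scrub(struct mem_ctl_info *mci)
{
	/* done in the driver's probe, before the mci is registered */
	mci->set_sdram_scrub_rate = my_set_sdram_scrub_rate;
	mci->get_sdram_scrub_rate = my_get_sdram_scrub_rate;
}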
*/ + edac_printk(KERN_WARNING, EDAC_MC, + "Memory scrubbing 'get' control is not implemented\n"); + } + return sprintf(data, "%d\n", bandwidth); +} + +/* default attribute files for the MCI object */ +static ssize_t mci_ue_count_show(struct mem_ctl_info *mci, char *data) +{ + return sprintf(data, "%d\n", mci->ue_count); +} + +static ssize_t mci_ce_count_show(struct mem_ctl_info *mci, char *data) +{ + return sprintf(data, "%d\n", mci->ce_count); +} + +static ssize_t mci_ce_noinfo_show(struct mem_ctl_info *mci, char *data) +{ + return sprintf(data, "%d\n", mci->ce_noinfo_count); +} + +static ssize_t mci_ue_noinfo_show(struct mem_ctl_info *mci, char *data) +{ + return sprintf(data, "%d\n", mci->ue_noinfo_count); +} + +static ssize_t mci_seconds_show(struct mem_ctl_info *mci, char *data) +{ + return sprintf(data, "%ld\n", (jiffies - mci->start_time) / HZ); +} + +static ssize_t mci_ctl_name_show(struct mem_ctl_info *mci, char *data) +{ + return sprintf(data, "%s\n", mci->ctl_name); +} + +static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data) +{ + int total_pages, csrow_idx; + + for (total_pages = csrow_idx = 0; csrow_idx < mci->nr_csrows; + csrow_idx++) { + struct csrow_info *csrow = &mci->csrows[csrow_idx]; + + if (!csrow->nr_pages) + continue; + + total_pages += csrow->nr_pages; + } + + return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages)); +} + +#define to_mci(k) container_of(k, struct mem_ctl_info, edac_mci_kobj) +#define to_mcidev_attr(a) container_of(a,struct mcidev_sysfs_attribute,attr) + +/* MCI show/store functions for top most object */ +static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr, + char *buffer) +{ + struct mem_ctl_info *mem_ctl_info = to_mci(kobj); + struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr); + + if (mcidev_attr->show) + return mcidev_attr->show(mem_ctl_info, buffer); + + return -EIO; +} + +static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr, + const char *buffer, size_t count) +{ + struct mem_ctl_info *mem_ctl_info = to_mci(kobj); + struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr); + + if (mcidev_attr->store) + return mcidev_attr->store(mem_ctl_info, buffer, count); + + return -EIO; +} + +/* Intermediate show/store table */ +static struct sysfs_ops mci_ops = { + .show = mcidev_show, + .store = mcidev_store +}; + +#define MCIDEV_ATTR(_name,_mode,_show,_store) \ +static struct mcidev_sysfs_attribute mci_attr_##_name = { \ + .attr = {.name = __stringify(_name), .mode = _mode }, \ + .show = _show, \ + .store = _store, \ +}; + +/* default Control file */ +MCIDEV_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store); + +/* default Attribute files */ +MCIDEV_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL); +MCIDEV_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL); +MCIDEV_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL); +MCIDEV_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL); +MCIDEV_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL); +MCIDEV_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL); +MCIDEV_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL); + +/* memory scrubber attribute file */ +MCIDEV_ATTR(sdram_scrub_rate, S_IRUGO | S_IWUSR, mci_sdram_scrub_rate_show, + mci_sdram_scrub_rate_store); + +static struct mcidev_sysfs_attribute *mci_attr[] = { + &mci_attr_reset_counters, + &mci_attr_mc_name, + &mci_attr_size_mb, + &mci_attr_seconds_since_reset, + &mci_attr_ue_noinfo_count, + &mci_attr_ce_noinfo_count, + &mci_attr_ue_count, + &mci_attr_ce_count, + 
&mci_attr_sdram_scrub_rate, + NULL +}; + + +/* + * Release of a MC controlling instance + * + * each MC control instance has the following resources upon entry: + * a) a ref count on the top memctl kobj + * b) a ref count on this module + * + * this function must decrement those ref counts and then + * issue a free on the instance's memory + */ +static void edac_mci_control_release(struct kobject *kobj) +{ + struct mem_ctl_info *mci; + + mci = to_mci(kobj); + + debugf0("%s() mci instance idx=%d releasing\n", __func__, mci->mc_idx); + + /* decrement the module ref count */ + module_put(mci->owner); + + /* free the mci instance memory here */ + kfree(mci); +} + +static struct kobj_type ktype_mci = { + .release = edac_mci_control_release, + .sysfs_ops = &mci_ops, + .default_attrs = (struct attribute **)mci_attr, +}; + +/* show/store, tables, etc for the MC kset */ + + +struct memctrl_dev_attribute { + struct attribute attr; + void *value; + ssize_t(*show) (void *, char *); + ssize_t(*store) (void *, const char *, size_t); +}; + +/* Set of show/store abstract level functions for memory control object */ +static ssize_t memctrl_dev_show(struct kobject *kobj, + struct attribute *attr, char *buffer) +{ + struct memctrl_dev_attribute *memctrl_dev; + memctrl_dev = (struct memctrl_dev_attribute *)attr; + + if (memctrl_dev->show) + return memctrl_dev->show(memctrl_dev->value, buffer); + + return -EIO; +} + +static ssize_t memctrl_dev_store(struct kobject *kobj, struct attribute *attr, + const char *buffer, size_t count) +{ + struct memctrl_dev_attribute *memctrl_dev; + memctrl_dev = (struct memctrl_dev_attribute *)attr; + + if (memctrl_dev->store) + return memctrl_dev->store(memctrl_dev->value, buffer, count); + + return -EIO; +} + +static struct sysfs_ops memctrlfs_ops = { + .show = memctrl_dev_show, + .store = memctrl_dev_store +}; + +#define MEMCTRL_ATTR(_name, _mode, _show, _store) \ +static struct memctrl_dev_attribute attr_##_name = { \ + .attr = {.name = __stringify(_name), .mode = _mode }, \ + .value = &_name, \ + .show = _show, \ + .store = _store, \ +}; + +#define MEMCTRL_STRING_ATTR(_name, _data, _mode, _show, _store) \ +static struct memctrl_dev_attribute attr_##_name = { \ + .attr = {.name = __stringify(_name), .mode = _mode }, \ + .value = _data, \ + .show = _show, \ + .store = _store, \ +}; + +/* csrow<id> control files */ +MEMCTRL_ATTR(edac_mc_panic_on_ue, + S_IRUGO | S_IWUSR, memctrl_int_show, memctrl_int_store); + +MEMCTRL_ATTR(edac_mc_log_ue, + S_IRUGO | S_IWUSR, memctrl_int_show, memctrl_int_store); + +MEMCTRL_ATTR(edac_mc_log_ce, + S_IRUGO | S_IWUSR, memctrl_int_show, memctrl_int_store); + +MEMCTRL_ATTR(edac_mc_poll_msec, + S_IRUGO | S_IWUSR, memctrl_int_show, memctrl_int_store); + +/* Base Attributes of the memory ECC object */ +static struct memctrl_dev_attribute *memctrl_attr[] = { + &attr_edac_mc_panic_on_ue, + &attr_edac_mc_log_ue, + &attr_edac_mc_log_ce, + &attr_edac_mc_poll_msec, + NULL, +}; + + +/* the ktype for the mc_kset internal kobj */ +static struct kobj_type ktype_mc_set_attribs = { + .sysfs_ops = &memctrlfs_ops, + .default_attrs = (struct attribute **)memctrl_attr, +}; + +/* EDAC memory controller sysfs kset: + * /sys/devices/system/edac/mc + */ +static struct kset mc_kset = { + .kobj = {.name = "mc", .ktype = &ktype_mc_set_attribs }, + .ktype = &ktype_mci, +}; + + +/* + * edac_mc_register_sysfs_main_kobj + * + * setups and registers the main kobject for each mci + */ +int edac_mc_register_sysfs_main_kobj(struct mem_ctl_info *mci) +{ + struct kobject *kobj_mci; + 
int err; + + debugf1("%s()\n", __func__); + + kobj_mci = &mci->edac_mci_kobj; + + /* Init the mci's kobject */ + memset(kobj_mci, 0, sizeof(*kobj_mci)); + + /* this instance become part of the mc_kset */ + kobj_mci->kset = &mc_kset; + + /* set the name of the mc<id> object */ + err = kobject_set_name(kobj_mci, "mc%d", mci->mc_idx); + if (err) + goto fail_out; + + /* Record which module 'owns' this control structure + * and bump the ref count of the module + */ + mci->owner = THIS_MODULE; + + /* bump ref count on this module */ + if (!try_module_get(mci->owner)) { + err = -ENODEV; + goto fail_out; + } + + /* register the mc<id> kobject to the mc_kset */ + err = kobject_register(kobj_mci); + if (err) { + debugf1("%s()Failed to register '.../edac/mc%d'\n", + __func__, mci->mc_idx); + goto kobj_reg_fail; + } + + /* At this point, to 'free' the control struct, + * edac_mc_unregister_sysfs_main_kobj() must be used + */ + + debugf1("%s() Registered '.../edac/mc%d' kobject\n", + __func__, mci->mc_idx); + + return 0; + + /* Error exit stack */ + +kobj_reg_fail: + module_put(mci->owner); + +fail_out: + return err; +} + +/* + * edac_mc_register_sysfs_main_kobj + * + * tears down and the main mci kobject from the mc_kset + */ +void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci) +{ + /* delete the kobj from the mc_kset */ + kobject_unregister(&mci->edac_mci_kobj); +} + +#define EDAC_DEVICE_SYMLINK "device" + +/* + * edac_create_mci_instance_attributes + * create MC driver specific attributes at the topmost level + * directory of this mci instance. + */ +static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci) +{ + int err; + struct mcidev_sysfs_attribute *sysfs_attrib; + + /* point to the start of the array and iterate over it + * adding each attribute listed to this mci instance's kobject + */ + sysfs_attrib = mci->mc_driver_sysfs_attributes; + + while (sysfs_attrib && sysfs_attrib->attr.name) { + err = sysfs_create_file(&mci->edac_mci_kobj, + (struct attribute*) sysfs_attrib); + if (err) { + return err; + } + + sysfs_attrib++; + } + + return 0; +} + +/* + * edac_remove_mci_instance_attributes + * remove MC driver specific attributes at the topmost level + * directory of this mci instance. + */ +static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci) +{ + struct mcidev_sysfs_attribute *sysfs_attrib; + + /* point to the start of the array and iterate over it + * adding each attribute listed to this mci instance's kobject + */ + sysfs_attrib = mci->mc_driver_sysfs_attributes; + + /* loop if there are attributes and until we hit a NULL entry */ + while (sysfs_attrib && sysfs_attrib->attr.name) { + sysfs_remove_file(&mci->edac_mci_kobj, + (struct attribute *) sysfs_attrib); + sysfs_attrib++; + } +} + + +/* + * Create a new Memory Controller kobject instance, + * mc<id> under the 'mc' directory + * + * Return: + * 0 Success + * !0 Failure + */ +int edac_create_sysfs_mci_device(struct mem_ctl_info *mci) +{ + int i; + int err; + struct csrow_info *csrow; + struct kobject *kobj_mci = &mci->edac_mci_kobj; + + debugf0("%s() idx=%d\n", __func__, mci->mc_idx); + + /* create a symlink for the device */ + err = sysfs_create_link(kobj_mci, &mci->dev->kobj, + EDAC_DEVICE_SYMLINK); + if (err) { + debugf1("%s() failure to create symlink\n", __func__); + goto fail0; + } + + /* If the low level driver desires some attributes, + * then create them now for the driver. 
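edac_create_mci_instance_attributes() and its remove counterpart above walk an optional, NULL-name-terminated array supplied by the low-level driver through mci->mc_driver_sysfs_attributes. A hedged sketch of such a driver-side table, using the struct mcidev_sysfs_attribute layout implied by the MCIDEV_ATTR macro earlier in this file; "inject_enable" is a made-up attribute purely for illustration.

static ssize_t inject_enable_show(struct mem_ctl_info *mci, char *data)
{
	return sprintf(data, "%d\n", 0);	/* real driver state would go here */
}

static struct mcidev_sysfs_attribute my_driver_mci_attrs[] = {
	{
		.attr = { .name = "inject_enable", .mode = S_IRUGO },
		.show = inject_enable_show,
	},
	{ .attr = { .name = NULL } }	/* terminator checked by the walkers above */
};

static void my_driver_add_attrs(struct mem_ctl_info *mci)
{
	/* in the driver's probe, before the controller is registered */
	mci->mc_driver_sysfs_attributes = my_driver_mci_attrs;
}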
+ */ + if (mci->mc_driver_sysfs_attributes) { + err = edac_create_mci_instance_attributes(mci); + if (err) { + debugf1("%s() failure to create mci attributes\n", + __func__); + goto fail0; + } + } + + /* Make directories for each CSROW object under the mc<id> kobject + */ + for (i = 0; i < mci->nr_csrows; i++) { + csrow = &mci->csrows[i]; + + /* Only expose populated CSROWs */ + if (csrow->nr_pages > 0) { + err = edac_create_csrow_object(mci, csrow, i); + if (err) { + debugf1("%s() failure: create csrow %d obj\n", + __func__, i); + goto fail1; + } + } + } + + return 0; + + /* CSROW error: backout what has already been registered, */ +fail1: + for (i--; i >= 0; i--) { + if (csrow->nr_pages > 0) { + kobject_unregister(&mci->csrows[i].kobj); + } + } + + /* remove the mci instance's attributes, if any */ + edac_remove_mci_instance_attributes(mci); + + /* remove the symlink */ + sysfs_remove_link(kobj_mci, EDAC_DEVICE_SYMLINK); + +fail0: + return err; +} + +/* + * remove a Memory Controller instance + */ +void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) +{ + int i; + + debugf0("%s()\n", __func__); + + /* remove all csrow kobjects */ + for (i = 0; i < mci->nr_csrows; i++) { + if (mci->csrows[i].nr_pages > 0) { + debugf0("%s() unreg csrow-%d\n", __func__, i); + kobject_unregister(&mci->csrows[i].kobj); + } + } + + debugf0("%s() remove_link\n", __func__); + + /* remove the symlink */ + sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK); + + debugf0("%s() remove_mci_instance\n", __func__); + + /* remove this mci instance's attribtes */ + edac_remove_mci_instance_attributes(mci); + + debugf0("%s() unregister this mci kobj\n", __func__); + + /* unregister this instance's kobject */ + kobject_unregister(&mci->edac_mci_kobj); +} + + + + +/* + * edac_setup_sysfs_mc_kset(void) + * + * Initialize the mc_kset for the 'mc' entry + * This requires creating the top 'mc' directory with a kset + * and its controls/attributes. + * + * To this 'mc' kset, instance 'mci' will be grouped as children. + * + * Return: 0 SUCCESS + * !0 FAILURE error code + */ +int edac_sysfs_setup_mc_kset(void) +{ + int err = 0; + struct sysdev_class *edac_class; + + debugf1("%s()\n", __func__); + + /* get the /sys/devices/system/edac class reference */ + edac_class = edac_get_edac_class(); + if (edac_class == NULL) { + debugf1("%s() no edac_class error=%d\n", __func__, err); + goto fail_out; + } + + /* Init the MC's kobject */ + mc_kset.kobj.parent = &edac_class->kset.kobj; + + /* register the mc_kset */ + err = kset_register(&mc_kset); + if (err) { + debugf1("%s() Failed to register '.../edac/mc'\n", __func__); + goto fail_out; + } + + debugf1("%s() Registered '.../edac/mc' kobject\n", __func__); + + return 0; + + + /* error unwind stack */ +fail_out: + return err; +} + +/* + * edac_sysfs_teardown_mc_kset + * + * deconstruct the mc_ket for memory controllers + */ +void edac_sysfs_teardown_mc_kset(void) +{ + kset_unregister(&mc_kset); +} + diff --git a/drivers/edac/edac_module.c b/drivers/edac/edac_module.c new file mode 100644 index 000000000000..e0c4a4086055 --- /dev/null +++ b/drivers/edac/edac_module.c @@ -0,0 +1,222 @@ +/* + * edac_module.c + * + * (C) 2007 www.softwarebitmaker.com + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
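Taken together, edac_mc_sysfs.c above publishes a small tree under /sys/devices/system/edac/mc/: one mc<id> directory per controller carrying ce_count, ue_count, size_mb and friends, plus csrow<n> subdirectories for the populated rows. A hedged userspace sketch of consuming those counters; it assumes a single controller registered as mc0.

#include <stdio.h>

static unsigned int read_counter(const char *name)
{
	char path[128];
	unsigned int val = 0;
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/edac/mc/mc0/%s", name);
	f = fopen(path, "r");
	if (f) {
		fscanf(f, "%u", &val);
		fclose(f);
	}
	return val;
}

int main(void)
{
	printf("mc0: %u corrected, %u uncorrected, %u MiB of memory\n",
	       read_counter("ce_count"), read_counter("ue_count"),
	       read_counter("size_mb"));
	return 0;
}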
+ * + * Author: Doug Thompson <dougthompson@xmission.com> + * + */ +#include <linux/edac.h> + +#include "edac_core.h" +#include "edac_module.h" + +#define EDAC_VERSION "Ver: 2.1.0 " __DATE__ + +#ifdef CONFIG_EDAC_DEBUG +/* Values of 0 to 4 will generate output */ +int edac_debug_level = 2; +EXPORT_SYMBOL_GPL(edac_debug_level); +#endif + +/* scope is to module level only */ +struct workqueue_struct *edac_workqueue; + +/* + * sysfs object: /sys/devices/system/edac + * need to export to other files in this modules + */ +static struct sysdev_class edac_class = { + set_kset_name("edac"), +}; +static int edac_class_valid; + +/* + * edac_op_state_to_string() + */ +char *edac_op_state_to_string(int opstate) +{ + if (opstate == OP_RUNNING_POLL) + return "POLLED"; + else if (opstate == OP_RUNNING_INTERRUPT) + return "INTERRUPT"; + else if (opstate == OP_RUNNING_POLL_INTR) + return "POLL-INTR"; + else if (opstate == OP_ALLOC) + return "ALLOC"; + else if (opstate == OP_OFFLINE) + return "OFFLINE"; + + return "UNKNOWN"; +} + +/* + * edac_get_edac_class() + * + * return pointer to the edac class of 'edac' + */ +struct sysdev_class *edac_get_edac_class(void) +{ + struct sysdev_class *classptr = NULL; + + if (edac_class_valid) + classptr = &edac_class; + + return classptr; +} + +/* + * edac_register_sysfs_edac_name() + * + * register the 'edac' into /sys/devices/system + * + * return: + * 0 success + * !0 error + */ +static int edac_register_sysfs_edac_name(void) +{ + int err; + + /* create the /sys/devices/system/edac directory */ + err = sysdev_class_register(&edac_class); + + if (err) { + debugf1("%s() error=%d\n", __func__, err); + return err; + } + + edac_class_valid = 1; + return 0; +} + +/* + * sysdev_class_unregister() + * + * unregister the 'edac' from /sys/devices/system + */ +static void edac_unregister_sysfs_edac_name(void) +{ + /* only if currently registered, then unregister it */ + if (edac_class_valid) + sysdev_class_unregister(&edac_class); + + edac_class_valid = 0; +} + +/* + * edac_workqueue_setup + * initialize the edac work queue for polling operations + */ +static int edac_workqueue_setup(void) +{ + edac_workqueue = create_singlethread_workqueue("edac-poller"); + if (edac_workqueue == NULL) + return -ENODEV; + else + return 0; +} + +/* + * edac_workqueue_teardown + * teardown the edac workqueue + */ +static void edac_workqueue_teardown(void) +{ + if (edac_workqueue) { + flush_workqueue(edac_workqueue); + destroy_workqueue(edac_workqueue); + edac_workqueue = NULL; + } +} + +/* + * edac_init + * module initialization entry point + */ +static int __init edac_init(void) +{ + int err = 0; + + edac_printk(KERN_INFO, EDAC_MC, EDAC_VERSION "\n"); + + /* + * Harvest and clear any boot/initialization PCI parity errors + * + * FIXME: This only clears errors logged by devices present at time of + * module initialization. We should also do an initial clear + * of each newly hotplugged device. 
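The "edac-poller" workqueue created by edac_workqueue_setup() above is what replaces the old dedicated kernel thread: each polled instance owns a struct delayed_work that re-arms itself on the shared queue. A hedged sketch of that pattern, written in the same style as the PCI poller later in this series (the cast from work_struct mirrors that code; my_poll_ctx and the check hook are illustrative, and edac_workqueue is the pointer defined in this file).

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct my_poll_ctx {
	struct delayed_work work;
	unsigned int msec;
};

static void my_poll_fn(struct work_struct *work_req)
{
	struct delayed_work *d_work = (struct delayed_work *)work_req;
	struct my_poll_ctx *ctx = container_of(d_work, struct my_poll_ctx, work);

	/* run this instance's ->edac_check() style hook here ... */

	/* then re-arm on the shared EDAC workqueue */
	queue_delayed_work(edac_workqueue, &ctx->work,
			   msecs_to_jiffies(ctx->msec));
}

static void my_poll_start(struct my_poll_ctx *ctx, unsigned int msec)
{
	ctx->msec = msec;
	INIT_DELAYED_WORK(&ctx->work, my_poll_fn);
	queue_delayed_work(edac_workqueue, &ctx->work, msecs_to_jiffies(msec));
}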
+ */ + edac_pci_clear_parity_errors(); + + /* + * perform the registration of the /sys/devices/system/edac class object + */ + if (edac_register_sysfs_edac_name()) { + edac_printk(KERN_ERR, EDAC_MC, + "Error initializing 'edac' kobject\n"); + err = -ENODEV; + goto error; + } + + /* + * now set up the mc_kset under the edac class object + */ + err = edac_sysfs_setup_mc_kset(); + if (err) + goto sysfs_setup_fail; + + /* Setup/Initialize the workq for this core */ + err = edac_workqueue_setup(); + if (err) { + edac_printk(KERN_ERR, EDAC_MC, "init WorkQueue failure\n"); + goto workq_fail; + } + + return 0; + + /* Error teardown stack */ +workq_fail: + edac_sysfs_teardown_mc_kset(); + +sysfs_setup_fail: + edac_unregister_sysfs_edac_name(); + +error: + return err; +} + +/* + * edac_exit() + * module exit/termination function + */ +static void __exit edac_exit(void) +{ + debugf0("%s()\n", __func__); + + /* tear down the various subsystems */ + edac_workqueue_teardown(); + edac_sysfs_teardown_mc_kset(); + edac_unregister_sysfs_edac_name(); +} + +/* + * Inform the kernel of our entry and exit points + */ +module_init(edac_init); +module_exit(edac_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Doug Thompson www.softwarebitmaker.com, et al"); +MODULE_DESCRIPTION("Core library routines for EDAC reporting"); + +/* refer to *_sysfs.c files for parameters that are exported via sysfs */ + +#ifdef CONFIG_EDAC_DEBUG +module_param(edac_debug_level, int, 0644); +MODULE_PARM_DESC(edac_debug_level, "Debug level"); +#endif diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h new file mode 100644 index 000000000000..a2134dfc3cc6 --- /dev/null +++ b/drivers/edac/edac_module.h @@ -0,0 +1,77 @@ + +/* + * edac_module.h + * + * For defining functions/data for within the EDAC_CORE module only + * + * written by doug thompson <norsk5@xmission.h> + */ + +#ifndef __EDAC_MODULE_H__ +#define __EDAC_MODULE_H__ + +#include <linux/sysdev.h> + +#include "edac_core.h" + +/* + * INTERNAL EDAC MODULE: + * EDAC memory controller sysfs create/remove functions + * and setup/teardown functions + * + * edac_mc objects + */ +extern int edac_sysfs_setup_mc_kset(void); +extern void edac_sysfs_teardown_mc_kset(void); +extern int edac_mc_register_sysfs_main_kobj(struct mem_ctl_info *mci); +extern void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci); +extern int edac_create_sysfs_mci_device(struct mem_ctl_info *mci); +extern void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci); +extern void edac_check_mc_devices(void); +extern int edac_get_log_ue(void); +extern int edac_get_log_ce(void); +extern int edac_get_panic_on_ue(void); +extern int edac_mc_get_log_ue(void); +extern int edac_mc_get_log_ce(void); +extern int edac_mc_get_panic_on_ue(void); +extern int edac_get_poll_msec(void); +extern int edac_mc_get_poll_msec(void); + +extern int edac_device_register_sysfs_main_kobj( + struct edac_device_ctl_info *edac_dev); +extern void edac_device_unregister_sysfs_main_kobj( + struct edac_device_ctl_info *edac_dev); +extern int edac_device_create_sysfs(struct edac_device_ctl_info *edac_dev); +extern void edac_device_remove_sysfs(struct edac_device_ctl_info *edac_dev); +extern struct sysdev_class *edac_get_edac_class(void); + +/* edac core workqueue: single CPU mode */ +extern struct workqueue_struct *edac_workqueue; +extern void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev, + unsigned msec); +extern void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev); +extern void 
edac_device_reset_delay_period(struct edac_device_ctl_info + *edac_dev, unsigned long value); +extern void *edac_align_ptr(void *ptr, unsigned size); + +/* + * EDAC PCI functions + */ +#ifdef CONFIG_PCI +extern void edac_pci_do_parity_check(void); +extern void edac_pci_clear_parity_errors(void); +extern int edac_sysfs_pci_setup(void); +extern void edac_sysfs_pci_teardown(void); +extern int edac_pci_get_check_errors(void); +extern int edac_pci_get_poll_msec(void); +#else /* CONFIG_PCI */ +/* pre-process these away */ +#define edac_pci_do_parity_check() +#define edac_pci_clear_parity_errors() +#define edac_sysfs_pci_setup() (0) +#define edac_sysfs_pci_teardown() +#define edac_pci_get_check_errors() +#define edac_pci_get_poll_msec() +#endif /* CONFIG_PCI */ + +#endif /* __EDAC_MODULE_H__ */ diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c new file mode 100644 index 000000000000..d9cd5e048cee --- /dev/null +++ b/drivers/edac/edac_pci.c @@ -0,0 +1,433 @@ +/* + * EDAC PCI component + * + * Author: Dave Jiang <djiang@mvista.com> + * + * 2007 (c) MontaVista Software, Inc. This file is licensed under + * the terms of the GNU General Public License version 2. This program + * is licensed "as is" without any warranty of any kind, whether express + * or implied. + * + */ +#include <linux/module.h> +#include <linux/types.h> +#include <linux/smp.h> +#include <linux/init.h> +#include <linux/sysctl.h> +#include <linux/highmem.h> +#include <linux/timer.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/list.h> +#include <linux/sysdev.h> +#include <linux/ctype.h> +#include <linux/workqueue.h> +#include <asm/uaccess.h> +#include <asm/page.h> + +#include "edac_core.h" +#include "edac_module.h" + +static DEFINE_MUTEX(edac_pci_ctls_mutex); +static struct list_head edac_pci_list = LIST_HEAD_INIT(edac_pci_list); + +static inline void edac_lock_pci_list(void) +{ + mutex_lock(&edac_pci_ctls_mutex); +} + +static inline void edac_unlock_pci_list(void) +{ + mutex_unlock(&edac_pci_ctls_mutex); +} + +/* + * The alloc() and free() functions for the 'edac_pci' control info + * structure. The chip driver will allocate one of these for each + * edac_pci it is going to control/register with the EDAC CORE. + */ +struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt, + const char *edac_pci_name) +{ + struct edac_pci_ctl_info *pci; + void *pvt; + unsigned int size; + + pci = (struct edac_pci_ctl_info *)0; + pvt = edac_align_ptr(&pci[1], sz_pvt); + size = ((unsigned long)pvt) + sz_pvt; + + if ((pci = kzalloc(size, GFP_KERNEL)) == NULL) + return NULL; + + pvt = sz_pvt ? 
((char *)pci) + ((unsigned long)pvt) : NULL; + + pci->pvt_info = pvt; + + pci->op_state = OP_ALLOC; + + snprintf(pci->name, strlen(edac_pci_name) + 1, "%s", edac_pci_name); + + return pci; +} + +EXPORT_SYMBOL_GPL(edac_pci_alloc_ctl_info); + +/* + * edac_pci_free_ctl_info() + * frees the memory allocated by edac_pci_alloc_ctl_info() function + */ +void edac_pci_free_ctl_info(struct edac_pci_ctl_info *pci) +{ + kfree(pci); +} + +EXPORT_SYMBOL_GPL(edac_pci_free_ctl_info); + +/* + * find_edac_pci_by_dev() + * scans the edac_pci list for a specific 'struct device *' + */ +static struct edac_pci_ctl_info *find_edac_pci_by_dev(struct device *dev) +{ + struct edac_pci_ctl_info *pci; + struct list_head *item; + + debugf3("%s()\n", __func__); + + list_for_each(item, &edac_pci_list) { + pci = list_entry(item, struct edac_pci_ctl_info, link); + + if (pci->dev == dev) + return pci; + } + + return NULL; +} + +/* + * add_edac_pci_to_global_list + * Before calling this function, caller must assign a unique value to + * edac_dev->pci_idx. + * Return: + * 0 on success + * 1 on failure + */ +static int add_edac_pci_to_global_list(struct edac_pci_ctl_info *pci) +{ + struct list_head *item, *insert_before; + struct edac_pci_ctl_info *rover; + + insert_before = &edac_pci_list; + + /* Determine if already on the list */ + if (unlikely((rover = find_edac_pci_by_dev(pci->dev)) != NULL)) + goto fail0; + + /* Insert in ascending order by 'pci_idx', so find position */ + list_for_each(item, &edac_pci_list) { + rover = list_entry(item, struct edac_pci_ctl_info, link); + + if (rover->pci_idx >= pci->pci_idx) { + if (unlikely(rover->pci_idx == pci->pci_idx)) + goto fail1; + + insert_before = item; + break; + } + } + + list_add_tail_rcu(&pci->link, insert_before); + return 0; + +fail0: + edac_printk(KERN_WARNING, EDAC_PCI, + "%s (%s) %s %s already assigned %d\n", + rover->dev->bus_id, dev_name(rover), + rover->mod_name, rover->ctl_name, rover->pci_idx); + return 1; + +fail1: + edac_printk(KERN_WARNING, EDAC_PCI, + "but in low-level driver: attempt to assign\n" + "\tduplicate pci_idx %d in %s()\n", rover->pci_idx, + __func__); + return 1; +} + +/* + * complete_edac_pci_list_del + */ +static void complete_edac_pci_list_del(struct rcu_head *head) +{ + struct edac_pci_ctl_info *pci; + + pci = container_of(head, struct edac_pci_ctl_info, rcu); + INIT_LIST_HEAD(&pci->link); + complete(&pci->complete); +} + +/* + * del_edac_pci_from_global_list + */ +static void del_edac_pci_from_global_list(struct edac_pci_ctl_info *pci) +{ + list_del_rcu(&pci->link); + init_completion(&pci->complete); + call_rcu(&pci->rcu, complete_edac_pci_list_del); + wait_for_completion(&pci->complete); +} + +/* + * edac_pci_find() + * Search for an edac_pci_ctl_info structure whose index is 'idx' + * + * If found, return a pointer to the structure + * Else return NULL. + * + * Caller must hold pci_ctls_mutex. 
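The allocation at the top of this file is worth unpacking: edac_pci_alloc_ctl_info() first computes the aligned private-data offset against a NULL base, sizes a single kzalloc() to cover both the control structure and the driver's private area, then re-applies the offset to the real pointer. Nothing new here, just the same arithmetic restated with the intent spelled out in comments.

static struct edac_pci_ctl_info *alloc_ctl_like_above(unsigned int sz_pvt)
{
	struct edac_pci_ctl_info *pci;
	void *pvt;
	unsigned int size;

	pci = (struct edac_pci_ctl_info *)0;	/* NULL base: &pci[1] is just sizeof(*pci) */
	pvt = edac_align_ptr(&pci[1], sz_pvt);	/* aligned offset of the private area */
	size = ((unsigned long)pvt) + sz_pvt;	/* one kzalloc() covers both regions */

	pci = kzalloc(size, GFP_KERNEL);
	if (!pci)
		return NULL;

	/* re-apply the offset to the real base */
	pci->pvt_info = sz_pvt ? (char *)pci + (unsigned long)pvt : NULL;
	return pci;
}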
+ */ +struct edac_pci_ctl_info *edac_pci_find(int idx) +{ + struct list_head *item; + struct edac_pci_ctl_info *pci; + + /* Iterage over list, looking for exact match of ID */ + list_for_each(item, &edac_pci_list) { + pci = list_entry(item, struct edac_pci_ctl_info, link); + + if (pci->pci_idx >= idx) { + if (pci->pci_idx == idx) + return pci; + + /* not on list, so terminate early */ + break; + } + } + + return NULL; +} + +EXPORT_SYMBOL_GPL(edac_pci_find); + +/* + * edac_pci_workq_function() + * performs the operation scheduled by a workq request + */ +static void edac_pci_workq_function(struct work_struct *work_req) +{ + struct delayed_work *d_work = (struct delayed_work *)work_req; + struct edac_pci_ctl_info *pci = to_edac_pci_ctl_work(d_work); + + edac_lock_pci_list(); + + if ((pci->op_state == OP_RUNNING_POLL) && + (pci->edac_check != NULL) && (edac_pci_get_check_errors())) + pci->edac_check(pci); + + edac_unlock_pci_list(); + + /* Reschedule */ + queue_delayed_work(edac_workqueue, &pci->work, + msecs_to_jiffies(edac_pci_get_poll_msec())); +} + +/* + * edac_pci_workq_setup() + * initialize a workq item for this edac_pci instance + * passing in the new delay period in msec + */ +static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci, + unsigned int msec) +{ + debugf0("%s()\n", __func__); + + INIT_DELAYED_WORK(&pci->work, edac_pci_workq_function); + queue_delayed_work(edac_workqueue, &pci->work, + msecs_to_jiffies(edac_pci_get_poll_msec())); +} + +/* + * edac_pci_workq_teardown() + * stop the workq processing on this edac_pci instance + */ +static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci) +{ + int status; + + status = cancel_delayed_work(&pci->work); + if (status == 0) + flush_workqueue(edac_workqueue); +} + +/* + * edac_pci_reset_delay_period + */ +void edac_pci_reset_delay_period(struct edac_pci_ctl_info *pci, + unsigned long value) +{ + edac_lock_pci_list(); + + edac_pci_workq_teardown(pci); + + edac_pci_workq_setup(pci, value); + + edac_unlock_pci_list(); +} + +EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period); + +/* + * edac_pci_add_device: Insert the 'edac_dev' structure into the + * edac_pci global list and create sysfs entries associated with + * edac_pci structure. + * @pci: pointer to the edac_device structure to be added to the list + * @edac_idx: A unique numeric identifier to be assigned to the + * 'edac_pci' structure. 
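edac_pci_add_device(), documented here, is the path for drivers that bring their own error probe: when the caller fills in an edac_check hook, the body below switches the instance to OP_RUNNING_POLL and services it from the shared workqueue (1000 ms by default). A hedged sketch of such a caller; my_chip_scan_errors() is a stand-in for real chipset register access, not part of this patch.

static void my_pci_check(struct edac_pci_ctl_info *pci)
{
	if (my_chip_scan_errors())	/* illustrative hardware probe */
		edac_pci_handle_pe(pci, "chip flagged a PCI parity event");
}

static int my_register_pci_watcher(struct device *dev, int idx)
{
	struct edac_pci_ctl_info *pci;

	pci = edac_pci_alloc_ctl_info(0, "my_pci_ctl");
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->mod_name = "my_edac_driver";
	pci->ctl_name = "my_pci_ctl";
	pci->edac_check = my_pci_check;

	if (edac_pci_add_device(pci, idx)) {
		edac_pci_free_ctl_info(pci);
		return -ENODEV;
	}
	return 0;
}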
+ * + * Return: + * 0 Success + * !0 Failure + */ +int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx) +{ + debugf0("%s()\n", __func__); + + pci->pci_idx = edac_idx; + + edac_lock_pci_list(); + + if (add_edac_pci_to_global_list(pci)) + goto fail0; + + pci->start_time = jiffies; + + if (edac_pci_create_sysfs(pci)) { + edac_pci_printk(pci, KERN_WARNING, + "failed to create sysfs pci\n"); + goto fail1; + } + + if (pci->edac_check != NULL) { + pci->op_state = OP_RUNNING_POLL; + + edac_pci_workq_setup(pci, 1000); + } else { + pci->op_state = OP_RUNNING_INTERRUPT; + } + + edac_pci_printk(pci, KERN_INFO, + "Giving out device to module '%s' controller '%s':" + " DEV '%s' (%s)\n", + pci->mod_name, + pci->ctl_name, + dev_name(pci), edac_op_state_to_string(pci->op_state)); + + edac_unlock_pci_list(); + return 0; + +fail1: + del_edac_pci_from_global_list(pci); +fail0: + edac_unlock_pci_list(); + return 1; +} + +EXPORT_SYMBOL_GPL(edac_pci_add_device); + +/* + * edac_pci_del_device() + * Remove sysfs entries for specified edac_pci structure and + * then remove edac_pci structure from global list + * + * @dev: + * Pointer to 'struct device' representing edac_pci structure + * to remove + * + * Return: + * Pointer to removed edac_pci structure, + * or NULL if device not found + */ +struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev) +{ + struct edac_pci_ctl_info *pci; + + debugf0("%s()\n", __func__); + + edac_lock_pci_list(); + + if ((pci = find_edac_pci_by_dev(dev)) == NULL) { + edac_unlock_pci_list(); + return NULL; + } + + pci->op_state = OP_OFFLINE; + + edac_pci_workq_teardown(pci); + + edac_pci_remove_sysfs(pci); + + del_edac_pci_from_global_list(pci); + + edac_unlock_pci_list(); + + edac_printk(KERN_INFO, EDAC_PCI, + "Removed device %d for %s %s: DEV %s\n", + pci->pci_idx, pci->mod_name, pci->ctl_name, dev_name(pci)); + + return pci; +} + +EXPORT_SYMBOL_GPL(edac_pci_del_device); + +void edac_pci_generic_check(struct edac_pci_ctl_info *pci) +{ + edac_pci_do_parity_check(); +} + +static int edac_pci_idx; +#define EDAC_PCI_GENCTL_NAME "EDAC PCI controller" + +struct edac_pci_gen_data { + int edac_idx; +}; + +struct edac_pci_ctl_info *edac_pci_create_generic_ctl(struct device *dev, + const char *mod_name) +{ + struct edac_pci_ctl_info *pci; + struct edac_pci_gen_data *pdata; + + pci = edac_pci_alloc_ctl_info(sizeof(*pdata), EDAC_PCI_GENCTL_NAME); + if (!pci) + return NULL; + + pdata = pci->pvt_info; + pci->dev = dev; + dev_set_drvdata(pci->dev, pci); + pci->dev_name = pci_name(to_pci_dev(dev)); + + pci->mod_name = mod_name; + pci->ctl_name = EDAC_PCI_GENCTL_NAME; + pci->edac_check = edac_pci_generic_check; + + pdata->edac_idx = edac_pci_idx++; + + if (edac_pci_add_device(pci, pdata->edac_idx) > 0) { + debugf3("%s(): failed edac_pci_add_device()\n", __func__); + edac_pci_free_ctl_info(pci); + return NULL; + } + + return pci; +} + +EXPORT_SYMBOL_GPL(edac_pci_create_generic_ctl); + +void edac_pci_release_generic_ctl(struct edac_pci_ctl_info *pci) +{ + edac_pci_del_device(pci->dev); + edac_pci_free_ctl_info(pci); +} + +EXPORT_SYMBOL_GPL(edac_pci_release_generic_ctl); diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c new file mode 100644 index 000000000000..fac94cae2c3d --- /dev/null +++ b/drivers/edac/edac_pci_sysfs.c @@ -0,0 +1,620 @@ +/* + * (C) 2005, 2006 Linux Networx (http://lnxi.com) + * This file may be distributed under the terms of the + * GNU General Public License. 
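For drivers without chipset-specific PCI error registers, edac_pci_create_generic_ctl() at the end of edac_pci.c above wires in edac_pci_generic_check(), which simply runs the generic bus-wide parity scan. A hedged sketch of a driver using that path; the probe/remove wrappers are hypothetical, but the i3000 driver later in this patch keeps exactly this kind of i3000_pci pointer.

static struct edac_pci_ctl_info *my_pci_ctl;

static void my_probe_pci_watch(struct pci_dev *pdev)
{
	/* one generic control per chipset device, polled via
	 * edac_pci_generic_check()
	 */
	my_pci_ctl = edac_pci_create_generic_ctl(&pdev->dev, "my_edac_driver");
	if (!my_pci_ctl)
		printk(KERN_WARNING "EDAC: could not create generic PCI control\n");
}

static void my_remove_pci_watch(void)
{
	if (my_pci_ctl) {
		edac_pci_release_generic_ctl(my_pci_ctl);
		my_pci_ctl = NULL;
	}
}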
+ * + * Written Doug Thompson <norsk5@xmission.com> + * + */ +#include <linux/module.h> +#include <linux/sysdev.h> +#include <linux/ctype.h> + +#include "edac_core.h" +#include "edac_module.h" + +#ifdef CONFIG_PCI + +#define EDAC_PCI_SYMLINK "device" + +static int check_pci_errors; /* default YES check PCI parity */ +static int edac_pci_panic_on_pe; /* default no panic on PCI Parity */ +static int edac_pci_log_pe = 1; /* log PCI parity errors */ +static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */ +static atomic_t pci_parity_count = ATOMIC_INIT(0); +static atomic_t pci_nonparity_count = ATOMIC_INIT(0); +static int edac_pci_poll_msec = 1000; + +static struct kobject edac_pci_kobj; /* /sys/devices/system/edac/pci */ +static struct completion edac_pci_kobj_complete; +static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0); + +int edac_pci_get_check_errors(void) +{ + return check_pci_errors; +} + +int edac_pci_get_log_pe(void) +{ + return edac_pci_log_pe; +} + +int edac_pci_get_log_npe(void) +{ + return edac_pci_log_npe; +} + +int edac_pci_get_panic_on_pe(void) +{ + return edac_pci_panic_on_pe; +} + +int edac_pci_get_poll_msec(void) +{ + return edac_pci_poll_msec; +} + +/**************************** EDAC PCI sysfs instance *******************/ +static ssize_t instance_pe_count_show(struct edac_pci_ctl_info *pci, char *data) +{ + return sprintf(data, "%u\n", atomic_read(&pci->counters.pe_count)); +} + +static ssize_t instance_npe_count_show(struct edac_pci_ctl_info *pci, + char *data) +{ + return sprintf(data, "%u\n", atomic_read(&pci->counters.npe_count)); +} + +#define to_instance(k) container_of(k, struct edac_pci_ctl_info, kobj) +#define to_instance_attr(a) container_of(a, struct instance_attribute, attr) + +/* DEVICE instance kobject release() function */ +static void edac_pci_instance_release(struct kobject *kobj) +{ + struct edac_pci_ctl_info *pci; + + debugf1("%s()\n", __func__); + + pci = to_instance(kobj); + complete(&pci->kobj_complete); +} + +/* instance specific attribute structure */ +struct instance_attribute { + struct attribute attr; + ssize_t(*show) (struct edac_pci_ctl_info *, char *); + ssize_t(*store) (struct edac_pci_ctl_info *, const char *, size_t); +}; + +/* Function to 'show' fields from the edac_pci 'instance' structure */ +static ssize_t edac_pci_instance_show(struct kobject *kobj, + struct attribute *attr, char *buffer) +{ + struct edac_pci_ctl_info *pci = to_instance(kobj); + struct instance_attribute *instance_attr = to_instance_attr(attr); + + if (instance_attr->show) + return instance_attr->show(pci, buffer); + return -EIO; +} + +/* Function to 'store' fields into the edac_pci 'instance' structure */ +static ssize_t edac_pci_instance_store(struct kobject *kobj, + struct attribute *attr, + const char *buffer, size_t count) +{ + struct edac_pci_ctl_info *pci = to_instance(kobj); + struct instance_attribute *instance_attr = to_instance_attr(attr); + + if (instance_attr->store) + return instance_attr->store(pci, buffer, count); + return -EIO; +} + +static struct sysfs_ops pci_instance_ops = { + .show = edac_pci_instance_show, + .store = edac_pci_instance_store +}; + +#define INSTANCE_ATTR(_name, _mode, _show, _store) \ +static struct instance_attribute attr_instance_##_name = { \ + .attr = {.name = __stringify(_name), .mode = _mode }, \ + .show = _show, \ + .store = _store, \ +}; + +INSTANCE_ATTR(pe_count, S_IRUGO, instance_pe_count_show, NULL); +INSTANCE_ATTR(npe_count, S_IRUGO, instance_npe_count_show, NULL); + +/* pci instance attributes */ 
+static struct instance_attribute *pci_instance_attr[] = { + &attr_instance_pe_count, + &attr_instance_npe_count, + NULL +}; + +/* the ktype for pci instance */ +static struct kobj_type ktype_pci_instance = { + .release = edac_pci_instance_release, + .sysfs_ops = &pci_instance_ops, + .default_attrs = (struct attribute **)pci_instance_attr, +}; + +static int edac_pci_create_instance_kobj(struct edac_pci_ctl_info *pci, int idx) +{ + int err; + + pci->kobj.parent = &edac_pci_kobj; + pci->kobj.ktype = &ktype_pci_instance; + + err = kobject_set_name(&pci->kobj, "pci%d", idx); + if (err) + return err; + + err = kobject_register(&pci->kobj); + if (err != 0) { + debugf2("%s() failed to register instance pci%d\n", + __func__, idx); + return err; + } + + debugf1("%s() Register instance 'pci%d' kobject\n", __func__, idx); + + return 0; +} + +static void +edac_pci_delete_instance_kobj(struct edac_pci_ctl_info *pci, int idx) +{ + init_completion(&pci->kobj_complete); + kobject_unregister(&pci->kobj); + wait_for_completion(&pci->kobj_complete); +} + +/***************************** EDAC PCI sysfs root **********************/ +#define to_edacpci(k) container_of(k, struct edac_pci_ctl_info, kobj) +#define to_edacpci_attr(a) container_of(a, struct edac_pci_attr, attr) + +static ssize_t edac_pci_int_show(void *ptr, char *buffer) +{ + int *value = ptr; + return sprintf(buffer, "%d\n", *value); +} + +static ssize_t edac_pci_int_store(void *ptr, const char *buffer, size_t count) +{ + int *value = ptr; + + if (isdigit(*buffer)) + *value = simple_strtoul(buffer, NULL, 0); + + return count; +} + +struct edac_pci_dev_attribute { + struct attribute attr; + void *value; + ssize_t(*show) (void *, char *); + ssize_t(*store) (void *, const char *, size_t); +}; + +/* Set of show/store abstract level functions for PCI Parity object */ +static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr, + char *buffer) +{ + struct edac_pci_dev_attribute *edac_pci_dev; + edac_pci_dev = (struct edac_pci_dev_attribute *)attr; + + if (edac_pci_dev->show) + return edac_pci_dev->show(edac_pci_dev->value, buffer); + return -EIO; +} + +static ssize_t edac_pci_dev_store(struct kobject *kobj, + struct attribute *attr, const char *buffer, + size_t count) +{ + struct edac_pci_dev_attribute *edac_pci_dev; + edac_pci_dev = (struct edac_pci_dev_attribute *)attr; + + if (edac_pci_dev->show) + return edac_pci_dev->store(edac_pci_dev->value, buffer, count); + return -EIO; +} + +static struct sysfs_ops edac_pci_sysfs_ops = { + .show = edac_pci_dev_show, + .store = edac_pci_dev_store +}; + +#define EDAC_PCI_ATTR(_name,_mode,_show,_store) \ +static struct edac_pci_dev_attribute edac_pci_attr_##_name = { \ + .attr = {.name = __stringify(_name), .mode = _mode }, \ + .value = &_name, \ + .show = _show, \ + .store = _store, \ +}; + +#define EDAC_PCI_STRING_ATTR(_name,_data,_mode,_show,_store) \ +static struct edac_pci_dev_attribute edac_pci_attr_##_name = { \ + .attr = {.name = __stringify(_name), .mode = _mode }, \ + .value = _data, \ + .show = _show, \ + .store = _store, \ +}; + +/* PCI Parity control files */ +EDAC_PCI_ATTR(check_pci_errors, S_IRUGO | S_IWUSR, edac_pci_int_show, + edac_pci_int_store); +EDAC_PCI_ATTR(edac_pci_log_pe, S_IRUGO | S_IWUSR, edac_pci_int_show, + edac_pci_int_store); +EDAC_PCI_ATTR(edac_pci_log_npe, S_IRUGO | S_IWUSR, edac_pci_int_show, + edac_pci_int_store); +EDAC_PCI_ATTR(edac_pci_panic_on_pe, S_IRUGO | S_IWUSR, edac_pci_int_show, + edac_pci_int_store); +EDAC_PCI_ATTR(pci_parity_count, S_IRUGO, 
edac_pci_int_show, NULL); +EDAC_PCI_ATTR(pci_nonparity_count, S_IRUGO, edac_pci_int_show, NULL); + +/* Base Attributes of the memory ECC object */ +static struct edac_pci_dev_attribute *edac_pci_attr[] = { + &edac_pci_attr_check_pci_errors, + &edac_pci_attr_edac_pci_log_pe, + &edac_pci_attr_edac_pci_log_npe, + &edac_pci_attr_edac_pci_panic_on_pe, + &edac_pci_attr_pci_parity_count, + &edac_pci_attr_pci_nonparity_count, + NULL, +}; + +/* No memory to release */ +static void edac_pci_release(struct kobject *kobj) +{ + struct edac_pci_ctl_info *pci; + + pci = to_edacpci(kobj); + + debugf1("%s()\n", __func__); + complete(&pci->kobj_complete); +} + +static struct kobj_type ktype_edac_pci = { + .release = edac_pci_release, + .sysfs_ops = &edac_pci_sysfs_ops, + .default_attrs = (struct attribute **)edac_pci_attr, +}; + +/** + * edac_sysfs_pci_setup() + * + * setup the sysfs for EDAC PCI attributes + * assumes edac_class has already been initialized + */ +int edac_pci_register_main_kobj(void) +{ + int err; + struct sysdev_class *edac_class; + + debugf1("%s()\n", __func__); + + edac_class = edac_get_edac_class(); + if (edac_class == NULL) { + debugf1("%s() no edac_class\n", __func__); + return -ENODEV; + } + + edac_pci_kobj.ktype = &ktype_edac_pci; + + edac_pci_kobj.parent = &edac_class->kset.kobj; + + err = kobject_set_name(&edac_pci_kobj, "pci"); + if (err) + return err; + + /* Instanstiate the pci object */ + /* FIXME: maybe new sysdev_create_subdir() */ + err = kobject_register(&edac_pci_kobj); + + if (err) { + debugf1("Failed to register '.../edac/pci'\n"); + return err; + } + + debugf1("Registered '.../edac/pci' kobject\n"); + + return 0; +} + +/* + * edac_pci_unregister_main_kobj() + * + * perform the sysfs teardown for the PCI attributes + */ +void edac_pci_unregister_main_kobj(void) +{ + debugf0("%s()\n", __func__); + init_completion(&edac_pci_kobj_complete); + kobject_unregister(&edac_pci_kobj); + wait_for_completion(&edac_pci_kobj_complete); +} + +int edac_pci_create_sysfs(struct edac_pci_ctl_info *pci) +{ + int err; + struct kobject *edac_kobj = &pci->kobj; + + if (atomic_inc_return(&edac_pci_sysfs_refcount) == 1) { + err = edac_pci_register_main_kobj(); + if (err) { + atomic_dec(&edac_pci_sysfs_refcount); + return err; + } + } + + err = edac_pci_create_instance_kobj(pci, pci->pci_idx); + if (err) { + if (atomic_dec_return(&edac_pci_sysfs_refcount) == 0) + edac_pci_unregister_main_kobj(); + } + + debugf0("%s() idx=%d\n", __func__, pci->pci_idx); + + err = sysfs_create_link(edac_kobj, &pci->dev->kobj, EDAC_PCI_SYMLINK); + if (err) { + debugf0("%s() sysfs_create_link() returned err= %d\n", + __func__, err); + return err; + } + + return 0; +} + +void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci) +{ + debugf0("%s()\n", __func__); + + edac_pci_delete_instance_kobj(pci, pci->pci_idx); + + sysfs_remove_link(&pci->kobj, EDAC_PCI_SYMLINK); + + if (atomic_dec_return(&edac_pci_sysfs_refcount) == 0) + edac_pci_unregister_main_kobj(); +} + +/************************ PCI error handling *************************/ +static u16 get_pci_parity_status(struct pci_dev *dev, int secondary) +{ + int where; + u16 status; + + where = secondary ? PCI_SEC_STATUS : PCI_STATUS; + pci_read_config_word(dev, where, &status); + + /* If we get back 0xFFFF then we must suspect that the card has been + * pulled but the Linux PCI layer has not yet finished cleaning up. 
+ * We don't want to report on such devices + */ + + if (status == 0xFFFF) { + u32 sanity; + + pci_read_config_dword(dev, 0, &sanity); + + if (sanity == 0xFFFFFFFF) + return 0; + } + + status &= PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR | + PCI_STATUS_PARITY; + + if (status) + /* reset only the bits we are interested in */ + pci_write_config_word(dev, where, status); + + return status; +} + +typedef void (*pci_parity_check_fn_t) (struct pci_dev * dev); + +/* Clear any PCI parity errors logged by this device. */ +static void edac_pci_dev_parity_clear(struct pci_dev *dev) +{ + u8 header_type; + + get_pci_parity_status(dev, 0); + + /* read the device TYPE, looking for bridges */ + pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type); + + if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) + get_pci_parity_status(dev, 1); +} + +/* + * PCI Parity polling + * + */ +static void edac_pci_dev_parity_test(struct pci_dev *dev) +{ + u16 status; + u8 header_type; + + /* read the STATUS register on this device + */ + status = get_pci_parity_status(dev, 0); + + debugf2("PCI STATUS= 0x%04x %s\n", status, dev->dev.bus_id); + + /* check the status reg for errors */ + if (status) { + if (status & (PCI_STATUS_SIG_SYSTEM_ERROR)) { + edac_printk(KERN_CRIT, EDAC_PCI, + "Signaled System Error on %s\n", + pci_name(dev)); + atomic_inc(&pci_nonparity_count); + } + + if (status & (PCI_STATUS_PARITY)) { + edac_printk(KERN_CRIT, EDAC_PCI, + "Master Data Parity Error on %s\n", + pci_name(dev)); + + atomic_inc(&pci_parity_count); + } + + if (status & (PCI_STATUS_DETECTED_PARITY)) { + edac_printk(KERN_CRIT, EDAC_PCI, + "Detected Parity Error on %s\n", + pci_name(dev)); + + atomic_inc(&pci_parity_count); + } + } + + /* read the device TYPE, looking for bridges */ + pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type); + + debugf2("PCI HEADER TYPE= 0x%02x %s\n", header_type, dev->dev.bus_id); + + if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { + /* On bridges, need to examine secondary status register */ + status = get_pci_parity_status(dev, 1); + + debugf2("PCI SEC_STATUS= 0x%04x %s\n", status, dev->dev.bus_id); + + /* check the secondary status reg for errors */ + if (status) { + if (status & (PCI_STATUS_SIG_SYSTEM_ERROR)) { + edac_printk(KERN_CRIT, EDAC_PCI, "Bridge " + "Signaled System Error on %s\n", + pci_name(dev)); + atomic_inc(&pci_nonparity_count); + } + + if (status & (PCI_STATUS_PARITY)) { + edac_printk(KERN_CRIT, EDAC_PCI, "Bridge " + "Master Data Parity Error on " + "%s\n", pci_name(dev)); + + atomic_inc(&pci_parity_count); + } + + if (status & (PCI_STATUS_DETECTED_PARITY)) { + edac_printk(KERN_CRIT, EDAC_PCI, "Bridge " + "Detected Parity Error on %s\n", + pci_name(dev)); + + atomic_inc(&pci_parity_count); + } + } + } +} + +/* + * pci_dev parity list iterator + * Scan the PCI device list for one iteration, looking for SERRORs + * Master Parity ERRORS or Parity ERRORs on primary or secondary devices + */ +static inline void edac_pci_dev_parity_iterator(pci_parity_check_fn_t fn) +{ + struct pci_dev *dev = NULL; + + /* request for kernel access to the next PCI device, if any, + * and while we are looking at it have its reference count + * bumped until we are done with it + */ + while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { + fn(dev); + } +} + +/* + * edac_pci_do_parity_check + * + * performs the actual PCI parity check operation + */ +void edac_pci_do_parity_check(void) +{ + unsigned long flags; + int before_count; + + debugf3("%s()\n", __func__); + + if 
(!check_pci_errors) + return; + + before_count = atomic_read(&pci_parity_count); + + /* scan all PCI devices looking for a Parity Error on devices and + * bridges + */ + local_irq_save(flags); + edac_pci_dev_parity_iterator(edac_pci_dev_parity_test); + local_irq_restore(flags); + + /* Only if operator has selected panic on PCI Error */ + if (edac_pci_get_panic_on_pe()) { + /* If the count is different 'after' from 'before' */ + if (before_count != atomic_read(&pci_parity_count)) + panic("EDAC: PCI Parity Error"); + } +} + +void edac_pci_clear_parity_errors(void) +{ + /* Clear any PCI bus parity errors that devices initially have logged + * in their registers. + */ + edac_pci_dev_parity_iterator(edac_pci_dev_parity_clear); +} +void edac_pci_handle_pe(struct edac_pci_ctl_info *pci, const char *msg) +{ + + /* global PE counter incremented by edac_pci_do_parity_check() */ + atomic_inc(&pci->counters.pe_count); + + if (edac_pci_get_log_pe()) + edac_pci_printk(pci, KERN_WARNING, + "Parity Error ctl: %s %d: %s\n", + pci->ctl_name, pci->pci_idx, msg); + + /* + * poke all PCI devices and see which one is the troublemaker + * panic() is called if set + */ + edac_pci_do_parity_check(); +} + +EXPORT_SYMBOL_GPL(edac_pci_handle_pe); + +void edac_pci_handle_npe(struct edac_pci_ctl_info *pci, const char *msg) +{ + + /* global NPE counter incremented by edac_pci_do_parity_check() */ + atomic_inc(&pci->counters.npe_count); + + if (edac_pci_get_log_npe()) + edac_pci_printk(pci, KERN_WARNING, + "Non-Parity Error ctl: %s %d: %s\n", + pci->ctl_name, pci->pci_idx, msg); + + /* + * poke all PCI devices and see which one is the troublemaker + * panic() is called if set + */ + edac_pci_do_parity_check(); +} + +EXPORT_SYMBOL_GPL(edac_pci_handle_npe); + +/* + * Define the PCI parameter to the module + */ +module_param(check_pci_errors, int, 0644); +MODULE_PARM_DESC(check_pci_errors, + "Check for PCI bus parity errors: 0=off 1=on"); +module_param(edac_pci_panic_on_pe, int, 0644); +MODULE_PARM_DESC(edac_pci_panic_on_pe, + "Panic on PCI Bus Parity error: 0=off 1=on"); + +#endif /* CONFIG_PCI */ diff --git a/drivers/edac/edac_stub.c b/drivers/edac/edac_stub.c new file mode 100644 index 000000000000..20b428aa155e --- /dev/null +++ b/drivers/edac/edac_stub.c @@ -0,0 +1,46 @@ +/* + * common EDAC components that must be in kernel + * + * Author: Dave Jiang <djiang@mvista.com> + * + * 2007 (c) MontaVista Software, Inc. This file is licensed under + * the terms of the GNU General Public License version 2. This program + * is licensed "as is" without any warranty of any kind, whether express + * or implied. 
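All of the PCI-parity behaviour above is runtime-tunable: check_pci_errors gates the scan, and edac_pci_panic_on_pe turns a detected parity error into a panic. Besides the module parameters at the bottom of the file, the same knobs are exposed as files under /sys/devices/system/edac/pci/ by the kobject registration earlier in this file. A hedged userspace sketch that flips them on.

#include <stdio.h>

static int write_edac_pci_ctl(const char *name, int val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/devices/system/edac/pci/%s", name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%d\n", val);
	fclose(f);
	return 0;
}

int main(void)
{
	write_edac_pci_ctl("check_pci_errors", 1);	/* enable the polling scan  */
	write_edac_pci_ctl("edac_pci_panic_on_pe", 1);	/* panic when a PE is found */
	return 0;
}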
+ * + */ +#include <linux/module.h> +#include <linux/edac.h> +#include <asm/atomic.h> +#include <asm/edac.h> + +int edac_op_state = EDAC_OPSTATE_INVAL; +EXPORT_SYMBOL_GPL(edac_op_state); + +atomic_t edac_handlers = ATOMIC_INIT(0); +EXPORT_SYMBOL_GPL(edac_handlers); + +int edac_err_assert = 0; +EXPORT_SYMBOL_GPL(edac_err_assert); + +/* + * called to determine if there is an EDAC driver interested in + * knowing an event (such as NMI) occurred + */ +int edac_handler_set(void) +{ + if (edac_op_state == EDAC_OPSTATE_POLL) + return 0; + + return atomic_read(&edac_handlers); +} +EXPORT_SYMBOL_GPL(edac_handler_set); + +/* + * handler for NMI type of interrupts to assert error + */ +void edac_atomic_assert_error(void) +{ + edac_err_assert++; +} +EXPORT_SYMBOL_GPL(edac_atomic_assert_error); diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c new file mode 100644 index 000000000000..0ecfdc432f87 --- /dev/null +++ b/drivers/edac/i3000_edac.c @@ -0,0 +1,506 @@ +/* + * Intel 3000/3010 Memory Controller kernel module + * Copyright (C) 2007 Akamai Technologies, Inc. + * Shamelessly copied from: + * Intel D82875P Memory Controller kernel module + * (C) 2003 Linux Networx (http://lnxi.com) + * + * This file may be distributed under the terms of the + * GNU General Public License. + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/pci_ids.h> +#include <linux/slab.h> +#include "edac_core.h" + +#define I3000_REVISION "1.1" + +#define EDAC_MOD_STR "i3000_edac" + +#define I3000_RANKS 8 +#define I3000_RANKS_PER_CHANNEL 4 +#define I3000_CHANNELS 2 + +/* Intel 3000 register addresses - device 0 function 0 - DRAM Controller */ + +#define I3000_MCHBAR 0x44 /* MCH Memory Mapped Register BAR */ +#define I3000_MCHBAR_MASK 0xffffc000 +#define I3000_MMR_WINDOW_SIZE 16384 + +#define I3000_EDEAP 0x70 /* Extended DRAM Error Address Pointer (8b) + * + * 7:1 reserved + * 0 bit 32 of address + */ +#define I3000_DEAP 0x58 /* DRAM Error Address Pointer (32b) + * + * 31:7 address + * 6:1 reserved + * 0 Error channel 0/1 + */ +#define I3000_DEAP_GRAIN (1 << 7) +#define I3000_DEAP_PFN(edeap, deap) ((((edeap) & 1) << (32 - PAGE_SHIFT)) | \ + ((deap) >> PAGE_SHIFT)) +#define I3000_DEAP_OFFSET(deap) ((deap) & ~(I3000_DEAP_GRAIN-1) & ~PAGE_MASK) +#define I3000_DEAP_CHANNEL(deap) ((deap) & 1) + +#define I3000_DERRSYN 0x5c /* DRAM Error Syndrome (8b) + * + * 7:0 DRAM ECC Syndrome + */ + +#define I3000_ERRSTS 0xc8 /* Error Status Register (16b) + * + * 15:12 reserved + * 11 MCH Thermal Sensor Event for SMI/SCI/SERR + * 10 reserved + * 9 LOCK to non-DRAM Memory Flag (LCKF) + * 8 Received Refresh Timeout Flag (RRTOF) + * 7:2 reserved + * 1 Multiple-bit DRAM ECC Error Flag (DMERR) + * 0 Single-bit DRAM ECC Error Flag (DSERR) + */ +#define I3000_ERRSTS_BITS 0x0b03 /* bits which indicate errors */ +#define I3000_ERRSTS_UE 0x0002 +#define I3000_ERRSTS_CE 0x0001 + +#define I3000_ERRCMD 0xca /* Error Command (16b) + * + * 15:12 reserved + * 11 SERR on MCH Thermal Sensor Event (TSESERR) + * 10 reserved + * 9 SERR on LOCK to non-DRAM Memory (LCKERR) + * 8 SERR on DRAM Refresh Timeout (DRTOERR) + * 7:2 reserved + * 1 SERR Multiple-Bit DRAM ECC Error (DMERR) + * 0 SERR on Single-Bit ECC Error (DSERR) + */ + +/* Intel MMIO register space - device 0 function 0 - MMR space */ + +#define I3000_DRB_SHIFT 25 /* 32MiB grain */ + +#define I3000_C0DRB 0x100 /* Channel 0 DRAM Rank Boundary (8b x 4) + * + * 7:0 Channel 0 DRAM Rank Boundary Address + */ +#define I3000_C1DRB 0x180 /* Channel 1 DRAM 
Rank Boundary (8b x 4) + * + * 7:0 Channel 1 DRAM Rank Boundary Address + */ + +#define I3000_C0DRA 0x108 /* Channel 0 DRAM Rank Attribute (8b x 2) + * + * 7 reserved + * 6:4 DRAM odd Rank Attribute + * 3 reserved + * 2:0 DRAM even Rank Attribute + * + * Each attribute defines the page + * size of the corresponding rank: + * 000: unpopulated + * 001: reserved + * 010: 4 KB + * 011: 8 KB + * 100: 16 KB + * Others: reserved + */ +#define I3000_C1DRA 0x188 /* Channel 1 DRAM Rank Attribute (8b x 2) */ +#define ODD_RANK_ATTRIB(dra) (((dra) & 0x70) >> 4) +#define EVEN_RANK_ATTRIB(dra) ((dra) & 0x07) + +#define I3000_C0DRC0 0x120 /* DRAM Controller Mode 0 (32b) + * + * 31:30 reserved + * 29 Initialization Complete (IC) + * 28:11 reserved + * 10:8 Refresh Mode Select (RMS) + * 7 reserved + * 6:4 Mode Select (SMS) + * 3:2 reserved + * 1:0 DRAM Type (DT) + */ + +#define I3000_C0DRC1 0x124 /* DRAM Controller Mode 1 (32b) + * + * 31 Enhanced Addressing Enable (ENHADE) + * 30:0 reserved + */ + +enum i3000p_chips { + I3000 = 0, +}; + +struct i3000_dev_info { + const char *ctl_name; +}; + +struct i3000_error_info { + u16 errsts; + u8 derrsyn; + u8 edeap; + u32 deap; + u16 errsts2; +}; + +static const struct i3000_dev_info i3000_devs[] = { + [I3000] = { + .ctl_name = "i3000"}, +}; + +static struct pci_dev *mci_pdev; +static int i3000_registered = 1; +static struct edac_pci_ctl_info *i3000_pci; + +static void i3000_get_error_info(struct mem_ctl_info *mci, + struct i3000_error_info *info) +{ + struct pci_dev *pdev; + + pdev = to_pci_dev(mci->dev); + + /* + * This is a mess because there is no atomic way to read all the + * registers at once and the registers can transition from CE being + * overwritten by UE. + */ + pci_read_config_word(pdev, I3000_ERRSTS, &info->errsts); + if (!(info->errsts & I3000_ERRSTS_BITS)) + return; + pci_read_config_byte(pdev, I3000_EDEAP, &info->edeap); + pci_read_config_dword(pdev, I3000_DEAP, &info->deap); + pci_read_config_byte(pdev, I3000_DERRSYN, &info->derrsyn); + pci_read_config_word(pdev, I3000_ERRSTS, &info->errsts2); + + /* + * If the error is the same for both reads then the first set + * of reads is valid. If there is a change then there is a CE + * with no info and the second set of reads is valid and + * should be UE info. + */ + if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) { + pci_read_config_byte(pdev, I3000_EDEAP, &info->edeap); + pci_read_config_dword(pdev, I3000_DEAP, &info->deap); + pci_read_config_byte(pdev, I3000_DERRSYN, &info->derrsyn); + } + + /* Clear any error bits. + * (Yes, we really clear bits by writing 1 to them.) + */ + pci_write_bits16(pdev, I3000_ERRSTS, I3000_ERRSTS_BITS, + I3000_ERRSTS_BITS); +} + +static int i3000_process_error_info(struct mem_ctl_info *mci, + struct i3000_error_info *info, + int handle_errors) +{ + int row, multi_chan; + int pfn, offset, channel; + + multi_chan = mci->csrows[0].nr_channels - 1; + + if (!(info->errsts & I3000_ERRSTS_BITS)) + return 0; + + if (!handle_errors) + return 1; + + if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) { + edac_mc_handle_ce_no_info(mci, "UE overwrote CE"); + info->errsts = info->errsts2; + } + + pfn = I3000_DEAP_PFN(info->edeap, info->deap); + offset = I3000_DEAP_OFFSET(info->deap); + channel = I3000_DEAP_CHANNEL(info->deap); + + row = edac_mc_find_csrow_by_page(mci, pfn); + + if (info->errsts & I3000_ERRSTS_UE) + edac_mc_handle_ue(mci, pfn, offset, row, "i3000 UE"); + else + edac_mc_handle_ce(mci, pfn, offset, info->derrsyn, row, + multi_chan ? 
channel : 0, "i3000 CE"); + + return 1; +} + +static void i3000_check(struct mem_ctl_info *mci) +{ + struct i3000_error_info info; + + debugf1("MC%d: %s()\n", mci->mc_idx, __func__); + i3000_get_error_info(mci, &info); + i3000_process_error_info(mci, &info, 1); +} + +static int i3000_is_interleaved(const unsigned char *c0dra, + const unsigned char *c1dra, + const unsigned char *c0drb, + const unsigned char *c1drb) +{ + int i; + + /* If the channels aren't populated identically then + * we're not interleaved. + */ + for (i = 0; i < I3000_RANKS_PER_CHANNEL / 2; i++) + if (ODD_RANK_ATTRIB(c0dra[i]) != ODD_RANK_ATTRIB(c1dra[i]) || + EVEN_RANK_ATTRIB(c0dra[i]) != + EVEN_RANK_ATTRIB(c1dra[i])) + return 0; + + /* If the rank boundaries for the two channels are different + * then we're not interleaved. + */ + for (i = 0; i < I3000_RANKS_PER_CHANNEL; i++) + if (c0drb[i] != c1drb[i]) + return 0; + + return 1; +} + +static int i3000_probe1(struct pci_dev *pdev, int dev_idx) +{ + int rc; + int i; + struct mem_ctl_info *mci = NULL; + unsigned long last_cumul_size; + int interleaved, nr_channels; + unsigned char dra[I3000_RANKS / 2], drb[I3000_RANKS]; + unsigned char *c0dra = dra, *c1dra = &dra[I3000_RANKS_PER_CHANNEL / 2]; + unsigned char *c0drb = drb, *c1drb = &drb[I3000_RANKS_PER_CHANNEL]; + unsigned long mchbar; + void *window; + + debugf0("MC: %s()\n", __func__); + + pci_read_config_dword(pdev, I3000_MCHBAR, (u32 *) & mchbar); + mchbar &= I3000_MCHBAR_MASK; + window = ioremap_nocache(mchbar, I3000_MMR_WINDOW_SIZE); + if (!window) { + printk(KERN_ERR "i3000: cannot map mmio space at 0x%lx\n", + mchbar); + return -ENODEV; + } + + c0dra[0] = readb(window + I3000_C0DRA + 0); /* ranks 0,1 */ + c0dra[1] = readb(window + I3000_C0DRA + 1); /* ranks 2,3 */ + c1dra[0] = readb(window + I3000_C1DRA + 0); /* ranks 0,1 */ + c1dra[1] = readb(window + I3000_C1DRA + 1); /* ranks 2,3 */ + + for (i = 0; i < I3000_RANKS_PER_CHANNEL; i++) { + c0drb[i] = readb(window + I3000_C0DRB + i); + c1drb[i] = readb(window + I3000_C1DRB + i); + } + + iounmap(window); + + /* Figure out how many channels we have. + * + * If we have what the datasheet calls "asymmetric channels" + * (essentially the same as what was called "virtual single + * channel mode" in the i82875) then it's a single channel as + * far as EDAC is concerned. + */ + interleaved = i3000_is_interleaved(c0dra, c1dra, c0drb, c1drb); + nr_channels = interleaved ? 2 : 1; + mci = edac_mc_alloc(0, I3000_RANKS / nr_channels, nr_channels, 0); + if (!mci) + return -ENOMEM; + + debugf3("MC: %s(): init mci\n", __func__); + + mci->dev = &pdev->dev; + mci->mtype_cap = MEM_FLAG_DDR2; + + mci->edac_ctl_cap = EDAC_FLAG_SECDED; + mci->edac_cap = EDAC_FLAG_SECDED; + + mci->mod_name = EDAC_MOD_STR; + mci->mod_ver = I3000_REVISION; + mci->ctl_name = i3000_devs[dev_idx].ctl_name; + mci->dev_name = pci_name(pdev); + mci->edac_check = i3000_check; + mci->ctl_page_to_phys = NULL; + + /* + * The dram rank boundary (DRB) reg values are boundary addresses + * for each DRAM rank with a granularity of 32MB. DRB regs are + * cumulative; the last one will contain the total memory + * contained in all ranks. + * + * If we're in interleaved mode then we're only walking through + * the ranks of controller 0, so we double all the values we see. 
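To put numbers on the comment above (values invented, assuming the usual 4 KiB pages): a DRB step from 0x08 to 0x10 covers 8 * 32 MiB = 256 MiB on one channel, or 512 MiB when the channels are interleaved. A minimal sketch of that conversion, equivalent to the cumulative walk in the loop that follows:

/* Illustrative only, not part of the driver. */
static unsigned long drb_step_to_pages(u8 drb, u8 prev_drb, int interleaved)
{
	unsigned long pages;

	pages = (unsigned long)(drb - prev_drb) << (I3000_DRB_SHIFT - PAGE_SHIFT);
	return interleaved ? pages << 1 : pages;	/* count both channels */
}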
+ */ + for (last_cumul_size = i = 0; i < mci->nr_csrows; i++) { + u8 value; + u32 cumul_size; + struct csrow_info *csrow = &mci->csrows[i]; + + value = drb[i]; + cumul_size = value << (I3000_DRB_SHIFT - PAGE_SHIFT); + if (interleaved) + cumul_size <<= 1; + debugf3("MC: %s(): (%d) cumul_size 0x%x\n", + __func__, i, cumul_size); + if (cumul_size == last_cumul_size) { + csrow->mtype = MEM_EMPTY; + continue; + } + + csrow->first_page = last_cumul_size; + csrow->last_page = cumul_size - 1; + csrow->nr_pages = cumul_size - last_cumul_size; + last_cumul_size = cumul_size; + csrow->grain = I3000_DEAP_GRAIN; + csrow->mtype = MEM_DDR2; + csrow->dtype = DEV_UNKNOWN; + csrow->edac_mode = EDAC_UNKNOWN; + } + + /* Clear any error bits. + * (Yes, we really clear bits by writing 1 to them.) + */ + pci_write_bits16(pdev, I3000_ERRSTS, I3000_ERRSTS_BITS, + I3000_ERRSTS_BITS); + + rc = -ENODEV; + if (edac_mc_add_mc(mci)) { + debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__); + goto fail; + } + + /* allocating generic PCI control info */ + i3000_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR); + if (!i3000_pci) { + printk(KERN_WARNING + "%s(): Unable to create PCI control\n", + __func__); + printk(KERN_WARNING + "%s(): PCI error report via EDAC not setup\n", + __func__); + } + + /* get this far and it's successful */ + debugf3("MC: %s(): success\n", __func__); + return 0; + + fail: + if (mci) + edac_mc_free(mci); + + return rc; +} + +/* returns count (>= 0), or negative on error */ +static int __devinit i3000_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int rc; + + debugf0("MC: %s()\n", __func__); + + if (pci_enable_device(pdev) < 0) + return -EIO; + + rc = i3000_probe1(pdev, ent->driver_data); + if (mci_pdev == NULL) + mci_pdev = pci_dev_get(pdev); + + return rc; +} + +static void __devexit i3000_remove_one(struct pci_dev *pdev) +{ + struct mem_ctl_info *mci; + + debugf0("%s()\n", __func__); + + if (i3000_pci) + edac_pci_release_generic_ctl(i3000_pci); + + if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL) + return; + + edac_mc_free(mci); +} + +static const struct pci_device_id i3000_pci_tbl[] __devinitdata = { + { + PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0, + I3000}, + { + 0, + } /* 0 terminated list. 
*/ +}; + +MODULE_DEVICE_TABLE(pci, i3000_pci_tbl); + +static struct pci_driver i3000_driver = { + .name = EDAC_MOD_STR, + .probe = i3000_init_one, + .remove = __devexit_p(i3000_remove_one), + .id_table = i3000_pci_tbl, +}; + +static int __init i3000_init(void) +{ + int pci_rc; + + debugf3("MC: %s()\n", __func__); + pci_rc = pci_register_driver(&i3000_driver); + if (pci_rc < 0) + goto fail0; + + if (mci_pdev == NULL) { + i3000_registered = 0; + mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_3000_HB, NULL); + if (!mci_pdev) { + debugf0("i3000 pci_get_device fail\n"); + pci_rc = -ENODEV; + goto fail1; + } + + pci_rc = i3000_init_one(mci_pdev, i3000_pci_tbl); + if (pci_rc < 0) { + debugf0("i3000 init fail\n"); + pci_rc = -ENODEV; + goto fail1; + } + } + + return 0; + +fail1: + pci_unregister_driver(&i3000_driver); + +fail0: + if (mci_pdev) + pci_dev_put(mci_pdev); + + return pci_rc; +} + +static void __exit i3000_exit(void) +{ + debugf3("MC: %s()\n", __func__); + + pci_unregister_driver(&i3000_driver); + if (!i3000_registered) { + i3000_remove_one(mci_pdev); + pci_dev_put(mci_pdev); + } +} + +module_init(i3000_init); +module_exit(i3000_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Akamai Technologies Arthur Ulfeldt/Jason Uhlenkott"); +MODULE_DESCRIPTION("MC support for Intel 3000 memory hub controllers"); diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c new file mode 100644 index 000000000000..96f7e63e3996 --- /dev/null +++ b/drivers/edac/i5000_edac.c @@ -0,0 +1,1505 @@ +/* + * Intel 5000(P/V/X) class Memory Controllers kernel module + * + * This file may be distributed under the terms of the + * GNU General Public License. + * + * Written by Douglas Thompson Linux Networx (http://lnxi.com) + * norsk5@xmission.com + * + * This module is based on the following document: + * + * Intel 5000X Chipset Memory Controller Hub (MCH) - Datasheet + * http://developer.intel.com/design/chipsets/datashts/313070.htm + * + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/pci_ids.h> +#include <linux/slab.h> +#include <linux/edac.h> +#include <asm/mmzone.h> + +#include "edac_core.h" + +/* + * Alter this version for the I5000 module when modifications are made + */ +#define I5000_REVISION " Ver: 2.0.12 " __DATE__ +#define EDAC_MOD_STR "i5000_edac" + +#define i5000_printk(level, fmt, arg...) \ + edac_printk(level, "i5000", fmt, ##arg) + +#define i5000_mc_printk(mci, level, fmt, arg...) 
\ + edac_mc_chipset_printk(mci, level, "i5000", fmt, ##arg) + +#ifndef PCI_DEVICE_ID_INTEL_FBD_0 +#define PCI_DEVICE_ID_INTEL_FBD_0 0x25F5 +#endif +#ifndef PCI_DEVICE_ID_INTEL_FBD_1 +#define PCI_DEVICE_ID_INTEL_FBD_1 0x25F6 +#endif + +/* Device 16, + * Function 0: System Address + * Function 1: Memory Branch Map, Control, Errors Register + * Function 2: FSB Error Registers + * + * All 3 functions of Device 16 (0,1,2) share the SAME DID + */ +#define PCI_DEVICE_ID_INTEL_I5000_DEV16 0x25F0 + +/* OFFSETS for Function 0 */ + +/* OFFSETS for Function 1 */ +#define AMBASE 0x48 +#define MAXCH 0x56 +#define MAXDIMMPERCH 0x57 +#define TOLM 0x6C +#define REDMEMB 0x7C +#define RED_ECC_LOCATOR(x) ((x) & 0x3FFFF) +#define REC_ECC_LOCATOR_EVEN(x) ((x) & 0x001FF) +#define REC_ECC_LOCATOR_ODD(x) ((x) & 0x3FE00) +#define MIR0 0x80 +#define MIR1 0x84 +#define MIR2 0x88 +#define AMIR0 0x8C +#define AMIR1 0x90 +#define AMIR2 0x94 + +#define FERR_FAT_FBD 0x98 +#define NERR_FAT_FBD 0x9C +#define EXTRACT_FBDCHAN_INDX(x) (((x)>>28) & 0x3) +#define FERR_FAT_FBDCHAN 0x30000000 +#define FERR_FAT_M3ERR 0x00000004 +#define FERR_FAT_M2ERR 0x00000002 +#define FERR_FAT_M1ERR 0x00000001 +#define FERR_FAT_MASK (FERR_FAT_M1ERR | \ + FERR_FAT_M2ERR | \ + FERR_FAT_M3ERR) + +#define FERR_NF_FBD 0xA0 + +/* Thermal and SPD or BFD errors */ +#define FERR_NF_M28ERR 0x01000000 +#define FERR_NF_M27ERR 0x00800000 +#define FERR_NF_M26ERR 0x00400000 +#define FERR_NF_M25ERR 0x00200000 +#define FERR_NF_M24ERR 0x00100000 +#define FERR_NF_M23ERR 0x00080000 +#define FERR_NF_M22ERR 0x00040000 +#define FERR_NF_M21ERR 0x00020000 + +/* Correctable errors */ +#define FERR_NF_M20ERR 0x00010000 +#define FERR_NF_M19ERR 0x00008000 +#define FERR_NF_M18ERR 0x00004000 +#define FERR_NF_M17ERR 0x00002000 + +/* Non-Retry or redundant Retry errors */ +#define FERR_NF_M16ERR 0x00001000 +#define FERR_NF_M15ERR 0x00000800 +#define FERR_NF_M14ERR 0x00000400 +#define FERR_NF_M13ERR 0x00000200 + +/* Uncorrectable errors */ +#define FERR_NF_M12ERR 0x00000100 +#define FERR_NF_M11ERR 0x00000080 +#define FERR_NF_M10ERR 0x00000040 +#define FERR_NF_M9ERR 0x00000020 +#define FERR_NF_M8ERR 0x00000010 +#define FERR_NF_M7ERR 0x00000008 +#define FERR_NF_M6ERR 0x00000004 +#define FERR_NF_M5ERR 0x00000002 +#define FERR_NF_M4ERR 0x00000001 + +#define FERR_NF_UNCORRECTABLE (FERR_NF_M12ERR | \ + FERR_NF_M11ERR | \ + FERR_NF_M10ERR | \ + FERR_NF_M8ERR | \ + FERR_NF_M7ERR | \ + FERR_NF_M6ERR | \ + FERR_NF_M5ERR | \ + FERR_NF_M4ERR) +#define FERR_NF_CORRECTABLE (FERR_NF_M20ERR | \ + FERR_NF_M19ERR | \ + FERR_NF_M18ERR | \ + FERR_NF_M17ERR) +#define FERR_NF_DIMM_SPARE (FERR_NF_M27ERR | \ + FERR_NF_M28ERR) +#define FERR_NF_THERMAL (FERR_NF_M26ERR | \ + FERR_NF_M25ERR | \ + FERR_NF_M24ERR | \ + FERR_NF_M23ERR) +#define FERR_NF_SPD_PROTOCOL (FERR_NF_M22ERR) +#define FERR_NF_NORTH_CRC (FERR_NF_M21ERR) +#define FERR_NF_NON_RETRY (FERR_NF_M13ERR | \ + FERR_NF_M14ERR | \ + FERR_NF_M15ERR) + +#define NERR_NF_FBD 0xA4 +#define FERR_NF_MASK (FERR_NF_UNCORRECTABLE | \ + FERR_NF_CORRECTABLE | \ + FERR_NF_DIMM_SPARE | \ + FERR_NF_THERMAL | \ + FERR_NF_SPD_PROTOCOL | \ + FERR_NF_NORTH_CRC | \ + FERR_NF_NON_RETRY) + +#define EMASK_FBD 0xA8 +#define EMASK_FBD_M28ERR 0x08000000 +#define EMASK_FBD_M27ERR 0x04000000 +#define EMASK_FBD_M26ERR 0x02000000 +#define EMASK_FBD_M25ERR 0x01000000 +#define EMASK_FBD_M24ERR 0x00800000 +#define EMASK_FBD_M23ERR 0x00400000 +#define EMASK_FBD_M22ERR 0x00200000 +#define EMASK_FBD_M21ERR 0x00100000 +#define EMASK_FBD_M20ERR 0x00080000 +#define EMASK_FBD_M19ERR 
0x00040000 +#define EMASK_FBD_M18ERR 0x00020000 +#define EMASK_FBD_M17ERR 0x00010000 + +#define EMASK_FBD_M15ERR 0x00004000 +#define EMASK_FBD_M14ERR 0x00002000 +#define EMASK_FBD_M13ERR 0x00001000 +#define EMASK_FBD_M12ERR 0x00000800 +#define EMASK_FBD_M11ERR 0x00000400 +#define EMASK_FBD_M10ERR 0x00000200 +#define EMASK_FBD_M9ERR 0x00000100 +#define EMASK_FBD_M8ERR 0x00000080 +#define EMASK_FBD_M7ERR 0x00000040 +#define EMASK_FBD_M6ERR 0x00000020 +#define EMASK_FBD_M5ERR 0x00000010 +#define EMASK_FBD_M4ERR 0x00000008 +#define EMASK_FBD_M3ERR 0x00000004 +#define EMASK_FBD_M2ERR 0x00000002 +#define EMASK_FBD_M1ERR 0x00000001 + +#define ENABLE_EMASK_FBD_FATAL_ERRORS (EMASK_FBD_M1ERR | \ + EMASK_FBD_M2ERR | \ + EMASK_FBD_M3ERR) + +#define ENABLE_EMASK_FBD_UNCORRECTABLE (EMASK_FBD_M4ERR | \ + EMASK_FBD_M5ERR | \ + EMASK_FBD_M6ERR | \ + EMASK_FBD_M7ERR | \ + EMASK_FBD_M8ERR | \ + EMASK_FBD_M9ERR | \ + EMASK_FBD_M10ERR | \ + EMASK_FBD_M11ERR | \ + EMASK_FBD_M12ERR) +#define ENABLE_EMASK_FBD_CORRECTABLE (EMASK_FBD_M17ERR | \ + EMASK_FBD_M18ERR | \ + EMASK_FBD_M19ERR | \ + EMASK_FBD_M20ERR) +#define ENABLE_EMASK_FBD_DIMM_SPARE (EMASK_FBD_M27ERR | \ + EMASK_FBD_M28ERR) +#define ENABLE_EMASK_FBD_THERMALS (EMASK_FBD_M26ERR | \ + EMASK_FBD_M25ERR | \ + EMASK_FBD_M24ERR | \ + EMASK_FBD_M23ERR) +#define ENABLE_EMASK_FBD_SPD_PROTOCOL (EMASK_FBD_M22ERR) +#define ENABLE_EMASK_FBD_NORTH_CRC (EMASK_FBD_M21ERR) +#define ENABLE_EMASK_FBD_NON_RETRY (EMASK_FBD_M15ERR | \ + EMASK_FBD_M14ERR | \ + EMASK_FBD_M13ERR) + +#define ENABLE_EMASK_ALL (ENABLE_EMASK_FBD_NON_RETRY | \ + ENABLE_EMASK_FBD_NORTH_CRC | \ + ENABLE_EMASK_FBD_SPD_PROTOCOL | \ + ENABLE_EMASK_FBD_THERMALS | \ + ENABLE_EMASK_FBD_DIMM_SPARE | \ + ENABLE_EMASK_FBD_FATAL_ERRORS | \ + ENABLE_EMASK_FBD_CORRECTABLE | \ + ENABLE_EMASK_FBD_UNCORRECTABLE) + +#define ERR0_FBD 0xAC +#define ERR1_FBD 0xB0 +#define ERR2_FBD 0xB4 +#define MCERR_FBD 0xB8 +#define NRECMEMA 0xBE +#define NREC_BANK(x) (((x)>>12) & 0x7) +#define NREC_RDWR(x) (((x)>>11) & 1) +#define NREC_RANK(x) (((x)>>8) & 0x7) +#define NRECMEMB 0xC0 +#define NREC_CAS(x) (((x)>>16) & 0xFFFFFF) +#define NREC_RAS(x) ((x) & 0x7FFF) +#define NRECFGLOG 0xC4 +#define NREEECFBDA 0xC8 +#define NREEECFBDB 0xCC +#define NREEECFBDC 0xD0 +#define NREEECFBDD 0xD4 +#define NREEECFBDE 0xD8 +#define REDMEMA 0xDC +#define RECMEMA 0xE2 +#define REC_BANK(x) (((x)>>12) & 0x7) +#define REC_RDWR(x) (((x)>>11) & 1) +#define REC_RANK(x) (((x)>>8) & 0x7) +#define RECMEMB 0xE4 +#define REC_CAS(x) (((x)>>16) & 0xFFFFFF) +#define REC_RAS(x) ((x) & 0x7FFF) +#define RECFGLOG 0xE8 +#define RECFBDA 0xEC +#define RECFBDB 0xF0 +#define RECFBDC 0xF4 +#define RECFBDD 0xF8 +#define RECFBDE 0xFC + +/* OFFSETS for Function 2 */ + +/* + * Device 21, + * Function 0: Memory Map Branch 0 + * + * Device 22, + * Function 0: Memory Map Branch 1 + */ +#define PCI_DEVICE_ID_I5000_BRANCH_0 0x25F5 +#define PCI_DEVICE_ID_I5000_BRANCH_1 0x25F6 + +#define AMB_PRESENT_0 0x64 +#define AMB_PRESENT_1 0x66 +#define MTR0 0x80 +#define MTR1 0x84 +#define MTR2 0x88 +#define MTR3 0x8C + +#define NUM_MTRS 4 +#define CHANNELS_PER_BRANCH (2) + +/* Defines to extract the vaious fields from the + * MTRx - Memory Technology Registers + */ +#define MTR_DIMMS_PRESENT(mtr) ((mtr) & (0x1 << 8)) +#define MTR_DRAM_WIDTH(mtr) ((((mtr) >> 6) & 0x1) ? 8 : 4) +#define MTR_DRAM_BANKS(mtr) ((((mtr) >> 5) & 0x1) ? 8 : 4) +#define MTR_DRAM_BANKS_ADDR_BITS(mtr) ((MTR_DRAM_BANKS(mtr) == 8) ? 
3 : 2) +#define MTR_DIMM_RANK(mtr) (((mtr) >> 4) & 0x1) +#define MTR_DIMM_RANK_ADDR_BITS(mtr) (MTR_DIMM_RANK(mtr) ? 2 : 1) +#define MTR_DIMM_ROWS(mtr) (((mtr) >> 2) & 0x3) +#define MTR_DIMM_ROWS_ADDR_BITS(mtr) (MTR_DIMM_ROWS(mtr) + 13) +#define MTR_DIMM_COLS(mtr) ((mtr) & 0x3) +#define MTR_DIMM_COLS_ADDR_BITS(mtr) (MTR_DIMM_COLS(mtr) + 10) + +#ifdef CONFIG_EDAC_DEBUG +static char *numrow_toString[] = { + "8,192 - 13 rows", + "16,384 - 14 rows", + "32,768 - 15 rows", + "reserved" +}; + +static char *numcol_toString[] = { + "1,024 - 10 columns", + "2,048 - 11 columns", + "4,096 - 12 columns", + "reserved" +}; +#endif + +/* Enumeration of supported devices */ +enum i5000_chips { + I5000P = 0, + I5000V = 1, /* future */ + I5000X = 2 /* future */ +}; + +/* Device name and register DID (Device ID) */ +struct i5000_dev_info { + const char *ctl_name; /* name for this device */ + u16 fsb_mapping_errors; /* DID for the branchmap,control */ +}; + +/* Table of devices attributes supported by this driver */ +static const struct i5000_dev_info i5000_devs[] = { + [I5000P] = { + .ctl_name = "I5000", + .fsb_mapping_errors = PCI_DEVICE_ID_INTEL_I5000_DEV16, + }, +}; + +struct i5000_dimm_info { + int megabytes; /* size, 0 means not present */ + int dual_rank; +}; + +#define MAX_CHANNELS 6 /* max possible channels */ +#define MAX_CSROWS (8*2) /* max possible csrows per channel */ + +/* driver private data structure */ +struct i5000_pvt { + struct pci_dev *system_address; /* 16.0 */ + struct pci_dev *branchmap_werrors; /* 16.1 */ + struct pci_dev *fsb_error_regs; /* 16.2 */ + struct pci_dev *branch_0; /* 21.0 */ + struct pci_dev *branch_1; /* 22.0 */ + + u16 tolm; /* top of low memory */ + u64 ambase; /* AMB BAR */ + + u16 mir0, mir1, mir2; + + u16 b0_mtr[NUM_MTRS]; /* Memory Technlogy Reg */ + u16 b0_ambpresent0; /* Branch 0, Channel 0 */ + u16 b0_ambpresent1; /* Brnach 0, Channel 1 */ + + u16 b1_mtr[NUM_MTRS]; /* Memory Technlogy Reg */ + u16 b1_ambpresent0; /* Branch 1, Channel 8 */ + u16 b1_ambpresent1; /* Branch 1, Channel 1 */ + + /* DIMM infomation matrix, allocating architecture maximums */ + struct i5000_dimm_info dimm_info[MAX_CSROWS][MAX_CHANNELS]; + + /* Actual values for this controller */ + int maxch; /* Max channels */ + int maxdimmperch; /* Max DIMMs per channel */ +}; + +/* I5000 MCH error information retrieved from Hardware */ +struct i5000_error_info { + + /* These registers are always read from the MC */ + u32 ferr_fat_fbd; /* First Errors Fatal */ + u32 nerr_fat_fbd; /* Next Errors Fatal */ + u32 ferr_nf_fbd; /* First Errors Non-Fatal */ + u32 nerr_nf_fbd; /* Next Errors Non-Fatal */ + + /* These registers are input ONLY if there was a Recoverable Error */ + u32 redmemb; /* Recoverable Mem Data Error log B */ + u16 recmema; /* Recoverable Mem Error log A */ + u32 recmemb; /* Recoverable Mem Error log B */ + + /* These registers are input ONLY if there was a + * Non-Recoverable Error */ + u16 nrecmema; /* Non-Recoverable Mem log A */ + u16 nrecmemb; /* Non-Recoverable Mem log B */ + +}; + +static struct edac_pci_ctl_info *i5000_pci; + +/* + * i5000_get_error_info Retrieve the hardware error information from + * the hardware and cache it in the 'info' + * structure + */ +static void i5000_get_error_info(struct mem_ctl_info *mci, + struct i5000_error_info *info) +{ + struct i5000_pvt *pvt; + u32 value; + + pvt = mci->pvt_info; + + /* read in the 1st FATAL error register */ + pci_read_config_dword(pvt->branchmap_werrors, FERR_FAT_FBD, &value); + + /* Mask only the bits that the doc says are 
valid + */ + value &= (FERR_FAT_FBDCHAN | FERR_FAT_MASK); + + /* If there is an error, then read in the */ + /* NEXT FATAL error register and the Memory Error Log Register A */ + if (value & FERR_FAT_MASK) { + info->ferr_fat_fbd = value; + + /* harvest the various error data we need */ + pci_read_config_dword(pvt->branchmap_werrors, + NERR_FAT_FBD, &info->nerr_fat_fbd); + pci_read_config_word(pvt->branchmap_werrors, + NRECMEMA, &info->nrecmema); + pci_read_config_word(pvt->branchmap_werrors, + NRECMEMB, &info->nrecmemb); + + /* Clear the error bits, by writing them back */ + pci_write_config_dword(pvt->branchmap_werrors, + FERR_FAT_FBD, value); + } else { + info->ferr_fat_fbd = 0; + info->nerr_fat_fbd = 0; + info->nrecmema = 0; + info->nrecmemb = 0; + } + + /* read in the 1st NON-FATAL error register */ + pci_read_config_dword(pvt->branchmap_werrors, FERR_NF_FBD, &value); + + /* If there is an error, then read in the 1st NON-FATAL error + * register as well */ + if (value & FERR_NF_MASK) { + info->ferr_nf_fbd = value; + + /* harvest the various error data we need */ + pci_read_config_dword(pvt->branchmap_werrors, + NERR_NF_FBD, &info->nerr_nf_fbd); + pci_read_config_word(pvt->branchmap_werrors, + RECMEMA, &info->recmema); + pci_read_config_dword(pvt->branchmap_werrors, + RECMEMB, &info->recmemb); + pci_read_config_dword(pvt->branchmap_werrors, + REDMEMB, &info->redmemb); + + /* Clear the error bits, by writing them back */ + pci_write_config_dword(pvt->branchmap_werrors, + FERR_NF_FBD, value); + } else { + info->ferr_nf_fbd = 0; + info->nerr_nf_fbd = 0; + info->recmema = 0; + info->recmemb = 0; + info->redmemb = 0; + } +} + +/* + * i5000_process_fatal_error_info(struct mem_ctl_info *mci, + * struct i5000_error_info *info, + * int handle_errors); + * + * handle the Intel FATAL errors, if any + */ +static void i5000_process_fatal_error_info(struct mem_ctl_info *mci, + struct i5000_error_info *info, + int handle_errors) +{ + char msg[EDAC_MC_LABEL_LEN + 1 + 90]; + u32 allErrors; + int branch; + int channel; + int bank; + int rank; + int rdwr; + int ras, cas; + + /* mask off the Error bits that are possible */ + allErrors = (info->ferr_fat_fbd & FERR_FAT_MASK); + if (!allErrors) + return; /* if no error, return now */ + + /* ONLY ONE of the possible error bits will be set, as per the docs */ + i5000_mc_printk(mci, KERN_ERR, + "FATAL ERRORS Found!!! 1st FATAL Err Reg= 0x%x\n", + allErrors); + + branch = EXTRACT_FBDCHAN_INDX(info->ferr_fat_fbd); + channel = branch; + + /* Use the NON-Recoverable macros to extract data */ + bank = NREC_BANK(info->nrecmema); + rank = NREC_RANK(info->nrecmema); + rdwr = NREC_RDWR(info->nrecmema); + ras = NREC_RAS(info->nrecmemb); + cas = NREC_CAS(info->nrecmemb); + + debugf0("\t\tCSROW= %d Channels= %d,%d (Branch= %d " + "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n", + rank, channel, channel + 1, branch >> 1, bank, + rdwr ? "Write" : "Read", ras, cas); + + /* Only 1 bit will be on */ + if (allErrors & FERR_FAT_M1ERR) { + i5000_mc_printk(mci, KERN_ERR, + "Alert on non-redundant retry or fast " + "reset timeout\n"); + + } else if (allErrors & FERR_FAT_M2ERR) { + i5000_mc_printk(mci, KERN_ERR, + "Northbound CRC error on non-redundant " + "retry\n"); + + } else if (allErrors & FERR_FAT_M3ERR) { + i5000_mc_printk(mci, KERN_ERR, + ">Tmid Thermal event with intelligent " + "throttling disabled\n"); + } + + /* Form out message */ + snprintf(msg, sizeof(msg), + "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d CAS=%d " + "FATAL Err=0x%x)", + branch >> 1, bank, rdwr ? 
"Write" : "Read", ras, cas, + allErrors); + + /* Call the helper to output message */ + edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg); +} + +/* + * i5000_process_fatal_error_info(struct mem_ctl_info *mci, + * struct i5000_error_info *info, + * int handle_errors); + * + * handle the Intel NON-FATAL errors, if any + */ +static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci, + struct i5000_error_info *info, + int handle_errors) +{ + char msg[EDAC_MC_LABEL_LEN + 1 + 90]; + u32 allErrors; + u32 ue_errors; + u32 ce_errors; + u32 misc_errors; + int branch; + int channel; + int bank; + int rank; + int rdwr; + int ras, cas; + + /* mask off the Error bits that are possible */ + allErrors = (info->ferr_nf_fbd & FERR_NF_MASK); + if (!allErrors) + return; /* if no error, return now */ + + /* ONLY ONE of the possible error bits will be set, as per the docs */ + i5000_mc_printk(mci, KERN_WARNING, + "NON-FATAL ERRORS Found!!! 1st NON-FATAL Err " + "Reg= 0x%x\n", allErrors); + + ue_errors = allErrors & FERR_NF_UNCORRECTABLE; + if (ue_errors) { + debugf0("\tUncorrected bits= 0x%x\n", ue_errors); + + branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd); + channel = branch; + bank = NREC_BANK(info->nrecmema); + rank = NREC_RANK(info->nrecmema); + rdwr = NREC_RDWR(info->nrecmema); + ras = NREC_RAS(info->nrecmemb); + cas = NREC_CAS(info->nrecmemb); + + debugf0 + ("\t\tCSROW= %d Channels= %d,%d (Branch= %d " + "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n", + rank, channel, channel + 1, branch >> 1, bank, + rdwr ? "Write" : "Read", ras, cas); + + /* Form out message */ + snprintf(msg, sizeof(msg), + "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d " + "CAS=%d, UE Err=0x%x)", + branch >> 1, bank, rdwr ? "Write" : "Read", ras, cas, + ue_errors); + + /* Call the helper to output message */ + edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg); + } + + /* Check correctable errors */ + ce_errors = allErrors & FERR_NF_CORRECTABLE; + if (ce_errors) { + debugf0("\tCorrected bits= 0x%x\n", ce_errors); + + branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd); + + channel = 0; + if (REC_ECC_LOCATOR_ODD(info->redmemb)) + channel = 1; + + /* Convert channel to be based from zero, instead of + * from branch base of 0 */ + channel += branch; + + bank = REC_BANK(info->recmema); + rank = REC_RANK(info->recmema); + rdwr = REC_RDWR(info->recmema); + ras = REC_RAS(info->recmemb); + cas = REC_CAS(info->recmemb); + + debugf0("\t\tCSROW= %d Channel= %d (Branch %d " + "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n", + rank, channel, branch >> 1, bank, + rdwr ? "Write" : "Read", ras, cas); + + /* Form out message */ + snprintf(msg, sizeof(msg), + "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d " + "CAS=%d, CE Err=0x%x)", branch >> 1, bank, + rdwr ? 
"Write" : "Read", ras, cas, ce_errors); + + /* Call the helper to output message */ + edac_mc_handle_fbd_ce(mci, rank, channel, msg); + } + + /* See if any of the thermal errors have fired */ + misc_errors = allErrors & FERR_NF_THERMAL; + if (misc_errors) { + i5000_printk(KERN_WARNING, "\tTHERMAL Error, bits= 0x%x\n", + misc_errors); + } + + /* See if any of the thermal errors have fired */ + misc_errors = allErrors & FERR_NF_NON_RETRY; + if (misc_errors) { + i5000_printk(KERN_WARNING, "\tNON-Retry Errors, bits= 0x%x\n", + misc_errors); + } + + /* See if any of the thermal errors have fired */ + misc_errors = allErrors & FERR_NF_NORTH_CRC; + if (misc_errors) { + i5000_printk(KERN_WARNING, + "\tNORTHBOUND CRC Error, bits= 0x%x\n", + misc_errors); + } + + /* See if any of the thermal errors have fired */ + misc_errors = allErrors & FERR_NF_SPD_PROTOCOL; + if (misc_errors) { + i5000_printk(KERN_WARNING, + "\tSPD Protocol Error, bits= 0x%x\n", + misc_errors); + } + + /* See if any of the thermal errors have fired */ + misc_errors = allErrors & FERR_NF_DIMM_SPARE; + if (misc_errors) { + i5000_printk(KERN_WARNING, "\tDIMM-Spare Error, bits= 0x%x\n", + misc_errors); + } +} + +/* + * i5000_process_error_info Process the error info that is + * in the 'info' structure, previously retrieved from hardware + */ +static void i5000_process_error_info(struct mem_ctl_info *mci, + struct i5000_error_info *info, + int handle_errors) +{ + /* First handle any fatal errors that occurred */ + i5000_process_fatal_error_info(mci, info, handle_errors); + + /* now handle any non-fatal errors that occurred */ + i5000_process_nonfatal_error_info(mci, info, handle_errors); +} + +/* + * i5000_clear_error Retrieve any error from the hardware + * but do NOT process that error. + * Used for 'clearing' out of previous errors + * Called by the Core module. + */ +static void i5000_clear_error(struct mem_ctl_info *mci) +{ + struct i5000_error_info info; + + i5000_get_error_info(mci, &info); +} + +/* + * i5000_check_error Retrieve and process errors reported by the + * hardware. Called by the Core module. 
+ */ +static void i5000_check_error(struct mem_ctl_info *mci) +{ + struct i5000_error_info info; + debugf4("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); + i5000_get_error_info(mci, &info); + i5000_process_error_info(mci, &info, 1); +} + +/* + * i5000_get_devices Find and perform 'get' operation on the MCH's + * device/functions we want to reference for this driver + * + * Need to 'get' device 16 func 1 and func 2 + */ +static int i5000_get_devices(struct mem_ctl_info *mci, int dev_idx) +{ + //const struct i5000_dev_info *i5000_dev = &i5000_devs[dev_idx]; + struct i5000_pvt *pvt; + struct pci_dev *pdev; + + pvt = mci->pvt_info; + + /* Attempt to 'get' the MCH register we want */ + pdev = NULL; + while (1) { + pdev = pci_get_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_I5000_DEV16, pdev); + + /* End of list, leave */ + if (pdev == NULL) { + i5000_printk(KERN_ERR, + "'system address,Process Bus' " + "device not found:" + "vendor 0x%x device 0x%x FUNC 1 " + "(broken BIOS?)\n", + PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_I5000_DEV16); + + return 1; + } + + /* Scan for device 16 func 1 */ + if (PCI_FUNC(pdev->devfn) == 1) + break; + } + + pvt->branchmap_werrors = pdev; + + /* Attempt to 'get' the MCH register we want */ + pdev = NULL; + while (1) { + pdev = pci_get_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_I5000_DEV16, pdev); + + if (pdev == NULL) { + i5000_printk(KERN_ERR, + "MC: 'branchmap,control,errors' " + "device not found:" + "vendor 0x%x device 0x%x Func 2 " + "(broken BIOS?)\n", + PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_I5000_DEV16); + + pci_dev_put(pvt->branchmap_werrors); + return 1; + } + + /* Scan for device 16 func 1 */ + if (PCI_FUNC(pdev->devfn) == 2) + break; + } + + pvt->fsb_error_regs = pdev; + + debugf1("System Address, processor bus- PCI Bus ID: %s %x:%x\n", + pci_name(pvt->system_address), + pvt->system_address->vendor, pvt->system_address->device); + debugf1("Branchmap, control and errors - PCI Bus ID: %s %x:%x\n", + pci_name(pvt->branchmap_werrors), + pvt->branchmap_werrors->vendor, pvt->branchmap_werrors->device); + debugf1("FSB Error Regs - PCI Bus ID: %s %x:%x\n", + pci_name(pvt->fsb_error_regs), + pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device); + + pdev = NULL; + pdev = pci_get_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_I5000_BRANCH_0, pdev); + + if (pdev == NULL) { + i5000_printk(KERN_ERR, + "MC: 'BRANCH 0' device not found:" + "vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n", + PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_I5000_BRANCH_0); + + pci_dev_put(pvt->branchmap_werrors); + pci_dev_put(pvt->fsb_error_regs); + return 1; + } + + pvt->branch_0 = pdev; + + /* If this device claims to have more than 2 channels then + * fetch Branch 1's information + */ + if (pvt->maxch >= CHANNELS_PER_BRANCH) { + pdev = NULL; + pdev = pci_get_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_I5000_BRANCH_1, pdev); + + if (pdev == NULL) { + i5000_printk(KERN_ERR, + "MC: 'BRANCH 1' device not found:" + "vendor 0x%x device 0x%x Func 0 " + "(broken BIOS?)\n", + PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_I5000_BRANCH_1); + + pci_dev_put(pvt->branchmap_werrors); + pci_dev_put(pvt->fsb_error_regs); + pci_dev_put(pvt->branch_0); + return 1; + } + + pvt->branch_1 = pdev; + } + + return 0; +} + +/* + * i5000_put_devices 'put' all the devices that we have + * reserved via 'get' + */ +static void i5000_put_devices(struct mem_ctl_info *mci) +{ + struct i5000_pvt *pvt; + + pvt = mci->pvt_info; + + pci_dev_put(pvt->branchmap_werrors); /* FUNC 1 */ + 
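The scans in i5000_get_devices() above work because all three functions of device 16 share one device ID, so the wanted function number has to be picked out by hand. The same pattern in isolation, as a sketch rather than the driver's code:

/* Sketch: return the named function of a PCI device when several
 * functions share the same vendor/device ID.  pci_get_device() holds a
 * reference on the device it returns; the caller drops it with
 * pci_dev_put() when done.
 */
static struct pci_dev *example_find_pci_func(u16 vendor, u16 device,
					     unsigned int func)
{
	struct pci_dev *pdev = NULL;

	while ((pdev = pci_get_device(vendor, device, pdev)) != NULL)
		if (PCI_FUNC(pdev->devfn) == func)
			return pdev;

	return NULL;
}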
pci_dev_put(pvt->fsb_error_regs); /* FUNC 2 */ + pci_dev_put(pvt->branch_0); /* DEV 21 */ + + /* Only if more than 2 channels do we release the second branch */ + if (pvt->maxch >= CHANNELS_PER_BRANCH) + pci_dev_put(pvt->branch_1); /* DEV 22 */ +} + +/* + * determine_amb_resent + * + * the information is contained in NUM_MTRS different registers + * determineing which of the NUM_MTRS requires knowing + * which channel is in question + * + * 2 branches, each with 2 channels + * b0_ambpresent0 for channel '0' + * b0_ambpresent1 for channel '1' + * b1_ambpresent0 for channel '2' + * b1_ambpresent1 for channel '3' + */ +static int determine_amb_present_reg(struct i5000_pvt *pvt, int channel) +{ + int amb_present; + + if (channel < CHANNELS_PER_BRANCH) { + if (channel & 0x1) + amb_present = pvt->b0_ambpresent1; + else + amb_present = pvt->b0_ambpresent0; + } else { + if (channel & 0x1) + amb_present = pvt->b1_ambpresent1; + else + amb_present = pvt->b1_ambpresent0; + } + + return amb_present; +} + +/* + * determine_mtr(pvt, csrow, channel) + * + * return the proper MTR register as determine by the csrow and channel desired + */ +static int determine_mtr(struct i5000_pvt *pvt, int csrow, int channel) +{ + int mtr; + + if (channel < CHANNELS_PER_BRANCH) + mtr = pvt->b0_mtr[csrow >> 1]; + else + mtr = pvt->b1_mtr[csrow >> 1]; + + return mtr; +} + +/* + */ +static void decode_mtr(int slot_row, u16 mtr) +{ + int ans; + + ans = MTR_DIMMS_PRESENT(mtr); + + debugf2("\tMTR%d=0x%x: DIMMs are %s\n", slot_row, mtr, + ans ? "Present" : "NOT Present"); + if (!ans) + return; + + debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr)); + debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr)); + debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANK(mtr) ? "double" : "single"); + debugf2("\t\tNUMROW: %s\n", numrow_toString[MTR_DIMM_ROWS(mtr)]); + debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]); +} + +static void handle_channel(struct i5000_pvt *pvt, int csrow, int channel, + struct i5000_dimm_info *dinfo) +{ + int mtr; + int amb_present_reg; + int addrBits; + + mtr = determine_mtr(pvt, csrow, channel); + if (MTR_DIMMS_PRESENT(mtr)) { + amb_present_reg = determine_amb_present_reg(pvt, channel); + + /* Determine if there is a DIMM present in this DIMM slot */ + if (amb_present_reg & (1 << (csrow >> 1))) { + dinfo->dual_rank = MTR_DIMM_RANK(mtr); + + if (!((dinfo->dual_rank == 0) && + ((csrow & 0x1) == 0x1))) { + /* Start with the number of bits for a Bank + * on the DRAM */ + addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr); + /* Add thenumber of ROW bits */ + addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr); + /* add the number of COLUMN bits */ + addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr); + + addrBits += 6; /* add 64 bits per DIMM */ + addrBits -= 20; /* divide by 2^^20 */ + addrBits -= 3; /* 8 bits per bytes */ + + dinfo->megabytes = 1 << addrBits; + } + } + } +} + +/* + * calculate_dimm_size + * + * also will output a DIMM matrix map, if debug is enabled, for viewing + * how the DIMMs are populated + */ +static void calculate_dimm_size(struct i5000_pvt *pvt) +{ + struct i5000_dimm_info *dinfo; + int csrow, max_csrows; + char *p, *mem_buffer; + int space, n; + int channel; + + /* ================= Generate some debug output ================= */ + space = PAGE_SIZE; + mem_buffer = p = kmalloc(space, GFP_KERNEL); + if (p == NULL) { + i5000_printk(KERN_ERR, "MC: %s:%s() kmalloc() failed\n", + __FILE__, __func__); + return; + } + + n = snprintf(p, space, "\n"); + p += n; + space -= n; + + /* Scan all the actual CSROWS (which is # 
of DIMMS * 2) + * and calculate the information for each DIMM + * Start with the highest csrow first, to display it first + * and work toward the 0th csrow + */ + max_csrows = pvt->maxdimmperch * 2; + for (csrow = max_csrows - 1; csrow >= 0; csrow--) { + + /* on an odd csrow, first output a 'boundary' marker, + * then reset the message buffer */ + if (csrow & 0x1) { + n = snprintf(p, space, "---------------------------" + "--------------------------------"); + p += n; + space -= n; + debugf2("%s\n", mem_buffer); + p = mem_buffer; + space = PAGE_SIZE; + } + n = snprintf(p, space, "csrow %2d ", csrow); + p += n; + space -= n; + + for (channel = 0; channel < pvt->maxch; channel++) { + dinfo = &pvt->dimm_info[csrow][channel]; + handle_channel(pvt, csrow, channel, dinfo); + n = snprintf(p, space, "%4d MB | ", dinfo->megabytes); + p += n; + space -= n; + } + n = snprintf(p, space, "\n"); + p += n; + space -= n; + } + + /* Output the last bottom 'boundary' marker */ + n = snprintf(p, space, "---------------------------" + "--------------------------------\n"); + p += n; + space -= n; + + /* now output the 'channel' labels */ + n = snprintf(p, space, " "); + p += n; + space -= n; + for (channel = 0; channel < pvt->maxch; channel++) { + n = snprintf(p, space, "channel %d | ", channel); + p += n; + space -= n; + } + n = snprintf(p, space, "\n"); + p += n; + space -= n; + + /* output the last message and free buffer */ + debugf2("%s\n", mem_buffer); + kfree(mem_buffer); +} + +/* + * i5000_get_mc_regs read in the necessary registers and + * cache locally + * + * Fills in the private data members + */ +static void i5000_get_mc_regs(struct mem_ctl_info *mci) +{ + struct i5000_pvt *pvt; + u32 actual_tolm; + u16 limit; + int slot_row; + int maxch; + int maxdimmperch; + int way0, way1; + + pvt = mci->pvt_info; + + pci_read_config_dword(pvt->system_address, AMBASE, + (u32 *) & pvt->ambase); + pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32), + ((u32 *) & pvt->ambase) + sizeof(u32)); + + maxdimmperch = pvt->maxdimmperch; + maxch = pvt->maxch; + + debugf2("AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n", + (long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch); + + /* Get the Branch Map regs */ + pci_read_config_word(pvt->branchmap_werrors, TOLM, &pvt->tolm); + pvt->tolm >>= 12; + debugf2("\nTOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm, + pvt->tolm); + + actual_tolm = pvt->tolm << 28; + debugf2("Actual TOLM byte addr=%u (0x%x)\n", actual_tolm, actual_tolm); + + pci_read_config_word(pvt->branchmap_werrors, MIR0, &pvt->mir0); + pci_read_config_word(pvt->branchmap_werrors, MIR1, &pvt->mir1); + pci_read_config_word(pvt->branchmap_werrors, MIR2, &pvt->mir2); + + /* Get the MIR[0-2] regs */ + limit = (pvt->mir0 >> 4) & 0x0FFF; + way0 = pvt->mir0 & 0x1; + way1 = pvt->mir0 & 0x2; + debugf2("MIR0: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0); + limit = (pvt->mir1 >> 4) & 0x0FFF; + way0 = pvt->mir1 & 0x1; + way1 = pvt->mir1 & 0x2; + debugf2("MIR1: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0); + limit = (pvt->mir2 >> 4) & 0x0FFF; + way0 = pvt->mir2 & 0x1; + way1 = pvt->mir2 & 0x2; + debugf2("MIR2: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0); + + /* Get the MTR[0-3] regs */ + for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) { + int where = MTR0 + (slot_row * sizeof(u32)); + + pci_read_config_word(pvt->branch_0, where, + &pvt->b0_mtr[slot_row]); + + debugf2("MTR%d where=0x%x B0 value=0x%x\n", slot_row, where, + pvt->b0_mtr[slot_row]); + + if (pvt->maxch >= 
CHANNELS_PER_BRANCH) { + pci_read_config_word(pvt->branch_1, where, + &pvt->b1_mtr[slot_row]); + debugf2("MTR%d where=0x%x B1 value=0x%x\n", slot_row, + where, pvt->b0_mtr[slot_row]); + } else { + pvt->b1_mtr[slot_row] = 0; + } + } + + /* Read and dump branch 0's MTRs */ + debugf2("\nMemory Technology Registers:\n"); + debugf2(" Branch 0:\n"); + for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) { + decode_mtr(slot_row, pvt->b0_mtr[slot_row]); + } + pci_read_config_word(pvt->branch_0, AMB_PRESENT_0, + &pvt->b0_ambpresent0); + debugf2("\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0); + pci_read_config_word(pvt->branch_0, AMB_PRESENT_1, + &pvt->b0_ambpresent1); + debugf2("\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1); + + /* Only if we have 2 branchs (4 channels) */ + if (pvt->maxch < CHANNELS_PER_BRANCH) { + pvt->b1_ambpresent0 = 0; + pvt->b1_ambpresent1 = 0; + } else { + /* Read and dump branch 1's MTRs */ + debugf2(" Branch 1:\n"); + for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) { + decode_mtr(slot_row, pvt->b1_mtr[slot_row]); + } + pci_read_config_word(pvt->branch_1, AMB_PRESENT_0, + &pvt->b1_ambpresent0); + debugf2("\t\tAMB-Branch 1-present0 0x%x:\n", + pvt->b1_ambpresent0); + pci_read_config_word(pvt->branch_1, AMB_PRESENT_1, + &pvt->b1_ambpresent1); + debugf2("\t\tAMB-Branch 1-present1 0x%x:\n", + pvt->b1_ambpresent1); + } + + /* Go and determine the size of each DIMM and place in an + * orderly matrix */ + calculate_dimm_size(pvt); +} + +/* + * i5000_init_csrows Initialize the 'csrows' table within + * the mci control structure with the + * addressing of memory. + * + * return: + * 0 success + * 1 no actual memory found on this MC + */ +static int i5000_init_csrows(struct mem_ctl_info *mci) +{ + struct i5000_pvt *pvt; + struct csrow_info *p_csrow; + int empty, channel_count; + int max_csrows; + int mtr; + int csrow_megs; + int channel; + int csrow; + + pvt = mci->pvt_info; + + channel_count = pvt->maxch; + max_csrows = pvt->maxdimmperch * 2; + + empty = 1; /* Assume NO memory */ + + for (csrow = 0; csrow < max_csrows; csrow++) { + p_csrow = &mci->csrows[csrow]; + + p_csrow->csrow_idx = csrow; + + /* use branch 0 for the basis */ + mtr = pvt->b0_mtr[csrow >> 1]; + + /* if no DIMMS on this row, continue */ + if (!MTR_DIMMS_PRESENT(mtr)) + continue; + + /* FAKE OUT VALUES, FIXME */ + p_csrow->first_page = 0 + csrow * 20; + p_csrow->last_page = 9 + csrow * 20; + p_csrow->page_mask = 0xFFF; + + p_csrow->grain = 8; + + csrow_megs = 0; + for (channel = 0; channel < pvt->maxch; channel++) { + csrow_megs += pvt->dimm_info[csrow][channel].megabytes; + } + + p_csrow->nr_pages = csrow_megs << 8; + + /* Assume DDR2 for now */ + p_csrow->mtype = MEM_FB_DDR2; + + /* ask what device type on this row */ + if (MTR_DRAM_WIDTH(mtr)) + p_csrow->dtype = DEV_X8; + else + p_csrow->dtype = DEV_X4; + + p_csrow->edac_mode = EDAC_S8ECD8ED; + + empty = 0; + } + + return empty; +} + +/* + * i5000_enable_error_reporting + * Turn on the memory reporting features of the hardware + */ +static void i5000_enable_error_reporting(struct mem_ctl_info *mci) +{ + struct i5000_pvt *pvt; + u32 fbd_error_mask; + + pvt = mci->pvt_info; + + /* Read the FBD Error Mask Register */ + pci_read_config_dword(pvt->branchmap_werrors, EMASK_FBD, + &fbd_error_mask); + + /* Enable with a '0' */ + fbd_error_mask &= ~(ENABLE_EMASK_ALL); + + pci_write_config_dword(pvt->branchmap_werrors, EMASK_FBD, + fbd_error_mask); +} + +/* + * i5000_get_dimm_and_channel_counts(pdev, &num_csrows, &num_channels) + * + * ask the 
device how many channels are present and how many CSROWS + * as well + */ +static void i5000_get_dimm_and_channel_counts(struct pci_dev *pdev, + int *num_dimms_per_channel, + int *num_channels) +{ + u8 value; + + /* Need to retrieve just how many channels and dimms per channel are + * supported on this memory controller + */ + pci_read_config_byte(pdev, MAXDIMMPERCH, &value); + *num_dimms_per_channel = (int)value *2; + + pci_read_config_byte(pdev, MAXCH, &value); + *num_channels = (int)value; +} + +/* + * i5000_probe1 Probe for ONE instance of device to see if it is + * present. + * return: + * 0 for FOUND a device + * < 0 for error code + */ +static int i5000_probe1(struct pci_dev *pdev, int dev_idx) +{ + struct mem_ctl_info *mci; + struct i5000_pvt *pvt; + int num_channels; + int num_dimms_per_channel; + int num_csrows; + + debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n", + __func__, + pdev->bus->number, + PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); + + /* We only are looking for func 0 of the set */ + if (PCI_FUNC(pdev->devfn) != 0) + return -ENODEV; + + /* make sure error reporting method is sane */ + switch (edac_op_state) { + case EDAC_OPSTATE_POLL: + case EDAC_OPSTATE_NMI: + break; + default: + edac_op_state = EDAC_OPSTATE_POLL; + break; + } + + /* Ask the devices for the number of CSROWS and CHANNELS so + * that we can calculate the memory resources, etc + * + * The Chipset will report what it can handle which will be greater + * or equal to what the motherboard manufacturer will implement. + * + * As we don't have a motherboard identification routine to determine + * actual number of slots/dimms per channel, we thus utilize the + * resource as specified by the chipset. Thus, we might have + * have more DIMMs per channel than actually on the mobo, but this + * allows the driver to support upto the chipset max, without + * some fancy mobo determination. 
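Putting hypothetical numbers on the comment above: if MAXDIMMPERCH reads 2 and MAXCH reads 4, the i5000_get_dimm_and_channel_counts() helper reports 4 DIMM slots per channel and 4 channels, so the allocation that follows creates 4 * 2 = 8 csrows whether or not the board populates that many slots.

/* Worked example only, with invented register contents. */
	int num_dimms_per_channel = 2 * 2;		/* MAXDIMMPERCH = 2   */
	int num_channels = 4;				/* MAXCH = 4          */
	int num_csrows = num_dimms_per_channel * 2;	/* 8 csrows allocated */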
+ */ + i5000_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel, + &num_channels); + num_csrows = num_dimms_per_channel * 2; + + debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n", + __func__, num_channels, num_dimms_per_channel, num_csrows); + + /* allocate a new MC control structure */ + mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0); + + if (mci == NULL) + return -ENOMEM; + + debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci); + + mci->dev = &pdev->dev; /* record ptr to the generic device */ + + pvt = mci->pvt_info; + pvt->system_address = pdev; /* Record this device in our private */ + pvt->maxch = num_channels; + pvt->maxdimmperch = num_dimms_per_channel; + + /* 'get' the pci devices we want to reserve for our use */ + if (i5000_get_devices(mci, dev_idx)) + goto fail0; + + /* Time to get serious */ + i5000_get_mc_regs(mci); /* retrieve the hardware registers */ + + mci->mc_idx = 0; + mci->mtype_cap = MEM_FLAG_FB_DDR2; + mci->edac_ctl_cap = EDAC_FLAG_NONE; + mci->edac_cap = EDAC_FLAG_NONE; + mci->mod_name = "i5000_edac.c"; + mci->mod_ver = I5000_REVISION; + mci->ctl_name = i5000_devs[dev_idx].ctl_name; + mci->dev_name = pci_name(pdev); + mci->ctl_page_to_phys = NULL; + + /* Set the function pointer to an actual operation function */ + mci->edac_check = i5000_check_error; + + /* initialize the MC control structure 'csrows' table + * with the mapping and control information */ + if (i5000_init_csrows(mci)) { + debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n" + " because i5000_init_csrows() returned nonzero " + "value\n"); + mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */ + } else { + debugf1("MC: Enable error reporting now\n"); + i5000_enable_error_reporting(mci); + } + + /* add this new MC control structure to EDAC's list of MCs */ + if (edac_mc_add_mc(mci)) { + debugf0("MC: " __FILE__ + ": %s(): failed edac_mc_add_mc()\n", __func__); + /* FIXME: perhaps some code should go here that disables error + * reporting if we just enabled it + */ + goto fail1; + } + + i5000_clear_error(mci); + + /* allocating generic PCI control info */ + i5000_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR); + if (!i5000_pci) { + printk(KERN_WARNING + "%s(): Unable to create PCI control\n", + __func__); + printk(KERN_WARNING + "%s(): PCI error report via EDAC not setup\n", + __func__); + } + + return 0; + + /* Error exit unwinding stack */ +fail1: + + i5000_put_devices(mci); + +fail0: + edac_mc_free(mci); + return -ENODEV; +} + +/* + * i5000_init_one constructor for one instance of device + * + * returns: + * negative on error + * count (>= 0) + */ +static int __devinit i5000_init_one(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + int rc; + + debugf0("MC: " __FILE__ ": %s()\n", __func__); + + /* wake up device */ + rc = pci_enable_device(pdev); + if (rc == -EIO) + return rc; + + /* now probe and enable the device */ + return i5000_probe1(pdev, id->driver_data); +} + +/* + * i5000_remove_one destructor for one instance of device + * + */ +static void __devexit i5000_remove_one(struct pci_dev *pdev) +{ + struct mem_ctl_info *mci; + + debugf0(__FILE__ ": %s()\n", __func__); + + if (i5000_pci) + edac_pci_release_generic_ctl(i5000_pci); + + if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL) + return; + + /* retrieve references to resources, and free those resources */ + i5000_put_devices(mci); + + edac_mc_free(mci); +} + +/* + * pci_device_id table for which devices we are looking for + * + * The "E500P" device is the first device 
supported. + */ +static const struct pci_device_id i5000_pci_tbl[] __devinitdata = { + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16), + .driver_data = I5000P}, + + {0,} /* 0 terminated list. */ +}; + +MODULE_DEVICE_TABLE(pci, i5000_pci_tbl); + +/* + * i5000_driver pci_driver structure for this module + * + */ +static struct pci_driver i5000_driver = { + .name = __stringify(KBUILD_BASENAME), + .probe = i5000_init_one, + .remove = __devexit_p(i5000_remove_one), + .id_table = i5000_pci_tbl, +}; + +/* + * i5000_init Module entry function + * Try to initialize this module for its devices + */ +static int __init i5000_init(void) +{ + int pci_rc; + + debugf2("MC: " __FILE__ ": %s()\n", __func__); + + pci_rc = pci_register_driver(&i5000_driver); + + return (pci_rc < 0) ? pci_rc : 0; +} + +/* + * i5000_exit() Module exit function + * Unregister the driver + */ +static void __exit i5000_exit(void) +{ + debugf2("MC: " __FILE__ ": %s()\n", __func__); + pci_unregister_driver(&i5000_driver); +} + +module_init(i5000_init); +module_exit(i5000_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR + ("Linux Networx (http://lnxi.com) Doug Thompson <norsk5@xmission.com>"); +MODULE_DESCRIPTION("MC Driver for Intel I5000 memory controllers - " + I5000_REVISION); +module_param(edac_op_state, int, 0444); +MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c new file mode 100644 index 000000000000..83bfe37c4bbb --- /dev/null +++ b/drivers/edac/i82443bxgx_edac.c @@ -0,0 +1,402 @@ +/* + * Intel 82443BX/GX (440BX/GX chipset) Memory Controller EDAC kernel + * module (C) 2006 Tim Small + * + * This file may be distributed under the terms of the GNU General + * Public License. + * + * Written by Tim Small <tim@buttersideup.com>, based on work by Linux + * Networx, Thayne Harbaugh, Dan Hollis <goemon at anime dot net> and + * others. + * + * 440GX fix by Jason Uhlenkott <juhlenko@akamai.com>. + * + * Written with reference to 82443BX Host Bridge Datasheet: + * http://www.intel.com/design/chipsets/440/documentation.htm + * references to this document given in []. + * + * This module doesn't support the 440LX, but it may be possible to + * make it do so (the 440LX's register definitions are different, but + * not completely so - I haven't studied them in enough detail to know + * how easy this would be). + */ + +#include <linux/module.h> +#include <linux/init.h> + +#include <linux/pci.h> +#include <linux/pci_ids.h> + +#include <linux/slab.h> + +#include "edac_core.h" + +#define I82443_REVISION "0.1" + +#define EDAC_MOD_STR "i82443bxgx_edac" + +/* The 82443BX supports SDRAM, or EDO (EDO for mobile only), "Memory + * Size: 8 MB to 512 MB (1GB with Registered DIMMs) with eight memory + * rows" "The 82443BX supports multiple-bit error detection and + * single-bit error correction when ECC mode is enabled and + * single/multi-bit error detection when correction is disabled. + * During writes to the DRAM, the 82443BX generates ECC for the data + * on a QWord basis. Partial QWord writes require a read-modify-write + * cycle when ECC is enabled." +*/ + +/* "Additionally, the 82443BX ensures that the data is corrected in + * main memory so that accumulation of errors is prevented. Another + * error within the same QWord would result in a double-bit error + * which is unrecoverable. This is known as hardware scrubbing since + * it requires no software intervention to correct the data in memory." 
+ */ + +/* [Also see page 100 (section 4.3), "DRAM Interface"] + * [Also see page 112 (section 4.6.1.4), ECC] + */ + +#define I82443BXGX_NR_CSROWS 8 +#define I82443BXGX_NR_CHANS 1 +#define I82443BXGX_NR_DIMMS 4 + +/* 82443 PCI Device 0 */ +#define I82443BXGX_NBXCFG 0x50 /* 32bit register starting at this PCI + * config space offset */ +#define I82443BXGX_NBXCFG_OFFSET_NON_ECCROW 24 /* Array of bits, zero if + * row is non-ECC */ +#define I82443BXGX_NBXCFG_OFFSET_DRAM_FREQ 12 /* 2 bits,00=100MHz,10=66 MHz */ + +#define I82443BXGX_NBXCFG_OFFSET_DRAM_INTEGRITY 7 /* 2 bits: */ +#define I82443BXGX_NBXCFG_INTEGRITY_NONE 0x0 /* 00 = Non-ECC */ +#define I82443BXGX_NBXCFG_INTEGRITY_EC 0x1 /* 01 = EC (only) */ +#define I82443BXGX_NBXCFG_INTEGRITY_ECC 0x2 /* 10 = ECC */ +#define I82443BXGX_NBXCFG_INTEGRITY_SCRUB 0x3 /* 11 = ECC + HW Scrub */ + +#define I82443BXGX_NBXCFG_OFFSET_ECC_DIAG_ENABLE 6 + +/* 82443 PCI Device 0 */ +#define I82443BXGX_EAP 0x80 /* 32bit register starting at this PCI + * config space offset, Error Address + * Pointer Register */ +#define I82443BXGX_EAP_OFFSET_EAP 12 /* High 20 bits of error address */ +#define I82443BXGX_EAP_OFFSET_MBE BIT(1) /* Err at EAP was multi-bit (W1TC) */ +#define I82443BXGX_EAP_OFFSET_SBE BIT(0) /* Err at EAP was single-bit (W1TC) */ + +#define I82443BXGX_ERRCMD 0x90 /* 8bit register starting at this PCI + * config space offset. */ +#define I82443BXGX_ERRCMD_OFFSET_SERR_ON_MBE BIT(1) /* 1 = enable */ +#define I82443BXGX_ERRCMD_OFFSET_SERR_ON_SBE BIT(0) /* 1 = enable */ + +#define I82443BXGX_ERRSTS 0x91 /* 16bit register starting at this PCI + * config space offset. */ +#define I82443BXGX_ERRSTS_OFFSET_MBFRE 5 /* 3 bits - first err row multibit */ +#define I82443BXGX_ERRSTS_OFFSET_MEF BIT(4) /* 1 = MBE occurred */ +#define I82443BXGX_ERRSTS_OFFSET_SBFRE 1 /* 3 bits - first err row singlebit */ +#define I82443BXGX_ERRSTS_OFFSET_SEF BIT(0) /* 1 = SBE occurred */ + +#define I82443BXGX_DRAMC 0x57 /* 8bit register starting at this PCI + * config space offset. */ +#define I82443BXGX_DRAMC_OFFSET_DT 3 /* 2 bits, DRAM Type */ +#define I82443BXGX_DRAMC_DRAM_IS_EDO 0 /* 00 = EDO */ +#define I82443BXGX_DRAMC_DRAM_IS_SDRAM 1 /* 01 = SDRAM */ +#define I82443BXGX_DRAMC_DRAM_IS_RSDRAM 2 /* 10 = Registered SDRAM */ + +#define I82443BXGX_DRB 0x60 /* 8x 8bit registers starting at this PCI + * config space offset. */ + +/* FIXME - don't poll when ECC disabled? 
*/ + +struct i82443bxgx_edacmc_error_info { + u32 eap; +}; + +static struct edac_pci_ctl_info *i82443bxgx_pci; + +static void i82443bxgx_edacmc_get_error_info(struct mem_ctl_info *mci, + struct i82443bxgx_edacmc_error_info + *info) +{ + struct pci_dev *pdev; + pdev = to_pci_dev(mci->dev); + pci_read_config_dword(pdev, I82443BXGX_EAP, &info->eap); + if (info->eap & I82443BXGX_EAP_OFFSET_SBE) + /* Clear error to allow next error to be reported [p.61] */ + pci_write_bits32(pdev, I82443BXGX_EAP, + I82443BXGX_EAP_OFFSET_SBE, + I82443BXGX_EAP_OFFSET_SBE); + + if (info->eap & I82443BXGX_EAP_OFFSET_MBE) + /* Clear error to allow next error to be reported [p.61] */ + pci_write_bits32(pdev, I82443BXGX_EAP, + I82443BXGX_EAP_OFFSET_MBE, + I82443BXGX_EAP_OFFSET_MBE); +} + +static int i82443bxgx_edacmc_process_error_info(struct mem_ctl_info *mci, + struct + i82443bxgx_edacmc_error_info + *info, int handle_errors) +{ + int error_found = 0; + u32 eapaddr, page, pageoffset; + + /* bits 30:12 hold the 4kb block in which the error occurred + * [p.61] */ + eapaddr = (info->eap & 0xfffff000); + page = eapaddr >> PAGE_SHIFT; + pageoffset = eapaddr - (page << PAGE_SHIFT); + + if (info->eap & I82443BXGX_EAP_OFFSET_SBE) { + error_found = 1; + if (handle_errors) + edac_mc_handle_ce(mci, page, pageoffset, + /* 440BX/GX don't make syndrome information + * available */ + 0, edac_mc_find_csrow_by_page(mci, page), 0, + mci->ctl_name); + } + + if (info->eap & I82443BXGX_EAP_OFFSET_MBE) { + error_found = 1; + if (handle_errors) + edac_mc_handle_ue(mci, page, pageoffset, + edac_mc_find_csrow_by_page(mci, page), + mci->ctl_name); + } + + return error_found; +} + +static void i82443bxgx_edacmc_check(struct mem_ctl_info *mci) +{ + struct i82443bxgx_edacmc_error_info info; + + debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); + i82443bxgx_edacmc_get_error_info(mci, &info); + i82443bxgx_edacmc_process_error_info(mci, &info, 1); +} + +static void i82443bxgx_init_csrows(struct mem_ctl_info *mci, + struct pci_dev *pdev, + enum edac_type edac_mode, + enum mem_type mtype) +{ + struct csrow_info *csrow; + int index; + u8 drbar, dramc; + u32 row_base, row_high_limit, row_high_limit_last; + + pci_read_config_byte(pdev, I82443BXGX_DRAMC, &dramc); + row_high_limit_last = 0; + for (index = 0; index < mci->nr_csrows; index++) { + csrow = &mci->csrows[index]; + pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar); + debugf1("MC%d: " __FILE__ ": %s() Row=%d DRB = %#0x\n", + mci->mc_idx, __func__, index, drbar); + row_high_limit = ((u32) drbar << 23); + /* find the DRAM Chip Select Base address and mask */ + debugf1("MC%d: " __FILE__ ": %s() Row=%d, " + "Boundry Address=%#0x, Last = %#0x \n", + mci->mc_idx, __func__, index, row_high_limit, + row_high_limit_last); + + /* 440GX goes to 2GB, represented with a DRB of 0. */ + if (row_high_limit_last && !row_high_limit) + row_high_limit = 1UL << 31; + + /* This row is empty [p.49] */ + if (row_high_limit == row_high_limit_last) + continue; + row_base = row_high_limit_last; + csrow->first_page = row_base >> PAGE_SHIFT; + csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1; + csrow->nr_pages = csrow->last_page - csrow->first_page + 1; + /* EAP reports in 4kilobyte granularity [61] */ + csrow->grain = 1 << 12; + csrow->mtype = mtype; + /* I don't think 440BX can tell you device type? FIXME? 
*/ + csrow->dtype = DEV_UNKNOWN; + /* Mode is global to all rows on 440BX */ + csrow->edac_mode = edac_mode; + row_high_limit_last = row_high_limit; + } +} + +static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx) +{ + struct mem_ctl_info *mci; + u8 dramc; + u32 nbxcfg, ecc_mode; + enum mem_type mtype; + enum edac_type edac_mode; + + debugf0("MC: " __FILE__ ": %s()\n", __func__); + + /* Something is really hosed if PCI config space reads from + * the MC aren't working. + */ + if (pci_read_config_dword(pdev, I82443BXGX_NBXCFG, &nbxcfg)) + return -EIO; + + mci = edac_mc_alloc(0, I82443BXGX_NR_CSROWS, I82443BXGX_NR_CHANS, 0); + + if (mci == NULL) + return -ENOMEM; + + debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci); + mci->dev = &pdev->dev; + mci->mtype_cap = MEM_FLAG_EDO | MEM_FLAG_SDR | MEM_FLAG_RDR; + mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; + pci_read_config_byte(pdev, I82443BXGX_DRAMC, &dramc); + switch ((dramc >> I82443BXGX_DRAMC_OFFSET_DT) & (BIT(0) | BIT(1))) { + case I82443BXGX_DRAMC_DRAM_IS_EDO: + mtype = MEM_EDO; + break; + case I82443BXGX_DRAMC_DRAM_IS_SDRAM: + mtype = MEM_SDR; + break; + case I82443BXGX_DRAMC_DRAM_IS_RSDRAM: + mtype = MEM_RDR; + break; + default: + debugf0("Unknown/reserved DRAM type value " + "in DRAMC register!\n"); + mtype = -MEM_UNKNOWN; + } + + if ((mtype == MEM_SDR) || (mtype == MEM_RDR)) + mci->edac_cap = mci->edac_ctl_cap; + else + mci->edac_cap = EDAC_FLAG_NONE; + + mci->scrub_cap = SCRUB_FLAG_HW_SRC; + pci_read_config_dword(pdev, I82443BXGX_NBXCFG, &nbxcfg); + ecc_mode = ((nbxcfg >> I82443BXGX_NBXCFG_OFFSET_DRAM_INTEGRITY) & + (BIT(0) | BIT(1))); + + mci->scrub_mode = (ecc_mode == I82443BXGX_NBXCFG_INTEGRITY_SCRUB) + ? SCRUB_HW_SRC : SCRUB_NONE; + + switch (ecc_mode) { + case I82443BXGX_NBXCFG_INTEGRITY_NONE: + edac_mode = EDAC_NONE; + break; + case I82443BXGX_NBXCFG_INTEGRITY_EC: + edac_mode = EDAC_EC; + break; + case I82443BXGX_NBXCFG_INTEGRITY_ECC: + case I82443BXGX_NBXCFG_INTEGRITY_SCRUB: + edac_mode = EDAC_SECDED; + break; + default: + debugf0("%s(): Unknown/reserved ECC state " + "in NBXCFG register!\n", __func__); + edac_mode = EDAC_UNKNOWN; + break; + } + + i82443bxgx_init_csrows(mci, pdev, edac_mode, mtype); + + /* Many BIOSes don't clear error flags on boot, so do this + * here, or we get "phantom" errors occuring at module-load + * time. 
*/ + pci_write_bits32(pdev, I82443BXGX_EAP, + (I82443BXGX_EAP_OFFSET_SBE | + I82443BXGX_EAP_OFFSET_MBE), + (I82443BXGX_EAP_OFFSET_SBE | + I82443BXGX_EAP_OFFSET_MBE)); + + mci->mod_name = EDAC_MOD_STR; + mci->mod_ver = I82443_REVISION; + mci->ctl_name = "I82443BXGX"; + mci->dev_name = pci_name(pdev); + mci->edac_check = i82443bxgx_edacmc_check; + mci->ctl_page_to_phys = NULL; + + if (edac_mc_add_mc(mci)) { + debugf3("%s(): failed edac_mc_add_mc()\n", __func__); + goto fail; + } + + /* allocating generic PCI control info */ + i82443bxgx_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR); + if (!i82443bxgx_pci) { + printk(KERN_WARNING + "%s(): Unable to create PCI control\n", + __func__); + printk(KERN_WARNING + "%s(): PCI error report via EDAC not setup\n", + __func__); + } + + debugf3("MC: " __FILE__ ": %s(): success\n", __func__); + return 0; + +fail: + edac_mc_free(mci); + return -ENODEV; +} + +EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_probe1); + +/* returns count (>= 0), or negative on error */ +static int __devinit i82443bxgx_edacmc_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + debugf0("MC: " __FILE__ ": %s()\n", __func__); + + /* don't need to call pci_device_enable() */ + return i82443bxgx_edacmc_probe1(pdev, ent->driver_data); +} + +static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev) +{ + struct mem_ctl_info *mci; + + debugf0(__FILE__ ": %s()\n", __func__); + + if (i82443bxgx_pci) + edac_pci_release_generic_ctl(i82443bxgx_pci); + + if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL) + return; + + edac_mc_free(mci); +} + +EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one); + +static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = { + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_2)}, + {0,} /* 0 terminated list. */ +}; + +MODULE_DEVICE_TABLE(pci, i82443bxgx_pci_tbl); + +static struct pci_driver i82443bxgx_edacmc_driver = { + .name = EDAC_MOD_STR, + .probe = i82443bxgx_edacmc_init_one, + .remove = __devexit_p(i82443bxgx_edacmc_remove_one), + .id_table = i82443bxgx_pci_tbl, +}; + +static int __init i82443bxgx_edacmc_init(void) +{ + return pci_register_driver(&i82443bxgx_edacmc_driver); +} + +static void __exit i82443bxgx_edacmc_exit(void) +{ + pci_unregister_driver(&i82443bxgx_edacmc_driver); +} + +module_init(i82443bxgx_edacmc_init); +module_exit(i82443bxgx_edacmc_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD"); +MODULE_DESCRIPTION("EDAC MC support for Intel 82443BX/GX memory controllers"); diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c index e4bb298e613f..f5ecd2c4d813 100644 --- a/drivers/edac/i82860_edac.c +++ b/drivers/edac/i82860_edac.c @@ -14,9 +14,9 @@ #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/slab.h> -#include "edac_mc.h" +#include "edac_core.h" -#define I82860_REVISION " Ver: 2.0.1 " __DATE__ +#define I82860_REVISION " Ver: 2.0.2 " __DATE__ #define EDAC_MOD_STR "i82860_edac" #define i82860_printk(level, fmt, arg...) 
\ @@ -54,16 +54,16 @@ struct i82860_error_info { static const struct i82860_dev_info i82860_devs[] = { [I82860] = { - .ctl_name = "i82860" - }, + .ctl_name = "i82860"}, }; -static struct pci_dev *mci_pdev = NULL; /* init dev: in case that AGP code +static struct pci_dev *mci_pdev; /* init dev: in case that AGP code * has already registered driver */ +static struct edac_pci_ctl_info *i82860_pci; static void i82860_get_error_info(struct mem_ctl_info *mci, - struct i82860_error_info *info) + struct i82860_error_info *info) { struct pci_dev *pdev; @@ -91,13 +91,13 @@ static void i82860_get_error_info(struct mem_ctl_info *mci, if ((info->errsts ^ info->errsts2) & 0x0003) { pci_read_config_dword(pdev, I82860_EAP, &info->eap); - pci_read_config_word(pdev, I82860_DERRCTL_STS, - &info->derrsyn); + pci_read_config_word(pdev, I82860_DERRCTL_STS, &info->derrsyn); } } static int i82860_process_error_info(struct mem_ctl_info *mci, - struct i82860_error_info *info, int handle_errors) + struct i82860_error_info *info, + int handle_errors) { int row; @@ -136,7 +136,7 @@ static void i82860_check(struct mem_ctl_info *mci) static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev) { unsigned long last_cumul_size; - u16 mchcfg_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */ + u16 mchcfg_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */ u16 value; u32 cumul_size; struct csrow_info *csrow; @@ -155,7 +155,7 @@ static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev) csrow = &mci->csrows[index]; pci_read_config_word(pdev, I82860_GBA + index * 2, &value); cumul_size = (value & I82860_GBA_MASK) << - (I82860_GBA_SHIFT - PAGE_SHIFT); + (I82860_GBA_SHIFT - PAGE_SHIFT); debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, cumul_size); @@ -186,7 +186,7 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx) the channel and the GRA registers map to physical devices so we are going to make 1 channel for group. */ - mci = edac_mc_alloc(0, 16, 1); + mci = edac_mc_alloc(0, 16, 1, 0); if (!mci) return -ENOMEM; @@ -200,19 +200,31 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx) mci->mod_name = EDAC_MOD_STR; mci->mod_ver = I82860_REVISION; mci->ctl_name = i82860_devs[dev_idx].ctl_name; + mci->dev_name = pci_name(pdev); mci->edac_check = i82860_check; mci->ctl_page_to_phys = NULL; i82860_init_csrows(mci, pdev); - i82860_get_error_info(mci, &discard); /* clear counters */ + i82860_get_error_info(mci, &discard); /* clear counters */ /* Here we assume that we will never see multiple instances of this * type of memory controller. The ID is therefore hardcoded to 0. 
*/ - if (edac_mc_add_mc(mci,0)) { + if (edac_mc_add_mc(mci)) { debugf3("%s(): failed edac_mc_add_mc()\n", __func__); goto fail; } + /* allocating generic PCI control info */ + i82860_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR); + if (!i82860_pci) { + printk(KERN_WARNING + "%s(): Unable to create PCI control\n", + __func__); + printk(KERN_WARNING + "%s(): PCI error report via EDAC not setup\n", + __func__); + } + /* get this far and it's successful */ debugf3("%s(): success\n", __func__); @@ -225,7 +237,7 @@ fail: /* returns count (>= 0), or negative on error */ static int __devinit i82860_init_one(struct pci_dev *pdev, - const struct pci_device_id *ent) + const struct pci_device_id *ent) { int rc; @@ -249,6 +261,9 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev) debugf0("%s()\n", __func__); + if (i82860_pci) + edac_pci_release_generic_ctl(i82860_pci); + if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL) return; @@ -257,12 +272,11 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev) static const struct pci_device_id i82860_pci_tbl[] __devinitdata = { { - PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, - I82860 - }, + PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, + I82860}, { - 0, - } /* 0 terminated list. */ + 0, + } /* 0 terminated list. */ }; MODULE_DEVICE_TABLE(pci, i82860_pci_tbl); @@ -329,5 +343,5 @@ module_exit(i82860_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com) " - "Ben Woodard <woodard@redhat.com>"); + "Ben Woodard <woodard@redhat.com>"); MODULE_DESCRIPTION("ECC support for Intel 82860 memory hub controllers"); diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c index 2800b3e614a9..031abadc439a 100644 --- a/drivers/edac/i82875p_edac.c +++ b/drivers/edac/i82875p_edac.c @@ -18,9 +18,9 @@ #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/slab.h> -#include "edac_mc.h" +#include "edac_core.h" -#define I82875P_REVISION " Ver: 2.0.1 " __DATE__ +#define I82875P_REVISION " Ver: 2.0.2 " __DATE__ #define EDAC_MOD_STR "i82875p_edac" #define i82875p_printk(level, fmt, arg...) \ @@ -174,18 +174,19 @@ struct i82875p_error_info { static const struct i82875p_dev_info i82875p_devs[] = { [I82875P] = { - .ctl_name = "i82875p" - }, + .ctl_name = "i82875p"}, }; -static struct pci_dev *mci_pdev = NULL; /* init dev: in case that AGP code has +static struct pci_dev *mci_pdev; /* init dev: in case that AGP code has * already registered driver */ static int i82875p_registered = 1; +static struct edac_pci_ctl_info *i82875p_pci; + static void i82875p_get_error_info(struct mem_ctl_info *mci, - struct i82875p_error_info *info) + struct i82875p_error_info *info) { struct pci_dev *pdev; @@ -197,38 +198,39 @@ static void i82875p_get_error_info(struct mem_ctl_info *mci, * overwritten by UE. */ pci_read_config_word(pdev, I82875P_ERRSTS, &info->errsts); + + if (!(info->errsts & 0x0081)) + return; + pci_read_config_dword(pdev, I82875P_EAP, &info->eap); pci_read_config_byte(pdev, I82875P_DES, &info->des); pci_read_config_byte(pdev, I82875P_DERRSYN, &info->derrsyn); pci_read_config_word(pdev, I82875P_ERRSTS, &info->errsts2); - pci_write_bits16(pdev, I82875P_ERRSTS, 0x0081, 0x0081); - /* * If the error is the same then we can for both reads then * the first set of reads is valid. If there is a change then * there is a CE no info and the second set of reads is valid * and should be UE info. 
*/ - if (!(info->errsts2 & 0x0081)) - return; - if ((info->errsts ^ info->errsts2) & 0x0081) { pci_read_config_dword(pdev, I82875P_EAP, &info->eap); pci_read_config_byte(pdev, I82875P_DES, &info->des); - pci_read_config_byte(pdev, I82875P_DERRSYN, - &info->derrsyn); + pci_read_config_byte(pdev, I82875P_DERRSYN, &info->derrsyn); } + + pci_write_bits16(pdev, I82875P_ERRSTS, 0x0081, 0x0081); } static int i82875p_process_error_info(struct mem_ctl_info *mci, - struct i82875p_error_info *info, int handle_errors) + struct i82875p_error_info *info, + int handle_errors) { int row, multi_chan; multi_chan = mci->csrows[0].nr_channels - 1; - if (!(info->errsts2 & 0x0081)) + if (!(info->errsts & 0x0081)) return 0; if (!handle_errors) @@ -263,10 +265,12 @@ static void i82875p_check(struct mem_ctl_info *mci) /* Return 0 on success or 1 on failure. */ static int i82875p_setup_overfl_dev(struct pci_dev *pdev, - struct pci_dev **ovrfl_pdev, void __iomem **ovrfl_window) + struct pci_dev **ovrfl_pdev, + void __iomem **ovrfl_window) { struct pci_dev *dev; void __iomem *window; + int err; *ovrfl_pdev = NULL; *ovrfl_window = NULL; @@ -284,14 +288,19 @@ static int i82875p_setup_overfl_dev(struct pci_dev *pdev, if (dev == NULL) return 1; - pci_bus_add_device(dev); + err = pci_bus_add_device(dev); + if (err) { + i82875p_printk(KERN_ERR, + "%s(): pci_bus_add_device() Failed\n", + __func__); + } } *ovrfl_pdev = dev; if (pci_enable_device(dev)) { i82875p_printk(KERN_ERR, "%s(): Failed to enable overflow " - "device\n", __func__); + "device\n", __func__); return 1; } @@ -307,7 +316,7 @@ static int i82875p_setup_overfl_dev(struct pci_dev *pdev, if (window == NULL) { i82875p_printk(KERN_ERR, "%s(): Failed to ioremap bar6\n", - __func__); + __func__); goto fail1; } @@ -325,21 +334,20 @@ fail0: return 1; } - /* Return 1 if dual channel mode is active. Else return 0. */ static inline int dual_channel_active(u32 drc) { return (drc >> 21) & 0x1; } - static void i82875p_init_csrows(struct mem_ctl_info *mci, - struct pci_dev *pdev, void __iomem *ovrfl_window, u32 drc) + struct pci_dev *pdev, + void __iomem * ovrfl_window, u32 drc) { struct csrow_info *csrow; unsigned long last_cumul_size; u8 value; - u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ + u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ u32 cumul_size; int index; @@ -392,7 +400,7 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx) drc = readl(ovrfl_window + I82875P_DRC); nr_chans = dual_channel_active(drc) + 1; mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans), - nr_chans); + nr_chans, 0); if (!mci) { rc = -ENOMEM; @@ -407,23 +415,35 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx) mci->mod_name = EDAC_MOD_STR; mci->mod_ver = I82875P_REVISION; mci->ctl_name = i82875p_devs[dev_idx].ctl_name; + mci->dev_name = pci_name(pdev); mci->edac_check = i82875p_check; mci->ctl_page_to_phys = NULL; debugf3("%s(): init pvt\n", __func__); - pvt = (struct i82875p_pvt *) mci->pvt_info; + pvt = (struct i82875p_pvt *)mci->pvt_info; pvt->ovrfl_pdev = ovrfl_pdev; pvt->ovrfl_window = ovrfl_window; i82875p_init_csrows(mci, pdev, ovrfl_window, drc); - i82875p_get_error_info(mci, &discard); /* clear counters */ + i82875p_get_error_info(mci, &discard); /* clear counters */ /* Here we assume that we will never see multiple instances of this * type of memory controller. The ID is therefore hardcoded to 0. 
*/ - if (edac_mc_add_mc(mci,0)) { + if (edac_mc_add_mc(mci)) { debugf3("%s(): failed edac_mc_add_mc()\n", __func__); goto fail1; } + /* allocating generic PCI control info */ + i82875p_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR); + if (!i82875p_pci) { + printk(KERN_WARNING + "%s(): Unable to create PCI control\n", + __func__); + printk(KERN_WARNING + "%s(): PCI error report via EDAC not setup\n", + __func__); + } + /* get this far and it's successful */ debugf3("%s(): success\n", __func__); return 0; @@ -442,7 +462,7 @@ fail0: /* returns count (>= 0), or negative on error */ static int __devinit i82875p_init_one(struct pci_dev *pdev, - const struct pci_device_id *ent) + const struct pci_device_id *ent) { int rc; @@ -467,10 +487,13 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev) debugf0("%s()\n", __func__); + if (i82875p_pci) + edac_pci_release_generic_ctl(i82875p_pci); + if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL) return; - pvt = (struct i82875p_pvt *) mci->pvt_info; + pvt = (struct i82875p_pvt *)mci->pvt_info; if (pvt->ovrfl_window) iounmap(pvt->ovrfl_window); @@ -488,12 +511,11 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev) static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = { { - PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, - I82875P - }, + PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, + I82875P}, { - 0, - } /* 0 terminated list. */ + 0, + } /* 0 terminated list. */ }; MODULE_DEVICE_TABLE(pci, i82875p_pci_tbl); @@ -517,7 +539,7 @@ static int __init i82875p_init(void) if (mci_pdev == NULL) { mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_82875_0, NULL); + PCI_DEVICE_ID_INTEL_82875_0, NULL); if (!mci_pdev) { debugf0("875p pci_get_device fail\n"); diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c new file mode 100644 index 000000000000..0ee888456932 --- /dev/null +++ b/drivers/edac/i82975x_edac.c @@ -0,0 +1,666 @@ +/* + * Intel 82975X Memory Controller kernel module + * (C) 2007 aCarLab (India) Pvt. Ltd. (http://acarlab.com) + * (C) 2007 jetzbroadband (http://jetzbroadband.com) + * This file may be distributed under the terms of the + * GNU General Public License. + * + * Written by Arvind R. + * Copied from i82875p_edac.c source: + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/pci_ids.h> +#include <linux/slab.h> + +#include "edac_core.h" + +#define I82975X_REVISION " Ver: 1.0.0 " __DATE__ +#define EDAC_MOD_STR "i82975x_edac" + +#define i82975x_printk(level, fmt, arg...) \ + edac_printk(level, "i82975x", fmt, ##arg) + +#define i82975x_mc_printk(mci, level, fmt, arg...) \ + edac_mc_chipset_printk(mci, level, "i82975x", fmt, ##arg) + +#ifndef PCI_DEVICE_ID_INTEL_82975_0 +#define PCI_DEVICE_ID_INTEL_82975_0 0x277c +#endif /* PCI_DEVICE_ID_INTEL_82975_0 */ + +#define I82975X_NR_CSROWS(nr_chans) (8/(nr_chans)) + +/* Intel 82975X register addresses - device 0 function 0 - DRAM Controller */ +#define I82975X_EAP 0x58 /* Dram Error Address Pointer (32b) + * + * 31:7 128 byte cache-line address + * 6:1 reserved + * 0 0: CH0; 1: CH1 + */ + +#define I82975X_DERRSYN 0x5c /* Dram Error SYNdrome (8b) + * + * 7:0 DRAM ECC Syndrome + */ + +#define I82975X_DES 0x5d /* Dram ERRor DeSTination (8b) + * 0h: Processor Memory Reads + * 1h:7h reserved + * More - See Page 65 of Intel DocSheet. 
+ */ + +#define I82975X_ERRSTS 0xc8 /* Error Status Register (16b) + * + * 15:12 reserved + * 11 Thermal Sensor Event + * 10 reserved + * 9 non-DRAM lock error (ndlock) + * 8 Refresh Timeout + * 7:2 reserved + * 1 ECC UE (multibit DRAM error) + * 0 ECC CE (singlebit DRAM error) + */ + +/* Error Reporting is supported by 3 mechanisms: + 1. DMI SERR generation ( ERRCMD ) + 2. SMI DMI generation ( SMICMD ) + 3. SCI DMI generation ( SCICMD ) +NOTE: Only ONE of the three must be enabled +*/ +#define I82975X_ERRCMD 0xca /* Error Command (16b) + * + * 15:12 reserved + * 11 Thermal Sensor Event + * 10 reserved + * 9 non-DRAM lock error (ndlock) + * 8 Refresh Timeout + * 7:2 reserved + * 1 ECC UE (multibit DRAM error) + * 0 ECC CE (singlebit DRAM error) + */ + +#define I82975X_SMICMD 0xcc /* Error Command (16b) + * + * 15:2 reserved + * 1 ECC UE (multibit DRAM error) + * 0 ECC CE (singlebit DRAM error) + */ + +#define I82975X_SCICMD 0xce /* Error Command (16b) + * + * 15:2 reserved + * 1 ECC UE (multibit DRAM error) + * 0 ECC CE (singlebit DRAM error) + */ + +#define I82975X_XEAP 0xfc /* Extended Dram Error Address Pointer (8b) + * + * 7:1 reserved + * 0 Bit32 of the Dram Error Address + */ + +#define I82975X_MCHBAR 0x44 /* + * + * 31:14 Base Addr of 16K memory-mapped + * configuration space + * 13:1 reserverd + * 0 mem-mapped config space enable + */ + +/* NOTE: Following addresses have to indexed using MCHBAR offset (44h, 32b) */ +/* Intel 82975x memory mapped register space */ + +#define I82975X_DRB_SHIFT 25 /* fixed 32MiB grain */ + +#define I82975X_DRB 0x100 /* DRAM Row Boundary (8b x 8) + * + * 7 set to 1 in highest DRB of + * channel if 4GB in ch. + * 6:2 upper boundary of rank in + * 32MB grains + * 1:0 set to 0 + */ +#define I82975X_DRB_CH0R0 0x100 +#define I82975X_DRB_CH0R1 0x101 +#define I82975X_DRB_CH0R2 0x102 +#define I82975X_DRB_CH0R3 0x103 +#define I82975X_DRB_CH1R0 0x180 +#define I82975X_DRB_CH1R1 0x181 +#define I82975X_DRB_CH1R2 0x182 +#define I82975X_DRB_CH1R3 0x183 + + +#define I82975X_DRA 0x108 /* DRAM Row Attribute (4b x 8) + * defines the PAGE SIZE to be used + * for the rank + * 7 reserved + * 6:4 row attr of odd rank, i.e. 1 + * 3 reserved + * 2:0 row attr of even rank, i.e. 
0 + * + * 000 = unpopulated + * 001 = reserved + * 010 = 4KiB + * 011 = 8KiB + * 100 = 16KiB + * others = reserved + */ +#define I82975X_DRA_CH0R01 0x108 +#define I82975X_DRA_CH0R23 0x109 +#define I82975X_DRA_CH1R01 0x188 +#define I82975X_DRA_CH1R23 0x189 + + +#define I82975X_BNKARC 0x10e /* Type of device in each rank - Bank Arch (16b) + * + * 15:8 reserved + * 7:6 Rank 3 architecture + * 5:4 Rank 2 architecture + * 3:2 Rank 1 architecture + * 1:0 Rank 0 architecture + * + * 00 => x16 devices; i.e 4 banks + * 01 => x8 devices; i.e 8 banks + */ +#define I82975X_C0BNKARC 0x10e +#define I82975X_C1BNKARC 0x18e + + + +#define I82975X_DRC 0x120 /* DRAM Controller Mode0 (32b) + * + * 31:30 reserved + * 29 init complete + * 28:11 reserved, according to Intel + * 22:21 number of channels + * 00=1 01=2 in 82875 + * seems to be ECC mode + * bits in 82975 in Asus + * P5W + * 19:18 Data Integ Mode + * 00=none 01=ECC in 82875 + * 10:8 refresh mode + * 7 reserved + * 6:4 mode select + * 3:2 reserved + * 1:0 DRAM type 10=Second Revision + * DDR2 SDRAM + * 00, 01, 11 reserved + */ +#define I82975X_DRC_CH0M0 0x120 +#define I82975X_DRC_CH1M0 0x1A0 + + +#define I82975X_DRC_M1 0x124 /* DRAM Controller Mode1 (32b) + * 31 0=Standard Address Map + * 1=Enhanced Address Map + * 30:0 reserved + */ + +#define I82975X_DRC_CH0M1 0x124 +#define I82975X_DRC_CH1M1 0x1A4 + +enum i82975x_chips { + I82975X = 0, +}; + +struct i82975x_pvt { + void __iomem *mch_window; +}; + +struct i82975x_dev_info { + const char *ctl_name; +}; + +struct i82975x_error_info { + u16 errsts; + u32 eap; + u8 des; + u8 derrsyn; + u16 errsts2; + u8 chan; /* the channel is bit 0 of EAP */ + u8 xeap; /* extended eap bit */ +}; + +static const struct i82975x_dev_info i82975x_devs[] = { + [I82975X] = { + .ctl_name = "i82975x" + }, +}; + +static struct pci_dev *mci_pdev; /* init dev: in case that AGP code has + * already registered driver + */ + +static int i82975x_registered = 1; + +static void i82975x_get_error_info(struct mem_ctl_info *mci, + struct i82975x_error_info *info) +{ + struct pci_dev *pdev; + + pdev = to_pci_dev(mci->dev); + + /* + * This is a mess because there is no atomic way to read all the + * registers at once and the registers can transition from CE being + * overwritten by UE. + */ + pci_read_config_word(pdev, I82975X_ERRSTS, &info->errsts); + pci_read_config_dword(pdev, I82975X_EAP, &info->eap); + pci_read_config_byte(pdev, I82975X_XEAP, &info->xeap); + pci_read_config_byte(pdev, I82975X_DES, &info->des); + pci_read_config_byte(pdev, I82975X_DERRSYN, &info->derrsyn); + pci_read_config_word(pdev, I82975X_ERRSTS, &info->errsts2); + + pci_write_bits16(pdev, I82975X_ERRSTS, 0x0003, 0x0003); + + /* + * If the error is the same then we can for both reads then + * the first set of reads is valid. If there is a change then + * there is a CE no info and the second set of reads is valid + * and should be UE info. 
+ */ + if (!(info->errsts2 & 0x0003)) + return; + + if ((info->errsts ^ info->errsts2) & 0x0003) { + pci_read_config_dword(pdev, I82975X_EAP, &info->eap); + pci_read_config_byte(pdev, I82975X_XEAP, &info->xeap); + pci_read_config_byte(pdev, I82975X_DES, &info->des); + pci_read_config_byte(pdev, I82975X_DERRSYN, + &info->derrsyn); + } +} + +static int i82975x_process_error_info(struct mem_ctl_info *mci, + struct i82975x_error_info *info, int handle_errors) +{ + int row, multi_chan, chan; + + multi_chan = mci->csrows[0].nr_channels - 1; + + if (!(info->errsts2 & 0x0003)) + return 0; + + if (!handle_errors) + return 1; + + if ((info->errsts ^ info->errsts2) & 0x0003) { + edac_mc_handle_ce_no_info(mci, "UE overwrote CE"); + info->errsts = info->errsts2; + } + + chan = info->eap & 1; + info->eap >>= 1; + if (info->xeap ) + info->eap |= 0x80000000; + info->eap >>= PAGE_SHIFT; + row = edac_mc_find_csrow_by_page(mci, info->eap); + + if (info->errsts & 0x0002) + edac_mc_handle_ue(mci, info->eap, 0, row, "i82975x UE"); + else + edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, + multi_chan ? chan : 0, + "i82975x CE"); + + return 1; +} + +static void i82975x_check(struct mem_ctl_info *mci) +{ + struct i82975x_error_info info; + + debugf1("MC%d: %s()\n", mci->mc_idx, __func__); + i82975x_get_error_info(mci, &info); + i82975x_process_error_info(mci, &info, 1); +} + +/* Return 1 if dual channel mode is active. Else return 0. */ +static int dual_channel_active(void __iomem *mch_window) +{ + /* + * We treat interleaved-symmetric configuration as dual-channel - EAP's + * bit-0 giving the channel of the error location. + * + * All other configurations are treated as single channel - the EAP's + * bit-0 will resolve ok in symmetric area of mixed + * (symmetric/asymmetric) configurations + */ + u8 drb[4][2]; + int row; + int dualch; + + for (dualch = 1, row = 0; dualch && (row < 4); row++) { + drb[row][0] = readb(mch_window + I82975X_DRB + row); + drb[row][1] = readb(mch_window + I82975X_DRB + row + 0x80); + dualch = dualch && (drb[row][0] == drb[row][1]); + } + return dualch; +} + +static enum dev_type i82975x_dram_type(void __iomem *mch_window, int rank) +{ + /* + * ASUS P5W DH either does not program this register or programs + * it wrong! + * ECC is possible on i92975x ONLY with DEV_X8 which should mean 'val' + * for each rank should be 01b - the LSB of the word should be 0x55; + * but it reads 0! + */ + return DEV_X8; +} + +static void i82975x_init_csrows(struct mem_ctl_info *mci, + struct pci_dev *pdev, void __iomem *mch_window) +{ + struct csrow_info *csrow; + unsigned long last_cumul_size; + u8 value; + u32 cumul_size; + int index; + + last_cumul_size = 0; + + /* + * 82875 comment: + * The dram row boundary (DRB) reg values are boundary address + * for each DRAM row with a granularity of 32 or 64MB (single/dual + * channel operation). DRB regs are cumulative; therefore DRB7 will + * contain the total memory contained in all eight rows. + * + * FIXME: + * EDAC currently works for Dual-channel Interleaved configuration. + * Other configurations, which the chip supports, need fixing/testing. + * + */ + + for (index = 0; index < mci->nr_csrows; index++) { + csrow = &mci->csrows[index]; + + value = readb(mch_window + I82975X_DRB + index + + ((index >= 4) ? 
0x80 : 0)); + cumul_size = value; + cumul_size <<= (I82975X_DRB_SHIFT - PAGE_SHIFT); + debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, + cumul_size); + if (cumul_size == last_cumul_size) + continue; /* not populated */ + + csrow->first_page = last_cumul_size; + csrow->last_page = cumul_size - 1; + csrow->nr_pages = cumul_size - last_cumul_size; + last_cumul_size = cumul_size; + csrow->grain = 1 << 7; /* I82975X_EAP has 128B resolution */ + csrow->mtype = MEM_DDR; /* i82975x supports only DDR2 */ + csrow->dtype = i82975x_dram_type(mch_window, index); + csrow->edac_mode = EDAC_SECDED; /* only supported */ + } +} + +/* #define i82975x_DEBUG_IOMEM */ + +#ifdef i82975x_DEBUG_IOMEM +static void i82975x_print_dram_timings(void __iomem *mch_window) +{ + /* + * The register meanings are from Intel specs; + * (shows 13-5-5-5 for 800-DDR2) + * Asus P5W Bios reports 15-5-4-4 + * What's your religion? + */ + static const int caslats[4] = { 5, 4, 3, 6 }; + u32 dtreg[2]; + + dtreg[0] = readl(mch_window + 0x114); + dtreg[1] = readl(mch_window + 0x194); + i82975x_printk(KERN_INFO, "DRAM Timings : Ch0 Ch1\n" + " RAS Active Min = %d %d\n" + " CAS latency = %d %d\n" + " RAS to CAS = %d %d\n" + " RAS precharge = %d %d\n", + (dtreg[0] >> 19 ) & 0x0f, + (dtreg[1] >> 19) & 0x0f, + caslats[(dtreg[0] >> 8) & 0x03], + caslats[(dtreg[1] >> 8) & 0x03], + ((dtreg[0] >> 4) & 0x07) + 2, + ((dtreg[1] >> 4) & 0x07) + 2, + (dtreg[0] & 0x07) + 2, + (dtreg[1] & 0x07) + 2 + ); + +} +#endif + +static int i82975x_probe1(struct pci_dev *pdev, int dev_idx) +{ + int rc = -ENODEV; + struct mem_ctl_info *mci; + struct i82975x_pvt *pvt; + void __iomem *mch_window; + u32 mchbar; + u32 drc[2]; + struct i82975x_error_info discard; + int chans; +#ifdef i82975x_DEBUG_IOMEM + u8 c0drb[4]; + u8 c1drb[4]; +#endif + + debugf0("%s()\n", __func__); + + pci_read_config_dword(pdev, I82975X_MCHBAR, &mchbar); + if (!(mchbar & 1)) { + debugf3("%s(): failed, MCHBAR disabled!\n", __func__); + goto fail0; + } + mchbar &= 0xffffc000; /* bits 31:14 used for 16K window */ + mch_window = ioremap_nocache(mchbar, 0x1000); + +#ifdef i82975x_DEBUG_IOMEM + i82975x_printk(KERN_INFO, "MCHBAR real = %0x, remapped = %p\n", + mchbar, mch_window); + + c0drb[0] = readb(mch_window + I82975X_DRB_CH0R0); + c0drb[1] = readb(mch_window + I82975X_DRB_CH0R1); + c0drb[2] = readb(mch_window + I82975X_DRB_CH0R2); + c0drb[3] = readb(mch_window + I82975X_DRB_CH0R3); + c1drb[0] = readb(mch_window + I82975X_DRB_CH1R0); + c1drb[1] = readb(mch_window + I82975X_DRB_CH1R1); + c1drb[2] = readb(mch_window + I82975X_DRB_CH1R2); + c1drb[3] = readb(mch_window + I82975X_DRB_CH1R3); + i82975x_printk(KERN_INFO, "DRBCH0R0 = 0x%02x\n", c0drb[0]); + i82975x_printk(KERN_INFO, "DRBCH0R1 = 0x%02x\n", c0drb[1]); + i82975x_printk(KERN_INFO, "DRBCH0R2 = 0x%02x\n", c0drb[2]); + i82975x_printk(KERN_INFO, "DRBCH0R3 = 0x%02x\n", c0drb[3]); + i82975x_printk(KERN_INFO, "DRBCH1R0 = 0x%02x\n", c1drb[0]); + i82975x_printk(KERN_INFO, "DRBCH1R1 = 0x%02x\n", c1drb[1]); + i82975x_printk(KERN_INFO, "DRBCH1R2 = 0x%02x\n", c1drb[2]); + i82975x_printk(KERN_INFO, "DRBCH1R3 = 0x%02x\n", c1drb[3]); +#endif + + drc[0] = readl(mch_window + I82975X_DRC_CH0M0); + drc[1] = readl(mch_window + I82975X_DRC_CH1M0); +#ifdef i82975x_DEBUG_IOMEM + i82975x_printk(KERN_INFO, "DRC_CH0 = %0x, %s\n", drc[0], + ((drc[0] >> 21) & 3) == 1 ? + "ECC enabled" : "ECC disabled"); + i82975x_printk(KERN_INFO, "DRC_CH1 = %0x, %s\n", drc[1], + ((drc[1] >> 21) & 3) == 1 ? 
+ "ECC enabled" : "ECC disabled"); + + i82975x_printk(KERN_INFO, "C0 BNKARC = %0x\n", + readw(mch_window + I82975X_C0BNKARC)); + i82975x_printk(KERN_INFO, "C1 BNKARC = %0x\n", + readw(mch_window + I82975X_C1BNKARC)); + i82975x_print_dram_timings(mch_window); + goto fail1; +#endif + if (!(((drc[0] >> 21) & 3) == 1 || ((drc[1] >> 21) & 3) == 1)) { + i82975x_printk(KERN_INFO, "ECC disabled on both channels.\n"); + goto fail1; + } + + chans = dual_channel_active(mch_window) + 1; + + /* assuming only one controller, index thus is 0 */ + mci = edac_mc_alloc(sizeof(*pvt), I82975X_NR_CSROWS(chans), + chans, 0); + if (!mci) { + rc = -ENOMEM; + goto fail1; + } + + debugf3("%s(): init mci\n", __func__); + mci->dev = &pdev->dev; + mci->mtype_cap = MEM_FLAG_DDR; + mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; + mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; + mci->mod_name = EDAC_MOD_STR; + mci->mod_ver = I82975X_REVISION; + mci->ctl_name = i82975x_devs[dev_idx].ctl_name; + mci->edac_check = i82975x_check; + mci->ctl_page_to_phys = NULL; + debugf3("%s(): init pvt\n", __func__); + pvt = (struct i82975x_pvt *) mci->pvt_info; + pvt->mch_window = mch_window; + i82975x_init_csrows(mci, pdev, mch_window); + i82975x_get_error_info(mci, &discard); /* clear counters */ + + /* finalize this instance of memory controller with edac core */ + if (edac_mc_add_mc(mci)) { + debugf3("%s(): failed edac_mc_add_mc()\n", __func__); + goto fail2; + } + + /* get this far and it's successful */ + debugf3("%s(): success\n", __func__); + return 0; + +fail2: + edac_mc_free(mci); + +fail1: + iounmap(mch_window); +fail0: + return rc; +} + +/* returns count (>= 0), or negative on error */ +static int __devinit i82975x_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int rc; + + debugf0("%s()\n", __func__); + + if (pci_enable_device(pdev) < 0) + return -EIO; + + rc = i82975x_probe1(pdev, ent->driver_data); + + if (mci_pdev == NULL) + mci_pdev = pci_dev_get(pdev); + + return rc; +} + +static void __devexit i82975x_remove_one(struct pci_dev *pdev) +{ + struct mem_ctl_info *mci; + struct i82975x_pvt *pvt; + + debugf0("%s()\n", __func__); + + mci = edac_mc_del_mc(&pdev->dev); + if (mci == NULL) + return; + + pvt = mci->pvt_info; + if (pvt->mch_window) + iounmap( pvt->mch_window ); + + edac_mc_free(mci); +} + +static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = { + { + PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, + I82975X + }, + { + 0, + } /* 0 terminated list. 
*/ +}; + +MODULE_DEVICE_TABLE(pci, i82975x_pci_tbl); + +static struct pci_driver i82975x_driver = { + .name = EDAC_MOD_STR, + .probe = i82975x_init_one, + .remove = __devexit_p(i82975x_remove_one), + .id_table = i82975x_pci_tbl, +}; + +static int __init i82975x_init(void) +{ + int pci_rc; + + debugf3("%s()\n", __func__); + + pci_rc = pci_register_driver(&i82975x_driver); + if (pci_rc < 0) + goto fail0; + + if (mci_pdev == NULL) { + mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_82975_0, NULL); + + if (!mci_pdev) { + debugf0("i82975x pci_get_device fail\n"); + pci_rc = -ENODEV; + goto fail1; + } + + pci_rc = i82975x_init_one(mci_pdev, i82975x_pci_tbl); + + if (pci_rc < 0) { + debugf0("i82975x init fail\n"); + pci_rc = -ENODEV; + goto fail1; + } + } + + return 0; + +fail1: + pci_unregister_driver(&i82975x_driver); + +fail0: + if (mci_pdev != NULL) + pci_dev_put(mci_pdev); + + return pci_rc; +} + +static void __exit i82975x_exit(void) +{ + debugf3("%s()\n", __func__); + + pci_unregister_driver(&i82975x_driver); + + if (!i82975x_registered) { + i82975x_remove_one(mci_pdev); + pci_dev_put(mci_pdev); + } +} + +module_init(i82975x_init); +module_exit(i82975x_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Arvind R. <arvind@acarlab.com>"); +MODULE_DESCRIPTION("MC support for Intel 82975 memory hub controllers"); diff --git a/drivers/edac/pasemi_edac.c b/drivers/edac/pasemi_edac.c new file mode 100644 index 000000000000..e66cdd42a392 --- /dev/null +++ b/drivers/edac/pasemi_edac.c @@ -0,0 +1,299 @@ +/* + * Copyright (C) 2006-2007 PA Semi, Inc + * + * Author: Egor Martovetsky <egor@pasemi.com> + * Maintained by: Olof Johansson <olof@lixom.net> + * + * Driver for the PWRficient onchip memory controllers + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/pci_ids.h> +#include <linux/slab.h> +#include "edac_core.h" + +#define MODULE_NAME "pasemi_edac" + +#define MCCFG_MCEN 0x300 +#define MCCFG_MCEN_MMC_EN 0x00000001 +#define MCCFG_ERRCOR 0x388 +#define MCCFG_ERRCOR_RNK_FAIL_DET_EN 0x00000100 +#define MCCFG_ERRCOR_ECC_GEN_EN 0x00000010 +#define MCCFG_ERRCOR_ECC_CRR_EN 0x00000001 +#define MCCFG_SCRUB 0x384 +#define MCCFG_SCRUB_RGLR_SCRB_EN 0x00000001 +#define MCDEBUG_ERRCTL1 0x728 +#define MCDEBUG_ERRCTL1_RFL_LOG_EN 0x00080000 +#define MCDEBUG_ERRCTL1_MBE_LOG_EN 0x00040000 +#define MCDEBUG_ERRCTL1_SBE_LOG_EN 0x00020000 +#define MCDEBUG_ERRSTA 0x730 +#define MCDEBUG_ERRSTA_RFL_STATUS 0x00000004 +#define MCDEBUG_ERRSTA_MBE_STATUS 0x00000002 +#define MCDEBUG_ERRSTA_SBE_STATUS 0x00000001 +#define MCDEBUG_ERRCNT1 0x734 +#define MCDEBUG_ERRCNT1_SBE_CNT_OVRFLO 0x00000080 +#define MCDEBUG_ERRLOG1A 0x738 +#define MCDEBUG_ERRLOG1A_MERR_TYPE_M 0x30000000 +#define MCDEBUG_ERRLOG1A_MERR_TYPE_NONE 0x00000000 +#define MCDEBUG_ERRLOG1A_MERR_TYPE_SBE 0x10000000 +#define MCDEBUG_ERRLOG1A_MERR_TYPE_MBE 0x20000000 +#define MCDEBUG_ERRLOG1A_MERR_TYPE_RFL 0x30000000 +#define MCDEBUG_ERRLOG1A_MERR_BA_M 0x00700000 +#define MCDEBUG_ERRLOG1A_MERR_BA_S 20 +#define MCDEBUG_ERRLOG1A_MERR_CS_M 0x00070000 +#define MCDEBUG_ERRLOG1A_MERR_CS_S 16 +#define MCDEBUG_ERRLOG1A_SYNDROME_M 0x0000ffff +#define MCDRAM_RANKCFG 0x114 +#define MCDRAM_RANKCFG_EN 0x00000001 +#define MCDRAM_RANKCFG_TYPE_SIZE_M 0x000001c0 +#define MCDRAM_RANKCFG_TYPE_SIZE_S 6 + +#define PASEMI_EDAC_NR_CSROWS 8 +#define PASEMI_EDAC_NR_CHANS 1 +#define PASEMI_EDAC_ERROR_GRAIN 64 + +static int last_page_in_mmc; +static int system_mmc_id; + + +static u32 pasemi_edac_get_error_info(struct mem_ctl_info *mci) +{ + struct pci_dev *pdev = to_pci_dev(mci->dev); + u32 tmp; + + pci_read_config_dword(pdev, MCDEBUG_ERRSTA, + &tmp); + + tmp &= (MCDEBUG_ERRSTA_RFL_STATUS | MCDEBUG_ERRSTA_MBE_STATUS + | MCDEBUG_ERRSTA_SBE_STATUS); + + if (tmp) { + if (tmp & MCDEBUG_ERRSTA_SBE_STATUS) + pci_write_config_dword(pdev, MCDEBUG_ERRCNT1, + MCDEBUG_ERRCNT1_SBE_CNT_OVRFLO); + pci_write_config_dword(pdev, MCDEBUG_ERRSTA, tmp); + } + + return tmp; +} + +static void pasemi_edac_process_error_info(struct mem_ctl_info *mci, u32 errsta) +{ + struct pci_dev *pdev = to_pci_dev(mci->dev); + u32 errlog1a; + u32 cs; + + if (!errsta) + return; + + pci_read_config_dword(pdev, MCDEBUG_ERRLOG1A, &errlog1a); + + cs = (errlog1a & MCDEBUG_ERRLOG1A_MERR_CS_M) >> + MCDEBUG_ERRLOG1A_MERR_CS_S; + + /* uncorrectable/multi-bit errors */ + if (errsta & (MCDEBUG_ERRSTA_MBE_STATUS | + MCDEBUG_ERRSTA_RFL_STATUS)) { + edac_mc_handle_ue(mci, mci->csrows[cs].first_page, 0, + cs, mci->ctl_name); + } + + /* correctable/single-bit errors */ + if (errsta & MCDEBUG_ERRSTA_SBE_STATUS) { + edac_mc_handle_ce(mci, mci->csrows[cs].first_page, 0, + 0, cs, 0, mci->ctl_name); + } +} + +static void pasemi_edac_check(struct mem_ctl_info *mci) +{ + u32 errsta; + + errsta = pasemi_edac_get_error_info(mci); + if (errsta) + pasemi_edac_process_error_info(mci, errsta); +} + +static int pasemi_edac_init_csrows(struct mem_ctl_info *mci, + struct pci_dev *pdev, + enum edac_type edac_mode) +{ + struct csrow_info *csrow; + u32 rankcfg; + int index; + + for (index = 0; index < 
mci->nr_csrows; index++) { + csrow = &mci->csrows[index]; + + pci_read_config_dword(pdev, + MCDRAM_RANKCFG + (index * 12), + &rankcfg); + + if (!(rankcfg & MCDRAM_RANKCFG_EN)) + continue; + + switch ((rankcfg & MCDRAM_RANKCFG_TYPE_SIZE_M) >> + MCDRAM_RANKCFG_TYPE_SIZE_S) { + case 0: + csrow->nr_pages = 128 << (20 - PAGE_SHIFT); + break; + case 1: + csrow->nr_pages = 256 << (20 - PAGE_SHIFT); + break; + case 2: + case 3: + csrow->nr_pages = 512 << (20 - PAGE_SHIFT); + break; + case 4: + csrow->nr_pages = 1024 << (20 - PAGE_SHIFT); + break; + case 5: + csrow->nr_pages = 2048 << (20 - PAGE_SHIFT); + break; + default: + edac_mc_printk(mci, KERN_ERR, + "Unrecognized Rank Config. rankcfg=%u\n", + rankcfg); + return -EINVAL; + } + + csrow->first_page = last_page_in_mmc; + csrow->last_page = csrow->first_page + csrow->nr_pages - 1; + last_page_in_mmc += csrow->nr_pages; + csrow->page_mask = 0; + csrow->grain = PASEMI_EDAC_ERROR_GRAIN; + csrow->mtype = MEM_DDR; + csrow->dtype = DEV_UNKNOWN; + csrow->edac_mode = edac_mode; + } + return 0; +} + +static int __devinit pasemi_edac_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct mem_ctl_info *mci = NULL; + u32 errctl1, errcor, scrub, mcen; + + pci_read_config_dword(pdev, MCCFG_MCEN, &mcen); + if (!(mcen & MCCFG_MCEN_MMC_EN)) + return -ENODEV; + + /* + * We should think about enabling other error detection later on + */ + + pci_read_config_dword(pdev, MCDEBUG_ERRCTL1, &errctl1); + errctl1 |= MCDEBUG_ERRCTL1_SBE_LOG_EN | + MCDEBUG_ERRCTL1_MBE_LOG_EN | + MCDEBUG_ERRCTL1_RFL_LOG_EN; + pci_write_config_dword(pdev, MCDEBUG_ERRCTL1, errctl1); + + mci = edac_mc_alloc(0, PASEMI_EDAC_NR_CSROWS, PASEMI_EDAC_NR_CHANS, + system_mmc_id++); + + if (mci == NULL) + return -ENOMEM; + + pci_read_config_dword(pdev, MCCFG_ERRCOR, &errcor); + errcor |= MCCFG_ERRCOR_RNK_FAIL_DET_EN | + MCCFG_ERRCOR_ECC_GEN_EN | + MCCFG_ERRCOR_ECC_CRR_EN; + + mci->dev = &pdev->dev; + mci->mtype_cap = MEM_FLAG_DDR | MEM_FLAG_RDDR; + mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; + mci->edac_cap = (errcor & MCCFG_ERRCOR_ECC_GEN_EN) ? + ((errcor & MCCFG_ERRCOR_ECC_CRR_EN) ? + (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_EC) : + EDAC_FLAG_NONE; + mci->mod_name = MODULE_NAME; + mci->dev_name = pci_name(pdev); + mci->ctl_name = "pasemi,1682m-mc"; + mci->edac_check = pasemi_edac_check; + mci->ctl_page_to_phys = NULL; + pci_read_config_dword(pdev, MCCFG_SCRUB, &scrub); + mci->scrub_cap = SCRUB_FLAG_HW_PROG | SCRUB_FLAG_HW_SRC; + mci->scrub_mode = + ((errcor & MCCFG_ERRCOR_ECC_CRR_EN) ? SCRUB_FLAG_HW_SRC : 0) | + ((scrub & MCCFG_SCRUB_RGLR_SCRB_EN) ? SCRUB_FLAG_HW_PROG : 0); + + if (pasemi_edac_init_csrows(mci, pdev, + (mci->edac_cap & EDAC_FLAG_SECDED) ? + EDAC_SECDED : + ((mci->edac_cap & EDAC_FLAG_EC) ? 
+ EDAC_EC : EDAC_NONE))) + goto fail; + + /* + * Clear status + */ + pasemi_edac_get_error_info(mci); + + if (edac_mc_add_mc(mci)) + goto fail; + + /* get this far and it's successful */ + return 0; + +fail: + edac_mc_free(mci); + return -ENODEV; +} + +static void __devexit pasemi_edac_remove(struct pci_dev *pdev) +{ + struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev); + + if (!mci) + return; + + edac_mc_free(mci); +} + + +static const struct pci_device_id pasemi_edac_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa00a) }, +}; + +MODULE_DEVICE_TABLE(pci, pasemi_edac_pci_tbl); + +static struct pci_driver pasemi_edac_driver = { + .name = MODULE_NAME, + .probe = pasemi_edac_probe, + .remove = __devexit_p(pasemi_edac_remove), + .id_table = pasemi_edac_pci_tbl, +}; + +static int __init pasemi_edac_init(void) +{ + return pci_register_driver(&pasemi_edac_driver); +} + +static void __exit pasemi_edac_exit(void) +{ + pci_unregister_driver(&pasemi_edac_driver); +} + +module_init(pasemi_edac_init); +module_exit(pasemi_edac_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>"); +MODULE_DESCRIPTION("MC support for PA Semi PA6T-1682M memory controller"); diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c index a49cf0a39398..e25f712f2dc3 100644 --- a/drivers/edac/r82600_edac.c +++ b/drivers/edac/r82600_edac.c @@ -11,7 +11,7 @@ * * Written with reference to 82600 High Integration Dual PCI System * Controller Data Book: - * http://www.radisys.com/files/support_downloads/007-01277-0002.82600DataBook.pdf + * www.radisys.com/files/support_downloads/007-01277-0002.82600DataBook.pdf * references to this document given in [] */ @@ -20,9 +20,9 @@ #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/slab.h> -#include "edac_mc.h" +#include "edac_core.h" -#define R82600_REVISION " Ver: 2.0.1 " __DATE__ +#define R82600_REVISION " Ver: 2.0.2 " __DATE__ #define EDAC_MOD_STR "r82600_edac" #define r82600_printk(level, fmt, arg...) \ @@ -131,10 +131,12 @@ struct r82600_error_info { u32 eapr; }; -static unsigned int disable_hardware_scrub = 0; +static unsigned int disable_hardware_scrub; -static void r82600_get_error_info (struct mem_ctl_info *mci, - struct r82600_error_info *info) +static struct edac_pci_ctl_info *r82600_pci; + +static void r82600_get_error_info(struct mem_ctl_info *mci, + struct r82600_error_info *info) { struct pci_dev *pdev; @@ -144,18 +146,19 @@ static void r82600_get_error_info (struct mem_ctl_info *mci, if (info->eapr & BIT(0)) /* Clear error to allow next error to be reported [p.62] */ pci_write_bits32(pdev, R82600_EAP, - ((u32) BIT(0) & (u32) BIT(1)), - ((u32) BIT(0) & (u32) BIT(1))); + ((u32) BIT(0) & (u32) BIT(1)), + ((u32) BIT(0) & (u32) BIT(1))); if (info->eapr & BIT(1)) /* Clear error to allow next error to be reported [p.62] */ pci_write_bits32(pdev, R82600_EAP, - ((u32) BIT(0) & (u32) BIT(1)), - ((u32) BIT(0) & (u32) BIT(1))); + ((u32) BIT(0) & (u32) BIT(1)), + ((u32) BIT(0) & (u32) BIT(1))); } -static int r82600_process_error_info (struct mem_ctl_info *mci, - struct r82600_error_info *info, int handle_errors) +static int r82600_process_error_info(struct mem_ctl_info *mci, + struct r82600_error_info *info, + int handle_errors) { int error_found; u32 eapaddr, page; @@ -172,25 +175,24 @@ static int r82600_process_error_info (struct mem_ctl_info *mci, * granularity (upper 19 bits only) */ page = eapaddr >> PAGE_SHIFT; - if (info->eapr & BIT(0)) { /* CE? */ + if (info->eapr & BIT(0)) { /* CE? 
*/ error_found = 1; if (handle_errors) - edac_mc_handle_ce(mci, page, 0, /* not avail */ + edac_mc_handle_ce(mci, page, 0, /* not avail */ syndrome, edac_mc_find_csrow_by_page(mci, page), - 0, /* channel */ - mci->ctl_name); + 0, mci->ctl_name); } - if (info->eapr & BIT(1)) { /* UE? */ + if (info->eapr & BIT(1)) { /* UE? */ error_found = 1; if (handle_errors) /* 82600 doesn't give enough info */ edac_mc_handle_ue(mci, page, 0, - edac_mc_find_csrow_by_page(mci, page), - mci->ctl_name); + edac_mc_find_csrow_by_page(mci, page), + mci->ctl_name); } return error_found; @@ -211,11 +213,11 @@ static inline int ecc_enabled(u8 dramcr) } static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, - u8 dramcr) + u8 dramcr) { struct csrow_info *csrow; int index; - u8 drbar; /* SDRAM Row Boundry Address Register */ + u8 drbar; /* SDRAM Row Boundry Address Register */ u32 row_high_limit, row_high_limit_last; u32 reg_sdram, ecc_on, row_base; @@ -276,7 +278,7 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx) debugf2("%s(): sdram refresh rate = %#0x\n", __func__, sdram_refresh_rate); debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr); - mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS); + mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS, 0); if (mci == NULL) return -ENOMEM; @@ -305,15 +307,16 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx) mci->mod_name = EDAC_MOD_STR; mci->mod_ver = R82600_REVISION; mci->ctl_name = "R82600"; + mci->dev_name = pci_name(pdev); mci->edac_check = r82600_check; mci->ctl_page_to_phys = NULL; r82600_init_csrows(mci, pdev, dramcr); - r82600_get_error_info(mci, &discard); /* clear counters */ + r82600_get_error_info(mci, &discard); /* clear counters */ /* Here we assume that we will never see multiple instances of this * type of memory controller. The ID is therefore hardcoded to 0. */ - if (edac_mc_add_mc(mci,0)) { + if (edac_mc_add_mc(mci)) { debugf3("%s(): failed edac_mc_add_mc()\n", __func__); goto fail; } @@ -326,6 +329,17 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx) pci_write_bits32(pdev, R82600_EAP, BIT(31), BIT(31)); } + /* allocating generic PCI control info */ + r82600_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR); + if (!r82600_pci) { + printk(KERN_WARNING + "%s(): Unable to create PCI control\n", + __func__); + printk(KERN_WARNING + "%s(): PCI error report via EDAC not setup\n", + __func__); + } + debugf3("%s(): success\n", __func__); return 0; @@ -336,7 +350,7 @@ fail: /* returns count (>= 0), or negative on error */ static int __devinit r82600_init_one(struct pci_dev *pdev, - const struct pci_device_id *ent) + const struct pci_device_id *ent) { debugf0("%s()\n", __func__); @@ -350,6 +364,9 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev) debugf0("%s()\n", __func__); + if (r82600_pci) + edac_pci_release_generic_ctl(r82600_pci); + if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL) return; @@ -358,11 +375,11 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev) static const struct pci_device_id r82600_pci_tbl[] __devinitdata = { { - PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID) - }, + PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID) + }, { - 0, - } /* 0 terminated list. */ + 0, + } /* 0 terminated list. */ }; MODULE_DEVICE_TABLE(pci, r82600_pci_tbl); @@ -389,7 +406,7 @@ module_exit(r82600_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD Ltd. 
" - "on behalf of EADS Astrium"); + "on behalf of EADS Astrium"); MODULE_DESCRIPTION("MC support for Radisys 82600 memory controllers"); module_param(disable_hardware_scrub, bool, 0644); |