Remove smpboot_clear_io_apic() and keep only smpboot_clear_io_apic_irqs(),
checking nr_legacy_irqs before clearing io_apic_irqs. Move the remaining
helpers from smpboot_hooks.h into smpboot.c and delete the header.
Signed-off-by: Yinghai Lu <yin...@kernel.org>
---
arch/x86/include/asm/smpboot_hooks.h | 61 ----------------------------------
arch/x86/kernel/smpboot.c | 57 ++++++++++++++++++++++++++++++--
2 files changed, 54 insertions(+), 64 deletions(-)
delete mode 100644 arch/x86/include/asm/smpboot_hooks.h
diff --git a/arch/x86/include/asm/smpboot_hooks.h b/arch/x86/include/asm/smpboot_hooks.h
deleted file mode 100644
index 1def601..0000000
--- a/arch/x86/include/asm/smpboot_hooks.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/* two abstractions specific to kernel/smpboot.c, mainly to cater to visws
- * which needs to alter them. */
-
-static inline void smpboot_clear_io_apic_irqs(void)
-{
-#ifdef CONFIG_X86_IO_APIC
- io_apic_irqs = 0;
-#endif
-}
-
-static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
-{
- CMOS_WRITE(0xa, 0xf);
- local_flush_tlb();
- pr_debug("1.\n");
- *((volatile unsigned short *)phys_to_virt(apic->trampoline_phys_high)) =
- start_eip >> 4;
- pr_debug("2.\n");
- *((volatile unsigned short *)phys_to_virt(apic->trampoline_phys_low)) =
- start_eip & 0xf;
- pr_debug("3.\n");
-}
-
-static inline void smpboot_restore_warm_reset_vector(void)
-{
- /*
- * Install writable page 0 entry to set BIOS data area.
- */
- local_flush_tlb();
-
- /*
- * Paranoid: Set warm reset code and vector here back
- * to default values.
- */
- CMOS_WRITE(0, 0xf);
-
- *((volatile long *)phys_to_virt(apic->trampoline_phys_low)) = 0;
-}
-
-static inline void __init smpboot_setup_io_apic(void)
-{
-#ifdef CONFIG_X86_IO_APIC
- /*
- * Here we can be sure that there is an IO-APIC in the system. Let's
- * go and set it up:
- */
- if (!skip_ioapic_setup && nr_ioapics)
- setup_IO_APIC();
- else {
- nr_ioapics = 0;
- localise_nmi_watchdog();
- }
-#endif
-}
-
-static inline void smpboot_clear_io_apic(void)
-{
-#ifdef CONFIG_X86_IO_APIC
- nr_ioapics = 0;
-#endif
-}
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 06d98ae..ba43b3b 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -67,7 +67,6 @@
#include <asm/uv/uv.h>
#include <linux/mc146818rtc.h>
-#include <asm/smpboot_hooks.h>
#include <asm/i8259.h>
#ifdef CONFIG_X86_32
@@ -701,6 +700,35 @@ static void __cpuinit announce_cpu(int cpu, int apicid)
node, cpu, apicid);
}
+static void smpboot_setup_warm_reset_vector(unsigned long start_eip)
+{
+ CMOS_WRITE(0xa, 0xf);
+ local_flush_tlb();
+ pr_debug("1.\n");
+ *((volatile unsigned short *)phys_to_virt(apic->trampoline_phys_high)) =
+ start_eip >> 4;
+ pr_debug("2.\n");
+ *((volatile unsigned short *)phys_to_virt(apic->trampoline_phys_low)) =
+ start_eip & 0xf;
+ pr_debug("3.\n");
+}
+
+static void smpboot_restore_warm_reset_vector(void)
+{
+ /*
+ * Install writable page 0 entry to set BIOS data area.
+ */
+ local_flush_tlb();
+
+ /*
+ * Paranoid: Set warm reset code and vector here back
+ * to default values.
+ */
+ CMOS_WRITE(0, 0xf);
+
+ *((volatile long *)phys_to_virt(apic->trampoline_phys_low)) = 0;
+}
+
/*
* NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
* (ie clustered apic addressing mode), this is a LOGICAL apic ID.
@@ -928,6 +956,13 @@ int __cpuinit native_cpu_up(unsigned int cpu)
return 0;
}
+static void smpboot_clear_io_apic_irqs(void)
+{
+#ifdef CONFIG_X86_IO_APIC
+ if (legacy_pic->nr_legacy_irqs)
+ io_apic_irqs = 0;
+#endif
+}
/*
* Fall back to non SMP mode after errors.
*
@@ -1027,7 +1062,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
pr_err("... forcing use of dummy APIC emulation."
"(tell your hw vendor)\n");
}
- smpboot_clear_io_apic();
+ smpboot_clear_io_apic_irqs();
arch_disable_smp_support();
return -1;
}
@@ -1039,7 +1074,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
*/
if (!max_cpus) {
printk(KERN_INFO "SMP mode deactivated.\n");
- smpboot_clear_io_apic();
+ smpboot_clear_io_apic_irqs();
localise_nmi_watchdog();
@@ -1064,6 +1099,22 @@ static void __init smp_cpu_index_default(void)
}
}
+static void __init smpboot_setup_io_apic(void)
+{
+#ifdef CONFIG_X86_IO_APIC
+ /*
+ * Here we can be sure that there is an IO-APIC in the system. Let's
+ * go and set it up:
+ */
+ if (!skip_ioapic_setup && nr_ioapics)
+ setup_IO_APIC();
+ else {
+ nr_ioapics = 0;
+ localise_nmi_watchdog();
+ }
+#endif
+}
+
/*
* Prepare for SMP bootup. The MP table or ACPI has been read
* earlier. Just do some sanity checking here and enable APIC mode.
--
1.6.4.2
Signed-off-by: Yinghai Lu <yin...@kernel.org>
---
arch/x86/include/asm/gart.h | 22 ----------------------
arch/x86/kernel/aperture_64.c | 22 ++++++++++++++++++++++
drivers/char/agp/amd64-agp.c | 39 ++++++++++++++++++++++++++++++++++++++-
3 files changed, 60 insertions(+), 23 deletions(-)
diff --git a/arch/x86/include/asm/gart.h b/arch/x86/include/asm/gart.h
index 4ac5b0f..2b63a91 100644
--- a/arch/x86/include/asm/gart.h
+++ b/arch/x86/include/asm/gart.h
@@ -74,26 +74,4 @@ static inline void enable_gart_translation(struct pci_dev *dev, u64 addr)
pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
}
-static inline int aperture_valid(u64 aper_base, u32 aper_size, u32 min_size)
-{
- if (!aper_base)
- return 0;
-
- if (aper_base + aper_size > 0x100000000ULL) {
- printk(KERN_INFO "Aperture beyond 4GB. Ignoring.\n");
- return 0;
- }
- if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) {
- printk(KERN_INFO "Aperture pointing to e820 RAM. Ignoring.\n");
- return 0;
- }
- if (aper_size < min_size) {
- printk(KERN_INFO "Aperture too small (%d MB) than (%d MB)\n",
- aper_size>>20, min_size>>20);
- return 0;
- }
-
- return 1;
-}
-
#endif /* _ASM_X86_GART_H */
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 3704997..f6e6270 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -145,6 +145,28 @@ static u32 __init find_cap(int bus, int slot, int func, int cap)
return 0;
}
+static int __init aperture_valid(u64 aper_base, u32 aper_size, u32 min_size)
+{
+ if (!aper_base)
+ return 0;
+
+ if (aper_base + aper_size > 0x100000000ULL) {
+ printk(KERN_INFO "Aperture beyond 4GB. Ignoring.\n");
+ return 0;
+ }
+ if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) {
+ printk(KERN_INFO "Aperture pointing to e820 RAM. Ignoring.\n");
+ return 0;
+ }
+ if (aper_size < min_size) {
+ printk(KERN_INFO "Aperture too small (%d MB) than (%d MB)\n",
+ aper_size>>20, min_size>>20);
+ return 0;
+ }
+
+ return 1;
+}
+
/* Read a standard AGPv3 bridge header */
static u32 __init read_agp(int bus, int slot, int func, int cap, u32 *order)
{
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index fd50ead..85cabd0 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -14,7 +14,6 @@
#include <linux/agp_backend.h>
#include <linux/mmzone.h>
#include <asm/page.h> /* PAGE_SIZE */
-#include <asm/e820.h>
#include <asm/k8.h>
#include <asm/gart.h>
#include "agp.h"
@@ -231,6 +230,44 @@ static const struct agp_bridge_driver amd_8151_driver = {
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
+static int __devinit
+__is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
+{
+ return 1;
+}
+
+static int __devinit any_ram_in_range(u64 base, u64 size)
+{
+ unsigned long pfn, nr_pages;
+
+ pfn = base >> PAGE_SHIFT;
+ nr_pages = size >> PAGE_SHIFT;
+
+ return walk_system_ram_range(pfn, nr_pages, NULL, __is_ram) == 1;
+}
+
+static int __devinit aperture_valid(u64 aper_base, u32 aper_size, u32 min_size)
+{
+ if (!aper_base)
+ return 0;
+
+ if (aper_base + aper_size > 0x100000000ULL) {
+ printk(KERN_INFO "Aperture beyond 4GB. Ignoring.\n");
+ return 0;
+ }
+ if (any_ram_in_range(aper_base, aper_size)) {
+ printk(KERN_INFO "Aperture pointing to E820 RAM. Ignoring.\n");
+ return 0;
+ }
+ if (aper_size < min_size) {
+ printk(KERN_INFO "Aperture too small (%d MB) than (%d MB)\n",
+ aper_size>>20, min_size>>20);
+ return 0;
+ }
+
+ return 1;
+}
+
/* Some basic sanity checks for the aperture. */
static int __devinit agp_aperture_valid(u64 aper, u32 size)
{
--
1.6.4.2
Signed-off-by: Yinghai Lu <yin...@kernel.org>
---
arch/x86/kernel/apic/io_apic.c | 22 ----------------------
1 files changed, 0 insertions(+), 22 deletions(-)
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index c03bcd4..ed8002b 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -3917,28 +3917,6 @@ void __init probe_nr_irqs_gsi(void)
printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi);
}
-#ifdef CONFIG_SPARSE_IRQ
-int __init arch_probe_nr_irqs(void)
-{
- int nr;
-
- if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
- nr_irqs = NR_VECTORS * nr_cpu_ids;
-
- nr = nr_irqs_gsi + 8 * nr_cpu_ids;
-#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
- /*
- * for MSI and HT dyn irq
- */
- nr += nr_irqs_gsi * 16;
-#endif
- if (nr < nr_irqs)
- nr_irqs = nr;
-
- return 0;
-}
-#endif
-
static int __io_apic_set_pci_routing(struct device *dev, int irq,
struct io_apic_irq_attr *irq_attr)
{
--
1.6.4.2
Move arch_init_copy_chip_data and arch_free_chip_data into function
pointers in struct irq_chip, since they operate on irq_desc->chip_data.
arch_init_chip_data cannot be moved into struct irq_chip because
irq_desc->chip is not known at the time the irq_desc is set up. Instead,
rename arch_init_chip_data to arch_init_irq_desc for PowerPC, the only
other user, whose usage better matches the new name.
To replace the x86 arch_init_chip_data functionality,
irq_to_desc_alloc_node() now takes a pointer to a function that allocates
the chip data. This is necessary to ensure the allocation happens under
the correct locking at the core level. PowerPC and SH (the other users of
irq_to_desc_alloc_node) pass in NULL, which retains the existing
chip_data behaviour.
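To make the shape of the new interface concrete, here is a minimal
sketch (not part of this patch; all foo_* names are made up): a driver
passes an init callback to irq_to_desc_alloc_node_x() so its chip_data
is allocated under the core lock, and fills in copy_chip_data and
free_chip_data in its irq_chip so move_irq_desc() is still willing to
migrate the descriptor:

	struct foo_chip_data {
		unsigned int channel;
	};

	/* Runs from irq_to_desc_alloc_node_x() under the core irq lock. */
	static int foo_init_chip_data(struct irq_desc *desc, int node)
	{
		struct foo_chip_data *data;

		data = kzalloc_node(sizeof(*data), GFP_ATOMIC, node);
		if (!data)
			return -ENOMEM;
		desc->chip_data = data;
		return 0;
	}

	static void foo_copy_chip_data(struct irq_desc *old_desc,
				       struct irq_desc *desc, int node)
	{
		/* give the new descriptor its own copy on the target node */
		if (!foo_init_chip_data(desc, node))
			memcpy(desc->chip_data, old_desc->chip_data,
			       sizeof(struct foo_chip_data));
	}

	static void foo_free_chip_data(struct irq_desc *old_desc,
				       struct irq_desc *desc)
	{
		kfree(old_desc->chip_data);
		old_desc->chip_data = NULL;
	}

	static struct irq_chip foo_chip = {
		.name		= "FOO",
		/* without both hooks, move_irq_desc() leaves the desc alone */
		.copy_chip_data	= foo_copy_chip_data,
		.free_chip_data	= foo_free_chip_data,
	};

	...
		desc = irq_to_desc_alloc_node_x(irq, node, foo_init_chip_data);
		if (WARN_ON(!desc))
			return -1;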
I've retained the chip_data behaviour for uv_irq, although it isn't
clear to me whether these interrupt types support migration or how
closely related to the APIC modes they really are. If it weren't for
this, the x86_{init,copy,free}_chip_data functions could be static to
io_apic.c.
I've tested by booting on a 64-bit x86 system with sparse IRQ enabled
and a 32-bit one without, but it's not clear to me what actions I need
to take to actually exercise some of these code paths.
-v4: Yinghai added irq_to_desc_alloc_node_x...
so the default path could be left unchanged...
Signed-off-by: Ian Campbell <ian.ca...@citrix.com>
Acked-by: Michael Ellerman <mic...@ellerman.id.au> [PowerPC rename portion]
Cc: Thomas Gleixner <tg...@linutronix.de>
Cc: Ingo Molnar <mi...@redhat.com>
Cc: H. Peter Anvin <h...@zytor.com>
Cc: Eric W. Biederman <ebie...@xmission.com>
Cc: Yinghai Lu <yin...@kernel.org>
Cc: Jeremy Fitzhardinge <jer...@goop.org>
Cc: Benjamin Herrenschmidt <be...@kernel.crashing.org>
Cc: Paul Mackerras <pau...@samba.org>
Cc: x...@kernel.org
Cc: linuxp...@ozlabs.org
Cc: linux-...@vger.kernel.org
Cc: Rusty Russell <ru...@rustcorp.com.au>
Cc: lgu...@ozlabs.org
Cc: Paul Mundt <let...@linux-sh.org>
Cc: linu...@vger.kernel.org
---
arch/powerpc/kernel/irq.c | 2 +-
arch/x86/include/asm/hw_irq.h | 7 +++++-
arch/x86/kernel/apic/io_apic.c | 49 ++++++++++++++++++++++++++++++++++++---
arch/x86/kernel/uv_irq.c | 3 ++
drivers/xen/events.c | 7 +++++-
include/linux/interrupt.h | 1 -
include/linux/irq.h | 21 +++++++++++++----
kernel/irq/chip.c | 7 +++++
kernel/irq/handle.c | 13 ++++++----
kernel/irq/numa_migrate.c | 12 ++++++++-
kernel/softirq.c | 5 ----
11 files changed, 102 insertions(+), 25 deletions(-)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 64f6f20..cafd378 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -1088,7 +1088,7 @@ int arch_early_irq_init(void)
return 0;
}
-int arch_init_chip_data(struct irq_desc *desc, int node)
+int arch_init_irq_desc(struct irq_desc *desc, int node, init_chip_data_fn fn)
{
desc->status |= IRQ_NOREQUEST;
return 0;
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 46c0fe0..767d3f8 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -20,9 +20,9 @@
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/smp.h>
+#include <linux/irq.h>
#include <asm/atomic.h>
-#include <asm/irq.h>
#include <asm/sections.h>
/* Interrupt handlers registered during init_IRQ */
@@ -61,6 +61,11 @@ extern void init_VISWS_APIC_irqs(void);
extern void setup_IO_APIC(void);
extern void disable_IO_APIC(void);
+extern void x86_copy_chip_data(struct irq_desc *old_desc,
+ struct irq_desc *desc, int node);
+extern void x86_free_chip_data(struct irq_desc *old_desc,
+ struct irq_desc *desc);
+
struct io_apic_irq_attr {
int ioapic;
int ioapic_pin;
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 463de9a..a917fdf 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -211,7 +211,7 @@ static struct irq_cfg *get_one_free_irq_cfg(int node)
return cfg;
}
-int arch_init_chip_data(struct irq_desc *desc, int node)
+static int x86_init_chip_data(struct irq_desc *desc, int node)
{
struct irq_cfg *cfg;
@@ -287,8 +287,8 @@ static void free_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg)
old_cfg->irq_2_pin = NULL;
}
-void arch_init_copy_chip_data(struct irq_desc *old_desc,
- struct irq_desc *desc, int node)
+void x86_copy_chip_data(struct irq_desc *old_desc,
+ struct irq_desc *desc, int node)
{
struct irq_cfg *cfg;
struct irq_cfg *old_cfg;
@@ -312,7 +312,7 @@ static void free_irq_cfg(struct irq_cfg *old_cfg)
kfree(old_cfg);
}
-void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
+void x86_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
{
struct irq_cfg *old_cfg, *cfg;
@@ -329,6 +329,14 @@ void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
}
}
/* end for move_irq_desc */
+int arch_init_irq_desc(struct irq_desc *desc, int node,
+ init_chip_data_fn init_chip_data)
+{
+ if (!init_chip_data)
+ return x86_init_chip_data(desc, node);
+
+ return init_chip_data(desc, node);
+}
#else
struct irq_cfg *irq_cfg(unsigned int irq)
@@ -336,6 +344,15 @@ struct irq_cfg *irq_cfg(unsigned int irq)
return irq < nr_irqs ? irq_cfgx + irq : NULL;
}
+void x86_copy_chip_data(struct irq_desc *old_desc,
+ struct irq_desc *desc, int node)
+{
+}
+
+void x86_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
+{
+}
+
#endif
struct io_apic {
@@ -2747,6 +2764,9 @@ static struct irq_chip ioapic_chip __read_mostly = {
.set_affinity = set_ioapic_affinity_irq,
#endif
.retrigger = ioapic_retrigger_irq,
+
+ .copy_chip_data = x86_copy_chip_data,
+ .free_chip_data = x86_free_chip_data,
};
static struct irq_chip ir_ioapic_chip __read_mostly = {
@@ -2762,6 +2782,9 @@ static struct irq_chip ir_ioapic_chip __read_mostly = {
#endif
#endif
.retrigger = ioapic_retrigger_irq,
+
+ .copy_chip_data = x86_copy_chip_data,
+ .free_chip_data = x86_free_chip_data,
};
static inline void init_IO_APIC_traps(void)
@@ -3474,6 +3497,9 @@ static struct irq_chip msi_chip = {
.set_affinity = set_msi_irq_affinity,
#endif
.retrigger = ioapic_retrigger_irq,
+
+ .copy_chip_data = x86_copy_chip_data,
+ .free_chip_data = x86_free_chip_data,
};
static struct irq_chip msi_ir_chip = {
@@ -3487,6 +3513,9 @@ static struct irq_chip msi_ir_chip = {
#endif
#endif
.retrigger = ioapic_retrigger_irq,
+
+ .copy_chip_data = x86_copy_chip_data,
+ .free_chip_data = x86_free_chip_data,
};
/*
@@ -3646,6 +3675,9 @@ static struct irq_chip dmar_msi_type = {
.set_affinity = dmar_msi_set_affinity,
#endif
.retrigger = ioapic_retrigger_irq,
+
+ .copy_chip_data = x86_copy_chip_data,
+ .free_chip_data = x86_free_chip_data,
};
int arch_setup_dmar_msi(unsigned int irq)
@@ -3703,6 +3735,9 @@ static struct irq_chip ir_hpet_msi_type = {
#endif
#endif
.retrigger = ioapic_retrigger_irq,
+
+ .copy_chip_data = x86_copy_chip_data,
+ .free_chip_data = x86_free_chip_data,
};
static struct irq_chip hpet_msi_type = {
@@ -3714,6 +3749,9 @@ static struct irq_chip hpet_msi_type = {
.set_affinity = hpet_msi_set_affinity,
#endif
.retrigger = ioapic_retrigger_irq,
+
+ .copy_chip_data = x86_copy_chip_data,
+ .free_chip_data = x86_free_chip_data,
};
int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
@@ -3800,6 +3838,9 @@ static struct irq_chip ht_irq_chip = {
.set_affinity = set_ht_irq_affinity,
#endif
.retrigger = ioapic_retrigger_irq,
+
+ .copy_chip_data = x86_copy_chip_data,
+ .free_chip_data = x86_free_chip_data,
};
int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
diff --git a/arch/x86/kernel/uv_irq.c b/arch/x86/kernel/uv_irq.c
index ece73d8..4c61f1b 100644
--- a/arch/x86/kernel/uv_irq.c
+++ b/arch/x86/kernel/uv_irq.c
@@ -55,6 +55,9 @@ struct irq_chip uv_irq_chip = {
.eoi = uv_ack_apic,
.end = uv_noop,
.set_affinity = uv_set_irq_affinity,
+
+ .copy_chip_data = x86_copy_chip_data,
+ .free_chip_data = x86_free_chip_data,
};
/*
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 2f84137..64cbbe4 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -329,6 +329,11 @@ static void unmask_evtchn(int port)
put_cpu();
}
+static int xen_init_chip_data(struct irq_desc *desc, int node)
+{
+ return 0;
+}
+
static int find_unbound_irq(void)
{
int irq;
@@ -341,7 +346,7 @@ static int find_unbound_irq(void)
if (irq == nr_irqs)
panic("No available IRQ to bind to: increase nr_irqs!\n");
- desc = irq_to_desc_alloc_node(irq, 0);
+ desc = irq_to_desc_alloc_node_x(irq, 0, xen_init_chip_data);
if (WARN_ON(desc == NULL))
return -1;
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 75f3f00..0b0d679 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -611,6 +611,5 @@ struct irq_desc;
extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);
-extern int arch_init_chip_data(struct irq_desc *desc, int node);
#endif
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 707ab12..60f3368 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -140,6 +140,13 @@ struct irq_chip {
* Will disappear.
*/
const char *typename;
+
+ /* for move_irq_desc */
+ void (*copy_chip_data)(struct irq_desc *old_desc,
+ struct irq_desc *desc, int node);
+ void (*free_chip_data)(struct irq_desc *old_desc,
+ struct irq_desc *desc);
+
};
struct timer_rand_state;
@@ -208,10 +215,6 @@ struct irq_desc {
const char *name;
} ____cacheline_internodealigned_in_smp;
-extern void arch_init_copy_chip_data(struct irq_desc *old_desc,
- struct irq_desc *desc, int node);
-extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc);
-
#ifndef CONFIG_SPARSE_IRQ
extern struct irq_desc irq_desc[NR_IRQS];
#endif
@@ -225,7 +228,15 @@ static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
}
#endif
-extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node);
+typedef int (*init_chip_data_fn)(struct irq_desc *, int node);
+int arch_init_irq_desc(struct irq_desc *desc, int node, init_chip_data_fn fn);
+struct irq_desc *irq_to_desc_alloc_node_x(unsigned int irq, int node,
+ init_chip_data_fn fn);
+static inline struct irq_desc *irq_to_desc_alloc_node(unsigned int irq,
+ int node)
+{
+ return irq_to_desc_alloc_node_x(irq, node, NULL);
+}
/*
* Pick up the arch-dependent methods:
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index bbba585..3dcdd2f 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -758,3 +758,10 @@ void __init set_irq_probe(unsigned int irq)
desc->status &= ~IRQ_NOPROBE;
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
+
+int __weak arch_init_irq_desc(struct irq_desc *desc, int node,
+ init_chip_data_fn init_chip_data)
+{
+ return 0;
+}
+
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 76d5a67..f30c9c7 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -100,7 +100,8 @@ void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
}
}
-static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
+static void init_one_irq_desc(int irq, struct irq_desc *desc, int node,
+ init_chip_data_fn init_chip_data)
{
memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
@@ -120,7 +121,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
BUG_ON(1);
}
init_desc_masks(desc);
- arch_init_chip_data(desc, node);
+ arch_init_irq_desc(desc, node, init_chip_data);
}
/*
@@ -198,7 +199,8 @@ int __init early_irq_init(void)
return arch_early_irq_init();
}
-struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
+struct irq_desc * __ref irq_to_desc_alloc_node_x(unsigned int irq, int node,
+ init_chip_data_fn init_chip_data)
{
struct irq_desc *desc;
unsigned long flags;
@@ -227,7 +229,7 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
printk(KERN_ERR "can not alloc irq_desc\n");
BUG_ON(1);
}
- init_one_irq_desc(irq, desc, node);
+ init_one_irq_desc(irq, desc, node, init_chip_data);
set_irq_desc(irq, desc);
@@ -277,7 +279,8 @@ struct irq_desc *irq_to_desc(unsigned int irq)
return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
-struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
+struct irq_desc *irq_to_desc_alloc_node_x(unsigned int irq, int node,
+ init_chip_data_fn init_chip_data)
{
return irq_to_desc(irq);
}
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index 963559d..9ea09c9 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -47,7 +47,8 @@ static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
lockdep_set_class(&desc->lock, &irq_desc_lock_class);
init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids);
init_copy_desc_masks(old_desc, desc);
- arch_init_copy_chip_data(old_desc, desc, node);
+ if (desc->chip->copy_chip_data)
+ desc->chip->copy_chip_data(old_desc, desc, node);
return true;
}
@@ -55,7 +56,8 @@ static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc)
{
free_kstat_irqs(old_desc, desc);
free_desc_masks(old_desc, desc);
- arch_free_chip_data(old_desc, desc);
+ if (desc->chip->free_chip_data)
+ desc->chip->free_chip_data(old_desc, desc);
}
static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
@@ -107,9 +109,15 @@ out_unlock:
struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
{
+
/* those static or target node is -1, do not move them */
if (desc->irq < NR_IRQS_LEGACY || node == -1)
return desc;
+ /* IRQ chip does not support movement */
+ if (desc->chip_data &&
+ (desc->chip->copy_chip_data == NULL ||
+ desc->chip->free_chip_data == NULL))
+ return desc;
if (desc->node != node)
desc = __real_move_irq_desc(desc, node);
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 7c1a67e..7df0209 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -895,8 +895,3 @@ int __init __weak arch_early_irq_init(void)
{
return 0;
}
-
-int __weak arch_init_chip_data(struct irq_desc *desc, int node)
-{
- return 0;
-}
--
1.6.4.2
|
| commit b9c61b70075c87a8612624736faf4a2de5b1ed30
|
| x86/pci: update pirq_enable_irq() to setup io apic routing
|
ACPI: IOAPIC (id[0x10] address[0xfecff000] gsi_base[0])
IOAPIC[0]: apic_id 16, version 0, address 0xfecff000, GSI 0-2
ACPI: IOAPIC (id[0x0f] address[0xfec00000] gsi_base[3])
IOAPIC[1]: apic_id 15, version 0, address 0xfec00000, GSI 3-38
ACPI: IOAPIC (id[0x0e] address[0xfec01000] gsi_base[39])
IOAPIC[2]: apic_id 14, version 0, address 0xfec01000, GSI 39-74
As explained in the previous patch ("x86: Fix out of order gsi"), we
need to remap those GSIs.
This patch adds boot_ioapic_idx and gsi_to_irq()/irq_to_gsi(), so that
on this kind of system we end up with:
irq 0 - 15 for the legacy irqs
irq 16 and above mapped to gsi + 16
-v13: move gsi_to_irq/irq_to_gsi to acpi/boot.c
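A worked example of the mapping (assuming the boot IOAPIC ends up with a
non-zero gsi_base so gsi_delta becomes NR_IRQS_LEGACY (16), the ISA irqs
keep their default identity mapping in isa_irq_to_gsi[], and no
interrupt source override claims gsi 20):

	gsi_to_irq(9)  ->  9	/* gsi 9 found in isa_irq_to_gsi[], stays legacy */
	gsi_to_irq(20) -> 36	/* 20 + gsi_delta */
	irq_to_gsi(3)  ->  3	/* irq < NR_IRQS_LEGACY: isa_irq_to_gsi[3] */
	irq_to_gsi(36) -> 20	/* 36 - gsi_delta */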
Reported-by: Iranna D Ankad <iranna...@in.ibm.com>
Bisected-by: Iranna D Ankad <iranna...@in.ibm.com>
Tested-by: Gary Hade <gary...@us.ibm.com>
Signed-off-by: Yinghai Lu <yin...@kernel.org>
Cc: Thomas Renninger <tr...@suse.de>
Cc: Eric W. Biederman <ebie...@xmission.com>
Cc: Suresh Siddha <suresh....@intel.com>
Cc: len....@intel.com
---
arch/x86/include/asm/io_apic.h | 2 +-
arch/x86/include/asm/mpspec.h | 14 ++++++++
arch/x86/kernel/acpi/boot.c | 64 +++++++++++++++++++++++++++++------
arch/x86/kernel/apic/io_apic.c | 72 +++++++++++++++++++++++++++++----------
drivers/pnp/pnpacpi/rsparser.c | 4 ++
5 files changed, 125 insertions(+), 31 deletions(-)
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 35832a0..c4683b9 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -158,7 +158,7 @@ extern int io_apic_get_redir_entries(int ioapic);
struct io_apic_irq_attr;
extern int io_apic_set_pci_routing(struct device *dev, int irq,
struct io_apic_irq_attr *irq_attr);
-void setup_IO_APIC_irq_extra(u32 gsi);
+void setup_IO_APIC_irq_extra(u32 gsi, unsigned int *irq);
extern int (*ioapic_renumber_irq)(int ioapic, int irq);
extern void ioapic_init_mappings(void);
extern void ioapic_insert_resources(void);
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index d8bf23a..1a221e0 100644
--- a/arch/x86/include/asm/mpspec.h
+++ b/arch/x86/include/asm/mpspec.h
@@ -109,6 +109,9 @@ extern int acpi_probe_gsi(void);
#ifdef CONFIG_X86_IO_APIC
extern int mp_find_ioapic(int gsi);
extern int mp_find_ioapic_pin(int ioapic, int gsi);
+extern int gsi_delta;
+int gsi_to_irq(unsigned int gsi);
+unsigned int irq_to_gsi(int irq);
#endif
#else /* !CONFIG_ACPI: */
static inline int acpi_probe_gsi(void)
@@ -117,6 +120,17 @@ static inline int acpi_probe_gsi(void)
}
#endif /* CONFIG_ACPI */
+#if !defined(CONFIG_ACPI) || !defined(CONFIG_X86_IO_APIC)
+static inline int gsi_to_irq(unsigned int gsi)
+{
+ return gsi;
+}
+static inline unsigned int irq_to_gsi(int irq)
+{
+ return irq;
+}
+#endif
+
#define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS)
struct physid_mask {
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 37de00f..2450c95 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -311,7 +311,8 @@ acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end)
/*
* Parse Interrupt Source Override for the ACPI SCI
*/
-static void __init acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
+static void __init
+acpi_sci_ioapic_setup(u8 bus_irq, u32 gsi, u16 polarity, u16 trigger)
{
if (trigger == 0) /* compatible SCI trigger is level */
trigger = 3;
@@ -331,7 +332,7 @@ static void __init acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
* If GSI is < 16, this will update its flags,
* else it will create a new mp_irqs[] entry.
*/
- mp_override_legacy_irq(gsi, polarity, trigger, gsi);
+ mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
/*
* stash over-ride to indicate we've been here
@@ -355,7 +356,8 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
acpi_table_print_madt_entry(header);
if (intsrc->source_irq == acpi_gbl_FADT.sci_interrupt) {
- acpi_sci_ioapic_setup(intsrc->global_irq,
+ acpi_sci_ioapic_setup(intsrc->source_irq,
+ intsrc->global_irq,
intsrc->inti_flags & ACPI_MADT_POLARITY_MASK,
(intsrc->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2);
return 0;
@@ -446,11 +448,11 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
{
- *irq = gsi;
+ *irq = gsi_to_irq(gsi);
#ifdef CONFIG_X86_IO_APIC
if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC)
- setup_IO_APIC_irq_extra(gsi);
+ setup_IO_APIC_irq_extra(gsi, irq);
#endif
return 0;
@@ -914,6 +916,40 @@ static void save_mp_irq(struct mpc_intsrc *m)
panic("Max # of irq sources exceeded!!\n");
}
+/* By default isa irqs are identity mapped to gsis */
+static unsigned int isa_irq_to_gsi[NR_IRQS_LEGACY] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
+};
+
+int gsi_delta;
+int gsi_to_irq(unsigned int gsi)
+{
+ unsigned int irq = gsi;
+ unsigned int i;
+
+ irq += gsi_delta;
+ for (i = 0; i < NR_IRQS_LEGACY; i++) {
+ if (isa_irq_to_gsi[i] == gsi) {
+ irq = i;
+ break;
+ }
+ }
+
+ return irq;
+}
+
+unsigned int irq_to_gsi(int irq)
+{
+ unsigned int gsi;
+
+ if (irq < NR_IRQS_LEGACY)
+ gsi = isa_irq_to_gsi[irq];
+ else
+ gsi = irq - gsi_delta;
+
+ return gsi;
+}
+
void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
{
int ioapic;
@@ -945,6 +981,8 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
mp_irq.dstirq = pin; /* INTIN# */
save_mp_irq(&mp_irq);
+
+ isa_irq_to_gsi[bus_irq] = gsi;
}
void __init mp_config_acpi_legacy_irqs(void)
@@ -974,7 +1012,7 @@ void __init mp_config_acpi_legacy_irqs(void)
/*
* Locate the IOAPIC that manages the ISA IRQs (0-15).
*/
- ioapic = mp_find_ioapic(0);
+ ioapic = mp_find_ioapic(irq_to_gsi(0));
if (ioapic < 0)
return;
dstapic = mp_ioapics[ioapic].apicid;
@@ -1057,6 +1095,7 @@ int mp_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
{
int ioapic;
int ioapic_pin;
+ int irq;
struct io_apic_irq_attr irq_attr;
if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
@@ -1079,11 +1118,12 @@ int mp_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
gsi = ioapic_renumber_irq(ioapic, gsi);
#endif
+ irq = gsi_to_irq(gsi);
if (ioapic_pin > MP_MAX_IOAPIC_PIN) {
printk(KERN_ERR "Invalid reference to IOAPIC pin "
"%d-%d\n", mp_ioapics[ioapic].apicid,
ioapic_pin);
- return gsi;
+ return irq;
}
if (enable_update_mptable)
@@ -1092,9 +1132,9 @@ int mp_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
set_io_apic_irq_attr(&irq_attr, ioapic, ioapic_pin,
trigger == ACPI_EDGE_SENSITIVE ? 0 : 1,
polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
- io_apic_set_pci_routing(dev, gsi, &irq_attr);
+ io_apic_set_pci_routing(dev, irq, &irq_attr);
- return gsi;
+ return irq;
}
/*
@@ -1151,8 +1191,10 @@ static int __init acpi_parse_madt_ioapic_entries(void)
* If BIOS did not supply an INT_SRC_OVR for the SCI
* pretend we got one so we can set the SCI flags.
*/
- if (!acpi_sci_override_gsi)
- acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0);
+ if (!acpi_sci_override_gsi) {
+ int irq = gsi_to_irq(acpi_gbl_FADT.sci_interrupt);
+ acpi_sci_ioapic_setup(irq, acpi_gbl_FADT.sci_interrupt, 0, 0);
+ }
/* Fill in identity legacy mappings where no override */
mp_config_acpi_legacy_irqs();
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index a917fdf..61b59ef 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -97,6 +97,8 @@ int mp_irq_entries;
/* GSI interrupts */
static int nr_irqs_gsi = NR_IRQS_LEGACY;
+static int boot_ioapic_idx;
+
#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
int mp_bus_id_to_type[MAX_MP_BUSSES];
#endif
@@ -1032,7 +1034,7 @@ static inline int irq_trigger(int idx)
int (*ioapic_renumber_irq)(int ioapic, int irq);
static int pin_2_irq(int idx, int apic, int pin)
{
- int irq, i;
+ int irq;
int bus = mp_irqs[idx].srcbus;
/*
@@ -1044,18 +1046,28 @@ static int pin_2_irq(int idx, int apic, int pin)
if (test_bit(bus, mp_bus_not_pci)) {
irq = mp_irqs[idx].srcbusirq;
} else {
- /*
- * PCI IRQs are mapped in order
- */
- i = irq = 0;
- while (i < apic)
- irq += nr_ioapic_registers[i++];
- irq += pin;
+ unsigned int gsi;
+ if (!acpi_ioapic) {
+ int i;
+ /*
+ * PCI IRQs are mapped in order
+ */
+ i = gsi = 0;
+ while (i < apic)
+ gsi += nr_ioapic_registers[i++];
+ gsi += pin;
+ } else
+ gsi = pin + mp_gsi_routing[apic].gsi_base;
+
+#ifdef CONFIG_X86_32
/*
* For MPS mode, so far only needed by ES7000 platform
*/
if (ioapic_renumber_irq)
- irq = ioapic_renumber_irq(apic, irq);
+ gsi = ioapic_renumber_irq(apic, gsi);
+#endif
+
+ irq = gsi_to_irq(gsi);
}
#ifdef CONFIG_X86_32
@@ -1505,9 +1517,10 @@ static void __init setup_IO_APIC_irqs(void)
struct irq_cfg *cfg;
int node = cpu_to_node(boot_cpu_id);
+ apic_id = boot_ioapic_idx;
+
apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
- for (apic_id = 0; apic_id < nr_ioapics; apic_id++)
for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) {
idx = find_irq_entry(apic_id, pin, mp_INT);
if (idx == -1) {
@@ -1529,9 +1542,6 @@ static void __init setup_IO_APIC_irqs(void)
irq = pin_2_irq(idx, apic_id, pin);
- if ((apic_id > 0) && (irq > 16))
- continue;
-
/*
* Skip the timer IRQ if there's a quirk handler
* installed and if it returns 1:
@@ -1565,7 +1575,7 @@ static void __init setup_IO_APIC_irqs(void)
* but could not use acpi_register_gsi()
* like some special sci in IBM x3330
*/
-void setup_IO_APIC_irq_extra(u32 gsi)
+void setup_IO_APIC_irq_extra(u32 gsi, unsigned int *pirq)
{
int apic_id = 0, pin, idx, irq;
int node = cpu_to_node(boot_cpu_id);
@@ -1585,6 +1595,7 @@ void setup_IO_APIC_irq_extra(u32 gsi)
return;
irq = pin_2_irq(idx, apic_id, pin);
+ *pirq = irq;
#ifdef CONFIG_SPARSE_IRQ
desc = irq_to_desc(irq);
if (desc)
@@ -2028,6 +2039,30 @@ void __init enable_IO_APIC(void)
clear_IO_APIC();
}
+static void __init probe_ioapic_i8259(void)
+{
+ /* probe boot ioapic idx */
+ boot_ioapic_idx = ioapic_i8259.apic;
+ if (boot_ioapic_idx < 0)
+ boot_ioapic_idx = find_isa_irq_apic(0, mp_INT);
+#ifdef CONFIG_ACPI
+ if (!acpi_disabled && acpi_ioapic && boot_ioapic_idx < 0)
+ boot_ioapic_idx = mp_find_ioapic(irq_to_gsi(0));
+#endif
+ if (boot_ioapic_idx < 0)
+ boot_ioapic_idx = 0;
+
+#ifdef CONFIG_ACPI
+ if (mp_gsi_routing[boot_ioapic_idx].gsi_base) {
+ gsi_delta = NR_IRQS_LEGACY;
+ nr_irqs_gsi += NR_IRQS_LEGACY;
+ printk(KERN_DEBUG "new nr_irqs_gsi: %d\n", nr_irqs_gsi);
+ }
+#endif
+
+ printk(KERN_INFO "boot_ioapic_idx: %d\n", boot_ioapic_idx);
+}
+
/*
* Not an __init, needed by the reboot code
*/
@@ -3045,7 +3080,7 @@ static inline void __init check_timer(void)
legacy_pic->chip->unmask(0);
}
if (disable_timer_pin_1 > 0)
- clear_IO_APIC_pin(0, pin1);
+ clear_IO_APIC_pin(apic1, pin1);
goto out;
}
if (intr_remapping_enabled)
@@ -3165,6 +3200,7 @@ void __init setup_IO_APIC(void)
x86_init.mpparse.setup_ioapic_ids();
sync_Arb_IDs();
+ probe_ioapic_i8259();
setup_IO_APIC_irqs();
init_IO_APIC_traps();
if (legacy_pic->nr_legacy_irqs)
@@ -4156,16 +4192,14 @@ void __init setup_ioapic_dest(void)
if (skip_ioapic_setup == 1)
return;
- for (ioapic = 0; ioapic < nr_ioapics; ioapic++)
+ ioapic = boot_ioapic_idx;
+
for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
irq_entry = find_irq_entry(ioapic, pin, mp_INT);
if (irq_entry == -1)
continue;
irq = pin_2_irq(irq_entry, ioapic, pin);
- if ((ioapic > 0) && (irq > 16))
- continue;
-
desc = irq_to_desc(irq);
/*
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index 54514aa..1dcf64d 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -123,6 +123,10 @@ static void pnpacpi_parse_allocated_irqresource(struct pnp_dev *dev,
}
flags = irq_flags(triggering, polarity, shareable);
+#ifdef CONFIG_X86
+ /* bus_irq or gsi ? */
+ gsi = irq_to_gsi(gsi);
+#endif
irq = acpi_register_gsi(&dev->dev, gsi, triggering, polarity);
if (irq >= 0)
pcibios_penalize_isa_irq(irq, 1);
--
1.6.4.2
-v2: add fw_memmap wrapper to some func...
move some functions back to e820.c
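Roughly, the intended split looks like this (a sketch of the result,
assuming the E820_* type constants move to <linux/fw_memmap.h> along
with the rest of the generic API): arch-neutral code calls the
fw_memmap_* functions directly, while the old x86 entry points survive
as thin inline wrappers in <asm/e820.h>:

	#include <linux/fw_memmap.h>

	/* generic code uses the firmware memmap API directly ... */
	fw_memmap_add_region(base, size, E820_RAM);

	/* ... while existing x86 callers keep the old spelling, which
	 * just forwards to the same function: */
	e820_add_region(base, size, E820_RAM);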
Signed-off-by: Yinghai Lu <yin...@kernel.org>
---
arch/x86/include/asm/e820.h | 176 ++++++-------
arch/x86/kernel/e820.c | 638 ++----------------------------------------
include/linux/bootmem.h | 2 +-
include/linux/early_res.h | 1 +
include/linux/fw_memmap.h | 40 +++
kernel/Makefile | 2 +-
kernel/fw_memmap.c | 625 +++++++++++++++++++++++++++++++++++++++++
kernel/fw_memmap_internals.h | 49 ++++
8 files changed, 822 insertions(+), 711 deletions(-)
create mode 100644 include/linux/fw_memmap.h
create mode 100644 kernel/fw_memmap.c
create mode 100644 kernel/fw_memmap_internals.h
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index 71c0348..c038616 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -1,65 +1,10 @@
#ifndef _ASM_X86_E820_H
#define _ASM_X86_E820_H
-#define E820MAP 0x2d0 /* our map */
-#define E820MAX 128 /* number of entries in E820MAP */
-
-/*
- * Legacy E820 BIOS limits us to 128 (E820MAX) nodes due to the
- * constrained space in the zeropage. If we have more nodes than
- * that, and if we've booted off EFI firmware, then the EFI tables
- * passed us from the EFI firmware can list more nodes. Size our
- * internal memory map tables to have room for these additional
- * nodes, based on up to three entries per node for which the
- * kernel was built: MAX_NUMNODES == (1 << CONFIG_NODES_SHIFT),
- * plus E820MAX, allowing space for the possible duplicate E820
- * entries that might need room in the same arrays, prior to the
- * call to sanitize_e820_map() to remove duplicates. The allowance
- * of three memory map entries per node is "enough" entries for
- * the initial hardware platform motivating this mechanism to make
- * use of additional EFI map entries. Future platforms may want
- * to allow more than three entries per node or otherwise refine
- * this size.
- */
-
-/*
- * Odd: 'make headers_check' complains about numa.h if I try
- * to collapse the next two #ifdef lines to a single line:
- * #if defined(__KERNEL__) && defined(CONFIG_EFI)
- */
-#ifdef __KERNEL__
-#ifdef CONFIG_EFI
-#include <linux/numa.h>
-#define E820_X_MAX (E820MAX + 3 * MAX_NUMNODES)
-#else /* ! CONFIG_EFI */
-#define E820_X_MAX E820MAX
-#endif
-#else /* ! __KERNEL__ */
-#define E820_X_MAX E820MAX
-#endif
-
-#define E820NR 0x1e8 /* # entries in E820MAP */
-
-#define E820_RAM 1
-#define E820_RESERVED 2
-#define E820_ACPI 3
-#define E820_NVS 4
-#define E820_UNUSABLE 5
/* reserved RAM used by kernel itself */
#define E820_RESERVED_KERN 128
#ifndef __ASSEMBLY__
-#include <linux/types.h>
-struct e820entry {
- __u64 addr; /* start of memory segment */
- __u64 size; /* size of memory segment */
- __u32 type; /* type of memory segment */
-} __attribute__((packed));
-
-struct e820map {
- __u32 nr_map;
- struct e820entry map[E820_X_MAX];
-};
#define ISA_START_ADDRESS 0xa0000
#define ISA_END_ADDRESS 0x100000
@@ -69,32 +14,18 @@ struct e820map {
#ifdef __KERNEL__
-#ifdef CONFIG_X86_OOSTORE
-extern int centaur_ram_top;
-void get_centaur_ram_top(void);
+#include <linux/fw_memmap.h>
+
+#ifdef CONFIG_MEMTEST
+extern void early_memtest(unsigned long start, unsigned long end);
#else
-static inline void get_centaur_ram_top(void)
+static inline void early_memtest(unsigned long start, unsigned long end)
{
}
#endif
extern unsigned long pci_mem_start;
-extern int e820_any_mapped(u64 start, u64 end, unsigned type);
-extern int e820_all_mapped(u64 start, u64 end, unsigned type);
-extern void e820_add_region(u64 start, u64 size, int type);
-extern void e820_print_map(char *who);
-int sanitize_e820_map(void);
-void save_e820_map(void);
-extern u64 e820_update_range(u64 start, u64 size, unsigned old_type,
- unsigned new_type);
-extern u64 e820_remove_range(u64 start, u64 size, unsigned old_type,
- int checktype);
-extern void update_e820(void);
extern void e820_setup_gap(void);
-extern int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
- unsigned long start_addr, unsigned long long end_addr);
-struct setup_data;
-extern void parse_e820_ext(struct setup_data *data, unsigned long pa_data);
#if defined(CONFIG_X86_64) || \
(defined(CONFIG_X86_32) && defined(CONFIG_HIBERNATION))
@@ -105,37 +36,80 @@ static inline void e820_mark_nosave_regions(unsigned long limit_pfn)
}
#endif
-#ifdef CONFIG_MEMTEST
-extern void early_memtest(unsigned long start, unsigned long end);
-#else
-static inline void early_memtest(unsigned long start, unsigned long end)
+static inline void e820_add_region(u64 start, u64 size, int type)
{
+ fw_memmap_add_region(start, size, type);
+}
+
+static inline void e820_print_map(char *who)
+{
+ fw_memmap_print_map(who);
+}
+
+static inline int sanitize_e820_map(void)
+{
+ return sanitize_fw_memmap();
+}
+
+static inline void finish_e820_parsing(void)
+{
+ finish_fw_memmap_parsing();
+}
+
+static inline void e820_register_active_regions(int nid,
+ unsigned long start_pfn,
+ unsigned long end_pfn)
+{
+ fw_memmap_register_active_regions(nid, start_pfn, end_pfn);
+}
+
+static inline u64 e820_hole_size(u64 start, u64 end)
+{
+ return fw_memmap_hole_size(start, end);
+}
+
+static inline u64 find_e820_area(u64 start, u64 end, u64 size, u64 align)
+{
+ return find_fw_memmap_area(start, end, size, align);
+}
+
+static inline u64 find_e820_area_node(int nid, u64 start, u64 end,
+ u64 size, u64 align)
+{
+ return find_fw_memmap_area_node(nid, start, end, size, align);
}
-#endif
-extern unsigned long end_user_pfn;
+static inline unsigned long e820_end_of_ram_pfn(void)
+{
+ return fw_memmap_end_of_ram_pfn();
+}
+
+void clear_e820_map(void);
+
+extern u64 e820_remove_range(u64 start, u64 size, unsigned old_type,
+ int checktype);
+struct e820entry;
+int __sanitize_e820_map(struct e820entry *biosmap, int max_nr, u32 *pnr_map);
+extern unsigned long e820_end_of_low_ram_pfn(void);
+
+extern int e820_any_mapped(u64 start, u64 end, unsigned type);
+extern int e820_all_mapped(u64 start, u64 end, unsigned type);
+extern u64 e820_update_range(u64 start, u64 size, unsigned old_type,
+ unsigned new_type);
+
+extern void update_e820(void);
+void save_e820_map(void);
+struct setup_data;
+extern void parse_e820_ext(struct setup_data *data, unsigned long pa_data);
+extern char *default_machine_specific_memory_setup(void);
+extern void setup_memory_map(void);
-extern u64 find_e820_area(u64 start, u64 end, u64 size, u64 align);
extern u64 find_e820_area_size(u64 start, u64 *sizep, u64 align);
-u64 find_e820_area_node(int nid, u64 start, u64 end, u64 size, u64 align);
+
extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
-#include <linux/early_res.h>
-extern unsigned long e820_end_of_ram_pfn(void);
-extern unsigned long e820_end_of_low_ram_pfn(void);
-extern int e820_find_active_region(const struct e820entry *ei,
- unsigned long start_pfn,
- unsigned long last_pfn,
- unsigned long *ei_startpfn,
- unsigned long *ei_endpfn);
-extern void e820_register_active_regions(int nid, unsigned long start_pfn,
- unsigned long end_pfn);
-extern u64 e820_hole_size(u64 start, u64 end);
-extern void finish_e820_parsing(void);
extern void e820_reserve_resources(void);
extern void e820_reserve_resources_late(void);
-extern void setup_memory_map(void);
-extern char *default_machine_specific_memory_setup(void);
/*
* Returns true iff the specified range [s,e) is completely contained inside
@@ -146,7 +120,17 @@ static inline bool is_ISA_range(u64 s, u64 e)
return s >= ISA_START_ADDRESS && e <= ISA_END_ADDRESS;
}
+#ifdef CONFIG_X86_OOSTORE
+extern int centaur_ram_top;
+void get_centaur_ram_top(void);
+#else
+static inline void get_centaur_ram_top(void)
+{
+}
+#endif
+
#endif /* __KERNEL__ */
+
#endif /* __ASSEMBLY__ */
#ifdef __KERNEL__
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index a558609..9f125ca 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -12,18 +12,15 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>
-#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/firmware-map.h>
#include <asm/e820.h>
-#include <asm/proto.h>
#include <asm/setup.h>
+#include "../../../kernel/fw_memmap_internals.h"
+
/*
- * The e820 map is the map that gets modified e.g. with command line parameters
- * and that is also registered with modifications in the kernel resource tree
- * with the iomem_resource as parent.
*
* The e820_saved is directly saved after the BIOS-provided memory map is
* copied. It doesn't get modified afterwards. It's registered for the
@@ -34,7 +31,6 @@
* user can e.g. boot the original kernel with mem=1G while still booting the
* next kernel with full memory.
*/
-static struct e820map __initdata e820;
static struct e820map __initdata e820_saved;
/* For PCI or other memory-mapped resources */
@@ -99,295 +95,6 @@ int __init e820_all_mapped(u64 start, u64 end, unsigned type)
return 0;
}
-/*
- * Add a memory region to the kernel e820 map.
- */
-static void __init __e820_add_region(struct e820map *e820x, u64 start, u64 size,
- int type)
-{
- int x = e820x->nr_map;
-
- if (x >= ARRAY_SIZE(e820x->map)) {
- printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
- return;
- }
-
- e820x->map[x].addr = start;
- e820x->map[x].size = size;
- e820x->map[x].type = type;
- e820x->nr_map++;
-}
-
-void __init e820_add_region(u64 start, u64 size, int type)
-{
- __e820_add_region(&e820, start, size, type);
-}
-
-static void __init e820_print_type(u32 type)
-{
- switch (type) {
- case E820_RAM:
- case E820_RESERVED_KERN:
- printk(KERN_CONT "(usable)");
- break;
- case E820_RESERVED:
- printk(KERN_CONT "(reserved)");
- break;
- case E820_ACPI:
- printk(KERN_CONT "(ACPI data)");
- break;
- case E820_NVS:
- printk(KERN_CONT "(ACPI NVS)");
- break;
- case E820_UNUSABLE:
- printk(KERN_CONT "(unusable)");
- break;
- default:
- printk(KERN_CONT "type %u", type);
- break;
- }
-}
-
-void __init e820_print_map(char *who)
-{
- int i;
-
- for (i = 0; i < e820.nr_map; i++) {
- printk(KERN_INFO " %s: %016Lx - %016Lx ", who,
- (unsigned long long) e820.map[i].addr,
- (unsigned long long)
- (e820.map[i].addr + e820.map[i].size));
- e820_print_type(e820.map[i].type);
- printk(KERN_CONT "\n");
- }
-}
-
-/*
- * Sanitize the BIOS e820 map.
- *
- * Some e820 responses include overlapping entries. The following
- * replaces the original e820 map with a new one, removing overlaps,
- * and resolving conflicting memory types in favor of highest
- * numbered type.
- *
- * The input parameter biosmap points to an array of 'struct
- * e820entry' which on entry has elements in the range [0, *pnr_map)
- * valid, and which has space for up to max_nr_map entries.
- * On return, the resulting sanitized e820 map entries will be in
- * overwritten in the same location, starting at biosmap.
- *
- * The integer pointed to by pnr_map must be valid on entry (the
- * current number of valid entries located at biosmap) and will
- * be updated on return, with the new number of valid entries
- * (something no more than max_nr_map.)
- *
- * The return value from sanitize_e820_map() is zero if it
- * successfully 'sanitized' the map entries passed in, and is -1
- * if it did nothing, which can happen if either of (1) it was
- * only passed one map entry, or (2) any of the input map entries
- * were invalid (start + size < start, meaning that the size was
- * so big the described memory range wrapped around through zero.)
- *
- * Visually we're performing the following
- * (1,2,3,4 = memory types)...
- *
- * Sample memory map (w/overlaps):
- * ____22__________________
- * ______________________4_
- * ____1111________________
- * _44_____________________
- * 11111111________________
- * ____________________33__
- * ___________44___________
- * __________33333_________
- * ______________22________
- * ___________________2222_
- * _________111111111______
- * _____________________11_
- * _________________4______
- *
- * Sanitized equivalent (no overlap):
- * 1_______________________
- * _44_____________________
- * ___1____________________
- * ____22__________________
- * ______11________________
- * _________1______________
- * __________3_____________
- * ___________44___________
- * _____________33_________
- * _______________2________
- * ________________1_______
- * _________________4______
- * ___________________2____
- * ____________________33__
- * ______________________4_
- */
-
-static int __init __sanitize_e820_map(struct e820entry *biosmap, int max_nr_map,
- u32 *pnr_map)
-{
- struct change_member {
- struct e820entry *pbios; /* pointer to original bios entry */
- unsigned long long addr; /* address for this change point */
- };
- static struct change_member change_point_list[2*E820_X_MAX] __initdata;
- static struct change_member *change_point[2*E820_X_MAX] __initdata;
- static struct e820entry *overlap_list[E820_X_MAX] __initdata;
- static struct e820entry new_bios[E820_X_MAX] __initdata;
- struct change_member *change_tmp;
- unsigned long current_type, last_type;
- unsigned long long last_addr;
- int chgidx, still_changing;
- int overlap_entries;
- int new_bios_entry;
- int old_nr, new_nr, chg_nr;
- int i;
-
- /* if there's only one memory region, don't bother */
- if (*pnr_map < 2)
- return -1;
-
- old_nr = *pnr_map;
- BUG_ON(old_nr > max_nr_map);
-
- /* bail out if we find any unreasonable addresses in bios map */
- for (i = 0; i < old_nr; i++)
- if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
- return -1;
-
- /* create pointers for initial change-point information (for sorting) */
- for (i = 0; i < 2 * old_nr; i++)
- change_point[i] = &change_point_list[i];
-
- /* record all known change-points (starting and ending addresses),
- omitting those that are for empty memory regions */
- chgidx = 0;
- for (i = 0; i < old_nr; i++) {
- if (biosmap[i].size != 0) {
- change_point[chgidx]->addr = biosmap[i].addr;
- change_point[chgidx++]->pbios = &biosmap[i];
- change_point[chgidx]->addr = biosmap[i].addr +
- biosmap[i].size;
- change_point[chgidx++]->pbios = &biosmap[i];
- }
- }
- chg_nr = chgidx;
-
- /* sort change-point list by memory addresses (low -> high) */
- still_changing = 1;
- while (still_changing) {
- still_changing = 0;
- for (i = 1; i < chg_nr; i++) {
- unsigned long long curaddr, lastaddr;
- unsigned long long curpbaddr, lastpbaddr;
-
- curaddr = change_point[i]->addr;
- lastaddr = change_point[i - 1]->addr;
- curpbaddr = change_point[i]->pbios->addr;
- lastpbaddr = change_point[i - 1]->pbios->addr;
-
- /*
- * swap entries, when:
- *
- * curaddr > lastaddr or
- * curaddr == lastaddr and curaddr == curpbaddr and
- * lastaddr != lastpbaddr
- */
- if (curaddr < lastaddr ||
- (curaddr == lastaddr && curaddr == curpbaddr &&
- lastaddr != lastpbaddr)) {
- change_tmp = change_point[i];
- change_point[i] = change_point[i-1];
- change_point[i-1] = change_tmp;
- still_changing = 1;
- }
- }
- }
-
- /* create a new bios memory map, removing overlaps */
- overlap_entries = 0; /* number of entries in the overlap table */
- new_bios_entry = 0; /* index for creating new bios map entries */
- last_type = 0; /* start with undefined memory type */
- last_addr = 0; /* start with 0 as last starting address */
-
- /* loop through change-points, determining affect on the new bios map */
- for (chgidx = 0; chgidx < chg_nr; chgidx++) {
- /* keep track of all overlapping bios entries */
- if (change_point[chgidx]->addr ==
- change_point[chgidx]->pbios->addr) {
- /*
- * add map entry to overlap list (> 1 entry
- * implies an overlap)
- */
- overlap_list[overlap_entries++] =
- change_point[chgidx]->pbios;
- } else {
- /*
- * remove entry from list (order independent,
- * so swap with last)
- */
- for (i = 0; i < overlap_entries; i++) {
- if (overlap_list[i] ==
- change_point[chgidx]->pbios)
- overlap_list[i] =
- overlap_list[overlap_entries-1];
- }
- overlap_entries--;
- }
- /*
- * if there are overlapping entries, decide which
- * "type" to use (larger value takes precedence --
- * 1=usable, 2,3,4,4+=unusable)
- */
- current_type = 0;
- for (i = 0; i < overlap_entries; i++)
- if (overlap_list[i]->type > current_type)
- current_type = overlap_list[i]->type;
- /*
- * continue building up new bios map based on this
- * information
- */
- if (current_type != last_type) {
- if (last_type != 0) {
- new_bios[new_bios_entry].size =
- change_point[chgidx]->addr - last_addr;
- /*
- * move forward only if the new size
- * was non-zero
- */
- if (new_bios[new_bios_entry].size != 0)
- /*
- * no more space left for new
- * bios entries ?
- */
- if (++new_bios_entry >= max_nr_map)
- break;
- }
- if (current_type != 0) {
- new_bios[new_bios_entry].addr =
- change_point[chgidx]->addr;
- new_bios[new_bios_entry].type = current_type;
- last_addr = change_point[chgidx]->addr;
- }
- last_type = current_type;
- }
- }
- /* retain count for new bios entries */
- new_nr = new_bios_entry;
-
- /* copy new bios mapping into original location */
- memcpy(biosmap, new_bios, new_nr * sizeof(struct e820entry));
- *pnr_map = new_nr;
-
- return 0;
-}
-
-int __init sanitize_e820_map(void)
-{
- return __sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
-}
-
static int __init __append_e820_map(struct e820entry *biosmap, int nr_map)
{
while (nr_map) {
@@ -509,52 +216,6 @@ static u64 __init e820_update_range_saved(u64 start, u64 size,
new_type);
}
-/* make e820 not cover the range */
-u64 __init e820_remove_range(u64 start, u64 size, unsigned old_type,
- int checktype)
-{
- int i;
- u64 end;
- u64 real_removed_size = 0;
-
- if (size > (ULLONG_MAX - start))
- size = ULLONG_MAX - start;
-
- end = start + size;
- printk(KERN_DEBUG "e820 remove range: %016Lx - %016Lx ",
- (unsigned long long) start,
- (unsigned long long) end);
- e820_print_type(old_type);
- printk(KERN_CONT "\n");
-
- for (i = 0; i < e820.nr_map; i++) {
- struct e820entry *ei = &e820.map[i];
- u64 final_start, final_end;
-
- if (checktype && ei->type != old_type)
- continue;
- /* totally covered? */
- if (ei->addr >= start &&
- (ei->addr + ei->size) <= (start + size)) {
- real_removed_size += ei->size;
- memset(ei, 0, sizeof(struct e820entry));
- continue;
- }
- /* partially covered */
- final_start = max(start, ei->addr);
- final_end = min(start + size, ei->addr + ei->size);
- if (final_start >= final_end)
- continue;
- real_removed_size += final_end - final_start;
-
- ei->size -= final_end - final_start;
- if (ei->addr < final_start)
- continue;
- ei->addr = final_end;
- }
- return real_removed_size;
-}
-
void __init update_e820(void)
{
u32 nr_map;
@@ -566,20 +227,24 @@ void __init update_e820(void)
printk(KERN_INFO "modified physical RAM map:\n");
e820_print_map("modified");
}
+
static void __init update_e820_saved(void)
{
u32 nr_map;
+ int max_nr_map = ARRAY_SIZE(e820_saved.map);
nr_map = e820_saved.nr_map;
- if (__sanitize_e820_map(e820_saved.map, ARRAY_SIZE(e820_saved.map), &nr_map))
+ if (__sanitize_e820_map(e820_saved.map, max_nr_map, &nr_map))
return;
e820_saved.nr_map = nr_map;
}
+
#define MAX_GAP_END 0x100000000ull
/*
* Search for a gap in the e820 memory space from start_addr to end_addr.
*/
-__init int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
+static int __init
+e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
unsigned long start_addr, unsigned long long end_addr)
{
unsigned long long last;
@@ -726,37 +391,6 @@ static int __init e820_mark_nvs_memory(void)
core_initcall(e820_mark_nvs_memory);
#endif
-/*
- * Find a free area with specified alignment in a specific range.
- */
-u64 __init find_e820_area(u64 start, u64 end, u64 size, u64 align)
-{
- int i;
-
- for (i = 0; i < e820.nr_map; i++) {
- struct e820entry *ei = &e820.map[i];
- u64 addr;
- u64 ei_start, ei_last;
-
- if (ei->type != E820_RAM)
- continue;
-
- ei_last = ei->addr + ei->size;
- ei_start = ei->addr;
- addr = find_early_area(ei_start, ei_last, start, end,
- size, align);
-
- if (addr != -1ULL)
- return addr;
- }
- return -1ULL;
-}
-
-u64 __init find_fw_memmap_area(u64 start, u64 end, u64 size, u64 align)
-{
- return find_e820_area(start, end, size, align);
-}
-
u64 __init get_max_mapped(void)
{
u64 end = max_pfn_mapped;
@@ -765,6 +399,7 @@ u64 __init get_max_mapped(void)
return end;
}
+
/*
* Find next free range after *start
*/
@@ -792,21 +427,6 @@ u64 __init find_e820_area_size(u64 start, u64 *sizep, u64 align)
return -1ULL;
}
-u64 __init find_e820_area_node(int nid, u64 start, u64 end, u64 size, u64 align)
-{
- u64 addr;
- /*
- * need to call this function after e820_register_active_regions
- * so early_node_map[] is set
- */
- addr = find_memory_core_early(nid, size, align, start, end);
- if (addr != -1ULL)
- return addr;
-
- /* fallback, should already have start end in the node range */
- return find_e820_area(start, end, size, align);
-}
-
/*
* pre allocated 4k and reserved it in e820
*/
@@ -843,220 +463,6 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
return addr;
}
-#ifdef CONFIG_X86_32
-# ifdef CONFIG_X86_PAE
-# define MAX_ARCH_PFN (1ULL<<(36-PAGE_SHIFT))
-# else
-# define MAX_ARCH_PFN (1ULL<<(32-PAGE_SHIFT))
-# endif
-#else /* CONFIG_X86_32 */
-# define MAX_ARCH_PFN MAXMEM>>PAGE_SHIFT
-#endif
-
-/*
- * Find the highest page frame number we have available
- */
-static unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type)
-{
- int i;
- unsigned long last_pfn = 0;
- unsigned long max_arch_pfn = MAX_ARCH_PFN;
-
- for (i = 0; i < e820.nr_map; i++) {
- struct e820entry *ei = &e820.map[i];
- unsigned long start_pfn;
- unsigned long end_pfn;
-
- if (ei->type != type)
- continue;
-
- start_pfn = ei->addr >> PAGE_SHIFT;
- end_pfn = (ei->addr + ei->size) >> PAGE_SHIFT;
-
- if (start_pfn >= limit_pfn)
- continue;
- if (end_pfn > limit_pfn) {
- last_pfn = limit_pfn;
- break;
- }
- if (end_pfn > last_pfn)
- last_pfn = end_pfn;
- }
-
- if (last_pfn > max_arch_pfn)
- last_pfn = max_arch_pfn;
-
- printk(KERN_INFO "last_pfn = %#lx max_arch_pfn = %#lx\n",
- last_pfn, max_arch_pfn);
- return last_pfn;
-}
-unsigned long __init e820_end_of_ram_pfn(void)
-{
- return e820_end_pfn(MAX_ARCH_PFN, E820_RAM);
-}
-
-unsigned long __init e820_end_of_low_ram_pfn(void)
-{
- return e820_end_pfn(1UL<<(32 - PAGE_SHIFT), E820_RAM);
-}
-/*
- * Finds an active region in the address range from start_pfn to last_pfn and
- * returns its range in ei_startpfn and ei_endpfn for the e820 entry.
- */
-int __init e820_find_active_region(const struct e820entry *ei,
- unsigned long start_pfn,
- unsigned long last_pfn,
- unsigned long *ei_startpfn,
- unsigned long *ei_endpfn)
-{
- u64 align = PAGE_SIZE;
-
- *ei_startpfn = round_up(ei->addr, align) >> PAGE_SHIFT;
- *ei_endpfn = round_down(ei->addr + ei->size, align) >> PAGE_SHIFT;
-
- /* Skip map entries smaller than a page */
- if (*ei_startpfn >= *ei_endpfn)
- return 0;
-
- /* Skip if map is outside the node */
- if (ei->type != E820_RAM || *ei_endpfn <= start_pfn ||
- *ei_startpfn >= last_pfn)
- return 0;
-
- /* Check for overlaps */
- if (*ei_startpfn < start_pfn)
- *ei_startpfn = start_pfn;
- if (*ei_endpfn > last_pfn)
- *ei_endpfn = last_pfn;
-
- return 1;
-}
-
-/* Walk the e820 map and register active regions within a node */
-void __init e820_register_active_regions(int nid, unsigned long start_pfn,
- unsigned long last_pfn)
-{
- unsigned long ei_startpfn;
- unsigned long ei_endpfn;
- int i;
-
- for (i = 0; i < e820.nr_map; i++)
- if (e820_find_active_region(&e820.map[i],
- start_pfn, last_pfn,
- &ei_startpfn, &ei_endpfn))
- add_active_range(nid, ei_startpfn, ei_endpfn);
-}
-
-/*
- * Find the hole size (in bytes) in the memory range.
- * @start: starting address of the memory range to scan
- * @end: ending address of the memory range to scan
- */
-u64 __init e820_hole_size(u64 start, u64 end)
-{
- unsigned long start_pfn = start >> PAGE_SHIFT;
- unsigned long last_pfn = end >> PAGE_SHIFT;
- unsigned long ei_startpfn, ei_endpfn, ram = 0;
- int i;
-
- for (i = 0; i < e820.nr_map; i++) {
- if (e820_find_active_region(&e820.map[i],
- start_pfn, last_pfn,
- &ei_startpfn, &ei_endpfn))
- ram += ei_endpfn - ei_startpfn;
- }
- return end - start - ((u64)ram << PAGE_SHIFT);
-}
-
-static void early_panic(char *msg)
-{
- early_printk(msg);
- panic(msg);
-}
-
-static int userdef __initdata;
-
-/* "mem=nopentium" disables the 4MB page tables. */
-static int __init parse_memopt(char *p)
-{
- u64 mem_size;
-
- if (!p)
- return -EINVAL;
-
-#ifdef CONFIG_X86_32
- if (!strcmp(p, "nopentium")) {
- setup_clear_cpu_cap(X86_FEATURE_PSE);
- return 0;
- }
-#endif
-
- userdef = 1;
- mem_size = memparse(p, &p);
- e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1);
-
- return 0;
-}
-early_param("mem", parse_memopt);
-
-static int __init parse_memmap_opt(char *p)
-{
- char *oldp;
- u64 start_at, mem_size;
-
- if (!p)
- return -EINVAL;
-
- if (!strncmp(p, "exactmap", 8)) {
-#ifdef CONFIG_CRASH_DUMP
- /*
- * If we are doing a crash dump, we still need to know
- * the real mem size before original memory map is
- * reset.
- */
- saved_max_pfn = e820_end_of_ram_pfn();
-#endif
- e820.nr_map = 0;
- userdef = 1;
- return 0;
- }
-
- oldp = p;
- mem_size = memparse(p, &p);
- if (p == oldp)
- return -EINVAL;
-
- userdef = 1;
- if (*p == '@') {
- start_at = memparse(p+1, &p);
- e820_add_region(start_at, mem_size, E820_RAM);
- } else if (*p == '#') {
- start_at = memparse(p+1, &p);
- e820_add_region(start_at, mem_size, E820_ACPI);
- } else if (*p == '$') {
- start_at = memparse(p+1, &p);
- e820_add_region(start_at, mem_size, E820_RESERVED);
- } else
- e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1);
-
- return *p == '\0' ? 0 : -EINVAL;
-}
-early_param("memmap", parse_memmap_opt);
-
-void __init finish_e820_parsing(void)
-{
- if (userdef) {
- u32 nr = e820.nr_map;
-
- if (__sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr) < 0)
- early_panic("Invalid user supplied memory map");
- e820.nr_map = nr;
-
- printk(KERN_INFO "user-defined physical RAM map:\n");
- e820_print_map("user");
- }
-}
-
static inline const char *e820_type_to_string(int e820_type)
{
switch (e820_type) {
@@ -1098,7 +504,8 @@ void __init e820_reserve_resources(void)
* pci device BAR resource and insert them later in
* pcibios_resource_survey()
*/
- if (e820.map[i].type != E820_RESERVED || res->start < (1ULL<<20)) {
+ if (e820.map[i].type != E820_RESERVED ||
+ res->start < (1ULL<<20)) {
res->flags |= IORESOURCE_BUSY;
insert_resource(&iomem_resource, res);
}
@@ -1114,7 +521,7 @@ void __init e820_reserve_resources(void)
}
/* How much should we pad RAM ending depending on where it is? */
-static unsigned long ram_alignment(resource_size_t pos)
+static unsigned long __init ram_alignment(resource_size_t pos)
{
unsigned long mb = pos >> 20;
@@ -1196,7 +603,7 @@ char *__init default_machine_specific_memory_setup(void)
who = "BIOS-e801";
}
- e820.nr_map = 0;
+ clear_e820_map();
e820_add_region(0, LOWMEMSIZE(), E820_RAM);
e820_add_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
}
@@ -1204,7 +611,6 @@ char *__init default_machine_specific_memory_setup(void)
/* In case someone cares... */
return who;
}
-
void __init save_e820_map(void)
{
memcpy(&e820_saved, &e820, sizeof(struct e820map));
@@ -1221,20 +627,18 @@ void __init setup_memory_map(void)
}
#ifdef CONFIG_X86_OOSTORE
+
/*
* Figure what we can cover with MCR's
*
* Shortcut: We know you can't put 4Gig of RAM on a winchip
*/
-void __init get_centaur_ram_top(void)
+static u32 __init __get_special_low_ram_top(void)
{
u32 clip = 0xFFFFFFFFUL;
u32 top = 0;
int i;
- if (boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR)
- return;
-
for (i = 0; i < e820.nr_map; i++) {
unsigned long start, end;
@@ -1272,7 +676,15 @@ void __init get_centaur_ram_top(void)
if (top > clip)
top = clip;
- centaur_ram_top = top;
+ return top;
}
-#endif
+int centaur_ram_top;
+void __init get_centaur_ram_top(void)
+{
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR)
+ return;
+
+ centaur_ram_top = __get_special_low_ram_top();
+}
+#endif
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 266ab92..c341c18 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -6,7 +6,7 @@
#include <linux/mmzone.h>
#include <asm/dma.h>
-
+#include <linux/early_res.h>
/*
* simple boot-time physical memory area allocator.
*/
diff --git a/include/linux/early_res.h b/include/linux/early_res.h
index 29c09f5..0f4590f 100644
--- a/include/linux/early_res.h
+++ b/include/linux/early_res.h
@@ -14,6 +14,7 @@ u64 find_early_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
u64 find_early_area_size(u64 ei_start, u64 ei_last, u64 start,
u64 *sizep, u64 align);
u64 find_fw_memmap_area(u64 start, u64 end, u64 size, u64 align);
+u64 find_fw_memmap_area_node(int nid, u64 start, u64 end, u64 size, u64 align);
u64 get_max_mapped(void);
#include <linux/range.h>
int get_free_all_memory_range(struct range **rangep, int nodeid);
diff --git a/include/linux/fw_memmap.h b/include/linux/fw_memmap.h
new file mode 100644
index 0000000..e0fcc1b
--- /dev/null
+++ b/include/linux/fw_memmap.h
@@ -0,0 +1,40 @@
+#ifndef _LINUX_FW_MEMMAP_H
+#define _LINUX_FW_MEMMAP_H
+#define E820MAX 128 /* number of entries in E820MAP */
+
+#define FW_MEMMAP_RAM 1
+#define FW_MEMMAP_RESERVED 2
+
+#define E820_RAM FW_MEMMAP_RAM
+#define E820_RESERVED FW_MEMMAP_RESERVED
+
+#define E820_ACPI 3
+#define E820_NVS 4
+#define E820_UNUSABLE 5
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+struct e820entry {
+ __u64 addr; /* start of memory segment */
+ __u64 size; /* size of memory segment */
+ __u32 type; /* type of memory segment */
+} __attribute__((packed));
+
+#ifdef __KERNEL__
+
+void fw_memmap_add_region(u64 start, u64 size, int type);
+void fw_memmap_print_map(char *who);
+int sanitize_fw_memmap(void);
+void finish_fw_memmap_parsing(void);
+
+#include <linux/early_res.h>
+
+unsigned long fw_memmap_end_of_ram_pfn(void);
+void fw_memmap_register_active_regions(int nid, unsigned long start_pfn,
+ unsigned long end_pfn);
+u64 fw_memmap_hole_size(u64 start, u64 end);
+
+#endif /* __KERNEL__ */
+#endif /* __ASSEMBLY__ */
+
+#endif /* _LINUX_FW_MEMMAP_H */
diff --git a/kernel/Makefile b/kernel/Makefile
index d5c3006..b0afaa5 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -11,7 +11,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o \
hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \
async.o range.o
-obj-$(CONFIG_HAVE_EARLY_RES) += early_res.o
+obj-$(CONFIG_HAVE_EARLY_RES) += early_res.o fw_memmap.o
obj-y += groups.o
ifdef CONFIG_FUNCTION_TRACER
diff --git a/kernel/fw_memmap.c b/kernel/fw_memmap.c
new file mode 100644
index 0000000..11067f3
--- /dev/null
+++ b/kernel/fw_memmap.c
@@ -0,0 +1,625 @@
+/*
+ * Handle the memory map.
+ * The functions here do the job until bootmem takes over.
+ *
+ * Getting sanitize_e820_map() in sync with i386 version by applying change:
+ * - Provisions for empty E820 memory regions (reported by certain BIOSes).
+ * Alex Achenbach <xe...@slit.de>, December 2002.
+ * Venkatesh Pallipadi <venkatesh...@intel.com>
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/suspend.h>
+#include <linux/ioport.h>
+
+#include <linux/fw_memmap.h>
+#include "fw_memmap_internals.h"
+
+/*
+ * The e820 map is the map that gets modified e.g. with command line parameters
+ * and that is also registered with modifications in the kernel resource tree
+ * with the iomem_resource as parent.
+ */
+struct e820map __initdata e820;
+
+/*
+ * Add a memory region to the kernel e820 map.
+ */
+void __init __e820_add_region(struct e820map *e820x, u64 start, u64 size,
+ int type)
+{
+ int x = e820x->nr_map;
+
+ if (x >= ARRAY_SIZE(e820x->map)) {
+ printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
+ return;
+ }
+
+ e820x->map[x].addr = start;
+ e820x->map[x].size = size;
+ e820x->map[x].type = type;
+ e820x->nr_map++;
+}
+
+void __init fw_memmap_add_region(u64 start, u64 size, int type)
+{
+ __e820_add_region(&e820, start, size, type);
+}
+
+/* make e820 not cover the range */
+u64 __init e820_remove_range(u64 start, u64 size, unsigned old_type,
+ int checktype)
+{
+ int i;
+ u64 end;
+ u64 real_removed_size = 0;
+
+ if (size > (ULLONG_MAX - start))
+ size = ULLONG_MAX - start;
+
+ end = start + size;
+ printk(KERN_DEBUG "e820 remove range: %016Lx - %016Lx ",
+ (unsigned long long) start,
+ (unsigned long long) end);
+ e820_print_type(old_type);
+ printk(KERN_CONT "\n");
+
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+ u64 final_start, final_end;
+
+ if (checktype && ei->type != old_type)
+ continue;
+ /* totally covered? */
+ if (ei->addr >= start &&
+ (ei->addr + ei->size) <= (start + size)) {
+ real_removed_size += ei->size;
+ memset(ei, 0, sizeof(struct e820entry));
+ continue;
+ }
+ /* partially covered */
+ final_start = max(start, ei->addr);
+ final_end = min(start + size, ei->addr + ei->size);
+ if (final_start >= final_end)
+ continue;
+ real_removed_size += final_end - final_start;
+
+ ei->size -= final_end - final_start;
+ if (ei->addr < final_start)
+ continue;
+ ei->addr = final_end;
+ }
+ return real_removed_size;
+}
+
+void __init e820_print_type(u32 type)
+{
+ switch (type) {
+ case E820_RAM:
+ case E820_RESERVED_KERN:
+ printk(KERN_CONT "(usable)");
+ break;
+ case E820_RESERVED:
+ printk(KERN_CONT "(reserved)");
+ break;
+ case E820_ACPI:
+ printk(KERN_CONT "(ACPI data)");
+ break;
+ case E820_NVS:
+ printk(KERN_CONT "(ACPI NVS)");
+ break;
+ case E820_UNUSABLE:
+ printk(KERN_CONT "(unusable)");
+ break;
+ default:
+ printk(KERN_CONT "type %u", type);
+ break;
+ }
+}
+
+void __init fw_memmap_print_map(char *who)
+{
+ int i;
+
+ for (i = 0; i < e820.nr_map; i++) {
+ printk(KERN_INFO " %s: %016Lx - %016Lx ", who,
+ (unsigned long long) e820.map[i].addr,
+ (unsigned long long)
+ (e820.map[i].addr + e820.map[i].size));
+ e820_print_type(e820.map[i].type);
+ printk(KERN_CONT "\n");
+ }
+}
+
+/*
+ * Sanitize the BIOS e820 map.
+ *
+ * Some e820 responses include overlapping entries. The following
+ * replaces the original e820 map with a new one, removing overlaps,
+ * and resolving conflicting memory types in favor of highest
+ * numbered type.
+ *
+ * The input parameter biosmap points to an array of 'struct
+ * e820entry' which on entry has elements in the range [0, *pnr_map)
+ * valid, and which has space for up to max_nr_map entries.
+ * On return, the resulting sanitized e820 map entries will be
+ * overwritten in the same location, starting at biosmap.
+ *
+ * The integer pointed to by pnr_map must be valid on entry (the
+ * current number of valid entries located at biosmap) and will
+ * be updated on return, with the new number of valid entries
+ * (at most max_nr_map).
+ *
+ * The return value from sanitize_e820_map() is zero if it
+ * successfully 'sanitized' the map entries passed in, and is -1
+ * if it did nothing, which can happen if either of (1) it was
+ * only passed one map entry, or (2) any of the input map entries
+ * were invalid (start + size < start, meaning that the size was
+ * so big the described memory range wrapped around through zero.)
+ *
+ * Visually we're performing the following
+ * (1,2,3,4 = memory types)...
+ *
+ * Sample memory map (w/overlaps):
+ * ____22__________________
+ * ______________________4_
+ * ____1111________________
+ * _44_____________________
+ * 11111111________________
+ * ____________________33__
+ * ___________44___________
+ * __________33333_________
+ * ______________22________
+ * ___________________2222_
+ * _________111111111______
+ * _____________________11_
+ * _________________4______
+ *
+ * Sanitized equivalent (no overlap):
+ * 1_______________________
+ * _44_____________________
+ * ___1____________________
+ * ____22__________________
+ * ______11________________
+ * _________1______________
+ * __________3_____________
+ * ___________44___________
+ * _____________33_________
+ * _______________2________
+ * ________________1_______
+ * _________________4______
+ * ___________________2____
+ * ____________________33__
+ * ______________________4_
+ */
+
+int __init __sanitize_e820_map(struct e820entry *biosmap, int max_nr_map,
+ u32 *pnr_map)
+{
+ struct change_member {
+ struct e820entry *pbios; /* pointer to original bios entry */
+ unsigned long long addr; /* address for this change point */
+ };
+ static struct change_member change_point_list[2*E820_X_MAX] __initdata;
+ static struct change_member *change_point[2*E820_X_MAX] __initdata;
+ static struct e820entry *overlap_list[E820_X_MAX] __initdata;
+ static struct e820entry new_bios[E820_X_MAX] __initdata;
+ struct change_member *change_tmp;
+ unsigned long current_type, last_type;
+ unsigned long long last_addr;
+ int chgidx, still_changing;
+ int overlap_entries;
+ int new_bios_entry;
+ int old_nr, new_nr, chg_nr;
+ int i;
+
+ /* if there's only one memory region, don't bother */
+ if (*pnr_map < 2)
+ return -1;
+
+ old_nr = *pnr_map;
+ BUG_ON(old_nr > max_nr_map);
+
+ /* bail out if we find any unreasonable addresses in bios map */
+ for (i = 0; i < old_nr; i++)
+ if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
+ return -1;
+
+ /* create pointers for initial change-point information (for sorting) */
+ for (i = 0; i < 2 * old_nr; i++)
+ change_point[i] = &change_point_list[i];
+
+ /* record all known change-points (starting and ending addresses),
+ omitting those that are for empty memory regions */
+ chgidx = 0;
+ for (i = 0; i < old_nr; i++) {
+ if (biosmap[i].size != 0) {
+ change_point[chgidx]->addr = biosmap[i].addr;
+ change_point[chgidx++]->pbios = &biosmap[i];
+ change_point[chgidx]->addr = biosmap[i].addr +
+ biosmap[i].size;
+ change_point[chgidx++]->pbios = &biosmap[i];
+ }
+ }
+ chg_nr = chgidx;
+
+ /* sort change-point list by memory addresses (low -> high) */
+ still_changing = 1;
+ while (still_changing) {
+ still_changing = 0;
+ for (i = 1; i < chg_nr; i++) {
+ unsigned long long curaddr, lastaddr;
+ unsigned long long curpbaddr, lastpbaddr;
+
+ curaddr = change_point[i]->addr;
+ lastaddr = change_point[i - 1]->addr;
+ curpbaddr = change_point[i]->pbios->addr;
+ lastpbaddr = change_point[i - 1]->pbios->addr;
+
+ /*
+ * swap entries, when:
+ *
+ * curaddr > lastaddr or
+ * curaddr == lastaddr and curaddr == curpbaddr and
+ * lastaddr != lastpbaddr
+ */
+ if (curaddr < lastaddr ||
+ (curaddr == lastaddr && curaddr == curpbaddr &&
+ lastaddr != lastpbaddr)) {
+ change_tmp = change_point[i];
+ change_point[i] = change_point[i-1];
+ change_point[i-1] = change_tmp;
+ still_changing = 1;
+ }
+ }
+ }
+
+ /* create a new bios memory map, removing overlaps */
+ overlap_entries = 0; /* number of entries in the overlap table */
+ new_bios_entry = 0; /* index for creating new bios map entries */
+ last_type = 0; /* start with undefined memory type */
+ last_addr = 0; /* start with 0 as last starting address */
+
+ /* loop through change-points, determining effect on the new bios map */
+ for (chgidx = 0; chgidx < chg_nr; chgidx++) {
+ /* keep track of all overlapping bios entries */
+ if (change_point[chgidx]->addr ==
+ change_point[chgidx]->pbios->addr) {
+ /*
+ * add map entry to overlap list (> 1 entry
+ * implies an overlap)
+ */
+ overlap_list[overlap_entries++] =
+ change_point[chgidx]->pbios;
+ } else {
+ /*
+ * remove entry from list (order independent,
+ * so swap with last)
+ */
+ for (i = 0; i < overlap_entries; i++) {
+ if (overlap_list[i] ==
+ change_point[chgidx]->pbios)
+ overlap_list[i] =
+ overlap_list[overlap_entries-1];
+ }
+ overlap_entries--;
+ }
+ /*
+ * if there are overlapping entries, decide which
+ * "type" to use (larger value takes precedence --
+ * 1=usable, 2,3,4,4+=unusable)
+ */
+ current_type = 0;
+ for (i = 0; i < overlap_entries; i++)
+ if (overlap_list[i]->type > current_type)
+ current_type = overlap_list[i]->type;
+ /*
+ * continue building up new bios map based on this
+ * information
+ */
+ if (current_type != last_type) {
+ if (last_type != 0) {
+ new_bios[new_bios_entry].size =
+ change_point[chgidx]->addr - last_addr;
+ /*
+ * move forward only if the new size
+ * was non-zero
+ */
+ if (new_bios[new_bios_entry].size != 0)
+ /*
+ * no more space left for new
+ * bios entries ?
+ */
+ if (++new_bios_entry >= max_nr_map)
+ break;
+ }
+ if (current_type != 0) {
+ new_bios[new_bios_entry].addr =
+ change_point[chgidx]->addr;
+ new_bios[new_bios_entry].type = current_type;
+ last_addr = change_point[chgidx]->addr;
+ }
+ last_type = current_type;
+ }
+ }
+ /* retain count for new bios entries */
+ new_nr = new_bios_entry;
+
+ /* copy new bios mapping into original location */
+ memcpy(biosmap, new_bios, new_nr * sizeof(struct e820entry));
+ *pnr_map = new_nr;
+
+ return 0;
+}
+
+int __init sanitize_fw_memmap(void)
+{
+ int max_nr_map = ARRAY_SIZE(e820.map);
+
+ return __sanitize_e820_map(e820.map, max_nr_map, &e820.nr_map);
+}
+
+void __init clear_e820_map(void)
+{
+ e820.nr_map = 0;
+}
+
+static int userdef __initdata;
+
+/* "mem=nopentium" disables the 4MB page tables. */
+static int __init parse_memopt(char *p)
+{
+ u64 mem_size;
+
+ if (!p)
+ return -EINVAL;
+
+#ifdef CONFIG_X86_32
+ if (!strcmp(p, "nopentium")) {
+ setup_clear_cpu_cap(X86_FEATURE_PSE);
+ return 0;
+ }
+#endif
+
+ userdef = 1;
+ mem_size = memparse(p, &p);
+ e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1);
+
+ return 0;
+}
+early_param("mem", parse_memopt);
+
+static int __init parse_memmap_opt(char *p)
+{
+ char *oldp;
+ u64 start_at, mem_size;
+
+ if (!p)
+ return -EINVAL;
+
+ if (!strncmp(p, "exactmap", 8)) {
+#ifdef CONFIG_CRASH_DUMP
+ /*
+ * If we are doing a crash dump, we still need to know
+ * the real mem size before original memory map is
+ * reset.
+ */
+ saved_max_pfn = fw_memmap_end_of_ram_pfn();
+#endif
+ e820.nr_map = 0;
+ userdef = 1;
+ return 0;
+ }
+
+ oldp = p;
+ mem_size = memparse(p, &p);
+ if (p == oldp)
+ return -EINVAL;
+
+ userdef = 1;
+ if (*p == '@') {
+ start_at = memparse(p+1, &p);
+ e820_add_region(start_at, mem_size, E820_RAM);
+ } else if (*p == '#') {
+ start_at = memparse(p+1, &p);
+ e820_add_region(start_at, mem_size, E820_ACPI);
+ } else if (*p == '$') {
+ start_at = memparse(p+1, &p);
+ e820_add_region(start_at, mem_size, E820_RESERVED);
+ } else
+ e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1);
+
+ return *p == '\0' ? 0 : -EINVAL;
+}
+early_param("memmap", parse_memmap_opt);
+
+static void early_panic(char *msg)
+{
+ early_printk(msg);
+ panic(msg);
+}
+
+void __init finish_fw_memmap_parsing(void)
+{
+ if (userdef) {
+ u32 nr = e820.nr_map;
+ int max_nr_map = ARRAY_SIZE(e820.map);
+
+ if (__sanitize_e820_map(e820.map, max_nr_map, &nr) < 0)
+ early_panic("Invalid user supplied memory map");
+ e820.nr_map = nr;
+
+ printk(KERN_INFO "user-defined physical RAM map:\n");
+ e820_print_map("user");
+ }
+}
+
+/*
+ * Find a free area with specified alignment in a specific range.
+ */
+u64 __init find_fw_memmap_area(u64 start, u64 end, u64 size, u64 align)
+{
+ int i;
+
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+ u64 addr;
+ u64 ei_start, ei_last;
+
+ if (ei->type != E820_RAM)
+ continue;
+
+ ei_last = ei->addr + ei->size;
+ ei_start = ei->addr;
+ addr = find_early_area(ei_start, ei_last, start, end,
+ size, align);
+
+ if (addr != -1ULL)
+ return addr;
+ }
+ return -1ULL;
+}
+
+u64 __init
+find_fw_memmap_area_node(int nid, u64 start, u64 end, u64 size, u64 align)
+{
+ u64 addr;
+ /*
+ * need to call this function after fw_memmap_register_active_regions
+ * so early_node_map[] is set
+ */
+ addr = find_memory_core_early(nid, size, align, start, end);
+ if (addr != -1ULL)
+ return addr;
+
+ /* fallback, should already have start end in the node range */
+ return find_fw_memmap_area(start, end, size, align);
+}
+
+#ifdef CONFIG_X86_32
+# ifdef CONFIG_X86_PAE
+# define MAX_ARCH_PFN (1ULL<<(36-PAGE_SHIFT))
+# else
+# define MAX_ARCH_PFN (1ULL<<(32-PAGE_SHIFT))
+# endif
+#else /* CONFIG_X86_32 */
+# define MAX_ARCH_PFN (MAXMEM>>PAGE_SHIFT)
+#endif
+
+/*
+ * Find the highest page frame number we have available
+ */
+static unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type)
+{
+ int i;
+ unsigned long last_pfn = 0;
+ unsigned long max_arch_pfn = MAX_ARCH_PFN;
+
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+ unsigned long start_pfn;
+ unsigned long end_pfn;
+
+ if (ei->type != type)
+ continue;
+
+ start_pfn = ei->addr >> PAGE_SHIFT;
+ end_pfn = (ei->addr + ei->size) >> PAGE_SHIFT;
+
+ if (start_pfn >= limit_pfn)
+ continue;
+ if (end_pfn > limit_pfn) {
+ last_pfn = limit_pfn;
+ break;
+ }
+ if (end_pfn > last_pfn)
+ last_pfn = end_pfn;
+ }
+
+ if (last_pfn > max_arch_pfn)
+ last_pfn = max_arch_pfn;
+
+ printk(KERN_INFO "last_pfn = %#lx max_arch_pfn = %#lx\n",
+ last_pfn, max_arch_pfn);
+ return last_pfn;
+}
+unsigned long __init fw_memmap_end_of_ram_pfn(void)
+{
+ return e820_end_pfn(MAX_ARCH_PFN, E820_RAM);
+}
+
+unsigned long __init e820_end_of_low_ram_pfn(void)
+{
+ return e820_end_pfn(1UL<<(32 - PAGE_SHIFT), E820_RAM);
+}
+/*
+ * Finds an active region in the address range from start_pfn to last_pfn and
+ * returns its range in ei_startpfn and ei_endpfn for the e820 entry.
+ */
+static int __init e820_find_active_region(const struct e820entry *ei,
+ unsigned long start_pfn,
+ unsigned long last_pfn,
+ unsigned long *ei_startpfn,
+ unsigned long *ei_endpfn)
+{
+ u64 align = PAGE_SIZE;
+
+ *ei_startpfn = round_up(ei->addr, align) >> PAGE_SHIFT;
+ *ei_endpfn = round_down(ei->addr + ei->size, align) >> PAGE_SHIFT;
+
+ /* Skip map entries smaller than a page */
+ if (*ei_startpfn >= *ei_endpfn)
+ return 0;
+
+ /* Skip if map is outside the node */
+ if (ei->type != E820_RAM || *ei_endpfn <= start_pfn ||
+ *ei_startpfn >= last_pfn)
+ return 0;
+
+ /* Check for overlaps */
+ if (*ei_startpfn < start_pfn)
+ *ei_startpfn = start_pfn;
+ if (*ei_endpfn > last_pfn)
+ *ei_endpfn = last_pfn;
+
+ return 1;
+}
+
+/* Walk the e820 map and register active regions within a node */
+void __init fw_memmap_register_active_regions(int nid, unsigned long start_pfn,
+ unsigned long last_pfn)
+{
+ unsigned long ei_startpfn;
+ unsigned long ei_endpfn;
+ int i;
+
+ for (i = 0; i < e820.nr_map; i++)
+ if (e820_find_active_region(&e820.map[i],
+ start_pfn, last_pfn,
+ &ei_startpfn, &ei_endpfn))
+ add_active_range(nid, ei_startpfn, ei_endpfn);
+}
+
+/*
+ * Find the hole size (in bytes) in the memory range.
+ * @start: starting address of the memory range to scan
+ * @end: ending address of the memory range to scan
+ */
+u64 __init fw_memmap_hole_size(u64 start, u64 end)
+{
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long last_pfn = end >> PAGE_SHIFT;
+ unsigned long ei_startpfn, ei_endpfn, ram = 0;
+ int i;
+
+ for (i = 0; i < e820.nr_map; i++) {
+ if (e820_find_active_region(&e820.map[i],
+ start_pfn, last_pfn,
+ &ei_startpfn, &ei_endpfn))
+ ram += ei_endpfn - ei_startpfn;
+ }
+ return end - start - ((u64)ram << PAGE_SHIFT);
+}
diff --git a/kernel/fw_memmap_internals.h b/kernel/fw_memmap_internals.h
new file mode 100644
index 0000000..f217602
--- /dev/null
+++ b/kernel/fw_memmap_internals.h
@@ -0,0 +1,49 @@
+#ifndef __KERNEL_FW_MEMMAP_INTERNALS_H
+#define __KERNEL_FW_MEMMAP_INTERNALS_H
+
+/*
+ * Legacy E820 BIOS limits us to 128 (E820MAX) nodes due to the
+ * constrained space in the zeropage. If we have more nodes than
+ * that, and if we've booted off EFI firmware, then the EFI tables
+ * passed us from the EFI firmware can list more nodes. Size our
+ * internal memory map tables to have room for these additional
+ * nodes, based on up to three entries per node for which the
+ * kernel was built: MAX_NUMNODES == (1 << CONFIG_NODES_SHIFT),
+ * plus E820MAX, allowing space for the possible duplicate E820
+ * entries that might need room in the same arrays, prior to the
+ * call to sanitize_e820_map() to remove duplicates. The allowance
+ * of three memory map entries per node is "enough" entries for
+ * the initial hardware platform motivating this mechanism to make
+ * use of additional EFI map entries. Future platforms may want
+ * to allow more than three entries per node or otherwise refine
+ * this size.
+ */
+
+/*
+ * Odd: 'make headers_check' complains about numa.h if I try
+ * to collapse the next two #ifdef lines to a single line:
+ * #if defined(__KERNEL__) && defined(CONFIG_EFI)
+ */
+#ifdef __KERNEL__
+#ifdef CONFIG_EFI
+#include <linux/numa.h>
+#define E820_X_MAX (E820MAX + 3 * MAX_NUMNODES)
+#else /* ! CONFIG_EFI */
+#define E820_X_MAX E820MAX
+#endif
+#else /* ! __KERNEL__ */
+#define E820_X_MAX E820MAX
+#endif
+
+#ifndef __ASSEMBLY__
+struct e820map {
+ __u32 nr_map;
+ struct e820entry map[E820_X_MAX];
+};
+#endif
+
+extern struct e820map __initdata e820;
+void e820_print_type(u32 type);
+void __e820_add_region(struct e820map *e820x, u64 start, u64 size, int type);
+
+#endif
--
1.6.4.2
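The rule __sanitize_e820_map() enforces is that wherever input entries overlap, the highest numbered type wins. A stand-alone sketch (user-space C with simplified types, a toy two-entry map, and a brute-force byte scan; not the kernel implementation) shows the same semantics:

#include <stdio.h>

struct ent { unsigned long long addr, size; unsigned type; };

static unsigned type_at(const struct ent *map, int n, unsigned long long a)
{
	unsigned t = 0;
	int i;

	for (i = 0; i < n; i++)
		if (a >= map[i].addr && a < map[i].addr + map[i].size &&
		    map[i].type > t)
			t = map[i].type;
	return t;
}

int main(void)
{
	/* overlapping input: RAM (type 1) with a reserved (type 2) window */
	struct ent map[] = {
		{ 0x0000, 0x8000, 1 },
		{ 0x4000, 0x2000, 2 },
	};
	unsigned long long a, start = 0;
	unsigned last = type_at(map, 2, 0);

	for (a = 1; a <= 0x8000; a++) {
		unsigned t = (a == 0x8000) ? (unsigned)-1 : type_at(map, 2, a);

		if (t != last) {
			if (last)
				printf("%06llx - %06llx type %u\n",
				       start, a, last);
			start = a;
			last = t;
		}
	}
	return 0;
}

It prints three non-overlapping segments, with the reserved window punched out of the middle of the RAM range, which is exactly the shape the change-point pass in __sanitize_e820_map() produces without a byte-granular scan.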
so add it to e820
Signed-off-by: Yinghai Lu <yin...@kernel.org>
---
arch/x86/kernel/mmconf-fam10h_64.c | 40 ++++++++++++++++++++---------------
1 files changed, 23 insertions(+), 17 deletions(-)
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c
index 7182580..4426fd2 100644
--- a/arch/x86/kernel/mmconf-fam10h_64.c
+++ b/arch/x86/kernel/mmconf-fam10h_64.c
@@ -16,6 +16,7 @@
#include <asm/acpi.h>
#include <asm/mmconfig.h>
#include <asm/pci_x86.h>
+#include <asm/e820.h>
struct pci_hostbridge_probe {
u32 bus;
@@ -27,23 +28,26 @@ struct pci_hostbridge_probe {
static u64 __cpuinitdata fam10h_pci_mmconf_base;
static int __cpuinitdata fam10h_pci_mmconf_base_status;
+/* only on BSP */
+static void __init_refok e820_add_mmconf_range(int busnbits)
+{
+ u64 end;
+
+ end = fam10h_pci_mmconf_base + (1ULL<<(busnbits + 20)) - 1;
+ if (!e820_all_mapped(fam10h_pci_mmconf_base, end+1, E820_RESERVED)) {
+ printk(KERN_DEBUG "Fam 10h mmconf [%llx, %llx]\n",
+ fam10h_pci_mmconf_base, end);
+ e820_add_region(fam10h_pci_mmconf_base, 1ULL<<(busnbits + 20),
+ E820_RESERVED);
+ sanitize_e820_map();
+ }
+}
+
static struct pci_hostbridge_probe pci_probes[] __cpuinitdata = {
{ 0, 0x18, PCI_VENDOR_ID_AMD, 0x1200 },
{ 0xff, 0, PCI_VENDOR_ID_AMD, 0x1200 },
};
-static int __cpuinit cmp_range(const void *x1, const void *x2)
-{
- const struct range *r1 = x1;
- const struct range *r2 = x2;
- int start1, start2;
-
- start1 = r1->start >> 32;
- start2 = r2->start >> 32;
-
- return start1 - start2;
-}
-
/*[47:0] */
/* need to avoid (0xfd<<32) and (0xfe<<32), ht used space */
#define FAM10H_PCI_MMCONF_BASE (0xfcULL<<32)
@@ -115,6 +119,7 @@ static void __cpuinit get_fam10h_pci_mmconf_base(void)
* above 4G
*/
hi_mmio_num = 0;
+ memset(range, 0, sizeof(range));
for (i = 0; i < 8; i++) {
u32 reg;
u64 start;
@@ -130,16 +135,14 @@ static void __cpuinit get_fam10h_pci_mmconf_base(void)
if (!end)
continue;
- range[hi_mmio_num].start = start;
- range[hi_mmio_num].end = end;
- hi_mmio_num++;
+ hi_mmio_num = add_range(range, 8, hi_mmio_num, start, end);
}
if (!hi_mmio_num)
goto out;
/* sort the range */
- sort(range, hi_mmio_num, sizeof(struct range), cmp_range, NULL);
+ sort_range(range, hi_mmio_num);
if (range[hi_mmio_num - 1].end < base)
goto out;
@@ -169,6 +172,7 @@ fail:
out:
fam10h_pci_mmconf_base = base;
fam10h_pci_mmconf_base_status = 1;
+ e820_add_mmconf_range(8);
}
void __cpuinit fam10h_check_enable_mmcfg(void)
@@ -191,10 +195,12 @@ void __cpuinit fam10h_check_enable_mmcfg(void)
/* only trust the one handle 256 buses, if acpi=off */
if (!acpi_pci_disabled || busnbits >= 8) {
u64 base;
- base = val & (0xffffULL << 32);
+ base = val & (FAM10H_MMIO_CONF_BASE_MASK <<
+ FAM10H_MMIO_CONF_BASE_SHIFT);
if (fam10h_pci_mmconf_base_status <= 0) {
fam10h_pci_mmconf_base = base;
fam10h_pci_mmconf_base_status = 1;
+ e820_add_mmconf_range(busnbits);
return;
} else if (fam10h_pci_mmconf_base == base)
return;
--
1.6.4.2
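The patch drops the open-coded array fill plus sort(..., cmp_range, NULL) in favor of the add_range()/sort_range() helpers. A simplified stand-in (user-space C; the real helpers live in kernel/range.c and differ in detail) illustrates the collect-then-sort pattern:

#include <stdio.h>
#include <stdlib.h>

struct range { unsigned long long start, end; };

/* simplified stand-in for add_range(): append if non-empty and room left */
static int add_range(struct range *r, int az, int nr,
		     unsigned long long start, unsigned long long end)
{
	if (start >= end || nr >= az)
		return nr;
	r[nr].start = start;
	r[nr].end = end;
	return nr + 1;
}

static int cmp_range(const void *a, const void *b)
{
	const struct range *r1 = a, *r2 = b;

	return (r1->start > r2->start) - (r1->start < r2->start);
}

int main(void)
{
	struct range range[8];
	int nr = 0, i;

	/* two MMIO windows above 4G, added out of order */
	nr = add_range(range, 8, nr, 0xfd00000000ULL, 0xfd10000000ULL);
	nr = add_range(range, 8, nr, 0xfc00000000ULL, 0xfc08000000ULL);

	qsort(range, nr, sizeof(range[0]), cmp_range); /* the sort_range() step */

	for (i = 0; i < nr; i++)
		printf("[%#llx, %#llx)\n", range[i].start, range[i].end);
	return 0;
}

Note add_range() returns the updated element count, which is why the caller writes hi_mmio_num = add_range(range, 8, hi_mmio_num, start, end).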
use vector_desc to reduce the calling of irq_to_desc.
next step: need to change all ack, mask, unmask, eoi for all irq_chip to take irq_desc
-v2: irq should be unsigned in 32bit handle_irq according to Eric
also reset vector_desc for lguest in setup_irq
-v3: keep irq in execute_on_irq_stack() ...
-v4: update after legacy_pic
Signed-off-by: Yinghai Lu <yin...@kernel.org>
Acked-by: "Eric W. Biederman" <ebie...@xmission.com>
---
arch/x86/include/asm/desc.h | 2 +-
arch/x86/include/asm/hw_irq.h | 13 ++++---
arch/x86/include/asm/irq.h | 3 +-
arch/x86/kernel/apic/io_apic.c | 77 +++++++++++++++++++++-------------------
arch/x86/kernel/irq.c | 15 ++++----
arch/x86/kernel/irq_32.c | 13 ++++---
arch/x86/kernel/irq_64.c | 7 +---
arch/x86/kernel/irqinit.c | 14 +++----
arch/x86/kernel/smpboot.c | 2 +-
arch/x86/kernel/uv_irq.c | 2 +-
arch/x86/kernel/vmiclock_32.c | 2 +-
arch/x86/lguest/boot.c | 7 +++-
12 files changed, 82 insertions(+), 75 deletions(-)
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index 617bd56..25c5635 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -334,7 +334,7 @@ static inline void set_intr_gate(unsigned int n, void *addr)
}
extern int first_system_vector;
-/* used_vectors is BITMAP for irq is not managed by percpu vector_irq */
+/* used_vectors is BITMAP for irq is not managed by percpu vector_desc */
extern unsigned long used_vectors[];
static inline void alloc_system_vector(int vector)
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 767d3f8..d23cf94 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -97,7 +97,8 @@ struct irq_cfg {
};
extern struct irq_cfg *irq_cfg(unsigned int);
-extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *);
+int assign_irq_vector(struct irq_desc *, struct irq_cfg *,
+ const struct cpumask *);
extern void send_cleanup_vector(struct irq_cfg *);
struct irq_desc;
@@ -136,18 +137,18 @@ extern asmlinkage void smp_invalidate_interrupt(struct pt_regs *);
extern void (*__initconst interrupt[NR_VECTORS-FIRST_EXTERNAL_VECTOR])(void);
-typedef int vector_irq_t[NR_VECTORS];
-DECLARE_PER_CPU(vector_irq_t, vector_irq);
-extern void setup_vector_irq(int cpu);
+typedef struct irq_desc *vector_desc_t[NR_VECTORS];
+DECLARE_PER_CPU(vector_desc_t, vector_desc);
+extern void setup_vector_desc(int cpu);
#ifdef CONFIG_X86_IO_APIC
extern void lock_vector_lock(void);
extern void unlock_vector_lock(void);
-extern void __setup_vector_irq(int cpu);
+extern void __setup_vector_desc(int cpu);
#else
static inline void lock_vector_lock(void) {}
static inline void unlock_vector_lock(void) {}
-static inline void __setup_vector_irq(int cpu) {}
+static inline void __setup_vector_desc(int cpu) {}
#endif
#endif /* !ASSEMBLY_ */
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 5458380..64c5f6f 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -39,7 +39,8 @@ extern void irq_force_complete_move(int);
extern void (*x86_platform_ipi_callback)(void);
extern void native_init_IRQ(void);
-extern bool handle_irq(unsigned irq, struct pt_regs *regs);
+struct irq_desc;
+extern bool handle_irq(struct irq_desc *desc, struct pt_regs *regs);
extern unsigned int do_IRQ(struct pt_regs *regs);
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index ba469f8..cd2f193 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1159,7 +1159,8 @@ void unlock_vector_lock(void)
}
static int
-__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
+__assign_irq_vector(struct irq_desc *desc, struct irq_cfg *cfg,
+ const struct cpumask *mask)
{
/*
* NOTE! The local APIC isn't very good at handling
@@ -1218,7 +1219,7 @@ next:
goto next;
for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
- if (per_cpu(vector_irq, new_cpu)[vector] != -1)
+ if (per_cpu(vector_desc, new_cpu)[vector] != NULL)
goto next;
/* Found one! */
current_vector = vector;
@@ -1228,7 +1229,7 @@ next:
cpumask_copy(cfg->old_domain, cfg->domain);
}
for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
- per_cpu(vector_irq, new_cpu)[vector] = irq;
+ per_cpu(vector_desc, new_cpu)[vector] = desc;
cfg->vector = vector;
cpumask_copy(cfg->domain, tmp_mask);
err = 0;
@@ -1238,18 +1239,19 @@ next:
return err;
}
-int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
+int assign_irq_vector(struct irq_desc *desc, struct irq_cfg *cfg,
+ const struct cpumask *mask)
{
int err;
unsigned long flags;
raw_spin_lock_irqsave(&vector_lock, flags);
- err = __assign_irq_vector(irq, cfg, mask);
+ err = __assign_irq_vector(desc, cfg, mask);
raw_spin_unlock_irqrestore(&vector_lock, flags);
return err;
}
-static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
+static void __clear_irq_vector(struct irq_desc *desc, struct irq_cfg *cfg)
{
int cpu, vector;
@@ -1257,7 +1259,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
vector = cfg->vector;
for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
- per_cpu(vector_irq, cpu)[vector] = -1;
+ per_cpu(vector_desc, cpu)[vector] = NULL;
cfg->vector = 0;
cpumask_clear(cfg->domain);
@@ -1267,18 +1269,18 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
vector++) {
- if (per_cpu(vector_irq, cpu)[vector] != irq)
+ if (per_cpu(vector_desc, cpu)[vector] != desc)
continue;
- per_cpu(vector_irq, cpu)[vector] = -1;
+ per_cpu(vector_desc, cpu)[vector] = NULL;
break;
}
}
cfg->move_in_progress = 0;
}
-void __setup_vector_irq(int cpu)
+void __setup_vector_desc(int cpu)
{
- /* Initialize vector_irq on a new cpu */
+ /* Initialize vector_desc on a new cpu */
int irq, vector;
struct irq_cfg *cfg;
struct irq_desc *desc;
@@ -1303,17 +1305,17 @@ void __setup_vector_irq(int cpu)
if (!cpumask_test_cpu(cpu, cfg->domain))
continue;
vector = cfg->vector;
- per_cpu(vector_irq, cpu)[vector] = irq;
+ per_cpu(vector_desc, cpu)[vector] = desc;
}
/* Mark the free vectors */
for (vector = 0; vector < NR_VECTORS; ++vector) {
- irq = per_cpu(vector_irq, cpu)[vector];
- if (irq < 0)
+ desc = per_cpu(vector_desc, cpu)[vector];
+ if (!desc)
continue;
- cfg = irq_cfg(irq);
+ cfg = desc->chip_data;
if (!cpumask_test_cpu(cpu, cfg->domain))
- per_cpu(vector_irq, cpu)[vector] = -1;
+ per_cpu(vector_desc, cpu)[vector] = NULL;
}
raw_spin_unlock(&vector_lock);
}
@@ -1473,7 +1475,7 @@ static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq
if (irq < legacy_pic->nr_legacy_irqs && cpumask_test_cpu(0, cfg->domain))
apic->vector_allocation_domain(0, cfg->domain);
- if (assign_irq_vector(irq, cfg, apic->target_cpus()))
+ if (assign_irq_vector(desc, cfg, apic->target_cpus()))
return;
dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
@@ -1489,7 +1491,7 @@ static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq
dest, trigger, polarity, cfg->vector, pin)) {
printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
mp_ioapics[apic_id].apicid, pin);
- __clear_irq_vector(irq, cfg);
+ __clear_irq_vector(desc, cfg);
return;
}
@@ -2386,14 +2388,12 @@ set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask,
unsigned int *dest_id)
{
struct irq_cfg *cfg;
- unsigned int irq;
if (!cpumask_intersects(mask, cpu_online_mask))
return -1;
- irq = desc->irq;
cfg = desc->chip_data;
- if (assign_irq_vector(irq, cfg, mask))
+ if (assign_irq_vector(desc, cfg, mask))
return -1;
cpumask_copy(desc->affinity, mask);
@@ -2466,7 +2466,7 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
return ret;
cfg = desc->chip_data;
- if (assign_irq_vector(irq, cfg, mask))
+ if (assign_irq_vector(desc, cfg, mask))
return ret;
dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);
@@ -2520,20 +2520,15 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
me = smp_processor_id();
for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
- unsigned int irq;
unsigned int irr;
struct irq_desc *desc;
struct irq_cfg *cfg;
- irq = __get_cpu_var(vector_irq)[vector];
-
- if (irq == -1)
- continue;
+ desc = __get_cpu_var(vector_desc)[vector];
- desc = irq_to_desc(irq);
if (!desc)
continue;
- cfg = irq_cfg(irq);
+ cfg = desc->chip_data;
raw_spin_lock(&desc->lock);
/*
@@ -2558,7 +2553,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
goto unlock;
}
- __get_cpu_var(vector_irq)[vector] = -1;
+ __get_cpu_var(vector_desc)[vector] = NULL;
unlock:
raw_spin_unlock(&desc->lock);
}
@@ -3001,7 +2996,7 @@ static inline void __init check_timer(void)
* get/set the timer IRQ vector:
*/
legacy_pic->chip->mask(0);
- assign_irq_vector(0, cfg, apic->target_cpus());
+ assign_irq_vector(desc, cfg, apic->target_cpus());
/*
* As IRQ0 is to be enabled in the 8259A, the virtual
@@ -3331,7 +3326,7 @@ unsigned int create_irq_nr(unsigned int irq_want, int node)
desc_new = move_irq_desc(desc_new, node);
cfg_new = desc_new->chip_data;
- if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0)
+ if (__assign_irq_vector(desc_new, cfg_new, apic->target_cpus()) == 0)
irq = new;
break;
}
@@ -3361,12 +3356,16 @@ int create_irq(void)
void destroy_irq(unsigned int irq)
{
unsigned long flags;
+ struct irq_desc *desc;
+ struct irq_cfg *cfg;
dynamic_irq_cleanup_keep_chip_data(irq);
free_irte(irq);
raw_spin_lock_irqsave(&vector_lock, flags);
- __clear_irq_vector(irq, get_irq_chip_data(irq));
+ desc = irq_to_desc(irq);
+ cfg = desc->chip_data;
+ __clear_irq_vector(desc, cfg);
raw_spin_unlock_irqrestore(&vector_lock, flags);
}
@@ -3377,6 +3376,7 @@ void destroy_irq(unsigned int irq)
static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
struct msi_msg *msg, u8 hpet_id)
{
+ struct irq_desc *desc;
struct irq_cfg *cfg;
int err;
unsigned dest;
@@ -3384,8 +3384,9 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
if (disable_apic)
return -ENXIO;
- cfg = irq_cfg(irq);
- err = assign_irq_vector(irq, cfg, apic->target_cpus());
+ desc = irq_to_desc(irq);
+ cfg = desc->chip_data;
+ err = assign_irq_vector(desc, cfg, apic->target_cpus());
if (err)
return err;
@@ -3876,14 +3877,16 @@ static struct irq_chip ht_irq_chip = {
int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
{
+ struct irq_desc *desc;
struct irq_cfg *cfg;
int err;
if (disable_apic)
return -ENXIO;
- cfg = irq_cfg(irq);
- err = assign_irq_vector(irq, cfg, apic->target_cpus());
+ desc = irq_to_desc(irq);
+ cfg = desc->chip_data;
+ err = assign_irq_vector(desc, cfg, apic->target_cpus());
if (!err) {
struct ht_irq_msg msg;
unsigned dest;
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 91fd0c7..f71625c 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -229,19 +229,19 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
/* high bit used in ret_from_ code */
unsigned vector = ~regs->orig_ax;
- unsigned irq;
+ struct irq_desc *desc;
exit_idle();
irq_enter();
- irq = __get_cpu_var(vector_irq)[vector];
+ desc = __get_cpu_var(vector_desc)[vector];
- if (!handle_irq(irq, regs)) {
+ if (!handle_irq(desc, regs)) {
ack_APIC_irq();
if (printk_ratelimit())
- pr_emerg("%s: %d.%d No irq handler for vector (irq %d)\n",
- __func__, smp_processor_id(), vector, irq);
+ pr_emerg("%s: %d.%d No irq handler for vector\n",
+ __func__, smp_processor_id(), vector);
}
irq_exit();
@@ -348,14 +348,13 @@ void fixup_irqs(void)
for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
unsigned int irr;
- if (__get_cpu_var(vector_irq)[vector] < 0)
+ if (__get_cpu_var(vector_desc)[vector] == NULL)
continue;
irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
if (irr & (1 << (vector % 32))) {
- irq = __get_cpu_var(vector_irq)[vector];
+ desc = __get_cpu_var(vector_desc)[vector];
- desc = irq_to_desc(irq);
raw_spin_lock(&desc->lock);
if (desc->chip->retrigger)
- desc->chip->retrigger(irq);
+ desc->chip->retrigger(desc->irq);
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 10709f2..f5daa3d 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -76,7 +76,7 @@ static void call_on_stack(void *func, void *stack)
}
static inline int
-execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
+execute_on_irq_stack(int overflow, struct irq_desc *desc, unsigned int irq)
{
union irq_ctx *curctx, *irqctx;
u32 *isp, arg1, arg2;
@@ -189,20 +189,23 @@ asmlinkage void do_softirq(void)
#else
static inline int
-execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; }
+execute_on_irq_stack(int overflow, struct irq_desc *desc, unsigned int irq)
+{
+ return 0;
+}
#endif
-bool handle_irq(unsigned irq, struct pt_regs *regs)
+bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
{
- struct irq_desc *desc;
int overflow;
+ unsigned int irq;
overflow = check_stack_overflow();
- desc = irq_to_desc(irq);
if (unlikely(!desc))
return false;
+ irq = desc->irq;
if (!execute_on_irq_stack(overflow, desc, irq)) {
if (unlikely(overflow))
print_stack_overflow();
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index acf8fbf..5e6e493 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -48,17 +48,14 @@ static inline void stack_overflow_check(struct pt_regs *regs)
#endif
}
-bool handle_irq(unsigned irq, struct pt_regs *regs)
+bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
{
- struct irq_desc *desc;
-
stack_overflow_check(regs);
- desc = irq_to_desc(irq);
if (unlikely(!desc))
return false;
- generic_handle_irq_desc(irq, desc);
+ generic_handle_irq_desc(desc->irq, desc);
return true;
}
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index f01d390..7b77458 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -83,16 +83,14 @@ static struct irqaction irq2 = {
.name = "cascade",
};
-DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
- [0 ... NR_VECTORS - 1] = -1,
-};
+DEFINE_PER_CPU(vector_desc_t, vector_desc);
int vector_used_by_percpu_irq(unsigned int vector)
{
int cpu;
for_each_online_cpu(cpu) {
- if (per_cpu(vector_irq, cpu)[vector] != -1)
+ if (per_cpu(vector_desc, cpu)[vector] != NULL)
return 1;
}
@@ -136,7 +134,7 @@ void __init init_IRQ(void)
* irq's migrate etc.
*/
for (i = 0; i < legacy_pic->nr_legacy_irqs; i++)
- per_cpu(vector_irq, 0)[IRQ0_VECTOR + i] = i;
+ per_cpu(vector_desc, 0)[IRQ0_VECTOR + i] = irq_to_desc(i);
x86_init.irqs.intr_init();
}
@@ -144,7 +142,7 @@ void __init init_IRQ(void)
/*
* Setup the vector to irq mappings.
*/
-void setup_vector_irq(int cpu)
+void setup_vector_desc(int cpu)
{
#ifndef CONFIG_X86_IO_APIC
int irq;
@@ -157,10 +155,10 @@ void setup_vector_irq(int cpu)
* legacy vector to irq mapping:
*/
for (irq = 0; irq < legacy_pic->nr_legacy_irqs; irq++)
- per_cpu(vector_irq, cpu)[IRQ0_VECTOR + irq] = irq;
+ per_cpu(vector_desc, cpu)[IRQ0_VECTOR + irq] = irq_to_desc(irq);
#endif
- __setup_vector_irq(cpu);
+ __setup_vector_desc(cpu);
}
static void __init smp_intr_init(void)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index ba43b3b..a1483ac 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -246,7 +246,7 @@ static void __cpuinit smp_callin(void)
/*
* Need to setup vector mappings before we enable interrupts.
*/
- setup_vector_irq(smp_processor_id());
+ setup_vector_desc(smp_processor_id());
/*
* Get our bogomips.
*
diff --git a/arch/x86/kernel/uv_irq.c b/arch/x86/kernel/uv_irq.c
index 4c61f1b..44c430d 100644
--- a/arch/x86/kernel/uv_irq.c
+++ b/arch/x86/kernel/uv_irq.c
@@ -158,7 +158,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
cfg = irq_cfg(irq);
- err = assign_irq_vector(irq, cfg, eligible_cpu);
+ err = assign_irq_vector(desc, cfg, eligible_cpu);
if (err != 0)
return err;
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index 5e1ff66..fb65235 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -236,7 +236,7 @@ void __init vmi_time_init(void)
vmi_time_init_clockevent();
setup_irq(0, &vmi_clock_action);
for_each_possible_cpu(cpu)
- per_cpu(vector_irq, cpu)[vmi_get_timer_vector()] = 0;
+ per_cpu(vector_desc, cpu)[vmi_get_timer_vector()] = irq_to_desc(0);
}
#ifdef CONFIG_X86_LOCAL_APIC
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 8eb9eed..e0f6b26 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -819,7 +819,7 @@ static void __init lguest_init_IRQ(void)
for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
/* Some systems map "vectors" to interrupts weirdly. Not us! */
- __get_cpu_var(vector_irq)[i] = i - FIRST_EXTERNAL_VECTOR;
+ __get_cpu_var(vector_desc)[i] = irq_to_desc(i - FIRST_EXTERNAL_VECTOR);
if (i != SYSCALL_VECTOR)
set_intr_gate(i, interrupt[i - FIRST_EXTERNAL_VECTOR]);
}
@@ -842,6 +842,11 @@ static void __init lguest_init_IRQ(void)
void lguest_setup_irq(unsigned int irq)
{
irq_to_desc_alloc_node(irq, 0);
+ /*
+ * for sparseirq, we could get new desc other than legacy irq,
+ * so set vector_desc again for that irq
+ */
+ __get_cpu_var(vector_desc)[irq + FIRST_EXTERNAL_VECTOR] = irq_to_desc(irq);
set_irq_chip_and_handler_name(irq, &lguest_irq_controller,
handle_level_irq, "level");
}
--
1.6.4.2
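The heart of the conversion is the element type of the per-cpu table: a vector used to map to an irq number, and every interrupt then paid an irq_to_desc() lookup; now it maps to the descriptor pointer directly, with NULL instead of -1 marking an unused slot. A stand-alone sketch (user-space C, simplified stand-in types; not the kernel's) of the before/after indirection:

#include <stdio.h>

#define NR_VECTORS 256

struct irq_desc { unsigned int irq; };

static struct irq_desc descs[16];

/* before: int vector_irq[NR_VECTORS], -1 == unused, plus an
 * irq_to_desc() lookup on every interrupt;
 * after: the descriptor pointer itself, NULL == unused */
static struct irq_desc *vector_desc[NR_VECTORS];

static void setup_vector(int vector, unsigned int irq)
{
	descs[irq].irq = irq;
	vector_desc[vector] = &descs[irq];
}

static void do_IRQ(int vector)
{
	struct irq_desc *desc = vector_desc[vector]; /* one load, no lookup */

	if (!desc) {
		printf("no irq handler for vector 0x%x\n", vector);
		return;
	}
	printf("vector 0x%x -> irq %u\n", vector, desc->irq);
}

int main(void)
{
	setup_vector(0x30, 0);
	do_IRQ(0x30);	/* mapped */
	do_IRQ(0x31);	/* unused slot, like the NULL checks in the patch */
	return 0;
}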
later we could move e820 to static and __initdata
Signed-off-by: Yinghai Lu <yin...@kernel.org>
---
arch/x86/include/asm/e820.h | 9 ++++++
arch/x86/kernel/cpu/centaur.c | 53 +------------------------------------
arch/x86/kernel/e820.c | 57 +++++++++++++++++++++++++++++++++++++++++
arch/x86/kernel/setup.c | 2 +
4 files changed, 70 insertions(+), 51 deletions(-)
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index b48f371..38828c7 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -72,6 +72,15 @@ struct e820map {
extern struct e820map e820;
extern struct e820map e820_saved;
+#ifdef CONFIG_X86_OOSTORE
+extern int centaur_ram_top;
+void get_centaur_ram_top(void);
+#else
+static inline void get_centaur_ram_top(void)
+{
+}
+#endif
+
extern unsigned long pci_mem_start;
extern int e820_any_mapped(u64 start, u64 end, unsigned type);
extern int e820_all_mapped(u64 start, u64 end, unsigned type);
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index e58d978..bb49358 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -37,63 +37,14 @@ static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key)
mtrr_centaur_report_mcr(reg, lo, hi); /* Tell the mtrr driver */
}
-/*
- * Figure what we can cover with MCR's
- *
- * Shortcut: We know you can't put 4Gig of RAM on a winchip
- */
-static u32 __cpuinit ramtop(void)
-{
- u32 clip = 0xFFFFFFFFUL;
- u32 top = 0;
- int i;
-
- for (i = 0; i < e820.nr_map; i++) {
- unsigned long start, end;
-
- if (e820.map[i].addr > 0xFFFFFFFFUL)
- continue;
- /*
- * Don't MCR over reserved space. Ignore the ISA hole
- * we frob around that catastrophe already
- */
- if (e820.map[i].type == E820_RESERVED) {
- if (e820.map[i].addr >= 0x100000UL &&
- e820.map[i].addr < clip)
- clip = e820.map[i].addr;
- continue;
- }
- start = e820.map[i].addr;
- end = e820.map[i].addr + e820.map[i].size;
- if (start >= end)
- continue;
- if (end > top)
- top = end;
- }
- /*
- * Everything below 'top' should be RAM except for the ISA hole.
- * Because of the limited MCR's we want to map NV/ACPI into our
- * MCR range for gunk in RAM
- *
- * Clip might cause us to MCR insufficient RAM but that is an
- * acceptable failure mode and should only bite obscure boxes with
- * a VESA hole at 15Mb
- *
- * The second case Clip sometimes kicks in is when the EBDA is marked
- * as reserved. Again we fail safe with reasonable results
- */
- if (top > clip)
- top = clip;
-
- return top;
-}
+int __cpuinitdata centaur_ram_top;
/*
* Compute a set of MCR's to give maximum coverage
*/
static int __cpuinit centaur_mcr_compute(int nr, int key)
{
- u32 mem = ramtop();
+ u32 mem = centaur_ram_top;
u32 root = power2(mem);
u32 base = root;
u32 top = root;
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 05ee724..119c0e1 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1209,3 +1209,60 @@ void __init setup_memory_map(void)
printk(KERN_INFO "BIOS-provided physical RAM map:\n");
e820_print_map(who);
}
+
+#ifdef CONFIG_X86_OOSTORE
+/*
+ * Figure what we can cover with MCR's
+ *
+ * Shortcut: We know you can't put 4Gig of RAM on a winchip
+ */
+void __init get_centaur_ram_top(void)
+{
+ u32 clip = 0xFFFFFFFFUL;
+ u32 top = 0;
+ int i;
+
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR)
+ return;
+
+ for (i = 0; i < e820.nr_map; i++) {
+ unsigned long start, end;
+
+ if (e820.map[i].addr > 0xFFFFFFFFUL)
+ continue;
+ /*
+ * Don't MCR over reserved space. Ignore the ISA hole
+ * we frob around that catastrophe already
+ */
+ if (e820.map[i].type == E820_RESERVED) {
+ if (e820.map[i].addr >= 0x100000UL &&
+ e820.map[i].addr < clip)
+ clip = e820.map[i].addr;
+ continue;
+ }
+ start = e820.map[i].addr;
+ end = e820.map[i].addr + e820.map[i].size;
+ if (start >= end)
+ continue;
+ if (end > top)
+ top = end;
+ }
+ /*
+ * Everything below 'top' should be RAM except for the ISA hole.
+ * Because of the limited MCR's we want to map NV/ACPI into our
+ * MCR range for gunk in RAM
+ *
+ * Clip might cause us to MCR insufficient RAM but that is an
+ * acceptable failure mode and should only bite obscure boxes with
+ * a VESA hole at 15Mb
+ *
+ * The second case Clip sometimes kicks in is when the EBDA is marked
+ * as reserved. Again we fail safe with reasonable results
+ */
+ if (top > clip)
+ top = clip;
+
+ centaur_ram_top = top;
+}
+#endif
+
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 5d7ba1a..c5ea524 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -875,6 +875,8 @@ void __init setup_arch(char **cmdline_p)
if (mtrr_trim_uncached_memory(max_pfn))
max_pfn = e820_end_of_ram_pfn();
+ get_centaur_ram_top();
+
#ifdef CONFIG_X86_32
/* max_low_pfn get updated here */
find_low_pfn_range();
--
1.6.4.2
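The moved ramtop() logic computes 'top' as the end of the highest usable range below 4G and clips it at the lowest reserved entry at or above 1MB. A toy reproduction of that clip computation (user-space C, simplified map and types; not the kernel code):

#include <stdio.h>

struct ent { unsigned long long addr, size; unsigned type; };

#define E820_RAM	1
#define E820_RESERVED	2

int main(void)
{
	struct ent map[] = {
		{ 0x00000000ULL, 0x0009f000ULL, E820_RAM },
		{ 0x00100000ULL, 0x3ff00000ULL, E820_RAM },	/* 1MB..1GB */
		{ 0x3f000000ULL, 0x01000000ULL, E820_RESERVED },
	};
	unsigned clip = 0xFFFFFFFFU, top = 0;
	int i;

	for (i = 0; i < 3; i++) {
		unsigned long long start = map[i].addr;
		unsigned long long end = map[i].addr + map[i].size;

		if (map[i].type == E820_RESERVED) {
			/* don't MCR over reserved space above 1MB */
			if (start >= 0x100000ULL && start < clip)
				clip = start;
			continue;
		}
		if (end > top)
			top = end;
	}
	if (top > clip)
		top = clip;
	printf("centaur_ram_top = %#x\n", top);	/* 0x3f000000 here */
	return 0;
}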
to take desc instead of irq
Signed-off-by: Yinghai Lu <yin...@kernel.org>
---
arch/x86/kernel/apic/io_apic.c | 4 +-
include/linux/irq.h | 10 +++-
include/linux/irqnr.h | 1 +
kernel/irq/chip.c | 127 +++++++++++++++++++++++-----------------
4 files changed, 85 insertions(+), 57 deletions(-)
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index d3b2f0b..5f061b7 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -3302,7 +3302,7 @@ unsigned int create_irq_nr(unsigned int irq_want, int node)
raw_spin_unlock_irqrestore(&vector_lock, flags);
if (irq > 0)
- dynamic_irq_init_keep_chip_data(irq);
+ dynamic_irq_init_keep_chip_data(irq_to_desc(irq));
return irq;
}
@@ -3328,7 +3328,7 @@ void destroy_irq(unsigned int irq)
struct irq_desc *desc;
struct irq_cfg *cfg;
- dynamic_irq_cleanup_keep_chip_data(irq);
+ dynamic_irq_cleanup_keep_chip_data(irq_to_desc(irq));
free_irte(irq);
raw_spin_lock_irqsave(&vector_lock, flags);
diff --git a/include/linux/irq.h b/include/linux/irq.h
index fb8c376..89d49e8 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -412,9 +412,9 @@ static inline int irq_has_action(unsigned int irq)
/* Dynamic irq helper functions */
extern void dynamic_irq_init(unsigned int irq);
-void dynamic_irq_init_keep_chip_data(unsigned int irq);
+void dynamic_irq_init_keep_chip_data(struct irq_desc *desc);
extern void dynamic_irq_cleanup(unsigned int irq);
-void dynamic_irq_cleanup_keep_chip_data(unsigned int irq);
+void dynamic_irq_cleanup_keep_chip_data(struct irq_desc *desc);
/* Set/get chip/data for an IRQ: */
extern int set_irq_chip(unsigned int irq, struct irq_chip *chip);
@@ -423,6 +423,12 @@ extern int set_irq_chip_data(unsigned int irq, void *data);
extern int set_irq_type(unsigned int irq, unsigned int type);
extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
+int set_irq_desc_chip(struct irq_desc *desc, struct irq_chip *chip);
+int set_irq_desc_data(struct irq_desc *desc, void *data);
+int set_irq_desc_chip_data(struct irq_desc *desc, void *data);
+int set_irq_desc_type(struct irq_desc *desc, unsigned int type);
+int set_irq_desc_msi(struct irq_desc *desc, struct msi_desc *entry);
+
#define get_irq_chip(irq) (irq_to_desc(irq)->chip)
#define get_irq_chip_data(irq) (irq_to_desc(irq)->chip_data)
#define get_irq_data(irq) (irq_to_desc(irq)->handler_data)
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h
index 7bf89bc..dee8f2b 100644
--- a/include/linux/irqnr.h
+++ b/include/linux/irqnr.h
@@ -43,6 +43,7 @@ extern struct irq_desc *irq_to_desc(unsigned int irq);
#ifdef CONFIG_SMP
#define irq_node(irq) (irq_to_desc(irq)->node)
+#define irq_desc_node(desc) ((desc)->node)
#else
#define irq_node(irq) 0
#endif
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 043557a..502168d 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -18,17 +18,10 @@
#include "internals.h"
-static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data)
+static void dynamic_irq_init_x(struct irq_desc *desc, bool keep_chip_data)
{
- struct irq_desc *desc;
unsigned long flags;
- desc = irq_to_desc(irq);
- if (!desc) {
- WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq);
- return;
- }
-
/* Ensure we don't have left over values from a previous use of this irq */
raw_spin_lock_irqsave(&desc->lock, flags);
desc->status = IRQ_DISABLED;
@@ -57,7 +50,15 @@ static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data)
*/
void dynamic_irq_init(unsigned int irq)
{
- dynamic_irq_init_x(irq, false);
+ struct irq_desc *desc;
+
+ desc = irq_to_desc(irq);
+ if (!desc) {
+ WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq);
+ return;
+ }
+
+ dynamic_irq_init_x(desc, false);
}
/**
@@ -66,26 +67,20 @@ void dynamic_irq_init(unsigned int irq)
*
* does not set irq_to_desc(irq)->chip_data to NULL
*/
-void dynamic_irq_init_keep_chip_data(unsigned int irq)
+void dynamic_irq_init_keep_chip_data(struct irq_desc *desc)
{
- dynamic_irq_init_x(irq, true);
+ dynamic_irq_init_x(desc, true);
}
-static void dynamic_irq_cleanup_x(unsigned int irq, bool keep_chip_data)
+static void dynamic_irq_cleanup_x(struct irq_desc *desc, bool keep_chip_data)
{
- struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;
- if (!desc) {
- WARN(1, KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq);
- return;
- }
-
raw_spin_lock_irqsave(&desc->lock, flags);
if (desc->action) {
raw_spin_unlock_irqrestore(&desc->lock, flags);
WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n",
- irq);
+ desc->irq);
return;
}
desc->msi_desc = NULL;
@@ -105,7 +100,14 @@ static void dynamic_irq_cleanup_x(unsigned int irq, bool keep_chip_data)
*/
void dynamic_irq_cleanup(unsigned int irq)
{
- dynamic_irq_cleanup_x(irq, false);
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ if (!desc) {
+ WARN(1, KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq);
+ return;
+ }
+
+ dynamic_irq_cleanup_x(desc, false);
}
/**
@@ -114,9 +116,9 @@ void dynamic_irq_cleanup(unsigned int irq)
*
* does not set irq_to_desc(irq)->chip_data to NULL
*/
-void dynamic_irq_cleanup_keep_chip_data(unsigned int irq)
+void dynamic_irq_cleanup_keep_chip_data(struct irq_desc *desc)
{
- dynamic_irq_cleanup_x(irq, true);
+ dynamic_irq_cleanup_x(desc, true);
}
@@ -152,26 +154,31 @@ EXPORT_SYMBOL(set_irq_chip);
* @irq: irq number
* @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
*/
-int set_irq_type(unsigned int irq, unsigned int type)
+int set_irq_desc_type(struct irq_desc *desc, unsigned int type)
{
- struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;
int ret = -ENXIO;
- if (!desc) {
- printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
- return -ENODEV;
- }
-
type &= IRQ_TYPE_SENSE_MASK;
if (type == IRQ_TYPE_NONE)
return 0;
raw_spin_lock_irqsave(&desc->lock, flags);
- ret = __irq_set_trigger(desc, irq, type);
+ ret = __irq_set_trigger(desc, desc->irq, type);
raw_spin_unlock_irqrestore(&desc->lock, flags);
return ret;
}
+int set_irq_type(unsigned int irq, unsigned int type)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ if (!desc) {
+ printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
+ return -ENODEV;
+ }
+
+ return set_irq_desc_type(desc, type);
+}
EXPORT_SYMBOL(set_irq_type);
/**
@@ -181,10 +188,18 @@ EXPORT_SYMBOL(set_irq_type);
*
* Set the hardware irq controller data for an irq
*/
+int set_irq_desc_data(struct irq_desc *desc, void *data)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ desc->handler_data = data;
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ return 0;
+}
int set_irq_data(unsigned int irq, void *data)
{
struct irq_desc *desc = irq_to_desc(irq);
- unsigned long flags;
if (!desc) {
printk(KERN_ERR
@@ -192,10 +207,7 @@ int set_irq_data(unsigned int irq, void *data)
return -EINVAL;
}
- raw_spin_lock_irqsave(&desc->lock, flags);
- desc->handler_data = data;
- raw_spin_unlock_irqrestore(&desc->lock, flags);
- return 0;
+ return set_irq_desc_data(desc, data);
}
EXPORT_SYMBOL(set_irq_data);
@@ -206,24 +218,28 @@ EXPORT_SYMBOL(set_irq_data);
*
* Set the MSI descriptor entry for an irq
*/
-int set_irq_msi(unsigned int irq, struct msi_desc *entry)
+int set_irq_desc_msi(struct irq_desc *desc, struct msi_desc *entry)
{
- struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;
- if (!desc) {
- printk(KERN_ERR
- "Trying to install msi data for IRQ%d\n", irq);
- return -EINVAL;
- }
-
raw_spin_lock_irqsave(&desc->lock, flags);
desc->msi_desc = entry;
if (entry)
- entry->irq = irq;
+ entry->irq = desc->irq;
raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
}
+int set_irq_msi(unsigned int irq, struct msi_desc *entry)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ if (!desc) {
+ printk(KERN_ERR
+ "Trying to install msi data for IRQ%d\n", desc->irq);
+ return -EINVAL;
+ }
+
+ return set_irq_desc_msi(desc, entry);
+}
/**
* set_irq_chip_data - set irq chip data for an irq
@@ -232,19 +248,12 @@ int set_irq_msi(unsigned int irq, struct msi_desc *entry)
*
* Set the hardware irq chip data for an irq
*/
-int set_irq_chip_data(unsigned int irq, void *data)
+int set_irq_desc_chip_data(struct irq_desc *desc, void *data)
{
- struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;
- if (!desc) {
- printk(KERN_ERR
- "Trying to install chip data for IRQ%d\n", irq);
- return -EINVAL;
- }
-
if (!desc->chip) {
- printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq);
+ printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", desc->irq);
return -EINVAL;
}
@@ -254,6 +263,18 @@ int set_irq_chip_data(unsigned int irq, void *data)
return 0;
}
+int set_irq_chip_data(unsigned int irq, void *data)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ if (!desc) {
+ printk(KERN_ERR
+ "Trying to install chip data for IRQ%d\n", irq);
+ return -EINVAL;
+ }
+
+ return set_irq_desc_chip_data(desc, data);
+}
EXPORT_SYMBOL(set_irq_chip_data);
/**
--
1.6.4.2
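
As a usage sketch for the desc-based setters added above (hypothetical caller
code, not part of the patch; my_chip and my_data are illustrative, and the
caller is assumed to have already done the one irq_to_desc() lookup):

	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		/* reuse the single lookup instead of three hidden ones */
		set_irq_desc_chip(desc, &my_chip);
		set_irq_desc_chip_data(desc, my_data);
		set_irq_desc_type(desc, IRQ_TYPE_LEVEL_HIGH);
	}
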
The problem is present in the x86 bits even before early_res is used as the
bootmem replacement. After the early_res-for-bootmem conversion,
alloc_bootmem_node can still return a range on the correct node, so this
patch fixes the problem for allocations made before bootmem (or its
early_res replacement) is available.
For now the only user is x86 64-bit NUMA, which needs it to place the node
data. The point is to consult early_node_map[] via find_e820_area_node().
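A minimal sketch of the intended usage (simplified and illustrative only;
alloc_on_node and the "EXAMPLE" tag are made up here, reserve_early() is the
existing early_res reservation helper, and e820_register_active_regions()
must have run first so early_node_map[] is populated):

	static void * __init alloc_on_node(int nid, u64 start, u64 end,
					   u64 size, u64 align)
	{
		u64 addr = find_e820_area_node(nid, start, end, size, align);

		if (addr == -1ULL)
			return NULL;
		reserve_early(addr, addr + size, "EXAMPLE");
		return __va(addr);
	}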
Signed-off-by: Yinghai Lu <yin...@kernel.org>
---
arch/x86/include/asm/e820.h | 1 +
arch/x86/kernel/e820.c | 15 +++++++++++++++
arch/x86/mm/numa_64.c | 4 ++--
include/linux/mm.h | 2 ++
mm/page_alloc.c | 37 +++++++++++++++++++++++--------------
5 files changed, 43 insertions(+), 16 deletions(-)
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index 0e22296..b48f371 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -111,6 +111,7 @@ extern unsigned long end_user_pfn;
extern u64 find_e820_area(u64 start, u64 end, u64 size, u64 align);
extern u64 find_e820_area_size(u64 start, u64 *sizep, u64 align);
+u64 find_e820_area_node(int nid, u64 start, u64 end, u64 size, u64 align);
extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
#include <linux/early_res.h>
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 740b440..05ee724 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -787,6 +787,21 @@ u64 __init find_e820_area_size(u64 start, u64 *sizep, u64 align)
return -1ULL;
}
+u64 __init find_e820_area_node(int nid, u64 start, u64 end, u64 size, u64 align)
+{
+ u64 addr;
+ /*
+ * need to call this function after e820_register_active_regions
+ * so early_node_map[] is set
+ */
+ addr = find_memory_core_early(nid, size, align, start, end);
+ if (addr != -1ULL)
+ return addr;
+
+ /* fallback, should already have start end in the node range */
+ return find_e820_area(start, end, size, align);
+}
+
/*
* pre allocated 4k and reserved it in e820
*/
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 8948f47..ffc5ad5 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -174,7 +174,7 @@ static void * __init early_node_mem(int nodeid, unsigned long start,
if (start < (MAX_DMA32_PFN<<PAGE_SHIFT) &&
end > (MAX_DMA32_PFN<<PAGE_SHIFT))
start = MAX_DMA32_PFN<<PAGE_SHIFT;
- mem = find_e820_area(start, end, size, align);
+ mem = find_e820_area_node(nodeid, start, end, size, align);
if (mem != -1L)
return __va(mem);
@@ -184,7 +184,7 @@ static void * __init early_node_mem(int nodeid, unsigned long start,
start = MAX_DMA32_PFN<<PAGE_SHIFT;
else
start = MAX_DMA_PFN<<PAGE_SHIFT;
- mem = find_e820_area(start, end, size, align);
+ mem = find_e820_area_node(nodeid, start, end, size, align);
if (mem != -1L)
return __va(mem);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e70f21b..5c2d17e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1160,6 +1160,8 @@ extern void free_bootmem_with_active_regions(int nid,
unsigned long max_low_pfn);
int add_from_early_node_map(struct range *range, int az,
int nr_range, int nid);
+u64 __init find_memory_core_early(int nid, u64 size, u64 align,
+ u64 goal, u64 limit);
void *__alloc_memory_core_early(int nodeid, u64 size, u64 align,
u64 goal, u64 limit);
typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d03c946..eef3757 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3408,12 +3408,11 @@ int __init add_from_early_node_map(struct range *range, int az,
return nr_range;
}
-#ifdef CONFIG_NO_BOOTMEM
-void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
+#ifdef CONFIG_HAVE_EARLY_RES
+u64 __init find_memory_core_early(int nid, u64 size, u64 align,
u64 goal, u64 limit)
{
int i;
- void *ptr;
/* need to go over early_node_map to find out good range for node */
for_each_active_range_index_in_nid(i, nid) {
@@ -3430,20 +3429,30 @@ void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
if (addr == -1ULL)
continue;
-#if 0
- printk(KERN_DEBUG "alloc (nid=%d %llx - %llx) (%llx - %llx) %llx %llx => %llx\n",
- nid,
- ei_start, ei_last, goal, limit, size,
- align, addr);
+ return addr;
+ }
+
+ return -1ULL;
+}
#endif
- ptr = phys_to_virt(addr);
- memset(ptr, 0, size);
- reserve_early_without_check(addr, addr + size, "BOOTMEM");
- return ptr;
- }
+#ifdef CONFIG_NO_BOOTMEM
+void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
+ u64 goal, u64 limit)
+{
+ void *ptr;
- return NULL;
+ u64 addr;
+
+ addr = find_memory_core_early(nid, size, align, goal, limit);
+
+ if (addr == -1ULL)
+ return NULL;
+
+ ptr = phys_to_virt(addr);
+ memset(ptr, 0, size);
+ reserve_early_without_check(addr, addr + size, "BOOTMEM");
+ return ptr;
}
#endif
--
1.6.4.2
So with sparseirq backed by the radix tree we avoid the extra irq_to_desc()
calls and can use the desc directly.
-v2: change all irq_chip members to take desc only.
-v2.1: update after the legacy_pic changes.
-v2.2: update on top of the irq one-shot fix.
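To illustrate the conversion, a minimal before/after sketch of one chip
callback (my_chip is a hypothetical driver type, not from this patch):

	struct my_chip { void __iomem *base; };

	/* before: takes the irq number, state lookup hidden in the helper */
	static void my_mask_old(unsigned int irq)
	{
		struct my_chip *p = get_irq_chip_data(irq); /* irq_to_desc() inside */
		writel(1, p->base);
	}

	/* after: the core hands in the desc, chip data is one dereference away */
	static void my_mask_new(struct irq_desc *desc)
	{
		struct my_chip *p = desc->chip_data;
		writel(1, p->base);
	}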
Signed-off-by: Yinghai Lu <yin...@kernel.org>
---
arch/alpha/include/asm/hardirq.h | 3 +-
arch/alpha/kernel/irq.c | 4 +-
arch/arm/include/asm/hw_irq.h | 3 +-
arch/arm/kernel/irq.c | 2 +-
arch/blackfin/include/asm/hardirq.h | 3 +-
arch/blackfin/kernel/irqchip.c | 4 +-
arch/frv/include/asm/hardirq.h | 3 +-
arch/ia64/hp/sim/hpsim_irq.c | 6 +-
arch/ia64/include/asm/hardirq.h | 2 +-
arch/ia64/kernel/iosapic.c | 41 ++++----
arch/ia64/kernel/irq.c | 10 +-
arch/ia64/kernel/irq_lsapic.c | 8 +-
arch/ia64/kernel/msi_ia64.c | 21 +++--
arch/ia64/kernel/smpboot.c | 6 +-
arch/ia64/sn/kernel/irq.c | 25 +++--
arch/ia64/sn/kernel/msi_sn.c | 12 ++-
arch/mips/include/asm/hardirq.h | 3 +-
arch/mips/kernel/irq.c | 3 +-
arch/mn10300/include/asm/hardirq.h | 2 +-
arch/mn10300/kernel/irq.c | 4 +-
arch/powerpc/include/asm/hardirq.h | 4 +-
arch/sh/include/asm/hardirq.h | 3 +-
arch/sh/kernel/irq.c | 4 +-
arch/sparc/include/asm/hardirq_64.h | 3 +-
arch/sparc/kernel/irq_64.c | 3 +-
arch/um/kernel/irq.c | 2 +-
arch/x86/include/asm/hardirq.h | 2 +-
arch/x86/include/asm/hpet.h | 8 +-
arch/x86/include/asm/hw_irq.h | 1 -
arch/x86/include/asm/i8259.h | 2 +-
arch/x86/kernel/apic/io_apic.c | 143 ++++++++++-----------------
arch/x86/kernel/hpet.c | 16 ++--
arch/x86/kernel/i8259.c | 31 ++++---
arch/x86/kernel/irq.c | 12 +-
arch/x86/kernel/uv_irq.c | 14 ++--
arch/x86/kernel/visws_quirks.c | 29 +++---
arch/x86/kernel/vmiclock_32.c | 8 +-
arch/x86/lguest/boot.c | 8 +-
arch/xtensa/include/asm/hardirq.h | 3 +-
arch/xtensa/kernel/irq.c | 4 +-
drivers/dma/ipu/ipu_irq.c | 18 ++--
drivers/gpio/langwell_gpio.c | 11 +-
drivers/gpio/pca953x.c | 23 +++--
drivers/gpio/pl061.c | 19 ++--
drivers/gpio/timbgpio.c | 17 ++--
drivers/gpio/vr41xx_giu.c | 32 +++---
drivers/infiniband/hw/ipath/ipath_iba6110.c | 2 +-
drivers/mfd/asic3.c | 27 +++--
drivers/mfd/ezx-pcap.c | 12 ++-
drivers/mfd/htc-egpio.c | 12 ++-
drivers/mfd/t7l66xb.c | 8 +-
drivers/mfd/tc6393xb.c | 14 ++-
drivers/mfd/twl4030-irq.c | 16 ++--
drivers/mfd/wm831x-irq.c | 18 ++--
drivers/misc/sgi-gru/grufile.c | 2 +-
drivers/parisc/dino.c | 12 +-
drivers/parisc/eisa.c | 10 +-
drivers/parisc/gsc.c | 12 +-
drivers/parisc/iosapic.c | 16 ++-
drivers/parisc/superio.c | 10 +-
drivers/pci/dmar.c | 16 ++--
drivers/pci/htirq.c | 22 ++--
drivers/pci/msi.c | 13 ++-
drivers/vlynq/vlynq.c | 23 +++--
drivers/xen/events.c | 22 ++--
include/asm-generic/hardirq.h | 4 +-
include/linux/dmar.h | 8 +-
include/linux/htirq.h | 11 +-
include/linux/irq.h | 49 +++++-----
include/linux/msi.h | 4 +-
kernel/irq/autoprobe.c | 12 +-
kernel/irq/chip.c | 68 ++++++-------
kernel/irq/handle.c | 24 ++---
kernel/irq/internals.h | 16 ++--
kernel/irq/manage.c | 67 ++++++-------
kernel/irq/migration.c | 16 +--
kernel/irq/pm.c | 4 +-
kernel/irq/resend.c | 8 +-
kernel/irq/spurious.c | 4 +-
79 files changed, 585 insertions(+), 562 deletions(-)
diff --git a/arch/alpha/include/asm/hardirq.h b/arch/alpha/include/asm/hardirq.h
index 242c09b..80f79f4 100644
--- a/arch/alpha/include/asm/hardirq.h
+++ b/arch/alpha/include/asm/hardirq.h
@@ -1,7 +1,8 @@
#ifndef _ALPHA_HARDIRQ_H
#define _ALPHA_HARDIRQ_H
-void ack_bad_irq(unsigned int irq);
+struct irq_desc;
+void ack_bad_irq(struct irq_desc *desc);
#define ack_bad_irq ack_bad_irq
#include <asm-generic/hardirq.h>
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index 5f2cf23..08a6384 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -33,10 +33,10 @@
volatile unsigned long irq_err_count;
-void ack_bad_irq(unsigned int irq)
+void ack_bad_irq(struct irq_desc *desc)
{
irq_err_count++;
- printk(KERN_CRIT "Unexpected IRQ trap at vector %u\n", irq);
+ printk(KERN_CRIT "Unexpected IRQ trap at vector %u\n", desc->irq);
}
#ifdef CONFIG_SMP
diff --git a/arch/arm/include/asm/hw_irq.h b/arch/arm/include/asm/hw_irq.h
index 90831f6..43a8c03 100644
--- a/arch/arm/include/asm/hw_irq.h
+++ b/arch/arm/include/asm/hw_irq.h
@@ -4,7 +4,8 @@
#ifndef _ARCH_ARM_HW_IRQ_H
#define _ARCH_ARM_HW_IRQ_H
-static inline void ack_bad_irq(int irq)
+struct irq_desc;
+static inline void ack_bad_irq(struct irq_desc *desc)
{
extern unsigned long irq_err_count;
irq_err_count++;
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index b7cb45b..265e78c 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -116,7 +116,7 @@ asmlinkage void __exception asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
if (unlikely(irq >= NR_IRQS)) {
if (printk_ratelimit())
printk(KERN_WARNING "Bad IRQ%u\n", irq);
- ack_bad_irq(irq);
+ ack_bad_irq(irq_to_desc(irq));
} else {
generic_handle_irq(irq);
}
diff --git a/arch/blackfin/include/asm/hardirq.h b/arch/blackfin/include/asm/hardirq.h
index c078dd7..e5ed5d5 100644
--- a/arch/blackfin/include/asm/hardirq.h
+++ b/arch/blackfin/include/asm/hardirq.h
@@ -9,7 +9,8 @@
#define __ARCH_IRQ_EXIT_IRQS_DISABLED 1
-extern void ack_bad_irq(unsigned int irq);
+struct irq_desc;
+extern void ack_bad_irq(struct irq_desc *desc);
#define ack_bad_irq ack_bad_irq
/* Define until common code gets sane defaults */
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
index 64cff54..707a93b 100644
--- a/arch/blackfin/kernel/irqchip.c
+++ b/arch/blackfin/kernel/irqchip.c
@@ -15,10 +15,10 @@
#include <asm/pda.h>
static atomic_t irq_err_count;
-void ack_bad_irq(unsigned int irq)
+void ack_bad_irq(struct irq_desc *desc)
{
atomic_inc(&irq_err_count);
- printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
+ printk(KERN_ERR "IRQ: spurious interrupt %d\n", desc->irq);
}
static struct irq_desc bad_irq_desc = {
diff --git a/arch/frv/include/asm/hardirq.h b/arch/frv/include/asm/hardirq.h
index 5fc8b6f..2c49141 100644
--- a/arch/frv/include/asm/hardirq.h
+++ b/arch/frv/include/asm/hardirq.h
@@ -14,8 +14,9 @@
#include <asm/atomic.h>
+struct irq_desc;
extern atomic_t irq_err_count;
-static inline void ack_bad_irq(int irq)
+static inline void ack_bad_irq(struct irq_desc *desc)
{
atomic_inc(&irq_err_count);
}
diff --git a/arch/ia64/hp/sim/hpsim_irq.c b/arch/ia64/hp/sim/hpsim_irq.c
index b272261..de7c5d1 100644
--- a/arch/ia64/hp/sim/hpsim_irq.c
+++ b/arch/ia64/hp/sim/hpsim_irq.c
@@ -11,18 +11,18 @@
#include <linux/irq.h>
static unsigned int
-hpsim_irq_startup (unsigned int irq)
+hpsim_irq_startup(struct irq_desc *desc)
{
return 0;
}
static void
-hpsim_irq_noop (unsigned int irq)
+hpsim_irq_noop(struct irq_desc *desc)
{
}
static int
-hpsim_set_affinity_noop(unsigned int a, const struct cpumask *b)
+hpsim_set_affinity_noop(struct irq_desc *desc, const struct cpumask *b)
{
return 0;
}
diff --git a/arch/ia64/include/asm/hardirq.h b/arch/ia64/include/asm/hardirq.h
index d514cd9..cc9950b 100644
--- a/arch/ia64/include/asm/hardirq.h
+++ b/arch/ia64/include/asm/hardirq.h
@@ -22,6 +22,6 @@
extern void __iomem *ipi_base_addr;
-void ack_bad_irq(unsigned int irq);
+void ack_bad_irq(struct irq_desc *desc);
#endif /* _ASM_IA64_HARDIRQ_H */
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 95ac77a..0edfbb4 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -264,7 +264,7 @@ set_rte (unsigned int gsi, unsigned int irq, unsigned int dest, int mask)
}
static void
-nop (unsigned int irq)
+nop(struct irq_desc *desc)
{
/* do nothing... */
}
@@ -294,8 +294,9 @@ kexec_disable_iosapic(void)
#endif
static void
-mask_irq (unsigned int irq)
+mask_irq(struct irq_desc *desc)
{
+ unsigned int irq = desc->irq;
u32 low32;
int rte_index;
struct iosapic_rte_info *rte;
@@ -312,8 +313,9 @@ mask_irq (unsigned int irq)
}
static void
-unmask_irq (unsigned int irq)
+unmask_irq(struct irq_desc *desc)
{
+ unsigned int irq = desc->irq;
u32 low32;
int rte_index;
struct iosapic_rte_info *rte;
@@ -328,11 +330,11 @@ unmask_irq (unsigned int irq)
}
}
-
static int
-iosapic_set_affinity(unsigned int irq, const struct cpumask *mask)
+iosapic_set_affinity(struct irq_desc *desc, const struct cpumask *mask)
{
#ifdef CONFIG_SMP
+ unsigned int irq = desc->irq;
u32 high32, low32;
int cpu, dest, rte_index;
int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0;
@@ -386,31 +388,32 @@ iosapic_set_affinity(unsigned int irq, const struct cpumask *mask)
*/
static unsigned int
-iosapic_startup_level_irq (unsigned int irq)
+iosapic_startup_level_irq (struct irq_desc *desc)
{
- unmask_irq(irq);
+ unmask_irq(desc);
return 0;
}
static void
-iosapic_end_level_irq (unsigned int irq)
+iosapic_end_level_irq (struct irq_desc *desc)
{
+ unsigned int irq = desc->irq;
ia64_vector vec = irq_to_vector(irq);
struct iosapic_rte_info *rte;
int do_unmask_irq = 0;
irq_complete_move(irq);
- if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
+ if (unlikely(desc->status & IRQ_MOVE_PENDING)) {
do_unmask_irq = 1;
- mask_irq(irq);
+ mask_irq(desc);
}
list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list)
iosapic_eoi(rte->iosapic->addr, vec);
if (unlikely(do_unmask_irq)) {
- move_masked_irq(irq);
- unmask_irq(irq);
+ move_masked_irq(desc);
+ unmask_irq(desc);
}
}
@@ -437,9 +440,9 @@ static struct irq_chip irq_type_iosapic_level = {
*/
static unsigned int
-iosapic_startup_edge_irq (unsigned int irq)
+iosapic_startup_edge_irq (struct irq_desc *desc)
{
- unmask_irq(irq);
+ unmask_irq(desc);
/*
* IOSAPIC simply drops interrupts pended while the
* corresponding pin was masked, so we can't know if an
@@ -449,20 +452,20 @@ iosapic_startup_edge_irq (unsigned int irq)
}
static void
-iosapic_ack_edge_irq (unsigned int irq)
+iosapic_ack_edge_irq (struct irq_desc *desc)
{
- struct irq_desc *idesc = irq_desc + irq;
+ unsigned int irq = desc->irq;
irq_complete_move(irq);
- move_native_irq(irq);
+ move_native_irq(desc);
/*
* Once we have recorded IRQ_PENDING already, we can mask the
* interrupt for real. This prevents IRQ storms from unhandled
* devices.
*/
- if ((idesc->status & (IRQ_PENDING|IRQ_DISABLED)) ==
+ if ((desc->status & (IRQ_PENDING|IRQ_DISABLED)) ==
(IRQ_PENDING|IRQ_DISABLED))
- mask_irq(irq);
+ mask_irq(desc);
}
#define iosapic_enable_edge_irq unmask_irq
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 94ee9d0..d4fe756 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -27,9 +27,9 @@
* 'what should we do if we get a hw irq event on an illegal vector'.
* each architecture has to answer this themselves.
*/
-void ack_bad_irq(unsigned int irq)
+void ack_bad_irq(struct irq_desc *desc)
{
- printk(KERN_ERR "Unexpected irq vector 0x%x on CPU %u!\n", irq, smp_processor_id());
+ printk(KERN_ERR "Unexpected irq vector 0x%x on CPU %u!\n", desc->irq, smp_processor_id());
}
#ifdef CONFIG_IA64_GENERIC
@@ -162,10 +162,10 @@ static void migrate_irqs(void)
*/
if (desc->chip && desc->chip->disable &&
desc->chip->enable && desc->chip->set_affinity) {
- desc->chip->disable(irq);
- desc->chip->set_affinity(irq,
+ desc->chip->disable(desc);
+ desc->chip->set_affinity(desc,
cpumask_of(new_cpu));
- desc->chip->enable(irq);
+ desc->chip->enable(desc);
} else {
WARN_ON((!(desc->chip) || !(desc->chip->disable) ||
!(desc->chip->enable) ||
diff --git a/arch/ia64/kernel/irq_lsapic.c b/arch/ia64/kernel/irq_lsapic.c
index fc1549d..438641a 100644
--- a/arch/ia64/kernel/irq_lsapic.c
+++ b/arch/ia64/kernel/irq_lsapic.c
@@ -15,19 +15,21 @@
#include <linux/irq.h>
static unsigned int
-lsapic_noop_startup (unsigned int irq)
+lsapic_noop_startup(struct irq_desc *desc)
{
return 0;
}
static void
-lsapic_noop (unsigned int irq)
+lsapic_noop(struct irq_desc *desc)
{
/* nothing to do... */
}
-static int lsapic_retrigger(unsigned int irq)
+static int lsapic_retrigger(struct irq_desc *desc)
{
+ unsigned int irq = desc->irq;
+
ia64_resend_irq(irq);
return 1;
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 6c89228..d33f88c 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -12,9 +12,10 @@
static struct irq_chip ia64_msi_chip;
#ifdef CONFIG_SMP
-static int ia64_set_msi_irq_affinity(unsigned int irq,
+static int ia64_set_msi_irq_affinity(struct irq_desc *desc,
const cpumask_t *cpu_mask)
{
+ unsigned int irq = desc->irq;
struct msi_msg msg;
u32 addr, data;
int cpu = first_cpu(*cpu_mask);
@@ -38,7 +39,7 @@ static int ia64_set_msi_irq_affinity(unsigned int irq,
msg.data = data;
write_msi_msg(irq, &msg);
- cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
+ cpumask_copy(desc->affinity, cpumask_of(cpu));
return 0;
}
@@ -84,15 +85,17 @@ void ia64_teardown_msi_irq(unsigned int irq)
destroy_irq(irq);
}
-static void ia64_ack_msi_irq(unsigned int irq)
+static void ia64_ack_msi_irq(struct irq_desc *desc)
{
+ unsigned int irq = desc->irq;
irq_complete_move(irq);
- move_native_irq(irq);
+ move_native_irq(desc);
ia64_eoi();
}
-static int ia64_msi_retrigger_irq(unsigned int irq)
+static int ia64_msi_retrigger_irq(struct irq_desc *desc)
{
+ unsigned int irq = desc->irq;
unsigned int vector = irq_to_vector(irq);
ia64_resend_irq(vector);
@@ -132,8 +135,9 @@ void arch_teardown_msi_irq(unsigned int irq)
#ifdef CONFIG_DMAR
#ifdef CONFIG_SMP
-static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
+static int dmar_msi_set_affinity(struct irq_desc *desc, const struct cpumask *mask)
{
+ unsigned int irq = desc->irq;
struct irq_cfg *cfg = irq_cfg + irq;
struct msi_msg msg;
int cpu = cpumask_first(mask);
@@ -152,7 +156,7 @@ static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
dmar_msi_write(irq, &msg);
- cpumask_copy(irq_desc[irq].affinity, mask);
+ cpumask_copy(desc->affinity, mask);
return 0;
}
@@ -198,11 +202,12 @@ int arch_setup_dmar_msi(unsigned int irq)
{
int ret;
struct msi_msg msg;
+ struct irq_desc *desc = irq_to_desc(irq);
ret = msi_compose_msg(NULL, irq, &msg);
if (ret < 0)
return ret;
- dmar_msi_write(irq, &msg);
+ dmar_msi_write(desc, &msg);
set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
"edge");
return 0;
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index e5230b2..7241118 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -696,9 +696,9 @@ int migrate_platform_irqs(unsigned int cpu)
* polling before making changes.
*/
if (desc) {
- desc->chip->disable(ia64_cpe_irq);
- desc->chip->set_affinity(ia64_cpe_irq, mask);
- desc->chip->enable(ia64_cpe_irq);
+ desc->chip->disable(desc);
+ desc->chip->set_affinity(desc, mask);
+ desc->chip->enable(desc);
printk ("Re-targetting CPEI to cpu %d\n", new_cpei_cpu);
}
}
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index 40d6eed..78bf9c3 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -77,31 +77,34 @@ u64 sn_intr_redirect(nasid_t local_nasid, int local_widget,
return ret_stuff.status;
}
-static unsigned int sn_startup_irq(unsigned int irq)
+static unsigned int sn_startup_irq(struct irq_desc *desc)
{
return 0;
}
-static void sn_shutdown_irq(unsigned int irq)
+static void sn_shutdown_irq(struct irq_desc *desc)
{
}
extern void ia64_mca_register_cpev(int);
-static void sn_disable_irq(unsigned int irq)
+static void sn_disable_irq(struct irq_desc *desc)
{
+ unsigned int irq = desc->irq;
if (irq == local_vector_to_irq(IA64_CPE_VECTOR))
ia64_mca_register_cpev(0);
}
-static void sn_enable_irq(unsigned int irq)
+static void sn_enable_irq(struct irq_desc *desc)
{
+ unsigned int irq = desc->irq;
if (irq == local_vector_to_irq(IA64_CPE_VECTOR))
ia64_mca_register_cpev(irq);
}
-static void sn_ack_irq(unsigned int irq)
+static void sn_ack_irq(struct irq_desc *desc)
{
+ unsigned int irq = desc->irq;
u64 event_occurred, mask;
irq = irq & 0xff;
@@ -110,11 +113,12 @@ static void sn_ack_irq(unsigned int irq)
HUB_S((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), mask);
__set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);
- move_native_irq(irq);
+ move_native_irq(desc);
}
-static void sn_end_irq(unsigned int irq)
+static void sn_end_irq(struct irq_desc *desc)
{
+ unsigned int irq = desc->irq;
int ivec;
u64 event_occurred;
@@ -227,8 +231,9 @@ finish_up:
return new_irq_info;
}
-static int sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask)
+static int sn_set_affinity_irq(struct irq_desc *desc, const struct cpumask *mask)
{
+ unsigned int irq = desc->irq;
struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
nasid_t nasid;
int slice;
@@ -258,12 +263,12 @@ void sn_set_err_irq_affinity(unsigned int irq) { }
#endif
static void
-sn_mask_irq(unsigned int irq)
+sn_mask_irq(struct irq_desc *desc)
{
}
static void
-sn_unmask_irq(unsigned int irq)
+sn_unmask_irq(struct irq_desc *desc)
{
}
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c
index fbbfb97..df63df0 100644
--- a/arch/ia64/sn/kernel/msi_sn.c
+++ b/arch/ia64/sn/kernel/msi_sn.c
@@ -151,9 +151,10 @@ int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry)
}
#ifdef CONFIG_SMP
-static int sn_set_msi_irq_affinity(unsigned int irq,
+static int sn_set_msi_irq_affinity(struct irq_desc *desc,
const struct cpumask *cpu_mask)
{
+ unsigned int irq = desc->irq;
struct msi_msg msg;
int slice;
nasid_t nasid;
@@ -205,20 +206,21 @@ static int sn_set_msi_irq_affinity(unsigned int irq,
msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);
write_msi_msg(irq, &msg);
- cpumask_copy(irq_desc[irq].affinity, cpu_mask);
+ cpumask_copy(desc->affinity, cpu_mask);
return 0;
}
#endif /* CONFIG_SMP */
-static void sn_ack_msi_irq(unsigned int irq)
+static void sn_ack_msi_irq(struct irq_desc *desc)
{
- move_native_irq(irq);
+ move_native_irq(desc);
ia64_eoi();
}
-static int sn_msi_retrigger_irq(unsigned int irq)
+static int sn_msi_retrigger_irq(struct irq_desc *desc)
{
+ unsigned int irq = desc->irq;
unsigned int vector = irq;
ia64_resend_irq(vector);
diff --git a/arch/mips/include/asm/hardirq.h b/arch/mips/include/asm/hardirq.h
index c977a86..a230b5e 100644
--- a/arch/mips/include/asm/hardirq.h
+++ b/arch/mips/include/asm/hardirq.h
@@ -10,7 +10,8 @@
#ifndef _ASM_HARDIRQ_H
#define _ASM_HARDIRQ_H
-extern void ack_bad_irq(unsigned int irq);
+struct irq_desc;
+extern void ack_bad_irq(struct irq_desc *desc);
#define ack_bad_irq ack_bad_irq
#include <asm-generic/hardirq.h>
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index 981f86c..619db4a 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -74,8 +74,9 @@ void free_irqno(unsigned int irq)
* 'what should we do if we get a hw irq event on an illegal vector'.
* each architecture has to answer this themselves.
*/
-void ack_bad_irq(unsigned int irq)
+void ack_bad_irq(struct irq_desc *desc)
{
+ unsigned int irq = desc->irq;
smtc_im_ack_irq(irq);
printk("unexpected IRQ # %d\n", irq);
}
diff --git a/arch/mn10300/include/asm/hardirq.h b/arch/mn10300/include/asm/hardirq.h
index 54d9501..725a812 100644
--- a/arch/mn10300/include/asm/hardirq.h
+++ b/arch/mn10300/include/asm/hardirq.h
@@ -26,7 +26,7 @@ typedef struct {
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
-extern void ack_bad_irq(int irq);
+extern void ack_bad_irq(struct irq_desc *desc);
/*
* manipulate stubs in the MN10300 CPU Trap/Interrupt Vector table
diff --git a/arch/mn10300/kernel/irq.c b/arch/mn10300/kernel/irq.c
index e2d5ed8..bc98665 100644
--- a/arch/mn10300/kernel/irq.c
+++ b/arch/mn10300/kernel/irq.c
@@ -100,9 +100,9 @@ static struct irq_chip mn10300_cpu_pic_edge = {
* 'what should we do if we get a hw irq event on an illegal vector'.
* each architecture has to answer this themselves.
*/
-void ack_bad_irq(int irq)
+void ack_bad_irq(struct irq_desc *desc)
{
- printk(KERN_WARNING "unexpected IRQ trap at vector %02x\n", irq);
+ printk(KERN_WARNING "unexpected IRQ trap at vector %02x\n", desc->irq);
}
/*
diff --git a/arch/powerpc/include/asm/hardirq.h b/arch/powerpc/include/asm/hardirq.h
index 3147a29..945c7d2 100644
--- a/arch/powerpc/include/asm/hardirq.h
+++ b/arch/powerpc/include/asm/hardirq.h
@@ -18,9 +18,9 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
#define local_softirq_pending() __get_cpu_var(irq_stat).__softirq_pending
-static inline void ack_bad_irq(unsigned int irq)
+static inline void ack_bad_irq(struct irq_desc *desc)
{
- printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
+ printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", desc->irq);
}
extern u64 arch_irq_stat_cpu(unsigned int cpu);
diff --git a/arch/sh/include/asm/hardirq.h b/arch/sh/include/asm/hardirq.h
index 48b1913..c5c08c8 100644
--- a/arch/sh/include/asm/hardirq.h
+++ b/arch/sh/include/asm/hardirq.h
@@ -11,6 +11,7 @@ typedef struct {
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
-extern void ack_bad_irq(unsigned int irq);
+struct irq_desc;
+extern void ack_bad_irq(struct irq_desc *desc);
#endif /* __ASM_SH_HARDIRQ_H */
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index d2d41d0..556e3d3 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -25,10 +25,10 @@ atomic_t irq_err_count;
* each architecture has to answer this themselves, it doesn't deserve
* a generic callback i think.
*/
-void ack_bad_irq(unsigned int irq)
+void ack_bad_irq(struct irq_desc *desc)
{
atomic_inc(&irq_err_count);
- printk("unexpected IRQ trap at vector %02x\n", irq);
+ printk("unexpected IRQ trap at vector %02x\n", desc->irq);
}
#if defined(CONFIG_PROC_FS)
diff --git a/arch/sparc/include/asm/hardirq_64.h b/arch/sparc/include/asm/hardirq_64.h
index 7c29fd1..6601974 100644
--- a/arch/sparc/include/asm/hardirq_64.h
+++ b/arch/sparc/include/asm/hardirq_64.h
@@ -12,7 +12,8 @@
#define local_softirq_pending() \
(local_cpu_data().__softirq_pending)
-void ack_bad_irq(unsigned int irq);
+struct irq_desc;
+void ack_bad_irq(struct irq_desc *desc);
#define HARDIRQ_BITS 8
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index e1cbdb9..4e7419c 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -689,8 +689,9 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
return virt_irq;
}
-void ack_bad_irq(unsigned int virt_irq)
+void ack_bad_irq(struct irq_desc *desc)
{
+ unsigned int virt_irq = desc->irq;
unsigned int ino = virt_irq_table[virt_irq].dev_ino;
if (!ino)
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index 89474ba..2c21218 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -362,7 +362,7 @@ EXPORT_SYMBOL(reactivate_fd);
* irq_chip must define (startup || enable) &&
* (shutdown || disable) && end
*/
-static void dummy(unsigned int irq)
+static void dummy(struct irq_desc *desc)
{
}
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 0f85764..9b4b8f3 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -44,7 +44,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
#define set_softirq_pending(x) percpu_write(irq_stat.__softirq_pending, (x))
#define or_softirq_pending(x) percpu_or(irq_stat.__softirq_pending, (x))
-extern void ack_bad_irq(unsigned int irq);
+extern void ack_bad_irq(struct irq_desc *desc);
extern u64 arch_irq_stat_cpu(unsigned int cpu);
#define arch_irq_stat_cpu arch_irq_stat_cpu
diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h
index 1d5c08a..16c2257 100644
--- a/arch/x86/include/asm/hpet.h
+++ b/arch/x86/include/asm/hpet.h
@@ -74,10 +74,10 @@ extern void hpet_disable(void);
extern unsigned int hpet_readl(unsigned int a);
extern void force_hpet_resume(void);
-extern void hpet_msi_unmask(unsigned int irq);
-extern void hpet_msi_mask(unsigned int irq);
-extern void hpet_msi_write(unsigned int irq, struct msi_msg *msg);
-extern void hpet_msi_read(unsigned int irq, struct msi_msg *msg);
+extern void hpet_msi_unmask(struct irq_desc *);
+extern void hpet_msi_mask(struct irq_desc *);
+extern void hpet_msi_write(struct irq_desc *desc, struct msi_msg *msg);
+extern void hpet_msi_read(struct irq_desc *desc, struct msi_msg *msg);
#ifdef CONFIG_PCI_MSI
extern int arch_setup_hpet_msi(unsigned int irq, unsigned int id);
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index d23cf94..2417e29 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -96,7 +96,6 @@ struct irq_cfg {
u8 move_in_progress : 1;
};
-extern struct irq_cfg *irq_cfg(unsigned int);
int assign_irq_vector(struct irq_desc *, struct irq_cfg *,
const struct cpumask *);
extern void send_cleanup_vector(struct irq_cfg *);
diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
index 1655147..0b2ad6f 100644
--- a/arch/x86/include/asm/i8259.h
+++ b/arch/x86/include/asm/i8259.h
@@ -58,7 +58,7 @@ struct legacy_pic {
void (*mask_all)(void);
void (*restore_mask)(void);
void (*init)(int auto_eoi);
- int (*irq_pending)(unsigned int irq);
+ int (*irq_pending)(struct irq_desc *desc);
void (*make_irq)(unsigned int irq);
};
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index cd2f193..d3b2f0b 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -176,18 +176,6 @@ int __init arch_early_irq_init(void)
}
#ifdef CONFIG_SPARSE_IRQ
-struct irq_cfg *irq_cfg(unsigned int irq)
-{
- struct irq_cfg *cfg = NULL;
- struct irq_desc *desc;
-
- desc = irq_to_desc(irq);
- if (desc)
- cfg = desc->chip_data;
-
- return cfg;
-}
-
static struct irq_cfg *get_one_free_irq_cfg(int node)
{
struct irq_cfg *cfg;
@@ -336,10 +324,6 @@ int arch_init_irq_desc(struct irq_desc *desc, int node,
}
#else
-struct irq_cfg *irq_cfg(unsigned int irq)
-{
- return irq < nr_irqs ? irq_cfgx + irq : NULL;
-}
void x86_copy_chip_data(struct irq_desc *old_desc,
struct irq_desc *desc, int node)
@@ -619,16 +603,12 @@ static void unmask_IO_APIC_irq_desc(struct irq_desc *desc)
raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
-static void mask_IO_APIC_irq(unsigned int irq)
+static void mask_IO_APIC_irq(struct irq_desc *desc)
{
- struct irq_desc *desc = irq_to_desc(irq);
-
mask_IO_APIC_irq_desc(desc);
}
-static void unmask_IO_APIC_irq(unsigned int irq)
+static void unmask_IO_APIC_irq(struct irq_desc *desc)
{
- struct irq_desc *desc = irq_to_desc(irq);
-
unmask_IO_APIC_irq_desc(desc);
}
@@ -1497,7 +1477,7 @@ static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq
ioapic_register_intr(irq, desc, trigger);
if (irq < legacy_pic->nr_legacy_irqs)
- legacy_pic->chip->mask(irq);
+ legacy_pic->chip->mask(desc);
ioapic_write_entry(apic_id, pin, entry);
}
@@ -2296,29 +2276,29 @@ static int __init timer_irq_works(void)
* an edge even if it isn't on the 8259A...
*/
-static unsigned int startup_ioapic_irq(unsigned int irq)
+static unsigned int startup_ioapic_irq(struct irq_desc *desc)
{
int was_pending = 0;
unsigned long flags;
struct irq_cfg *cfg;
raw_spin_lock_irqsave(&ioapic_lock, flags);
- if (irq < legacy_pic->nr_legacy_irqs) {
- legacy_pic->chip->mask(irq);
- if (legacy_pic->irq_pending(irq))
+ if (desc->irq < legacy_pic->nr_legacy_irqs) {
+ legacy_pic->chip->mask(desc);
+ if (legacy_pic->irq_pending(desc))
was_pending = 1;
}
- cfg = irq_cfg(irq);
+ cfg = desc->chip_data;
__unmask_IO_APIC_irq(cfg);
raw_spin_unlock_irqrestore(&ioapic_lock, flags);
return was_pending;
}
-static int ioapic_retrigger_irq(unsigned int irq)
+static int ioapic_retrigger_irq(struct irq_desc *desc)
{
- struct irq_cfg *cfg = irq_cfg(irq);
+ struct irq_cfg *cfg = desc->chip_data;
unsigned long flags;
raw_spin_lock_irqsave(&vector_lock, flags);
@@ -2427,12 +2407,8 @@ set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
}
static int
-set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask)
+set_ioapic_affinity_irq(struct irq_desc *desc, const struct cpumask *mask)
{
- struct irq_desc *desc;
-
- desc = irq_to_desc(irq);
-
return set_ioapic_affinity_irq_desc(desc, mask);
}
@@ -2495,11 +2471,9 @@ static int set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
{
return migrate_ioapic_irq_desc(desc, mask);
}
-static int set_ir_ioapic_affinity_irq(unsigned int irq,
+static int set_ir_ioapic_affinity_irq(struct irq_desc *desc,
const struct cpumask *mask)
{
- struct irq_desc *desc = irq_to_desc(irq);
-
return set_ir_ioapic_affinity_irq_desc(desc, mask);
}
#else
@@ -2592,12 +2566,10 @@ void irq_force_complete_move(int irq)
static inline void irq_complete_move(struct irq_desc **descp) {}
#endif
-static void ack_apic_edge(unsigned int irq)
+static void ack_apic_edge(struct irq_desc *desc)
{
- struct irq_desc *desc = irq_to_desc(irq);
-
irq_complete_move(&desc);
- move_native_irq(irq);
+ move_native_irq(desc);
ack_APIC_irq();
}
@@ -2656,9 +2628,8 @@ static void eoi_ioapic_irq(struct irq_desc *desc)
raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
-static void ack_apic_level(unsigned int irq)
+static void ack_apic_level(struct irq_desc *desc)
{
- struct irq_desc *desc = irq_to_desc(irq);
unsigned long v;
int i;
struct irq_cfg *cfg;
@@ -2758,21 +2729,19 @@ static void ack_apic_level(unsigned int irq)
*/
cfg = desc->chip_data;
if (!io_apic_level_ack_pending(cfg))
- move_masked_irq(irq);
+ move_masked_irq(desc);
unmask_IO_APIC_irq_desc(desc);
}
}
#ifdef CONFIG_INTR_REMAP
-static void ir_ack_apic_edge(unsigned int irq)
+static void ir_ack_apic_edge(struct irq_desc *desc)
{
ack_APIC_irq();
}
-static void ir_ack_apic_level(unsigned int irq)
+static void ir_ack_apic_level(struct irq_desc *desc)
{
- struct irq_desc *desc = irq_to_desc(irq);
-
ack_APIC_irq();
eoi_ioapic_irq(desc);
}
@@ -2850,7 +2819,7 @@ static inline void init_IO_APIC_traps(void)
* The local APIC irq-chip implementation:
*/
-static void mask_lapic_irq(unsigned int irq)
+static void mask_lapic_irq(struct irq_desc *desc)
{
unsigned long v;
@@ -2858,7 +2827,7 @@ static void mask_lapic_irq(unsigned int irq)
apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
}
-static void unmask_lapic_irq(unsigned int irq)
+static void unmask_lapic_irq(struct irq_desc *desc)
{
unsigned long v;
@@ -2866,7 +2835,7 @@ static void unmask_lapic_irq(unsigned int irq)
apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
}
-static void ack_lapic_irq(unsigned int irq)
+static void ack_lapic_irq(struct irq_desc *desc)
{
ack_APIC_irq();
}
@@ -2995,7 +2964,7 @@ static inline void __init check_timer(void)
/*
* get/set the timer IRQ vector:
*/
- legacy_pic->chip->mask(0);
+ legacy_pic->chip->mask(desc);
assign_irq_vector(desc, cfg, apic->target_cpus());
/*
@@ -3067,7 +3036,7 @@ static inline void __init check_timer(void)
if (timer_irq_works()) {
if (nmi_watchdog == NMI_IO_APIC) {
setup_nmi();
- legacy_pic->chip->unmask(0);
+ legacy_pic->chip->unmask(desc);
}
if (disable_timer_pin_1 > 0)
clear_IO_APIC_pin(apic1, pin1);
@@ -3090,14 +3059,14 @@ static inline void __init check_timer(void)
*/
replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2);
setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
- legacy_pic->chip->unmask(0);
+ legacy_pic->chip->unmask(desc);
if (timer_irq_works()) {
apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
timer_through_8259 = 1;
if (nmi_watchdog == NMI_IO_APIC) {
- legacy_pic->chip->mask(0);
+ legacy_pic->chip->mask(desc);
setup_nmi();
- legacy_pic->chip->unmask(0);
+ legacy_pic->chip->unmask(desc);
}
goto out;
}
@@ -3105,7 +3074,7 @@ static inline void __init check_timer(void)
* Cleanup, just in case ...
*/
local_irq_disable();
- legacy_pic->chip->mask(0);
+ legacy_pic->chip->mask(desc);
clear_IO_APIC_pin(apic2, pin2);
apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
}
@@ -3124,14 +3093,14 @@ static inline void __init check_timer(void)
lapic_register_intr(0, desc);
apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */
- legacy_pic->chip->unmask(0);
+ legacy_pic->chip->unmask(desc);
if (timer_irq_works()) {
apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
goto out;
}
local_irq_disable();
- legacy_pic->chip->mask(0);
+ legacy_pic->chip->mask(desc);
apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
@@ -3373,10 +3342,10 @@ void destroy_irq(unsigned int irq)
* MSI message composition
*/
#ifdef CONFIG_PCI_MSI
-static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
+static int msi_compose_msg(struct pci_dev *pdev, struct irq_desc *desc,
struct msi_msg *msg, u8 hpet_id)
{
- struct irq_desc *desc;
+ unsigned int irq = desc->irq;
struct irq_cfg *cfg;
int err;
unsigned dest;
@@ -3384,7 +3353,6 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
if (disable_apic)
return -ENXIO;
- desc = irq_to_desc(irq);
cfg = desc->chip_data;
err = assign_irq_vector(desc, cfg, apic->target_cpus());
if (err)
@@ -3452,9 +3420,8 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
}
#ifdef CONFIG_SMP
-static int set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
+static int set_msi_irq_affinity(struct irq_desc *desc, const struct cpumask *mask)
{
- struct irq_desc *desc = irq_to_desc(irq);
struct irq_cfg *cfg;
struct msi_msg msg;
unsigned int dest;
@@ -3481,9 +3448,9 @@ static int set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
* done in the process context using interrupt-remapping hardware.
*/
static int
-ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
+ir_set_msi_irq_affinity(struct irq_desc *desc, const struct cpumask *mask)
{
- struct irq_desc *desc = irq_to_desc(irq);
+ unsigned int irq = desc->irq;
struct irq_cfg *cfg = desc->chip_data;
unsigned int dest;
struct irte irte;
@@ -3581,8 +3548,9 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
{
int ret;
struct msi_msg msg;
+ struct irq_desc *desc = irq_to_desc(irq);
- ret = msi_compose_msg(dev, irq, &msg, -1);
+ ret = msi_compose_msg(dev, desc, &msg, -1);
if (ret < 0)
return ret;
@@ -3590,7 +3558,6 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
write_msi_msg(irq, &msg);
if (irq_remapped(irq)) {
- struct irq_desc *desc = irq_to_desc(irq);
/*
* irq migration in process context
*/
@@ -3672,9 +3639,8 @@ void arch_teardown_msi_irq(unsigned int irq)
#if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP)
#ifdef CONFIG_SMP
-static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
+static int dmar_msi_set_affinity(struct irq_desc *desc, const struct cpumask *mask)
{
- struct irq_desc *desc = irq_to_desc(irq);
struct irq_cfg *cfg;
struct msi_msg msg;
unsigned int dest;
@@ -3684,14 +3650,14 @@ static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
cfg = desc->chip_data;
- dmar_msi_read(irq, &msg);
+ dmar_msi_read(desc, &msg);
msg.data &= ~MSI_DATA_VECTOR_MASK;
msg.data |= MSI_DATA_VECTOR(cfg->vector);
msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
msg.address_lo |= MSI_ADDR_DEST_ID(dest);
- dmar_msi_write(irq, &msg);
+ dmar_msi_write(desc, &msg);
return 0;
}
@@ -3716,11 +3682,12 @@ int arch_setup_dmar_msi(unsigned int irq)
{
int ret;
struct msi_msg msg;
+ struct irq_desc *desc = irq_to_desc(irq);
- ret = msi_compose_msg(NULL, irq, &msg, -1);
+ ret = msi_compose_msg(NULL, desc, &msg, -1);
if (ret < 0)
return ret;
- dmar_msi_write(irq, &msg);
+ dmar_msi_write(desc, &msg);
set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
"edge");
return 0;
@@ -3730,9 +3697,8 @@ int arch_setup_dmar_msi(unsigned int irq)
#ifdef CONFIG_HPET_TIMER
#ifdef CONFIG_SMP
-static int hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
+static int hpet_msi_set_affinity(struct irq_desc *desc, const struct cpumask *mask)
{
- struct irq_desc *desc = irq_to_desc(irq);
struct irq_cfg *cfg;
struct msi_msg msg;
unsigned int dest;
@@ -3742,14 +3708,14 @@ static int hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
cfg = desc->chip_data;
- hpet_msi_read(irq, &msg);
+ hpet_msi_read(desc, &msg);
msg.data &= ~MSI_DATA_VECTOR_MASK;
msg.data |= MSI_DATA_VECTOR(cfg->vector);
msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
msg.address_lo |= MSI_ADDR_DEST_ID(dest);
- hpet_msi_write(irq, &msg);
+ hpet_msi_write(desc, &msg);
return 0;
}
@@ -3804,11 +3770,11 @@ int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
return -1;
}
- ret = msi_compose_msg(NULL, irq, &msg, id);
+ ret = msi_compose_msg(NULL, desc, &msg, id);
if (ret < 0)
return ret;
- hpet_msi_write(irq, &msg);
+ hpet_msi_write(desc, &msg);
desc->status |= IRQ_MOVE_PCNTXT;
if (irq_remapped(irq))
set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type,
@@ -3829,10 +3795,10 @@ int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
#ifdef CONFIG_SMP
-static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
+static void target_ht_irq(struct irq_desc *desc, unsigned int dest, u8 vector)
{
struct ht_irq_msg msg;
- fetch_ht_irq_msg(irq, &msg);
+ fetch_ht_irq_msg(desc, &msg);
msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
@@ -3840,12 +3806,11 @@ static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
- write_ht_irq_msg(irq, &msg);
+ write_ht_irq_msg(desc, &msg);
}
-static int set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask)
+static int set_ht_irq_affinity(struct irq_desc *desc, const struct cpumask *mask)
{
- struct irq_desc *desc = irq_to_desc(irq);
struct irq_cfg *cfg;
unsigned int dest;
@@ -3854,7 +3819,7 @@ static int set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask)
cfg = desc->chip_data;
- target_ht_irq(irq, dest, cfg->vector);
+ target_ht_irq(desc, dest, cfg->vector);
return 0;
}
@@ -3909,7 +3874,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
HT_IRQ_LOW_MT_ARBITRATED) |
HT_IRQ_LOW_IRQ_MASKED;
- write_ht_irq_msg(irq, &msg);
+ write_ht_irq_msg(desc, &msg);
set_irq_chip_and_handler_name(irq, &ht_irq_chip,
handle_edge_irq, "edge");
@@ -4399,7 +4364,7 @@ void __init pre_init_apic_IRQ0(void)
setup_local_APIC();
- cfg = irq_cfg(0);
+ cfg = desc->chip_data;
add_pin_to_irq_node(cfg, 0, 0, 0);
set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index ee4fa1b..3355b99 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -426,9 +426,9 @@ static int hpet_legacy_next_event(unsigned long delta,
static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev);
static struct hpet_dev *hpet_devs;
-void hpet_msi_unmask(unsigned int irq)
+void hpet_msi_unmask(struct irq_desc *desc)
{
- struct hpet_dev *hdev = get_irq_data(irq);
+ struct hpet_dev *hdev = get_irq_desc_data(desc);
unsigned int cfg;
/* unmask it */
@@ -437,10 +437,10 @@ void hpet_msi_unmask(unsigned int irq)
hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
}
-void hpet_msi_mask(unsigned int irq)
+void hpet_msi_mask(struct irq_desc *desc)
{
unsigned int cfg;
- struct hpet_dev *hdev = get_irq_data(irq);
+ struct hpet_dev *hdev = get_irq_desc_data(desc);
/* mask it */
cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
@@ -448,17 +448,17 @@ void hpet_msi_mask(unsigned int irq)
hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
}
-void hpet_msi_write(unsigned int irq, struct msi_msg *msg)
+void hpet_msi_write(struct irq_desc *desc, struct msi_msg *msg)
{
- struct hpet_dev *hdev = get_irq_data(irq);
+ struct hpet_dev *hdev = get_irq_desc_data(desc);
hpet_writel(msg->data, HPET_Tn_ROUTE(hdev->num));
hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hdev->num) + 4);
}
-void hpet_msi_read(unsigned int irq, struct msi_msg *msg)
+void hpet_msi_read(struct irq_desc *desc, struct msi_msg *msg)
{
- struct hpet_dev *hdev = get_irq_data(irq);
+ struct hpet_dev *hdev = get_irq_desc_data(desc);
msg->data = hpet_readl(HPET_Tn_ROUTE(hdev->num));
msg->address_lo = hpet_readl(HPET_Tn_ROUTE(hdev->num) + 4);
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index fb725ee..b248555 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -33,13 +33,13 @@
static int i8259A_auto_eoi;
DEFINE_RAW_SPINLOCK(i8259A_lock);
-static void mask_and_ack_8259A(unsigned int);
+static void mask_and_ack_8259A(struct irq_desc *desc);
static void mask_8259A(void);
static void unmask_8259A(void);
-static void disable_8259A_irq(unsigned int irq);
-static void enable_8259A_irq(unsigned int irq);
+static void disable_8259A_irq(struct irq_desc *desc);
+static void enable_8259A_irq(struct irq_desc *desc);
static void init_8259A(int auto_eoi);
-static int i8259A_irq_pending(unsigned int irq);
+static int i8259A_irq_pending(struct irq_desc *desc);
struct irq_chip i8259A_chip = {
.name = "XT-PIC",
@@ -69,8 +69,9 @@ unsigned int cached_irq_mask = 0xffff;
*/
unsigned long io_apic_irqs;
-static void disable_8259A_irq(unsigned int irq)
+static void disable_8259A_irq(struct irq_desc *desc)
{
+ unsigned int irq = desc->irq;
unsigned int mask = 1 << irq;
unsigned long flags;
@@ -83,8 +84,9 @@ static void disable_8259A_irq(unsigned int irq)
raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}
-static void enable_8259A_irq(unsigned int irq)
+static void enable_8259A_irq(struct irq_desc *desc)
{
+ unsigned int irq = desc->irq;
unsigned int mask = ~(1 << irq);
unsigned long flags;
@@ -97,8 +99,9 @@ static void enable_8259A_irq(unsigned int irq)
raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}
-static int i8259A_irq_pending(unsigned int irq)
+static int i8259A_irq_pending(struct irq_desc *desc)
{
+ unsigned int irq = desc->irq;
unsigned int mask = 1<<irq;
unsigned long flags;
int ret;
@@ -151,8 +154,9 @@ static inline int i8259A_irq_real(unsigned int irq)
* first, _then_ send the EOI, and the order of EOI
* to the two 8259s is important!
*/
-static void mask_and_ack_8259A(unsigned int irq)
+static void mask_and_ack_8259A(struct irq_desc *desc)
{
+ unsigned int irq = desc->irq;
unsigned int irqmask = 1 << irq;
unsigned long flags;
@@ -372,17 +376,18 @@ static void init_8259A(int auto_eoi)
*/
static void legacy_pic_noop(void) { };
+static void legacy_pic_desc_noop(struct irq_desc *desc) { };
static void legacy_pic_uint_noop(unsigned int unused) { };
static void legacy_pic_int_noop(int unused) { };
static struct irq_chip dummy_pic_chip = {
.name = "dummy pic",
- .mask = legacy_pic_uint_noop,
- .unmask = legacy_pic_uint_noop,
- .disable = legacy_pic_uint_noop,
- .mask_ack = legacy_pic_uint_noop,
+ .mask = legacy_pic_desc_noop,
+ .unmask = legacy_pic_desc_noop,
+ .disable = legacy_pic_desc_noop,
+ .mask_ack = legacy_pic_desc_noop,
};
-static int legacy_pic_irq_pending_noop(unsigned int irq)
+static int legacy_pic_irq_pending_noop(struct irq_desc *desc)
{
return 0;
}
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index f71625c..ae70844 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -24,10 +24,10 @@ void (*x86_platform_ipi_callback)(void) = NULL;
* 'what should we do if we get a hw irq event on an illegal vector'.
* each architecture has to answer this themselves.
*/
-void ack_bad_irq(unsigned int irq)
+void ack_bad_irq(struct irq_desc *desc)
{
if (printk_ratelimit())
- pr_err("unexpected IRQ trap at vector %02x\n", irq);
+ pr_err("unexpected IRQ trap at irq %02x\n", desc->irq);
/*
* Currently unexpected vectors happen only on SMP and APIC.
@@ -316,15 +316,15 @@ void fixup_irqs(void)
}
if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->mask)
- desc->chip->mask(irq);
+ desc->chip->mask(desc);
if (desc->chip->set_affinity)
- desc->chip->set_affinity(irq, affinity);
+ desc->chip->set_affinity(desc, affinity);
else if (!(warned++))
set_affinity = 0;
if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask)
- desc->chip->unmask(irq);
+ desc->chip->unmask(desc);
raw_spin_unlock(&desc->lock);
@@ -357,7 +357,7 @@ void fixup_irqs(void)
raw_spin_lock(&desc->lock);
if (desc->chip->retrigger)
- desc->chip->retrigger(irq);
+ desc->chip->retrigger(desc);
raw_spin_unlock(&desc->lock);
}
}
diff --git a/arch/x86/kernel/uv_irq.c b/arch/x86/kernel/uv_irq.c
index 44c430d..2b601d3 100644
--- a/arch/x86/kernel/uv_irq.c
+++ b/arch/x86/kernel/uv_irq.c
@@ -27,18 +27,18 @@ struct uv_irq_2_mmr_pnode{
static spinlock_t uv_irq_lock;
static struct rb_root uv_irq_root;
-static int uv_set_irq_affinity(unsigned int, const struct cpumask *);
+static int uv_set_irq_affinity(struct irq_desc *desc, const struct cpumask *);
-static void uv_noop(unsigned int irq)
+static void uv_noop(struct irq_desc *desc)
{
}
-static unsigned int uv_noop_ret(unsigned int irq)
+static unsigned int uv_noop_ret(struct irq_desc *desc)
{
return 0;
}
-static void uv_ack_apic(unsigned int irq)
+static void uv_ack_apic(struct irq_desc *desc)
{
ack_APIC_irq();
}
@@ -156,7 +156,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
sizeof(unsigned long));
- cfg = irq_cfg(irq);
+ cfg = desc->chip_data;
err = assign_irq_vector(desc, cfg, eligible_cpu);
if (err != 0)
@@ -208,9 +208,9 @@ static void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset)
uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
}
-static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask)
+static int uv_set_irq_affinity(struct irq_desc *desc, const struct cpumask *mask)
{
- struct irq_desc *desc = irq_to_desc(irq);
+ unsigned int irq = desc->irq;
struct irq_cfg *cfg = desc->chip_data;
unsigned int dest;
unsigned long mmr_value;
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c
index e680ea5..8bd5075 100644
--- a/arch/x86/kernel/visws_quirks.c
+++ b/arch/x86/kernel/visws_quirks.c
@@ -430,14 +430,15 @@ static int is_co_apic(unsigned int irq)
* This is the SGI Cobalt (IO-)APIC:
*/
-static void enable_cobalt_irq(unsigned int irq)
+static void enable_cobalt_irq(struct irq_desc *desc)
{
+ unsigned int irq = desc->irq;
co_apic_set(is_co_apic(irq), irq);
}
-static void disable_cobalt_irq(unsigned int irq)
+static void disable_cobalt_irq(struct irq_desc *desc)
{
- int entry = is_co_apic(irq);
+ int entry = is_co_apic(desc->irq);
co_apic_write(CO_APIC_LO(entry), CO_APIC_MASK);
co_apic_read(CO_APIC_LO(entry));
@@ -448,37 +449,35 @@ static void disable_cobalt_irq(unsigned int irq)
* map this to the Cobalt APIC entry where it's physically wired.
* This is called via request_irq -> setup_irq -> irq_desc->startup()
*/
-static unsigned int startup_cobalt_irq(unsigned int irq)
+static unsigned int startup_cobalt_irq(struct irq_desc *desc)
{
unsigned long flags;
- struct irq_desc *desc = irq_to_desc(irq);
spin_lock_irqsave(&cobalt_lock, flags);
if ((desc->status & (IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING)))
desc->status &= ~(IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING);
- enable_cobalt_irq(irq);
+ enable_cobalt_irq(desc);
spin_unlock_irqrestore(&cobalt_lock, flags);
return 0;
}
-static void ack_cobalt_irq(unsigned int irq)
+static void ack_cobalt_irq(struct irq_desc *desc)
{
unsigned long flags;
spin_lock_irqsave(&cobalt_lock, flags);
- disable_cobalt_irq(irq);
+ disable_cobalt_irq(desc);
apic_write(APIC_EOI, APIC_EIO_ACK);
spin_unlock_irqrestore(&cobalt_lock, flags);
}
-static void end_cobalt_irq(unsigned int irq)
+static void end_cobalt_irq(struct irq_desc *desc)
{
unsigned long flags;
- struct irq_desc *desc = irq_to_desc(irq);
spin_lock_irqsave(&cobalt_lock, flags);
if (!(desc->status & (IRQ_DISABLED | IRQ_INPROGRESS)))
- enable_cobalt_irq(irq);
+ enable_cobalt_irq(desc);
spin_unlock_irqrestore(&cobalt_lock, flags);
}
@@ -503,19 +502,19 @@ static struct irq_chip cobalt_irq_type = {
* interrupt controller type, and through a special virtual interrupt-
* controller. Device drivers only see the virtual interrupt sources.
*/
-static unsigned int startup_piix4_master_irq(unsigned int irq)
+static unsigned int startup_piix4_master_irq(struct irq_desc *desc)
{
legacy_pic->init(0);
- return startup_cobalt_irq(irq);
+ return startup_cobalt_irq(desc);
}
-static void end_piix4_master_irq(unsigned int irq)
+static void end_piix4_master_irq(struct irq_desc *desc)
{
unsigned long flags;
spin_lock_irqsave(&cobalt_lock, flags);
- enable_cobalt_irq(irq);
+ enable_cobalt_irq(desc);
spin_unlock_irqrestore(&cobalt_lock, flags);
}
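[ Note: every conversion below follows one of two mechanical shapes.
Callbacks that only needed the irq number now derive it from the
descriptor; callbacks that needed the descriptor drop their
irq_to_desc() lookup. A before/after sketch of the pattern (the
"example" names are hypothetical, not part of this patch):

static void example_ack_old(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);	/* extra lookup */

	desc->status &= ~IRQ_PENDING;
}

static void example_ack_new(struct irq_desc *desc)
{
	unsigned int irq = desc->irq;	/* cheap field read, when needed */

	pr_debug("ack irq %u\n", irq);
	desc->status &= ~IRQ_PENDING;
}
]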
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index fb65235..41257f6 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -84,7 +84,7 @@ static inline unsigned int vmi_get_timer_vector(void)
/** vmi clockchip */
#ifdef CONFIG_X86_LOCAL_APIC
-static unsigned int startup_timer_irq(unsigned int irq)
+static unsigned int startup_timer_irq(struct irq_desc *desc)
{
unsigned long val = apic_read(APIC_LVTT);
apic_write(APIC_LVTT, vmi_get_timer_vector());
@@ -92,19 +92,19 @@ static unsigned int startup_timer_irq(unsigned int irq)
return (val & APIC_SEND_PENDING);
}
-static void mask_timer_irq(unsigned int irq)
+static void mask_timer_irq(struct irq_desc *desc)
{
unsigned long val = apic_read(APIC_LVTT);
apic_write(APIC_LVTT, val | APIC_LVT_MASKED);
}
-static void unmask_timer_irq(unsigned int irq)
+static void unmask_timer_irq(struct irq_desc *desc)
{
unsigned long val = apic_read(APIC_LVTT);
apic_write(APIC_LVTT, val & ~APIC_LVT_MASKED);
}
-static void ack_timer_irq(unsigned int irq)
+static void ack_timer_irq(struct irq_desc *desc)
{
ack_APIC_irq();
}
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index e0f6b26..be7a653 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -789,14 +789,14 @@ static void lguest_flush_tlb_kernel(void)
* simple as setting a bit. We don't actually "ack" interrupts as such, we
* just mask and unmask them. I wonder if we should be cleverer?
*/
-static void disable_lguest_irq(unsigned int irq)
+static void disable_lguest_irq(struct irq_desc *desc)
{
- set_bit(irq, lguest_data.blocked_interrupts);
+ set_bit(desc->irq, lguest_data.blocked_interrupts);
}
-static void enable_lguest_irq(unsigned int irq)
+static void enable_lguest_irq(struct irq_desc *desc)
{
- clear_bit(irq, lguest_data.blocked_interrupts);
+ clear_bit(desc->irq, lguest_data.blocked_interrupts);
}
/* This structure describes the lguest IRQ controller. */
diff --git a/arch/xtensa/include/asm/hardirq.h b/arch/xtensa/include/asm/hardirq.h
index 87cb19d..f0230ff 100644
--- a/arch/xtensa/include/asm/hardirq.h
+++ b/arch/xtensa/include/asm/hardirq.h
@@ -22,7 +22,8 @@ typedef struct {
unsigned int __nmi_count; /* arch dependent */
} ____cacheline_aligned irq_cpustat_t;
-void ack_bad_irq(unsigned int irq);
+struct irq_desc;
+void ack_bad_irq(struct irq_desc *desc);
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
#endif /* _XTENSA_HARDIRQ_H */
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index 8cd3848..f86cf05 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -30,9 +30,9 @@ atomic_t irq_err_count;
* 'what should we do if we get a hw irq event on an illegal vector'.
* each architecture has to answer this themselves.
*/
-void ack_bad_irq(unsigned int irq)
+void ack_bad_irq(struct irq_desc *desc)
{
- printk("unexpected IRQ trap at vector %02x\n", irq);
+ printk("unexpected IRQ trap at vector %02x\n", desc->irq);
}
/*
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
index dd8ebc7..62f4307 100644
--- a/drivers/dma/ipu/ipu_irq.c
+++ b/drivers/dma/ipu/ipu_irq.c
@@ -94,9 +94,9 @@ static struct ipu_irq_map *src2map(unsigned int src)
return NULL;
}
-static void ipu_irq_unmask(unsigned int irq)
+static void ipu_irq_unmask(struct irq_desc *desc)
{
- struct ipu_irq_map *map = get_irq_chip_data(irq);
+ struct ipu_irq_map *map = get_irq_desc_chip_data(desc);
struct ipu_irq_bank *bank;
uint32_t reg;
unsigned long lock_flags;
@@ -106,7 +106,7 @@ static void ipu_irq_unmask(unsigned int irq)
bank = map->bank;
if (!bank) {
spin_unlock_irqrestore(&bank_lock, lock_flags);
- pr_err("IPU: %s(%u) - unmapped!\n", __func__, irq);
+ pr_err("IPU: %s(%u) - unmapped!\n", __func__, desc->irq);
return;
}
@@ -117,9 +117,9 @@ static void ipu_irq_unmask(unsigned int irq)
spin_unlock_irqrestore(&bank_lock, lock_flags);
}
-static void ipu_irq_mask(unsigned int irq)
+static void ipu_irq_mask(struct irq_desc *desc)
{
- struct ipu_irq_map *map = get_irq_chip_data(irq);
+ struct ipu_irq_map *map = get_irq_desc_chip_data(desc);
struct ipu_irq_bank *bank;
uint32_t reg;
unsigned long lock_flags;
@@ -129,7 +129,7 @@ static void ipu_irq_mask(unsigned int irq)
bank = map->bank;
if (!bank) {
spin_unlock_irqrestore(&bank_lock, lock_flags);
- pr_err("IPU: %s(%u) - unmapped!\n", __func__, irq);
+ pr_err("IPU: %s(%u) - unmapped!\n", __func__, desc->irq);
return;
}
@@ -140,9 +140,9 @@ static void ipu_irq_mask(unsigned int irq)
spin_unlock_irqrestore(&bank_lock, lock_flags);
}
-static void ipu_irq_ack(unsigned int irq)
+static void ipu_irq_ack(struct irq_desc *desc)
{
- struct ipu_irq_map *map = get_irq_chip_data(irq);
+ struct ipu_irq_map *map = get_irq_desc_chip_data(desc);
struct ipu_irq_bank *bank;
unsigned long lock_flags;
@@ -151,7 +151,7 @@ static void ipu_irq_ack(unsigned int irq)
bank = map->bank;
if (!bank) {
spin_unlock_irqrestore(&bank_lock, lock_flags);
- pr_err("IPU: %s(%u) - unmapped!\n", __func__, irq);
+ pr_err("IPU: %s(%u) - unmapped!\n", __func__, desc->irq);
return;
}
diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/langwell_gpio.c
index 6c0ebbd..0eb44e7 100644
--- a/drivers/gpio/langwell_gpio.c
+++ b/drivers/gpio/langwell_gpio.c
@@ -113,9 +113,10 @@ static int lnw_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
return lnw->irq_base + offset;
}
-static int lnw_irq_type(unsigned irq, unsigned type)
+static int lnw_irq_type(struct irq_desc *desc, unsigned type)
{
- struct lnw_gpio *lnw = get_irq_chip_data(irq);
+ unsigned int irq = desc->irq;
+ struct lnw_gpio *lnw = get_irq_desc_chip_data(desc);
u32 gpio = irq - lnw->irq_base;
u8 reg = gpio / 32;
unsigned long flags;
@@ -142,11 +143,11 @@ static int lnw_irq_type(unsigned irq, unsigned type)
return 0;
};
-static void lnw_irq_unmask(unsigned irq)
+static void lnw_irq_unmask(struct irq_desc *desc)
{
};
-static void lnw_irq_mask(unsigned irq)
+static void lnw_irq_mask(struct irq_desc *desc)
{
};
@@ -184,7 +185,7 @@ static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
/* clear the edge detect status bit */
writel(gedr_v, gedr);
}
- desc->chip->eoi(irq);
+ desc->chip->eoi(desc);
}
static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index ab5daab..9fb1ba3 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -227,37 +227,40 @@ static int pca953x_gpio_to_irq(struct gpio_chip *gc, unsigned off)
return chip->irq_base + off;
}
-static void pca953x_irq_mask(unsigned int irq)
+static void pca953x_irq_mask(struct irq_desc *desc)
{
- struct pca953x_chip *chip = get_irq_chip_data(irq);
+ unsigned int irq = desc->irq;
+ struct pca953x_chip *chip = get_irq_desc_chip_data(desc);
chip->irq_mask &= ~(1 << (irq - chip->irq_base));
}
-static void pca953x_irq_unmask(unsigned int irq)
+static void pca953x_irq_unmask(struct irq_desc *desc)
{
- struct pca953x_chip *chip = get_irq_chip_data(irq);
+ unsigned int irq = desc->irq;
+ struct pca953x_chip *chip = get_irq_desc_chip_data(desc);
chip->irq_mask |= 1 << (irq - chip->irq_base);
}
-static void pca953x_irq_bus_lock(unsigned int irq)
+static void pca953x_irq_bus_lock(struct irq_desc *desc)
{
- struct pca953x_chip *chip = get_irq_chip_data(irq);
+ struct pca953x_chip *chip = get_irq_desc_chip_data(desc);
mutex_lock(&chip->irq_lock);
}
-static void pca953x_irq_bus_sync_unlock(unsigned int irq)
+static void pca953x_irq_bus_sync_unlock(struct irq_desc *desc)
{
- struct pca953x_chip *chip = get_irq_chip_data(irq);
+ struct pca953x_chip *chip = get_irq_desc_chip_data(desc);
mutex_unlock(&chip->irq_lock);
}
-static int pca953x_irq_set_type(unsigned int irq, unsigned int type)
+static int pca953x_irq_set_type(struct irq_desc *desc, unsigned int type)
{
- struct pca953x_chip *chip = get_irq_chip_data(irq);
+ unsigned int irq = desc->irq;
+ struct pca953x_chip *chip = get_irq_desc_chip_data(desc);
uint16_t level = irq - chip->irq_base;
uint16_t mask = 1 << level;
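[ Note: the accessor swap used throughout -- get_irq_chip_data(irq)
becoming get_irq_desc_chip_data(desc) -- is where the lookup saving
comes from. A simplified sketch of what the desc-based accessors are
assumed to reduce to, with field names matching the direct
desc->chip_data uses elsewhere in this patch:

static inline void *get_irq_desc_chip_data(struct irq_desc *desc)
{
	return desc->chip_data;		/* no irq_to_desc() needed */
}

static inline void *get_irq_desc_data(struct irq_desc *desc)
{
	return desc->handler_data;
}

static inline struct msi_desc *get_irq_desc_msi(struct irq_desc *desc)
{
	return desc->msi_desc;
}
]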
diff --git a/drivers/gpio/pl061.c b/drivers/gpio/pl061.c
index 3ad1eeb..9dddd6e 100644
--- a/drivers/gpio/pl061.c
+++ b/drivers/gpio/pl061.c
@@ -122,9 +122,10 @@ static int pl061_to_irq(struct gpio_chip *gc, unsigned offset)
/*
* PL061 GPIO IRQ
*/
-static void pl061_irq_disable(unsigned irq)
+static void pl061_irq_disable(struct irq_desc *desc)
{
- struct pl061_gpio *chip = get_irq_chip_data(irq);
+ unsigned int irq = desc->irq;
+ struct pl061_gpio *chip = get_irq_desc_chip_data(desc);
int offset = irq - chip->irq_base;
unsigned long flags;
u8 gpioie;
@@ -136,9 +137,10 @@ static void pl061_irq_disable(unsigned irq)
spin_unlock_irqrestore(&chip->irq_lock, flags);
}
-static void pl061_irq_enable(unsigned irq)
+static void pl061_irq_enable(struct irq_desc *desc)
{
- struct pl061_gpio *chip = get_irq_chip_data(irq);
+ unsigned int irq = desc->irq;
+ struct pl061_gpio *chip = get_irq_desc_chip_data(desc);
int offset = irq - chip->irq_base;
unsigned long flags;
u8 gpioie;
@@ -150,9 +152,10 @@ static void pl061_irq_enable(unsigned irq)
spin_unlock_irqrestore(&chip->irq_lock, flags);
}
-static int pl061_irq_type(unsigned irq, unsigned trigger)
+static int pl061_irq_type(struct irq_desc *desc, unsigned trigger)
{
- struct pl061_gpio *chip = get_irq_chip_data(irq);
+ unsigned int irq = desc->irq;
+ struct pl061_gpio *chip = get_irq_desc_chip_data(desc);
int offset = irq - chip->irq_base;
unsigned long flags;
u8 gpiois, gpioibe, gpioiev;
@@ -207,7 +210,7 @@ static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
struct list_head *ptr;
struct pl061_gpio *chip;
- desc->chip->ack(irq);
+ desc->chip->ack(desc);
list_for_each(ptr, chip_list) {
unsigned long pending;
int offset;
@@ -222,7 +225,7 @@ static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
for_each_set_bit(offset, &pending, PL061_GPIO_NR)
generic_handle_irq(pl061_to_irq(&chip->gc, offset));
}
- desc->chip->unmask(irq);
+ desc->chip->unmask(desc);
}
static int __init pl061_probe(struct amba_device *dev, struct amba_id *id)
diff --git a/drivers/gpio/timbgpio.c b/drivers/gpio/timbgpio.c
index d4295fa..55c51f5 100644
--- a/drivers/gpio/timbgpio.c
+++ b/drivers/gpio/timbgpio.c
@@ -107,25 +107,28 @@ static int timbgpio_to_irq(struct gpio_chip *gpio, unsigned offset)
/*
* GPIO IRQ
*/
-static void timbgpio_irq_disable(unsigned irq)
+static void timbgpio_irq_disable(struct irq_desc *desc)
{
- struct timbgpio *tgpio = get_irq_chip_data(irq);
+ unsigned int irq = desc->irq;
+ struct timbgpio *tgpio = get_irq_desc_chip_data(desc);
int offset = irq - tgpio->irq_base;
timbgpio_update_bit(&tgpio->gpio, offset, TGPIO_IER, 0);
}
-static void timbgpio_irq_enable(unsigned irq)
+static void timbgpio_irq_enable(struct irq_desc *desc)
{
- struct timbgpio *tgpio = get_irq_chip_data(irq);
+ unsigned int irq = desc->irq;
+ struct timbgpio *tgpio = get_irq_desc_chip_data(desc);
int offset = irq - tgpio->irq_base;
timbgpio_update_bit(&tgpio->gpio, offset, TGPIO_IER, 1);
}
-static int timbgpio_irq_type(unsigned irq, unsigned trigger)
+static int timbgpio_irq_type(struct irq_desc *desc, unsigned trigger)
{
- struct timbgpio *tgpio = get_irq_chip_data(irq);
+ unsigned int irq = desc->irq;
+ struct timbgpio *tgpio = get_irq_desc_chip_data(desc);
int offset = irq - tgpio->irq_base;
unsigned long flags;
u32 lvr, flr, bflr = 0;
@@ -185,7 +188,7 @@ static void timbgpio_irq(unsigned int irq, struct irq_desc *desc)
unsigned long ipr;
int offset;
- desc->chip->ack(irq);
+ desc->chip->ack(desc);
ipr = ioread32(tgpio->membase + TGPIO_IPR);
iowrite32(ipr, tgpio->membase + TGPIO_ICR);
diff --git a/drivers/gpio/vr41xx_giu.c b/drivers/gpio/vr41xx_giu.c
index b16c9a8..ea2e00f 100644
--- a/drivers/gpio/vr41xx_giu.c
+++ b/drivers/gpio/vr41xx_giu.c
@@ -111,28 +111,28 @@ static inline u16 giu_clear(u16 offset, u16 clear)
return data;
}
-static void ack_giuint_low(unsigned int irq)
+static void ack_giuint_low(struct irq_desc *desc)
{
- giu_write(GIUINTSTATL, 1 << GPIO_PIN_OF_IRQ(irq));
+ giu_write(GIUINTSTATL, 1 << GPIO_PIN_OF_IRQ(desc->irq));
}
-static void mask_giuint_low(unsigned int irq)
+static void mask_giuint_low(struct irq_desc *desc)
{
- giu_clear(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(irq));
+ giu_clear(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(desc->irq));
}
-static void mask_ack_giuint_low(unsigned int irq)
+static void mask_ack_giuint_low(struct irq_desc *desc)
{
unsigned int pin;
- pin = GPIO_PIN_OF_IRQ(irq);
+ pin = GPIO_PIN_OF_IRQ(desc->irq);
giu_clear(GIUINTENL, 1 << pin);
giu_write(GIUINTSTATL, 1 << pin);
}
-static void unmask_giuint_low(unsigned int irq)
+static void unmask_giuint_low(struct irq_desc *desc)
{
- giu_set(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(irq));
+ giu_set(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(desc->irq));
}
static struct irq_chip giuint_low_irq_chip = {
@@ -143,29 +143,29 @@ static struct irq_chip giuint_low_irq_chip = {
.unmask = unmask_giuint_low,
};
-static void ack_giuint_high(unsigned int irq)
+static void ack_giuint_high(struct irq_desc *desc)
{
giu_write(GIUINTSTATH,
- 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET));
+ 1 << (GPIO_PIN_OF_IRQ(desc->irq) - GIUINT_HIGH_OFFSET));
}
-static void mask_giuint_high(unsigned int irq)
+static void mask_giuint_high(struct irq_desc *desc)
{
- giu_clear(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET));
+ giu_clear(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(desc->irq) - GIUINT_HIGH_OFFSET));
}
-static void mask_ack_giuint_high(unsigned int irq)
+static void mask_ack_giuint_high(struct irq_desc *desc)
{
unsigned int pin;
- pin = GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET;
+ pin = GPIO_PIN_OF_IRQ(desc->irq) - GIUINT_HIGH_OFFSET;
giu_clear(GIUINTENH, 1 << pin);
giu_write(GIUINTSTATH, 1 << pin);
}
-static void unmask_giuint_high(unsigned int irq)
+static void unmask_giuint_high(struct irq_desc *desc)
{
- giu_set(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET));
+ giu_set(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(desc->irq) - GIUINT_HIGH_OFFSET));
}
static struct irq_chip giuint_high_irq_chip = {
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6110.c b/drivers/infiniband/hw/ipath/ipath_iba6110.c
index 37d12e5..4cbae45 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6110.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6110.c
@@ -986,7 +986,7 @@ static int ipath_ht_intconfig(struct ipath_devdata *dd)
return ret;
}
-static void ipath_ht_irq_update(struct pci_dev *dev, int irq,
+static void ipath_ht_irq_update(struct pci_dev *dev, struct irq_desc *desc,
struct ht_irq_msg *msg)
{
struct ipath_devdata *dd = pci_get_drvdata(dev);
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 95c1e6b..609da5b 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -142,7 +142,7 @@ static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc)
unsigned long flags;
struct asic3 *asic;
- desc->chip->ack(irq);
+ desc->chip->ack(desc);
asic = desc->handler_data;
@@ -225,9 +225,10 @@ static inline int asic3_irq_to_index(struct asic3 *asic, int irq)
return (irq - asic->irq_base) & 0xf;
}
-static void asic3_mask_gpio_irq(unsigned int irq)
+static void asic3_mask_gpio_irq(struct irq_desc *desc)
{
- struct asic3 *asic = get_irq_chip_data(irq);
+ unsigned int irq = desc->irq;
+ struct asic3 *asic = get_irq_desc_chip_data(desc);
u32 val, bank, index;
unsigned long flags;
@@ -241,9 +242,10 @@ static void asic3_mask_gpio_irq(unsigned int irq)
spin_unlock_irqrestore(&asic->lock, flags);
}
-static void asic3_mask_irq(unsigned int irq)
+static void asic3_mask_irq(struct irq_desc *desc)
{
- struct asic3 *asic = get_irq_chip_data(irq);
+ unsigned int irq = desc->irq;
+ struct asic3 *asic = get_irq_desc_chip_data(desc);
int regval;
unsigned long flags;
@@ -262,9 +264,10 @@ static void asic3_mask_irq(unsigned int irq)
spin_unlock_irqrestore(&asic->lock, flags);
}
-static void asic3_unmask_gpio_irq(unsigned int irq)
+static void asic3_unmask_gpio_irq(struct irq_desc *desc)
{
- struct asic3 *asic = get_irq_chip_data(irq);
+ unsigned int irq = desc->irq;
+ struct asic3 *asic = get_irq_desc_chip_data(desc);
u32 val, bank, index;
unsigned long flags;
@@ -278,9 +281,10 @@ static void asic3_unmask_gpio_irq(unsigned int irq)
spin_unlock_irqrestore(&asic->lock, flags);
}
-static void asic3_unmask_irq(unsigned int irq)
+static void asic3_unmask_irq(struct irq_desc *desc)
{
- struct asic3 *asic = get_irq_chip_data(irq);
+ unsigned int irq = desc->irq;
+ struct asic3 *asic = get_irq_desc_chip_data(desc);
int regval;
unsigned long flags;
@@ -299,9 +303,10 @@ static void asic3_unmask_irq(unsigned int irq)
spin_unlock_irqrestore(&asic->lock, flags);
}
-static int asic3_gpio_irq_type(unsigned int irq, unsigned int type)
+static int asic3_gpio_irq_type(struct irq_desc *desc, unsigned int type)
{
- struct asic3 *asic = get_irq_chip_data(irq);
+ unsigned int irq = desc->irq;
+ struct asic3 *asic = get_irq_desc_chip_data(desc);
u32 bank, index;
u16 trigger, level, edge, bit;
unsigned long flags;
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
index df405af..26d6f9b 100644
--- a/drivers/mfd/ezx-pcap.c
+++ b/drivers/mfd/ezx-pcap.c
@@ -143,17 +143,19 @@ int pcap_to_irq(struct pcap_chip *pcap, int irq)
}
EXPORT_SYMBOL_GPL(pcap_to_irq);
-static void pcap_mask_irq(unsigned int irq)
+static void pcap_mask_irq(struct irq_desc *desc)
{
- struct pcap_chip *pcap = get_irq_chip_data(irq);
+ unsigned int irq = desc->irq;
+ struct pcap_chip *pcap = get_irq_desc_chip_data(desc);
pcap->msr |= 1 << irq_to_pcap(pcap, irq);
queue_work(pcap->workqueue, &pcap->msr_work);
}
-static void pcap_unmask_irq(unsigned int irq)
+static void pcap_unmask_irq(struct irq_desc *desc)
{
- struct pcap_chip *pcap = get_irq_chip_data(irq);
+ unsigned int irq = desc->irq;
+ struct pcap_chip *pcap = get_irq_desc_chip_data(desc);
pcap->msr &= ~(1 << irq_to_pcap(pcap, irq));
queue_work(pcap->workqueue, &pcap->msr_work);
@@ -217,7 +219,7 @@ static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc)
{
struct pcap_chip *pcap = get_irq_data(irq);
- desc->chip->ack(irq);
+ desc->chip->ack(desc);
queue_work(pcap->workqueue, &pcap->isr_work);
return;
}
diff --git a/drivers/mfd/htc-egpio.c b/drivers/mfd/htc-egpio.c
index addb846..629176b 100644
--- a/drivers/mfd/htc-egpio.c
+++ b/drivers/mfd/htc-egpio.c
@@ -69,22 +69,24 @@ static inline void ack_irqs(struct egpio_info *ei)
ei->ack_write, ei->ack_register << ei->bus_shift);
}
-static void egpio_ack(unsigned int irq)
+static void egpio_ack(struct irq_desc *desc)
{
}
/* There does not appear to be a way to proactively mask interrupts
* on the egpio chip itself. So, we simply ignore interrupts that
* aren't desired. */
-static void egpio_mask(unsigned int irq)
+static void egpio_mask(struct irq_desc *desc)
{
- struct egpio_info *ei = get_irq_chip_data(irq);
+ unsigned int irq = desc->irq;
+ struct egpio_info *ei = get_irq_desc_chip_data(desc);
ei->irqs_enabled &= ~(1 << (irq - ei->irq_start));
pr_debug("EGPIO mask %d %04x\n", irq, ei->irqs_enabled);
}
-static void egpio_unmask(unsigned int irq)
+static void egpio_unmask(struct irq_desc *desc)
{
- struct egpio_info *ei = get_irq_chip_data(irq);
+ unsigned int irq = desc->irq;
+ struct egpio_info *ei = get_irq_desc_chip_data(desc);
ei->irqs_enabled |= 1 << (irq - ei->irq_start);
pr_debug("EGPIO unmask %d %04x\n", irq, ei->irqs_enabled);
}
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index 26d9176..05f23cd 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -198,9 +198,10 @@ static void t7l66xb_irq(unsigned int irq, struct irq_desc *desc)
generic_handle_irq(irq_base + i);
}
-static void t7l66xb_irq_mask(unsigned int irq)
+static void t7l66xb_irq_mask(struct irq_desc *desc)
{
- struct t7l66xb *t7l66xb = get_irq_chip_data(irq);
+ unsigned int irq = desc->irq;
+ struct t7l66xb *t7l66xb = get_irq_desc_chip_data(desc);
unsigned long flags;
u8 imr;
@@ -211,8 +212,9 @@ static void t7l66xb_irq_mask(unsigned int irq)
spin_unlock_irqrestore(&t7l66xb->lock, flags);
}
-static void t7l66xb_irq_unmask(unsigned int irq)
+static void t7l66xb_irq_unmask(struct irq_desc *desc)
{
+ unsigned int irq = desc->irq;
- struct t7l66xb *t7l66xb = get_irq_chip_data(irq);
+ struct t7l66xb *t7l66xb = get_irq_desc_chip_data(desc);
unsigned long flags;
u8 imr;
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index c59e5c5..882a463 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -512,7 +512,7 @@ static int tc6393xb_register_gpio(struct tc6393xb *tc6393xb, int gpio_base)
static void
tc6393xb_irq(unsigned int irq, struct irq_desc *desc)
{
- struct tc6393xb *tc6393xb = get_irq_data(irq);
+ struct tc6393xb *tc6393xb = get_irq_desc_data(desc);
unsigned int isr;
unsigned int i, irq_base;
@@ -526,13 +526,14 @@ tc6393xb_irq(unsigned int irq, struct irq_desc *desc)
}
}
-static void tc6393xb_irq_ack(unsigned int irq)
+static void tc6393xb_irq_ack(struct irq_desc *desc)
{
}
-static void tc6393xb_irq_mask(unsigned int irq)
+static void tc6393xb_irq_mask(struct irq_desc *desc)
{
- struct tc6393xb *tc6393xb = get_irq_chip_data(irq);
+ unsigned int irq = desc->irq;
+ struct tc6393xb *tc6393xb = get_irq_desc_chip_data(desc);
unsigned long flags;
u8 imr;
@@ -543,9 +544,10 @@ static void tc6393xb_irq_mask(unsigned int irq)
spin_unlock_irqrestore(&tc6393xb->lock, flags);
}
-static void tc6393xb_irq_unmask(unsigned int irq)
+static void tc6393xb_irq_unmask(struct irq_desc *desc)
{
- struct tc6393xb *tc6393xb = get_irq_chip_data(irq);
+ unsigned int irq = desc->irq;
+ struct tc6393xb *tc6393xb = get_irq_desc_chip_data(desc);
unsigned long flags;
u8 imr;
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index 9df9a5a..b5e30d5 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -595,9 +595,10 @@ static void twl4030_sih_do_edge(struct work_struct *work)
* completion, potentially including some re-ordering, of these requests.
*/
-static void twl4030_sih_mask(unsigned irq)
+static void twl4030_sih_mask(struct irq_desc *desc)
{
- struct sih_agent *sih = get_irq_chip_data(irq);
+ unsigned int irq = desc->irq;
+ struct sih_agent *sih = get_irq_desc_chip_data(desc);
unsigned long flags;
spin_lock_irqsave(&sih_agent_lock, flags);
@@ -607,9 +608,10 @@ static void twl4030_sih_mask(unsigned irq)
spin_unlock_irqrestore(&sih_agent_lock, flags);
}
-static void twl4030_sih_unmask(unsigned irq)
+static void twl4030_sih_unmask(struct irq_desc *desc)
{
- struct sih_agent *sih = get_irq_chip_data(irq);
+ unsigned int irq = desc->irq;
+ struct sih_agent *sih = get_irq_desc_chip_data(desc);
unsigned long flags;
spin_lock_irqsave(&sih_agent_lock, flags);
@@ -619,10 +621,10 @@ static void twl4030_sih_unmask(unsigned irq)
spin_unlock_irqrestore(&sih_agent_lock, flags);
}
-static int twl4030_sih_set_type(unsigned irq, unsigned trigger)
+static int twl4030_sih_set_type(struct irq_desc *desc, unsigned trigger)
{
- struct sih_agent *sih = get_irq_chip_data(irq);
- struct irq_desc *desc = irq_to_desc(irq);
+ unsigned int irq = desc->irq;
+ struct sih_agent *sih = get_irq_desc_chip_data(desc);
unsigned long flags;
if (!desc) {
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index 3013276..18ec226 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -346,16 +346,16 @@ static inline struct wm831x_irq_data *irq_to_wm831x_irq(struct wm831x *wm831x,
return &wm831x_irqs[irq - wm831x->irq_base];
}
-static void wm831x_irq_lock(unsigned int irq)
+static void wm831x_irq_lock(struct irq_desc *desc)
{
- struct wm831x *wm831x = get_irq_chip_data(irq);
+ struct wm831x *wm831x = get_irq_desc_chip_data(desc);
mutex_lock(&wm831x->irq_lock);
}
-static void wm831x_irq_sync_unlock(unsigned int irq)
+static void wm831x_irq_sync_unlock(struct irq_desc *desc)
{
- struct wm831x *wm831x = get_irq_chip_data(irq);
+ struct wm831x *wm831x = get_irq_desc_chip_data(desc);
int i;
for (i = 0; i < ARRAY_SIZE(wm831x->irq_masks_cur); i++) {
@@ -372,17 +372,19 @@ static void wm831x_irq_sync_unlock(unsigned int irq)
mutex_unlock(&wm831x->irq_lock);
}
-static void wm831x_irq_unmask(unsigned int irq)
+static void wm831x_irq_unmask(struct irq_desc *desc)
{
- struct wm831x *wm831x = get_irq_chip_data(irq);
+ unsigned int irq = desc->irq;
+ struct wm831x *wm831x = get_irq_desc_chip_data(desc);
struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x, irq);
wm831x->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
}
-static void wm831x_irq_mask(unsigned int irq)
+static void wm831x_irq_mask(struct irq_desc *desc)
{
- struct wm831x *wm831x = get_irq_chip_data(irq);
+ unsigned int irq = desc->irq;
+ struct wm831x *wm831x = get_irq_desc_chip_data(desc);
struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x, irq);
wm831x->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index cb3b4d2..36281e3 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -348,7 +348,7 @@ static unsigned long gru_chiplet_cpu_to_mmr(int chiplet, int cpu, int *corep)
static int gru_irq_count[GRU_CHIPLETS_PER_BLADE];
-static void gru_noop(unsigned int irq)
+static void gru_noop(struct irq_desc *desc)
{
}
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index c542c7b..3c89196 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -296,9 +296,9 @@ static struct pci_port_ops dino_port_ops = {
.outl = dino_out32
};
-static void dino_disable_irq(unsigned int irq)
+static void dino_disable_irq(struct irq_desc *desc)
{
- struct irq_desc *desc = irq_to_desc(irq);
+ unsigned int irq = desc->irq;
struct dino_device *dino_dev = desc->chip_data;
int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
@@ -309,9 +309,9 @@ static void dino_disable_irq(unsigned int irq)
__raw_writel(dino_dev->imr, dino_dev->hba.base_addr+DINO_IMR);
}
-static void dino_enable_irq(unsigned int irq)
+static void dino_enable_irq(struct irq_desc *desc)
{
- struct irq_desc *desc = irq_to_desc(irq);
+ unsigned int irq = desc->irq;
struct dino_device *dino_dev = desc->chip_data;
int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
u32 tmp;
@@ -347,9 +347,9 @@ static void dino_enable_irq(unsigned int irq)
}
}
-static unsigned int dino_startup_irq(unsigned int irq)
+static unsigned int dino_startup_irq(struct irq_desc *desc)
{
- dino_enable_irq(irq);
+ dino_enable_irq(desc);
return 0;
}
diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c
index 46f503f..f00d28e 100644
--- a/drivers/parisc/eisa.c
+++ b/drivers/parisc/eisa.c
@@ -144,8 +144,9 @@ static unsigned int eisa_irq_level __read_mostly; /* default to edge triggered *
/* called by free irq */
-static void eisa_disable_irq(unsigned int irq)
+static void eisa_disable_irq(struct irq_desc *desc)
{
+ unsigned int irq = desc->irq;
unsigned long flags;
EISA_DBG("disable irq %d\n", irq);
@@ -164,8 +165,9 @@ static void eisa_disable_irq(unsigned int irq)
}
/* called by request irq */
-static void eisa_enable_irq(unsigned int irq)
+static void eisa_enable_irq(struct irq_desc *desc)
{
+ unsigned int irq = desc->irq;
unsigned long flags;
EISA_DBG("enable irq %d\n", irq);
@@ -182,9 +184,9 @@ static void eisa_enable_irq(unsigned int irq)
EISA_DBG("pic1 mask %02x\n", eisa_in8(0xa1));
}
-static unsigned int eisa_startup_irq(unsigned int irq)
+static unsigned int eisa_startup_irq(struct irq_desc *desc)
{
- eisa_enable_irq(irq);
+ eisa_enable_irq(desc);
return 0;
}
diff --git a/drivers/parisc/gsc.c b/drivers/parisc/gsc.c
index c4e1f3c..d2fc47b 100644
--- a/drivers/parisc/gsc.c
+++ b/drivers/parisc/gsc.c
@@ -106,9 +106,9 @@ int gsc_find_local_irq(unsigned int irq, int *global_irqs, int limit)
return NO_IRQ;
}
-static void gsc_asic_disable_irq(unsigned int irq)
+static void gsc_asic_disable_irq(struct irq_desc *desc)
{
- struct irq_desc *desc = irq_to_desc(irq);
+ unsigned int irq = desc->irq;
struct gsc_asic *irq_dev = desc->chip_data;
int local_irq = gsc_find_local_irq(irq, irq_dev->global_irq, 32);
u32 imr;
@@ -122,9 +122,9 @@ static void gsc_asic_disable_irq(unsigned int irq)
gsc_writel(imr, irq_dev->hpa + OFFSET_IMR);
}
-static void gsc_asic_enable_irq(unsigned int irq)
+static void gsc_asic_enable_irq(struct irq_desc *desc)
{
- struct irq_desc *desc = irq_to_desc(irq);
+ unsigned int irq = desc->irq;
struct gsc_asic *irq_dev = desc->chip_data;
int local_irq = gsc_find_local_irq(irq, irq_dev->global_irq, 32);
u32 imr;
@@ -142,9 +142,9 @@ static void gsc_asic_enable_irq(unsigned int irq)
*/
}
-static unsigned int gsc_asic_startup_irq(unsigned int irq)
+static unsigned int gsc_asic_startup_irq(struct irq_desc *desc)
{
- gsc_asic_enable_irq(irq);
+ gsc_asic_enable_irq(desc);
return 0;
}
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index c768367..2ac290b 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -622,8 +622,9 @@ static struct vector_info *iosapic_get_vector(unsigned int irq)
return desc->chip_data;
}
-static void iosapic_disable_irq(unsigned int irq)
+static void iosapic_disable_irq(struct irq_desc *desc)
{
+ unsigned int irq = desc->irq;
unsigned long flags;
struct vector_info *vi = iosapic_get_vector(irq);
u32 d0, d1;
@@ -635,8 +636,9 @@ static void iosapic_disable_irq(unsigned int irq)
spin_unlock_irqrestore(&iosapic_lock, flags);
}
-static void iosapic_enable_irq(unsigned int irq)
+static void iosapic_enable_irq(struct irq_desc *desc)
{
+ unsigned int irq = desc->irq;
struct vector_info *vi = iosapic_get_vector(irq);
u32 d0, d1;
@@ -686,8 +688,9 @@ printk("\n");
* i386/ia64 support ISA devices and have to deal with
* edge-triggered interrupts too.
*/
-static void iosapic_end_irq(unsigned int irq)
+static void iosapic_end_irq(struct irq_desc *desc)
{
+ unsigned int irq = desc->irq;
struct vector_info *vi = iosapic_get_vector(irq);
DBG(KERN_DEBUG "end_irq(%d): eoi(%p, 0x%x)\n", irq,
vi->eoi_addr, vi->eoi_data);
@@ -695,16 +698,17 @@ static void iosapic_end_irq(unsigned int irq)
cpu_end_irq(irq);
}
-static unsigned int iosapic_startup_irq(unsigned int irq)
+static unsigned int iosapic_startup_irq(struct irq_desc *desc)
{
- iosapic_enable_irq(irq);
+ iosapic_enable_irq(desc);
return 0;
}
#ifdef CONFIG_SMP
-static int iosapic_set_affinity_irq(unsigned int irq,
+static int iosapic_set_affinity_irq(struct irq_desc *desc,
const struct cpumask *dest)
{
+ unsigned int irq = desc->irq;
struct vector_info *vi = iosapic_get_vector(irq);
u32 d0, d1, dummy_d0;
unsigned long flags;
diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c
index f7806d8..084b1c7 100644
--- a/drivers/parisc/superio.c
+++ b/drivers/parisc/superio.c
@@ -286,8 +286,9 @@ superio_init(struct pci_dev *pcidev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87560_LIO, superio_init);
-static void superio_disable_irq(unsigned int irq)
+static void superio_disable_irq(struct irq_desc *desc)
{
+ unsigned int irq = desc->irq;
u8 r8;
if ((irq < 1) || (irq == 2) || (irq > 7)) {
@@ -303,8 +304,9 @@ static void superio_disable_irq(unsigned int irq)
outb (r8,IC_PIC1+1);
}
-static void superio_enable_irq(unsigned int irq)
+static void superio_enable_irq(struct irq_desc *desc)
{
+ unsigned int irq = desc->irq;
u8 r8;
if ((irq < 1) || (irq == 2) || (irq > 7)) {
@@ -319,9 +321,9 @@ static void superio_enable_irq(unsigned int irq)
outb (r8,IC_PIC1+1);
}
-static unsigned int superio_startup_irq(unsigned int irq)
+static unsigned int superio_startup_irq(struct irq_desc *desc)
{
- superio_enable_irq(irq);
+ superio_enable_irq(desc);
return 0;
}
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 83aae47..6b5ad63 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -1230,9 +1230,9 @@ const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
}
}
-void dmar_msi_unmask(unsigned int irq)
+void dmar_msi_unmask(struct irq_desc *desc)
{
- struct intel_iommu *iommu = get_irq_data(irq);
+ struct intel_iommu *iommu = get_irq_desc_data(desc);
unsigned long flag;
/* unmask it */
@@ -1243,10 +1243,10 @@ void dmar_msi_unmask(unsigned int irq)
spin_unlock_irqrestore(&iommu->register_lock, flag);
}
-void dmar_msi_mask(unsigned int irq)
+void dmar_msi_mask(struct irq_desc *desc)
{
unsigned long flag;
- struct intel_iommu *iommu = get_irq_data(irq);
+ struct intel_iommu *iommu = get_irq_desc_data(desc);
/* mask it */
spin_lock_irqsave(&iommu->register_lock, flag);
@@ -1256,9 +1256,9 @@ void dmar_msi_mask(unsigned int irq)
spin_unlock_irqrestore(&iommu->register_lock, flag);
}
-void dmar_msi_write(int irq, struct msi_msg *msg)
+void dmar_msi_write(struct irq_desc *desc, struct msi_msg *msg)
{
- struct intel_iommu *iommu = get_irq_data(irq);
+ struct intel_iommu *iommu = get_irq_desc_data(desc);
unsigned long flag;
spin_lock_irqsave(&iommu->register_lock, flag);
@@ -1268,9 +1268,9 @@ void dmar_msi_write(int irq, struct msi_msg *msg)
spin_unlock_irqrestore(&iommu->register_lock, flag);
}
-void dmar_msi_read(int irq, struct msi_msg *msg)
+void dmar_msi_read(struct irq_desc *desc, struct msi_msg *msg)
{
- struct intel_iommu *iommu = get_irq_data(irq);
+ struct intel_iommu *iommu = get_irq_desc_data(desc);
unsigned long flag;
spin_lock_irqsave(&iommu->register_lock, flag);
diff --git a/drivers/pci/htirq.c b/drivers/pci/htirq.c
index 737a1c4..d9c5f0d 100644
--- a/drivers/pci/htirq.c
+++ b/drivers/pci/htirq.c
@@ -33,9 +33,9 @@ struct ht_irq_cfg {
};
-void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
+void write_ht_irq_msg(struct irq_desc *desc, struct ht_irq_msg *msg)
{
- struct ht_irq_cfg *cfg = get_irq_data(irq);
+ struct ht_irq_cfg *cfg = get_irq_desc_data(desc);
unsigned long flags;
spin_lock_irqsave(&ht_irq_lock, flags);
if (cfg->msg.address_lo != msg->address_lo) {
@@ -47,39 +47,39 @@ void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
pci_write_config_dword(cfg->dev, cfg->pos + 4, msg->address_hi);
}
if (cfg->update)
- cfg->update(cfg->dev, irq, msg);
+ cfg->update(cfg->dev, desc, msg);
spin_unlock_irqrestore(&ht_irq_lock, flags);
cfg->msg = *msg;
}
-void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
+void fetch_ht_irq_msg(struct irq_desc *desc, struct ht_irq_msg *msg)
{
- struct ht_irq_cfg *cfg = get_irq_data(irq);
+ struct ht_irq_cfg *cfg = get_irq_desc_data(desc);
*msg = cfg->msg;
}
-void mask_ht_irq(unsigned int irq)
+void mask_ht_irq(struct irq_desc *desc)
{
struct ht_irq_cfg *cfg;
struct ht_irq_msg msg;
- cfg = get_irq_data(irq);
+ cfg = get_irq_desc_data(desc);
msg = cfg->msg;
msg.address_lo |= 1;
- write_ht_irq_msg(irq, &msg);
+ write_ht_irq_msg(desc, &msg);
}
-void unmask_ht_irq(unsigned int irq)
+void unmask_ht_irq(struct irq_desc *desc)
{
struct ht_irq_cfg *cfg;
struct ht_irq_msg msg;
- cfg = get_irq_data(irq);
+ cfg = get_irq_desc_data(desc);
msg = cfg->msg;
msg.address_lo &= ~1;
- write_ht_irq_msg(irq, &msg);
+ write_ht_irq_msg(desc, &msg);
}
/**
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index f9cf317..3eecee3 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -169,9 +169,10 @@ static void msix_mask_irq(struct msi_desc *desc, u32 flag)
desc->masked = __msix_mask_irq(desc, flag);
}
-static void msi_set_mask_bit(unsigned irq, u32 flag)
+static void msi_set_mask_bit(struct irq_desc *descx, u32 flag)
{
- struct msi_desc *desc = get_irq_msi(irq);
+ unsigned int irq = descx->irq;
+ struct msi_desc *desc = get_irq_desc_msi(descx);
if (desc->msi_attrib.is_msix) {
msix_mask_irq(desc, flag);
@@ -182,14 +183,14 @@ static void msi_set_mask_bit(unsigned irq, u32 flag)
}
}
-void mask_msi_irq(unsigned int irq)
+void mask_msi_irq(struct irq_desc *desc)
{
- msi_set_mask_bit(irq, 1);
+ msi_set_mask_bit(desc, 1);
}
-void unmask_msi_irq(unsigned int irq)
+void unmask_msi_irq(struct irq_desc *desc)
{
- msi_set_mask_bit(irq, 0);
+ msi_set_mask_bit(desc, 0);
}
void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
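[ Note: mask_msi_irq()/unmask_msi_irq() keep their external linkage, so
arch code that wires them into an irq_chip only sees the signature
change. A sketch of such a chip after this series (the chip name is
illustrative, not taken from this patch):

static struct irq_chip example_msi_chip = {
	.name	= "PCI-MSI",
	.mask	= mask_msi_irq,		/* now takes struct irq_desc * */
	.unmask	= unmask_msi_irq,
};
]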
diff --git a/drivers/vlynq/vlynq.c b/drivers/vlynq/vlynq.c
index 9554ad5..34a11e5 100644
--- a/drivers/vlynq/vlynq.c
+++ b/drivers/vlynq/vlynq.c
@@ -133,10 +133,11 @@ static void vlynq_reset(struct vlynq_device *dev)
msleep(5);
}
-static void vlynq_irq_unmask(unsigned int irq)
+static void vlynq_irq_unmask(struct irq_desc *desc)
{
u32 val;
- struct vlynq_device *dev = get_irq_chip_data(irq);
+ unsigned int irq = desc->irq;
+ struct vlynq_device *dev = get_irq_desc_chip_data(desc);
int virq;
BUG_ON(!dev);
@@ -146,10 +147,11 @@ static void vlynq_irq_unmask(unsigned int irq)
writel(val, &dev->remote->int_device[virq >> 2]);
}
-static void vlynq_irq_mask(unsigned int irq)
+static void vlynq_irq_mask(struct irq_desc *desc)
{
u32 val;
- struct vlynq_device *dev = get_irq_chip_data(irq);
+ unsigned int irq = desc->irq;
+ struct vlynq_device *dev = get_irq_desc_chip_data(desc);
int virq;
BUG_ON(!dev);
@@ -159,10 +161,11 @@ static void vlynq_irq_mask(unsigned int irq)
writel(val, &dev->remote->int_device[virq >> 2]);
}
-static int vlynq_irq_type(unsigned int irq, unsigned int flow_type)
+static int vlynq_irq_type(struct irq_desc *desc, unsigned int flow_type)
{
u32 val;
- struct vlynq_device *dev = get_irq_chip_data(irq);
+ unsigned int irq = desc->irq;
+ struct vlynq_device *dev = get_irq_desc_chip_data(desc);
int virq;
BUG_ON(!dev);
@@ -190,9 +193,9 @@ static int vlynq_irq_type(unsigned int irq, unsigned int flow_type)
return 0;
}
-static void vlynq_local_ack(unsigned int irq)
+static void vlynq_local_ack(struct irq_desc *desc)
{
- struct vlynq_device *dev = get_irq_chip_data(irq);
+ struct vlynq_device *dev = get_irq_desc_chip_data(desc);
u32 status = readl(&dev->local->status);
@@ -201,9 +204,9 @@ static void vlynq_local_ack(unsigned int irq)
writel(status, &dev->local->status);
}
-static void vlynq_remote_ack(unsigned int irq)
+static void vlynq_remote_ack(struct irq_desc *desc)
{
- struct vlynq_device *dev = get_irq_chip_data(irq);
+ struct vlynq_device *dev = get_irq_desc_chip_data(desc);
u32 status = readl(&dev->remote->status);
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 64cbbe4..fd16b80 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -730,11 +730,11 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
return 0;
}
-static int set_affinity_irq(unsigned irq, const struct cpumask *dest)
+static int set_affinity_irq(struct irq_desc *desc, const struct cpumask *dest)
{
unsigned tcpu = cpumask_first(dest);
- return rebind_irq_to_cpu(irq, tcpu);
+ return rebind_irq_to_cpu(desc->irq, tcpu);
}
int resend_irq_on_evtchn(unsigned int irq)
@@ -753,35 +753,35 @@ int resend_irq_on_evtchn(unsigned int irq)
return 1;
}
-static void enable_dynirq(unsigned int irq)
+static void enable_dynirq(struct irq_desc *desc)
{
- int evtchn = evtchn_from_irq(irq);
+ int evtchn = evtchn_from_irq(desc->irq);
if (VALID_EVTCHN(evtchn))
unmask_evtchn(evtchn);
}
-static void disable_dynirq(unsigned int irq)
+static void disable_dynirq(struct irq_desc *desc)
{
- int evtchn = evtchn_from_irq(irq);
+ int evtchn = evtchn_from_irq(desc->irq);
if (VALID_EVTCHN(evtchn))
mask_evtchn(evtchn);
}
-static void ack_dynirq(unsigned int irq)
+static void ack_dynirq(struct irq_desc *desc)
{
- int evtchn = evtchn_from_irq(irq);
+ int evtchn = evtchn_from_irq(desc->irq);
- move_native_irq(irq);
+ move_native_irq(desc);
if (VALID_EVTCHN(evtchn))
clear_evtchn(evtchn);
}
-static int retrigger_dynirq(unsigned int irq)
+static int retrigger_dynirq(struct irq_desc *desc)
{
- int evtchn = evtchn_from_irq(irq);
+ int evtchn = evtchn_from_irq(desc->irq);
struct shared_info *sh = HYPERVISOR_shared_info;
int ret = 0;
diff --git a/include/asm-generic/hardirq.h b/include/asm-generic/hardirq.h
index 62f5908..afc7506 100644
--- a/include/asm-generic/hardirq.h
+++ b/include/asm-generic/hardirq.h
@@ -12,9 +12,9 @@ typedef struct {
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
#ifndef ack_bad_irq
-static inline void ack_bad_irq(unsigned int irq)
+static inline void ack_bad_irq(struct irq_desc *desc)
{
- printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
+ printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", desc->irq);
}
#endif
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 659a765..6b4227a 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -187,10 +187,10 @@ static inline int set_msi_sid(struct irte *irte, struct pci_dev *dev)
/* Can't use the common MSI interrupt functions
* since DMAR is not a pci device
*/
-extern void dmar_msi_unmask(unsigned int irq);
-extern void dmar_msi_mask(unsigned int irq);
-extern void dmar_msi_read(int irq, struct msi_msg *msg);
-extern void dmar_msi_write(int irq, struct msi_msg *msg);
+extern void dmar_msi_unmask(struct irq_desc *);
+extern void dmar_msi_mask(struct irq_desc *);
+extern void dmar_msi_read(struct irq_desc *desc, struct msi_msg *msg);
+extern void dmar_msi_write(struct irq_desc *desc, struct msi_msg *msg);
extern int dmar_set_interrupt(struct intel_iommu *iommu);
extern irqreturn_t dmar_fault(int irq, void *dev_id);
extern int arch_setup_dmar_msi(unsigned int irq);
diff --git a/include/linux/htirq.h b/include/linux/htirq.h
index c96ea46..91cf055 100644
--- a/include/linux/htirq.h
+++ b/include/linux/htirq.h
@@ -7,16 +7,17 @@ struct ht_irq_msg {
};
/* Helper functions.. */
-void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg);
-void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg);
-void mask_ht_irq(unsigned int irq);
-void unmask_ht_irq(unsigned int irq);
+struct irq_desc;
+void fetch_ht_irq_msg(struct irq_desc *desc, struct ht_irq_msg *msg);
+void write_ht_irq_msg(struct irq_desc *desc, struct ht_irq_msg *msg);
+void mask_ht_irq(struct irq_desc *);
+void unmask_ht_irq(struct irq_desc *);
/* The arch hook for getting things started */
int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev);
/* For drivers of buggy hardware */
-typedef void (ht_irq_update_t)(struct pci_dev *dev, int irq,
+typedef void (ht_irq_update_t)(struct pci_dev *dev, struct irq_desc *desc,
struct ht_irq_msg *msg);
int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update);
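[ Note: ht_irq_update_t is the hook __ht_create_irq() callers pass in
for quirky hardware (cf. the ipath_ht_irq_update() change earlier in
this patch). A sketch of a driver hook under the new prototype; the
body is hypothetical, only the signature is mandated by the header
change above:

static void example_ht_irq_update(struct pci_dev *dev,
				  struct irq_desc *desc,
				  struct ht_irq_msg *msg)
{
	void *dd = pci_get_drvdata(dev);	/* driver private data */

	pr_debug("ht irq %u: addr %08x/%08x\n", desc->irq,
		 msg->address_hi, msg->address_lo);
	/* adjust *msg for the broken device here, using dd */
	(void)dd;
}
]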
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 60f3368..fb8c376 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -110,30 +110,31 @@ struct msi_desc;
*/
struct irq_chip {
const char *name;
- unsigned int (*startup)(unsigned int irq);
- void (*shutdown)(unsigned int irq);
- void (*enable)(unsigned int irq);
- void (*disable)(unsigned int irq);
-
- void (*ack)(unsigned int irq);
- void (*mask)(unsigned int irq);
- void (*mask_ack)(unsigned int irq);
- void (*unmask)(unsigned int irq);
- void (*eoi)(unsigned int irq);
-
- void (*end)(unsigned int irq);
- int (*set_affinity)(unsigned int irq,
+ unsigned int (*startup)(struct irq_desc *desc);
+ void (*shutdown)(struct irq_desc *desc);
+ void (*enable)(struct irq_desc *desc);
+ void (*disable)(struct irq_desc *desc);
+
+ void (*ack)(struct irq_desc *desc);
+ void (*mask)(struct irq_desc *desc);
+ void (*mask_ack)(struct irq_desc *desc);
+ void (*unmask)(struct irq_desc *desc);
+ void (*eoi)(struct irq_desc *desc);
+
+ void (*end)(struct irq_desc *desc);
+ int (*set_affinity)(struct irq_desc *desc,
const struct cpumask *dest);
- int (*retrigger)(unsigned int irq);
- int (*set_type)(unsigned int irq, unsigned int flow_type);
- int (*set_wake)(unsigned int irq, unsigned int on);
+ int (*retrigger)(struct irq_desc *desc);
+ int (*set_type)(struct irq_desc *desc, unsigned int flow_type);
+ int (*set_wake)(struct irq_desc *desc, unsigned int on);
+
- void (*bus_lock)(unsigned int irq);
- void (*bus_sync_unlock)(unsigned int irq);
+ void (*bus_lock)(struct irq_desc *desc);
+ void (*bus_sync_unlock)(struct irq_desc *desc);
/* Currently used only by UML, might disappear one day.*/
#ifdef CONFIG_IRQ_RELEASE_METHOD
- void (*release)(unsigned int irq, void *dev_id);
+ void (*release)(struct irq_desc *desc, void *dev_id);
#endif
/*
* For compatibility, ->typename is copied into ->name.
@@ -252,8 +253,8 @@ extern void remove_irq(unsigned int irq, struct irqaction *act);
#ifdef CONFIG_GENERIC_PENDING_IRQ
-void move_native_irq(int irq);
-void move_masked_irq(int irq);
+void move_native_irq(struct irq_desc *desc);
+void move_masked_irq(struct irq_desc *desc);
#else /* CONFIG_GENERIC_PENDING_IRQ */
@@ -261,11 +262,11 @@ static inline void move_irq(int irq)
{
}
-static inline void move_native_irq(int irq)
+static inline void move_native_irq(struct irq_desc *desc)
{
}
-static inline void move_masked_irq(int irq)
+static inline void move_masked_irq(struct irq_desc *desc)
{
}
@@ -338,7 +339,7 @@ extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
irqreturn_t action_ret);
/* Resending of interrupts :*/
-void check_irq_resend(struct irq_desc *desc, unsigned int irq);
+void check_irq_resend(struct irq_desc *desc);
/* Enable/disable irq debugging output: */
extern int noirqdebug_setup(char *str);
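[ Note: with struct irq_chip fully converted, a minimal driver-side
chip looks like the sketch below, modeled on the pca953x/timbgpio
conversions above; the "foo" names are illustrative:

struct foo_chip {
	u32		irq_mask;
	unsigned int	irq_base;
};

static void foo_irq_mask(struct irq_desc *desc)
{
	struct foo_chip *foo = get_irq_desc_chip_data(desc);

	foo->irq_mask &= ~(1 << (desc->irq - foo->irq_base));
}

static void foo_irq_unmask(struct irq_desc *desc)
{
	struct foo_chip *foo = get_irq_desc_chip_data(desc);

	foo->irq_mask |= 1 << (desc->irq - foo->irq_base);
}

static struct irq_chip foo_irq_chip = {
	.name	= "foo",
	.mask	= foo_irq_mask,
	.unmask	= foo_irq_unmask,
};
]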
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 6991ab5..dc6a904 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -11,8 +11,8 @@ struct msi_msg {
/* Helper functions */
struct irq_desc;
-extern void mask_msi_irq(unsigned int irq);
-extern void unmask_msi_irq(unsigned int irq);
+extern void mask_msi_irq(struct irq_desc *);
+extern void unmask_msi_irq(struct irq_desc *);
extern void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
extern void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
extern void read_msi_msg(unsigned int irq, struct msi_msg *msg);
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
index 2295a31..02ab31e 100644
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -58,8 +58,8 @@ unsigned long probe_irq_on(void)
* progress:
*/
if (desc->chip->set_type)
- desc->chip->set_type(i, IRQ_TYPE_PROBE);
- desc->chip->startup(i);
+ desc->chip->set_type(desc, IRQ_TYPE_PROBE);
+ desc->chip->startup(desc);
}
raw_spin_unlock_irq(&desc->lock);
}
@@ -76,7 +76,7 @@ unsigned long probe_irq_on(void)
raw_spin_lock_irq(&desc->lock);
if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
- if (desc->chip->startup(i))
+ if (desc->chip->startup(desc))
desc->status |= IRQ_PENDING;
}
raw_spin_unlock_irq(&desc->lock);
@@ -98,7 +98,7 @@ unsigned long probe_irq_on(void)
/* It triggered already - consider it spurious. */
if (!(status & IRQ_WAITING)) {
desc->status = status & ~IRQ_AUTODETECT;
- desc->chip->shutdown(i);
+ desc->chip->shutdown(desc);
} else
if (i < 32)
mask |= 1 << i;
@@ -137,7 +137,7 @@ unsigned int probe_irq_mask(unsigned long val)
mask |= 1 << i;
desc->status = status & ~IRQ_AUTODETECT;
- desc->chip->shutdown(i);
+ desc->chip->shutdown(desc);
}
raw_spin_unlock_irq(&desc->lock);
}
@@ -181,7 +181,7 @@ int probe_irq_off(unsigned long val)
nr_of_irqs++;
}
desc->status = status & ~IRQ_AUTODETECT;
- desc->chip->shutdown(i);
+ desc->chip->shutdown(desc);
}
raw_spin_unlock_irq(&desc->lock);
}
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 3dcdd2f..043557a 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -131,7 +131,7 @@ int set_irq_chip(unsigned int irq, struct irq_chip *chip)
unsigned long flags;
if (!desc) {
- WARN(1, KERN_ERR "Trying to install chip for IRQ%d\n", irq);
+ WARN(1, KERN_ERR "Trying to install chip for IRQ%d\n", irq);
return -EINVAL;
}
@@ -287,40 +287,34 @@ EXPORT_SYMBOL_GPL(set_irq_nested_thread);
/*
* default enable function
*/
-static void default_enable(unsigned int irq)
+static void default_enable(struct irq_desc *desc)
{
- struct irq_desc *desc = irq_to_desc(irq);
-
- desc->chip->unmask(irq);
+ desc->chip->unmask(desc);
desc->status &= ~IRQ_MASKED;
}
/*
* default disable function
*/
-static void default_disable(unsigned int irq)
+static void default_disable(struct irq_desc *desc)
{
}
/*
* default startup function
*/
-static unsigned int default_startup(unsigned int irq)
+static unsigned int default_startup(struct irq_desc *desc)
{
- struct irq_desc *desc = irq_to_desc(irq);
-
- desc->chip->enable(irq);
+ desc->chip->enable(desc);
return 0;
}
/*
* default shutdown function
*/
-static void default_shutdown(unsigned int irq)
+static void default_shutdown(struct irq_desc *desc)
{
- struct irq_desc *desc = irq_to_desc(irq);
-
- desc->chip->mask(irq);
+ desc->chip->mask(desc);
desc->status |= IRQ_MASKED;
}
@@ -350,30 +344,30 @@ void irq_chip_set_defaults(struct irq_chip *chip)
chip->end = dummy_irq_chip.end;
}
-static inline void mask_ack_irq(struct irq_desc *desc, int irq)
+static inline void mask_ack_irq(struct irq_desc *desc)
{
if (desc->chip->mask_ack)
- desc->chip->mask_ack(irq);
+ desc->chip->mask_ack(desc);
else {
- desc->chip->mask(irq);
+ desc->chip->mask(desc);
if (desc->chip->ack)
- desc->chip->ack(irq);
+ desc->chip->ack(desc);
}
desc->status |= IRQ_MASKED;
}
-static inline void mask_irq(struct irq_desc *desc, int irq)
+static inline void mask_irq(struct irq_desc *desc)
{
if (desc->chip->mask) {
- desc->chip->mask(irq);
+ desc->chip->mask(desc);
desc->status |= IRQ_MASKED;
}
}
-static inline void unmask_irq(struct irq_desc *desc, int irq)
+static inline void unmask_irq(struct irq_desc *desc)
{
if (desc->chip->unmask) {
- desc->chip->unmask(irq);
+ desc->chip->unmask(desc);
desc->status &= ~IRQ_MASKED;
}
}
@@ -476,7 +470,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
irqreturn_t action_ret;
raw_spin_lock(&desc->lock);
- mask_ack_irq(desc, irq);
+ mask_ack_irq(desc);
if (unlikely(desc->status & IRQ_INPROGRESS))
goto out_unlock;
@@ -502,7 +496,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
desc->status &= ~IRQ_INPROGRESS;
if (!(desc->status & (IRQ_DISABLED | IRQ_ONESHOT)))
- unmask_irq(desc, irq);
+ unmask_irq(desc);
out_unlock:
raw_spin_unlock(&desc->lock);
}
@@ -539,7 +533,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
action = desc->action;
if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
desc->status |= IRQ_PENDING;
- mask_irq(desc, irq);
+ mask_irq(desc);
goto out;
}
@@ -554,7 +548,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
raw_spin_lock(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
out:
- desc->chip->eoi(irq);
+ desc->chip->eoi(desc);
raw_spin_unlock(&desc->lock);
}
@@ -590,14 +584,14 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) ||
!desc->action)) {
desc->status |= (IRQ_PENDING | IRQ_MASKED);
- mask_ack_irq(desc, irq);
+ mask_ack_irq(desc);
goto out_unlock;
}
kstat_incr_irqs_this_cpu(irq, desc);
/* Start handling the irq */
if (desc->chip->ack)
- desc->chip->ack(irq);
+ desc->chip->ack(desc);
/* Mark the IRQ currently in progress.*/
desc->status |= IRQ_INPROGRESS;
@@ -607,7 +601,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
irqreturn_t action_ret;
if (unlikely(!action)) {
- mask_irq(desc, irq);
+ mask_irq(desc);
goto out_unlock;
}
@@ -619,7 +613,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
if (unlikely((desc->status &
(IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
(IRQ_PENDING | IRQ_MASKED))) {
- unmask_irq(desc, irq);
+ unmask_irq(desc);
}
desc->status &= ~IRQ_PENDING;
@@ -651,14 +645,14 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
kstat_incr_irqs_this_cpu(irq, desc);
if (desc->chip->ack)
- desc->chip->ack(irq);
+ desc->chip->ack(desc);
action_ret = handle_IRQ_event(irq, desc->action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
if (desc->chip->eoi)
- desc->chip->eoi(irq);
+ desc->chip->eoi(desc);
}
void
@@ -670,7 +664,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
if (!desc) {
printk(KERN_ERR
- "Trying to install type control for IRQ%d\n", irq);
+ "Trying to install type control for IRQ%d\n",irq);
return;
}
@@ -689,13 +683,13 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
desc->chip = &dummy_irq_chip;
}
- chip_bus_lock(irq, desc);
+ chip_bus_lock(desc);
raw_spin_lock_irqsave(&desc->lock, flags);
/* Uninstall? */
if (handle == handle_bad_irq) {
if (desc->chip != &no_irq_chip)
- mask_ack_irq(desc, irq);
+ mask_ack_irq(desc);
desc->status |= IRQ_DISABLED;
desc->depth = 1;
}
@@ -706,10 +700,10 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
desc->status &= ~IRQ_DISABLED;
desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE;
desc->depth = 0;
- desc->chip->startup(irq);
+ desc->chip->startup(desc);
}
raw_spin_unlock_irqrestore(&desc->lock, flags);
- chip_bus_sync_unlock(irq, desc);
+ chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL_GPL(__set_irq_handler);
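[ Note: after this change the core helpers thread only the descriptor;
the irq number survives solely as desc->irq for statistics and
printks. A stripped-down model of the level flow (locking and error
paths of the real handle_level_irq() omitted):

static void toy_handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);			/* was mask_ack_irq(desc, irq) */
	desc->status |= IRQ_INPROGRESS;
	raw_spin_unlock(&desc->lock);

	handle_IRQ_event(irq, desc->action);

	raw_spin_lock(&desc->lock);
	desc->status &= ~IRQ_INPROGRESS;
	if (!(desc->status & IRQ_DISABLED))
		unmask_irq(desc);		/* was unmask_irq(desc, irq) */
	raw_spin_unlock(&desc->lock);
}
]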
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index f30c9c7..d7a61a0 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -38,9 +38,9 @@ struct lock_class_key irq_desc_lock_class;
*/
void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
{
- print_irq_desc(irq, desc);
+ print_irq_desc(desc);
kstat_incr_irqs_this_cpu(irq, desc);
- ack_bad_irq(irq);
+ ack_bad_irq(desc);
}
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
@@ -295,22 +295,20 @@ void clear_kstat_irqs(struct irq_desc *desc)
* What should we do if we get a hw irq event on an illegal vector?
* Each architecture has to answer this themself.
*/
-static void ack_bad(unsigned int irq)
+static void ack_bad(struct irq_desc *desc)
{
- struct irq_desc *desc = irq_to_desc(irq);
-
- print_irq_desc(irq, desc);
- ack_bad_irq(irq);
+ print_irq_desc(desc);
+ ack_bad_irq(desc);
}
/*
* NOP functions
*/
-static void noop(unsigned int irq)
+static void noop(struct irq_desc *desc)
{
}
-static unsigned int noop_ret(unsigned int irq)
+static unsigned int noop_ret(struct irq_desc *desc)
{
return 0;
}
@@ -464,19 +462,19 @@ unsigned int __do_IRQ(unsigned int irq)
* No locking required for CPU-local interrupts:
*/
if (desc->chip->ack)
- desc->chip->ack(irq);
+ desc->chip->ack(desc);
if (likely(!(desc->status & IRQ_DISABLED))) {
action_ret = handle_IRQ_event(irq, desc->action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
}
- desc->chip->end(irq);
+ desc->chip->end(desc);
return 1;
}
raw_spin_lock(&desc->lock);
if (desc->chip->ack)
- desc->chip->ack(irq);
+ desc->chip->ack(desc);
/*
* REPLAY is when Linux resends an IRQ that was dropped earlier
* WAITING is used by probe to mark irqs that are being tested
@@ -536,7 +534,7 @@ out:
* The ->end() handler has to deal with interrupts which got
* disabled while the handler was running.
*/
- desc->chip->end(irq);
+ desc->chip->end(desc);
raw_spin_unlock(&desc->lock);
return 1;
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index c63f3bc..f74a64f 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -12,8 +12,8 @@ extern void compat_irq_chip_set_default_handler(struct irq_desc *desc);
extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
unsigned long flags);
-extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
-extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
+extern void __disable_irq(struct irq_desc *desc, bool susp);
+extern void __enable_irq(struct irq_desc *desc, bool resume);
extern struct lock_class_key irq_desc_lock_class;
extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
@@ -41,16 +41,16 @@ extern int irq_select_affinity_usr(unsigned int irq);
extern void irq_set_thread_affinity(struct irq_desc *desc);
/* Inline functions for support of irq chips on slow busses */
-static inline void chip_bus_lock(unsigned int irq, struct irq_desc *desc)
+static inline void chip_bus_lock(struct irq_desc *desc)
{
if (unlikely(desc->chip->bus_lock))
- desc->chip->bus_lock(irq);
+ desc->chip->bus_lock(desc);
}
-static inline void chip_bus_sync_unlock(unsigned int irq, struct irq_desc *desc)
+static inline void chip_bus_sync_unlock(struct irq_desc *desc)
{
if (unlikely(desc->chip->bus_sync_unlock))
- desc->chip->bus_sync_unlock(irq);
+ desc->chip->bus_sync_unlock(desc);
}
/*
@@ -61,10 +61,10 @@ static inline void chip_bus_sync_unlock(unsigned int irq, struct irq_desc *desc)
#define P(f) if (desc->status & f) printk("%14s set\n", #f)
-static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
+static inline void print_irq_desc(struct irq_desc *desc)
{
printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n",
- irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);
+ desc->irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);
printk("->handle_irq(): %p, ", desc->handle_irq);
print_symbol("%s\n", (unsigned long)desc->handle_irq);
printk("->chip(): %p, ", desc->chip);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 69a3d7b..868521a 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -118,7 +118,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
#ifdef CONFIG_GENERIC_PENDING_IRQ
if (desc->status & IRQ_MOVE_PCNTXT) {
- if (!desc->chip->set_affinity(irq, cpumask)) {
+ if (!desc->chip->set_affinity(desc, cpumask)) {
cpumask_copy(desc->affinity, cpumask);
irq_set_thread_affinity(desc);
}
@@ -128,7 +128,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
cpumask_copy(desc->pending_mask, cpumask);
}
#else
- if (!desc->chip->set_affinity(irq, cpumask)) {
+ if (!desc->chip->set_affinity(desc, cpumask)) {
cpumask_copy(desc->affinity, cpumask);
irq_set_thread_affinity(desc);
}
@@ -161,7 +161,7 @@ static int setup_affinity(unsigned int irq, struct irq_desc *desc)
cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity);
set_affinity:
- desc->chip->set_affinity(irq, desc->affinity);
+ desc->chip->set_affinity(desc, desc->affinity);
return 0;
}
@@ -197,7 +197,7 @@ static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
}
#endif
-void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
+void __disable_irq(struct irq_desc *desc, bool suspend)
{
if (suspend) {
if (!desc->action || (desc->action->flags & IRQF_TIMER))
@@ -207,7 +207,7 @@ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
if (!desc->depth++) {
desc->status |= IRQ_DISABLED;
- desc->chip->disable(irq);
+ desc->chip->disable(desc);
}
}
@@ -230,11 +230,11 @@ void disable_irq_nosync(unsigned int irq)
if (!desc)
return;
- chip_bus_lock(irq, desc);
+ chip_bus_lock(desc);
raw_spin_lock_irqsave(&desc->lock, flags);
- __disable_irq(desc, irq, false);
+ __disable_irq(desc, false);
raw_spin_unlock_irqrestore(&desc->lock, flags);
- chip_bus_sync_unlock(irq, desc);
+ chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(disable_irq_nosync);
@@ -263,7 +263,7 @@ void disable_irq(unsigned int irq)
}
EXPORT_SYMBOL(disable_irq);
-void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
+void __enable_irq(struct irq_desc *desc, bool resume)
{
if (resume)
desc->status &= ~IRQ_SUSPENDED;
@@ -271,7 +271,7 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
switch (desc->depth) {
case 0:
err_out:
- WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
+ WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", desc->irq);
break;
case 1: {
unsigned int status = desc->status & ~IRQ_DISABLED;
@@ -280,7 +280,7 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
goto err_out;
/* Prevent probing on this irq: */
desc->status = status | IRQ_NOPROBE;
- check_irq_resend(desc, irq);
+ check_irq_resend(desc);
/* fall-through */
}
default:
@@ -307,21 +307,20 @@ void enable_irq(unsigned int irq)
if (!desc)
return;
- chip_bus_lock(irq, desc);
+ chip_bus_lock(desc);
raw_spin_lock_irqsave(&desc->lock, flags);
- __enable_irq(desc, irq, false);
+ __enable_irq(desc, false);
raw_spin_unlock_irqrestore(&desc->lock, flags);
- chip_bus_sync_unlock(irq, desc);
+ chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(enable_irq);
-static int set_irq_wake_real(unsigned int irq, unsigned int on)
+static int set_irq_wake_real(struct irq_desc *desc, unsigned int on)
{
- struct irq_desc *desc = irq_to_desc(irq);
int ret = -ENXIO;
if (desc->chip->set_wake)
- ret = desc->chip->set_wake(irq, on);
+ ret = desc->chip->set_wake(desc, on);
return ret;
}
@@ -350,7 +349,7 @@ int set_irq_wake(unsigned int irq, unsigned int on)
raw_spin_lock_irqsave(&desc->lock, flags);
if (on) {
if (desc->wake_depth++ == 0) {
- ret = set_irq_wake_real(irq, on);
+ ret = set_irq_wake_real(desc, on);
if (ret)
desc->wake_depth = 0;
else
@@ -360,7 +359,7 @@ int set_irq_wake(unsigned int irq, unsigned int on)
if (desc->wake_depth == 0) {
WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
} else if (--desc->wake_depth == 0) {
- ret = set_irq_wake_real(irq, on);
+ ret = set_irq_wake_real(desc, on);
if (ret)
desc->wake_depth = 1;
else
@@ -425,7 +424,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
}
/* caller masked out all except trigger mode flags */
- ret = chip->set_type(irq, flags);
+ ret = chip->set_type(desc, flags);
if (ret)
pr_err("setting trigger mode %d for irq %u failed (%pF)\n",
@@ -481,10 +480,10 @@ static int irq_wait_for_interrupt(struct irqaction *action)
* handler finished. unmask if the interrupt has not been disabled and
* is marked MASKED.
*/
-static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
+static void irq_finalize_oneshot(struct irq_desc *desc)
{
again:
- chip_bus_lock(irq, desc);
+ chip_bus_lock(desc);
raw_spin_lock_irq(&desc->lock);
/*
@@ -498,17 +497,17 @@ again:
*/
if (unlikely(desc->status & IRQ_INPROGRESS)) {
raw_spin_unlock_irq(&desc->lock);
- chip_bus_sync_unlock(irq, desc);
+ chip_bus_sync_unlock(desc);
cpu_relax();
goto again;
}
if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
desc->status &= ~IRQ_MASKED;
- desc->chip->unmask(irq);
+ desc->chip->unmask(desc);
}
raw_spin_unlock_irq(&desc->lock);
- chip_bus_sync_unlock(irq, desc);
+ chip_bus_sync_unlock(desc);
}
#ifdef CONFIG_SMP
@@ -580,7 +579,7 @@ static int irq_thread(void *data)
action->thread_fn(action->irq, action->dev_id);
if (oneshot)
- irq_finalize_oneshot(action->irq, desc);
+ irq_finalize_oneshot(desc);
}
wake = atomic_dec_and_test(&desc->threads_active);
@@ -756,7 +755,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
if (!(desc->status & IRQ_NOAUTOEN)) {
desc->depth = 0;
desc->status &= ~IRQ_DISABLED;
- desc->chip->startup(irq);
+ desc->chip->startup(desc);
} else
/* Undo nested disables: */
desc->depth = 1;
@@ -790,7 +789,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
*/
if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
desc->status &= ~IRQ_SPURIOUS_DISABLED;
- __enable_irq(desc, irq, false);
+ __enable_irq(desc, false);
}
raw_spin_unlock_irqrestore(&desc->lock, flags);
@@ -897,9 +896,9 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
if (!desc->action) {
desc->status |= IRQ_DISABLED;
if (desc->chip->shutdown)
- desc->chip->shutdown(irq);
+ desc->chip->shutdown(desc);
else
- desc->chip->disable(irq);
+ desc->chip->disable(desc);
}
raw_spin_unlock_irqrestore(&desc->lock, flags);
@@ -968,9 +967,9 @@ void free_irq(unsigned int irq, void *dev_id)
if (!desc)
return;
- chip_bus_lock(irq, desc);
+ chip_bus_lock(desc);
kfree(__free_irq(irq, dev_id));
- chip_bus_sync_unlock(irq, desc);
+ chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(free_irq);
@@ -1077,9 +1076,9 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
action->name = devname;
action->dev_id = dev_id;
- chip_bus_lock(irq, desc);
+ chip_bus_lock(desc);
retval = __setup_irq(irq, desc, action);
- chip_bus_sync_unlock(irq, desc);
+ chip_bus_sync_unlock(desc);
if (retval)
kfree(action);
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 2419622..6b3d1aa 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -4,10 +4,8 @@
#include "internals.h"
-void move_masked_irq(int irq)
+void move_masked_irq(struct irq_desc *desc)
{
- struct irq_desc *desc = irq_to_desc(irq);
-
if (likely(!(desc->status & IRQ_MOVE_PENDING)))
return;
@@ -43,7 +41,7 @@ void move_masked_irq(int irq)
*/
if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
< nr_cpu_ids))
- if (!desc->chip->set_affinity(irq, desc->pending_mask)) {
+ if (!desc->chip->set_affinity(desc, desc->pending_mask)) {
cpumask_copy(desc->affinity, desc->pending_mask);
irq_set_thread_affinity(desc);
}
@@ -51,18 +49,16 @@ void move_masked_irq(int irq)
cpumask_clear(desc->pending_mask);
}
-void move_native_irq(int irq)
+void move_native_irq(struct irq_desc *desc)
{
- struct irq_desc *desc = irq_to_desc(irq);
-
if (likely(!(desc->status & IRQ_MOVE_PENDING)))
return;
if (unlikely(desc->status & IRQ_DISABLED))
return;
- desc->chip->mask(irq);
- move_masked_irq(irq);
- desc->chip->unmask(irq);
+ desc->chip->mask(desc);
+ move_masked_irq(desc);
+ desc->chip->unmask(desc);
}
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index 0d4005d..a43c93d 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -29,7 +29,7 @@ void suspend_device_irqs(void)
unsigned long flags;
raw_spin_lock_irqsave(&desc->lock, flags);
- __disable_irq(desc, irq, true);
+ __disable_irq(desc, true);
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
@@ -57,7 +57,7 @@ void resume_device_irqs(void)
continue;
raw_spin_lock_irqsave(&desc->lock, flags);
- __enable_irq(desc, irq, true);
+ __enable_irq(desc, true);
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
}
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index 090c376..ddf75cd 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -53,14 +53,14 @@ static DECLARE_TASKLET(resend_tasklet, resend_irqs, 0);
*
* Is called with interrupts disabled and desc->lock held.
*/
-void check_irq_resend(struct irq_desc *desc, unsigned int irq)
+void check_irq_resend(struct irq_desc *desc)
{
unsigned int status = desc->status;
/*
* Make sure the interrupt is enabled, before resending it:
*/
- desc->chip->enable(irq);
+ desc->chip->enable(desc);
/*
* We do not resend level type interrupts. Level type
@@ -70,10 +70,10 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq)
if ((status & (IRQ_LEVEL | IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY;
- if (!desc->chip->retrigger || !desc->chip->retrigger(irq)) {
+ if (!desc->chip->retrigger || !desc->chip->retrigger(desc)) {
#ifdef CONFIG_HARDIRQS_SW_RESEND
/* Set it pending and activate the softirq: */
- set_bit(irq, irqs_resend);
+ set_bit(desc->irq, irqs_resend);
tasklet_schedule(&resend_tasklet);
#endif
}
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 89fb90a..42b0972 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -79,7 +79,7 @@ static int try_one_irq(int irq, struct irq_desc *desc)
* IRQ controller clean up too
*/
if (work && desc->chip && desc->chip->end)
- desc->chip->end(irq);
+ desc->chip->end(desc);
raw_spin_unlock(&desc->lock);
return ok;
@@ -254,7 +254,7 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED;
desc->depth++;
- desc->chip->disable(irq);
+ desc->chip->disable(desc);
mod_timer(&poll_spurious_irq_timer,
jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
--
1.6.4.2
-v2: change to a blacklist instead
-v3: just remove it; anyone who needs extra hotplug CPUs can use possible_cpus= directly.
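For example (the value here is hypothetical), a user who still wants room for
additional hotplug CPUs after this change can boot with possible_cpus=8 on the
kernel command line.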
Signed-off-by: Yinghai Lu <yin...@kernel.org>
---
arch/x86/kernel/smpboot.c | 3 +--
1 files changed, 1 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index a1483ac..3f92885 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1251,7 +1251,6 @@ early_param("possible_cpus", _setup_possible_cpus);
* - Ashok Raj
*
* Three ways to find out the number of additional hotplug CPUs:
- * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
* - The user can overwrite it with possible_cpus=NUM
* - Otherwise don't reserve additional CPUs.
* We do this because additional CPUs waste a lot of memory.
@@ -1266,7 +1265,7 @@ __init void prefill_possible_map(void)
num_processors = 1;
if (setup_possible_cpus == -1)
- possible = num_processors + disabled_cpus;
+ possible = num_processors;
else
possible = setup_possible_cpus;
--
1.6.4.2
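Mark the now-static e820 map __initdata like e820_saved, change
e820_any_mapped() to __init_refok, and drop its EXPORT_SYMBOL_GPL.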
Signed-off-by: Yinghai Lu <yin...@kernel.org>
---
arch/x86/kernel/e820.c | 8 ++++----
1 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 40c04cf..a558609 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -34,7 +34,7 @@
* user can e.g. boot the original kernel with mem=1G while still booting the
* next kernel with full memory.
*/
-static struct e820map e820;
+static struct e820map __initdata e820;
static struct e820map __initdata e820_saved;
/* For PCI or other memory-mapped resources */
@@ -46,9 +46,10 @@ EXPORT_SYMBOL(pci_mem_start);
/*
* This function checks if any part of the range <start,end> is mapped
* with type.
+ * phys_pud_init() uses it and is __meminit, but we have !after_bootmem,
+ * so __init_refok can be used here
*/
-int
-e820_any_mapped(u64 start, u64 end, unsigned type)
+int __init_refok e820_any_mapped(u64 start, u64 end, unsigned type)
{
int i;
@@ -63,7 +64,6 @@ e820_any_mapped(u64 start, u64 end, unsigned type)
}
return 0;
}
-EXPORT_SYMBOL_GPL(e820_any_mapped);
/*
* This function checks if the entire range <start,end> is mapped with type.
--
1.6.4.2
Make e820 and e820_saved static to e820.c, adding sanitize_e820_map() and
save_e820_map() wrappers for code outside e820.c; this also lets us change
e820_saved to __initdata.
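The caller-side conversion is mechanical; a representative before/after, taken
from the setup.c hunk below:

	/* before: callers reach into the maps directly */
	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
	memcpy(&e820_saved, &e820, sizeof(struct e820map));

	/* after: no-argument wrappers operate on the now-static maps */
	sanitize_e820_map();
	save_e820_map();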
Signed-off-by: Yinghai Lu <yin...@kernel.org>
---
arch/x86/include/asm/e820.h | 7 ++-----
arch/x86/kernel/e820.c | 28 +++++++++++++++++++---------
arch/x86/kernel/efi.c | 2 +-
arch/x86/kernel/setup.c | 10 +++++-----
arch/x86/xen/setup.c | 4 +---
5 files changed, 28 insertions(+), 23 deletions(-)
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index 38828c7..71c0348 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -68,9 +68,6 @@ struct e820map {
#define BIOS_END 0x00100000
#ifdef __KERNEL__
-/* see comment in arch/x86/kernel/e820.c */
-extern struct e820map e820;
-extern struct e820map e820_saved;
#ifdef CONFIG_X86_OOSTORE
extern int centaur_ram_top;
@@ -86,8 +83,8 @@ extern int e820_any_mapped(u64 start, u64 end, unsigned type);
extern int e820_all_mapped(u64 start, u64 end, unsigned type);
extern void e820_add_region(u64 start, u64 size, int type);
extern void e820_print_map(char *who);
-extern int
-sanitize_e820_map(struct e820entry *biosmap, int max_nr_map, u32 *pnr_map);
+int sanitize_e820_map(void);
+void save_e820_map(void);
extern u64 e820_update_range(u64 start, u64 size, unsigned old_type,
unsigned new_type);
extern u64 e820_remove_range(u64 start, u64 size, unsigned old_type,
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 119c0e1..40c04cf 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -34,8 +34,8 @@
* user can e.g. boot the original kernel with mem=1G while still booting the
* next kernel with full memory.
*/
-struct e820map e820;
-struct e820map e820_saved;
+static struct e820map e820;
+static struct e820map __initdata e820_saved;
/* For PCI or other memory-mapped resources */
unsigned long pci_mem_start = 0xaeedbabe;
@@ -224,7 +224,7 @@ void __init e820_print_map(char *who)
* ______________________4_
*/
-int __init sanitize_e820_map(struct e820entry *biosmap, int max_nr_map,
+static int __init __sanitize_e820_map(struct e820entry *biosmap, int max_nr_map,
u32 *pnr_map)
{
struct change_member {
@@ -383,6 +383,11 @@ int __init sanitize_e820_map(struct e820entry *biosmap, int max_nr_map,
return 0;
}
+int __init sanitize_e820_map(void)
+{
+ return __sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+}
+
static int __init __append_e820_map(struct e820entry *biosmap, int nr_map)
{
while (nr_map) {
@@ -555,7 +560,7 @@ void __init update_e820(void)
u32 nr_map;
nr_map = e820.nr_map;
- if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr_map))
+ if (__sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr_map))
return;
e820.nr_map = nr_map;
printk(KERN_INFO "modified physical RAM map:\n");
@@ -566,7 +571,7 @@ static void __init update_e820_saved(void)
u32 nr_map;
nr_map = e820_saved.nr_map;
- if (sanitize_e820_map(e820_saved.map, ARRAY_SIZE(e820_saved.map), &nr_map))
+ if (__sanitize_e820_map(e820_saved.map, ARRAY_SIZE(e820_saved.map), &nr_map))
return;
e820_saved.nr_map = nr_map;
}
@@ -661,7 +666,7 @@ void __init parse_e820_ext(struct setup_data *sdata, unsigned long pa_data)
sdata = early_ioremap(pa_data, map_len);
extmap = (struct e820entry *)(sdata->data);
__append_e820_map(extmap, entries);
- sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+ sanitize_e820_map();
if (map_len > PAGE_SIZE)
early_iounmap(sdata, map_len);
printk(KERN_INFO "extended physical RAM map:\n");
@@ -1043,7 +1048,7 @@ void __init finish_e820_parsing(void)
if (userdef) {
u32 nr = e820.nr_map;
- if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr) < 0)
+ if (__sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr) < 0)
early_panic("Invalid user supplied memory map");
e820.nr_map = nr;
@@ -1173,7 +1178,7 @@ char *__init default_machine_specific_memory_setup(void)
* the next section from 1mb->appropriate_mem_k
*/
new_nr = boot_params.e820_entries;
- sanitize_e820_map(boot_params.e820_map,
+ __sanitize_e820_map(boot_params.e820_map,
ARRAY_SIZE(boot_params.e820_map),
&new_nr);
boot_params.e820_entries = new_nr;
@@ -1200,12 +1205,17 @@ char *__init default_machine_specific_memory_setup(void)
return who;
}
+void __init save_e820_map(void)
+{
+ memcpy(&e820_saved, &e820, sizeof(struct e820map));
+}
+
void __init setup_memory_map(void)
{
char *who;
who = x86_init.resources.memory_setup();
- memcpy(&e820_saved, &e820, sizeof(struct e820map));
+ save_e820_map();
printk(KERN_INFO "BIOS-provided physical RAM map:\n");
e820_print_map(who);
}
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
index c2fa9b8..299f03f 100644
--- a/arch/x86/kernel/efi.c
+++ b/arch/x86/kernel/efi.c
@@ -272,7 +272,7 @@ static void __init do_add_efi_memmap(void)
}
e820_add_region(start, size, e820_type);
}
- sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+ sanitize_e820_map();
}
void __init efi_reserve_early(void)
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index c5ea524..82533cf 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -461,8 +461,8 @@ static void __init e820_reserve_setup_data(void)
if (!found)
return;
- sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
- memcpy(&e820_saved, &e820, sizeof(struct e820map));
+ sanitize_e820_map();
+ save_e820_map();
printk(KERN_INFO "extended physical RAM map:\n");
e820_print_map("reserve setup_data");
}
@@ -614,7 +614,7 @@ static int __init dmi_low_memory_corruption(const struct dmi_system_id *d)
d->ident);
e820_update_range(0, 0x10000, E820_RAM, E820_RESERVED);
- sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+ sanitize_e820_map();
return 0;
}
@@ -683,7 +683,7 @@ static void __init trim_bios_range(void)
* take them out.
*/
e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
- sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+ sanitize_e820_map();
}
/*
@@ -854,7 +854,7 @@ void __init setup_arch(char **cmdline_p)
if (ppro_with_ram_bug()) {
e820_update_range(0x70000000ULL, 0x40000ULL, E820_RAM,
E820_RESERVED);
- sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+ sanitize_e820_map();
printk(KERN_INFO "fixed physical RAM map:\n");
e820_print_map("bad_ppro");
}
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index ad0047f..3f2c411 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -43,8 +43,6 @@ char * __init xen_memory_setup(void)
max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
- e820.nr_map = 0;
-
e820_add_region(0, PFN_PHYS((u64)max_pfn), E820_RAM);
/*
@@ -65,7 +63,7 @@ char * __init xen_memory_setup(void)
__pa(xen_start_info->pt_base),
"XEN START INFO");
- sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+ sanitize_e820_map();
return "Xen";
}
--
1.6.4.2
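Convert the interrupt-remapping helpers (get_irte, modify_irte, alloc_irte,
free_irte, ...) and their io_apic.c callers to take struct irq_desc * instead
of an integer irq, avoiding repeated irq_to_desc() lookups.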
Signed-off-by: Yinghai Lu <yin...@kernel.org>
---
arch/x86/include/asm/io_apic.h | 4 --
arch/x86/kernel/apic/io_apic.c | 80 +++++++++++++++++++---------------------
drivers/pci/intr_remapping.c | 69 ++++++++++++++--------------------
include/linux/dmar.h | 36 +++++++++--------
4 files changed, 86 insertions(+), 103 deletions(-)
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index c4683b9..d249186 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -171,10 +171,6 @@ extern int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
extern void probe_nr_irqs_gsi(void);
-extern int setup_ioapic_entry(int apic, int irq,
- struct IO_APIC_route_entry *entry,
- unsigned int destination, int trigger,
- int polarity, int vector, int pin);
extern void ioapic_write_entry(int apic, int pin,
struct IO_APIC_route_entry e);
extern void setup_ioapic_ids_from_mpc(void);
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 5f061b7..c03bcd4 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1340,7 +1340,7 @@ static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long t
else
desc->status &= ~IRQ_LEVEL;
- if (irq_remapped(irq)) {
+ if (irq_remapped(desc)) {
desc->status |= IRQ_MOVE_PCNTXT;
if (trigger)
set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
@@ -1362,7 +1362,7 @@ static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long t
handle_edge_irq, "edge");
}
-int setup_ioapic_entry(int apic_id, int irq,
+static int setup_ioapic_entry(int apic_id, struct irq_desc *desc,
struct IO_APIC_route_entry *entry,
unsigned int destination, int trigger,
int polarity, int vector, int pin)
@@ -1382,7 +1382,7 @@ int setup_ioapic_entry(int apic_id, int irq,
if (!iommu)
panic("No mapping iommu for ioapic %d\n", apic_id);
- index = alloc_irte(iommu, irq, 1);
+ index = alloc_irte(iommu, desc, 1);
if (index < 0)
panic("Failed to allocate IRTE for ioapic %d\n", apic_id);
@@ -1405,7 +1405,7 @@ int setup_ioapic_entry(int apic_id, int irq,
/* Set source-id of interrupt request */
set_ioapic_sid(&irte, apic_id);
- modify_irte(irq, &irte);
+ modify_irte(desc, &irte);
ir_entry->index2 = (index >> 15) & 0x1;
ir_entry->zero = 0;
@@ -1467,7 +1467,7 @@ static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq
irq, trigger, polarity);
- if (setup_ioapic_entry(mp_ioapics[apic_id].apicid, irq, &entry,
+ if (setup_ioapic_entry(mp_ioapics[apic_id].apicid, desc, &entry,
dest, trigger, polarity, cfg->vector, pin)) {
printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
mp_ioapics[apic_id].apicid, pin);
@@ -2334,7 +2334,7 @@ void send_cleanup_vector(struct irq_cfg *cfg)
cfg->move_in_progress = 0;
}
-static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
+static void __target_IO_APIC_irq(struct irq_desc *desc, unsigned int dest, struct irq_cfg *cfg)
{
int apic, pin;
struct irq_pin_list *entry;
@@ -2349,7 +2349,7 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq
* With interrupt-remapping, destination information comes
* from interrupt-remapping table entry.
*/
- if (!irq_remapped(irq))
+ if (!irq_remapped(desc))
io_apic_write(apic, 0x11 + pin*2, dest);
reg = io_apic_read(apic, 0x10 + pin*2);
reg &= ~IO_APIC_REDIR_VECTOR_MASK;
@@ -2388,10 +2388,8 @@ set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
struct irq_cfg *cfg;
unsigned long flags;
unsigned int dest;
- unsigned int irq;
int ret = -1;
- irq = desc->irq;
cfg = desc->chip_data;
raw_spin_lock_irqsave(&ioapic_lock, flags);
@@ -2399,7 +2397,7 @@ set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
if (!ret) {
/* Only the high 8 bits are valid. */
dest = SET_APIC_LOGICAL_ID(dest);
- __target_IO_APIC_irq(irq, dest, cfg);
+ __target_IO_APIC_irq(desc, dest, cfg);
}
raw_spin_unlock_irqrestore(&ioapic_lock, flags);
@@ -2431,14 +2429,12 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
struct irq_cfg *cfg;
struct irte irte;
unsigned int dest;
- unsigned int irq;
int ret = -1;
if (!cpumask_intersects(mask, cpu_online_mask))
return ret;
- irq = desc->irq;
- if (get_irte(irq, &irte))
+ if (get_irte(desc, &irte))
return ret;
cfg = desc->chip_data;
@@ -2453,7 +2449,7 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
/*
* Modified the IRTE and flushes the Interrupt entry cache.
*/
- modify_irte(irq, &irte);
+ modify_irte(desc, &irte);
if (cfg->move_in_progress)
send_cleanup_vector(cfg);
@@ -2591,7 +2587,7 @@ atomic_t irq_mis_count;
* Otherwise, we simulate the EOI message manually by changing the trigger
* mode to edge and then back to level, with RTE being masked during this.
*/
-static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
+static void __eoi_ioapic_irq(struct irq_desc *desc, struct irq_cfg *cfg)
{
struct irq_pin_list *entry;
@@ -2603,7 +2599,7 @@ static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
* intr-remapping table entry. Hence for the io-apic
* EOI we use the pin number.
*/
- if (irq_remapped(irq))
+ if (irq_remapped(desc))
io_apic_eoi(entry->apic, entry->pin);
else
io_apic_eoi(entry->apic, cfg->vector);
@@ -2618,13 +2614,11 @@ static void eoi_ioapic_irq(struct irq_desc *desc)
{
struct irq_cfg *cfg;
unsigned long flags;
- unsigned int irq;
- irq = desc->irq;
cfg = desc->chip_data;
raw_spin_lock_irqsave(&ioapic_lock, flags);
- __eoi_ioapic_irq(irq, cfg);
+ __eoi_ioapic_irq(desc, cfg);
raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
@@ -3328,11 +3322,11 @@ void destroy_irq(unsigned int irq)
struct irq_desc *desc;
struct irq_cfg *cfg;
- dynamic_irq_cleanup_keep_chip_data(irq_to_desc(irq));
+ desc = irq_to_desc(irq);
+ dynamic_irq_cleanup_keep_chip_data(desc);
- free_irte(irq);
+ free_irte(desc);
raw_spin_lock_irqsave(&vector_lock, flags);
- desc = irq_to_desc(irq);
cfg = desc->chip_data;
__clear_irq_vector(desc, cfg);
raw_spin_unlock_irqrestore(&vector_lock, flags);
@@ -3345,7 +3339,6 @@ void destroy_irq(unsigned int irq)
static int msi_compose_msg(struct pci_dev *pdev, struct irq_desc *desc,
struct msi_msg *msg, u8 hpet_id)
{
- unsigned int irq = desc->irq;
struct irq_cfg *cfg;
int err;
unsigned dest;
@@ -3360,15 +3353,15 @@ static int msi_compose_msg(struct pci_dev *pdev, struct irq_desc *desc,
dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
- if (irq_remapped(irq)) {
+ if (irq_remapped(desc)) {
struct irte irte;
int ir_index;
u16 sub_handle;
- ir_index = map_irq_to_irte_handle(irq, &sub_handle);
+ ir_index = map_irq_to_irte_handle(desc, &sub_handle);
BUG_ON(ir_index == -1);
- memset (&irte, 0, sizeof(irte));
+ memset(&irte, 0, sizeof(irte));
irte.present = 1;
irte.dst_mode = apic->irq_dest_mode;
@@ -3383,7 +3376,7 @@ static int msi_compose_msg(struct pci_dev *pdev, struct irq_desc *desc,
else
set_hpet_sid(&irte, hpet_id);
- modify_irte(irq, &irte);
+ modify_irte(desc, &irte);
msg->address_hi = MSI_ADDR_BASE_HI;
msg->data = sub_handle;
@@ -3450,12 +3443,11 @@ static int set_msi_irq_affinity(struct irq_desc *desc, const struct cpumask *mas
static int
ir_set_msi_irq_affinity(struct irq_desc *desc, const struct cpumask *mask)
{
- unsigned int irq = desc->irq;
struct irq_cfg *cfg = desc->chip_data;
unsigned int dest;
struct irte irte;
- if (get_irte(irq, &irte))
+ if (get_irte(desc, &irte))
return -1;
if (set_desc_affinity(desc, mask, &dest))
@@ -3467,7 +3459,7 @@ ir_set_msi_irq_affinity(struct irq_desc *desc, const struct cpumask *mask)
/*
* atomically update the IRTE with the new destination and vector.
*/
- modify_irte(irq, &irte);
+ modify_irte(desc, &irte);
/*
* After this point, all the interrupts will start arriving
@@ -3522,7 +3514,7 @@ static struct irq_chip msi_ir_chip = {
* and allocate 'nvec' consecutive interrupt-remapping table entries
* in it.
*/
-static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
+static int msi_alloc_irte(struct pci_dev *dev, struct irq_desc *desc, int nvec)
{
struct intel_iommu *iommu;
int index;
@@ -3534,7 +3526,7 @@ static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
return -ENOENT;
}
- index = alloc_irte(iommu, irq, nvec);
+ index = alloc_irte(iommu, desc, nvec);
if (index < 0) {
printk(KERN_ERR
"Unable to allocate %d IRTE for PCI %s\n", nvec,
@@ -3544,20 +3536,22 @@ static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
return index;
}
-static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
+static int
+setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, struct irq_desc *desc)
{
int ret;
struct msi_msg msg;
- struct irq_desc *desc = irq_to_desc(irq);
+ unsigned int irq;
ret = msi_compose_msg(dev, desc, &msg, -1);
if (ret < 0)
return ret;
- set_irq_msi(irq, msidesc);
- write_msi_msg(irq, &msg);
+ set_irq_desc_msi(desc, msidesc);
+ write_msi_msg_desc(desc, &msg);
- if (irq_remapped(irq)) {
+ irq = desc->irq;
+ if (irq_remapped(desc)) {
/*
* irq migration in process context
*/
@@ -3574,6 +3568,7 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
unsigned int irq;
+ struct irq_desc *desc;
int ret, sub_handle;
struct msi_desc *msidesc;
unsigned int irq_want;
@@ -3593,6 +3588,7 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
if (irq == 0)
return -1;
irq_want = irq + 1;
+ desc = irq_to_desc(irq);
if (!intr_remapping_enabled)
goto no_ir;
@@ -3601,7 +3597,7 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
* allocate the consecutive block of IRTE's
* for 'nvec'
*/
- index = msi_alloc_irte(dev, irq, nvec);
+ index = msi_alloc_irte(dev, desc, nvec);
if (index < 0) {
ret = index;
goto error;
@@ -3617,10 +3613,10 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
* base index, the sub_handle pointing to the
* appropriate interrupt remap table entry.
*/
- set_irte_irq(irq, iommu, index, sub_handle);
+ set_irte_irq(desc, iommu, index, sub_handle);
}
no_ir:
- ret = setup_msi_irq(dev, msidesc, irq);
+ ret = setup_msi_irq(dev, msidesc, desc);
if (ret < 0)
goto error;
sub_handle++;
@@ -3765,7 +3761,7 @@ int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
if (!iommu)
return -1;
- index = alloc_irte(iommu, irq, 1);
+ index = alloc_irte(iommu, desc, 1);
if (index < 0)
return -1;
}
@@ -3776,7 +3772,7 @@ int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
hpet_msi_write(desc, &msg);
desc->status |= IRQ_MOVE_PCNTXT;
- if (irq_remapped(irq))
+ if (irq_remapped(desc))
set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type,
handle_edge_irq, "edge");
else
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 95b8491..1c03bc7 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -45,33 +45,27 @@ static struct irq_2_iommu *get_one_free_irq_2_iommu(int node)
return iommu;
}
-static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
+static struct irq_2_iommu *irq_2_iommu(struct irq_desc *desc)
{
- struct irq_desc *desc;
-
- desc = irq_to_desc(irq);
-
if (WARN_ON_ONCE(!desc))
return NULL;
return desc->irq_2_iommu;
}
-static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
+static struct irq_2_iommu *irq_2_iommu_alloc(struct irq_desc *desc)
{
- struct irq_desc *desc;
struct irq_2_iommu *irq_iommu;
- desc = irq_to_desc(irq);
if (!desc) {
- printk(KERN_INFO "can not get irq_desc for %d\n", irq);
+ printk(KERN_INFO "can not get irq_desc\n");
return NULL;
}
irq_iommu = desc->irq_2_iommu;
if (!irq_iommu)
- desc->irq_2_iommu = get_one_free_irq_2_iommu(irq_node(irq));
+ desc->irq_2_iommu = get_one_free_irq_2_iommu(irq_desc_node(desc));
return desc->irq_2_iommu;
}
@@ -80,26 +74,27 @@ static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
-static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
+static struct irq_2_iommu *irq_2_iommu(struct irq_desc *desc)
{
+ unsigned int irq = desc->irq;
if (irq < nr_irqs)
return &irq_2_iommuX[irq];
return NULL;
}
-static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
+static struct irq_2_iommu *irq_2_iommu_alloc(struct irq_desc *desc)
{
- return irq_2_iommu(irq);
+ return irq_2_iommu(desc);
}
#endif
static DEFINE_SPINLOCK(irq_2_ir_lock);
-static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
+static struct irq_2_iommu *valid_irq_2_iommu(struct irq_desc *desc)
{
struct irq_2_iommu *irq_iommu;
- irq_iommu = irq_2_iommu(irq);
+ irq_iommu = irq_2_iommu(desc);
if (!irq_iommu)
return NULL;
@@ -110,12 +105,12 @@ static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
return irq_iommu;
}
-int irq_remapped(int irq)
+int irq_remapped(struct irq_desc *desc)
{
- return valid_irq_2_iommu(irq) != NULL;
+ return valid_irq_2_iommu(desc) != NULL;
}
-int get_irte(int irq, struct irte *entry)
+int get_irte(struct irq_desc *desc, struct irte *entry)
{
int index;
struct irq_2_iommu *irq_iommu;
@@ -125,7 +120,7 @@ int get_irte(int irq, struct irte *entry)
return -1;
spin_lock_irqsave(&irq_2_ir_lock, flags);
- irq_iommu = valid_irq_2_iommu(irq);
+ irq_iommu = valid_irq_2_iommu(desc);
if (!irq_iommu) {
spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return -1;
@@ -138,7 +133,7 @@ int get_irte(int irq, struct irte *entry)
return 0;
}
-int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
+int alloc_irte(struct intel_iommu *iommu, struct irq_desc *desc, u16 count)
{
struct ir_table *table = iommu->ir_table;
struct irq_2_iommu *irq_iommu;
@@ -150,12 +145,6 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
if (!count)
return -1;
-#ifndef CONFIG_SPARSE_IRQ
- /* protect irq_2_iommu_alloc later */
- if (irq >= nr_irqs)
- return -1;
-#endif
-
/*
* start the IRTE search from index 0.
*/
@@ -195,7 +184,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
for (i = index; i < index + count; i++)
table->base[i].present = 1;
- irq_iommu = irq_2_iommu_alloc(irq);
+ irq_iommu = irq_2_iommu_alloc(desc);
if (!irq_iommu) {
spin_unlock_irqrestore(&irq_2_ir_lock, flags);
printk(KERN_ERR "can't allocate irq_2_iommu\n");
@@ -223,14 +212,14 @@ static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
return qi_submit_sync(&desc, iommu);
}
-int map_irq_to_irte_handle(int irq, u16 *sub_handle)
+int map_irq_to_irte_handle(struct irq_desc *desc, u16 *sub_handle)
{
int index;
struct irq_2_iommu *irq_iommu;
unsigned long flags;
spin_lock_irqsave(&irq_2_ir_lock, flags);
- irq_iommu = valid_irq_2_iommu(irq);
+ irq_iommu = valid_irq_2_iommu(desc);
if (!irq_iommu) {
spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return -1;
@@ -242,14 +231,14 @@ int map_irq_to_irte_handle(int irq, u16 *sub_handle)
return index;
}
-int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
+int set_irte_irq(struct irq_desc *desc, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
struct irq_2_iommu *irq_iommu;
unsigned long flags;
spin_lock_irqsave(&irq_2_ir_lock, flags);
- irq_iommu = irq_2_iommu_alloc(irq);
+ irq_iommu = irq_2_iommu_alloc(desc);
if (!irq_iommu) {
spin_unlock_irqrestore(&irq_2_ir_lock, flags);
@@ -267,13 +256,13 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
return 0;
}
-int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
+int clear_irte_irq(struct irq_desc *desc, struct intel_iommu *iommu, u16 index)
{
struct irq_2_iommu *irq_iommu;
unsigned long flags;
spin_lock_irqsave(&irq_2_ir_lock, flags);
- irq_iommu = valid_irq_2_iommu(irq);
+ irq_iommu = valid_irq_2_iommu(desc);
if (!irq_iommu) {
spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return -1;
@@ -282,14 +271,14 @@ int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
irq_iommu->iommu = NULL;
irq_iommu->irte_index = 0;
irq_iommu->sub_handle = 0;
- irq_2_iommu(irq)->irte_mask = 0;
+ irq_2_iommu(desc)->irte_mask = 0;
spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return 0;
}
-int modify_irte(int irq, struct irte *irte_modified)
+int modify_irte(struct irq_desc *desc, struct irte *irte_modified)
{
int rc;
int index;
@@ -299,7 +288,7 @@ int modify_irte(int irq, struct irte *irte_modified)
unsigned long flags;
spin_lock_irqsave(&irq_2_ir_lock, flags);
- irq_iommu = valid_irq_2_iommu(irq);
+ irq_iommu = valid_irq_2_iommu(desc);
if (!irq_iommu) {
spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return -1;
@@ -320,7 +309,7 @@ int modify_irte(int irq, struct irte *irte_modified)
return rc;
}
-int flush_irte(int irq)
+int flush_irte(struct irq_desc *desc)
{
int rc;
int index;
@@ -329,7 +318,7 @@ int flush_irte(int irq)
unsigned long flags;
spin_lock_irqsave(&irq_2_ir_lock, flags);
- irq_iommu = valid_irq_2_iommu(irq);
+ irq_iommu = valid_irq_2_iommu(desc);
if (!irq_iommu) {
spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return -1;
@@ -399,14 +388,14 @@ static int clear_entries(struct irq_2_iommu *irq_iommu)
return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}
-int free_irte(int irq)
+int free_irte(struct irq_desc *desc)
{
int rc = 0;
struct irq_2_iommu *irq_iommu;
unsigned long flags;
spin_lock_irqsave(&irq_2_ir_lock, flags);
- irq_iommu = valid_irq_2_iommu(irq);
+ irq_iommu = valid_irq_2_iommu(desc);
if (!irq_iommu) {
spin_unlock_irqrestore(&irq_2_ir_lock, flags);
return -1;
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 6b4227a..a5f04a1 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -113,17 +113,17 @@ extern int enable_intr_remapping(int);
extern void disable_intr_remapping(void);
extern int reenable_intr_remapping(int);
-extern int get_irte(int irq, struct irte *entry);
-extern int modify_irte(int irq, struct irte *irte_modified);
-extern int alloc_irte(struct intel_iommu *iommu, int irq, u16 count);
-extern int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index,
- u16 sub_handle);
-extern int map_irq_to_irte_handle(int irq, u16 *sub_handle);
-extern int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index);
-extern int flush_irte(int irq);
-extern int free_irte(int irq);
-
-extern int irq_remapped(int irq);
+int get_irte(struct irq_desc *desc, struct irte *entry);
+int modify_irte(struct irq_desc *desc, struct irte *irte_modified);
+int alloc_irte(struct intel_iommu *iommu, struct irq_desc *desc, u16 count);
+int set_irte_irq(struct irq_desc *desc, struct intel_iommu *iommu, u16 index,
+ u16 sub_handle);
+int map_irq_to_irte_handle(struct irq_desc *desc, u16 *sub_handle);
+int clear_irte_irq(struct irq_desc *desc, struct intel_iommu *iommu, u16 index);
+int flush_irte(struct irq_desc *desc);
+int free_irte(struct irq_desc *desc);
+
+extern int irq_remapped(struct irq_desc *desc);
extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev);
extern struct intel_iommu *map_ioapic_to_ir(int apic);
extern struct intel_iommu *map_hpet_to_ir(u8 id);
@@ -131,23 +131,25 @@ extern int set_ioapic_sid(struct irte *irte, int apic);
extern int set_hpet_sid(struct irte *irte, u8 id);
extern int set_msi_sid(struct irte *irte, struct pci_dev *dev);
#else
-static inline int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
+static inline int
+alloc_irte(struct intel_iommu *iommu, struct irq_desc *desc, u16 count)
{
return -1;
}
-static inline int modify_irte(int irq, struct irte *irte_modified)
+static inline int modify_irte(struct irq_desc *desc, struct irte *irte_modified)
{
return -1;
}
-static inline int free_irte(int irq)
+static inline int free_irte(struct irq_desc *desc)
{
return -1;
}
-static inline int map_irq_to_irte_handle(int irq, u16 *sub_handle)
+static inline int map_irq_to_irte_handle(struct irq_desc *desc, u16 *sub_handle)
{
return -1;
}
-static inline int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index,
+static inline int
+set_irte_irq(struct irq_desc *desc, struct intel_iommu *iommu, u16 index,
u16 sub_handle)
{
return -1;
@@ -177,7 +179,7 @@ static inline int set_msi_sid(struct irte *irte, struct pci_dev *dev)
return 0;
}
-#define irq_remapped(irq) (0)
+#define irq_remapped(desc) (0)
#define enable_intr_remapping(mode) (-1)
#define disable_intr_remapping() (0)
#define reenable_intr_remapping(mode) (0)
--
1.6.4.2
probe_nr_irqs_gsi() is always called when the IO-APIC is selected in the
config, so even for mrst the printout from probe_nr_irqs_gsi() reports the
correct nr_irqs_gsi.
-v2: remove the io_apic_irqs assignment; setup_IO_APIC will do that.
Signed-off-by: Yinghai Lu <yin...@kernel.org>
---
arch/x86/kernel/apic/io_apic.c | 7 ++-----
1 files changed, 2 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 61b59ef..ba469f8 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -153,11 +153,6 @@ int __init arch_early_irq_init(void)
int node;
int i;
- if (!legacy_pic->nr_legacy_irqs) {
- nr_irqs_gsi = 0;
- io_apic_irqs = ~0UL;
- }
-
cfg = irq_cfgx;
count = ARRAY_SIZE(irq_cfgx);
node= cpu_to_node(boot_cpu_id);
@@ -3938,6 +3933,8 @@ void __init probe_nr_irqs_gsi(void)
{
int nr = 0;
+ nr_irqs_gsi = legacy_pic->nr_legacy_irqs;
+
nr = acpi_probe_gsi();
if (nr > nr_irqs_gsi) {
nr_irqs_gsi = nr;
--
1.6.4.2
With SPARSE_IRQ irq_to_desc becomes an unnecessary lookup operation on
the fast path of dispatching irqs to their handlers. We can avoid
this cost by passing an irq_desc pointer instead of using an integer
irq token to the irq_chip methods.
A single patch to convert all of the architectures is an unreviewable
2000+ line patch. A gradual transition scenario with two sets of
irq_chip methods in irq_chip is an unmanageable mess in kernel/irq.
So instead I define some macros so the generic irq code in kernel/irq/
can compile either way based on a boolean Kconfig variable
CONFIG_CHIP_PARAM_DESC. This allows us to convert one architecture at
a time, reducing the follow-on patches to manageable proportions. It
is a little ugly, but it is much better than the alternatives, and as
soon as we finish the transition we can kill the macros.
I introduce the macros CHIP_ARG, CHIP_VAR, and CHIP_PARAM where
appropriate. I change a few declarations of irq from int to unsigned
int. I normalize the variable names in the functions that call chip
methods so that the variables irq and desc are both present, allowing
CHIP_ARG to work properly. Most importantly, none of the irq logic
changes with this patch.
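To make the expansion concrete, here is default_enable (from the
kernel/irq/chip.c hunk below) expanded by hand under both settings;
this is an illustration only, not part of the patch:

static void default_enable(CHIP_PARAM)
{
	CHIP_VAR

	desc->chip->unmask(CHIP_ARG);
	desc->status &= ~IRQ_MASKED;
}

With CONFIG_CHIP_PARAM_DESC=n this compiles as today's code:

static void default_enable(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	desc->chip->unmask(irq);
	desc->status &= ~IRQ_MASKED;
}

With CONFIG_CHIP_PARAM_DESC=y it becomes the desc-based variant:

static void default_enable(struct irq_desc *desc)
{
	desc->chip->unmask(desc);
	desc->status &= ~IRQ_MASKED;
}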
-v2: add CHIP_VAR_IRQ
Signed-off-by: Eric W. Biederman <ebie...@xmission.com>
---
arch/Kconfig | 3 ++
include/linux/irq.h | 60 ++++++++++++++++++++++++++++-------------------
kernel/irq/autoprobe.c | 39 ++++++++++++++++---------------
kernel/irq/chip.c | 40 ++++++++++++++++----------------
kernel/irq/handle.c | 18 +++++++-------
kernel/irq/internals.h | 4 +-
kernel/irq/manage.c | 22 ++++++++--------
kernel/irq/migration.c | 10 ++++----
kernel/irq/pm.c | 6 ++--
kernel/irq/resend.c | 5 ++-
kernel/irq/spurious.c | 4 +-
11 files changed, 114 insertions(+), 97 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index f06010f..7331bbb 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -2,6 +2,9 @@
# General architecture dependent options
#
+config CHIP_PARAM_DESC
+ def_bool n
+
config OPROFILE
tristate "OProfile system profiling"
depends on PROFILING
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 60f3368..5a110a4 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -83,6 +83,18 @@ typedef void (*irq_flow_handler_t)(unsigned int irq,
struct proc_dir_entry;
struct msi_desc;
+#ifdef CONFIG_CHIP_PARAM_DESC
+#define CHIP_PARAM struct irq_desc *desc
+#define CHIP_VAR
+#define CHIP_VAR_IRQ unsigned int irq = desc->irq;
+#define CHIP_ARG desc
+#else
+#define CHIP_PARAM unsigned int irq
+#define CHIP_VAR struct irq_desc *desc = irq_to_desc(irq);
+#define CHIP_VAR_IRQ
+#define CHIP_ARG irq
+#endif
+
/**
* struct irq_chip - hardware interrupt chip descriptor
*
@@ -110,30 +122,30 @@ struct msi_desc;
*/
struct irq_chip {
const char *name;
- unsigned int (*startup)(unsigned int irq);
- void (*shutdown)(unsigned int irq);
- void (*enable)(unsigned int irq);
- void (*disable)(unsigned int irq);
-
- void (*ack)(unsigned int irq);
- void (*mask)(unsigned int irq);
- void (*mask_ack)(unsigned int irq);
- void (*unmask)(unsigned int irq);
- void (*eoi)(unsigned int irq);
-
- void (*end)(unsigned int irq);
- int (*set_affinity)(unsigned int irq,
+ unsigned int (*startup)(CHIP_PARAM);
+ void (*shutdown)(CHIP_PARAM);
+ void (*enable)(CHIP_PARAM);
+ void (*disable)(CHIP_PARAM);
+
+ void (*ack)(CHIP_PARAM);
+ void (*mask)(CHIP_PARAM);
+ void (*mask_ack)(CHIP_PARAM);
+ void (*unmask)(CHIP_PARAM);
+ void (*eoi)(CHIP_PARAM);
+
+ void (*end)(CHIP_PARAM);
+ int (*set_affinity)(CHIP_PARAM,
const struct cpumask *dest);
- int (*retrigger)(unsigned int irq);
- int (*set_type)(unsigned int irq, unsigned int flow_type);
- int (*set_wake)(unsigned int irq, unsigned int on);
+ int (*retrigger)(CHIP_PARAM);
+ int (*set_type)(CHIP_PARAM, unsigned int flow_type);
+ int (*set_wake)(CHIP_PARAM, unsigned int on);
- void (*bus_lock)(unsigned int irq);
- void (*bus_sync_unlock)(unsigned int irq);
+ void (*bus_lock)(CHIP_PARAM);
+ void (*bus_sync_unlock)(CHIP_PARAM);
/* Currently used only by UML, might disappear one day.*/
#ifdef CONFIG_IRQ_RELEASE_METHOD
- void (*release)(unsigned int irq, void *dev_id);
+ void (*release)(CHIP_PARAM, void *dev_id);
#endif
/*
* For compatibility, ->typename is copied into ->name.
@@ -252,20 +264,20 @@ extern void remove_irq(unsigned int irq, struct irqaction *act);
#ifdef CONFIG_GENERIC_PENDING_IRQ
-void move_native_irq(int irq);
-void move_masked_irq(int irq);
+void move_native_irq(unsigned int irq);
+void move_masked_irq(unsigned int irq);
#else /* CONFIG_GENERIC_PENDING_IRQ */
-static inline void move_irq(int irq)
+static inline void move_irq(unsigned int irq)
{
}
-static inline void move_native_irq(int irq)
+static inline void move_native_irq(unsigned int irq)
{
}
-static inline void move_masked_irq(int irq)
+static inline void move_masked_irq(unsigned int irq)
{
}
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
index 2295a31..2a8702f 100644
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -33,7 +33,7 @@ unsigned long probe_irq_on(void)
struct irq_desc *desc;
unsigned long mask = 0;
unsigned int status;
- int i;
+ unsigned int irq;
/*
* quiesce the kernel, or at least the asynchronous portion
@@ -44,7 +44,7 @@ unsigned long probe_irq_on(void)
* something may have generated an irq long ago and we want to
* flush such a longstanding irq before considering it as spurious.
*/
- for_each_irq_desc_reverse(i, desc) {
+ for_each_irq_desc_reverse(irq, desc) {
raw_spin_lock_irq(&desc->lock);
if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
/*
@@ -58,8 +58,8 @@ unsigned long probe_irq_on(void)
* progress:
*/
if (desc->chip->set_type)
- desc->chip->set_type(i, IRQ_TYPE_PROBE);
- desc->chip->startup(i);
+ desc->chip->set_type(CHIP_ARG, IRQ_TYPE_PROBE);
+ desc->chip->startup(CHIP_ARG);
}
raw_spin_unlock_irq(&desc->lock);
}
@@ -72,11 +72,11 @@ unsigned long probe_irq_on(void)
* (we must startup again here because if a longstanding irq
* happened in the previous stage, it may have masked itself)
*/
- for_each_irq_desc_reverse(i, desc) {
+ for_each_irq_desc_reverse(irq, desc) {
raw_spin_lock_irq(&desc->lock);
if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
- if (desc->chip->startup(i))
+ if (desc->chip->startup(CHIP_ARG))
desc->status |= IRQ_PENDING;
}
raw_spin_unlock_irq(&desc->lock);
@@ -90,7 +90,7 @@ unsigned long probe_irq_on(void)
/*
* Now filter out any obviously spurious interrupts
*/
- for_each_irq_desc(i, desc) {
+ for_each_irq_desc(irq, desc) {
raw_spin_lock_irq(&desc->lock);
status = desc->status;
@@ -98,10 +98,10 @@ unsigned long probe_irq_on(void)
/* It triggered already - consider it spurious. */
if (!(status & IRQ_WAITING)) {
desc->status = status & ~IRQ_AUTODETECT;
- desc->chip->shutdown(i);
+ desc->chip->shutdown(CHIP_ARG);
} else
- if (i < 32)
- mask |= 1 << i;
+ if (irq < 32)
+ mask |= 1 << irq;
}
raw_spin_unlock_irq(&desc->lock);
}
@@ -126,18 +126,18 @@ unsigned int probe_irq_mask(unsigned long val)
{
unsigned int status, mask = 0;
struct irq_desc *desc;
- int i;
+ unsigned int irq;
- for_each_irq_desc(i, desc) {
+ for_each_irq_desc(irq, desc) {
raw_spin_lock_irq(&desc->lock);
status = desc->status;
if (status & IRQ_AUTODETECT) {
- if (i < 16 && !(status & IRQ_WAITING))
- mask |= 1 << i;
+ if (irq < 16 && !(status & IRQ_WAITING))
+ mask |= 1 << irq;
desc->status = status & ~IRQ_AUTODETECT;
- desc->chip->shutdown(i);
+ desc->chip->shutdown(CHIP_ARG);
}
raw_spin_unlock_irq(&desc->lock);
}
@@ -166,22 +166,23 @@ EXPORT_SYMBOL(probe_irq_mask);
*/
int probe_irq_off(unsigned long val)
{
- int i, irq_found = 0, nr_of_irqs = 0;
+ int irq_found = 0, nr_of_irqs = 0;
struct irq_desc *desc;
unsigned int status;
+ unsigned int irq;
- for_each_irq_desc(i, desc) {
+ for_each_irq_desc(irq, desc) {
raw_spin_lock_irq(&desc->lock);
status = desc->status;
if (status & IRQ_AUTODETECT) {
if (!(status & IRQ_WAITING)) {
if (!nr_of_irqs)
- irq_found = i;
+ irq_found = irq;
nr_of_irqs++;
}
desc->status = status & ~IRQ_AUTODETECT;
- desc->chip->shutdown(i);
+ desc->chip->shutdown(CHIP_ARG);
}
raw_spin_unlock_irq(&desc->lock);
}
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 3dcdd2f..190360d 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -287,40 +287,40 @@ EXPORT_SYMBOL_GPL(set_irq_nested_thread);
/*
* default enable function
*/
-static void default_enable(unsigned int irq)
+static void default_enable(CHIP_PARAM)
{
- struct irq_desc *desc = irq_to_desc(irq);
+ CHIP_VAR
- desc->chip->unmask(irq);
+ desc->chip->unmask(CHIP_ARG);
desc->status &= ~IRQ_MASKED;
}
/*
* default disable function
*/
-static void default_disable(unsigned int irq)
+static void default_disable(CHIP_PARAM)
{
}
/*
* default startup function
*/
-static unsigned int default_startup(unsigned int irq)
+static unsigned int default_startup(CHIP_PARAM)
{
- struct irq_desc *desc = irq_to_desc(irq);
+ CHIP_VAR;
- desc->chip->enable(irq);
+ desc->chip->enable(CHIP_ARG);
return 0;
}
/*
* default shutdown function
*/
-static void default_shutdown(unsigned int irq)
+static void default_shutdown(CHIP_PARAM)
{
- struct irq_desc *desc = irq_to_desc(irq);
+ CHIP_VAR;
- desc->chip->mask(irq);
+ desc->chip->mask(CHIP_ARG);
desc->status |= IRQ_MASKED;
}
@@ -353,11 +353,11 @@ void irq_chip_set_defaults(struct irq_chip *chip)
static inline void mask_ack_irq(struct irq_desc *desc, int irq)
{
if (desc->chip->mask_ack)
- desc->chip->mask_ack(irq);
+ desc->chip->mask_ack(CHIP_ARG);
else {
- desc->chip->mask(irq);
+ desc->chip->mask(CHIP_ARG);
if (desc->chip->ack)
- desc->chip->ack(irq);
+ desc->chip->ack(CHIP_ARG);
}
desc->status |= IRQ_MASKED;
}
@@ -365,7 +365,7 @@ static inline void mask_ack_irq(struct irq_desc *desc, int irq)
static inline void mask_irq(struct irq_desc *desc, int irq)
{
if (desc->chip->mask) {
- desc->chip->mask(irq);
+ desc->chip->mask(CHIP_ARG);
desc->status |= IRQ_MASKED;
}
}
@@ -373,7 +373,7 @@ static inline void mask_irq(struct irq_desc *desc, int irq)
static inline void unmask_irq(struct irq_desc *desc, int irq)
{
if (desc->chip->unmask) {
- desc->chip->unmask(irq);
+ desc->chip->unmask(CHIP_ARG);
desc->status &= ~IRQ_MASKED;
}
}
@@ -554,7 +554,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
raw_spin_lock(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
out:
- desc->chip->eoi(irq);
+ desc->chip->eoi(CHIP_ARG);
raw_spin_unlock(&desc->lock);
}
@@ -597,7 +597,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
/* Start handling the irq */
if (desc->chip->ack)
- desc->chip->ack(irq);
+ desc->chip->ack(CHIP_ARG);
/* Mark the IRQ currently in progress.*/
desc->status |= IRQ_INPROGRESS;
@@ -651,14 +651,14 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
kstat_incr_irqs_this_cpu(irq, desc);
if (desc->chip->ack)
- desc->chip->ack(irq);
+ desc->chip->ack(CHIP_ARG);
action_ret = handle_IRQ_event(irq, desc->action);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
if (desc->chip->eoi)
- desc->chip->eoi(irq);
+ desc->chip->eoi(CHIP_ARG);
}
void
@@ -706,7 +706,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
desc->status &= ~IRQ_DISABLED;
desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE;
desc->depth = 0;
- desc->chip->startup(irq);
+ desc->chip->startup(CHIP_ARG);
}
raw_spin_unlock_irqrestore(&desc->lock, flags);
chip_bus_sync_unlock(irq, desc);
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index f30c9c7..9e4a0a0 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -295,22 +295,22 @@ void clear_kstat_irqs(struct irq_desc *desc)
* What should we do if we get a hw irq event on an illegal vector?
* Each architecture has to answer this themself.
*/
-static void ack_bad(unsigned int irq)
+static void ack_bad(CHIP_PARAM)
{
- struct irq_desc *desc = irq_to_desc(irq);
+ CHIP_VAR;
- print_irq_desc(irq, desc);
- ack_bad_irq(irq);
+ print_irq_desc(desc->irq, desc);
+ ack_bad_irq(desc->irq);
}
/*
* NOP functions
*/
-static void noop(unsigned int irq)
+static void noop(CHIP_PARAM)
{
}
-static unsigned int noop_ret(unsigned int irq)
+static unsigned int noop_ret(CHIP_PARAM)
{
return 0;
}
@@ -470,13 +470,13 @@ unsigned int __do_IRQ(unsigned int irq)
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
}
- desc->chip->end(irq);
+ desc->chip->end(CHIP_ARG);
return 1;
}
raw_spin_lock(&desc->lock);
if (desc->chip->ack)
- desc->chip->ack(irq);
+ desc->chip->ack(CHIP_ARG);
/*
* REPLAY is when Linux resends an IRQ that was dropped earlier
* WAITING is used by probe to mark irqs that are being tested
@@ -536,7 +536,7 @@ out:
* The ->end() handler has to deal with interrupts which got
* disabled while the handler was running.
*/
- desc->chip->end(irq);
+ desc->chip->end(CHIP_ARG);
raw_spin_unlock(&desc->lock);
return 1;
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index c63f3bc..2a9ec5e 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -44,13 +44,13 @@ extern void irq_set_thread_affinity(struct irq_desc *desc);
static inline void chip_bus_lock(unsigned int irq, struct irq_desc *desc)
{
if (unlikely(desc->chip->bus_lock))
- desc->chip->bus_lock(irq);
+ desc->chip->bus_lock(CHIP_ARG);
}
static inline void chip_bus_sync_unlock(unsigned int irq, struct irq_desc *desc)
{
if (unlikely(desc->chip->bus_sync_unlock))
- desc->chip->bus_sync_unlock(irq);
+ desc->chip->bus_sync_unlock(CHIP_ARG);
}
/*
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 69a3d7b..706b320 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -118,7 +118,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
#ifdef CONFIG_GENERIC_PENDING_IRQ
if (desc->status & IRQ_MOVE_PCNTXT) {
- if (!desc->chip->set_affinity(irq, cpumask)) {
+ if (!desc->chip->set_affinity(CHIP_ARG, cpumask)) {
cpumask_copy(desc->affinity, cpumask);
irq_set_thread_affinity(desc);
}
@@ -128,7 +128,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
cpumask_copy(desc->pending_mask, cpumask);
}
#else
- if (!desc->chip->set_affinity(irq, cpumask)) {
+ if (!desc->chip->set_affinity(CHIP_ARG, cpumask)) {
cpumask_copy(desc->affinity, cpumask);
irq_set_thread_affinity(desc);
}
@@ -161,7 +161,7 @@ static int setup_affinity(unsigned int irq, struct irq_desc *desc)
cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity);
set_affinity:
- desc->chip->set_affinity(irq, desc->affinity);
+ desc->chip->set_affinity(CHIP_ARG, desc->affinity);
return 0;
}
@@ -207,7 +207,7 @@ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
if (!desc->depth++) {
desc->status |= IRQ_DISABLED;
- desc->chip->disable(irq);
+ desc->chip->disable(CHIP_ARG);
}
}
@@ -321,7 +321,7 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on)
int ret = -ENXIO;
if (desc->chip->set_wake)
- ret = desc->chip->set_wake(irq, on);
+ ret = desc->chip->set_wake(CHIP_ARG, on);
return ret;
}
@@ -425,7 +425,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
}
/* caller masked out all except trigger mode flags */
- ret = chip->set_type(irq, flags);
+ ret = chip->set_type(CHIP_ARG, flags);
if (ret)
pr_err("setting trigger mode %d for irq %u failed (%pF)\n",
@@ -505,7 +505,7 @@ again:
if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
desc->status &= ~IRQ_MASKED;
- desc->chip->unmask(irq);
+ desc->chip->unmask(CHIP_ARG);
}
raw_spin_unlock_irq(&desc->lock);
chip_bus_sync_unlock(irq, desc);
@@ -756,7 +756,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
if (!(desc->status & IRQ_NOAUTOEN)) {
desc->depth = 0;
desc->status &= ~IRQ_DISABLED;
- desc->chip->startup(irq);
+ desc->chip->startup(CHIP_ARG);
} else
/* Undo nested disables: */
desc->depth = 1;
@@ -890,16 +890,16 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
/* Currently used only by UML, might disappear one day: */
#ifdef CONFIG_IRQ_RELEASE_METHOD
if (desc->chip->release)
- desc->chip->release(irq, dev_id);
+ desc->chip->release(CHIP_ARG, dev_id);
#endif
/* If this was the last handler, shut down the IRQ line: */
if (!desc->action) {
desc->status |= IRQ_DISABLED;
if (desc->chip->shutdown)
- desc->chip->shutdown(irq);
+ desc->chip->shutdown(CHIP_ARG);
else
- desc->chip->disable(irq);
+ desc->chip->disable(CHIP_ARG);
}
raw_spin_unlock_irqrestore(&desc->lock, flags);
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 2419622..5821159 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -4,7 +4,7 @@
#include "internals.h"
-void move_masked_irq(int irq)
+void move_masked_irq(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
@@ -43,7 +43,7 @@ void move_masked_irq(int irq)
*/
if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
< nr_cpu_ids))
- if (!desc->chip->set_affinity(irq, desc->pending_mask)) {
+ if (!desc->chip->set_affinity(CHIP_ARG, desc->pending_mask)) {
cpumask_copy(desc->affinity, desc->pending_mask);
irq_set_thread_affinity(desc);
}
@@ -51,7 +51,7 @@ void move_masked_irq(int irq)
cpumask_clear(desc->pending_mask);
}
-void move_native_irq(int irq)
+void move_native_irq(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
@@ -61,8 +61,8 @@ void move_native_irq(int irq)
if (unlikely(desc->status & IRQ_DISABLED))
return;
- desc->chip->mask(irq);
+ desc->chip->mask(CHIP_ARG);
move_masked_irq(irq);
- desc->chip->unmask(irq);
+ desc->chip->unmask(CHIP_ARG);
}
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index 0d4005d..94767d1 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -23,7 +23,7 @@
void suspend_device_irqs(void)
{
struct irq_desc *desc;
- int irq;
+ unsigned int irq;
for_each_irq_desc(irq, desc) {
unsigned long flags;
@@ -48,7 +48,7 @@ EXPORT_SYMBOL_GPL(suspend_device_irqs);
void resume_device_irqs(void)
{
struct irq_desc *desc;
- int irq;
+ unsigned int irq;
for_each_irq_desc(irq, desc) {
unsigned long flags;
@@ -69,7 +69,7 @@ EXPORT_SYMBOL_GPL(resume_device_irqs);
int check_wakeup_irqs(void)
{
struct irq_desc *desc;
- int irq;
+ unsigned int irq;
for_each_irq_desc(irq, desc)
if ((desc->status & IRQ_WAKEUP) && (desc->status & IRQ_PENDING))
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index 090c376..dc9dff8 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -60,7 +60,7 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq)
/*
* Make sure the interrupt is enabled, before resending it:
*/
- desc->chip->enable(irq);
+ desc->chip->enable(CHIP_ARG);
/*
* We do not resend level type interrupts. Level type
@@ -70,7 +70,8 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq)
if ((status & (IRQ_LEVEL | IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY;
- if (!desc->chip->retrigger || !desc->chip->retrigger(irq)) {
+ if (!desc->chip->retrigger ||
+ !desc->chip->retrigger(CHIP_ARG)) {
#ifdef CONFIG_HARDIRQS_SW_RESEND
/* Set it pending and activate the softirq: */
set_bit(irq, irqs_resend);
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 89fb90a..6cc2cb9 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -79,7 +79,7 @@ static int try_one_irq(int irq, struct irq_desc *desc)
* IRQ controller clean up too
*/
if (work && desc->chip && desc->chip->end)
- desc->chip->end(irq);
+ desc->chip->end(CHIP_ARG);
raw_spin_unlock(&desc->lock);
return ok;
@@ -254,7 +254,7 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED;
desc->depth++;
- desc->chip->disable(irq);
+ desc->chip->disable(CHIP_ARG);
mod_timer(&poll_spurious_irq_timer,
jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
Signed-off-by: Yinghai Lu <yin...@kernel.org>
---
arch/x86/Kconfig | 1 +
arch/x86/include/asm/hardirq.h | 2 +-
arch/x86/include/asm/hpet.h | 8 +-
arch/x86/include/asm/hw_irq.h | 1 -
arch/x86/include/asm/i8259.h | 2 +-
arch/x86/kernel/apic/io_apic.c | 153 +++++++++++----------------
arch/x86/kernel/hpet.c | 16 ++--
arch/x86/kernel/i8259.c | 31 +++---
arch/x86/kernel/irq.c | 12 +-
arch/x86/kernel/uv_irq.c | 15 ++--
arch/x86/kernel/visws_quirks.c | 29 +++---
arch/x86/kernel/vmiclock_32.c | 8 +-
arch/x86/lguest/boot.c | 8 +-
drivers/gpio/langwell_gpio.c | 11 +-
drivers/gpio/pca953x.c | 23 +++--
drivers/gpio/timbgpio.c | 17 ++--
drivers/infiniband/hw/ipath/ipath_iba6110.c | 2 +-
drivers/mfd/ezx-pcap.c | 12 ++-
drivers/mfd/twl4030-irq.c | 16 ++--
drivers/mfd/wm831x-irq.c | 18 ++--
drivers/misc/sgi-gru/grufile.c | 2 +-
drivers/pci/dmar.c | 20 ++--
drivers/pci/htirq.c | 22 ++--
drivers/pci/msi.c | 48 ++++-----
drivers/xen/events.c | 22 ++--
include/asm-generic/hardirq.h | 4 +-
include/linux/dmar.h | 8 +-
include/linux/htirq.h | 11 +-
include/linux/irq.h | 7 +-
include/linux/msi.h | 13 +--
kernel/irq/handle.c | 8 +-
kernel/irq/internals.h | 7 +-
kernel/irq/migration.c | 10 +-
33 files changed, 279 insertions(+), 288 deletions(-)
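[ The hunks in this series rely on CHIP_PARAM / CHIP_ARG / CHIP_VAR /
CHIP_VAR_IRQ helpers whose definitions are not part of this excerpt.
A plausible sketch of how they expand, assuming CONFIG_CHIP_PARAM_DESC
selects the desc-based calling convention:

#ifdef CONFIG_CHIP_PARAM_DESC
/* new convention: irq_chip callbacks take the descriptor directly */
#define CHIP_PARAM	struct irq_desc *desc
#define CHIP_ARG	desc
#define CHIP_VAR	/* desc is already the parameter */
#define CHIP_VAR_IRQ	unsigned int irq = desc->irq;
#else
/* legacy convention: irq_chip callbacks take the irq number */
#define CHIP_PARAM	unsigned int irq
#define CHIP_ARG	irq
#define CHIP_VAR	struct irq_desc *desc = irq_to_desc(irq);
#define CHIP_VAR_IRQ	/* irq is already the parameter */
#endif

Under that reading, a callback declared as noop(CHIP_PARAM) and call
sites such as desc->chip->end(CHIP_ARG) compile unchanged with either
calling convention. ]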
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 6a80bce..1b47c54 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -58,6 +58,7 @@ config X86
select ANON_INODES
select HAVE_ARCH_KMEMCHECK
select HAVE_USER_RETURN_NOTIFIER
+ select CHIP_PARAM_DESC
config INSTRUCTION_DECODER
def_bool (KPROBES || PERF_EVENTS)
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index cd2f193..f908af5 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -176,18 +176,6 @@ int __init arch_early_irq_init(void)
}
#ifdef CONFIG_SPARSE_IRQ
-struct irq_cfg *irq_cfg(unsigned int irq)
-{
- struct irq_cfg *cfg = NULL;
- struct irq_desc *desc;
-
- desc = irq_to_desc(irq);
- if (desc)
- cfg = desc->chip_data;
-
- return cfg;
-}
-
static struct irq_cfg *get_one_free_irq_cfg(int node)
{
struct irq_cfg *cfg;
@@ -336,10 +324,6 @@ int arch_init_irq_desc(struct irq_desc *desc, int node,
}
#else
-struct irq_cfg *irq_cfg(unsigned int irq)
-{
- return irq < nr_irqs ? irq_cfgx + irq : NULL;
-}
void x86_copy_chip_data(struct irq_desc *old_desc,
struct irq_desc *desc, int node)
@@ -619,16 +603,12 @@ static void unmask_IO_APIC_irq_desc(struct irq_desc *desc)
raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
-static void mask_IO_APIC_irq(unsigned int irq)
+static void mask_IO_APIC_irq(struct irq_desc *desc)
{
- struct irq_desc *desc = irq_to_desc(irq);
-
mask_IO_APIC_irq_desc(desc);
}
-static void unmask_IO_APIC_irq(unsigned int irq)
+static void unmask_IO_APIC_irq(struct irq_desc *desc)
{
- struct irq_desc *desc = irq_to_desc(irq);
@@ -2427,12 +2407,8 @@ set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
}
static int
-set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask)
+set_ioapic_affinity_irq(struct irq_desc *desc, const struct cpumask *mask)
{
- struct irq_desc *desc;
-
- desc = irq_to_desc(irq);
-
return set_ioapic_affinity_irq_desc(desc, mask);
}
@@ -2495,11 +2471,9 @@ static int set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
{
return migrate_ioapic_irq_desc(desc, mask);
}
-static int set_ir_ioapic_affinity_irq(unsigned int irq,
+static int set_ir_ioapic_affinity_irq(struct irq_desc *desc,
const struct cpumask *mask)
{
- struct irq_desc *desc = irq_to_desc(irq);
-
return set_ir_ioapic_affinity_irq_desc(desc, mask);
}
#else
@@ -2592,12 +2566,10 @@ void irq_force_complete_move(int irq)
static inline void irq_complete_move(struct irq_desc **descp) {}
#endif
-static void ack_apic_edge(unsigned int irq)
+static void ack_apic_edge(struct irq_desc *desc)
{
- struct irq_desc *desc = irq_to_desc(irq);
-
irq_complete_move(&desc);
- move_native_irq(irq);
+ move_native_irq(desc);
ack_APIC_irq();
}
@@ -2656,9 +2628,8 @@ static void eoi_ioapic_irq(struct irq_desc *desc)
raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}
-static void ack_apic_level(unsigned int irq)
+static void ack_apic_level(struct irq_desc *desc)
{
- struct irq_desc *desc = irq_to_desc(irq);
unsigned long v;
int i;
struct irq_cfg *cfg;
@@ -2758,21 +2729,19 @@ static void ack_apic_level(unsigned int irq)
*/
cfg = desc->chip_data;
if (!io_apic_level_ack_pending(cfg))
- move_masked_irq(irq);
+ move_masked_irq(desc);
unmask_IO_APIC_irq_desc(desc);
}
}
#ifdef CONFIG_INTR_REMAP
-static void ir_ack_apic_edge(unsigned int irq)
+static void ir_ack_apic_edge(struct irq_desc *desc)
{
ack_APIC_irq();
}
-static void ir_ack_apic_level(unsigned int irq)
+static void ir_ack_apic_level(struct irq_desc *desc)
{
- struct irq_desc *desc = irq_to_desc(irq);
+static int msi_compose_msg(struct pci_dev *pdev, struct irq_desc *desc,
struct msi_msg *msg, u8 hpet_id)
{
- struct irq_desc *desc;
+ unsigned int irq = desc->irq;
struct irq_cfg *cfg;
int err;
unsigned dest;
@@ -3384,7 +3353,6 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
if (disable_apic)
return -ENXIO;
- desc = irq_to_desc(irq);
cfg = desc->chip_data;
err = assign_irq_vector(desc, cfg, apic->target_cpus());
if (err)
@@ -3452,9 +3420,9 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
}
#ifdef CONFIG_SMP
-static int set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
+static int
+set_msi_irq_affinity(struct irq_desc *desc, const struct cpumask *mask)
{
- struct irq_desc *desc = irq_to_desc(irq);
struct irq_cfg *cfg;
struct msi_msg msg;
unsigned int dest;
@@ -3464,14 +3432,14 @@ static int set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
cfg = desc->chip_data;
- read_msi_msg_desc(desc, &msg);
+ read_msi_msg(desc, &msg);
msg.data &= ~MSI_DATA_VECTOR_MASK;
msg.data |= MSI_DATA_VECTOR(cfg->vector);
msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
msg.address_lo |= MSI_ADDR_DEST_ID(dest);
- write_msi_msg_desc(desc, &msg);
+ write_msi_msg(desc, &msg);
return 0;
}
@@ -3481,9 +3449,9 @@ static int set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
* done in the process context using interrupt-remapping hardware.
*/
static int
-ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
+ir_set_msi_irq_affinity(struct irq_desc *desc, const struct cpumask *mask)
{
- struct irq_desc *desc = irq_to_desc(irq);
+ unsigned int irq = desc->irq;
struct irq_cfg *cfg = desc->chip_data;
unsigned int dest;
struct irte irte;
@@ -3581,16 +3549,16 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
{
int ret;
struct msi_msg msg;
+ struct irq_desc *desc = irq_to_desc(irq);
- ret = msi_compose_msg(dev, irq, &msg, -1);
+ ret = msi_compose_msg(dev, desc, &msg, -1);
if (ret < 0)
return ret;
set_irq_msi(irq, msidesc);
- write_msi_msg(irq, &msg);
+ write_msi_msg(desc, &msg);
if (irq_remapped(irq)) {
- struct irq_desc *desc = irq_to_desc(irq);
/*
* irq migration in process context
*/
@@ -3672,9 +3640,9 @@ void arch_teardown_msi_irq(unsigned int irq)
#if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP)
#ifdef CONFIG_SMP
-static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
+static int
+dmar_msi_set_affinity(struct irq_desc *desc, const struct cpumask *mask)
{
- struct irq_desc *desc = irq_to_desc(irq);
struct irq_cfg *cfg;
struct msi_msg msg;
unsigned int dest;
@@ -3684,14 +3652,14 @@ static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
cfg = desc->chip_data;
- dmar_msi_read(irq, &msg);
+ dmar_msi_read(desc, &msg);
msg.data &= ~MSI_DATA_VECTOR_MASK;
msg.data |= MSI_DATA_VECTOR(cfg->vector);
msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
msg.address_lo |= MSI_ADDR_DEST_ID(dest);
- dmar_msi_write(irq, &msg);
+ dmar_msi_write(desc, &msg);
return 0;
}
@@ -3716,11 +3684,12 @@ int arch_setup_dmar_msi(unsigned int irq)
{
int ret;
struct msi_msg msg;
+ struct irq_desc *desc = irq_to_desc(irq);
- ret = msi_compose_msg(NULL, irq, &msg, -1);
+ ret = msi_compose_msg(NULL, desc, &msg, -1);
if (ret < 0)
return ret;
- dmar_msi_write(irq, &msg);
+ dmar_msi_write(desc, &msg);
set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
"edge");
return 0;
@@ -3730,9 +3699,9 @@ int arch_setup_dmar_msi(unsigned int irq)
#ifdef CONFIG_HPET_TIMER
#ifdef CONFIG_SMP
-static int hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
+static int
+hpet_msi_set_affinity(struct irq_desc *desc, const struct cpumask *mask)
{
- struct irq_desc *desc = irq_to_desc(irq);
struct irq_cfg *cfg;
struct msi_msg msg;
unsigned int dest;
@@ -3742,14 +3711,14 @@ static int hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
cfg = desc->chip_data;
- hpet_msi_read(irq, &msg);
+ hpet_msi_read(desc, &msg);
msg.data &= ~MSI_DATA_VECTOR_MASK;
msg.data |= MSI_DATA_VECTOR(cfg->vector);
msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
msg.address_lo |= MSI_ADDR_DEST_ID(dest);
- hpet_msi_write(irq, &msg);
+ hpet_msi_write(desc, &msg);
return 0;
}
@@ -3804,11 +3773,11 @@ int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
return -1;
}
- ret = msi_compose_msg(NULL, irq, &msg, id);
+ ret = msi_compose_msg(NULL, desc, &msg, id);
if (ret < 0)
return ret;
- hpet_msi_write(irq, &msg);
+ hpet_msi_write(desc, &msg);
desc->status |= IRQ_MOVE_PCNTXT;
if (irq_remapped(irq))
set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type,
@@ -3829,10 +3798,10 @@ int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
#ifdef CONFIG_SMP
-static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
+static void target_ht_irq(struct irq_desc *desc, unsigned int dest, u8 vector)
{
struct ht_irq_msg msg;
- fetch_ht_irq_msg(irq, &msg);
+ fetch_ht_irq_msg(desc, &msg);
msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
@@ -3840,12 +3809,12 @@ static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
- write_ht_irq_msg(irq, &msg);
+ write_ht_irq_msg(desc, &msg);
}
-static int set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask)
+static int
+set_ht_irq_affinity(struct irq_desc *desc, const struct cpumask *mask)
{
- struct irq_desc *desc = irq_to_desc(irq);
struct irq_cfg *cfg;
unsigned int dest;
@@ -3854,7 +3823,7 @@ static int set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask)
cfg = desc->chip_data;
- target_ht_irq(irq, dest, cfg->vector);
+ target_ht_irq(desc, dest, cfg->vector);
return 0;
}
@@ -3909,7 +3878,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
HT_IRQ_LOW_MT_ARBITRATED) |
HT_IRQ_LOW_IRQ_MASKED;
- write_ht_irq_msg(irq, &msg);
+ write_ht_irq_msg(desc, &msg);
set_irq_chip_and_handler_name(irq, &ht_irq_chip,
handle_edge_irq, "edge");
@@ -4399,7 +4368,7 @@ void __init pre_init_apic_IRQ0(void)
if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->mask)
- desc->chip->mask(irq);
+ desc->chip->mask(desc);
if (desc->chip->set_affinity)
- desc->chip->set_affinity(irq, affinity);
+ desc->chip->set_affinity(desc, affinity);
else if (!(warned++))
set_affinity = 0;
if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask)
- desc->chip->unmask(irq);
+ desc->chip->unmask(desc);
raw_spin_unlock(&desc->lock);
@@ -357,7 +357,7 @@ void fixup_irqs(void)
raw_spin_lock(&desc->lock);
if (desc->chip->retrigger)
- desc->chip->retrigger(irq);
+ desc->chip->retrigger(desc);
raw_spin_unlock(&desc->lock);
}
}
diff --git a/arch/x86/kernel/uv_irq.c b/arch/x86/kernel/uv_irq.c
index 44c430d..ed4ce50 100644
--- a/arch/x86/kernel/uv_irq.c
+++ b/arch/x86/kernel/uv_irq.c
@@ -27,18 +27,18 @@ struct uv_irq_2_mmr_pnode{
static spinlock_t uv_irq_lock;
static struct rb_root uv_irq_root;
-static int uv_set_irq_affinity(unsigned int, const struct cpumask *);
+static int uv_set_irq_affinity(struct irq_desc *desc, const struct cpumask *);
-static void uv_noop(unsigned int irq)
+static void uv_noop(struct irq_desc *desc)
{
}
-static unsigned int uv_noop_ret(unsigned int irq)
+static unsigned int uv_noop_ret(struct irq_desc *desc)
{
return 0;
}
-static void uv_ack_apic(unsigned int irq)
+static void uv_ack_apic(struct irq_desc *desc)
{
ack_APIC_irq();
}
@@ -156,7 +156,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
sizeof(unsigned long));
- cfg = irq_cfg(irq);
+ cfg = desc->chip_data;
err = assign_irq_vector(desc, cfg, eligible_cpu);
if (err != 0)
@@ -208,9 +208,10 @@ static void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset)
uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
}
-static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask)
+static int
+uv_set_irq_affinity(struct irq_desc *desc, const struct cpumask *mask)
{
- struct irq_desc *desc = irq_to_desc(irq);
+ unsigned int irq = desc->irq;
struct irq_cfg *cfg = desc->chip_data;
unsigned int dest;
- struct irq_desc *desc = irq_to_desc(irq);
spin_lock_irqsave(&cobalt_lock, flags);
if ((desc->status & (IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING)))
desc->status &= ~(IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING);
- enable_cobalt_irq(irq);
+ enable_cobalt_irq(desc);
spin_unlock_irqrestore(&cobalt_lock, flags);
return 0;
}
-static void ack_cobalt_irq(unsigned int irq)
+static void ack_cobalt_irq(struct irq_desc *desc)
{
unsigned long flags;
spin_lock_irqsave(&cobalt_lock, flags);
- disable_cobalt_irq(irq);
+ disable_cobalt_irq(desc);
apic_write(APIC_EOI, APIC_EIO_ACK);
spin_unlock_irqrestore(&cobalt_lock, flags);
}
-static void end_cobalt_irq(unsigned int irq)
+static void end_cobalt_irq(struct irq_desc *desc)
{
unsigned long flags;
- struct irq_desc *desc = irq_to_desc(irq);
diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/langwell_gpio.c
index 6c0ebbd..9283da1 100644
--- a/drivers/gpio/langwell_gpio.c
+++ b/drivers/gpio/langwell_gpio.c
@@ -113,9 +113,10 @@ static int lnw_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
return lnw->irq_base + offset;
}
-static int lnw_irq_type(unsigned irq, unsigned type)
+static int lnw_irq_type(CHIP_PARAM, unsigned type)
{
- struct lnw_gpio *lnw = get_irq_chip_data(irq);
+ CHIP_VAR_IRQ
+ CHIP_VAR struct lnw_gpio *lnw = get_irq_desc_chip_data(desc);
u32 gpio = irq - lnw->irq_base;
u8 reg = gpio / 32;
unsigned long flags;
@@ -142,11 +143,11 @@ static int lnw_irq_type(unsigned irq, unsigned type)
return 0;
};
-static void lnw_irq_unmask(unsigned irq)
+static void lnw_irq_unmask(CHIP_PARAM)
{
};
-static void lnw_irq_mask(unsigned irq)
+static void lnw_irq_mask(CHIP_PARAM)
{
};
@@ -184,7 +185,7 @@ static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
/* clear the edge detect status bit */
writel(gedr_v, gedr);
}
- desc->chip->eoi(irq);
+ desc->chip->eoi(CHIP_ARG);
}
static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index ab5daab..ed615c0 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -227,37 +227,40 @@ static int pca953x_gpio_to_irq(struct gpio_chip *gc, unsigned off)
return chip->irq_base + off;
}
-static void pca953x_irq_mask(unsigned int irq)
+static void pca953x_irq_mask(struct irq_desc *desc)
{
- struct pca953x_chip *chip = get_irq_chip_data(irq);
+ CHIP_VAR_IRQ
+ CHIP_VAR struct pca953x_chip *chip = get_irq_desc_chip_data(desc);
chip->irq_mask &= ~(1 << (irq - chip->irq_base));
}
-static void pca953x_irq_unmask(unsigned int irq)
+static void pca953x_irq_unmask(struct irq_desc *desc)
{
- struct pca953x_chip *chip = get_irq_chip_data(irq);
+ CHIP_VAR_IRQ
+ CHIP_VAR struct pca953x_chip *chip = get_irq_desc_chip_data(desc);
chip->irq_mask |= 1 << (irq - chip->irq_base);
}
-static void pca953x_irq_bus_lock(unsigned int irq)
+static void pca953x_irq_bus_lock(struct irq_desc *desc)
{
- struct pca953x_chip *chip = get_irq_chip_data(irq);
+ CHIP_VAR struct pca953x_chip *chip = get_irq_desc_chip_data(desc);
mutex_lock(&chip->irq_lock);
}
-static void pca953x_irq_bus_sync_unlock(unsigned int irq)
+static void pca953x_irq_bus_sync_unlock(struct irq_desc *desc)
{
- struct pca953x_chip *chip = get_irq_chip_data(irq);
+ CHIP_VAR struct pca953x_chip *chip = get_irq_desc_chip_data(desc);
mutex_unlock(&chip->irq_lock);
}
-static int pca953x_irq_set_type(unsigned int irq, unsigned int type)
+static int pca953x_irq_set_type(struct irq_desc *desc, unsigned int type)
{
- struct pca953x_chip *chip = get_irq_chip_data(irq);
+ CHIP_VAR_IRQ
+ CHIP_VAR struct pca953x_chip *chip = get_irq_desc_chip_data(desc);
uint16_t level = irq - chip->irq_base;
uint16_t mask = 1 << level;
diff --git a/drivers/gpio/timbgpio.c b/drivers/gpio/timbgpio.c
index d4295fa..83160dc 100644
--- a/drivers/gpio/timbgpio.c
+++ b/drivers/gpio/timbgpio.c
@@ -107,25 +107,28 @@ static int timbgpio_to_irq(struct gpio_chip *gpio, unsigned offset)
/*
* GPIO IRQ
*/
-static void timbgpio_irq_disable(unsigned irq)
+static void timbgpio_irq_disable(CHIP_PARAM)
{
- struct timbgpio *tgpio = get_irq_chip_data(irq);
+ CHIP_VAR_IRQ
+ CHIP_VAR struct timbgpio *tgpio = get_irq_desc_chip_data(desc);
int offset = irq - tgpio->irq_base;
timbgpio_update_bit(&tgpio->gpio, offset, TGPIO_IER, 0);
}
-static void timbgpio_irq_enable(unsigned irq)
+static void timbgpio_irq_enable(struct irq_desc *desc)
{
- struct timbgpio *tgpio = get_irq_chip_data(irq);
+ CHIP_VAR_IRQ
+ CHIP_VAR struct timbgpio *tgpio = get_irq_desc_chip_data(desc);
int offset = irq - tgpio->irq_base;
timbgpio_update_bit(&tgpio->gpio, offset, TGPIO_IER, 1);
}
-static int timbgpio_irq_type(unsigned irq, unsigned trigger)
+static int timbgpio_irq_type(CHIP_PARAM, unsigned trigger)
{
- struct timbgpio *tgpio = get_irq_chip_data(irq);
+ CHIP_VAR_IRQ
+ CHIP_VAR struct timbgpio *tgpio = get_irq_desc_chip_data(desc);
int offset = irq - tgpio->irq_base;
unsigned long flags;
u32 lvr, flr, bflr = 0;
@@ -185,7 +188,7 @@ static void timbgpio_irq(unsigned int irq, struct irq_desc *desc)
unsigned long ipr;
int offset;
- desc->chip->ack(irq);
+ desc->chip->ack(CHIP_ARG);
ipr = ioread32(tgpio->membase + TGPIO_IPR);
iowrite32(ipr, tgpio->membase + TGPIO_ICR);
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6110.c b/drivers/infiniband/hw/ipath/ipath_iba6110.c
index 37d12e5..4cbae45 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6110.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6110.c
@@ -986,7 +986,7 @@ static int ipath_ht_intconfig(struct ipath_devdata *dd)
return ret;
}
-static void ipath_ht_irq_update(struct pci_dev *dev, int irq,
+static void ipath_ht_irq_update(struct pci_dev *dev, struct irq_desc *desc,
struct ht_irq_msg *msg)
{
struct ipath_devdata *dd = pci_get_drvdata(dev);
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
index df405af..f63bb23 100644
--- a/drivers/mfd/ezx-pcap.c
+++ b/drivers/mfd/ezx-pcap.c
@@ -143,17 +143,19 @@ int pcap_to_irq(struct pcap_chip *pcap, int irq)
}
EXPORT_SYMBOL_GPL(pcap_to_irq);
-static void pcap_mask_irq(unsigned int irq)
+static void pcap_mask_irq(CHIP_PARAM)
{
- struct pcap_chip *pcap = get_irq_chip_data(irq);
+ CHIP_VAR_IRQ
+ CHIP_VAR struct pcap_chip *pcap = get_irq_desc_chip_data(desc);
pcap->msr |= 1 << irq_to_pcap(pcap, irq);
queue_work(pcap->workqueue, &pcap->msr_work);
}
-static void pcap_unmask_irq(unsigned int irq)
+static void pcap_unmask_irq(CHIP_PARAM)
{
- struct pcap_chip *pcap = get_irq_chip_data(irq);
+ CHIP_VAR_IRQ
+ CHIP_VAR struct pcap_chip *pcap = get_irq_desc_chip_data(desc);
pcap->msr &= ~(1 << irq_to_pcap(pcap, irq));
queue_work(pcap->workqueue, &pcap->msr_work);
@@ -217,7 +219,7 @@ static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc)
{
struct pcap_chip *pcap = get_irq_data(irq);
- desc->chip->ack(irq);
+ desc->chip->ack(desc);
queue_work(pcap->workqueue, &pcap->isr_work);
return;
}
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index 9df9a5a..3e69ad3 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -595,9 +595,10 @@ static void twl4030_sih_do_edge(struct work_struct *work)
* completion, potentially including some re-ordering, of these requests.
*/
-static void twl4030_sih_mask(unsigned irq)
+static void twl4030_sih_mask(CHIP_PARAM)
{
- struct sih_agent *sih = get_irq_chip_data(irq);
+ CHIP_VAR_IRQ
+ CHIP_VAR struct sih_agent *sih = get_irq_desc_chip_data(desc);
unsigned long flags;
spin_lock_irqsave(&sih_agent_lock, flags);
@@ -607,9 +608,10 @@ static void twl4030_sih_mask(unsigned irq)
spin_unlock_irqrestore(&sih_agent_lock, flags);
}
-static void twl4030_sih_unmask(unsigned irq)
+static void twl4030_sih_unmask(CHIP_PARAM)
{
- struct sih_agent *sih = get_irq_chip_data(irq);
+ CHIP_VAR_IRQ
+ CHIP_VAR struct sih_agent *sih = get_irq_desc_chip_data(desc);
unsigned long flags;
spin_lock_irqsave(&sih_agent_lock, flags);
@@ -619,10 +621,10 @@ static void twl4030_sih_unmask(unsigned irq)
spin_unlock_irqrestore(&sih_agent_lock, flags);
}
-static int twl4030_sih_set_type(unsigned irq, unsigned trigger)
+static int twl4030_sih_set_type(CHIP_PARAM, unsigned trigger)
{
- struct sih_agent *sih = get_irq_chip_data(irq);
- struct irq_desc *desc = irq_to_desc(irq);
+ CHIP_VAR_IRQ
+ CHIP_VAR struct sih_agent *sih = get_irq_desc_chip_data(desc);
unsigned long flags;
if (!desc) {
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index 3013276..eac701e 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -346,16 +346,16 @@ static inline struct wm831x_irq_data *irq_to_wm831x_irq(struct wm831x *wm831x,
return &wm831x_irqs[irq - wm831x->irq_base];
}
-static void wm831x_irq_lock(unsigned int irq)
+static void wm831x_irq_lock(CHIP_PARAM)
{
- struct wm831x *wm831x = get_irq_chip_data(irq);
+ CHIP_VAR struct wm831x *wm831x = get_irq_desc_chip_data(desc);
mutex_lock(&wm831x->irq_lock);
}
-static void wm831x_irq_sync_unlock(unsigned int irq)
+static void wm831x_irq_sync_unlock(CHIP_PARAM)
{
- struct wm831x *wm831x = get_irq_chip_data(irq);
+ CHIP_VAR struct wm831x *wm831x = get_irq_desc_chip_data(desc);
int i;
for (i = 0; i < ARRAY_SIZE(wm831x->irq_masks_cur); i++) {
@@ -372,17 +372,19 @@ static void wm831x_irq_sync_unlock(unsigned int irq)
mutex_unlock(&wm831x->irq_lock);
}
-static void wm831x_irq_unmask(unsigned int irq)
+static void wm831x_irq_unmask(CHIP_PARAM)
{
- struct wm831x *wm831x = get_irq_chip_data(irq);
+ CHIP_VAR_IRQ
+ CHIP_VAR struct wm831x *wm831x = get_irq_desc_chip_data(desc);
struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x, irq);
wm831x->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
}
-static void wm831x_irq_mask(unsigned int irq)
+static void wm831x_irq_mask(CHIP_PARAM)
{
- struct wm831x *wm831x = get_irq_chip_data(irq);
+ CHIP_VAR_IRQ
+ CHIP_VAR struct wm831x *wm831x = get_irq_desc_chip_data(desc);
struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x, irq);
wm831x->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index cb3b4d2..71aed9f 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -348,7 +348,7 @@ static unsigned long gru_chiplet_cpu_to_mmr(int chiplet, int cpu, int *corep)
static int gru_irq_count[GRU_CHIPLETS_PER_BLADE];
-static void gru_noop(unsigned int irq)
+static void gru_noop(CHIP_PARAM)
{
}
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 83aae47..00b6aee 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -1230,9 +1230,10 @@ const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
}
}
-void dmar_msi_unmask(unsigned int irq)
+void dmar_msi_unmask(CHIP_PARAM)
{
- struct intel_iommu *iommu = get_irq_data(irq);
+ CHIP_VAR
+ struct intel_iommu *iommu = get_irq_desc_data(desc);
unsigned long flag;
/* unmask it */
@@ -1243,10 +1244,11 @@ void dmar_msi_unmask(unsigned int irq)
spin_unlock_irqrestore(&iommu->register_lock, flag);
}
-void dmar_msi_mask(unsigned int irq)
+void dmar_msi_mask(CHIP_PARAM)
{
+ CHIP_VAR
unsigned long flag;
- struct intel_iommu *iommu = get_irq_data(irq);
+ struct intel_iommu *iommu = get_irq_desc_data(desc);
/* mask it */
spin_lock_irqsave(&iommu->register_lock, flag);
@@ -1256,9 +1258,10 @@ void dmar_msi_mask(unsigned int irq)
spin_unlock_irqrestore(&iommu->register_lock, flag);
}
-void dmar_msi_write(int irq, struct msi_msg *msg)
+void dmar_msi_write(CHIP_PARAM, struct msi_msg *msg)
{
- struct intel_iommu *iommu = get_irq_data(irq);
+ CHIP_VAR
+ struct intel_iommu *iommu = get_irq_desc_data(desc);
unsigned long flag;
spin_lock_irqsave(&iommu->register_lock, flag);
@@ -1268,9 +1271,10 @@ void dmar_msi_write(int irq, struct msi_msg *msg)
spin_unlock_irqrestore(&iommu->register_lock, flag);
}
-void dmar_msi_read(int irq, struct msi_msg *msg)
+void dmar_msi_read(CHIP_PARAM, struct msi_msg *msg)
{
- struct intel_iommu *iommu = get_irq_data(irq);
+ CHIP_VAR
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index f9cf317..98ac7a6 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -169,9 +169,10 @@ static void msix_mask_irq(struct msi_desc *desc, u32 flag)
desc->masked = __msix_mask_irq(desc, flag);
}
-static void msi_set_mask_bit(unsigned irq, u32 flag)
+static void msi_set_mask_bit(struct irq_desc *descx, u32 flag)
{
- struct msi_desc *desc = get_irq_msi(irq);
+ unsigned int irq = descx->irq;
+ struct msi_desc *desc = get_irq_desc_msi(descx);
if (desc->msi_attrib.is_msix) {
msix_mask_irq(desc, flag);
@@ -182,18 +183,21 @@ static void msi_set_mask_bit(unsigned irq, u32 flag)
}
}
-void mask_msi_irq(unsigned int irq)
+void mask_msi_irq(CHIP_PARAM)
{
- msi_set_mask_bit(irq, 1);
+ CHIP_VAR;
+ msi_set_mask_bit(desc, 1);
}
-void unmask_msi_irq(unsigned int irq)
+void unmask_msi_irq(CHIP_PARAM)
{
- msi_set_mask_bit(irq, 0);
+ CHIP_VAR;
+ msi_set_mask_bit(desc, 0);
}
-void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
+void read_msi_msg(CHIP_PARAM, struct msi_msg *msg)
{
+ CHIP_VAR
struct msi_desc *entry = get_irq_desc_msi(desc);
if (entry->msi_attrib.is_msix) {
void __iomem *base = entry->mask_base +
@@ -221,15 +225,9 @@ void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
}
}
-void read_msi_msg(unsigned int irq, struct msi_msg *msg)
-{
- struct irq_desc *desc = irq_to_desc(irq);
-
- read_msi_msg_desc(desc, msg);
-}
-
-void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
+void write_msi_msg(CHIP_PARAM, struct msi_msg *msg)
{
+ CHIP_VAR
struct msi_desc *entry = get_irq_desc_msi(desc);
if (entry->msi_attrib.is_msix) {
void __iomem *base;
@@ -264,13 +262,6 @@ void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
entry->msg = *msg;
}
-void write_msi_msg(unsigned int irq, struct msi_msg *msg)
-{
- struct irq_desc *desc = irq_to_desc(irq);
-
- write_msi_msg_desc(desc, msg);
-}
-
static void free_msi_irqs(struct pci_dev *dev)
{
struct msi_desc *entry, *tmp;
@@ -319,16 +310,19 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
int pos;
u16 control;
struct msi_desc *entry;
+ unsigned int irq = dev->irq;
+ struct irq_desc *desc;
if (!dev->msi_enabled)
return;
- entry = get_irq_msi(dev->irq);
+ desc = irq_to_desc(irq);
+ entry = get_irq_desc_msi(desc);
pos = entry->msi_attrib.pos;
pci_intx_for_msi(dev, 0);
msi_set_enable(dev, pos, 0);
- write_msi_msg(dev->irq, &entry->msg);
+ write_msi_msg(CHIP_ARG, &entry->msg);
pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
msi_mask_irq(entry, msi_capable_mask(control), entry->masked);
@@ -356,7 +350,11 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
list_for_each_entry(entry, &dev->msi_list, list) {
- write_msi_msg(entry->irq, &entry->msg);
+ unsigned int irq = entry->irq;
+#ifdef CONFIG_CHIP_PARAM_DESC
+ struct irq_desc *desc = irq_to_desc(irq);
+#endif
+ write_msi_msg(CHIP_ARG, &entry->msg);
msix_mask_irq(entry, entry->masked);
}
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 64cbbe4..65372ad 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -730,11 +730,11 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
return 0;
}
-static int set_affinity_irq(unsigned irq, const struct cpumask *dest)
+static int set_affinity_irq(CHIP_PARAM, const struct cpumask *dest)
{
unsigned tcpu = cpumask_first(dest);
- return rebind_irq_to_cpu(irq, tcpu);
+ CHIP_VAR_IRQ return rebind_irq_to_cpu(irq, tcpu);
}
int resend_irq_on_evtchn(unsigned int irq)
@@ -753,35 +753,35 @@ int resend_irq_on_evtchn(unsigned int irq)
return 1;
}
-static void enable_dynirq(unsigned int irq)
+static void enable_dynirq(CHIP_PARAM)
{
- int evtchn = evtchn_from_irq(irq);
+ CHIP_VAR_IRQ int evtchn = evtchn_from_irq(irq);
if (VALID_EVTCHN(evtchn))
unmask_evtchn(evtchn);
}
-static void disable_dynirq(unsigned int irq)
+static void disable_dynirq(CHIP_PARAM)
{
- int evtchn = evtchn_from_irq(irq);
+ CHIP_VAR_IRQ int evtchn = evtchn_from_irq(irq);
if (VALID_EVTCHN(evtchn))
mask_evtchn(evtchn);
}
-static void ack_dynirq(unsigned int irq)
+static void ack_dynirq(CHIP_PARAM)
{
- int evtchn = evtchn_from_irq(irq);
+ CHIP_VAR_IRQ int evtchn = evtchn_from_irq(irq);
- move_native_irq(irq);
+ move_native_irq(CHIP_ARG);
if (VALID_EVTCHN(evtchn))
clear_evtchn(evtchn);
}
-static int retrigger_dynirq(unsigned int irq)
+static int retrigger_dynirq(CHIP_PARAM)
{
- int evtchn = evtchn_from_irq(irq);
+ CHIP_VAR_IRQ int evtchn = evtchn_from_irq(irq);
struct shared_info *sh = HYPERVISOR_shared_info;
int ret = 0;
diff --git a/include/asm-generic/hardirq.h b/include/asm-generic/hardirq.h
index 62f5908..23b0724 100644
--- a/include/asm-generic/hardirq.h
+++ b/include/asm-generic/hardirq.h
@@ -12,8 +12,10 @@ typedef struct {
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
#ifndef ack_bad_irq
-static inline void ack_bad_irq(unsigned int irq)
+static inline void ack_bad_irq(CHIP_PARAM)
{
+ CHIP_VAR_IRQ
+
printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
}
#endif
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 659a765..425c38a 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -187,10 +187,10 @@ static inline int set_msi_sid(struct irte *irte, struct pci_dev *dev)
/* Can't use the common MSI interrupt functions
* since DMAR is not a pci device
*/
-extern void dmar_msi_unmask(unsigned int irq);
-extern void dmar_msi_mask(unsigned int irq);
-extern void dmar_msi_read(int irq, struct msi_msg *msg);
-extern void dmar_msi_write(int irq, struct msi_msg *msg);
+extern void dmar_msi_unmask(CHIP_PARAM);
+extern void dmar_msi_mask(CHIP_PARAM);
+extern void dmar_msi_read(CHIP_PARAM, struct msi_msg *msg);
+extern void dmar_msi_write(CHIP_PARAM, struct msi_msg *msg);
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 5a110a4..36ea6ac 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -264,8 +264,8 @@ extern void remove_irq(unsigned int irq, struct irqaction *act);
#ifdef CONFIG_GENERIC_PENDING_IRQ
-void move_native_irq(unsigned int irq);
-void move_masked_irq(unsigned int irq);
+void move_native_irq(CHIP_PARAM);
+void move_masked_irq(CHIP_PARAM);
#else /* CONFIG_GENERIC_PENDING_IRQ */
@@ -349,9 +349,6 @@ static inline void generic_handle_irq(unsigned int irq)
extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
irqreturn_t action_ret);
-/* Resending of interrupts :*/
-void check_irq_resend(struct irq_desc *desc, unsigned int irq);
-
/* Enable/disable irq debugging output: */
extern int noirqdebug_setup(char *str);
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 6991ab5..2ab1cd3 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -3,6 +3,8 @@
#include <linux/list.h>
+#include <linux/irq.h>
+
struct msi_msg {
u32 address_lo; /* low 32 bits of msi message address */
u32 address_hi; /* high 32 bits of msi message address */
@@ -10,13 +12,10 @@ struct msi_msg {
};
/* Helper functions */
-struct irq_desc;
-extern void mask_msi_irq(unsigned int irq);
-extern void unmask_msi_irq(unsigned int irq);
-extern void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
-extern void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
-extern void read_msi_msg(unsigned int irq, struct msi_msg *msg);
-extern void write_msi_msg(unsigned int irq, struct msi_msg *msg);
+extern void mask_msi_irq(CHIP_PARAM);
+extern void unmask_msi_irq(CHIP_PARAM);
+extern void read_msi_msg(CHIP_PARAM, struct msi_msg *msg);
+extern void write_msi_msg(CHIP_PARAM, struct msi_msg *msg);
struct msi_desc {
struct {
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 9e4a0a0..1c1d465 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -38,9 +38,9 @@ struct lock_class_key irq_desc_lock_class;
*/
void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
{
- print_irq_desc(irq, desc);
+ print_irq_desc(desc);
kstat_incr_irqs_this_cpu(irq, desc);
- ack_bad_irq(irq);
+ ack_bad_irq(CHIP_ARG);
}
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
@@ -299,8 +299,8 @@ static void ack_bad(CHIP_PARAM)
{
CHIP_VAR;
- print_irq_desc(desc->irq, desc);
- ack_bad_irq(desc->irq);
+ print_irq_desc(desc);
+ ack_bad_irq(CHIP_ARG);
}
/*
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 2a9ec5e..dca36c4 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -14,6 +14,8 @@ extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
unsigned long flags);
extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
+/* Resending of interrupts :*/
+void check_irq_resend(struct irq_desc *desc, unsigned int irq);
extern struct lock_class_key irq_desc_lock_class;
extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
@@ -61,10 +63,11 @@ static inline void chip_bus_sync_unlock(unsigned int irq, struct irq_desc *desc)
#define P(f) if (desc->status & f) printk("%14s set\n", #f)
-static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
+static inline void print_irq_desc(struct irq_desc *desc)
{
printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n",
- irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);
+ desc->irq, desc, desc->depth, desc->irq_count,
+ desc->irqs_unhandled);
printk("->handle_irq(): %p, ", desc->handle_irq);
print_symbol("%s\n", (unsigned long)desc->handle_irq);
printk("->chip(): %p, ", desc->chip);
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 5821159..849b3d0 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -4,9 +4,9 @@
#include "internals.h"
-void move_masked_irq(unsigned int irq)
+void move_masked_irq(CHIP_PARAM)
{
- struct irq_desc *desc = irq_to_desc(irq);
+ CHIP_VAR
if (likely(!(desc->status & IRQ_MOVE_PENDING)))
return;
@@ -51,9 +51,9 @@ void move_masked_irq(unsigned int irq)
cpumask_clear(desc->pending_mask);
}
-void move_native_irq(unsigned int irq)
+void move_native_irq(CHIP_PARAM)
{
- struct irq_desc *desc = irq_to_desc(irq);
+ CHIP_VAR
if (likely(!(desc->status & IRQ_MOVE_PENDING)))
return;
@@ -62,7 +62,7 @@ void move_native_irq(unsigned int irq)
return;
desc->chip->mask(CHIP_ARG);
- move_masked_irq(irq);
+ move_masked_irq(CHIP_ARG);
desc->chip->unmask(CHIP_ARG);
Signed-off-by: Yinghai Lu <yin...@kernel.org>
---
arch/x86/include/asm/io_apic.h | 4 --
arch/x86/kernel/apic/io_apic.c | 78 +++++++++++++++++++---------------------
drivers/pci/intr_remapping.c | 69 +++++++++++++++--------------------
include/linux/dmar.h | 36 ++++++++++---------
4 files changed, 85 insertions(+), 102 deletions(-)
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index c4683b9..d249186 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -171,10 +171,6 @@ extern int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
extern void probe_nr_irqs_gsi(void);
-extern int setup_ioapic_entry(int apic, int irq,
- struct IO_APIC_route_entry *entry,
- unsigned int destination, int trigger,
- int polarity, int vector, int pin);
extern void ioapic_write_entry(int apic, int pin,
struct IO_APIC_route_entry e);
extern void setup_ioapic_ids_from_mpc(void);
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 432bea1..ce93428 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1340,7 +1340,7 @@ static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long t
else
desc->status &= ~IRQ_LEVEL;
- if (irq_remapped(irq)) {
+ if (irq_remapped(desc)) {
desc->status |= IRQ_MOVE_PCNTXT;
+ desc = irq_to_desc(irq);
+ dynamic_irq_cleanup_keep_chip_data(desc);
- free_irte(irq);
+ free_irte(desc);
raw_spin_lock_irqsave(&vector_lock, flags);
- desc = irq_to_desc(irq);
cfg = desc->chip_data;
__clear_irq_vector(desc, cfg);
raw_spin_unlock_irqrestore(&vector_lock, flags);
@@ -3345,7 +3339,6 @@ void destroy_irq(unsigned int irq)
static int msi_compose_msg(struct pci_dev *pdev, struct irq_desc *desc,
struct msi_msg *msg, u8 hpet_id)
{
- unsigned int irq = desc->irq;
struct irq_cfg *cfg;
int err;
unsigned dest;
@@ -3360,15 +3353,15 @@ static int msi_compose_msg(struct pci_dev *pdev, struct irq_desc *desc,
dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
- if (irq_remapped(irq)) {
+ if (irq_remapped(desc)) {
struct irte irte;
int ir_index;
u16 sub_handle;
- ir_index = map_irq_to_irte_handle(irq, &sub_handle);
+ ir_index = map_irq_to_irte_handle(desc, &sub_handle);
BUG_ON(ir_index == -1);
- memset (&irte, 0, sizeof(irte));
+ memset(&irte, 0, sizeof(irte));
irte.present = 1;
irte.dst_mode = apic->irq_dest_mode;
@@ -3383,7 +3376,7 @@ static int msi_compose_msg(struct pci_dev *pdev, struct irq_desc *desc,
else
set_hpet_sid(&irte, hpet_id);
- modify_irte(irq, &irte);
+ modify_irte(desc, &irte);
msg->address_hi = MSI_ADDR_BASE_HI;
msg->data = sub_handle;
@@ -3451,12 +3444,11 @@ set_msi_irq_affinity(struct irq_desc *desc, const struct cpumask *mask)
static int
ir_set_msi_irq_affinity(struct irq_desc *desc, const struct cpumask *mask)
{
- unsigned int irq = desc->irq;
struct irq_cfg *cfg = desc->chip_data;
unsigned int dest;
struct irte irte;
- if (get_irte(irq, &irte))
+ if (get_irte(desc, &irte))
return -1;
if (set_desc_affinity(desc, mask, &dest))
@@ -3468,7 +3460,7 @@ ir_set_msi_irq_affinity(struct irq_desc *desc, const struct cpumask *mask)
/*
* atomically update the IRTE with the new destination and vector.
*/
- modify_irte(irq, &irte);
+ modify_irte(desc, &irte);
/*
* After this point, all the interrupts will start arriving
@@ -3523,7 +3515,7 @@ static struct irq_chip msi_ir_chip = {
* and allocate 'nvec' consecutive interrupt-remapping table entries
* in it.
*/
-static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
+static int msi_alloc_irte(struct pci_dev *dev, struct irq_desc *desc, int nvec)
{
struct intel_iommu *iommu;
int index;
@@ -3535,7 +3527,7 @@ static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
return -ENOENT;
}
- index = alloc_irte(iommu, irq, nvec);
+ index = alloc_irte(iommu, desc, nvec);
if (index < 0) {
printk(KERN_ERR
"Unable to allocate %d IRTE for PCI %s\n", nvec,
@@ -3545,20 +3537,22 @@ static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
return index;
}
-static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
+static int
+setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, struct irq_desc *desc)
{
int ret;
struct msi_msg msg;
- struct irq_desc *desc = irq_to_desc(irq);
+ unsigned int irq;
ret = msi_compose_msg(dev, desc, &msg, -1);
if (ret < 0)
return ret;
- set_irq_msi(irq, msidesc);
+ set_irq_desc_msi(desc, msidesc);
write_msi_msg(desc, &msg);
- if (irq_remapped(irq)) {
+ irq = desc->irq;
+ if (irq_remapped(desc)) {
/*
* irq migration in process context
*/
@@ -3575,6 +3569,7 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
unsigned int irq;
+ struct irq_desc *desc;
int ret, sub_handle;
struct msi_desc *msidesc;
unsigned int irq_want;
@@ -3594,6 +3589,7 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
if (irq == 0)
return -1;
irq_want = irq + 1;
+ desc = irq_to_desc(irq);
if (!intr_remapping_enabled)
goto no_ir;
@@ -3602,7 +3598,7 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
* allocate the consecutive block of IRTE's
* for 'nvec'
*/
- index = msi_alloc_irte(dev, irq, nvec);
+ index = msi_alloc_irte(dev, desc, nvec);
if (index < 0) {
ret = index;
goto error;
@@ -3618,10 +3614,10 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
* base index, the sub_handle pointing to the
* appropriate interrupt remap table entry.
*/
- set_irte_irq(irq, iommu, index, sub_handle);
+ set_irte_irq(desc, iommu, index, sub_handle);
}
no_ir:
- ret = setup_msi_irq(dev, msidesc, irq);
+ ret = setup_msi_irq(dev, msidesc, desc);
if (ret < 0)
goto error;
sub_handle++;
@@ -3768,7 +3764,7 @@ int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
if (!iommu)
return -1;
- index = alloc_irte(iommu, irq, 1);
+ index = alloc_irte(iommu, desc, 1);
if (index < 0)
return -1;
}
@@ -3779,7 +3775,7 @@ int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
hpet_msi_write(desc, &msg);
desc->status |= IRQ_MOVE_PCNTXT;
- if (irq_remapped(irq))
+ if (irq_remapped(desc))
set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type,
handle_edge_irq, "edge");
else
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 95b8491..1c03bc7 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -45,33 +45,27 @@ static struct irq_2_iommu *get_one_free_irq_2_iommu(int node)
return iommu;
}
-static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
+static struct irq_2_iommu *irq_2_iommu(struct irq_desc *desc)
{
- struct irq_desc *desc;
-
- desc = irq_to_desc(irq);
-
if (WARN_ON_ONCE(!desc))
return NULL;
return desc->irq_2_iommu;
}
-static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
+static struct irq_2_iommu *irq_2_iommu_alloc(struct irq_desc *desc)
{
- struct irq_desc *desc;
struct irq_2_iommu *irq_iommu;
- desc = irq_to_desc(irq);
if (!desc) {
- printk(KERN_INFO "can not get irq_desc for %d\n", irq);
+ printk(KERN_INFO "can not get irq_desc\n");
return NULL;
}
irq_iommu = desc->irq_2_iommu;
if (!irq_iommu)
- desc->irq_2_iommu = get_one_free_irq_2_iommu(irq_node(irq));
+ desc->irq_2_iommu = get_one_free_irq_2_iommu(irq_desc_node(desc));
return desc->irq_2_iommu;
}
@@ -80,26 +74,27 @@ static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
-static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
+static struct irq_2_iommu *irq_2_iommu(struct irq_desc *desc)
{
+ unsigned int irq = desc->irq;
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 425c38a..8b0e4f5 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
First of all, please post separate series; it's hard enough as it is.
Cheers,
Ben.
NAK
This is even worse than before. You are now moving that entire pile of
x86 gunk into "generic" code, but you even keep the e820 names there!
What happened to the discussion we had earlier, which iirc concluded
that a better approach would be to adapt x86 to use LMB?
Cheers,
Ben.
--
I want to make some modifications to the SMP architecture.
Purpose:
Only the first CPU runs the Linux OS, while the others do some private
services processing.
My solution:
At the end of the start_secondary() function, I try to schedule the slave
cpu to call my private endless loop instead of cpu_idle();
Result:
The system can NOT come up; there is no interactive cli.
Question:
Is there something wrong with my modification, or am I going the wrong way?
Thank you very much.
> Hi All:
>
> I want to make some modifications to the SMP architecture.
>
> Purpose:
> Only the first CPU runs the Linux OS, while the others do some private
> services processing.
>
> My solution:
> At the end of the start_secondary() function, I try to schedule the slave
> cpu to call my private endless loop instead of cpu_idle();
>
> Result:
> The system can NOT come up; there is no interactive cli.
>
> Question:
> Is there something wrong with my modification, or am I going the wrong way?
Presumably you're doing this to own that CPU exclusively.
Hooking at cpu_idle is not very useful then, because interrupts will
already be enabled and the system participates in IPIs etc., so you can't
simply disable them; the others will miss them.
You would rather need to prevent them from being started in the
first place, e.g. by excluding them with maxcpus=..
A better alternative might be to use isolcpus=... and schedule
a standard program.
-Andi
--
a...@linux.intel.com -- Speaking for myself only.
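A minimal userspace sketch of the isolcpus approach suggested above
(assumptions: the box is booted with isolcpus=1, and the private
service is an ordinary program pinned to that CPU with
sched_setaffinity()):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(1, &set);	/* CPU 1 assumed isolated via isolcpus=1 */

	/* bind the calling process to the isolated CPU */
	if (sched_setaffinity(0, sizeof(set), &set)) {
		perror("sched_setaffinity");
		return 1;
	}

	for (;;) {
		/* private services processing goes here */
	}
}

Since isolcpus= keeps the isolated CPUs out of the general scheduler
domains, nothing else is scheduled there, and this loop effectively
owns the CPU while CPU 0 keeps running the normal system.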
obj-$(CONFIG_HAVE_EARLY_RES) += early_res.o fw_memmap.o
so it will not increase the kernel size for architectures that don't support early_res/bootmem yet.
>
> This is even worse than before. You are now moving that entire pile of
> x86 gunk into "generic" code, but even keep it names e820 there !
not all of the e820 code; arch/x86/kernel/e820.c is still there.
linux-2.6> wc -l arch/x86/kernel/e820.c
690 arch/x86/kernel/e820.c
linux-2.6> wc -l kernel/fw_memmap.c
625 kernel/fw_memmap.c
and the interface header file:
yhlu@linux-siqj:~/xx/xx/kernel/tip/linux-2.6> cat include/linux/fw_memmap.h
#ifndef _LINUX_FW_MEMMAP_H
#define _LINUX_FW_MEMMAP_H
#define E820MAX 128 /* number of entries in E820MAP */
#define FW_MEMMAP_RAM 1
#define FW_MEMMAP_RESERVED 2
#define E820_RAM FW_MEMMAP_RAM
#define E820_RESERVED FW_MEMMAP_RESERVED
#define E820_ACPI 3
#define E820_NVS 4
#define E820_UNUSABLE 5
#ifndef __ASSEMBLY__
#include <linux/types.h>
struct e820entry {
__u64 addr; /* start of memory segment */
__u64 size; /* size of memory segment */
__u32 type; /* type of memory segment */
} __attribute__((packed));
#ifdef __KERNEL__
void fw_memmap_add_region(u64 start, u64 size, int type);
void fw_memmap_print_map(char *who);
int sanitize_fw_memmap(void);
void finish_fw_memmap_parsing(void);
#include <linux/early_res.h>
unsigned long fw_memmap_end_of_ram_pfn(void);
void fw_memmap_register_active_regions(int nid, unsigned long start_pfn,
unsigned long end_pfn);
u64 fw_memmap_hole_size(u64 start, u64 end);
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_FW_MEMMAP_H */
and a new arch that supports early_res/bootmem will normally only need to call those six functions,
like:
+void __init setup_memory_map(void)
+{
+ int i;
+ unsigned long phys_base;
+ /* Find available physical memory...
+ *
+ * Read it twice in order to work around a bug in openfirmware.
+ * The call to grab this table itself can cause openfirmware to
+ * allocate memory, which in turn can take away some space from
+ * the list of available memory. Reading it twice makes sure
+ * we really do get the final value.
+ */
+ read_obp_translations();
+ read_obp_memory("reg", &pall[0], &pall_ents);
+ read_obp_memory("available", &pavail[0], &pavail_ents);
+ read_obp_memory("available", &pavail[0], &pavail_ents);
+
+ phys_base = 0xffffffffffffffffUL;
+ for (i = 0; i < pavail_ents; i++) {
+ phys_base = min(phys_base, pavail[i].phys_addr);
+ fw_memmap_add_region(pavail[i].phys_addr, pavail[i].reg_size,
+ FW_MEMMAP_RAM);
+ }
+
+ sanitize_fw_memmap();
+
+ fw_memmap_print_map("obp memmap:");
>
> What happened to the discussion we had earlier, which iirc concluded
> that a better approach would be to adapt x86 to use LMB ?
e820/early_res is more complicated than lmb. To make x86 work, we still need to keep the e820-related code,
and early_res can already be used to replace the bootmem code.
Maybe we should first find users other than the previous lmb users.
Attached is the one for the sparc64/lmb conversion... I have had no chance to debug it;
qemu doesn't seem to support sparc64 well yet.
Yinghai
We told you a thousand times to investigate using LMB for all of
this.
Instead you are posting the sparc64 conversion to the e820 stuff
again.
That action means you absolutely don't value our feedback at all.
You seem to really not care what a mess you are (unnecessarily)
making.
>>
>> That action means you absolutely don't value our feedback at all.
>
> [PATCH 01/20] x86: add find_e820_area_node
> is addressing your concern that early_res didn't handle the memory-crossing-nodes problem.
Now I know that you _REALLY_ aren't listening to us.
We said to use LMB because 1) it already exists 2) many
platforms have been using it for years and 3) it doesn't
lack the features you're now having to add to e820.
Instead of trying to use LMB, you're just adding feature after
feature to e820 in order to bring it up to parity with LMB.
You're wasting a lot of time, and you're completely ignoring an
existing facility that has been worked on and used for many
years.
And you've never ever convinced any of us familiar with LMB why you
keep doing this, and why LMB can't be used for what you need this
generic e820 crap for.
I'm absolutely flabbergasted at this point, you really have no
value for the feedback you've been given.
None at all.
[PATCH 01/20] x86: add find_e820_area_node
is addressing your concern that early_res didn't handle the memory-crossing-nodes problem.
YH
I'm still not at all happy with it. It's not only about increasing the
size of the kernel. It's about moving some x86 specific stuff and more
or less arbitrarily deciding that everybody has to convert to that model
now, despite the fact that more suited alternatives have existed for
years, rather than thinking about doing the logical thing, which is to
convert x86 over to lmb, eventually adding the missing functionalities
in lmb if need be.
Also, there's something just plain gross about the choice of names.
fw_memmap is something I wouldn't wish my enemies to have to type on a
keyboard; it looks ugly, and it leads to way too long function names. In
addition, your "generic" facility is still all cluttered
with the e820 names and other very x86-centric definitions.
It -may- well be that adapting x86 to lmb isn't a practical approach,
but if that is the case, then please justify why with precise technical
reasons, which we can then discuss in detail and make a decision based
on that.
Cheers,
Ben.
1. lmb merges regions when you add a new reserved region.
early_res doesn't do that merge, so a later free could work out the wrong range.
<free_early_partial was recently added, for per-cpu setup only>
2. the mem types in the e820 map go beyond RAM: RAM, reserved, ACPI, ACPI NVS, type 9(?), KERN_RESERVED...
3. in early_res, every range has a name tag.
4. early_res is array based; it can automatically double the array size and copy the old array to the new one, and the first entry in the new array is for the array itself.
if we want x86 to use lmb, the e820 map and lmb.memory would be duplicated.
we would also need lmb.memory to support more types; otherwise we would still have to go back and check e820 for the e820-reserved regions etc.
Thanks
Yinghai
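To make point 1 concrete, an illustrative userspace sketch (invented
names; this is neither lmb nor early_res code) of the merge-on-add
behaviour described above. An append-only scheme would record two
entries for the same span, so a later free of the combined range would
not match any single recorded region:

#include <stdio.h>

struct region { unsigned long start, end; };	/* end is exclusive */

/*
 * lmb-style add: coalesce with an overlapping or adjacent entry.
 * Simplified: it does not re-merge chains of entries the way a real
 * allocator would.
 */
static int add_region_merged(struct region *r, int n,
			     unsigned long start, unsigned long end)
{
	int i;

	for (i = 0; i < n; i++) {
		if (start <= r[i].end && end >= r[i].start) {
			if (start < r[i].start)
				r[i].start = start;
			if (end > r[i].end)
				r[i].end = end;
			return n;	/* merged into entry i */
		}
	}
	r[n].start = start;
	r[n].end = end;
	return n + 1;			/* appended, early_res style */
}

int main(void)
{
	struct region r[8];
	int n = 0;

	n = add_region_merged(r, n, 0x1000, 0x2000);
	n = add_region_merged(r, n, 0x2000, 0x3000);	/* adjacent: merges */
	printf("%d region(s): [%#lx-%#lx)\n", n, r[0].start, r[0].end);
	return 0;
}

This prints "1 region(s): [0x1000-0x3000)"; without the merge loop it
would report two regions covering the same span.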
> On 03/21/2010 10:12 PM, Benjamin Herrenschmidt wrote:
>> It -may- well be that adapting x86 to lmb isn't a practical approach,
>> but if that is the case, then please justify why with precise technical
>> reasons, which we can then discuss in detail and make a decision based
>> on that.
>
> 1. lmb merges regions when you add a new reserved region.
> early_res doesn't do that merge, so a later free could work out the wrong range.
> <free_early_partial was recently added, for per-cpu setup only>
> 2. the mem types in the e820 map go beyond RAM: RAM, reserved, ACPI, ACPI NVS, type 9(?), KERN_RESERVED...
lmb has a reserved type as well.
Beyond that, does anything except /proc/iomem and ACPI care?
> 3. in early_res, every range has a name tag.
> 4. early_res is array based; it can automatically double the array size and copy the old array to the new one, and the first entry in the new array is for the array itself.
>
> if we want x86 to use lmb, the e820 map and lmb.memory would be duplicated.
> we would also need lmb.memory to support more types; otherwise we would still have to go back and check e820 for the e820-reserved regions etc.
lmb clearly supports reserved regions, as well as ram regions.
Eric
Thank you very much. But when I try the isolcpus option,
it seems it still can NOT work.
All other CPUs are isolated; only the first one is left.
Then I found all the others are still initialized by start_secondary()
even when I use the isolcpus option; in the end,
I added an endless loop instead of cpu_idle(), and the system still can NOT
come up.
So is there any mistake in what I did? Could someone be kind enough to give
me more suggestions?
Thanks in advance.
* David Miller <da...@davemloft.net> wrote:
> From: Yinghai Lu <yin...@kernel.org>
> Date: Sun, 21 Mar 2010 21:28:38 -0700
>
> >>
> >> That action means you absolutely don't value our feedback at all.
> >
> > [PATCH 01/20] x86: add find_e820_area_node
> > is addressing your concern that early_res didn't handle the problem of memory crossing nodes.
>
> Now I know that you _REALLY_ aren't listening to us.
[ He has done a bit more than just simply listen: he seems to have written
a patch which he thinks is addressing the concerns you pointed out. It might
not be the response you wished for (and it might be inadequate) but it
sure gives me the impression of him listening to you - unless by 'listening'
you mean 'follow my exact opinion without argument'. ]
> We said to use LMB because 1) it already exists 2) many platforms have been
> using it for years and 3) it doesn't lack the features you're now having to
> add to e820.
The thing is, lib/lmb.c was librarized two years ago by you (well after
early_res had been written for x86), but was not properly integrated into the
core kernel nor into x86. It was first suggested by you in the early_res
context about ten days ago, when Yinghai started posting Sparc64 patches.
Which is about half a year after the whole very difficult early_res/bootmem
work was started by Yinghai :-(
I dont mind LMB per se, logically it seems quite similar to the early_res bits
Yinghai has generalized (to a certain degree), and is quite a bit cleaner as
you are writing very clean code.
Note the other side of the coin: LMB appears to be deployed on only 4 non-x86
architectures that muster ~1% of the Linux boxes while early_res is deployed
on more than 95%.
So there's a very real hardship of testing and conversion here that we cannot
ignore and an even better path may be to gradually transform the more tested
and more deployed early_res code to meet the interface details of LMB.
Please also realize the difficulties Yinghai has gone through already:
converting and generalizing _all_ of the x86 early allocation code to a more
generic core kernel approach, with essentially zero interest from _any_
non-x86 person ...
Those early_res patches were posted all over on lkml, it was literally
hundreds of difficult patches, and now, months down the line, after we've
tested and upstreamed it (with many nasty regressions fixed on x86 during the
development of it) you come with a rigid "do it some other way, convert all of
x86 over again or else" position.
I really wish non-x86 architectures appreciated (and helped) the core kernel
work x86 is doing, because it is subsidizing non-x86 architectures all the
time.
For example when LMB was plopped into lib/lmb.c in 2008 why was it not ported
to x86, our most popular architecture? Did you consider posting LMB patches
for x86 instead of expecting Yinghai to post Sparc64, PowerPC, SH and
Microblaze patches?
Anyway, i'm sure we can work out an approach, and yes, LMB looks pretty good
and could be picked up if it can be done gradually - given some mutual
willingness to work on this as equals.
Thanks,
Ingo
> ( Cc:-ed Andrew and Linus as this is a general design/policy matter wrt.
> memory management. )
[snip]
> Please also realize the difficulties Yinghai has gone through already:
> converting and generalizing _all_ of the x86 early allocation code to a more
> generic core kernel approach, with essentially zero interest from _any_
> non-x86 person ...
It still seemed to have a lot that was x86-specific - in particular it
seemed to have a lot of code to cope with various mistakes that
firmware might have made in the memory map. That adds code which is
basically just bloat on architectures where those problems don't
arise.
The fw_memmap.c code also still seemed to be tied to the x86 e820 data
structures and layouts.
> Those early_res patches were posted all over on lkml, it was literally
> hundreds of difficult patches, and now, months down the line, after we've
> tested and upstreamed it (with many nasty regressions fixed on x86 during the
> development of it) you come with a rigid "do it some other way, convert all of
> x86 over again or else" position.
Well I personally don't mind if x86 uses early_res or whatever other
code in arch/x86 to handle the problems that arise from deficient
firmware. I just don't see any value in converting powerpc or sparc64
over to using ~2000 lines of early_res/fw_memmap code where the
existing ~500 lines of lmb code is working just fine.
And I don't see the point of moving the x86 e820 stuff into the kernel
directory. Does any other platform use e820 tables?
> I really wish non-x86 architectures appreciated (and helped) the core kernel
> work x86 is doing, because it is subsidizing non-x86 architectures all the
> time.
We do help with core kernel work. Coping with deficient x86 firmware
doesn't really feel like core kernel work to me though.
Paul.
> And I don't see the point of moving the x86 e820 stuff into the kernel
> directory. [...]
I dont see the point of that either - that is a mistake. e820 is an x86 bios
call and we shouldnt name a generic mechanism after that. e820 is absolutely
messy and has no place anywhere beyond x86.
The main technical argument i see is 'early_res versus LMB'. Even there i'd
prefer LMB from a technical quality POV.
> Well I personally don't mind if x86 uses early_res or whatever other code in
> arch/x86 to handle the problems that arise from deficient firmware. I just
> don't see any value in converting powerpc or sparc64 over to using ~2000
> lines of early_res/fw_memmap code where the existing ~500 lines of lmb code
> is working just fine.
Lets put it this way then: do you see any point in PowerPC making use of a 10+
million lines of code kernel that is being mainly (80%+) financed, developed,
tested and deployed by people who care about x86 mostly?
If yes then it seems like a pretty damn good deal to me for PowerPC to go
beyond its narrow short-term self-interest and work towards generalizations
more actively, and even consider touching its 500 lines of lmb code ...
I dont know how many times we've accommodated non-x86 architectures in
various pieces of kernel code.
Obviously if there's bloat affecting PowerPC then that can be addressed via
technical measures. But we really shouldnt leave the slightly incompatible
early allocators in place. (we shouldnt have let them get created in the first
place, but that is water under the bridge.)
Thanks,
Ingo
> only one user, move it back to smpboot.c
>
> remove smpboot_clear_io_apic, and only keep smpboot_clear_io_apic_irqs.
>
> and check nr_legacy_irqs before clear it.
This is not how we do that. We move code in one patch and then do the
modification in a follow-up patch. Doing both at once makes it harder
than necessary to follow the code change.
As a side note, the changelog for the removal of
smpboot_clear_io_apic does not say anything about why that change is
functionally equivalent to the original code. And I doubt that it's
correct.
Thanks,
tglx
On Mon, 22 Mar 2010, Ingo Molnar wrote:
>
> ( Cc:-ed Andrew and Linus as this is a general design/policy matter wrt.
> memory management. )
>
> * David Miller <da...@davemloft.net> wrote:
>
> > From: Yinghai Lu <yin...@kernel.org>
> > Date: Sun, 21 Mar 2010 21:28:38 -0700
> >
> > >>
> > >> That action means you absolutely don't value our feedback at all.
> > >
> > > [PATCH 01/20] x86: add find_e820_area_node
> > > is addressing your concern that early_res didn't handle the problem of memory crossing nodes.
> >
> > Now I know that you _REALLY_ aren't listening to us.
> [ He has done a bit more than just simply listen: he seems to
> have written a patch which he thinks is addressing the concerns you
> pointed out. It might not be the response you wished for (and it
> might be inadequate) but it sure gives me the impression of him
> listening to you - unless by 'listening' you mean 'follow my exact
> opinion without argument'. ]
I tend to disagree. Fixing the bug pointed out by Dave is not really a
convincing demonstration of listening.
The main point is that there is still no answer why lmb cannot be used
and the reposted patch still is a full move of the x86 e820 functions
into kernel/fw_memmap.c.
That's not a generalization, that's simply a relocation of x86 code to
kernel/. And I agree with Dave and Ben that this is a useless
exercise.
> > We said to use LMB because 1) it already exists 2) many platforms
> > have been using it for years and 3) it doesn't lack the features
> > you're now having to add to e820.
>
> The thing is, lib/lmb.c was librarized two years ago by you (well
> after early_res had been written for x86), but was not properly
> integrated into the core kernel nor into x86. It was first suggested
> by you in the early_res context about ten days ago, when Yinghai
> started posting Sparc64 patches.
>
> Which is about half a year after the whole very difficult
> early_res/bootmem work was started by Yinghai :-(
Well, the early_res split out from x86 was mostly an x86 related
effort and there is no point in enforcing that on archs which have been
happily using lmb for quite a while.
You have to admit that we did not notice either that lmb could be
reused for that purpose as well, so blaming Dave now for not paying
attention to the x86 early_res bits is unfair at the least.
> I dont mind LMB per se, logically it seems quite similar to the
> early_res bits Yinghai has generalized (to a certain degree), and is
> quite a bit cleaner as you are writing very clean code.
>
> Note the other side of the coin: LMB appears to be deployed on only
> 4 non-x86 architectures that muster ~1% of the Linux boxes while
> early_res is deployed on more than 95%.
Come on, that's a stupid argument. The lmb code looks well structured
and understandable, which I can't say about early_res.c. And it
definitely got a fair amount of testing and shakeout.
> So there's a very real hardship of testing and conversion here that
> we cannot ignore and an even better path may be to gradually
> transform the more tested and more deployed early_res code to meet
> the interface details of LMB.
I'd rather see that code moved to the cleaner code base of lmb.
> Please also realize the difficulties Yinghai has gone through
> already: converting and generalizing _all_ of the x86 early
> allocation code to a more generic core kernel approach, with
> essentially zero interest from _any_ non-x86 person ... Those
> early_res patches were posted all over on lkml, it was literally
> hundreds of difficult patches, and now, months down the line, after
> we've tested and upstreamed it (with many nasty regressions fixed on
> x86 during the development of it) you come with a rigid "do it some
> other way, convert all of x86 over again or else" position.
That's not what Dave and Ben said. They just opposed our pushing the
e820 horror into kernel/ and enforcing it on everybody else.
> I really wish non-x86 architectures appreciated (and helped) the
> core kernel work x86 is doing, because it is subsidizing non-x86
> architectures all the time.
Can we just stop that "x86 is the center of the universe" chant?
Most of the core kernel work is done by core kernel developers. Just
because you and I ended up being x86 maintainers does not change that
at all.
In some areas the core code actually suffers from x86. Just look at
timers. At least 50% of the code in kernel/time/clock* and tick-* is
just there to cope with x86 specific hardware wreckage.
I don't see a point in doing the same thing with the e820 horror. That
code drop in kernel/fw_memmap.c is _NOT_ what I consider core kernel
work in the sense of providing generalized non arch specific
infrastructure.
Also I consider lmb core infrastructure. It does not have to be placed
in kernel/ to count as such. It's pretty generic and easy to
extend.
> For example when LMB was plopped into lib/lmb.c in 2008 why was it
> not ported to x86, our most popular architecture? Did you consider
Why did _we_ not look at LMB? The early_res code of x86 was (and still
is to some degree) a tangled mess, and I wouldn't have expected any
non-x86 person to touch it with a ten foot pole.
We made a mistake and it got pointed out, so we had better sit down and
do our homework instead of insisting that early_res/fw_memmap is
something set in stone.
> posting LMB patches for x86 instead of expecting Yinghai to post
> Sparc64, PowerPC, SH and Microblaze patches?
Well, it's pretty easy to move those archs over, but I cannot see any
reason why they should move to something which they consider inferior.
> Anyway, i'm sure we can work out an approach, and yes, LMB looks
> pretty good and could be picked up if it can be done gradually -
> given some mutual willingness to work on this as equals.
I think it was made entirely clear that nobody is opposed to extending
lmb if the need arises.
So before going any further with this early_res stuff, we need to
analyse what has to be done (if anything) to make lmb usable for
x86. From that we can figure out, based on technical arguments, how to
proceed, not by reposting e820 -> kernel patches over and over.
Thanks,
tglx
ok - i think you are right. Yinghai, mind having a look at using lib/lmb.c for
all this?
Ingo
1. need to keep e820
2. use e820 range with RAM to fill lmb.memory when finizing_e820
3. use lmb.reserved to replace early_res.
current lmb is merging the region, we can not use name tag any more.
may need to dump early_memtest, and use early_res for bootmem at first.
YH
You use that argument ONE MORE FUCKING TIME and you'll end up in my
killfile with an auto-NACK reply to anything that looks like a patch from
you.
Ben.
> On Mon, 2010-03-22 at 10:28 +0100, Ingo Molnar wrote:
> > Note the other side of the coin: LMB appears to be deployed on only 4
> > non-x86 architectures that muster ~1% of the Linux boxes while early_res
> > is deployed on more than 95%.
>
> You use that argument ONE MORE FUCKING TIME and you'll end up in my killfile
> with an auto-NACK reply to anything that looks like a patch from you.
Does this mean you disagree with that? (I think it's pretty factual, last i
checked the usage stats of devel kernels were somewhere around 99.7%.)
In any case, i dont dispute that LMB is a bit cleaner than kernel/early_res.c
- and both are much cleaner than the new e820 kernel/fw_memmap.c code posted
here by Yinghai.
If you dont disagree then please spare me the insults. (or move me into your
killfile)
Thanks,
Ingo
Feel free to extend LMB as long as it's reasonable. For example, I
wouldn't object to adding flags or similar things to LMB regions, one of
them being used to prevent merging.
> may need to dump early_memtest, and use early_res for bootmem at
> first.
Cheers,
Ben.
Alright, I've calmed down now, so my apologies for going over the top
here; I shouldn't reply to emails before breakfast ... but it looks like
Thomas made the point a lot more clearly than Dave or I did, so it's all
good. Let's move on.
Cheers,
Then we have no argument. The point is, we object to that fw_memmap/e820
stuff taking over for non-x86 architectures. We aren't saying that x86
-must- move to LMB, but if the wish is to have a common implementation
in generic code across all archs, -then- we object to it being e820.
Ben.
There's an easy solution here. Leave that gunk in arch/x86 where it
belongs and if you want to unify things a bit, then do it at the -API-
level only, and leave the implementation where it is.
> I really wish non-x86 architectures appreciated (and helped) the core kernel
> work x86 is doing, because it is subsidizing non-x86 architectures all the
> time.
I'm not even going to bother replying to that one
Ben.
> On Mon, 2010-03-22 at 14:05 +0100, Ingo Molnar wrote:
> > * Paul Mackerras <pau...@samba.org> wrote:
> >
> > > And I don't see the point of moving the x86 e820 stuff into the kernel
> > > directory. [...]
> >
> > I dont see the point of that either - that is a mistake. e820 is an x86 bios
> > call and we shouldnt name a generic mechanism after that. e820 is absolutely
> > messy and has no place anywhere beyond x86.
> >
> > The main technical argument i see is 'early_res versus LMB'. Even there i'd
> > prefer LMB from a technical quality POV.
>
> Then we have no argument. The point is, we object to that fw_memmap/e820
> stuff taking over for non-x86 architectures. We aren't saying that x86
> -must- move to LMB, but if the wish is to have a common implementation in
> generic code across all archs, -then- we object to it being e820.
Ok, just in case i wasnt clear enough in my first reply (and i guess your mail
means i wasnt): that wholesale move of e820 into kernel/fw_memmap.c is a
total non-starter as far as i'm concerned.
And i kind of like the 'logical memory block' name - it is more intuitive than
'early_res' (which was always a misnomer IMO, just couldnt find a better name
for it and it stuck with us).
So no arguments from me at all about the code quality aspects - i just wanted
to highlight the huge amount of non-trivial work Yinghai has invested into
this already, with little external help, and that it would be nice
to minimize the disruption to related x86 code if possible. Please help him out
with more specific suggestions about how the two memory allocation spaces
could be unified best, to serve the needs of all these architectures - if you
have some spare time.
Thanks,
Ingo
> Does this mean you disagree with that? (I think it's pretty factual, last i
> checked the usage stats of devel kernels were somewhere around 99.7%.)
Is that number obtained from Fedora downloads or something? I
wouldn't be surprised if desktop usage of bleeding-edge kernels is
near 100%, since basically all desktop machines are x86 these days.
I think that number would be biased against server and embedded
machines, but without knowing exactly what you're counting it's hard
to say.
In any case, I don't think the number of machines is particularly
relevant. Linux runs on maybe 1% of all desktop machines in the
world, according to numbers I've seen, but that doesn't make it
irrelevant or not worth working on.
Paul.
Why not start by unifying the APIs to it, while keeping the
implementation in the arch for now ? That would be a good first step and
would give us a good idea of what kind of requirements all the archs
have since to some extent those requirements need to be represented in
this API.
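A hypothetical sketch of what that API-level unification could look like. The common entry point early_alloc() is an invented name for illustration; the backends are the allocators each arch already has (the lmb and early_res calls below are the ones discussed elsewhere in this thread):

/* include/linux/early_alloc.h (hypothetical): one prototype, per-arch backends */
u64 early_alloc(u64 size, u64 align, u64 goal, u64 limit);

/* powerpc/sparc backend, layered on lmb (goal not honored yet) */
u64 __init early_alloc(u64 size, u64 align, u64 goal, u64 limit)
{
	return lmb_alloc_base(size, align, limit);
}

/* x86 backend, layered on early_res */
u64 __init early_alloc(u64 size, u64 align, u64 goal, u64 limit)
{
	u64 addr = find_e820_area(goal, limit, size, align);

	if (addr != -1ULL)
		reserve_early(addr, addr + size, "early_alloc");
	return addr;
}

Unifying at this level would surface exactly which requirements (goal, node affinity, name tags) each arch actually needs, before any implementation gets moved.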
Cheers,
Ben.
I disagree with that being a relevant argument in the technical
discussion on the relative merits of two implementations of a given
facility. I also disagree with your numbers; if you talk about
deployment, I would be very, very surprised if ARM wasn't close to
on par with x86.
> In any case, i dont dispute that LMB is a bit cleaner than kernel/early_res.c
> - and both are much cleaner than the new e820 kernel/fw_memmap.c code posted
> here by Yinghai.
>
> If you dont disagree then please spare me the insults. (or move me into your
> killfile)
Well, I find some of your arguments quite insulting too, but let's move
on.
Cheers,
Ben.
current early_res has:
reserve/free/find
It doesn't have alloc, because that is equal to find + reserve.
All the find variants already subtract the reserved areas,
and they take start/end (goal/limit) and will honor the goal.
extern void reserve_early(u64 start, u64 end, char *name);
extern void reserve_early_overlap_ok(u64 start, u64 end, char *name);
extern void free_early(u64 start, u64 end);
void free_early_partial(u64 start, u64 end);
extern void early_res_to_bootmem(u64 start, u64 end);
void reserve_early_without_check(u64 start, u64 end, char *name);
u64 find_early_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
u64 size, u64 align);
u64 find_early_area_size(u64 ei_start, u64 ei_last, u64 start,
u64 *sizep, u64 align);
u64 find_fw_memmap_area(u64 start, u64 end, u64 size, u64 align);
u64 get_max_mapped(void);
int get_free_all_memory_range(struct range **rangep, int nodeid);
lmb has:
reserve/free/alloc
It does have lmb_find, but that doesn't subtract the reserved areas.
Also, lmb_alloc doesn't take a goal; it only has a limit.
YH
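To make the gap concrete, a minimal sketch (assuming the find_lmb_area() proposed later in this thread, plus the existing lmb_reserve()) of how an alloc that honors a goal can be layered on find + reserve:

/*
 * Sketch only: "alloc is equal to find + reserve", with a goal that is
 * tried first and a fallback anywhere below the limit.
 */
u64 __init lmb_alloc_in_range(u64 goal, u64 limit, u64 size, u64 align)
{
	u64 addr;

	/* first try at or above the goal */
	addr = find_lmb_area(goal, limit, size, align);
	/* then fall back to anywhere below the limit */
	if (addr == -1ULL)
		addr = find_lmb_area(0, limit, size, align);
	if (addr != -1ULL)
		lmb_reserve(addr, size);
	return addr;
}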
On Mon, 22 Mar 2010, Yinghai Lu wrote:
> On 03/22/2010 12:37 PM, Ingo Molnar wrote:
> > * Thomas Gleixner <tg...@linutronix.de> wrote:
>
> >> The main point is that there is still no answer why lmb cannot be used and
> >> the reposted patch still is a full move of the x86 e820 functions into
> >> kernel/fw_memmap.c.
> >>
> >> That's not a generalization, that's simply a relocation of x86 code to
> >> kernel/. And I agree with Dave and Ben that this is an useless exercise.
> >
> > ok - i think you are right. Yinghai, mind having a look at using
> > lib/lmb.c for all this?
>
> 1. need to keep e820
That's neither an argument for using lmb nor an argument not to use
lmb. e820 is x86 specific BIOS wreckage and its whole purpose is
just to feed information into a (hopefully) generic early resource
management facility.
e820 _CANNOT_ be generalized. Period.
> 2. use e820 range with RAM to fill lmb.memory when finizing_e820
What's finizing_e820 ???
> 3. use lmb.reserved to replace early_res.
What's the implication of doing that ?
> current lmb is merging the region, we can not use name tag any more.
What's wrong with merging of regions ? Are you arguing about a
specific region ("the region") ?
Which name tag ? And why is that name tag important ?
> may need to dump early_memtest, and use early_res for bootmem at
> first.
Why exactly might early_memtest no longer be possible ?
What does "early_res for bootmem" mean ?
Please take some time to explain in detail. Throwing one-liners and
buzzwords w/o context into such a discussion is more than
counterproductive.
Thanks,
tglx
The early_res array only corresponds to lmb.reserved, aka the regions reserved by the kernel.
>
>> current lmb is merging the region, we can not use name tag any more.
>
> What's wrong with merging of regions ? Are you arguing about a
> specific region ("the region") ?
>
> Which name tag ? And why is that name tag important ?
struct early_res {
u64 start, end;
char name[15];
char overlap_ok;
};
>
>> may need to dump early_memtest, and use early_res for bootmem at
>> first.
>
> Why exactly might early_memtest no longer be possible ?
early_memtest need to call find_e820_area_size
current lmb doesn't have that kind of find util.
the one memory subtract reserved memory by kernel.
>
> What does "early_res for bootmem" mean ?
use early_res to replace bootmem, the CONFIG_NO_BOOTMEM.
that need early_res can be double or increase the slots automatically.
Yinghai
I still want to know what "need to keep e820" means for you.
> >> 2. use e820 range with RAM to fill lmb.memory when finizing_e820
> >
> > What's finizing_e820 ???
> finish_e820_parsing();
Yinghai, come on. Are you really expecting that everyone involved in
this discussion goes to look up what the heck finish_e820_parsing()
is doing ?
You want to explain why your solution is better or why lmb is not
sufficient, so you better go and explain what finish_e820_parsing()
is, why finish_e820_parsing() is important and why lmb cannot cope
with it.
> >> 3. use lmb.reserved to replace early_res.
> >
> > What's the implication of doing that ?
>
> The early_res array only corresponds to lmb.reserved, aka the regions
> reserved by the kernel.
Is it only corresponding (somehow) or is it a full equivalent ?
> >> current lmb is merging the region, we can not use name tag any more.
> >
> > What's wrong with merging of regions ? Are you arguing about a
> > specific region ("the region") ?
Care to answer my question ?
> >
> > Which name tag ? And why is that name tag important ?
>
> struct early_res {
> u64 start, end;
> char name[15];
> char overlap_ok;
> };
I'm starting to get annoyed, really. What is that name field for and
why is that "name" field important ?
> >
> >> may need to dump early_memtest, and use early_res for bootmem at
> >> first.
> >
> > Why exactly might early_memtest no longer be possible ?
>
> early_memtest need to call find_e820_area_size
> current lmb doesn't have that kind of find util.
> the one memory subtract reserved memory by kernel.
What subtracts what ? And why is it that hard to fix that ?
> >
> > What does "early_res for bootmem" mean ?
>
> use early_res to replace bootmem, the CONFIG_NO_BOOTMEM.
> that need early_res can be double or increase the slots automatically.
-ENOPARSE
Yinghai, I asked you to take your time and explain things in detail
instead of shooting unparseable answers within a minute.
Everyone else in this discussion tries to be as explanatory as
possible, yet you expect everyone else to dig out the
crystal ball to understand the deeper meaning of your patches.
Again, please take your time to explain what needs to be done or what
is impossible to solve in your opinion, so we can get that resolved in
a way which is satisfactory and useful for all parties involved.
Thanks,
tglx
keep most of arch/x86/kernel/e820.c, and later, when finish_e820_parsing() is called,
fill lmb.memory from the e820 entries with E820_RAM type.
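In code, that step looks roughly like this (it is what the RFC patch later in this thread implements as fill_lmb_memory()):

void __init fill_lmb_memory(void)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];

		if (ei->type != E820_RAM)
			continue;	/* only RAM ranges go into lmb.memory */
		lmb_add(ei->addr, ei->size);
	}

	lmb_analyze();		/* recompute the totals */
	lmb_dump_all();
}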
>
>>>> 2. use e820 range with RAM to fill lmb.memory when finizing_e820
>>>
>>> What's finizing_e820 ???
>> finish_e820_parsing();
>
> Yinghai, come on. Are you really expecting that everyone involved in
> this discussion goes to look up what the heck finish_e820_parsing()
> is doing ?
>
> You want to explain why your solution is better or why lmb is not
> sufficient, so you better go and explain what finish_e820_parsing()
> is, why finish_e820_parsing() is important and why lmb cannot cope
> with it.
current x86:
a. setup e820 array.
b. the early_param mem= and memmap= related code will adjust the e820.
we don't need to call lmb_enforce_memory_limit().
>
>>>> 3. use lmb.reserved to replace early_res.
>>>
>>> What's the implication of doing that ?
>>
>> The early_res array only corresponds to lmb.reserved, aka the regions
>> reserved by the kernel.
>
> Is it only corresponding (somehow) or is it a full equivalent ?
early_res is not sorted and merged.
>
>>>> current lmb is merging the region, we can not use name tag any more.
>>>
>>> What's wrong with merging of regions ? Are you arguing about a
>>> specific region ("the region") ?
>
> Care to answer my question ?
if ranges get merged, you can not use names with them.
>
>>>
>>> Which name tag ? And why is that name tag important ?
>>
>> struct early_res {
>> u64 start, end;
>> char name[15];
>> char overlap_ok;
>> };
>
> I'm starting to get annoyed, really. What is that name field for and
> why is that "name" field important ?
at least later, when some code frees a wrong range, we can figure out who caused the problem.
>
>>>
>>>> may need to dump early_memtest, and use early_res for bootmem at
>>>> first.
>>>
>>> Why exactly might early_memtest no longer be possible ?
>>
>> early_memtest need to call find_e820_area_size
>> current lmb doesn't have that kind of find util.
>> the one memory subtract reserved memory by kernel.
>
> What subtracts what ? And why is it that hard to fix that ?
lmb.memory - lmb.reserved
or e820 E820_RAM entries - early_res
move some code from early_res to lmb.c?
>
>>>
>>> What does "early_res for bootmem" mean ?
>>
>> use early_res to replace bootmem, the CONFIG_NO_BOOTMEM.
>> that need early_res can be double or increase the slots automatically.
>
> -ENOPARSE
>
> Yinghai, I asked you to take your time and explain things in detail
> instead of shooting unparseable answers within a minute.
>
> Everyone else in this discussion tries to be as explanatory as
> possible, just you expect that everyone else is going to dig out the
> crystal ball to understand the deeper meanings of your patches.
>
> Again, please take your time to explain what needs to be done or what
> is impossible to solve in your opinion, so we can get that resolved in
> a way which is satisfactory and useful for all parties involved.
to make x86 use lmb, we need to extend lmb to have find_early_area:
static int __init find_overlapped_early(u64 start, u64 end)
{
	int i;
	struct lmb_property *r;

	/* return the index of the first reserved range overlapping [start, end) */
	for (i = 0; i < lmb.reserved.cnt && lmb.reserved.region[i].size; i++) {
		r = &lmb.reserved.region[i];
		if (end > r->base && start < (r->base + r->size))
			break;
	}

	return i;
}
/* Check for already reserved areas */
static inline int __init bad_addr(u64 *addrp, u64 size, u64 align)
{
	int i;
	u64 addr = *addrp;
	int changed = 0;
	struct lmb_property *r;
again:
	i = find_overlapped_early(addr, addr + size);
	r = &lmb.reserved.region[i];
	if (i < lmb.reserved.cnt && r->size) {
		/* bump the candidate past the reserved range and retry */
		*addrp = addr = round_up(r->base + r->size, align);
		changed = 1;
		goto again;
	}
	return changed;
}
u64 __init find_early_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
			   u64 size, u64 align)
{
	u64 addr, last;

	addr = round_up(ei_start, align);
	if (addr < start)
		addr = round_up(start, align);
	if (addr >= ei_last)
		goto out;
	while (bad_addr(&addr, size, align) && addr + size <= ei_last)
		;
	last = addr + size;
	if (last > ei_last)
		goto out;
	if (last > end)
		goto out;

	return addr;

out:
	return -1ULL;
}
find_early_area_size() would follow the same pattern.
Using these, we can have find_lmb_area:
/*
 * Find a free area with specified alignment in a specific range.
 */
u64 __init find_lmb_area(u64 start, u64 end, u64 size, u64 align)
{
	int i;

	for (i = 0; i < lmb.memory.cnt; i++) {
		u64 ei_start = lmb.memory.region[i].base;
		u64 ei_last = ei_start + lmb.memory.region[i].size;
		u64 addr;

		addr = find_early_area(ei_start, ei_last, start, end,
					size, align);
		if (addr != -1ULL)
			return addr;
	}
	return -1ULL;
}
also, later we can use it with active_range for the bootmem replacement:
u64 __init find_memory_core_early(int nid, u64 size, u64 align,
				  u64 goal, u64 limit)
{
	int i;

	/* need to go over early_node_map to find out good range for node */
	for_each_active_range_index_in_nid(i, nid) {
		u64 addr;
		u64 ei_start, ei_last;

		ei_last = early_node_map[i].end_pfn;
		ei_last <<= PAGE_SHIFT;
		ei_start = early_node_map[i].start_pfn;
		ei_start <<= PAGE_SHIFT;
		addr = find_early_area(ei_start, ei_last,
					goal, limit, size, align);
		if (addr == -1ULL)
			continue;

		return addr;
	}
	return -1ULL;
}
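With those in place, the existing x86 entry points can stay as thin wrappers; a sketch of the conversion (the RFC patch below does exactly this for find_e820_area()):

static inline u64 find_e820_area(u64 start, u64 end, u64 size, u64 align)
{
	/* x86 callers keep their API; the search itself is now lmb-based */
	return find_lmb_area(start, end, size, align);
}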
Yinghai
On Mon, 22 Mar 2010, Yinghai Lu wrote:
> On 03/22/2010 03:53 PM, Thomas Gleixner wrote:
> > On Mon, 22 Mar 2010, Yinghai Lu wrote:
> >> On 03/22/2010 03:09 PM, Thomas Gleixner wrote:
> >>> On Mon, 22 Mar 2010, Yinghai Lu wrote:
> >>>> On 03/22/2010 12:37 PM, Ingo Molnar wrote:
> >>
> >>>> 1. need to keep e820
> >>>
> >>> That's neither an argument for using lmb nor an argument not to use
> >>> lmb. e820 is x86 specific BIOS wreckage and its whole purpose is
> >>> just to feed information into a (hopefully) generic early resource
> >>> management facility.
> >>>
> >>> e820 _CANNOT_ be generalized. Period.
> >
> > I still want to know what "need to keep e820" means for you.
>
> keep most of arch/x86/kernel/e820.c, and later, when
> finish_e820_parsing() is called, fill lmb.memory from the e820
> entries with E820_RAM type.
Right, and we never get rid of e820.c at all. Simply because e820 is an
x86 specific clusterfuck. You won't find anything remotely as insane as
that in any other architecture.
I really do not understand why you ever thought that moving that code
to a generic place is something useful and acceptable.
The point is that some of the algorithms which e820 needs to sanitize
the maps might be of general use, but definitely not the whole e820
crappola. And if you look closely, lmb has most of them already.
> >
> >>>> 2. use e820 range with RAM to fill lmb.memory when finizing_e820
> >>>
> >>> What's finizing_e820 ???
> >> finish_e820_parsing();
> >
> > Yinghai, come on. Are you really expecting that everyone involved in
> > this discussion goes to look up what the heck finish_e820_parsing()
> > is doing ?
> >
> > You want to explain why your solution is better or why lmb is not
> > sufficient, so you better go and explain what finish_e820_parsing()
> > is, why finish_e820_parsing() is important and why lmb cannot cope
> > with it.
>
> current x86:
> a. setup e820 array.
> b. the early_param mem= and memmap= related code will adjust the e820.
Dammit. I asked for an explanation, not for some keyword
listing. These bullet points do _NOT_ explain at all why e820 is
superior.
> we don't need to call lmb_enforce_memory_limit().
Of course you do not need to call lmb_enforce_memory_limit() simply
because it is not relevant to the existing e820 code at all.
What's the point ?
> >
> >>>> 3. use lmb.reserved to replace early_res.
> >>>
> >>> What's the implication of doing that ?
> >>
> >> The early_res array only corresponds to lmb.reserved, aka the regions
> >> reserved by the kernel.
> >
> > Is it only corresponding (somehow) or is it a full equivalent ?
>
> early_res is not sorted and merged.
So what's the implication for x86 vs. the early_res stuff ? Any
downsides or upsides other than not being sorted and merged?
> >>>> current lmb is merging the region, we can not use name tag any more.
> >>>
> >>> What's wrong with merging of regions ? Are you arguing about a
> >>> specific region ("the region") ?
> >
> > Care to answer my question ?
> if ranges get merged, you can not use names with them.
Why does that matter ?
> >>>
> >>> Which name tag ? And why is that name tag important ?
> >>
> >> struct early_res {
> >> u64 start, end;
> >> char name[15];
> >> char overlap_ok;
> >> };
> >
> > I'm starting to get annoyed, really. What is that name field for and
> > why is that "name" field important ?
>
> at least later, when some code frees a wrong range, we can figure out who caused the problem.
That does not explain the value of the name field at all. If some code
frees a wrong range a backtrace is always more helpful than some
arbitrary name field. Am I missing something ?
> >>>> may need to dump early_memtest, and use early_res for bootmem at
> >>>> first.
> >>>
> >>> Why exactly might early_memtest no longer be possible ?
> >>
> >> early_memtest need to call find_e820_area_size
> >> current lmb doesn't have that kind of find util.
> >> the one memory subtract reserved memory by kernel.
> >
> > What subtracts what ? And why is it that hard to fix that ?
>
> lmb.memory - lmb.reserved
>
> or e820 E820_RAM entries - early_res
>
> move some code from early_res to lmb.c?
Care to explain in clear wording what you need to solve ? "move some
code from early_res to lmb.c?" is definitely not a useful
explanation.
> >>>
> >>> What does "early_res for bootmem" mean ?
> >>
> >> use early_res to replace bootmem, the CONFIG_NO_BOOTMEM.
> >> that need early_res can be double or increase the slots automatically.
> >
> > -ENOPARSE
> >
> > Yinghai, I asked you to take your time and explain things in detail
> > instead of shooting unparseable answers within a minute.
> >
> > Everyone else in this discussion tries to be as explanatory as
> > possible, yet you expect everyone else to dig out the
> > crystal ball to understand the deeper meaning of your patches.
> >
> > Again, please take your time to explain what needs to be done or what
> > is impossible to solve in your opinion, so we can get that resolved in
> > a way which is satisfactory and useful for all parties involved.
>
> to make x86 use lmb, we need to extend lmb to have find_early_area.
Why ?
> static int __init find_overlapped_early(u64 start, u64 end)
> {
No, posting arbitrary code snippets which you think are necessary to
solve it is not the way to go.
There is _ZERO_ explanation of _WHY_ you think that this is a
prerequisite.
Those largely uncommented code snippets (as uncommented as the
corresponding code in x86) are _NOT_ an explanation at all.
You just state that you need that whole bunch, w/o telling _WHY_.
The more I look into this, the more I doubt that there is an actual
reason for this complexity. It just looks like it has grown that way by fixing
corner cases all over the place and not out of a real design
requirement.
Either that, or it's just a lack of understanding of how to map lmb
functionality to the problem at hand, as LMB certainly does not map 1:1
to the current x86 way of solving that problem.
Please give a proper explanation for this, really !
Thanks,
tglx
unless you want to dump two users of find_e820_area_size()
YH
On Mon, 22 Mar 2010, Yinghai Lu wrote:
> On 03/22/2010 05:45 PM, Thomas Gleixner wrote:
> > Yinghai,
> >>
> >> to make x86 use lmb, we need to extend lmb to have find_early_area.
> >
> > Why ?
>
> unless you want to dump two users of find_e820_area_size()
I don't care about the two users of find_e820_area_size() as long as
you are not willing to spend more than a split second to explain
things.
I'm really fed up with pulling answers out of your nose bit by bit.
Others and I wrote lengthy explanations and asked precise questions.
All we get are some meager bones thrown our way.
If that's your understanding of community work, fine. It's just not
our way of working together. After spending valuable time on that I
completely agree with Dave on:
"Now I know that you _REALLY_ aren't listening to us."
It's not only that you are not listening; you are simply ignoring our
concerns. Go ahead with that, but then do not wonder when we ignore you
as well.
Thanks,
tglx
[PATCH 01/20] x86: add find_e820_area_node
[RFC PATCH] x86: use lmb to replace early_res
still keep kernel/early_res.c for the extensions.
should we move that file to lib/lmb.c later?
Signed-off-by: Yinghai Lu <yin...@kernel.org>
---
arch/x86/Kconfig | 1
arch/x86/include/asm/e820.h | 38 +-
arch/x86/include/asm/lmb.h | 8
arch/x86/kernel/e820.c | 163 +----------
arch/x86/kernel/head.c | 2
arch/x86/kernel/head32.c | 4
arch/x86/kernel/head64.c | 2
arch/x86/kernel/setup.c | 2
arch/x86/kernel/setup_percpu.c | 6
include/linux/early_res.h | 9
include/linux/lmb.h | 5
kernel/early_res.c | 594 ++++++++++++++++-------------------------
lib/lmb.c | 9
mm/page_alloc.c | 2
mm/sparse-vmemmap.c | 4
15 files changed, 321 insertions(+), 528 deletions(-)
Index: linux-2.6/arch/x86/Kconfig
===================================================================
--- linux-2.6.orig/arch/x86/Kconfig
+++ linux-2.6/arch/x86/Kconfig
@@ -27,6 +27,7 @@ config X86
select HAVE_PERF_EVENTS if (!M386 && !M486)
select HAVE_IOREMAP_PROT
select HAVE_KPROBES
+ select HAVE_LMB
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_WANT_FRAME_POINTERS
select HAVE_DMA_ATTRS
Index: linux-2.6/arch/x86/include/asm/e820.h
===================================================================
--- linux-2.6.orig/arch/x86/include/asm/e820.h
+++ linux-2.6/arch/x86/include/asm/e820.h
@@ -113,22 +113,36 @@ static inline void early_memtest(unsigne
extern unsigned long end_user_pfn;
-extern u64 find_e820_area(u64 start, u64 end, u64 size, u64 align);
-extern u64 find_e820_area_size(u64 start, u64 *sizep, u64 align);
-u64 find_e820_area_node(int nid, u64 start, u64 end, u64 size, u64 align);
-extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
#include <linux/early_res.h>
+static inline u64 find_e820_area(u64 start, u64 end, u64 size, u64 align)
+{
+ return find_lmb_area(start, end, size, align);
+}
+static inline u64 find_e820_area_size(u64 start, u64 *sizep, u64 align)
+{
+ return find_lmb_area_size(start, sizep, align);
+}
+static inline u64
+find_e820_area_node(int nid, u64 start, u64 end, u64 size, u64 align)
+{
+ return find_lmb_area_node(nid, start, end, size, align);
+}
+extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
extern unsigned long e820_end_of_ram_pfn(void);
extern unsigned long e820_end_of_low_ram_pfn(void);
-extern int e820_find_active_region(const struct e820entry *ei,
- unsigned long start_pfn,
- unsigned long last_pfn,
- unsigned long *ei_startpfn,
- unsigned long *ei_endpfn);
-extern void e820_register_active_regions(int nid, unsigned long start_pfn,
- unsigned long end_pfn);
-extern u64 e820_hole_size(u64 start, u64 end);
+static inline void e820_register_active_regions(int nid,
+ unsigned long start_pfn,
+ unsigned long end_pfn)
+{
+ lmb_register_active_regions(nid, start_pfn, end_pfn);
+}
+static inline u64 e820_hole_size(u64 start, u64 end)
+{
+ return lmb_hole_size(start, end);
+}
+void init_lmb_memory(void);
+void fill_lmb_memory(void);
extern void finish_e820_parsing(void);
extern void e820_reserve_resources(void);
extern void e820_reserve_resources_late(void);
Index: linux-2.6/arch/x86/kernel/e820.c
===================================================================
--- linux-2.6.orig/arch/x86/kernel/e820.c
+++ linux-2.6/arch/x86/kernel/e820.c
@@ -15,6 +15,7 @@
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/firmware-map.h>
+#include <linux/lmb.h>
#include <asm/e820.h>
#include <asm/proto.h>
@@ -727,37 +728,6 @@ static int __init e820_mark_nvs_memory(v
core_initcall(e820_mark_nvs_memory);
#endif
-/*
- * Find a free area with specified alignment in a specific range.
- */
-u64 __init find_e820_area(u64 start, u64 end, u64 size, u64 align)
-{
- int i;
-
- for (i = 0; i < e820.nr_map; i++) {
- struct e820entry *ei = &e820.map[i];
- u64 addr;
- u64 ei_start, ei_last;
-
- if (ei->type != E820_RAM)
- continue;
-
- ei_last = ei->addr + ei->size;
- ei_start = ei->addr;
- addr = find_early_area(ei_start, ei_last, start, end,
- size, align);
-
- if (addr != -1ULL)
- return addr;
- }
- return -1ULL;
-}
-
-u64 __init find_fw_memmap_area(u64 start, u64 end, u64 size, u64 align)
-{
- return find_e820_area(start, end, size, align);
-}
-
u64 __init get_max_mapped(void)
{
u64 end = max_pfn_mapped;
@@ -766,47 +736,6 @@ u64 __init get_max_mapped(void)
return end;
}
-/*
- * Find next free range after *start
- */
-u64 __init find_e820_area_size(u64 start, u64 *sizep, u64 align)
-{
- int i;
-
- for (i = 0; i < e820.nr_map; i++) {
- struct e820entry *ei = &e820.map[i];
- u64 addr;
- u64 ei_start, ei_last;
-
- if (ei->type != E820_RAM)
- continue;
-
- ei_last = ei->addr + ei->size;
- ei_start = ei->addr;
- addr = find_early_area_size(ei_start, ei_last, start,
- sizep, align);
-
- if (addr != -1ULL)
- return addr;
- }
-
- return -1ULL;
-}
-
-u64 __init find_e820_area_node(int nid, u64 start, u64 end, u64 size, u64 align)
-{
- u64 addr;
- /*
- * need to call this function after e820_register_active_regions
- * so early_node_map[] is set
- */
- addr = find_memory_core_early(nid, size, align, start, end);
- if (addr != -1ULL)
- return addr;
-
- /* fallback, should already have start end in the node range */
- return find_e820_area(start, end, size, align);
-}
/*
* pre allocated 4k and reserved it in e820
@@ -900,74 +829,6 @@ unsigned long __init e820_end_of_low_ram
{
return e820_end_pfn(1UL<<(32 - PAGE_SHIFT), E820_RAM);
}
-/*
- * Finds an active region in the address range from start_pfn to last_pfn and
- * returns its range in ei_startpfn and ei_endpfn for the e820 entry.
- */
-int __init e820_find_active_region(const struct e820entry *ei,
- unsigned long start_pfn,
- unsigned long last_pfn,
- unsigned long *ei_startpfn,
- unsigned long *ei_endpfn)
-{
- u64 align = PAGE_SIZE;
-
- *ei_startpfn = round_up(ei->addr, align) >> PAGE_SHIFT;
- *ei_endpfn = round_down(ei->addr + ei->size, align) >> PAGE_SHIFT;
-
- /* Skip map entries smaller than a page */
- if (*ei_startpfn >= *ei_endpfn)
- return 0;
-
- /* Skip if map is outside the node */
- if (ei->type != E820_RAM || *ei_endpfn <= start_pfn ||
- *ei_startpfn >= last_pfn)
- return 0;
-
- /* Check for overlaps */
- if (*ei_startpfn < start_pfn)
- *ei_startpfn = start_pfn;
- if (*ei_endpfn > last_pfn)
- *ei_endpfn = last_pfn;
-
- return 1;
-}
-
-/* Walk the e820 map and register active regions within a node */
-void __init e820_register_active_regions(int nid, unsigned long start_pfn,
- unsigned long last_pfn)
-{
- unsigned long ei_startpfn;
- unsigned long ei_endpfn;
- int i;
-
- for (i = 0; i < e820.nr_map; i++)
- if (e820_find_active_region(&e820.map[i],
- start_pfn, last_pfn,
- &ei_startpfn, &ei_endpfn))
- add_active_range(nid, ei_startpfn, ei_endpfn);
-}
-
-/*
- * Find the hole size (in bytes) in the memory range.
- * @start: starting address of the memory range to scan
- * @end: ending address of the memory range to scan
- */
-u64 __init e820_hole_size(u64 start, u64 end)
-{
- unsigned long start_pfn = start >> PAGE_SHIFT;
- unsigned long last_pfn = end >> PAGE_SHIFT;
- unsigned long ei_startpfn, ei_endpfn, ram = 0;
- int i;
-
- for (i = 0; i < e820.nr_map; i++) {
- if (e820_find_active_region(&e820.map[i],
- start_pfn, last_pfn,
- &ei_startpfn, &ei_endpfn))
- ram += ei_endpfn - ei_startpfn;
- }
- return end - start - ((u64)ram << PAGE_SHIFT);
-}
static void early_panic(char *msg)
{
@@ -1058,6 +919,28 @@ void __init finish_e820_parsing(void)
}
}
+void __init init_lmb_memory(void)
+{
+ lmb_init();
+}
+
+void __init fill_lmb_memory(void)
+{
+ int i;
+
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+
+ if (ei->type != E820_RAM)
+ continue;
+ lmb_add(ei->addr, ei->size);
+ }
+
+ lmb_analyze();
+
+ lmb_dump_all();
+}
+
static inline const char *e820_type_to_string(int e820_type)
{
switch (e820_type) {
Index: linux-2.6/arch/x86/kernel/head.c
===================================================================
--- linux-2.6.orig/arch/x86/kernel/head.c
+++ linux-2.6/arch/x86/kernel/head.c
@@ -51,5 +51,5 @@ void __init reserve_ebda_region(void)
lowmem = 0x9f000;
/* reserve all memory between lowmem and the 1MB mark */
- reserve_early_overlap_ok(lowmem, 0x100000, "BIOS reserved");
+ reserve_early(lowmem, 0x100000, "BIOS reserved");
}
Index: linux-2.6/arch/x86/kernel/head32.c
===================================================================
--- linux-2.6.orig/arch/x86/kernel/head32.c
+++ linux-2.6/arch/x86/kernel/head32.c
@@ -29,13 +29,15 @@ static void __init i386_default_early_se
void __init i386_start_kernel(void)
{
+
+ init_lmb_memory();
#ifdef CONFIG_X86_TRAMPOLINE
/*
* But first pinch a few for the stack/trampoline stuff
* FIXME: Don't need the extra page at 4K, but need to fix
* trampoline before removing it. (see the GDT stuff)
*/
- reserve_early_overlap_ok(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE,
+ reserve_early(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE,
"EX TRAMPOLINE");
#endif
Index: linux-2.6/arch/x86/kernel/head64.c
===================================================================
--- linux-2.6.orig/arch/x86/kernel/head64.c
+++ linux-2.6/arch/x86/kernel/head64.c
@@ -96,6 +96,8 @@ void __init x86_64_start_kernel(char * r
void __init x86_64_start_reservations(char *real_mode_data)
{
+ init_lmb_memory();
+
copy_bootdata(__va(real_mode_data));
reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
Index: linux-2.6/arch/x86/kernel/setup.c
===================================================================
--- linux-2.6.orig/arch/x86/kernel/setup.c
+++ linux-2.6/arch/x86/kernel/setup.c
@@ -892,6 +892,8 @@ void __init setup_arch(char **cmdline_p)
max_pfn_mapped = KERNEL_IMAGE_SIZE >> PAGE_SHIFT;
#endif
+ fill_lmb_memory();
+
#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
setup_bios_corruption_check();
#endif
Index: linux-2.6/arch/x86/kernel/setup_percpu.c
===================================================================
--- linux-2.6.orig/arch/x86/kernel/setup_percpu.c
+++ linux-2.6/arch/x86/kernel/setup_percpu.c
@@ -137,13 +137,7 @@ static void * __init pcpu_fc_alloc(unsig
static void __init pcpu_fc_free(void *ptr, size_t size)
{
-#ifdef CONFIG_NO_BOOTMEM
- u64 start = __pa(ptr);
- u64 end = start + size;
- free_early_partial(start, end);
-#else
free_bootmem(__pa(ptr), size);
-#endif
}
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
Index: linux-2.6/include/linux/early_res.h
===================================================================
--- linux-2.6.orig/include/linux/early_res.h
+++ linux-2.6/include/linux/early_res.h
@@ -5,15 +5,18 @@
extern void reserve_early(u64 start, u64 end, char *name);
extern void reserve_early_overlap_ok(u64 start, u64 end, char *name);
extern void free_early(u64 start, u64 end);
-void free_early_partial(u64 start, u64 end);
extern void early_res_to_bootmem(u64 start, u64 end);
-void reserve_early_without_check(u64 start, u64 end, char *name);
u64 find_early_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
u64 size, u64 align);
u64 find_early_area_size(u64 ei_start, u64 ei_last, u64 start,
u64 *sizep, u64 align);
-u64 find_fw_memmap_area(u64 start, u64 end, u64 size, u64 align);
+u64 find_lmb_area(u64 start, u64 end, u64 size, u64 align);
+u64 find_lmb_area_size(u64 start, u64 *sizep, u64 align);
+u64 find_lmb_area_node(int nid, u64 start, u64 end, u64 size, u64 align);
+void lmb_register_active_regions(int nid, unsigned long start_pfn,
+ unsigned long last_pfn);
+u64 lmb_hole_size(u64 start, u64 end);
u64 get_max_mapped(void);
#include <linux/range.h>
int get_free_all_memory_range(struct range **rangep, int nodeid);
Index: linux-2.6/include/linux/lmb.h
===================================================================
--- linux-2.6.orig/include/linux/lmb.h
+++ linux-2.6/include/linux/lmb.h
@@ -26,7 +26,8 @@ struct lmb_property {
struct lmb_region {
unsigned long cnt;
u64 size;
- struct lmb_property region[MAX_LMB_REGIONS+1];
+ struct lmb_property *region;
+ unsigned long region_array_size;
};
struct lmb {
@@ -37,6 +38,8 @@ struct lmb {
};
extern struct lmb lmb;
+extern struct lmb_property lmb_memory_region[MAX_LMB_REGIONS + 1];
+extern struct lmb_property lmb_reserved_region[MAX_LMB_REGIONS + 1];
extern void __init lmb_init(void);
extern void __init lmb_analyze(void);
Index: linux-2.6/kernel/early_res.c
===================================================================
--- linux-2.6.orig/kernel/early_res.c
+++ linux-2.6/kernel/early_res.c
@@ -6,284 +6,65 @@
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
+#include <linux/lmb.h>
#include <linux/early_res.h>
/*
* Early reserved memory areas.
*/
-/*
- * need to make sure this one is bigger enough before
- * find_fw_memmap_area could be used
- */
-#define MAX_EARLY_RES_X 32
-
-struct early_res {
- u64 start, end;
- char name[15];
- char overlap_ok;
-};
-static struct early_res early_res_x[MAX_EARLY_RES_X] __initdata;
-
-static int max_early_res __initdata = MAX_EARLY_RES_X;
-static struct early_res *early_res __initdata = &early_res_x[0];
-static int early_res_count __initdata;
-
-static int __init find_overlapped_early(u64 start, u64 end)
-{
- int i;
- struct early_res *r;
-
- for (i = 0; i < max_early_res && early_res[i].end; i++) {
- r = &early_res[i];
- if (end > r->start && start < r->end)
- break;
- }
-
- return i;
-}
-
-/*
- * Drop the i-th range from the early reservation map,
- * by copying any higher ranges down one over it, and
- * clearing what had been the last slot.
- */
-static void __init drop_range(int i)
-{
- int j;
-
- for (j = i + 1; j < max_early_res && early_res[j].end; j++)
- ;
-
- memmove(&early_res[i], &early_res[i + 1],
- (j - 1 - i) * sizeof(struct early_res));
-
- early_res[j - 1].end = 0;
- early_res_count--;
-}
-
-static void __init drop_range_partial(int i, u64 start, u64 end)
-{
- u64 common_start, common_end;
- u64 old_start, old_end;
-
- old_start = early_res[i].start;
- old_end = early_res[i].end;
- common_start = max(old_start, start);
- common_end = min(old_end, end);
-
- /* no overlap ? */
- if (common_start >= common_end)
- return;
-
- if (old_start < common_start) {
- /* make head segment */
- early_res[i].end = common_start;
- if (old_end > common_end) {
- char name[15];
-
- /*
- * Save a local copy of the name, since the
- * early_res array could get resized inside
- * reserve_early_without_check() ->
- * __check_and_double_early_res(), which would
- * make the current name pointer invalid.
- */
- strncpy(name, early_res[i].name,
- sizeof(early_res[i].name) - 1);
- /* add another for left over on tail */
- reserve_early_without_check(common_end, old_end, name);
- }
- return;
- } else {
- if (old_end > common_end) {
- /* reuse the entry for tail left */
- early_res[i].start = common_end;
- return;
- }
- /* all covered */
- drop_range(i);
- }
-}
-
-/*
- * Split any existing ranges that:
- * 1) are marked 'overlap_ok', and
- * 2) overlap with the stated range [start, end)
- * into whatever portion (if any) of the existing range is entirely
- * below or entirely above the stated range. Drop the portion
- * of the existing range that overlaps with the stated range,
- * which will allow the caller of this routine to then add that
- * stated range without conflicting with any existing range.
- */
-static void __init drop_overlaps_that_are_ok(u64 start, u64 end)
-{
- int i;
- struct early_res *r;
- u64 lower_start, lower_end;
- u64 upper_start, upper_end;
- char name[15];
-
- for (i = 0; i < max_early_res && early_res[i].end; i++) {
- r = &early_res[i];
-
- /* Continue past non-overlapping ranges */
- if (end <= r->start || start >= r->end)
- continue;
-
- /*
- * Leave non-ok overlaps as is; let caller
- * panic "Overlapping early reservations"
- * when it hits this overlap.
- */
- if (!r->overlap_ok)
- return;
-
- /*
- * We have an ok overlap. We will drop it from the early
- * reservation map, and add back in any non-overlapping
- * portions (lower or upper) as separate, overlap_ok,
- * non-overlapping ranges.
- */
-
- /* 1. Note any non-overlapping (lower or upper) ranges. */
- strncpy(name, r->name, sizeof(name) - 1);
-
- lower_start = lower_end = 0;
- upper_start = upper_end = 0;
- if (r->start < start) {
- lower_start = r->start;
- lower_end = start;
- }
- if (r->end > end) {
- upper_start = end;
- upper_end = r->end;
- }
-
- /* 2. Drop the original ok overlapping range */
- drop_range(i);
-
- i--; /* resume for-loop on copied down entry */
-
- /* 3. Add back in any non-overlapping ranges. */
- if (lower_end)
- reserve_early_overlap_ok(lower_start, lower_end, name);
- if (upper_end)
- reserve_early_overlap_ok(upper_start, upper_end, name);
- }
-}
-
-static void __init __reserve_early(u64 start, u64 end, char *name,
- int overlap_ok)
-{
- int i;
- struct early_res *r;
-
- i = find_overlapped_early(start, end);
- if (i >= max_early_res)
- panic("Too many early reservations");
- r = &early_res[i];
- if (r->end)
- panic("Overlapping early reservations "
- "%llx-%llx %s to %llx-%llx %s\n",
- start, end - 1, name ? name : "", r->start,
- r->end - 1, r->name);
- r->start = start;
- r->end = end;
- r->overlap_ok = overlap_ok;
- if (name)
- strncpy(r->name, name, sizeof(r->name) - 1);
- early_res_count++;
-}
-
-/*
- * A few early reservtations come here.
- *
- * The 'overlap_ok' in the name of this routine does -not- mean it
- * is ok for these reservations to overlap an earlier reservation.
- * Rather it means that it is ok for subsequent reservations to
- * overlap this one.
- *
- * Use this entry point to reserve early ranges when you are doing
- * so out of "Paranoia", reserving perhaps more memory than you need,
- * just in case, and don't mind a subsequent overlapping reservation
- * that is known to be needed.
- *
- * The drop_overlaps_that_are_ok() call here isn't really needed.
- * It would be needed if we had two colliding 'overlap_ok'
- * reservations, so that the second such would not panic on the
- * overlap with the first. We don't have any such as of this
- * writing, but might as well tolerate such if it happens in
- * the future.
- */
-void __init reserve_early_overlap_ok(u64 start, u64 end, char *name)
-{
- drop_overlaps_that_are_ok(start, end);
- __reserve_early(start, end, name, 1);
-}
static void __init __check_and_double_early_res(u64 ex_start, u64 ex_end)
{
u64 start, end, size, mem;
- struct early_res *new;
+ struct lmb_property *new, *old;
+ struct lmb_region *type = &lmb.reserved;
/* do we have enough slots left ? */
- if ((max_early_res - early_res_count) > max(max_early_res/8, 2))
+ if ((type->region_array_size - type->cnt) >
+ max_t(unsigned long, type->region_array_size/8, 2))
return;
+ old = type->region;
/* double it */
mem = -1ULL;
- size = sizeof(struct early_res) * max_early_res * 2;
- if (early_res == early_res_x)
+ size = sizeof(struct lmb_property) * type->region_array_size * 2;
+ if (old == lmb_reserved_region)
start = 0;
else
- start = early_res[0].end;
+ start = __pa(old) + sizeof(struct lmb_property) *
+ type->region_array_size;
end = ex_start;
if (start + size < end)
- mem = find_fw_memmap_area(start, end, size,
- sizeof(struct early_res));
+ mem = find_lmb_area(start, end, size,
+ sizeof(struct lmb_property));
if (mem == -1ULL) {
start = ex_end;
end = get_max_mapped();
if (start + size < end)
- mem = find_fw_memmap_area(start, end, size,
- sizeof(struct early_res));
+ mem = find_lmb_area(start, end, size,
+ sizeof(struct lmb_property));
}
if (mem == -1ULL)
- panic("can not find more space for early_res array");
+ panic("can not find more space for lmb.reserved.region array");
new = __va(mem);
- /* save the first one for own */
- new[0].start = mem;
- new[0].end = mem + size;
- new[0].overlap_ok = 0;
/* copy old to new */
- if (early_res == early_res_x) {
- memcpy(&new[1], &early_res[0],
- sizeof(struct early_res) * max_early_res);
- memset(&new[max_early_res+1], 0,
- sizeof(struct early_res) * (max_early_res - 1));
- early_res_count++;
- } else {
- memcpy(&new[1], &early_res[1],
- sizeof(struct early_res) * (max_early_res - 1));
- memset(&new[max_early_res], 0,
- sizeof(struct early_res) * max_early_res);
- }
- memset(&early_res[0], 0, sizeof(struct early_res) * max_early_res);
- early_res = new;
- max_early_res *= 2;
- printk(KERN_DEBUG "early_res array is doubled to %d at [%llx - %llx]\n",
- max_early_res, mem, mem + size - 1);
+ memcpy(&new[0], &old[0],
+ sizeof(struct lmb_property) * type->region_array_size);
+ memset(&new[type->region_array_size], 0,
+ sizeof(struct lmb_property) * type->region_array_size);
+
+ memset(type->region, 0,
+ sizeof(struct lmb_property) * type->region_array_size);
+ type->region = new;
+ type->region_array_size *= 2;
+ printk(KERN_DEBUG "lmb.reserved.region array is doubled to %ld at [%llx - %llx]\n",
+ type->region_array_size, mem, mem + size - 1);
+ if (old != lmb_reserved_region)
+ lmb_free(__pa(old),
+ sizeof(struct lmb_property) * type->region_array_size/2);
}
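
To make the new flow easier to read than the interleaved diff, here is a condensed sketch of the doubling path (names as in the patch; the ex_start/ex_end collision avoidance and the panic path are elided):

	static void __init double_reserved_array(struct lmb_region *type)
	{
		unsigned long rgnsz = type->region_array_size;
		u64 size = sizeof(struct lmb_property) * rgnsz * 2;
		struct lmb_property *old = type->region;
		struct lmb_property *new;
		u64 mem;

		/* find room for a twice-as-large array inside known RAM */
		mem = find_lmb_area(0, get_max_mapped(), size,
				    sizeof(struct lmb_property));
		new = __va(mem);

		/* carry over the existing entries, zero the new half */
		memcpy(new, old, sizeof(struct lmb_property) * rgnsz);
		memset(new + rgnsz, 0, sizeof(struct lmb_property) * rgnsz);

		type->region = new;
		type->region_array_size = rgnsz * 2;

		/* the static bootstrap array cannot be freed; a doubled one can */
		if (old != lmb_reserved_region)
			lmb_free(__pa(old), sizeof(struct lmb_property) * rgnsz);
	}

Note that this version never reserves the new array itself; the v2 hunk later in the thread adds an lmb_reserve(mem, ...) for it, so that a later allocation cannot land on top of the array.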
-/*
- * Most early reservations come here.
- *
- * We first have drop_overlaps_that_are_ok() drop any pre-existing
- * 'overlap_ok' ranges, so that we can then reserve this memory
- * range without risk of panic'ing on an overlapping overlap_ok
- * early reservation.
- */
void __init reserve_early(u64 start, u64 end, char *name)
{
if (start >= end)
@@ -291,68 +72,18 @@ void __init reserve_early(u64 start, u64
__check_and_double_early_res(start, end);
- drop_overlaps_that_are_ok(start, end);
- __reserve_early(start, end, name, 0);
-}
-
-void __init reserve_early_without_check(u64 start, u64 end, char *name)
-{
- struct early_res *r;
-
- if (start >= end)
- return;
-
- __check_and_double_early_res(start, end);
-
- r = &early_res[early_res_count];
-
- r->start = start;
- r->end = end;
- r->overlap_ok = 0;
- if (name)
- strncpy(r->name, name, sizeof(r->name) - 1);
- early_res_count++;
+ lmb_reserve(start, end - start);
}
void __init free_early(u64 start, u64 end)
{
- struct early_res *r;
- int i;
-
- i = find_overlapped_early(start, end);
- r = &early_res[i];
- if (i >= max_early_res || r->end != end || r->start != start)
- panic("free_early on not reserved area: %llx-%llx!",
- start, end - 1);
-
- drop_range(i);
-}
-
-void __init free_early_partial(u64 start, u64 end)
-{
- struct early_res *r;
- int i;
-
if (start == end)
return;
- if (WARN_ONCE(start > end, "free_early_partial: wrong range [%#llx, %#llx]\n", start, end))
+ if (WARN_ONCE(start > end, "free_early: wrong range [%#llx, %#llx]\n", start, end))
return;
-try_next:
- i = find_overlapped_early(start, end);
- if (i >= max_early_res)
- return;
-
- r = &early_res[i];
- /* hole ? */
- if (r->end >= end && r->start <= start) {
- drop_range_partial(i, start, end);
- return;
- }
-
- drop_range_partial(i, start, end);
- goto try_next;
+ lmb_free(start, end - start);
}
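
One behavioral detail is worth spelling out (illustrative, not from the patch): lmb_free() can punch a hole in the middle of a reserved range, splitting one entry into two, so a free can consume an extra slot in lmb.reserved. That is why v2 of this patch adds a __check_and_double_early_res() call to free_early() as well:

	lmb_reserve(0x100000, 0x300000);  /* lmb.reserved: [1MB, 4MB), one entry */
	lmb_free(0x200000, 0x100000);     /* now [1MB, 2MB) + [3MB, 4MB), two entries */

(the addresses are made up, only to show the split)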
#ifdef CONFIG_NO_BOOTMEM
@@ -360,48 +91,45 @@ static void __init subtract_early_res(st
{
int i, count;
u64 final_start, final_end;
- int idx = 0;
- count = 0;
- for (i = 0; i < max_early_res && early_res[i].end; i++)
- count++;
-
- /* need to skip first one ?*/
- if (early_res != early_res_x)
- idx = 1;
+ count = lmb.reserved.cnt;
+
+ if (lmb.reserved.region != lmb_reserved_region) {
+ /* take out the table itself */
+ lmb_free(__pa(lmb.reserved.region),
+ sizeof(struct lmb_property) *
+ lmb.reserved.region_array_size);
+ }
#define DEBUG_PRINT_EARLY_RES 1
#if DEBUG_PRINT_EARLY_RES
printk(KERN_INFO "Subtract (%d early reservations)\n", count);
#endif
- for (i = idx; i < count; i++) {
- struct early_res *r = &early_res[i];
+ for (i = 0; i < count; i++) {
+ struct lmb_property *r = &lmb.reserved.region[i];
#if DEBUG_PRINT_EARLY_RES
- printk(KERN_INFO " #%d [%010llx - %010llx] %15s\n", i,
- r->start, r->end, r->name);
+ printk(KERN_INFO " #%d [%010llx - %010llx]\n", i,
+ r->base, r->base + r->size);
#endif
- final_start = PFN_DOWN(r->start);
- final_end = PFN_UP(r->end);
+ final_start = PFN_DOWN(r->base);
+ final_end = PFN_UP(r->base + r->size);
if (final_start >= final_end)
continue;
subtract_range(range, az, final_start, final_end);
}
-
}
int __init get_free_all_memory_range(struct range **rangep, int nodeid)
{
- int i, count;
+ int count;
u64 start = 0, end;
u64 size;
u64 mem;
struct range *range;
int nr_range;
- count = 0;
- for (i = 0; i < max_early_res && early_res[i].end; i++)
- count++;
+ count = lmb.reserved.region_array_size;
count *= 2;
@@ -411,12 +139,15 @@ int __init get_free_all_memory_range(str
if (end > (MAX_DMA32_PFN << PAGE_SHIFT))
start = MAX_DMA32_PFN << PAGE_SHIFT;
#endif
- mem = find_fw_memmap_area(start, end, size, sizeof(struct range));
+ mem = find_lmb_area(start, end, size, sizeof(struct range));
if (mem == -1ULL)
panic("can not find more space for range free");
range = __va(mem);
- /* use early_node_map[] and early_res to get range array at first */
+ /*
+ * use early_node_map[] and lmb.reserved.region to get range array
+ * at first
+ */
memset(range, 0, size);
nr_range = 0;
@@ -430,10 +161,11 @@ int __init get_free_all_memory_range(str
/* need to clear it ? */
if (nodeid == MAX_NUMNODES) {
- memset(&early_res[0], 0,
- sizeof(struct early_res) * max_early_res);
- early_res = NULL;
- max_early_res = 0;
+ memset(&lmb.reserved.region[0], 0,
+ sizeof(struct lmb_property) * lmb.reserved.region_array_size);
+ lmb.reserved.region = NULL;
+ lmb.reserved.region_array_size = 0;
+ lmb.reserved.cnt = 0;
}
*rangep = range;
@@ -444,24 +176,24 @@ void __init early_res_to_bootmem(u64 sta
{
int i, count;
u64 final_start, final_end;
- int idx = 0;
- count = 0;
- for (i = 0; i < max_early_res && early_res[i].end; i++)
- count++;
-
- /* need to skip first one ?*/
- if (early_res != early_res_x)
- idx = 1;
+ count = lmb.reserved.cnt;
+
+ if (lmb.reserved.region != lmb_reserved_region) {
+ /* take out the table itself */
+ lmb_free(__pa(lmb.reserved.region),
+ sizeof(struct lmb_property) *
+ lmb.reserved.region_array_size);
+ }
printk(KERN_INFO "(%d/%d early reservations) ==> bootmem [%010llx - %010llx]\n",
- count - idx, max_early_res, start, end);
- for (i = idx; i < count; i++) {
- struct early_res *r = &early_res[i];
+ count, lmb.reserved.cnt, start, end);
+ for (i = 0; i < count; i++) {
+ struct lmb_property *r = &lmb.reserved.region[i];
printk(KERN_INFO " #%d [%010llx - %010llx] %16s", i,
- r->start, r->end, r->name);
- final_start = max(start, r->start);
- final_end = min(end, r->end);
+ r->base, r->base + r->size);
+ final_start = max(start, r->base);
+ final_end = min(end, r->base + r->size);
if (final_start >= final_end) {
printk(KERN_CONT "\n");
continue;
@@ -472,25 +204,43 @@ void __init early_res_to_bootmem(u64 sta
BOOTMEM_DEFAULT);
}
/* clear them */
- memset(&early_res[0], 0, sizeof(struct early_res) * max_early_res);
- early_res = NULL;
- max_early_res = 0;
- early_res_count = 0;
+ memset(&lmb.reserved.region[0], 0,
+ sizeof(struct lmb_property) * lmb.reserved.region_array_size);
+ lmb.reserved.region = NULL;
+ lmb.reserved.region_array_size = 0;
+ lmb.reserved.cnt = 0;
}
#endif
+
+/* the following code is for the early_res conversion */
+
+static int __init find_overlapped_early(u64 start, u64 end)
+{
+ int i;
+ struct lmb_property *r;
+
+ for (i = 0; i < lmb.reserved.cnt && lmb.reserved.region[i].size; i++) {
+ r = &lmb.reserved.region[i];
+ if (end > r->base && start < (r->base + r->size))
+ break;
+ }
+
+ return i;
+}
+
/* Check for already reserved areas */
static inline int __init bad_addr(u64 *addrp, u64 size, u64 align)
{
int i;
u64 addr = *addrp;
int changed = 0;
- struct early_res *r;
+ struct lmb_property *r;
again:
i = find_overlapped_early(addr, addr + size);
- r = &early_res[i];
- if (i < max_early_res && r->end) {
- *addrp = addr = round_up(r->end, align);
+ r = &lmb.reserved.region[i];
+ if (i < lmb.reserved.cnt && r->size) {
+ *addrp = addr = round_up(r->base + r->size, align);
changed = 1;
goto again;
}
@@ -506,20 +256,20 @@ static inline int __init bad_addr_size(u
int changed = 0;
again:
last = addr + size;
- for (i = 0; i < max_early_res && early_res[i].end; i++) {
- struct early_res *r = &early_res[i];
- if (last > r->start && addr < r->start) {
- size = r->start - addr;
+ for (i = 0; i < lmb.reserved.cnt && lmb.reserved.region[i].size; i++) {
+ struct lmb_property *r = &lmb.reserved.region[i];
+ if (last > r->base && addr < r->base) {
+ size = r->base - addr;
changed = 1;
goto again;
}
- if (last > r->end && addr < r->end) {
- addr = round_up(r->end, align);
+ if (last > (r->base + r->size) && addr < (r->base + r->size)) {
+ addr = round_up(r->base + r->size, align);
size = last - addr;
changed = 1;
goto again;
}
- if (last <= r->end && addr >= r->start) {
+ if (last <= (r->base + r->size) && addr >= r->base) {
(*sizep)++;
return 0;
}
@@ -531,13 +281,8 @@ again:
return changed;
}
-/*
- * Find a free area with specified alignment in a specific range.
- * only with the area.between start to end is active range from early_node_map
- * so they are good as RAM
- */
u64 __init find_early_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
- u64 size, u64 align)
+ u64 size, u64 align)
{
u64 addr, last;
@@ -582,3 +327,130 @@ u64 __init find_early_area_size(u64 ei_s
out:
return -1ULL;
}
+
+/*
+ * Find a free area with specified alignment in a specific range.
+ */
+u64 __init find_lmb_area(u64 start, u64 end, u64 size, u64 align)
+{
+ int i;
+
+ for (i = 0; i < lmb.memory.cnt; i++) {
+ u64 ei_start = lmb.memory.region[i].base;
+ u64 ei_last = ei_start + lmb.memory.region[i].size;
+ u64 addr;
+
+ addr = find_early_area(ei_start, ei_last, start, end,
+ size, align);
+
+ if (addr != -1ULL)
+ return addr;
+ }
+ return -1ULL;
+}
+
+/*
+ * Find next free range after *start
+ */
+u64 __init find_lmb_area_size(u64 start, u64 *sizep, u64 align)
+{
+ int i;
+
+ for (i = 0; i < lmb.memory.cnt; i++) {
+ u64 ei_start = lmb.memory.region[i].base;
+ u64 ei_last = ei_start + lmb.memory.region[i].size;
+ u64 addr;
+
+ addr = find_early_area_size(ei_start, ei_last, start,
+ sizep, align);
+
+ if (addr != -1ULL)
+ return addr;
+ }
+
+ return -1ULL;
+}
+
+u64 __init find_lmb_area_node(int nid, u64 start, u64 end, u64 size, u64 align)
+{
+ u64 addr;
+ /*
+ * need to call this function after e820_register_active_regions
+ * so early_node_map[] is set
+ */
+ addr = find_memory_core_early(nid, size, align, start, end);
+ if (addr != -1ULL)
+ return addr;
+
+ /* fallback, should already have start end in the node range */
+ return find_lmb_area(start, end, size, align);
+}
+
+/*
+ * Finds an active region in the address range from start_pfn to last_pfn and
+ * returns its range in ei_startpfn and ei_endpfn for the lmb entry.
+ */
+static int __init lmb_find_active_region(const struct lmb_property *ei,
+ unsigned long start_pfn,
+ unsigned long last_pfn,
+ unsigned long *ei_startpfn,
+ unsigned long *ei_endpfn)
+{
+ u64 align = PAGE_SIZE;
+
+ *ei_startpfn = round_up(ei->base, align) >> PAGE_SHIFT;
+ *ei_endpfn = round_down(ei->base + ei->size, align) >> PAGE_SHIFT;
+
+ /* Skip map entries smaller than a page */
+ if (*ei_startpfn >= *ei_endpfn)
+ return 0;
+
+ /* Skip if map is outside the node */
+ if (*ei_endpfn <= start_pfn || *ei_startpfn >= last_pfn)
+ return 0;
+
+ /* Check for overlaps */
+ if (*ei_startpfn < start_pfn)
+ *ei_startpfn = start_pfn;
+ if (*ei_endpfn > last_pfn)
+ *ei_endpfn = last_pfn;
+
+ return 1;
+}
+
+/* Walk the lmb.memory map and register active regions within a node */
+void __init lmb_register_active_regions(int nid, unsigned long start_pfn,
+ unsigned long last_pfn)
+{
+ unsigned long ei_startpfn;
+ unsigned long ei_endpfn;
+ int i;
+
+ for (i = 0; i < lmb.memory.cnt; i++)
+ if (lmb_find_active_region(&lmb.memory.region[i],
+ start_pfn, last_pfn,
+ &ei_startpfn, &ei_endpfn))
+ add_active_range(nid, ei_startpfn, ei_endpfn);
+}
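
A usage sketch (the nid and pfn bounds are placeholders): this is meant as a drop-in for the old e820_register_active_regions() call sites in the NUMA setup code.

	/* register node 0's RAM span in early_node_map[] */
	lmb_register_active_regions(0, start_pfn, last_pfn);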
+
+/*
+ * Find the hole size (in bytes) in the memory range.
+ * @start: starting address of the memory range to scan
+ * @end: ending address of the memory range to scan
+ */
+u64 __init lmb_hole_size(u64 start, u64 end)
+{
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long last_pfn = end >> PAGE_SHIFT;
+ unsigned long ei_startpfn, ei_endpfn, ram = 0;
+ int i;
+
+ for (i = 0; i < lmb.memory.cnt; i++) {
+ if (lmb_find_active_region(&lmb.memory.region[i],
+ start_pfn, last_pfn,
+ &ei_startpfn, &ei_endpfn))
+ ram += ei_endpfn - ei_startpfn;
+ }
+ return end - start - ((u64)ram << PAGE_SHIFT);
+}
+
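
Similarly for lmb_hole_size(), presumably a drop-in for the old e820_hole_size() (illustrative bounds):

	/* bytes of non-RAM address space in [0, 4GB), per lmb.memory */
	u64 hole = lmb_hole_size(0, 1ULL << 32);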
Index: linux-2.6/lib/lmb.c
===================================================================
--- linux-2.6.orig/lib/lmb.c
+++ linux-2.6/lib/lmb.c
@@ -18,6 +18,8 @@
#define LMB_ALLOC_ANYWHERE 0
struct lmb lmb;
+struct lmb_property lmb_memory_region[MAX_LMB_REGIONS + 1];
+struct lmb_property lmb_reserved_region[MAX_LMB_REGIONS + 1];
static int lmb_debug;
@@ -106,6 +108,11 @@ static void lmb_coalesce_regions(struct
void __init lmb_init(void)
{
+ lmb.memory.region = lmb_memory_region;
+ lmb.memory.region_array_size = ARRAY_SIZE(lmb_memory_region);
+ lmb.reserved.region = lmb_reserved_region;
+ lmb.reserved.region_array_size = ARRAY_SIZE(lmb_reserved_region);
+
/* Create a dummy zero size LMB which will get coalesced away later.
* This simplifies the lmb_add() code below...
*/
@@ -539,3 +546,5 @@ int lmb_find(struct lmb_property *res)
}
return -1;
}
+
+
Index: linux-2.6/mm/page_alloc.c
===================================================================
--- linux-2.6.orig/mm/page_alloc.c
+++ linux-2.6/mm/page_alloc.c
@@ -3457,7 +3457,7 @@ void * __init __alloc_memory_core_early(
ptr = phys_to_virt(addr);
memset(ptr, 0, size);
- reserve_early_without_check(addr, addr + size, "BOOTMEM");
+ reserve_early(addr, addr + size, "BOOTMEM");
return ptr;
}
#endif
Index: linux-2.6/mm/sparse-vmemmap.c
===================================================================
--- linux-2.6.orig/mm/sparse-vmemmap.c
+++ linux-2.6/mm/sparse-vmemmap.c
@@ -229,8 +229,8 @@ void __init sparse_mem_maps_populate_nod
char name[15];
snprintf(name, sizeof(name), "MEMMAP %d", nodeid);
- reserve_early_without_check(__pa(vmemmap_buf_start),
- __pa(vmemmap_buf), name);
+ reserve_early(__pa(vmemmap_buf_start),
+ __pa(vmemmap_buf), name);
}
#else
free_bootmem(__pa(vmemmap_buf), vmemmap_buf_end - vmemmap_buf);
Index: linux-2.6/arch/x86/include/asm/lmb.h
===================================================================
--- /dev/null
+++ linux-2.6/arch/x86/include/asm/lmb.h
@@ -0,0 +1,8 @@
+#ifndef _X86_LMB_H
+#define _X86_LMB_H
+
+#define LMB_DBG(fmt...) printk(fmt)
+
+#define LMB_REAL_LIMIT 0
+
+#endif
> please check
>
> [PATCH 01/20] x86: add find_e820_area_node
>
>
> [RFC PATCH] x86: use lmb to replace early_res
>
> still keep kernel/early_res.c for the extension.
>
> should move those file to lib/lmb.c later?
>
> Signed-off-by: Yinghai Lu <yin...@kernel.org>
>
> ---
> arch/x86/Kconfig | 1
> arch/x86/include/asm/e820.h | 38 +-
> arch/x86/include/asm/lmb.h | 8
> arch/x86/kernel/e820.c | 163 +----------
> arch/x86/kernel/head.c | 2
> arch/x86/kernel/head32.c | 4
> arch/x86/kernel/head64.c | 2
> arch/x86/kernel/setup.c | 2
> arch/x86/kernel/setup_percpu.c | 6
> include/linux/early_res.h | 9
> include/linux/lmb.h | 5
> kernel/early_res.c | 594 ++++++++++++++++-------------------------
> lib/lmb.c | 9
> mm/page_alloc.c | 2
> mm/sparse-vmemmap.c | 4
> 15 files changed, 321 insertions(+), 528 deletions(-)
That looks like a very promising direction!
There are several things to do to make the approach fully clean:
1)
I think we want to shape this as a series of simpler (and bisectable) patches.
2)
I think we also need to concentrate the changes back into LMB:
those new lmb_*() APIs should go into lmb.h.
3)
Furthermore, i think all of early_res.c should move into lmb.c as well and we
should eliminate kernel/early_res.c.
early_res.h will go away as well and all the new APIs will be in lmb.h.
4)
Also, we should move lib/lmb.c to mm/lmb.c, as now it's not just some optional
library but _the_ main early-reserve memory subsystem used by the biggest
Linux architectures.
5)
Could we perhaps also try to eliminate e820_*() method uses in arch/x86/, and
replace them by lmb_*() API uses? (that too should be a step by step method,
for bisectability)
> +++ linux-2.6/include/linux/lmb.h
> @@ -26,7 +26,8 @@ struct lmb_property {
> struct lmb_region {
> unsigned long cnt;
> u64 size;
> - struct lmb_property region[MAX_LMB_REGIONS+1];
> + struct lmb_property *region;
> + unsigned long region_array_size;
> };
I suspect this should keep current LMB architectures working, right?
Ingo
the problem exists in the x86 bits even before early_res is used as the bootmem replacement.
after the early_res-based bootmem replacement, alloc_bootmem_node can still get a range on the correct node.
this patch fixes the problem for allocations made before bootmem (or its early_res replacement) is available.
right now the only user is x86 64-bit NUMA, to find the node data.
the point is to use early_node_map[] together with find_e820_area_node().
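Concretely, the intended ordering looks like this (a sketch; the arguments are placeholders):

	/* 1. populate early_node_map[] first */
	e820_register_active_regions(nid, start_pfn, last_pfn);

	/* 2. node-aware allocation then prefers ranges on that node,
	 *    and falls back to a plain find_e820_area() if nothing fits */
	mem = find_e820_area_node(nid, start, end, size, align);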
Signed-off-by: Yinghai Lu <yin...@kernel.org>
---
arch/x86/include/asm/e820.h | 1 +
arch/x86/kernel/e820.c | 15 +++++++++++++++
arch/x86/mm/numa_64.c | 4 ++--
include/linux/mm.h | 2 ++
mm/page_alloc.c | 37 +++++++++++++++++++++++--------------
5 files changed, 43 insertions(+), 16 deletions(-)
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index ec8a52d..41553af 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -116,6 +116,7 @@ extern unsigned long end_user_pfn;
extern u64 find_e820_area(u64 start, u64 end, u64 size, u64 align);
extern u64 find_e820_area_size(u64 start, u64 *sizep, u64 align);
+u64 find_e820_area_node(int nid, u64 start, u64 end, u64 size, u64 align);
extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
#include <linux/early_res.h>
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 740b440..05ee724 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -787,6 +787,21 @@ u64 __init find_e820_area_size(u64 start, u64 *sizep, u64 align)
return -1ULL;
}
+u64 __init find_e820_area_node(int nid, u64 start, u64 end, u64 size, u64 align)
+{
+ u64 addr;
+ /*
+ * need to call this function after e820_register_active_regions
+ * so early_node_map[] is set
+ */
+ addr = find_memory_core_early(nid, size, align, start, end);
+ if (addr != -1ULL)
+ return addr;
+
+ /* fallback, should already have start end in the node range */
+ return find_e820_area(start, end, size, align);
+}
+
/*
* pre allocated 4k and reserved it in e820
*/
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 8948f47..ffc5ad5 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -174,7 +174,7 @@ static void * __init early_node_mem(int nodeid, unsigned long start,
if (start < (MAX_DMA32_PFN<<PAGE_SHIFT) &&
end > (MAX_DMA32_PFN<<PAGE_SHIFT))
start = MAX_DMA32_PFN<<PAGE_SHIFT;
- mem = find_e820_area(start, end, size, align);
+ mem = find_e820_area_node(nodeid, start, end, size, align);
if (mem != -1L)
return __va(mem);
@@ -184,7 +184,7 @@ static void * __init early_node_mem(int nodeid, unsigned long start,
start = MAX_DMA32_PFN<<PAGE_SHIFT;
else
start = MAX_DMA_PFN<<PAGE_SHIFT;
- mem = find_e820_area(start, end, size, align);
+ mem = find_e820_area_node(nodeid, start, end, size, align);
if (mem != -1L)
return __va(mem);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e70f21b..5c2d17e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1160,6 +1160,8 @@ extern void free_bootmem_with_active_regions(int nid,
unsigned long max_low_pfn);
int add_from_early_node_map(struct range *range, int az,
int nr_range, int nid);
+u64 __init find_memory_core_early(int nid, u64 size, u64 align,
+ u64 goal, u64 limit);
void *__alloc_memory_core_early(int nodeid, u64 size, u64 align,
u64 goal, u64 limit);
typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d03c946..eef3757 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3408,12 +3408,11 @@ int __init add_from_early_node_map(struct range *range, int az,
return nr_range;
}
-#ifdef CONFIG_NO_BOOTMEM
-void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
+#ifdef CONFIG_HAVE_EARLY_RES
+u64 __init find_memory_core_early(int nid, u64 size, u64 align,
u64 goal, u64 limit)
{
int i;
- void *ptr;
/* need to go over early_node_map to find out good range for node */
for_each_active_range_index_in_nid(i, nid) {
@@ -3430,20 +3429,30 @@ void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
if (addr == -1ULL)
continue;
-#if 0
- printk(KERN_DEBUG "alloc (nid=%d %llx - %llx) (%llx - %llx) %llx %llx => %llx\n",
- nid,
- ei_start, ei_last, goal, limit, size,
- align, addr);
+ return addr;
+ }
+
+ return -1ULL;
+}
#endif
- ptr = phys_to_virt(addr);
- memset(ptr, 0, size);
- reserve_early_without_check(addr, addr + size, "BOOTMEM");
- return ptr;
- }
+#ifdef CONFIG_NO_BOOTMEM
+void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
+ u64 goal, u64 limit)
+{
+ void *ptr;
- return NULL;
+ u64 addr;
+
+ addr = find_memory_core_early(nid, size, align, goal, limit);
+
+ if (addr == -1ULL)
+ return NULL;
+
+ ptr = phys_to_virt(addr);
+ memset(ptr, 0, size);
+ reserve_early_without_check(addr, addr + size, "BOOTMEM");
+ return ptr;
}
#endif
--
1.6.4.2
should move those files to lib/lmb.c later?
-v2: fix NO_BOOTMEM hang with printk
Signed-off-by: Yinghai Lu <yin...@kernel.org>
---
arch/x86/Kconfig | 1 +
arch/x86/include/asm/e820.h | 38 ++-
arch/x86/include/asm/lmb.h | 8 +
arch/x86/kernel/e820.c | 163 ++----------
arch/x86/kernel/head.c | 2 +-
arch/x86/kernel/head32.c | 4 +-
arch/x86/kernel/head64.c | 2 +
arch/x86/kernel/setup.c | 2 +
arch/x86/kernel/setup_percpu.c | 6 -
include/linux/early_res.h | 9 +-
include/linux/lmb.h | 5 +-
kernel/early_res.c | 593 +++++++++++++++------------------------
lib/lmb.c | 11 +-
mm/page_alloc.c | 2 +-
mm/sparse-vmemmap.c | 4 +-
15 files changed, 317 insertions(+), 533 deletions(-)
create mode 100644 arch/x86/include/asm/lmb.h
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 6a80bce..585f611 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -27,6 +27,7 @@ config X86
select HAVE_PERF_EVENTS if (!M386 && !M486)
select HAVE_IOREMAP_PROT
select HAVE_KPROBES
+ select HAVE_LMB
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_WANT_FRAME_POINTERS
select HAVE_DMA_ATTRS
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index 01bc987..2b57ff6 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -113,22 +113,36 @@ static inline void early_memtest(unsigned long start, unsigned long end)
extern unsigned long end_user_pfn;
-extern u64 find_e820_area(u64 start, u64 end, u64 size, u64 align);
-extern u64 find_e820_area_size(u64 start, u64 *sizep, u64 align);
-u64 find_e820_area_node(int nid, u64 start, u64 end, u64 size, u64 align);
-extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
#include <linux/early_res.h>
+static inline u64 find_e820_area(u64 start, u64 end, u64 size, u64 align)
+{
+ return find_lmb_area(start, end, size, align);
+}
+static inline u64 find_e820_area_size(u64 start, u64 *sizep, u64 align)
+{
+ return find_lmb_area_size(start, sizep, align);
+}
+static inline u64
+find_e820_area_node(int nid, u64 start, u64 end, u64 size, u64 align)
+{
diff --git a/arch/x86/include/asm/lmb.h b/arch/x86/include/asm/lmb.h
new file mode 100644
index 0000000..d8fbdbd
--- /dev/null
+++ b/arch/x86/include/asm/lmb.h
@@ -0,0 +1,8 @@
+#ifndef _X86_LMB_H
+#define _X86_LMB_H
+
+#define LMB_DBG(fmt...) printk(fmt)
+
+#define LMB_REAL_LIMIT 0
+
+#endif
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 0c7143b..2e61ef6 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -15,6 +15,7 @@
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/firmware-map.h>
+#include <linux/lmb.h>
#include <asm/e820.h>
#include <asm/proto.h>
@@ -726,37 +727,6 @@ static int __init e820_mark_nvs_memory(void)
@@ -765,47 +735,6 @@ u64 __init get_max_mapped(void)
- * need to call this function after e820_register_active_regions
- * so early_node_map[] is set
- */
- addr = find_memory_core_early(nid, size, align, start, end);
- if (addr != -1ULL)
- return addr;
-
- /* fallback, should already have start end in the node range */
- return find_e820_area(start, end, size, align);
-}
/*
* pre allocated 4k and reserved it in e820
@@ -899,74 +828,6 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
@@ -1057,6 +918,28 @@ void __init finish_e820_parsing(void)
}
}
+void __init init_lmb_memory(void)
+{
+ lmb_init();
+}
+
+void __init fill_lmb_memory(void)
+{
+ int i;
+
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+
+ if (ei->type != E820_RAM)
+ continue;
+ lmb_add(ei->addr, ei->size);
+ }
+
+ lmb_analyze();
+
+ lmb_dump_all();
+}
+
static inline const char *e820_type_to_string(int e820_type)
{
switch (e820_type) {
diff --git a/arch/x86/kernel/head.c b/arch/x86/kernel/head.c
index 3e66bd3..e0d0ce5 100644
--- a/arch/x86/kernel/head.c
+++ b/arch/x86/kernel/head.c
@@ -51,5 +51,5 @@ void __init reserve_ebda_region(void)
lowmem = 0x9f000;
/* reserve all memory between lowmem and the 1MB mark */
- reserve_early_overlap_ok(lowmem, 0x100000, "BIOS reserved");
+ reserve_early(lowmem, 0x100000, "BIOS reserved");
}
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index adedeef..1b723e3 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -29,13 +29,15 @@ static void __init i386_default_early_setup(void)
void __init i386_start_kernel(void)
{
+
+ init_lmb_memory();
#ifdef CONFIG_X86_TRAMPOLINE
/*
* But first pinch a few for the stack/trampoline stuff
* FIXME: Don't need the extra page at 4K, but need to fix
* trampoline before removing it. (see the GDT stuff)
*/
- reserve_early_overlap_ok(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE,
+ reserve_early(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE,
"EX TRAMPOLINE");
#endif
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index b5a9896..86e6a9b 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -96,6 +96,8 @@ void __init x86_64_start_kernel(char * real_mode_data)
void __init x86_64_start_reservations(char *real_mode_data)
{
+ init_lmb_memory();
+
copy_bootdata(__va(real_mode_data));
reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 3787a82..d1530f4 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -894,6 +894,8 @@ void __init setup_arch(char **cmdline_p)
max_pfn_mapped = KERNEL_IMAGE_SIZE >> PAGE_SHIFT;
#endif
+ fill_lmb_memory();
+
#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
setup_bios_corruption_check();
#endif
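
Taken together, the hook placement above gives the following boot order (a summary, not code from the patch):

	/*
	 * i386_start_kernel() / x86_64_start_reservations():
	 *	init_lmb_memory();	<- wire lmb.{memory,reserved}.region to
	 *				   the static bootstrap arrays
	 *	reserve_early(...);	<- reservations now land in lmb.reserved
	 * ...
	 * setup_arch():
	 *	fill_lmb_memory();	<- add every E820_RAM entry to lmb.memory,
	 *				   then lmb_analyze() + lmb_dump_all()
	 */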
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index ef6370b..35abcb8 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -137,13 +137,7 @@ static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
static void __init pcpu_fc_free(void *ptr, size_t size)
{
-#ifdef CONFIG_NO_BOOTMEM
- u64 start = __pa(ptr);
- u64 end = start + size;
- free_early_partial(start, end);
-#else
free_bootmem(__pa(ptr), size);
-#endif
}
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
diff --git a/include/linux/early_res.h b/include/linux/early_res.h
index 29c09f5..991be64 100644
--- a/include/linux/early_res.h
+++ b/include/linux/early_res.h
@@ -5,15 +5,18 @@
extern void reserve_early(u64 start, u64 end, char *name);
extern void reserve_early_overlap_ok(u64 start, u64 end, char *name);
extern void free_early(u64 start, u64 end);
-void free_early_partial(u64 start, u64 end);
extern void early_res_to_bootmem(u64 start, u64 end);
-void reserve_early_without_check(u64 start, u64 end, char *name);
u64 find_early_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
u64 size, u64 align);
u64 find_early_area_size(u64 ei_start, u64 ei_last, u64 start,
u64 *sizep, u64 align);
-u64 find_fw_memmap_area(u64 start, u64 end, u64 size, u64 align);
+u64 find_lmb_area(u64 start, u64 end, u64 size, u64 align);
+u64 find_lmb_area_size(u64 start, u64 *sizep, u64 align);
+u64 find_lmb_area_node(int nid, u64 start, u64 end, u64 size, u64 align);
+void lmb_register_active_regions(int nid, unsigned long start_pfn,
+ unsigned long last_pfn);
+u64 lmb_hole_size(u64 start, u64 end);
u64 get_max_mapped(void);
#include <linux/range.h>
int get_free_all_memory_range(struct range **rangep, int nodeid);
diff --git a/include/linux/lmb.h b/include/linux/lmb.h
index f3d1433..8799015 100644
--- a/include/linux/lmb.h
+++ b/include/linux/lmb.h
@@ -26,7 +26,8 @@ struct lmb_property {
struct lmb_region {
unsigned long cnt;
u64 size;
- struct lmb_property region[MAX_LMB_REGIONS+1];
+ struct lmb_property *region;
+ unsigned long region_array_size;
};
struct lmb {
@@ -37,6 +38,8 @@ struct lmb {
};
extern struct lmb lmb;
+extern struct lmb_property lmb_memory_region[MAX_LMB_REGIONS + 1];
+extern struct lmb_property lmb_reserved_region[MAX_LMB_REGIONS + 1];
extern void __init lmb_init(void);
extern void __init lmb_analyze(void);
diff --git a/kernel/early_res.c b/kernel/early_res.c
index 69bed5b..5af654d 100644
--- a/kernel/early_res.c
+++ b/kernel/early_res.c
@@ -6,284 +6,60 @@
+ unsigned long rgnsz = type->region_array_size;
/* do we have enough slots left ? */
- if ((max_early_res - early_res_count) > max(max_early_res/8, 2))
+ if ((rgnsz - type->cnt) > max_t(unsigned long, rgnsz/8, 2))
return;
+ old = type->region;
/* double it */
mem = -1ULL;
- size = sizeof(struct early_res) * max_early_res * 2;
- if (early_res == early_res_x)
+ size = sizeof(struct lmb_property) * rgnsz * 2;
+ if (old == lmb_reserved_region)
start = 0;
else
- start = early_res[0].end;
+ start = __pa(old) + sizeof(struct lmb_property) * rgnsz;
end = ex_start;
if (start + size < end)
- mem = find_fw_memmap_area(start, end, size,
- sizeof(struct early_res));
+ mem = find_lmb_area(start, end, size,
+ sizeof(struct lmb_property));
if (mem == -1ULL) {
start = ex_end;
end = get_max_mapped();
if (start + size < end)
- mem = find_fw_memmap_area(start, end, size,
- sizeof(struct early_res));
+ mem = find_lmb_area(start, end, size, sizeof(struct lmb_property));
+ memcpy(&new[0], &old[0], sizeof(struct lmb_property) * rgnsz);
+ memset(&new[rgnsz], 0, sizeof(struct lmb_property) * rgnsz);
+
+ memset(&old[0], 0, sizeof(struct lmb_property) * rgnsz);
+ type->region = new;
+ type->region_array_size = rgnsz * 2;
+ printk(KERN_DEBUG "lmb.reserved.region array is doubled to %ld at [%llx - %llx]\n",
+ type->region_array_size, mem, mem + size - 1);
+ lmb_reserve(mem, sizeof(struct lmb_property) * rgnsz * 2);
+ if (old != lmb_reserved_region)
+ lmb_free(__pa(old), sizeof(struct lmb_property) * rgnsz);
}
-/*
- * Most early reservations come here.
- *
- * We first have drop_overlaps_that_are_ok() drop any pre-existing
- * 'overlap_ok' ranges, so that we can then reserve this memory
- * range without risk of panic'ing on an overlapping overlap_ok
- * early reservation.
- */
void __init reserve_early(u64 start, u64 end, char *name)
{
if (start >= end)
@@ -291,68 +67,21 @@ void __init reserve_early(u64 start, u64 end, char *name)
+ /* keep punching holes; this may use up extra slots too */
+ __check_and_double_early_res(start, end);
- drop_range_partial(i, start, end);
- goto try_next;
+ lmb_free(start, end - start);
}
#ifdef CONFIG_NO_BOOTMEM
@@ -360,50 +89,46 @@ static void __init subtract_early_res(struct range *range, int az)
{
int i, count;
u64 final_start, final_end;
- int idx = 0;
- count = 0;
- for (i = 0; i < max_early_res && early_res[i].end; i++)
- count++;
+ /* take out the table itself */
+ if (lmb.reserved.region != lmb_reserved_region)
+ lmb_free(__pa(lmb.reserved.region), sizeof(struct lmb_property) * lmb.reserved.region_array_size);
- /* need to skip first one ?*/
- if (early_res != early_res_x)
- idx = 1;
+ count = lmb.reserved.cnt;
#define DEBUG_PRINT_EARLY_RES 1
#if DEBUG_PRINT_EARLY_RES
printk(KERN_INFO "Subtract (%d early reservations)\n", count);
#endif
- for (i = idx; i < count; i++) {
- struct early_res *r = &early_res[i];
+
+ for (i = 0; i < count; i++) {
+ struct lmb_property *r = &lmb.reserved.region[i];
#if DEBUG_PRINT_EARLY_RES
- printk(KERN_INFO " #%d [%010llx - %010llx] %15s\n", i,
- r->start, r->end, r->name);
+ printk(KERN_INFO " #%d [%010llx - %010llx]\n", i,
+ r->base, r->base + r->size);
#endif
- final_start = PFN_DOWN(r->start);
- final_end = PFN_UP(r->end);
+ final_start = PFN_DOWN(r->base);
+ final_end = PFN_UP(r->base + r->size);
if (final_start >= final_end)
continue;
subtract_range(range, az, final_start, final_end);
}
-
+ /* put it back */
+ if (lmb.reserved.region != lmb_reserved_region)
+ lmb_reserve(__pa(lmb.reserved.region), sizeof(struct lmb_property) * lmb.reserved.region_array_size);
}
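
The take-out/put-back pair deserves a note (a reading of the code, not stated in the patch): the reserved array describes itself, and its own entry must not be carved out of the free ranges - this mirrors the old "need to skip first one" logic.

	/*
	 * lmb_free(array)    -> drop the array's self-entry, so its pages are
	 *                       not subtracted and stay free for the buddy
	 *                       allocator (the array is discarded soon after);
	 * subtract_range()   -> carve out all remaining reservations;
	 * lmb_reserve(array) -> re-add the self-entry, since the array is
	 *                       still in use until the caller is done.
	 */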
int __init get_free_all_memory_range(struct range **rangep, int nodeid)
{
- int i, count;
+ int count;
u64 start = 0, end;
u64 size;
u64 mem;
struct range *range;
int nr_range;
- count = 0;
- for (i = 0; i < max_early_res && early_res[i].end; i++)
- count++;
-
- count *= 2;
+ count = lmb.reserved.cnt * 2;
size = sizeof(struct range) * count;
end = get_max_mapped();
@@ -411,12 +136,15 @@ int __init get_free_all_memory_range(struct range **rangep, int nodeid)
if (end > (MAX_DMA32_PFN << PAGE_SHIFT))
start = MAX_DMA32_PFN << PAGE_SHIFT;
#endif
- mem = find_fw_memmap_area(start, end, size, sizeof(struct range));
+ mem = find_lmb_area(start, end, size, sizeof(struct range));
if (mem == -1ULL)
panic("can not find more space for range free");
range = __va(mem);
- /* use early_node_map[] and early_res to get range array at first */
+ /*
+ * use early_node_map[] and lmb.reserved.region to get range array
+ * at first
+ */
memset(range, 0, size);
nr_range = 0;
@@ -430,10 +158,10 @@ int __init get_free_all_memory_range(struct range **rangep, int nodeid)
/* need to clear it ? */
if (nodeid == MAX_NUMNODES) {
- memset(&early_res[0], 0,
- sizeof(struct early_res) * max_early_res);
- early_res = NULL;
- max_early_res = 0;
+ memset(&lmb.reserved.region[0], 0, sizeof(struct lmb_property) * lmb.reserved.region_array_size);
+ lmb.reserved.region = NULL;
+ lmb.reserved.region_array_size = 0;
+ lmb.reserved.cnt = 0;
}
*rangep = range;
@@ -444,24 +172,20 @@ void __init early_res_to_bootmem(u64 start, u64 end)
{
int i, count;
u64 final_start, final_end;
- int idx = 0;
-
- count = 0;
- for (i = 0; i < max_early_res && early_res[i].end; i++)
- count++;
-
- /* need to skip first one ?*/
- if (early_res != early_res_x)
- idx = 1;
-
- printk(KERN_INFO "(%d/%d early reservations) ==> bootmem [%010llx - %010llx]\n",
- count - idx, max_early_res, start, end);
- for (i = idx; i < count; i++) {
- struct early_res *r = &early_res[i];
- printk(KERN_INFO " #%d [%010llx - %010llx] %16s", i,
- r->start, r->end, r->name);
- final_start = max(start, r->start);
- final_end = min(end, r->end);
+
+ /* take out the table itself */
+ if (lmb.reserved.region != lmb_reserved_region)
+ lmb_free(__pa(lmb.reserved.region), sizeof(struct lmb_property) * lmb.reserved.region_array_size);
+
+ count = lmb.reserved.cnt;
+ printk(KERN_INFO "(%d early reservations) ==> bootmem [%010llx - %010llx]\n",
+ count, start, end);
+ for (i = 0; i < count; i++) {
+ struct lmb_property *r = &lmb.reserved.region[i];
+ printk(KERN_INFO " #%d [%010llx - %010llx] ", i,
+ r->base, r->base + r->size);
+ final_start = max(start, r->base);
+ final_end = min(end, r->base + r->size);
if (final_start >= final_end) {
printk(KERN_CONT "\n");
continue;
@@ -472,25 +196,42 @@ void __init early_res_to_bootmem(u64 start, u64 end)
BOOTMEM_DEFAULT);
}
/* clear them */
- memset(&early_res[0], 0, sizeof(struct early_res) * max_early_res);
- early_res = NULL;
- max_early_res = 0;
- early_res_count = 0;
+ memset(&lmb.reserved.region[0], 0, sizeof(struct lmb_property) * lmb.reserved.region_array_size);
@@ -506,20 +247,20 @@ static inline int __init bad_addr_size(u64 *addrp, u64 *sizep, u64 align)
int changed = 0;
again:
last = addr + size;
- for (i = 0; i < max_early_res && early_res[i].end; i++) {
- struct early_res *r = &early_res[i];
- if (last > r->start && addr < r->start) {
- size = r->start - addr;
+ for (i = 0; i < lmb.reserved.cnt && lmb.reserved.region[i].size; i++) {
+ struct lmb_property *r = &lmb.reserved.region[i];
+ if (last > r->base && addr < r->base) {
+ size = r->base - addr;
changed = 1;
goto again;
}
- if (last > r->end && addr < r->end) {
- addr = round_up(r->end, align);
+ if (last > (r->base + r->size) && addr < (r->base + r->size)) {
+ addr = round_up(r->base + r->size, align);
size = last - addr;
changed = 1;
goto again;
}
- if (last <= r->end && addr >= r->start) {
+ if (last <= (r->base + r->size) && addr >= r->base) {
(*sizep)++;
return 0;
}
@@ -531,13 +272,8 @@ again:
return changed;
}
-/*
- * Find a free area with specified alignment in a specific range.
- * only with the area.between start to end is active range from early_node_map
- * so they are good as RAM
- */
u64 __init find_early_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
- u64 size, u64 align)
+ u64 size, u64 align)
{
u64 addr, last;
@@ -582,3 +318,130 @@ u64 __init find_early_area_size(u64 ei_start, u64 ei_last, u64 start,
out:
return -1ULL;
}
+
+/*
+ * Find a free area with specified alignment in a specific range.
+ */
+u64 __init find_lmb_area(u64 start, u64 end, u64 size, u64 align)
+{
+ int i;
+
+ for (i = 0; i < lmb.memory.cnt; i++) {
+ u64 ei_start = lmb.memory.region[i].base;
+ u64 ei_last = ei_start + lmb.memory.region[i].size;
+ u64 addr;
+
+ addr = find_early_area(ei_start, ei_last, start, end,
+ size, align);
+
+ if (addr != -1ULL)
+ return addr;
+ }
+ return -1ULL;
+}
+
+/*
+ * Find next free range after *start
+ */
+u64 __init find_lmb_area_size(u64 start, u64 *sizep, u64 align)
+{
+ int i;
+
+ for (i = 0; i < lmb.memory.cnt; i++) {
+ u64 ei_start = lmb.memory.region[i].base;
+ u64 ei_last = ei_start + lmb.memory.region[i].size;
+ u64 addr;
+
+ addr = find_early_area_size(ei_start, ei_last, start,
+ sizep, align);
+
+ if (addr != -1ULL)
+ return addr;
+ }
+
+ return -1ULL;
+}
+
+u64 __init find_lmb_area_node(int nid, u64 start, u64 end, u64 size, u64 align)
+{
+ u64 addr;
+ /*
+ * need to call this function after e820_register_active_regions
+ * so early_node_map[] is set
+ */
+ addr = find_memory_core_early(nid, size, align, start, end);
+ if (addr != -1ULL)
+ return addr;
+
+ /* fallback, should already have start end in the node range */
+ return find_lmb_area(start, end, size, align);
+}
+
+void __init lmb_register_active_regions(int nid, unsigned long start_pfn,
+ unsigned long last_pfn)
diff --git a/lib/lmb.c b/lib/lmb.c
index b1fc526..2fe35a2 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -18,6 +18,8 @@
#define LMB_ALLOC_ANYWHERE 0
struct lmb lmb;
+struct lmb_property lmb_memory_region[MAX_LMB_REGIONS + 1];
+struct lmb_property lmb_reserved_region[MAX_LMB_REGIONS + 1];
static int lmb_debug;
@@ -106,6 +108,11 @@ static void lmb_coalesce_regions(struct lmb_region *rgn,
void __init lmb_init(void)
{
+ lmb.memory.region = lmb_memory_region;
+ lmb.memory.region_array_size = ARRAY_SIZE(lmb_memory_region);
+ lmb.reserved.region = lmb_reserved_region;
+ lmb.reserved.region_array_size = ARRAY_SIZE(lmb_reserved_region);
+
/* Create a dummy zero size LMB which will get coalesced away later.
* This simplifies the lmb_add() code below...
*/
@@ -169,7 +176,7 @@ static long lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
if (coalesced)
return coalesced;
- if (rgn->cnt >= MAX_LMB_REGIONS)
+ if (rgn->cnt >= (rgn->region_array_size - 1))
return -1;
/* Couldn't coalesce the LMB, so add it to the sorted table. */
@@ -539,3 +546,5 @@ int lmb_find(struct lmb_property *res)
}
return -1;
}
+
+
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index eef3757..04c241a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3451,7 +3451,7 @@ void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
ptr = phys_to_virt(addr);
memset(ptr, 0, size);
- reserve_early_without_check(addr, addr + size, "BOOTMEM");
+ reserve_early(addr, addr + size, "BOOTMEM");
return ptr;
}
#endif
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 392b9bb..ca56c5d 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -225,8 +225,8 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
char name[15];
snprintf(name, sizeof(name), "MEMMAP %d", nodeid);
- reserve_early_without_check(__pa(vmemmap_buf_start),
- __pa(vmemmap_buf), name);
+ reserve_early(__pa(vmemmap_buf_start),
+ __pa(vmemmap_buf), name);
}
#else
free_bootmem(__pa(vmemmap_buf), vmemmap_buf_end - vmemmap_buf);
and e820_saved can then be changed to __initdata
Signed-off-by: Yinghai Lu <yin...@kernel.org>
---
arch/x86/include/asm/e820.h | 5 ++---
arch/x86/kernel/e820.c | 26 ++++++++++++++++++--------
arch/x86/kernel/efi.c | 2 +-
arch/x86/kernel/setup.c | 10 +++++-----
arch/x86/xen/setup.c | 4 +---
5 files changed, 27 insertions(+), 20 deletions(-)
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index 41553af..01bc987 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -75,15 +75,14 @@ struct e820map {
#ifdef __KERNEL__
/* see comment in arch/x86/kernel/e820.c */
extern struct e820map e820;
-extern struct e820map e820_saved;
extern unsigned long pci_mem_start;
extern int e820_any_mapped(u64 start, u64 end, unsigned type);
extern int e820_all_mapped(u64 start, u64 end, unsigned type);
extern void e820_add_region(u64 start, u64 size, int type);
extern void e820_print_map(char *who);
-extern int
-sanitize_e820_map(struct e820entry *biosmap, int max_nr_map, u32 *pnr_map);
+int sanitize_e820_map(void);
+void save_e820_map(void);
extern u64 e820_update_range(u64 start, u64 size, unsigned old_type,
unsigned new_type);
extern u64 e820_remove_range(u64 start, u64 size, unsigned old_type,
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 05ee724..0c7143b 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -35,7 +35,7 @@
* next kernel with full memory.
*/
struct e820map e820;
-struct e820map e820_saved;
+static struct e820map __initdata e820_saved;
/* For PCI or other memory-mapped resources */
unsigned long pci_mem_start = 0xaeedbabe;
@@ -224,7 +224,7 @@ void __init e820_print_map(char *who)
* ______________________4_
*/
-int __init sanitize_e820_map(struct e820entry *biosmap, int max_nr_map,
+static int __init __sanitize_e820_map(struct e820entry *biosmap, int max_nr_map,
u32 *pnr_map)
{
struct change_member {
@@ -383,6 +383,11 @@ int __init sanitize_e820_map(struct e820entry *biosmap, int max_nr_map,
return 0;
}
+int __init sanitize_e820_map(void)
+{
+ return __sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+}
+
static int __init __append_e820_map(struct e820entry *biosmap, int nr_map)
{
while (nr_map) {
@@ -555,7 +560,7 @@ void __init update_e820(void)
u32 nr_map;
nr_map = e820.nr_map;
- if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr_map))
+ if (__sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr_map))
return;
e820.nr_map = nr_map;
printk(KERN_INFO "modified physical RAM map:\n");
@@ -566,7 +571,7 @@ static void __init update_e820_saved(void)
u32 nr_map;
nr_map = e820_saved.nr_map;
- if (sanitize_e820_map(e820_saved.map, ARRAY_SIZE(e820_saved.map), &nr_map))
+ if (__sanitize_e820_map(e820_saved.map, ARRAY_SIZE(e820_saved.map), &nr_map))
return;
e820_saved.nr_map = nr_map;
}
@@ -661,7 +666,7 @@ void __init parse_e820_ext(struct setup_data *sdata, unsigned long pa_data)
sdata = early_ioremap(pa_data, map_len);
extmap = (struct e820entry *)(sdata->data);
__append_e820_map(extmap, entries);
- sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+ sanitize_e820_map();
if (map_len > PAGE_SIZE)
early_iounmap(sdata, map_len);
printk(KERN_INFO "extended physical RAM map:\n");
@@ -1043,7 +1048,7 @@ void __init finish_e820_parsing(void)
if (userdef) {
u32 nr = e820.nr_map;
- if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr) < 0)
+ if (__sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr) < 0)
early_panic("Invalid user supplied memory map");
e820.nr_map = nr;
@@ -1173,7 +1178,7 @@ char *__init default_machine_specific_memory_setup(void)
* the next section from 1mb->appropriate_mem_k
*/
new_nr = boot_params.e820_entries;
- sanitize_e820_map(boot_params.e820_map,
+ __sanitize_e820_map(boot_params.e820_map,
ARRAY_SIZE(boot_params.e820_map),
&new_nr);
boot_params.e820_entries = new_nr;
@@ -1200,12 +1205,17 @@ char *__init default_machine_specific_memory_setup(void)
return who;
}
+void __init save_e820_map(void)
+{
+ memcpy(&e820_saved, &e820, sizeof(struct e820map));
+}
+
void __init setup_memory_map(void)
{
char *who;
who = x86_init.resources.memory_setup();
- memcpy(&e820_saved, &e820, sizeof(struct e820map));
+ save_e820_map();
printk(KERN_INFO "BIOS-provided physical RAM map:\n");
e820_print_map(who);
}
diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c
index c2fa9b8..299f03f 100644
--- a/arch/x86/kernel/efi.c
+++ b/arch/x86/kernel/efi.c
@@ -272,7 +272,7 @@ static void __init do_add_efi_memmap(void)
}
e820_add_region(start, size, e820_type);
}
- sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+ sanitize_e820_map();
}
void __init efi_reserve_early(void)
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 5d7ba1a..3787a82 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -461,8 +461,8 @@ static void __init e820_reserve_setup_data(void)
if (!found)
return;
- sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
- memcpy(&e820_saved, &e820, sizeof(struct e820map));
+ sanitize_e820_map();
+ save_e820_map();
printk(KERN_INFO "extended physical RAM map:\n");
e820_print_map("reserve setup_data");
}
@@ -614,7 +614,7 @@ static int __init dmi_low_memory_corruption(const struct dmi_system_id *d)
d->ident);
e820_update_range(0, 0x10000, E820_RAM, E820_RESERVED);
- sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+ sanitize_e820_map();
return 0;
}
@@ -683,7 +683,7 @@ static void __init trim_bios_range(void)
* take them out.
*/
e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
- sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+ sanitize_e820_map();
}
/*
@@ -854,7 +854,7 @@ void __init setup_arch(char **cmdline_p)
if (ppro_with_ram_bug()) {
e820_update_range(0x70000000ULL, 0x40000ULL, E820_RAM,
E820_RESERVED);
- sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+ sanitize_e820_map();
printk(KERN_INFO "fixed physical RAM map:\n");
e820_print_map("bad_ppro");
}
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index ad0047f..3f2c411 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -43,8 +43,6 @@ char * __init xen_memory_setup(void)
max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
- e820.nr_map = 0;
-
e820_add_region(0, PFN_PHYS((u64)max_pfn), E820_RAM);
/*
@@ -65,7 +63,7 @@ char * __init xen_memory_setup(void)
__pa(xen_start_info->pt_base),
"XEN START INFO");
- sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+ sanitize_e820_map();
return "Xen";
FWIW, several years ago a MontaVista representative said there were more Linux
units that did have their RT extensions than there were Linux units
that did not have them...
Gr{oetje,eeting}s,
Geert
--
Geert Uytterhoeven -- There's lots of Linux beyond ia32 -- ge...@linux-m68k.org
In personal conversations with technical people, I call myself a hacker. But
when I'm talking to journalists I just say "programmer" or something like that.
-- Linus Torvalds
will check it.
at least the include/linux/lmb.h and lib/lmb.c changes could be separated out.
the others look a little bit hard.
>
> 2)
>
> I think we also need to concentrate the changes back into LMB:
yes. put them in kernel/early_res.c and move them to lmb.c if the lmb guys are happy with the change.
next version
>
> 3)
>
> Furthermore, i think all of early_res.c should move into lmb.c as well and we
> should eliminate kernel/early_res.c.
>
> early_res.h will go away as well and all the new APIs will be in lmb.h.
currently there are three levels:
a. old lmb users
b. x86 with bootmem
c. x86 with no-bootmem
some functions could later be moved to a new bootmem.c
>
> 4)
>
> Also, we should move lib/lmb.c to mm/lmb.c, as now it's not just some optional
> library but _the_ main early-reserve memory subsystem used by the biggest
> Linux architectures.
yes
>
> 5)
>
> Could we perhaps also try to eliminate e820_*() method uses in arch/x86/, and
> replace them by lmb_*() API uses? (that too should be a step by step method,
> for bisectability)
yes.
except for e820_any_mapped(,,E820_RESERVED),
the others should not be used after fill_lmb_memory()
>
>> +++ linux-2.6/include/linux/lmb.h
>> @@ -26,7 +26,8 @@ struct lmb_property {
>> struct lmb_region {
>> unsigned long cnt;
>> u64 size;
>> - struct lmb_property region[MAX_LMB_REGIONS+1];
>> + struct lmb_property *region;
>> + unsigned long region_array_size;
>> };
>
> I suspect this should keep current LMB architectures still working, right?
they are still working; lmb_init() will connect the pointers to the static arrays.
Index: linux-2.6/lib/lmb.c
===================================================================
--- linux-2.6.orig/lib/lmb.c
+++ linux-2.6/lib/lmb.c
@@ -18,6 +18,8 @@
#define LMB_ALLOC_ANYWHERE 0
struct lmb lmb;
+struct lmb_property lmb_memory_region[MAX_LMB_REGIONS + 1];
+struct lmb_property lmb_reserved_region[MAX_LMB_REGIONS + 1];
static int lmb_debug;
@@ -106,6 +108,11 @@ static void lmb_coalesce_regions(struct
void __init lmb_init(void)
{
+ lmb.memory.region = lmb_memory_region;
+ lmb.memory.region_array_size = ARRAY_SIZE(lmb_memory_region);
+ lmb.reserved.region = lmb_reserved_region;
+ lmb.reserved.region_array_size = ARRAY_SIZE(lmb_reserved_region);
+
/* Create a dummy zero size LMB which will get coalesced away later.
* This simplifies the lmb_add() code below...
*/
@@ -169,7 +176,7 @@ static long lmb_add_region(struct lmb_re
if (coalesced)
return coalesced;
- if (rgn->cnt >= MAX_LMB_REGIONS)
+ if (rgn->cnt >= (rgn->region_array_size - 1))
return -1;
Thanks
Yinghai
> still keep kernel/early_res.c for the extension.
>
> should move those file to lib/lmb.c later?
>
> -v2: fix NO_BOOTMEM hang with printk
Would be nice to track known TODO items - that way people don't feel that you
are ignoring (or have missed) their feedback. For example, is the list of 4
improvements i suggested in the previous mail planned, or do you disagree with
some of it?
Thanks,
Ingo
> > 2)
> >
> > I think we also need to concentrate the changes back into LMB:
>
> yes. put them in kernel/early_res.c and move them to lmb.c if lmb gugs are
> happy with the change.
Yes, they seemed OK with changing it to accommodate x86, as long as current
behavior stays compatible and as long as the changes are squeaky-clean.
Both of which are highly reasonable expectations ;-)
> > early_res.h will go away as well and all the new APIs will be in lmb.h.
>
> current have three levels
> a. old lmb users
> b. x86 with bootmem
> c. x86 with no-bootmem
>
> some functions later could be moved to new bootmem.c
I think we want to work towards the end result where we don't have bootmem.c
anymore. I.e. a modern LMB architecture should generally not make use of
bootmem at all.
We could do that switch on x86 straight away, and make CONFIG_NO_BOOTMEM a
default-y option, hm? We could also hide the interactivity behind
CONFIG_DEBUG_VM or so - and eliminate it altogether later on.
We should also switch around the flag and turn it into CONFIG_BOOTMEM.
Hm?
Ingo
also add region_array_size in lmb_region to track the region array size.
-v3: separate the lmb core change into a separate patch
Signed-off-by: Yinghai Lu <yin...@kernel.org>
---
include/linux/lmb.h | 5 ++++-
lib/lmb.c | 11 ++++++++++-
2 files changed, 14 insertions(+), 2 deletions(-)
Index: linux-2.6/include/linux/lmb.h
===================================================================
--- linux-2.6.orig/include/linux/lmb.h
+++ linux-2.6/include/linux/lmb.h
@@ -26,7 +26,8 @@ struct lmb_property {
struct lmb_region {
unsigned long cnt;
u64 size;
- struct lmb_property region[MAX_LMB_REGIONS+1];
+ struct lmb_property *region;
+ unsigned long region_array_size;
};
struct lmb {
@@ -37,6 +38,8 @@ struct lmb {
};
extern struct lmb lmb;
+extern struct lmb_property lmb_memory_region[MAX_LMB_REGIONS + 1];
+extern struct lmb_property lmb_reserved_region[MAX_LMB_REGIONS + 1];
extern void __init lmb_init(void);
extern void __init lmb_analyze(void);
Index: linux-2.6/lib/lmb.c
===================================================================
--- linux-2.6.orig/lib/lmb.c
+++ linux-2.6/lib/lmb.c
@@ -18,6 +18,8 @@
#define LMB_ALLOC_ANYWHERE 0
struct lmb lmb;
+struct lmb_property lmb_memory_region[MAX_LMB_REGIONS + 1];
+struct lmb_property lmb_reserved_region[MAX_LMB_REGIONS + 1];
static int lmb_debug;
@@ -106,6 +108,11 @@ static void lmb_coalesce_regions(struct
void __init lmb_init(void)
{
+ lmb.memory.region = lmb_memory_region;
+ lmb.memory.region_array_size = ARRAY_SIZE(lmb_memory_region);
+ lmb.reserved.region = lmb_reserved_region;
+ lmb.reserved.region_array_size = ARRAY_SIZE(lmb_reserved_region);
+
/* Create a dummy zero size LMB which will get coalesced away later.
* This simplifies the lmb_add() code below...
*/
@@ -169,7 +176,7 @@ static long lmb_add_region(struct lmb_re
if (coalesced)
return coalesced;
- if (rgn->cnt >= MAX_LMB_REGIONS)
+ if (rgn->cnt >= (rgn->region_array_size - 1))
return -1;
/* Couldn't coalesce the LMB, so add it to the sorted table. */
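
Worth noting about the new bound (an observation, not stated in the patch): with the static arrays, region_array_size is MAX_LMB_REGIONS + 1, so

	rgn->cnt >= (rgn->region_array_size - 1)

is exactly the old (rgn->cnt >= MAX_LMB_REGIONS) check - existing LMB architectures see no behavior change, which is presumably the compatibility Ingo asked about.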
still keep kernel/early_res.c for the extension.
should move those files to lib/lmb.c later?
in early_res.c:
1. change find_e820_area_xxx to find_lmb_area_xxx
2. change e820_register_active_regions to lmb_register_active_regions.
3. reserve_early will call lmb_reserve directly.
4. free_early will call lmb_free directly.
5. remove functions that were only used by the old reserve_early and free_early
6. get_free_all_memory_range uses lmb.reserved.
7. early_res_to_bootmem uses lmb.reserved
8. add fill_lmb_memory() to fill lmb.memory according to e820 RAM entries
-v2: fix NO_BOOTMEM hang with printk
Signed-off-by: Yinghai Lu <yin...@kernel.org>
---
arch/x86/Kconfig | 1
arch/x86/include/asm/e820.h | 38 +-
arch/x86/include/asm/lmb.h | 8
arch/x86/kernel/e820.c | 163 +----------
arch/x86/kernel/head.c | 2
arch/x86/kernel/head32.c | 5
arch/x86/kernel/head64.c | 2
arch/x86/kernel/setup.c | 2
arch/x86/kernel/setup_percpu.c | 6
include/linux/early_res.h | 9
kernel/early_res.c | 592 +++++++++++++++--------------------------
mm/page_alloc.c | 2
mm/sparse-vmemmap.c | 4
13 files changed, 301 insertions(+), 533 deletions(-)
Index: linux-2.6/arch/x86/Kconfig
===================================================================
--- linux-2.6.orig/arch/x86/Kconfig
+++ linux-2.6/arch/x86/Kconfig
@@ -27,6 +27,7 @@ config X86
select HAVE_PERF_EVENTS if (!M386 && !M486)
select HAVE_IOREMAP_PROT
select HAVE_KPROBES
+ select HAVE_LMB
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_WANT_FRAME_POINTERS
select HAVE_DMA_ATTRS
Index: linux-2.6/arch/x86/include/asm/e820.h
===================================================================
--- linux-2.6.orig/arch/x86/include/asm/e820.h
+++ linux-2.6/arch/x86/include/asm/e820.h
@@ -113,22 +113,36 @@ static inline void early_memtest(unsigne
Index: linux-2.6/arch/x86/kernel/e820.c
===================================================================
--- linux-2.6.orig/arch/x86/kernel/e820.c
+++ linux-2.6/arch/x86/kernel/e820.c
@@ -15,6 +15,7 @@
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/firmware-map.h>
+#include <linux/lmb.h>
#include <asm/e820.h>
#include <asm/proto.h>
@@ -726,37 +727,6 @@ static int __init e820_mark_nvs_memory(v
Index: linux-2.6/arch/x86/kernel/head.c
===================================================================
--- linux-2.6.orig/arch/x86/kernel/head.c
+++ linux-2.6/arch/x86/kernel/head.c
@@ -51,5 +51,5 @@ void __init reserve_ebda_region(void)
lowmem = 0x9f000;
/* reserve all memory between lowmem and the 1MB mark */
- reserve_early_overlap_ok(lowmem, 0x100000, "BIOS reserved");
+ reserve_early(lowmem, 0x100000, "BIOS reserved");
}
Index: linux-2.6/arch/x86/kernel/head32.c
===================================================================
--- linux-2.6.orig/arch/x86/kernel/head32.c
+++ linux-2.6/arch/x86/kernel/head32.c
@@ -29,14 +29,15 @@ static void __init i386_default_early_se
void __init i386_start_kernel(void)
{
+ init_lmb_memory();
+
#ifdef CONFIG_X86_TRAMPOLINE
/*
* But first pinch a few for the stack/trampoline stuff
* FIXME: Don't need the extra page at 4K, but need to fix
* trampoline before removing it. (see the GDT stuff)
*/
- reserve_early_overlap_ok(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE,
- "EX TRAMPOLINE");
+ reserve_early(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE");
#endif
reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
Index: linux-2.6/arch/x86/kernel/head64.c
===================================================================
--- linux-2.6.orig/arch/x86/kernel/head64.c
+++ linux-2.6/arch/x86/kernel/head64.c
@@ -96,6 +96,8 @@ void __init x86_64_start_kernel(char * r
void __init x86_64_start_reservations(char *real_mode_data)
{
+ init_lmb_memory();
+
copy_bootdata(__va(real_mode_data));
reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
Index: linux-2.6/arch/x86/kernel/setup.c
===================================================================
--- linux-2.6.orig/arch/x86/kernel/setup.c
+++ linux-2.6/arch/x86/kernel/setup.c
@@ -894,6 +894,8 @@ void __init setup_arch(char **cmdline_p)
max_pfn_mapped = KERNEL_IMAGE_SIZE >> PAGE_SHIFT;
#endif
+ fill_lmb_memory();
+
#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
setup_bios_corruption_check();
#endif
Index: linux-2.6/arch/x86/kernel/setup_percpu.c
===================================================================
--- linux-2.6.orig/arch/x86/kernel/setup_percpu.c
+++ linux-2.6/arch/x86/kernel/setup_percpu.c
@@ -137,13 +137,7 @@ static void * __init pcpu_fc_alloc(unsig
static void __init pcpu_fc_free(void *ptr, size_t size)
{
-#ifdef CONFIG_NO_BOOTMEM
- u64 start = __pa(ptr);
- u64 end = start + size;
- free_early_partial(start, end);
-#else
free_bootmem(__pa(ptr), size);
-#endif
}
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
Index: linux-2.6/include/linux/early_res.h
===================================================================
--- linux-2.6.orig/include/linux/early_res.h
+++ linux-2.6/include/linux/early_res.h
@@ -5,15 +5,18 @@
extern void reserve_early(u64 start, u64 end, char *name);
extern void reserve_early_overlap_ok(u64 start, u64 end, char *name);
extern void free_early(u64 start, u64 end);
-void free_early_partial(u64 start, u64 end);
extern void early_res_to_bootmem(u64 start, u64 end);
-void reserve_early_without_check(u64 start, u64 end, char *name);
u64 find_early_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
u64 size, u64 align);
u64 find_early_area_size(u64 ei_start, u64 ei_last, u64 start,
u64 *sizep, u64 align);
-u64 find_fw_memmap_area(u64 start, u64 end, u64 size, u64 align);
+u64 find_lmb_area(u64 start, u64 end, u64 size, u64 align);
+u64 find_lmb_area_size(u64 start, u64 *sizep, u64 align);
+u64 find_lmb_area_node(int nid, u64 start, u64 end, u64 size, u64 align);
+void lmb_register_active_regions(int nid, unsigned long start_pfn,
+ unsigned long last_pfn);
+u64 lmb_hole_size(u64 start, u64 end);
u64 get_max_mapped(void);
#include <linux/range.h>
int get_free_all_memory_range(struct range **rangep, int nodeid);
Index: linux-2.6/kernel/early_res.c
===================================================================
--- linux-2.6.orig/kernel/early_res.c
+++ linux-2.6/kernel/early_res.c
@@ -6,353 +6,82 @@
-}
-
-/*
- * Most early reservations come here.
- *
- * We first have drop_overlaps_that_are_ok() drop any pre-existing
- * 'overlap_ok' ranges, so that we can then reserve this memory
- * range without risk of panic'ing on an overlapping overlap_ok
- * early reservation.
- */
-void __init reserve_early(u64 start, u64 end, char *name)
-{
- if (start >= end)
- return;
-
- __check_and_double_early_res(start, end);
+ memcpy(&new[0], &old[0], sizeof(struct lmb_property) * rgnsz);
+ memset(&new[rgnsz], 0, sizeof(struct lmb_property) * rgnsz);
- drop_overlaps_that_are_ok(start, end);
- __reserve_early(start, end, name, 0);
+ memset(&old[0], 0, sizeof(struct lmb_property) * rgnsz);
+ type->region = new;
+ type->region_array_size = rgnsz * 2;
+ printk(KERN_DEBUG "lmb.reserved.region array is doubled to %ld at [%llx - %llx]\n",
+ type->region_array_size, mem, mem + size - 1);
+ lmb_reserve(mem, sizeof(struct lmb_property) * rgnsz * 2);
+ if (old != lmb_reserved_region)
+ lmb_free(__pa(old), sizeof(struct lmb_property) * rgnsz);
}
-void __init reserve_early_without_check(u64 start, u64 end, char *name)
+void __init reserve_early(u64 start, u64 end, char *name)
{
- struct early_res *r;
-
if (start >= end)
return;
__check_and_double_early_res(start, end);
+ /* keep punching hole, could run out of slots too */
+ __check_and_double_early_res(start, end);
- drop_range_partial(i, start, end);
- goto try_next;
+ lmb_free(start, end - start);
}
#ifdef CONFIG_NO_BOOTMEM
@@ -360,50 +89,46 @@ static void __init subtract_early_res(st
{
int i, count;
u64 final_start, final_end;
- int idx = 0;
- count = 0;
- for (i = 0; i < max_early_res && early_res[i].end; i++)
- count++;
-
- /* need to skip first one ?*/
- if (early_res != early_res_x)
- idx = 1;
+ /*take out region array at first*/
+ if (lmb.reserved.region != lmb_reserved_region)
+ lmb_free(__pa(lmb.reserved.region), sizeof(struct lmb_property) * lmb.reserved.region_array_size);
+
+ count = lmb.reserved.cnt;
#define DEBUG_PRINT_EARLY_RES 1
#if DEBUG_PRINT_EARLY_RES
printk(KERN_INFO "Subtract (%d early reservations)\n", count);
#endif
- for (i = idx; i < count; i++) {
- struct early_res *r = &early_res[i];
+
+ for (i = 0; i < count; i++) {
+ struct lmb_property *r = &lmb.reserved.region[i];
#if DEBUG_PRINT_EARLY_RES
- printk(KERN_INFO " #%d [%010llx - %010llx] %15s\n", i,
- r->start, r->end, r->name);
+ printk(KERN_INFO " #%d [%010llx - %010llx]\n", i,
+ r->base, r->base + r->size);
#endif
- final_start = PFN_DOWN(r->start);
- final_end = PFN_UP(r->end);
+ final_start = PFN_DOWN(r->base);
+ final_end = PFN_UP(r->base + r->size);
if (final_start >= final_end)
continue;
subtract_range(range, az, final_start, final_end);
}
-
+ /* put region array back */
+ if (lmb.reserved.region != lmb_reserved_region)
+ lmb_reserve(__pa(lmb.reserved.region), sizeof(struct lmb_property) * lmb.reserved.region_array_size);
}
int __init get_free_all_memory_range(struct range **rangep, int nodeid)
{
- int i, count;
+ int count;
u64 start = 0, end;
u64 size;
u64 mem;
struct range *range;
int nr_range;
- count = 0;
- for (i = 0; i < max_early_res && early_res[i].end; i++)
- count++;
-
- count *= 2;
+ count = lmb.reserved.cnt * 2;
size = sizeof(struct range) * count;
end = get_max_mapped();
@@ -411,12 +136,15 @@ int __init get_free_all_memory_range(str
if (end > (MAX_DMA32_PFN << PAGE_SHIFT))
start = MAX_DMA32_PFN << PAGE_SHIFT;
#endif
- mem = find_fw_memmap_area(start, end, size, sizeof(struct range));
+ mem = find_lmb_area(start, end, size, sizeof(struct range));
if (mem == -1ULL)
panic("can not find more space for range free");
range = __va(mem);
- /* use early_node_map[] and early_res to get range array at first */
+ /*
+ * use early_node_map[] and lmb.reserved.region to get range array
+ * at first
+ */
memset(range, 0, size);
nr_range = 0;
@@ -430,10 +158,10 @@ int __init get_free_all_memory_range(str
/* need to clear it ? */
if (nodeid == MAX_NUMNODES) {
- memset(&early_res[0], 0,
- sizeof(struct early_res) * max_early_res);
- early_res = NULL;
- max_early_res = 0;
+ memset(&lmb.reserved.region[0], 0, sizeof(struct lmb_property) * lmb.reserved.region_array_size);
+ lmb.reserved.region = NULL;
+ lmb.reserved.region_array_size = 0;
+ lmb.reserved.cnt = 0;
}
*rangep = range;
@@ -444,24 +172,20 @@ void __init early_res_to_bootmem(u64 sta
{
int i, count;
u64 final_start, final_end;
- int idx = 0;
- count = 0;
- for (i = 0; i < max_early_res && early_res[i].end; i++)
- count++;
-
- /* need to skip first one ?*/
- if (early_res != early_res_x)
- idx = 1;
-
- printk(KERN_INFO "(%d/%d early reservations) ==> bootmem [%010llx - %010llx]\n",
- count - idx, max_early_res, start, end);
- for (i = idx; i < count; i++) {
- struct early_res *r = &early_res[i];
- printk(KERN_INFO " #%d [%010llx - %010llx] %16s", i,
- r->start, r->end, r->name);
- final_start = max(start, r->start);
- final_end = min(end, r->end);
+ /*take out region array */
+ if (lmb.reserved.region != lmb_reserved_region)
+ lmb_free(__pa(lmb.reserved.region), sizeof(struct lmb_property) * lmb.reserved.region_array_size);
+
+ count = lmb.reserved.cnt;
+ printk(KERN_INFO "(%d early reservations) ==> bootmem [%010llx - %010llx]\n",
+ count, start, end);
+ for (i = 0; i < count; i++) {
+ struct lmb_property *r = &lmb.reserved.region[i];
+ printk(KERN_INFO " #%d [%010llx - %010llx] ", i,
+ r->base, r->base + r->size);
+ final_start = max(start, r->base);
+ final_end = min(end, r->base + r->size);
if (final_start >= final_end) {
printk(KERN_CONT "\n");
continue;
@@ -471,26 +195,40 @@ void __init early_res_to_bootmem(u64 sta
reserve_bootmem_generic(final_start, final_end - final_start,
BOOTMEM_DEFAULT);
}
- /* clear them */
- memset(&early_res[0], 0, sizeof(struct early_res) * max_early_res);
- early_res = NULL;
- max_early_res = 0;
- early_res_count = 0;
+ /* clear them to avoid misuse */
+ memset(&lmb.reserved.region[0], 0, sizeof(struct lmb_property) * lmb.reserved.region_array_size);
+ lmb.reserved.region = NULL;
+ lmb.reserved.region_array_size = 0;
+ lmb.reserved.cnt = 0;
}
#endif
@@ -506,20 +244,20 @@ static inline int __init bad_addr_size(u
int changed = 0;
again:
last = addr + size;
- for (i = 0; i < max_early_res && early_res[i].end; i++) {
- struct early_res *r = &early_res[i];
- if (last > r->start && addr < r->start) {
- size = r->start - addr;
+ for (i = 0; i < lmb.reserved.cnt && lmb.reserved.region[i].size; i++) {
+ struct lmb_property *r = &lmb.reserved.region[i];
+ if (last > r->base && addr < r->base) {
+ size = r->base - addr;
changed = 1;
goto again;
}
- if (last > r->end && addr < r->end) {
- addr = round_up(r->end, align);
+ if (last > (r->base + r->size) && addr < (r->base + r->size)) {
+ addr = round_up(r->base + r->size, align);
size = last - addr;
changed = 1;
goto again;
}
- if (last <= r->end && addr >= r->start) {
+ if (last <= (r->base + r->size) && addr >= r->base) {
(*sizep)++;
return 0;
}
@@ -531,13 +269,8 @@ again:
return changed;
}
-/*
- * Find a free area with specified alignment in a specific range.
- * only with the area.between start to end is active range from early_node_map
- * so they are good as RAM
- */
u64 __init find_early_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
- u64 size, u64 align)
+ u64 size, u64 align)
{
u64 addr, last;
@@ -582,3 +315,130 @@ u64 __init find_early_area_size(u64 ei_s
Index: linux-2.6/mm/page_alloc.c
===================================================================
--- linux-2.6.orig/mm/page_alloc.c
+++ linux-2.6/mm/page_alloc.c
@@ -3451,7 +3451,7 @@ void * __init __alloc_memory_core_early(
ptr = phys_to_virt(addr);
memset(ptr, 0, size);
- reserve_early_without_check(addr, addr + size, "BOOTMEM");
+ reserve_early(addr, addr + size, "BOOTMEM");
return ptr;
}
#endif
Index: linux-2.6/mm/sparse-vmemmap.c
===================================================================
--- linux-2.6.orig/mm/sparse-vmemmap.c
+++ linux-2.6/mm/sparse-vmemmap.c
@@ -225,8 +225,8 @@ void __init sparse_mem_maps_populate_nod
char name[15];
snprintf(name, sizeof(name), "MEMMAP %d", nodeid);
- reserve_early_without_check(__pa(vmemmap_buf_start),
- __pa(vmemmap_buf), name);
+ reserve_early(__pa(vmemmap_buf_start),
+ __pa(vmemmap_buf), name);
}
#else
free_bootmem(__pa(vmemmap_buf), vmemmap_buf_end - vmemmap_buf);
Index: linux-2.6/arch/x86/include/asm/lmb.h
===================================================================
--- /dev/null
+++ linux-2.6/arch/x86/include/asm/lmb.h
@@ -0,0 +1,8 @@
+#ifndef _X86_LMB_H
+#define _X86_LMB_H
+
+#define LMB_DBG(fmt...) printk(fmt)
+
+#define LMB_REAL_LIMIT 0
+
+#endif
> void __init lmb_init(void)
> {
> + lmb.memory.region = lmb_memory_region;
> + lmb.memory.region_array_size = ARRAY_SIZE(lmb_memory_region);
> + lmb.reserved.region = lmb_reserved_region;
> + lmb.reserved.region_array_size = ARRAY_SIZE(lmb_reserved_region);
> +
That's rather unreadable and has random whitespace noise.
Should be something like:
lmb.memory.region = lmb_memory_region;
lmb.memory.region_array_size = ARRAY_SIZE(lmb_memory_region);
lmb.reserved.region = lmb_reserved_region;
lmb.reserved.region_array_size = ARRAY_SIZE(lmb_reserved_region);
also, i'd suggest shortening region_array_size to region_size (we know it's an
array), so it would become:
lmb.memory.region = lmb_memory_region;
lmb.memory.region_size = ARRAY_SIZE(lmb_memory_region);
lmb.reserved.region = lmb_reserved_region;
lmb.reserved.region_size = ARRAY_SIZE(lmb_reserved_region);
> - if (rgn->cnt >= MAX_LMB_REGIONS)
> + if (rgn->cnt >= (rgn->region_array_size - 1))
> return -1;
'x >= y-1' is equivalent to 'x > y', so that should be:
if (rgn->cnt > rgn->region_size)
Ingo
> On Mon, 2010-03-22 at 21:57 +0100, Ingo Molnar wrote:
> >
> > > You use that argument ONE MORE FUCKING TIME and you'll end up in my killfile
> > > with an auto-NACK reply to anything that looks like a patch from you.
> >
> > Does this mean you disagree with that? (I think it's pretty factual; last i
> > checked, the usage stats of devel kernels were somewhere around 99.7%.)
>
> I disagree with that being a relevant argument in the technical discussion
> on the relative merits of two implementations of a given facility. I also
> disagree with your numbers: if you talk about deployment, I would be very,
> very surprised if ARM wasn't close to on-par with x86.
As an upstream maintainer i mainly care about upstream kernel contributions.
These contributions have three main forms:
- patches i get against the latest upstream
- on-lkml review/analysis done on those patches
- test/bug/regression reports i get against the latest upstream (either directly
on lkml or via kerneloops.org or bugzilla.kernel.org)
So i weigh the architectures based on that input.
Since you mentioned ARM - here are the Git contribution stats. In the 5
years for which we have kernel Git history, there have been 1080 commits to
kernel/sched.c. Amongst those 1080 commits i could find only a _single commit_
(a minor fix) related to or contributed by anyone doing ARM development!
To be on the safe side, let's assume that i missed many commits and up that
count tenfold: 10 commits. I.e. the 'weight of ARM', when it comes to
kernel/sched.c, is still less than 1%.
'Millions of ARM units' alone means little to me if it does not translate
into actual upstream kernel contributions. Many of those 'millions of units'
are walled off from kernel contributions: the users don't even know they are
running Linux. They are not linked to kerneloops.org and don't produce bugzilla
bug reports. They do finance Linux developers by proxy - but as far as the
upstream kernel is concerned, they only exist to the extent that they finance
kernel developers who care about it.
Let's look at a counter-example: Sparc64. There's literally just a handful of
Sparc64 'units' that run Linux, yet the weight of the arch is much higher -
due to the well-known, highly productive kernel contributor who is using that
architecture. I have seen about 10 times more scheduler contributions [~15
commits] from that single-unit Sparc64 angle than from the millions of ARM
units! (And davem isn't even doing scheduler development per se - he's mostly
doing drive-by fixes and improvements with no particular focus on the
scheduler.)
Or let's look at an architecture that during its development had a physical
unit count of _zero_: SGI UV. It was only running in simulators for a year,
but it sure resulted in dozens and dozens of useful patches that extended
Linux's scalability reach. So did SGI UV matter, despite having had a zero
unit count? Heck it did ...
I singled out kernel/sched.c, but there's a very similar picture of
contribution weights when it comes to other areas i co-maintain: lockdep,
perf, tracing, etc.
So if you want your architecture to matter to me, the rule is very simple:
contribute, contribute, contribute, and stop whining. If you don't contribute,
frankly, you don't really exist to me. On the other hand, if you are actively
contributing while your architecture only exists on paper, it already starts
mattering to me.
I'm really that simple.
Thanks,
Ingo
Is there a reason why this needs to be exported ?
Thanks,
tglx
@@ -26,7 +26,8 @@ struct lmb_property {
struct lmb_region {
unsigned long cnt;
u64 size;
- struct lmb_property region[MAX_LMB_REGIONS+1];
+ struct lmb_property *region;
+ unsigned long region_array_size;
};
cnt is the number of slots used.
size is the total memory size of the regions.
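i.e., with the fields annotated (comments are a sketch of the meanings stated here, not from the patch):

struct lmb_region {
	unsigned long cnt;			/* number of slots used */
	u64 size;				/* total size of all regions */
	struct lmb_property *region;		/* points at a static array at boot */
	unsigned long region_array_size;	/* capacity of the region array */
};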
can we use rgn_sz for the region array size?
YH
Later, if those functions are moved to lmb.c, we can make lmb_reserved_region static.
The same goes for lmb_memory_region, which will be used by add_lmb_memory() later.
YH
I would strongly suggest sending the next (-v4) version to lkml only
if it's 100% clean and complete in every known regard (with a good split-up and
explanations in every commit), so that people don't repeat review work or review
an unclean aspect that will go away in the next iteration.
Thanks,
Ingo
> can we use rgn_sz for the region array size?
>
No.
And while at it, properly documented :-) I also wouldn't mind a
reasonably clear explanation in the changeset comment as to why
they are necessary for x86.
To be honest, my #1 grief so far with this entire patch set is
Yinghai's apparent inability to write anything resembling an
explanation. All we get is keywords, bullet points and half
sentences, and I admit I have a very, very hard time extracting
meaning out of anything he's been writing so far.
Cheers,
Ben.
Yup. As I said, though, I'd also like a somewhat better level of explanation
and documentation (and I know the existing LMB interfaces are -not-
documented, but let's not add more undocumented ones, shall we ? :-)
> I think we want to work towards the end result where we dont have
> bootmem.c anymore. I.e. a modern LMB architecture should generally
> not make use of bootmem at all.
bootmem has one advantage over LMB, I think, in that LMB has this
annoying static array of regions, which is prone to being either too big
(wasted space) or too small.
We might want to consider a slightly smarter approach there if we are
going to replace bootmem.
I thought one possibility would be to have LMB regions become lists rather
than arrays, so that the static storage only needs to cover as much as
is needed during really early boot (and we could probably still move the
BSS top point on some archs to dynamically make more ... actually, we
could be smart arses and use LMB itself to allocate more LMB list heads if we
are reaching the table limit :-)
> We could do that switch on x86 straight away, and make
> CONFIG_NO_BOOTMEM a
> default-y option, hm? We could also hide the interactivity behind
> CONFIG_DEBUG_VM or so - and eliminate it altogether later on.
>
> We should also switch around the flag and turn it into CONFIG_BOOTMEM.
Cheers,
Ben.
Actually, what about this:
LMB entries are linked-listed. The array is just storage for those entry
"heads".
The initial static array only needs to be big enough for very, very early
platform-specific kernel bits and pieces, so it could even be sized by a
Kconfig option. Or it could just use a klimit-moving trick to pick up a
page right after the BSS, but that may need to be arch-specific.
lmb_init() queues all the entries from the initial array on a freelist.
lmb_alloc() and lmb_reserve() just pop entries from that freelist to
populate the two main linked lists (memory and reserved).
When something tries to dequeue the last freelist entry, then under
the hood LMB uses it instead to allocate a new block of LMB entries
that gets added to the freelist.
We never free blocks of LMB entries.
That way, we can fine-tune the static array to be as small as we can
realistically make it, and we have no boundary limitations on the
number of entries in either the memory list or the reserved list.
I'm a bit too flat out right now to write code, but if there's no
objection, I might give that a go either later this week or next week
and see if I can replace bootmem on powerpc.
Cheers,
Ben.
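A minimal sketch of the freelist scheme described above; the entry type, pool size, and helper names are all hypothetical (only lmb_alloc() is an existing LMB call):

struct lmb_entry {
	u64 base, size;
	struct lmb_entry *next;
};

/* static storage: only has to cover really early boot */
static struct lmb_entry lmb_bootstrap[16] __initdata;
static struct lmb_entry *lmb_freelist __initdata;

static void __init lmb_put_entry(struct lmb_entry *e)
{
	e->next = lmb_freelist;
	lmb_freelist = e;
}

/* lmb_init() would queue all the bootstrap entries on the freelist */
static void __init lmb_freelist_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(lmb_bootstrap); i++)
		lmb_put_entry(&lmb_bootstrap[i]);
}

/*
 * Pop an entry for the memory or reserved list.  When we are about to
 * hand out the last one, use LMB itself to allocate a fresh block of
 * entries first; the one remaining entry is what lmb_alloc()'s own
 * bookkeeping can consume while we refill.  Blocks are never freed.
 */
static struct lmb_entry * __init lmb_get_entry(void)
{
	struct lmb_entry *e;

	if (lmb_freelist && !lmb_freelist->next) {
		struct lmb_entry *block =
			__va(lmb_alloc(PAGE_SIZE, sizeof(struct lmb_entry)));
		unsigned int i;

		for (i = 0; i < PAGE_SIZE / sizeof(struct lmb_entry); i++)
			lmb_put_entry(&block[i]);
	}
	e = lmb_freelist;
	lmb_freelist = e->next;
	return e;
}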
I dislike those arrays anyways. See my other message about turning them
into lists, which would get rid of capacity constraints completely. What
do you think ?
Cheers,
Ben.
So basically, what you are saying is that, totally regardless of how
much an architecture is actually used in the field, if the architecture
maintainer for it doesn't also do your work and contribute to every
single one of your pet projects (or, if everybody is like you, to every
single other subsystem in the kernel), then that architecture is irrelevant
to technical considerations and choices regarding any design decision made
in the core kernel ?
Sorry Ingo, but that's just arrogant bullshit. So stop trying to win
this useless argument; all you manage to do is anger people and make us
even less willing to actually work with you.
Cheers,
Ben.
Also add nr_regions in lmb_region to track the region array size.
-v3: separate the lmb core change into its own patch
-v4: according to Ingo, change '>= x - 1' to '> x'
change region_array_size to nr_regions
Signed-off-by: Yinghai Lu <yin...@kernel.org>
---
include/linux/lmb.h | 5 ++++-
lib/lmb.c | 9 ++++++++-
2 files changed, 12 insertions(+), 2 deletions(-)
Index: linux-2.6/include/linux/lmb.h
===================================================================
--- linux-2.6.orig/include/linux/lmb.h
+++ linux-2.6/include/linux/lmb.h
@@ -26,7 +26,8 @@ struct lmb_property {
struct lmb_region {
unsigned long cnt;
u64 size;
- struct lmb_property region[MAX_LMB_REGIONS+1];
+ struct lmb_property *region;
+ unsigned long nr_regions;
};
struct lmb {
@@ -37,6 +38,8 @@ struct lmb {
};
extern struct lmb lmb;
+extern struct lmb_property lmb_memory_region[MAX_LMB_REGIONS + 1];
+extern struct lmb_property lmb_reserved_region[MAX_LMB_REGIONS + 1];
extern void __init lmb_init(void);
extern void __init lmb_analyze(void);
Index: linux-2.6/lib/lmb.c
===================================================================
--- linux-2.6.orig/lib/lmb.c
+++ linux-2.6/lib/lmb.c
@@ -18,6 +18,8 @@
#define LMB_ALLOC_ANYWHERE 0
struct lmb lmb;
+struct lmb_property lmb_memory_region[MAX_LMB_REGIONS + 1];
+struct lmb_property lmb_reserved_region[MAX_LMB_REGIONS + 1];
static int lmb_debug;
@@ -106,6 +108,11 @@ static void lmb_coalesce_regions(struct
void __init lmb_init(void)
{
+ lmb.memory.region = lmb_memory_region;
+ lmb.reserved.region = lmb_reserved_region;
+ lmb.memory.nr_regions = ARRAY_SIZE(lmb_memory_region);
+ lmb.reserved.nr_regions = ARRAY_SIZE(lmb_reserved_region);
+
/* Create a dummy zero size LMB which will get coalesced away later.
* This simplifies the lmb_add() code below...
*/
@@ -169,7 +176,7 @@ static long lmb_add_region(struct lmb_re
if (coalesced)
return coalesced;
- if (rgn->cnt >= MAX_LMB_REGIONS)
+ if (rgn->cnt > rgn->nr_regions)
return -1;
/* Couldn't coalesce the LMB, so add it to the sorted table. */
Still keep kernel/early_res.c for the extension; those files should move to lib/lmb.c later.
In early_res.c:
1. change find_e820_area_xxx to find_lmb_area_xxx
2. change e820_register_active_regions to lmb_register_active_regions
3. reserve_early will call lmb_reserve directly
4. free_early will call lmb_free directly
5. remove functions that were only used by the old reserve_early and free_early
6. get_free_all_memory_range uses lmb.reserved
7. early_res_to_bootmem uses lmb.reserved
8. add fill_lmb_memory() to fill lmb.memory according to the e820 RAM entries
-v2: fix NO_BOOTMEM hang with printk
-v4: add add_lmb_memory(), which can increase the lmb.memory.region size
change region_array_size to nr_regions
make sure find_lmb_area<_size> calls happen after fill_lmb_memory()
todo:
1. make early_memtest depend on early_res and move it to mm/
2. make all lmb users use the extended early_res/nobootmem
3. merge lmb.c and early_res.c and move them to mm/
4. make other platforms use lmb/early_res/nobootmem
5. remove BOOTMEM-related code
Signed-off-by: Yinghai Lu <yin...@kernel.org>
---
arch/x86/Kconfig | 1
arch/x86/include/asm/e820.h | 38 +-
arch/x86/include/asm/lmb.h | 8
arch/x86/kernel/check.c | 14
arch/x86/kernel/e820.c | 171 +----------
arch/x86/kernel/head.c | 2
arch/x86/kernel/head32.c | 5
arch/x86/kernel/head64.c | 2
arch/x86/kernel/setup.c | 9
arch/x86/kernel/setup_percpu.c | 6
arch/x86/mm/memtest.c | 5
arch/x86/mm/numa_64.c | 4
include/linux/early_res.h | 19 -
kernel/early_res.c | 631 ++++++++++++++++-------------------------
mm/page_alloc.c | 2
mm/sparse-vmemmap.c | 11
16 files changed, 344 insertions(+), 584 deletions(-)
Index: linux-2.6/arch/x86/Kconfig
===================================================================
--- linux-2.6.orig/arch/x86/Kconfig
+++ linux-2.6/arch/x86/Kconfig
@@ -27,6 +27,7 @@ config X86
select HAVE_PERF_EVENTS if (!M386 && !M486)
select HAVE_IOREMAP_PROT
select HAVE_KPROBES
+ select HAVE_LMB
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_WANT_FRAME_POINTERS
select HAVE_DMA_ATTRS
Index: linux-2.6/arch/x86/include/asm/e820.h
===================================================================
--- linux-2.6.orig/arch/x86/include/asm/e820.h
+++ linux-2.6/arch/x86/include/asm/e820.h
@@ -111,24 +111,30 @@ static inline void early_memtest(unsigne
}
#endif
-extern unsigned long end_user_pfn;
-
-extern u64 find_e820_area(u64 start, u64 end, u64 size, u64 align);
-extern u64 find_e820_area_size(u64 start, u64 *sizep, u64 align);
-u64 find_e820_area_node(int nid, u64 start, u64 end, u64 size, u64 align);
-extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
-#include <linux/early_res.h>
-
extern unsigned long e820_end_of_ram_pfn(void);
extern unsigned long e820_end_of_low_ram_pfn(void);
-extern int e820_find_active_region(const struct e820entry *ei,
- unsigned long start_pfn,
- unsigned long last_pfn,
- unsigned long *ei_startpfn,
- unsigned long *ei_endpfn);
-extern void e820_register_active_regions(int nid, unsigned long start_pfn,
- unsigned long end_pfn);
-extern u64 e820_hole_size(u64 start, u64 end);
+#include <linux/early_res.h>
+extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
+
+/*
+ * next three functions will be removed, esp find_e820_area()
+ * can not be used before fill_lmb_memory()
+ */
+static inline u64 find_e820_area(u64 start, u64 end, u64 size, u64 align)
+{
+ return find_lmb_area(start, end, size, align);
+}
+static inline void e820_register_active_regions(int nid, unsigned long start_pfn, unsigned long end_pfn)
+{
+ lmb_register_active_regions(nid, start_pfn, end_pfn);
+}
+static inline u64 e820_hole_size(u64 start, u64 end)
+{
+ return lmb_hole_size(start, end);
+}
+
+void init_lmb_memory(void);
+void fill_lmb_memory(void);
extern void finish_e820_parsing(void);
extern void e820_reserve_resources(void);
extern void e820_reserve_resources_late(void);
Index: linux-2.6/arch/x86/kernel/e820.c
===================================================================
--- linux-2.6.orig/arch/x86/kernel/e820.c
+++ linux-2.6/arch/x86/kernel/e820.c
@@ -15,6 +15,7 @@
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/firmware-map.h>
+#include <linux/lmb.h>
#include <asm/e820.h>
#include <asm/proto.h>
@@ -726,37 +727,6 @@ static int __init e820_mark_nvs_memory(v
@@ -765,50 +735,9 @@ u64 __init get_max_mapped(void)
- * pre allocated 4k and reserved it in e820
+ * pre allocated 4k and reserved it in lmb and e820_saved
*/
u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
{
@@ -817,7 +746,7 @@ u64 __init early_reserve_e820(u64 startt
u64 start;
for (start = startt; ; start += size) {
- start = find_e820_area_size(start, &size, align);
+ start = find_lmb_area_size(start, &size, align);
if (!(start + 1))
return 0;
if (size >= sizet)
@@ -834,10 +763,9 @@ u64 __init early_reserve_e820(u64 startt
addr = round_down(start + size - sizet, align);
if (addr < start)
return 0;
- e820_update_range(addr, sizet, E820_RAM, E820_RESERVED);
+ reserve_early(addr, addr + sizet, "new next");
e820_update_range_saved(addr, sizet, E820_RAM, E820_RESERVED);
- printk(KERN_INFO "update e820 for early_reserve_e820\n");
- update_e820();
+ printk(KERN_INFO "update e820_saved for early_reserve_e820\n");
update_e820_saved();
return addr;
@@ -899,74 +827,6 @@ unsigned long __init e820_end_of_low_ram
@@ -1219,3 +1079,24 @@ void __init setup_memory_map(void)
printk(KERN_INFO "BIOS-provided physical RAM map:\n");
e820_print_map(who);
}
+
+void __init init_lmb_memory(void)
+{
+ lmb_init();
+}
+
+void __init fill_lmb_memory(void)
+{
+ int i;
+
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+
+ if (ei->type != E820_RAM)
+ continue;
+ add_lmb_memory(ei->addr, ei->addr + ei->size);
+ }
+
+ lmb_analyze();
+ lmb_dump_all();
+}
Index: linux-2.6/arch/x86/kernel/head.c
===================================================================
--- linux-2.6.orig/arch/x86/kernel/head.c
+++ linux-2.6/arch/x86/kernel/head.c
@@ -51,5 +51,5 @@ void __init reserve_ebda_region(void)
lowmem = 0x9f000;
/* reserve all memory between lowmem and the 1MB mark */
- reserve_early_overlap_ok(lowmem, 0x100000, "BIOS reserved");
+ reserve_early(lowmem, 0x100000, "BIOS reserved");
}
Index: linux-2.6/arch/x86/kernel/head32.c
===================================================================
--- linux-2.6.orig/arch/x86/kernel/head32.c
+++ linux-2.6/arch/x86/kernel/head32.c
@@ -29,14 +29,15 @@ static void __init i386_default_early_se
void __init i386_start_kernel(void)
{
+ init_lmb_memory();
+
#ifdef CONFIG_X86_TRAMPOLINE
/*
* But first pinch a few for the stack/trampoline stuff
* FIXME: Don't need the extra page at 4K, but need to fix
* trampoline before removing it. (see the GDT stuff)
*/
- reserve_early_overlap_ok(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE,
- "EX TRAMPOLINE");
+ reserve_early(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE");
#endif
reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
Index: linux-2.6/arch/x86/kernel/head64.c
===================================================================
--- linux-2.6.orig/arch/x86/kernel/head64.c
+++ linux-2.6/arch/x86/kernel/head64.c
@@ -96,6 +96,8 @@ void __init x86_64_start_kernel(char * r
void __init x86_64_start_reservations(char *real_mode_data)
{
+ init_lmb_memory();
+
copy_bootdata(__va(real_mode_data));
reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
Index: linux-2.6/arch/x86/kernel/setup.c
===================================================================
--- linux-2.6.orig/arch/x86/kernel/setup.c
+++ linux-2.6/arch/x86/kernel/setup.c
@@ -868,8 +868,6 @@ void __init setup_arch(char **cmdline_p)
*/
max_pfn = e820_end_of_ram_pfn();
- /* preallocate 4k for mptable mpc */
- early_reserve_e820_mpc_new();
/* update e820 for memory not covered by WB MTRRs */
mtrr_bp_init();
if (mtrr_trim_uncached_memory(max_pfn))
@@ -894,6 +892,11 @@ void __init setup_arch(char **cmdline_p)
max_pfn_mapped = KERNEL_IMAGE_SIZE >> PAGE_SHIFT;
#endif
+ fill_lmb_memory();
+
+ /* preallocate 4k for mptable mpc */
+ early_reserve_e820_mpc_new();
+
#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
setup_bios_corruption_check();
#endif
@@ -970,7 +973,7 @@ void __init setup_arch(char **cmdline_p)
initmem_init(0, max_pfn, acpi, k8);
#ifndef CONFIG_NO_BOOTMEM
- early_res_to_bootmem(0, max_low_pfn<<PAGE_SHIFT);
+ lmb_reserved_to_bootmem(0, max_low_pfn<<PAGE_SHIFT);
#endif
dma32_reserve_bootmem();
Index: linux-2.6/arch/x86/kernel/setup_percpu.c
===================================================================
--- linux-2.6.orig/arch/x86/kernel/setup_percpu.c
+++ linux-2.6/arch/x86/kernel/setup_percpu.c
@@ -137,13 +137,7 @@ static void * __init pcpu_fc_alloc(unsig
static void __init pcpu_fc_free(void *ptr, size_t size)
{
-#ifdef CONFIG_NO_BOOTMEM
- u64 start = __pa(ptr);
- u64 end = start + size;
- free_early_partial(start, end);
-#else
free_bootmem(__pa(ptr), size);
-#endif
}
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
Index: linux-2.6/include/linux/early_res.h
===================================================================
--- linux-2.6.orig/include/linux/early_res.h
+++ linux-2.6/include/linux/early_res.h
@@ -3,19 +3,22 @@
#ifdef __KERNEL__
extern void reserve_early(u64 start, u64 end, char *name);
-extern void reserve_early_overlap_ok(u64 start, u64 end, char *name);
extern void free_early(u64 start, u64 end);
-void free_early_partial(u64 start, u64 end);
-extern void early_res_to_bootmem(u64 start, u64 end);
+void lmb_reserved_to_bootmem(u64 start, u64 end);
-void reserve_early_without_check(u64 start, u64 end, char *name);
u64 find_early_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
u64 size, u64 align);
-u64 find_early_area_size(u64 ei_start, u64 ei_last, u64 start,
- u64 *sizep, u64 align);
-u64 find_fw_memmap_area(u64 start, u64 end, u64 size, u64 align);
+
+void add_lmb_memory(u64 start, u64 end);
+
+u64 find_lmb_area(u64 start, u64 end, u64 size, u64 align);
+u64 find_lmb_area_size(u64 start, u64 *sizep, u64 align);
+u64 find_lmb_area_node(int nid, u64 start, u64 end, u64 size, u64 align);
+void lmb_register_active_regions(int nid, unsigned long start_pfn,
+ unsigned long last_pfn);
+u64 lmb_hole_size(u64 start, u64 end);
u64 get_max_mapped(void);
-#include <linux/range.h>
+struct range;
int get_free_all_memory_range(struct range **rangep, int nodeid);
#endif /* __KERNEL__ */
Index: linux-2.6/kernel/early_res.c
===================================================================
--- linux-2.6.orig/kernel/early_res.c
+++ linux-2.6/kernel/early_res.c
@@ -6,404 +6,140 @@
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
+#include <linux/lmb.h>
+#include <linux/range.h>
-
-static void __init __check_and_double_early_res(u64 ex_start, u64 ex_end)
+static void __init __check_and_double_region_array(struct lmb_region *type,
+ struct lmb_property *static_region,
+ u64 ex_start, u64 ex_end)
{
u64 start, end, size, mem;
- struct early_res *new;
+ struct lmb_property *new, *old;
+ unsigned long rgnsz = type->nr_regions;
/* do we have enough slots left ? */
- if ((max_early_res - early_res_count) > max(max_early_res/8, 2))
+ if ((rgnsz - type->cnt) > max_t(unsigned long, rgnsz/8, 2))
return;
+ old = type->region;
/* double it */
mem = -1ULL;
- size = sizeof(struct early_res) * max_early_res * 2;
- if (early_res == early_res_x)
+ size = sizeof(struct lmb_property) * rgnsz * 2;
+ if (old == static_region)
-}
-
-/*
- * Most early reservations come here.
- *
- * We first have drop_overlaps_that_are_ok() drop any pre-existing
- * 'overlap_ok' ranges, so that we can then reserve this memory
- * range without risk of panic'ing on an overlapping overlap_ok
- * early reservation.
- */
-void __init reserve_early(u64 start, u64 end, char *name)
-{
- if (start >= end)
- return;
+ memcpy(&new[0], &old[0], sizeof(struct lmb_property) * rgnsz);
+ memset(&new[rgnsz], 0, sizeof(struct lmb_property) * rgnsz);
- __check_and_double_early_res(start, end);
+ memset(&old[0], 0, sizeof(struct lmb_property) * rgnsz);
+ type->region = new;
+ type->nr_regions = rgnsz * 2;
+ printk(KERN_DEBUG "lmb.reserved.region array is doubled to %ld at [%llx - %llx]\n",
+ type->nr_regions, mem, mem + size - 1);
- drop_overlaps_that_are_ok(start, end);
- __reserve_early(start, end, name, 0);
+ /* reserve new array and free old one */
+ lmb_reserve(mem, sizeof(struct lmb_property) * rgnsz * 2);
+ if (old != static_region)
+ lmb_free(__pa(old), sizeof(struct lmb_property) * rgnsz);
}
-void __init reserve_early_without_check(u64 start, u64 end, char *name)
+void __init add_lmb_memory(u64 start, u64 end)
{
- struct early_res *r;
-
- if (start >= end)
- return;
-
- __check_and_double_early_res(start, end);
-
- r = &early_res[early_res_count];
-
- r->start = start;
- r->end = end;
- r->overlap_ok = 0;
- if (name)
- strncpy(r->name, name, sizeof(r->name) - 1);
- early_res_count++;
+ __check_and_double_region_array(&lmb.memory, &lmb_memory_region[0], start, end);
+ lmb_add(start, end - start);
}
-void __init free_early(u64 start, u64 end)
+void __init reserve_early(u64 start, u64 end, char *name)
{
- struct early_res *r;
- int i;
+ if (start == end)
+ return;
- i = find_overlapped_early(start, end);
- r = &early_res[i];
- if (i >= max_early_res || r->end != end || r->start != start)
- panic("free_early on not reserved area: %llx-%llx!",
- start, end - 1);
+ if (WARN_ONCE(start > end, "reserve_early: wrong range [%#llx, %#llx]\n", start, end))
+ return;
- drop_range(i);
+ __check_and_double_region_array(&lmb.reserved, &lmb_reserved_region[0], start, end);
+ lmb_reserve(start, end - start);
}
-void __init free_early_partial(u64 start, u64 end)
+void __init free_early(u64 start, u64 end)
{
- struct early_res *r;
- int i;
-
if (start == end)
return;
- if (WARN_ONCE(start > end, "free_early_partial: wrong range [%#llx, %#llx]\n", start, end))
- return;
-
-try_next:
- i = find_overlapped_early(start, end);
- if (i >= max_early_res)
- return;
-
- r = &early_res[i];
- /* hole ? */
- if (r->end >= end && r->start <= start) {
- drop_range_partial(i, start, end);
+ if (WARN_ONCE(start > end, "free_early: wrong range [%#llx, %#llx]\n", start, end))
return;
- }
- drop_range_partial(i, start, end);
- goto try_next;
+ /* keep punching hole, could run out of slots too */
+ __check_and_double_region_array(&lmb.reserved, &lmb_reserved_region[0], start, end);
+ lmb_free(start, end - start);
}
#ifdef CONFIG_NO_BOOTMEM
-static void __init subtract_early_res(struct range *range, int az)
+static void __init subtract_lmb_reserved(struct range *range, int az)
{
int i, count;
u64 final_start, final_end;
- int idx = 0;
- count = 0;
- for (i = 0; i < max_early_res && early_res[i].end; i++)
- count++;
-
- /* need to skip first one ?*/
- if (early_res != early_res_x)
- idx = 1;
+ /*take out region array at first*/
+ if (lmb.reserved.region != lmb_reserved_region)
+ lmb_free(__pa(lmb.reserved.region), sizeof(struct lmb_property) * lmb.reserved.nr_regions);
+
+ count = lmb.reserved.cnt;
#define DEBUG_PRINT_EARLY_RES 1
#if DEBUG_PRINT_EARLY_RES
printk(KERN_INFO "Subtract (%d early reservations)\n", count);
#endif
- for (i = idx; i < count; i++) {
- struct early_res *r = &early_res[i];
+
+ for (i = 0; i < count; i++) {
+ struct lmb_property *r = &lmb.reserved.region[i];
#if DEBUG_PRINT_EARLY_RES
- printk(KERN_INFO " #%d [%010llx - %010llx] %15s\n", i,
- r->start, r->end, r->name);
+ printk(KERN_INFO " #%d [%010llx - %010llx]\n", i,
+ r->base, r->base + r->size);
#endif
- final_start = PFN_DOWN(r->start);
- final_end = PFN_UP(r->end);
+ final_start = PFN_DOWN(r->base);
+ final_end = PFN_UP(r->base + r->size);
if (final_start >= final_end)
continue;
subtract_range(range, az, final_start, final_end);
}
-
+ /* put region array back */
+ if (lmb.reserved.region != lmb_reserved_region)
+ lmb_reserve(__pa(lmb.reserved.region), sizeof(struct lmb_property) * lmb.reserved.nr_regions);
}
int __init get_free_all_memory_range(struct range **rangep, int nodeid)
{
- int i, count;
+ int count;
u64 start = 0, end;
u64 size;
u64 mem;
struct range *range;
int nr_range;
- count = 0;
- for (i = 0; i < max_early_res && early_res[i].end; i++)
- count++;
-
- count *= 2;
+ count = lmb.reserved.cnt * 2;
size = sizeof(struct range) * count;
end = get_max_mapped();
@@ -411,12 +147,15 @@ int __init get_free_all_memory_range(str
if (end > (MAX_DMA32_PFN << PAGE_SHIFT))
start = MAX_DMA32_PFN << PAGE_SHIFT;
#endif
- mem = find_fw_memmap_area(start, end, size, sizeof(struct range));
+ mem = find_lmb_area(start, end, size, sizeof(struct range));
if (mem == -1ULL)
panic("can not find more space for range free");
range = __va(mem);
- /* use early_node_map[] and early_res to get range array at first */
+ /*
+ * use early_node_map[] and lmb.reserved.region to get range array
+ * at first
+ */
memset(range, 0, size);
nr_range = 0;
@@ -425,43 +164,39 @@ int __init get_free_all_memory_range(str
#ifdef CONFIG_X86_32
subtract_range(range, count, max_low_pfn, -1ULL);
#endif
- subtract_early_res(range, count);
+ subtract_lmb_reserved(range, count);
nr_range = clean_sort_range(range, count);
/* need to clear it ? */
if (nodeid == MAX_NUMNODES) {
- memset(&early_res[0], 0,
- sizeof(struct early_res) * max_early_res);
- early_res = NULL;
- max_early_res = 0;
+ memset(&lmb.reserved.region[0], 0, sizeof(struct lmb_property) * lmb.reserved.nr_regions);
+ lmb.reserved.region = NULL;
+ lmb.reserved.nr_regions = 0;
+ lmb.reserved.cnt = 0;
}
*rangep = range;
return nr_range;
}
#else
-void __init early_res_to_bootmem(u64 start, u64 end)
+void __init lmb_reserved_to_bootmem(u64 start, u64 end)
{
int i, count;
u64 final_start, final_end;
- int idx = 0;
- count = 0;
- for (i = 0; i < max_early_res && early_res[i].end; i++)
- count++;
-
- /* need to skip first one ?*/
- if (early_res != early_res_x)
- idx = 1;
-
- printk(KERN_INFO "(%d/%d early reservations) ==> bootmem [%010llx - %010llx]\n",
- count - idx, max_early_res, start, end);
- for (i = idx; i < count; i++) {
- struct early_res *r = &early_res[i];
- printk(KERN_INFO " #%d [%010llx - %010llx] %16s", i,
- r->start, r->end, r->name);
- final_start = max(start, r->start);
- final_end = min(end, r->end);
+ /*take out region array */
+ if (lmb.reserved.region != lmb_reserved_region)
+ lmb_free(__pa(lmb.reserved.region), sizeof(struct lmb_property) * lmb.reserved.nr_regions);
+
+ count = lmb.reserved.cnt;
+ printk(KERN_INFO "(%d early reservations) ==> bootmem [%010llx - %010llx]\n",
+ count, start, end);
+ for (i = 0; i < count; i++) {
+ struct lmb_property *r = &lmb.reserved.region[i];
+ printk(KERN_INFO " #%d [%010llx - %010llx] ", i,
+ r->base, r->base + r->size);
+ final_start = max(start, r->base);
+ final_end = min(end, r->base + r->size);
if (final_start >= final_end) {
printk(KERN_CONT "\n");
continue;
@@ -471,57 +206,71 @@ void __init early_res_to_bootmem(u64 sta
reserve_bootmem_generic(final_start, final_end - final_start,
BOOTMEM_DEFAULT);
}
- /* clear them */
- memset(&early_res[0], 0, sizeof(struct early_res) * max_early_res);
- early_res = NULL;
- max_early_res = 0;
- early_res_count = 0;
+ /* clear them to avoid misuse */
+ memset(&lmb.reserved.region[0], 0, sizeof(struct lmb_property) * lmb.reserved.nr_regions);
+ lmb.reserved.region = NULL;
+ lmb.reserved.nr_regions = 0;
+ lmb.reserved.cnt = 0;
}
#endif
+static int __init find_overlapped_early(u64 start, u64 end)
+{
+ int i;
+ struct lmb_property *r;
+
+ for (i = 0; i < lmb.reserved.cnt && lmb.reserved.region[i].size; i++) {
+ r = &lmb.reserved.region[i];
+ if (end > r->base && start < (r->base + r->size))
+ break;
+ }
+
+ return i;
+}
+
/* Check for already reserved areas */
-static inline int __init bad_addr(u64 *addrp, u64 size, u64 align)
+static inline bool __init bad_addr(u64 *addrp, u64 size, u64 align)
{
int i;
u64 addr = *addrp;
- int changed = 0;
- struct early_res *r;
+ bool changed = false;
+ struct lmb_property *r;
again:
i = find_overlapped_early(addr, addr + size);
- r = &early_res[i];
- if (i < max_early_res && r->end) {
- *addrp = addr = round_up(r->end, align);
- changed = 1;
+ r = &lmb.reserved.region[i];
+ if (i < lmb.reserved.cnt && r->size) {
+ *addrp = addr = round_up(r->base + r->size, align);
+ changed = true;
goto again;
}
return changed;
}
/* Check for already reserved areas */
-static inline int __init bad_addr_size(u64 *addrp, u64 *sizep, u64 align)
+static inline bool __init bad_addr_size(u64 *addrp, u64 *sizep, u64 align)
{
int i;
u64 addr = *addrp, last;
u64 size = *sizep;
- int changed = 0;
+ bool changed = false;
again:
last = addr + size;
- for (i = 0; i < max_early_res && early_res[i].end; i++) {
- struct early_res *r = &early_res[i];
- if (last > r->start && addr < r->start) {
- size = r->start - addr;
- changed = 1;
+ for (i = 0; i < lmb.reserved.cnt && lmb.reserved.region[i].size; i++) {
+ struct lmb_property *r = &lmb.reserved.region[i];
+ if (last > r->base && addr < r->base) {
+ size = r->base - addr;
+ changed = true;
goto again;
}
- if (last > r->end && addr < r->end) {
- addr = round_up(r->end, align);
+ if (last > (r->base + r->size) && addr < (r->base + r->size)) {
+ addr = round_up(r->base + r->size, align);
size = last - addr;
- changed = 1;
+ changed = true;
goto again;
}
- if (last <= r->end && addr >= r->start) {
+ if (last <= (r->base + r->size) && addr >= r->base) {
(*sizep)++;
- return 0;
+ return false;
}
}
if (changed) {
@@ -531,13 +280,8 @@ again:
return changed;
}
-/*
- * Find a free area with specified alignment in a specific range.
- * only with the area.between start to end is active range from early_node_map
- * so they are good as RAM
- */
u64 __init find_early_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
- u64 size, u64 align)
+ u64 size, u64 align)
{
u64 addr, last;
@@ -560,7 +304,7 @@ out:
return -1ULL;
}
-u64 __init find_early_area_size(u64 ei_start, u64 ei_last, u64 start,
+static u64 __init find_early_area_size(u64 ei_start, u64 ei_last, u64 start,
u64 *sizep, u64 align)
{
u64 addr, last;
@@ -582,3 +326,130 @@ u64 __init find_early_area_size(u64 ei_s
+ * need to call this function after lmb_register_active_regions
Index: linux-2.6/mm/page_alloc.c
===================================================================
--- linux-2.6.orig/mm/page_alloc.c
+++ linux-2.6/mm/page_alloc.c
@@ -3451,7 +3451,7 @@ void * __init __alloc_memory_core_early(
ptr = phys_to_virt(addr);
memset(ptr, 0, size);
- reserve_early_without_check(addr, addr + size, "BOOTMEM");
+ reserve_early(addr, addr + size, "BOOTMEM");
return ptr;
}
#endif
Index: linux-2.6/mm/sparse-vmemmap.c
===================================================================
--- linux-2.6.orig/mm/sparse-vmemmap.c
+++ linux-2.6/mm/sparse-vmemmap.c
@@ -219,18 +219,7 @@ void __init sparse_mem_maps_populate_nod
if (vmemmap_buf_start) {
/* need to free left buf */
-#ifdef CONFIG_NO_BOOTMEM
- free_early(__pa(vmemmap_buf_start), __pa(vmemmap_buf_end));
- if (vmemmap_buf_start < vmemmap_buf) {
- char name[15];
-
- snprintf(name, sizeof(name), "MEMMAP %d", nodeid);
- reserve_early_without_check(__pa(vmemmap_buf_start),
- __pa(vmemmap_buf), name);
- }
-#else
free_bootmem(__pa(vmemmap_buf), vmemmap_buf_end - vmemmap_buf);
-#endif
vmemmap_buf = NULL;
vmemmap_buf_end = NULL;
}
Index: linux-2.6/arch/x86/include/asm/lmb.h
===================================================================
--- /dev/null
+++ linux-2.6/arch/x86/include/asm/lmb.h
@@ -0,0 +1,8 @@
+#ifndef _X86_LMB_H
+#define _X86_LMB_H
+
+#define LMB_DBG(fmt...) printk(fmt)
+
+#define LMB_REAL_LIMIT 0
+
+#endif
Index: linux-2.6/arch/x86/kernel/check.c
===================================================================
--- linux-2.6.orig/arch/x86/kernel/check.c
+++ linux-2.6/arch/x86/kernel/check.c
@@ -2,7 +2,8 @@
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
-#include <asm/e820.h>
+#include <linux/early_res.h>
+
#include <asm/proto.h>
/*
@@ -18,10 +19,12 @@ static int __read_mostly memory_corrupti
static unsigned __read_mostly corruption_check_size = 64*1024;
static unsigned __read_mostly corruption_check_period = 60; /* seconds */
-static struct e820entry scan_areas[MAX_SCAN_AREAS];
+static struct scan_area {
+ u64 addr;
+ u64 size;
+} scan_areas[MAX_SCAN_AREAS];
static int num_scan_areas;
-
static __init int set_corruption_check(char *arg)
{
char *end;
@@ -81,7 +84,7 @@ void __init setup_bios_corruption_check(
while (addr < corruption_check_size && num_scan_areas < MAX_SCAN_AREAS) {
u64 size;
- addr = find_e820_area_size(addr, &size, PAGE_SIZE);
+ addr = find_lmb_area_size(addr, &size, PAGE_SIZE);
if (!(addr + 1))
break;
@@ -92,7 +95,7 @@ void __init setup_bios_corruption_check(
if ((addr + size) > corruption_check_size)
size = corruption_check_size - addr;
- e820_update_range(addr, size, E820_RAM, E820_RESERVED);
+ reserve_early(addr, addr + size, "SCAN RAM");
scan_areas[num_scan_areas].addr = addr;
scan_areas[num_scan_areas].size = size;
num_scan_areas++;
@@ -105,7 +108,6 @@ void __init setup_bios_corruption_check(
printk(KERN_INFO "Scanning %d areas for low memory corruption\n",
num_scan_areas);
- update_e820();
}
Index: linux-2.6/arch/x86/mm/memtest.c
===================================================================
--- linux-2.6.orig/arch/x86/mm/memtest.c
+++ linux-2.6/arch/x86/mm/memtest.c
@@ -6,8 +6,7 @@
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pfn.h>
-
-#include <asm/e820.h>
+#include <linux/early_res.h>
static u64 patterns[] __initdata = {
0,
@@ -74,7 +73,7 @@ static void __init do_one_pass(u64 patte
u64 size = 0;
while (start < end) {
- start = find_e820_area_size(start, &size, 1);
+ start = find_lmb_area_size(start, &size, 1);
/* done ? */
if (start >= end)
Index: linux-2.6/arch/x86/mm/numa_64.c
===================================================================
--- linux-2.6.orig/arch/x86/mm/numa_64.c
+++ linux-2.6/arch/x86/mm/numa_64.c
@@ -174,7 +174,7 @@ static void * __init early_node_mem(int
if (start < (MAX_DMA32_PFN<<PAGE_SHIFT) &&
end > (MAX_DMA32_PFN<<PAGE_SHIFT))
start = MAX_DMA32_PFN<<PAGE_SHIFT;
- mem = find_e820_area_node(nodeid, start, end, size, align);
+ mem = find_lmb_area_node(nodeid, start, end, size, align);
if (mem != -1L)
return __va(mem);
@@ -184,7 +184,7 @@ static void * __init early_node_mem(int
start = MAX_DMA32_PFN<<PAGE_SHIFT;
else
start = MAX_DMA_PFN<<PAGE_SHIFT;
- mem = find_e820_area_node(nodeid, start, end, size, align);
+ mem = find_lmb_area_node(nodeid, start, end, size, align);
if (mem != -1L)
return __va(mem);
Please check the v4.
The function relies on find_lmb_area().
It will check whether there are enough slots left; otherwise it tries to get a
new, bigger array and copies the old array into it.
The final function looks like:
static void __init __check_and_double_region_array(struct lmb_region *type,
				struct lmb_property *static_region,
				u64 ex_start, u64 ex_end)
{
	u64 start, end, size, mem;
	struct lmb_property *new, *old;
	unsigned long rgnsz = type->nr_regions;

	/* do we have enough slots left ? */
	if ((rgnsz - type->cnt) > max_t(unsigned long, rgnsz/8, 2))
		return;

	old = type->region;
	/* double it */
	mem = -1ULL;
	size = sizeof(struct lmb_property) * rgnsz * 2;
	if (old == static_region)
		start = 0;
	else
		start = __pa(old) + sizeof(struct lmb_property) * rgnsz;
	end = ex_start;
	if (start + size < end)
		mem = find_lmb_area(start, end, size,
					sizeof(struct lmb_property));
	if (mem == -1ULL) {
		start = ex_end;
		end = get_max_mapped();
		if (start + size < end)
			mem = find_lmb_area(start, end, size, sizeof(struct lmb_property));
	}
	if (mem == -1ULL)
		panic("can not find more space for lmb.reserved.region array");

	new = __va(mem);
	/* copy old to new */
	memcpy(&new[0], &old[0], sizeof(struct lmb_property) * rgnsz);
	memset(&new[rgnsz], 0, sizeof(struct lmb_property) * rgnsz);
	memset(&old[0], 0, sizeof(struct lmb_property) * rgnsz);
	type->region = new;
	type->nr_regions = rgnsz * 2;
	printk(KERN_DEBUG "lmb.reserved.region array is doubled to %ld at [%llx - %llx]\n",
		type->nr_regions, mem, mem + size - 1);

	/* reserve new array and free old one */
	lmb_reserve(mem, sizeof(struct lmb_property) * rgnsz * 2);
	if (old != static_region)
		lmb_free(__pa(old), sizeof(struct lmb_property) * rgnsz);
}

void __init add_lmb_memory(u64 start, u64 end)
{
	__check_and_double_region_array(&lmb.memory, &lmb_memory_region[0], start, end);
	lmb_add(start, end - start);
}

void __init reserve_early(u64 start, u64 end, char *name)
{
	if (start == end)
		return;

	if (WARN_ONCE(start > end, "reserve_early: wrong range [%#llx, %#llx]\n", start, end))
		return;

	__check_and_double_region_array(&lmb.reserved, &lmb_reserved_region[0], start, end);
	lmb_reserve(start, end - start);
}

void __init free_early(u64 start, u64 end)
{
	if (start == end)
		return;

	if (WARN_ONCE(start > end, "free_early: wrong range [%#llx, %#llx]\n", start, end))
		return;

	/* keep punching hole, could run out of slots too */
	__check_and_double_region_array(&lmb.reserved, &lmb_reserved_region[0], start, end);
	lmb_free(start, end - start);
}
With those functions, we can replace bootmem on x86.
Thanks
Yinghai
Ingo,
I'd like to assume that you _accidentally_ picked the *worst* possible
example file in the entire repository (with the exception of anything
in arch/x86...). You should keep in mind that basically nobody doing
ARM development cares one hoot about the scheduler as long as it
"basically works"... most such systems could get away with just
SCHED_FIFO and static priorities. When you're porting the kernel to a
platform with a 250MHz in-order CPU that's going to have a load
average of zero for 99.5% of the time and a load average of 1.0 for
the other 0.5% of the time, that's pretty much the *LAST* thing you
care about.
In fact, I would guess that all the excellent work that has been done
with regard to optimized SMP and UP scheduling on much busier systems
with pathological loads has meant that the scheduler is probably one
of the most reliable pieces of code for the ARM developers. If you
want an excellent example that goes the other way, I suggest looking
at gpiolib: a great example of code that doesn't matter for beans
on 99.95% of x86 boxes yet is essential for any kind of real embedded
development.
So while it's possible that you could make a reasonable point about
developer time by looking at Git commit logs, you should refrain
from making such insulting and sweeping over-generalizations based on
single files, particularly ones which are largely irrelevant or
immaterial to the specific subset of developers.
Cheers,
Kyle Moffett
If the array can be doubled, with the old one copied into the new one,
then we don't need to change lmb.c too much.
The new early_res.c extends lmb, and the other half already works with x86 to replace bootmem.
I will check whether I can produce a patch that makes powerpc reuse early_res/nobootmem.
Thanks
Yinghai
The new early_res.c will have
find_lmb_area
reserve_early
free_early
and also get_free_all_memory_range to support the bootmem replacement.
find_lmb_area will take a goal/limit, so we can find a free region with more control.
When we check whether there are enough slots for a new reserved entry,
we need to make sure the new array will not overlap with the range of the
entry that is about to be reserved.
So we need a goal for the find; otherwise you have to keep trying with
lmb_alloc until you get a location for the new array that does not
overlap with the new entry.
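Condensed from the function pasted earlier, the probe order with such a goal looks like:

	mem = -1ULL;
	/* first try below the range that is about to be reserved ... */
	if (start + size < ex_start)
		mem = find_lmb_area(start, ex_start, size, align);
	/* ... and only then above it, up to the highest mapped address */
	if (mem == -1ULL && ex_end + size < get_max_mapped())
		mem = find_lmb_area(ex_end, get_max_mapped(), size, align);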
>
> To be honest, my #1 grief so far with this entire patch set is
> Yinghai's apparent inability to write anything resembling an
> explanation. All we get is keywords, bullet points and half
> sentences, and I admit I have a very, very hard time extracting
> meaning out of anything he's been writing so far.
I will spend more time writing full sentences...
I thought you guys preferred reading code instead.
Thanks for your patience.
Yinghai Lu
It's still bloody arrays with fixed sizes, arbitrary limits and
arbitrary waste of BSS space ;-) To be honest, I much prefer my idea of
linked lists... But I'll let others speak.
I think your array-size doubling looks more like a band-aid than a proper
fix. If we are going to use LMB in the long run for bootmem, we need to
properly address its capacity constraints, not just paper over the
problem.
Cheers,
Ben.
While at it, can you rename early_res to something that doesn't
suck ? :-)
Cheers,
Ben.
That would be fantastic! PowerPC and x86 both doing it would give it enough of
a critical mass to make the removal of bootmem realistic.
Thanks,
Ingo
From the look of it, I won't have any bandwidth this week though ... but
let's keep that as a plan for moving forward, and whoever gets to do it first
wins a free beer at the next conference :-)
Cheers,
Ben.
Please check whether the following renaming is OK with you.
The API from linux/early_res.h:
#ifndef _LINUX_EARLY_RES_H
#define _LINUX_EARLY_RES_H
#ifdef __KERNEL__
extern void reserve_early(u64 start, u64 end, char *name);
extern void free_early(u64 start, u64 end);
void lmb_reserved_to_bootmem(u64 start, u64 end);
u64 find_early_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
u64 size, u64 align);
void add_lmb_memory(u64 start, u64 end);
u64 find_lmb_area(u64 start, u64 end, u64 size, u64 align);
u64 find_lmb_area_size(u64 start, u64 *sizep, u64 align);
u64 find_lmb_area_node(int nid, u64 start, u64 end, u64 size, u64 align);
void lmb_register_active_regions(int nid, unsigned long start_pfn,
unsigned long last_pfn);
u64 lmb_hole_size(u64 start, u64 end);
u64 get_max_mapped(void);
struct range;
int get_free_all_memory_range(struct range **rangep, int nodeid);
#endif /* __KERNEL__ */
#endif /* _LINUX_EARLY_RES_H */
====>
extern void reserve_lmb(u64 start, u64 end, char *name);
extern void free_lmb(u64 start, u64 end);
void lmb_reserved_to_bootmem(u64 start, u64 end);
u64 __find_lmb_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
u64 size, u64 align);
void add_lmb_memory(u64 start, u64 end);
u64 find_lmb_area(u64 start, u64 end, u64 size, u64 align);
u64 find_lmb_area_size(u64 start, u64 *sizep, u64 align);
u64 find_lmb_area_node(int nid, u64 start, u64 end, u64 size, u64 align);
void lmb_register_active_regions(int nid, unsigned long start_pfn,
unsigned long last_pfn);
u64 lmb_hole_size(u64 start, u64 end);
u64 get_max_mapped(void);
struct range;
int get_free_all_memory_range(struct range **rangep, int nodeid);
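As a quick usage sketch with the renamed calls (the range bounds here are made up, not from the patches):

	/* reserve the kernel image, as the head64.c hunk above does */
	reserve_lmb(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");

	/* grab a page somewhere below 1GB, use it, then hand it back */
	u64 buf = find_lmb_area(0, 1ULL << 30, PAGE_SIZE, PAGE_SIZE);
	if (buf != -1ULL) {
		reserve_lmb(buf, buf + PAGE_SIZE, "SCRATCH");
		/* ... */
		free_lmb(buf, buf + PAGE_SIZE);
	}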
Thanks
Yinghai