From: jbeulich@novell.com
Subject: fold IPIs onto a single IRQ each
Patch-mainline: obsolete
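
The Xen IPI event channels were each bound to their own dynamic IRQ on
every CPU, i.e. NR_IPIS * nr_cpu_ids IRQs in total. Fold them instead
onto a single IRQ per IPI kind: the IRQ is allocated once, handled via
handle_percpu_irq, and marked IRQ_PER_CPU (so fixup_irqs() now skips it
when a CPU goes offline), while each CPU's private event channel is
recorded in per_cpu(ipi_to_evtchn). On x86/SMP, callers bind a static
struct irqaction per CPU through bind_ipi_to_irqaction() and raise IPIs
through notify_remote_via_ipi(); the per-CPU bind_ipi_to_irqhandler()
interface is retained for configurations that keep PER_CPU_IPI_IRQ
defined.

Caller-side sketch of the new interface, mirroring the smpboot.c hunk
below (MY_VECTOR and my_ipi_handler are placeholders, not symbols
introduced by this patch):

	#include <linux/interrupt.h>
	#include <xen/evtchn.h>

	static irqreturn_t my_ipi_handler(int irq, void *dev_id);

	static struct irqaction my_ipi_action = {
		.handler = my_ipi_handler,
		.flags   = IRQF_DISABLED,
		.name    = "my-ipi"
	};

	static int __cpuinit my_cpu_up(unsigned int cpu)
	{
		/* The first caller allocates the shared IRQ and installs
		 * the irqaction; subsequent CPUs only bind their own
		 * event channel to the already-existing IRQ. */
		int irq = bind_ipi_to_irqaction(MY_VECTOR, cpu,
						&my_ipi_action);

		if (irq < 0)
			return irq;

		/* Send the IPI to a remote CPU via that CPU's private
		 * event channel. */
		notify_remote_via_ipi(MY_VECTOR, cpu);

		/* On CPU teardown, close only this CPU's event channel:
		 *	unbind_from_per_cpu_irq(irq, cpu);
		 */
		return 0;
	}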

--- head-2009-06-23.orig/arch/x86/kernel/apic/ipi-xen.c	2009-06-23 13:47:52.000000000 +0200
+++ head-2009-06-23/arch/x86/kernel/apic/ipi-xen.c	2009-06-09 15:51:56.000000000 +0200
@@ -21,31 +21,22 @@
 
 #include <xen/evtchn.h>
 
-DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
-
-static inline void __send_IPI_one(unsigned int cpu, int vector)
-{
-	int irq = per_cpu(ipi_to_irq, cpu)[vector];
-	BUG_ON(irq < 0);
-	notify_remote_via_irq(irq);
-}
-
 static void __send_IPI_shortcut(unsigned int shortcut, int vector)
 {
 	unsigned int cpu;
 
 	switch (shortcut) {
 	case APIC_DEST_SELF:
-		__send_IPI_one(smp_processor_id(), vector);
+		notify_remote_via_ipi(vector, smp_processor_id());
 		break;
 	case APIC_DEST_ALLBUT:
 		for_each_online_cpu(cpu)
 			if (cpu != smp_processor_id())
-				__send_IPI_one(cpu, vector);
+				notify_remote_via_ipi(vector, cpu);
 		break;
 	case APIC_DEST_ALLINC:
 		for_each_online_cpu(cpu)
-			__send_IPI_one(cpu, vector);
+			notify_remote_via_ipi(vector, cpu);
 		break;
 	default:
 		printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut,
@@ -63,7 +54,7 @@ void xen_send_IPI_mask_allbutself(const 
 	WARN_ON(!cpumask_subset(cpumask, cpu_online_mask));
 	for_each_cpu_and(cpu, cpumask, cpu_online_mask)
 		if (cpu != smp_processor_id())
-			__send_IPI_one(cpu, vector);
+			notify_remote_via_ipi(vector, cpu);
 	local_irq_restore(flags);
 }
 
@@ -75,7 +66,7 @@ void xen_send_IPI_mask(const struct cpum
 	local_irq_save(flags);
 	WARN_ON(!cpumask_subset(cpumask, cpu_online_mask));
 	for_each_cpu_and(cpu, cpumask, cpu_online_mask)
-		__send_IPI_one(cpu, vector);
+		notify_remote_via_ipi(vector, cpu);
 	local_irq_restore(flags);
 }
 
--- head-2009-06-23.orig/arch/x86/kernel/irq_32-xen.c	2009-06-23 13:47:52.000000000 +0200
+++ head-2009-06-23/arch/x86/kernel/irq_32-xen.c	2009-06-09 15:51:56.000000000 +0200
@@ -228,6 +228,8 @@ void fixup_irqs(void)
 			continue;
 		if (irq == 2)
 			continue;
+		if (desc->status & IRQ_PER_CPU)
+			continue;
 
 		affinity = desc->affinity;
 		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
--- head-2009-06-23.orig/arch/x86/kernel/irq_64-xen.c	2009-06-23 13:47:52.000000000 +0200
+++ head-2009-06-23/arch/x86/kernel/irq_64-xen.c	2009-06-09 15:51:56.000000000 +0200
@@ -85,6 +85,7 @@ void fixup_irqs(void)
 
 		affinity = desc->affinity;
 		if (!irq_has_action(irq) ||
+		    (desc->status & IRQ_PER_CPU) ||
 		    cpumask_equal(affinity, cpu_online_mask)) {
 			spin_unlock(&desc->lock);
 			continue;
--- head-2009-06-23.orig/drivers/xen/Kconfig	2009-06-09 15:51:41.000000000 +0200
+++ head-2009-06-23/drivers/xen/Kconfig	2009-06-25 08:56:01.000000000 +0200
@@ -4,6 +4,7 @@
 
 config XEN
 	bool
+	select IRQ_PER_CPU if SMP
 
 if XEN
 config XEN_INTERFACE_VERSION
@@ -332,6 +333,9 @@ endmenu
 config HAVE_IRQ_IGNORE_UNHANDLED
 	def_bool y
 
+config IRQ_PER_CPU
+	bool
+
 config NO_IDLE_HZ
 	def_bool y
 
--- head-2009-06-23.orig/drivers/xen/core/evtchn.c	2009-05-11 14:08:58.000000000 +0200
+++ head-2009-06-23/drivers/xen/core/evtchn.c	2009-06-09 15:51:56.000000000 +0200
@@ -59,6 +59,22 @@ static DEFINE_SPINLOCK(irq_mapping_updat
 static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
 	[0 ...  NR_EVENT_CHANNELS-1] = -1 };
 
+/* IRQ <-> IPI mapping. */
+#ifndef NR_IPIS
+#define NR_IPIS 1
+#endif
+#if defined(CONFIG_SMP) && defined(CONFIG_X86)
+static int ipi_to_irq[NR_IPIS] __read_mostly = {[0 ... NR_IPIS-1] = -1};
+static DEFINE_PER_CPU(int[NR_IPIS], ipi_to_evtchn);
+#else
+#define PER_CPU_IPI_IRQ
+#endif
+#if !defined(CONFIG_SMP) || !defined(PER_CPU_IPI_IRQ)
+#define BUG_IF_IPI(irq) BUG_ON(type_from_irq(irq) == IRQT_IPI)
+#else
+#define BUG_IF_IPI(irq) ((void)(irq))
+#endif
+
 /* Binding types. */
 enum {
 	IRQT_UNBOUND,
@@ -117,12 +133,14 @@ static inline u32 mk_irq_info(u32 type, 
  * Accessors for packed IRQ information.
  */
 
+#ifdef PER_CPU_IPI_IRQ
 static inline unsigned int evtchn_from_irq(int irq)
 {
 	const struct irq_cfg *cfg = irq_cfg(irq);
 
 	return cfg ? cfg->info & ((1U << _EVTCHN_BITS) - 1) : 0;
 }
+#endif
 
 static inline unsigned int index_from_irq(int irq)
 {
@@ -139,14 +157,32 @@ static inline unsigned int type_from_irq
 	return cfg ? cfg->info >> (32 - _IRQT_BITS) : IRQT_UNBOUND;
 }
 
+#ifndef PER_CPU_IPI_IRQ
+static inline unsigned int evtchn_from_per_cpu_irq(unsigned int irq,
+						    unsigned int cpu)
+{
+	BUG_ON(type_from_irq(irq) != IRQT_IPI);
+	return per_cpu(ipi_to_evtchn, cpu)[index_from_irq(irq)];
+}
+
+static inline unsigned int evtchn_from_irq(unsigned int irq)
+{
+	if (type_from_irq(irq) != IRQT_IPI) {
+		const struct irq_cfg *cfg = irq_cfg(irq);
+
+		return cfg ? cfg->info & ((1U << _EVTCHN_BITS) - 1) : 0;
+	}
+	return evtchn_from_per_cpu_irq(irq, smp_processor_id());
+}
+#endif
+
 /* IRQ <-> VIRQ mapping. */
 DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};
 
+#if defined(CONFIG_SMP) && defined(PER_CPU_IPI_IRQ)
 /* IRQ <-> IPI mapping. */
-#ifndef NR_IPIS
-#define NR_IPIS 1
-#endif
 DEFINE_PER_CPU(int, ipi_to_irq[NR_IPIS]) = {[0 ... NR_IPIS-1] = -1};
+#endif
 
 #ifdef CONFIG_SMP
 
@@ -170,8 +206,14 @@ static void bind_evtchn_to_cpu(unsigned 
 
 	BUG_ON(!test_bit(chn, s->evtchn_mask));
 
-	if (irq != -1)
-		cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
+	if (irq != -1) {
+		struct irq_desc *desc = irq_to_desc(irq);
+
+		if (!(desc->status & IRQ_PER_CPU))
+			cpumask_copy(desc->affinity, cpumask_of(cpu));
+		else
+			cpumask_set_cpu(cpu, desc->affinity);
+	}
 
 	clear_bit(chn, per_cpu(cpu_evtchn_mask, cpu_evtchn[chn]));
 	set_bit(chn, per_cpu(cpu_evtchn_mask, cpu));
@@ -339,7 +381,7 @@ asmlinkage void __irq_entry evtchn_do_up
 
 static struct irq_chip dynirq_chip;
 
-static int find_unbound_irq(unsigned int cpu)
+static int find_unbound_irq(unsigned int cpu, bool percpu)
 {
 	static int warned;
 	int irq;
@@ -349,10 +391,19 @@ static int find_unbound_irq(unsigned int
 		struct irq_cfg *cfg = desc->chip_data;
 
 		if (!cfg->bindcount) {
+			irq_flow_handler_t handle;
+			const char *name;
+
 			desc->status |= IRQ_NOPROBE;
+			if (!percpu) {
+				handle = handle_level_irq;
+				name = "level";
+			} else {
+				handle = handle_percpu_irq;
+				name = "percpu";
+			}
 			set_irq_chip_and_handler_name(irq, &dynirq_chip,
-						      handle_level_irq,
-						      "level");
+						      handle, name);
 			return irq;
 		}
 	}
@@ -373,7 +424,7 @@ static int bind_caller_port_to_irq(unsig
 	spin_lock(&irq_mapping_update_lock);
 
 	if ((irq = evtchn_to_irq[caller_port]) == -1) {
-		if ((irq = find_unbound_irq(smp_processor_id())) < 0)
+		if ((irq = find_unbound_irq(smp_processor_id(), false)) < 0)
 			goto out;
 
 		evtchn_to_irq[caller_port] = irq;
@@ -396,7 +447,7 @@ static int bind_local_port_to_irq(unsign
 
 	BUG_ON(evtchn_to_irq[local_port] != -1);
 
-	if ((irq = find_unbound_irq(smp_processor_id())) < 0) {
+	if ((irq = find_unbound_irq(smp_processor_id(), false)) < 0) {
 		struct evtchn_close close = { .port = local_port };
 		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
 			BUG();
@@ -449,7 +500,7 @@ static int bind_virq_to_irq(unsigned int
 	spin_lock(&irq_mapping_update_lock);
 
 	if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
-		if ((irq = find_unbound_irq(cpu)) < 0)
+		if ((irq = find_unbound_irq(cpu, false)) < 0)
 			goto out;
 
 		bind_virq.virq = virq;
@@ -474,6 +525,7 @@ static int bind_virq_to_irq(unsigned int
 	return irq;
 }
 
+#if defined(CONFIG_SMP) && defined(PER_CPU_IPI_IRQ)
 static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
 {
 	struct evtchn_bind_ipi bind_ipi;
@@ -482,7 +534,7 @@ static int bind_ipi_to_irq(unsigned int 
 	spin_lock(&irq_mapping_update_lock);
 
 	if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) {
-		if ((irq = find_unbound_irq(cpu)) < 0)
+		if ((irq = find_unbound_irq(cpu, false)) < 0)
 			goto out;
 
 		bind_ipi.vcpu = cpu;
@@ -505,6 +557,7 @@ static int bind_ipi_to_irq(unsigned int 
 	spin_unlock(&irq_mapping_update_lock);
 	return irq;
 }
+#endif
 
 static void unbind_from_irq(unsigned int irq)
 {
@@ -512,6 +565,7 @@ static void unbind_from_irq(unsigned int
 	unsigned int cpu;
 	int evtchn = evtchn_from_irq(irq);
 
+	BUG_IF_IPI(irq);
 	spin_lock(&irq_mapping_update_lock);
 
 	if (!--irq_cfg(irq)->bindcount && VALID_EVTCHN(evtchn)) {
@@ -525,10 +579,12 @@ static void unbind_from_irq(unsigned int
 			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
 				[index_from_irq(irq)] = -1;
 			break;
+#if defined(CONFIG_SMP) && defined(PER_CPU_IPI_IRQ)
 		case IRQT_IPI:
 			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
 				[index_from_irq(irq)] = -1;
 			break;
+#endif
 		default:
 			break;
 		}
@@ -551,6 +607,46 @@ static void unbind_from_irq(unsigned int
 	spin_unlock(&irq_mapping_update_lock);
 }
 
+#if defined(CONFIG_SMP) && !defined(PER_CPU_IPI_IRQ)
+void unbind_from_per_cpu_irq(unsigned int irq, unsigned int cpu)
+{
+	struct evtchn_close close;
+	int evtchn = evtchn_from_per_cpu_irq(irq, cpu);
+
+	spin_lock(&irq_mapping_update_lock);
+
+	if (VALID_EVTCHN(evtchn)) {
+		struct irq_desc *desc = irq_to_desc(irq);
+
+		mask_evtchn(evtchn);
+
+		BUG_ON(irq_cfg(irq)->bindcount <= 1);
+		irq_cfg(irq)->bindcount--;
+		cpumask_clear_cpu(cpu, desc->affinity);
+
+		close.port = evtchn;
+		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
+			BUG();
+
+		switch (type_from_irq(irq)) {
+		case IRQT_IPI:
+			per_cpu(ipi_to_evtchn, cpu)[index_from_irq(irq)] = 0;
+			break;
+		default:
+			BUG();
+			break;
+		}
+
+		/* Closed ports are implicitly re-bound to VCPU0. */
+		bind_evtchn_to_cpu(evtchn, 0);
+
+		evtchn_to_irq[evtchn] = -1;
+	}
+
+	spin_unlock(&irq_mapping_update_lock);
+}
+#endif /* CONFIG_SMP && !PER_CPU_IPI_IRQ */
+
 int bind_caller_port_to_irqhandler(
 	unsigned int caller_port,
 	irq_handler_t handler,
@@ -645,6 +741,8 @@ int bind_virq_to_irqhandler(
 }
 EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
 
+#ifdef CONFIG_SMP
+#ifdef PER_CPU_IPI_IRQ
 int bind_ipi_to_irqhandler(
 	unsigned int ipi,
 	unsigned int cpu,
@@ -667,7 +765,71 @@ int bind_ipi_to_irqhandler(
 
 	return irq;
 }
-EXPORT_SYMBOL_GPL(bind_ipi_to_irqhandler);
+#else
+int __cpuinit bind_ipi_to_irqaction(
+	unsigned int ipi,
+	unsigned int cpu,
+	struct irqaction *action)
+{
+	struct evtchn_bind_ipi bind_ipi;
+	int evtchn, irq, retval = 0;
+
+	spin_lock(&irq_mapping_update_lock);
+
+	if (VALID_EVTCHN(per_cpu(ipi_to_evtchn, cpu)[ipi])) {
+		spin_unlock(&irq_mapping_update_lock);
+		return -EBUSY;
+	}
+
+	if ((irq = ipi_to_irq[ipi]) == -1) {
+		if ((irq = find_unbound_irq(cpu, true)) < 0) {
+			spin_unlock(&irq_mapping_update_lock);
+			return irq;
+		}
+
+		/* Extra reference so count will never drop to zero. */
+		irq_cfg(irq)->bindcount++;
+
+		ipi_to_irq[ipi] = irq;
+		irq_cfg(irq)->info = mk_irq_info(IRQT_IPI, ipi, 0);
+		retval = 1;
+	}
+
+	bind_ipi.vcpu = cpu;
+	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
+					&bind_ipi) != 0)
+		BUG();
+
+	evtchn = bind_ipi.port;
+	evtchn_to_irq[evtchn] = irq;
+	per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
+
+	bind_evtchn_to_cpu(evtchn, cpu);
+
+	irq_cfg(irq)->bindcount++;
+
+	spin_unlock(&irq_mapping_update_lock);
+
+	if (retval == 0) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		unmask_evtchn(evtchn);
+		local_irq_restore(flags);
+	} else {
+		action->flags |= IRQF_PERCPU;
+		retval = setup_irq(irq, action);
+		if (retval) {
+			unbind_from_per_cpu_irq(irq, cpu);
+			BUG_ON(retval > 0);
+			irq = retval;
+		}
+	}
+
+	return irq;
+}
+#endif /* PER_CPU_IPI_IRQ */
+#endif /* CONFIG_SMP */
 
 void unbind_from_irqhandler(unsigned int irq, void *dev_id)
 {
@@ -693,6 +855,7 @@ static void rebind_irq_to_cpu(unsigned i
 {
 	int evtchn = evtchn_from_irq(irq);
 
+	BUG_IF_IPI(irq);
 	if (VALID_EVTCHN(evtchn))
 		rebind_evtchn_to_cpu(evtchn, tcpu);
 }
@@ -776,6 +939,7 @@ static struct irq_chip dynirq_chip = {
 	.unmask   = unmask_dynirq,
 	.mask_ack = ack_dynirq,
 	.ack      = ack_dynirq,
+	.eoi      = end_dynirq,
 	.end      = end_dynirq,
 #ifdef CONFIG_SMP
 	.set_affinity = set_affinity_irq,
@@ -955,10 +1119,21 @@ int irq_ignore_unhandled(unsigned int ir
 	return !!(irq_status.flags & XENIRQSTAT_shared);
 }
 
+#if defined(CONFIG_SMP) && !defined(PER_CPU_IPI_IRQ)
+void notify_remote_via_ipi(unsigned int ipi, unsigned int cpu)
+{
+	int evtchn = evtchn_from_per_cpu_irq(ipi_to_irq[ipi], cpu);
+
+	if (VALID_EVTCHN(evtchn))
+		notify_remote_via_evtchn(evtchn);
+}
+#endif
+
 void notify_remote_via_irq(int irq)
 {
 	int evtchn = evtchn_from_irq(irq);
 
+	BUG_IF_IPI(irq);
 	if (VALID_EVTCHN(evtchn))
 		notify_remote_via_evtchn(evtchn);
 }
@@ -966,6 +1141,7 @@ EXPORT_SYMBOL_GPL(notify_remote_via_irq)
 
 int irq_to_evtchn_port(int irq)
 {
+	BUG_IF_IPI(irq);
 	return evtchn_from_irq(irq);
 }
 EXPORT_SYMBOL_GPL(irq_to_evtchn_port);
@@ -1081,11 +1257,17 @@ static void restore_cpu_virqs(unsigned i
 
 static void restore_cpu_ipis(unsigned int cpu)
 {
+#ifdef CONFIG_SMP
 	struct evtchn_bind_ipi bind_ipi;
 	int ipi, irq, evtchn;
 
 	for (ipi = 0; ipi < NR_IPIS; ipi++) {
+#ifdef PER_CPU_IPI_IRQ
 		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
+#else
+		if ((irq = ipi_to_irq[ipi]) == -1
+		    || !VALID_EVTCHN(per_cpu(ipi_to_evtchn, cpu)[ipi]))
+#endif
 			continue;
 
 		BUG_ON(irq_cfg(irq)->info != mk_irq_info(IRQT_IPI, ipi, 0));
@@ -1099,13 +1281,18 @@ static void restore_cpu_ipis(unsigned in
 
 		/* Record the new mapping. */
 		evtchn_to_irq[evtchn] = irq;
+#ifdef PER_CPU_IPI_IRQ
 		irq_cfg(irq)->info = mk_irq_info(IRQT_IPI, ipi, evtchn);
+#else
+		per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
+#endif
 		bind_evtchn_to_cpu(evtchn, cpu);
 
 		/* Ready for use. */
 		if (!(irq_to_desc(irq)->status & IRQ_DISABLED))
 			unmask_evtchn(evtchn);
 	}
+#endif
 }
 
 static int evtchn_resume(struct sys_device *dev)
--- head-2009-06-23.orig/drivers/xen/core/smpboot.c	2009-04-30 12:30:58.000000000 +0200
+++ head-2009-06-23/drivers/xen/core/smpboot.c	2009-06-09 15:51:56.000000000 +0200
@@ -40,12 +40,9 @@ cpumask_var_t vcpu_initialized_mask;
 DEFINE_PER_CPU(struct cpuinfo_x86, cpu_info);
 EXPORT_PER_CPU_SYMBOL(cpu_info);
 
-static DEFINE_PER_CPU(int, resched_irq);
-static DEFINE_PER_CPU(int, callfunc_irq);
-static DEFINE_PER_CPU(int, call1func_irq);
-static char resched_name[NR_CPUS][15];
-static char callfunc_name[NR_CPUS][15];
-static char call1func_name[NR_CPUS][15];
+static int __read_mostly resched_irq = -1;
+static int __read_mostly callfunc_irq = -1;
+static int __read_mostly call1func_irq = -1;
 
 #ifdef CONFIG_X86_LOCAL_APIC
 #define set_cpu_to_apicid(cpu, apicid) (per_cpu(x86_cpu_to_apicid, cpu) = (apicid))
@@ -100,47 +97,54 @@ remove_siblinginfo(unsigned int cpu)
 
 static int __cpuinit xen_smp_intr_init(unsigned int cpu)
 {
+	static struct irqaction resched_action = {
+		.handler = smp_reschedule_interrupt,
+		.flags   = IRQF_DISABLED,
+		.name    = "resched"
+	}, callfunc_action = {
+		.handler = smp_call_function_interrupt,
+		.flags   = IRQF_DISABLED,
+		.name    = "callfunc"
+	}, call1func_action = {
+		.handler = smp_call_function_single_interrupt,
+		.flags   = IRQF_DISABLED,
+		.name    = "call1func"
+	};
 	int rc;
 
-	per_cpu(resched_irq, cpu) = per_cpu(callfunc_irq, cpu) =
-		per_cpu(call1func_irq, cpu) = -1;
-
-	sprintf(resched_name[cpu], "resched%u", cpu);
-	rc = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR,
-				    cpu,
-				    smp_reschedule_interrupt,
-				    IRQF_DISABLED|IRQF_NOBALANCING,
-				    resched_name[cpu],
-				    NULL);
+	rc = bind_ipi_to_irqaction(RESCHEDULE_VECTOR,
+				   cpu,
+				   &resched_action);
 	if (rc < 0)
-		goto fail;
-	per_cpu(resched_irq, cpu) = rc;
-
-	sprintf(callfunc_name[cpu], "callfunc%u", cpu);
-	rc = bind_ipi_to_irqhandler(CALL_FUNCTION_VECTOR,
-				    cpu,
-				    smp_call_function_interrupt,
-				    IRQF_DISABLED|IRQF_NOBALANCING,
-				    callfunc_name[cpu],
-				    NULL);
+		return rc;
+	if (resched_irq < 0)
+		resched_irq = rc;
+	else
+		BUG_ON(resched_irq != rc);
+
+	rc = bind_ipi_to_irqaction(CALL_FUNCTION_VECTOR,
+				   cpu,
+				   &callfunc_action);
 	if (rc < 0)
-		goto fail;
-	per_cpu(callfunc_irq, cpu) = rc;
-
-	sprintf(call1func_name[cpu], "call1func%u", cpu);
-	rc = bind_ipi_to_irqhandler(CALL_FUNC_SINGLE_VECTOR,
-				    cpu,
-				    smp_call_function_single_interrupt,
-				    IRQF_DISABLED|IRQF_NOBALANCING,
-				    call1func_name[cpu],
-				    NULL);
+		goto unbind_resched;
+	if (callfunc_irq < 0)
+		callfunc_irq = rc;
+	else
+		BUG_ON(callfunc_irq != rc);
+
+	rc = bind_ipi_to_irqaction(CALL_FUNC_SINGLE_VECTOR,
+				   cpu,
+				   &call1func_action);
 	if (rc < 0)
-		goto fail;
-	per_cpu(call1func_irq, cpu) = rc;
+		goto unbind_call;
+	if (call1func_irq < 0)
+		call1func_irq = rc;
+	else
+		BUG_ON(call1func_irq != rc);
 
 	rc = xen_spinlock_init(cpu);
 	if (rc < 0)
-		goto fail;
+		goto unbind_call1;
 
 	if ((cpu != 0) && ((rc = local_setup_timer(cpu)) != 0))
 		goto fail;
@@ -148,13 +152,13 @@ static int __cpuinit xen_smp_intr_init(u
 	return 0;
 
  fail:
-	if (per_cpu(resched_irq, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
-	if (per_cpu(callfunc_irq, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
-	if (per_cpu(call1func_irq, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(call1func_irq, cpu), NULL);
 	xen_spinlock_cleanup(cpu);
+ unbind_call1:
+	unbind_from_per_cpu_irq(call1func_irq, cpu);
+ unbind_call:
+	unbind_from_per_cpu_irq(callfunc_irq, cpu);
+ unbind_resched:
+	unbind_from_per_cpu_irq(resched_irq, cpu);
 	return rc;
 }
 
@@ -164,9 +168,9 @@ static void __cpuinit xen_smp_intr_exit(
 	if (cpu != 0)
 		local_teardown_timer(cpu);
 
-	unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
-	unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
-	unbind_from_irqhandler(per_cpu(call1func_irq, cpu), NULL);
+	unbind_from_per_cpu_irq(resched_irq, cpu);
+	unbind_from_per_cpu_irq(callfunc_irq, cpu);
+	unbind_from_per_cpu_irq(call1func_irq, cpu);
 	xen_spinlock_cleanup(cpu);
 }
 #endif
--- head-2009-06-23.orig/drivers/xen/core/spinlock.c	2009-06-23 13:47:52.000000000 +0200
+++ head-2009-06-23/drivers/xen/core/spinlock.c	2009-06-09 15:51:56.000000000 +0200
@@ -14,8 +14,7 @@
 
 #ifdef TICKET_SHIFT
 
-static DEFINE_PER_CPU(int, spinlock_irq) = -1;
-static char spinlock_name[NR_CPUS][15];
+static int __read_mostly spinlock_irq = -1;
 
 struct spinning {
 	raw_spinlock_t *lock;
@@ -32,34 +31,36 @@ static DEFINE_PER_CPU(raw_rwlock_t, spin
 
 int __cpuinit xen_spinlock_init(unsigned int cpu)
 {
+	static struct irqaction spinlock_action = {
+		.handler = smp_reschedule_interrupt,
+		.flags   = IRQF_DISABLED,
+		.name    = "spinlock"
+	};
 	int rc;
 
-	sprintf(spinlock_name[cpu], "spinlock%u", cpu);
-	rc = bind_ipi_to_irqhandler(SPIN_UNLOCK_VECTOR,
-				    cpu,
-				    smp_reschedule_interrupt,
-				    IRQF_DISABLED|IRQF_NOBALANCING,
-				    spinlock_name[cpu],
-				    NULL);
+	rc = bind_ipi_to_irqaction(SPIN_UNLOCK_VECTOR,
+				   cpu,
+				   &spinlock_action);
  	if (rc < 0)
  		return rc;
 
-	disable_irq(rc); /* make sure it's never delivered */
-	per_cpu(spinlock_irq, cpu) = rc;
+	if (spinlock_irq < 0) {
+		disable_irq(rc); /* make sure it's never delivered */
+		spinlock_irq = rc;
+	} else
+		BUG_ON(spinlock_irq != rc);
 
 	return 0;
 }
 
 void __cpuinit xen_spinlock_cleanup(unsigned int cpu)
 {
-	if (per_cpu(spinlock_irq, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(spinlock_irq, cpu), NULL);
-	per_cpu(spinlock_irq, cpu) = -1;
+	unbind_from_per_cpu_irq(spinlock_irq, cpu);
 }
 
 int xen_spin_wait(raw_spinlock_t *lock, unsigned int token)
 {
-	int rc = 0, irq = percpu_read(spinlock_irq);
+	int rc = 0, irq = spinlock_irq;
 	raw_rwlock_t *rm_lock;
 	unsigned long flags;
 	struct spinning spinning;
@@ -154,7 +155,7 @@ void xen_spin_kick(raw_spinlock_t *lock,
 		raw_local_irq_restore(flags);
 
 		if (unlikely(spinning)) {
-			notify_remote_via_irq(per_cpu(spinlock_irq, cpu));
+			notify_remote_via_ipi(SPIN_UNLOCK_VECTOR, cpu);
 			return;
 		}
 	}
--- head-2009-06-23.orig/include/xen/evtchn.h	2009-05-04 11:30:39.000000000 +0200
+++ head-2009-06-23/include/xen/evtchn.h	2009-06-09 15:51:56.000000000 +0200
@@ -89,6 +89,8 @@ int bind_virq_to_irqhandler(
 	unsigned long irqflags,
 	const char *devname,
 	void *dev_id);
+#if defined(CONFIG_SMP) && !defined(MODULE)
+#ifndef CONFIG_X86
 int bind_ipi_to_irqhandler(
 	unsigned int ipi,
 	unsigned int cpu,
@@ -96,6 +98,13 @@ int bind_ipi_to_irqhandler(
 	unsigned long irqflags,
 	const char *devname,
 	void *dev_id);
+#else
+int bind_ipi_to_irqaction(
+	unsigned int ipi,
+	unsigned int cpu,
+	struct irqaction *action);
+#endif
+#endif
 
 /*
  * Common unbind function for all event sources. Takes IRQ to unbind from.
@@ -104,6 +113,11 @@ int bind_ipi_to_irqhandler(
  */
 void unbind_from_irqhandler(unsigned int irq, void *dev_id);
 
+#if defined(CONFIG_SMP) && !defined(MODULE) && defined(CONFIG_X86)
+/* Specialized unbind function for per-CPU IRQs. */
+void unbind_from_per_cpu_irq(unsigned int irq, unsigned int cpu);
+#endif
+
 #ifndef CONFIG_XEN
 void irq_resume(void);
 #endif
@@ -181,4 +195,8 @@ void xen_poll_irq(int irq);
 void notify_remote_via_irq(int irq);
 int irq_to_evtchn_port(int irq);
 
+#if defined(CONFIG_SMP) && !defined(MODULE) && defined(CONFIG_X86)
+void notify_remote_via_ipi(unsigned int ipi, unsigned int cpu);
+#endif
+
 #endif /* __ASM_EVTCHN_H__ */