/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include "smpboot.h"

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
enum {
	CSD_FLAG_LOCK		= 0x01,
};

struct call_function_data {
	struct call_single_data	__percpu *csd;
	cpumask_var_t		cpumask;
	cpumask_var_t		cpumask_ipi;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

struct call_single_queue {
	struct list_head	list;
	raw_spinlock_t		lock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_queue, call_single_queue);

static int
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				cpu_to_node(cpu)))
			return notifier_from_errno(-ENOMEM);
		if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
				cpu_to_node(cpu)))
			return notifier_from_errno(-ENOMEM);
		cfd->csd = alloc_percpu(struct call_single_data);
		if (!cfd->csd) {
			free_cpumask_var(cfd->cpumask);
			return notifier_from_errno(-ENOMEM);
		}
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:

	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		free_cpumask_var(cfd->cpumask);
		free_cpumask_var(cfd->cpumask_ipi);
		free_percpu(cfd->csd);
		break;
#endif
	};

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
	.notifier_call		= hotplug_cfd,
};

void __init call_function_init(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int i;

	for_each_possible_cpu(i) {
		struct call_single_queue *q = &per_cpu(call_single_queue, i);

		raw_spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->list);
	}

	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
	register_cpu_notifier(&hotplug_cfd_notifier);
}

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static void csd_lock_wait(struct call_single_data *data)
{
	while (data->flags & CSD_FLAG_LOCK)
		cpu_relax();
}

static void csd_lock(struct call_single_data *data)
{
	csd_lock_wait(data);
	data->flags = CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data structure:
	 */
	smp_mb();
}

static void csd_unlock(struct call_single_data *data)
{
	WARN_ON(!(data->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_mb();

	data->flags &= ~CSD_FLAG_LOCK;
}

/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static
void generic_exec_single(int cpu, struct call_single_data *data, int wait)
{
	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
	unsigned long flags;
	int ipi;

	raw_spin_lock_irqsave(&dst->lock, flags);
	ipi = list_empty(&dst->list);
	list_add_tail(&data->list, &dst->list);
	raw_spin_unlock_irqrestore(&dst->lock, flags);

	/*
	 * The list addition should be visible before we send the IPI: the
	 * handler locks the list to pull the entry off it, and the normal
	 * cache coherency rules implied by spinlocks provide that ordering.
	 *
	 * If IPIs can go out of order to the cache coherency protocol
	 * in an architecture, sufficient synchronisation should be added
	 * to arch code to make it appear to obey cache coherency WRT
	 * locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (ipi)
		arch_send_call_function_single_ipi(cpu);

	if (wait)
		csd_lock_wait(data);
}

/*
 * Invoked by arch to handle an IPI for call function single. Must be
 * called from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
	unsigned int data_flags;
	LIST_HEAD(list);

	/*
	 * Shouldn't receive this interrupt on a cpu that is not yet online.
	 */
	WARN_ON_ONCE(!cpu_online(smp_processor_id()));

	raw_spin_lock(&q->lock);
	list_replace_init(&q->list, &list);
	raw_spin_unlock(&q->lock);

	while (!list_empty(&list)) {
		struct call_single_data *data;

		data = list_entry(list.next, struct call_single_data, list);
		list_del(&data->list);

		/*
		 * 'data' can be invalid after this call if flags == 0
		 * (when called through generic_exec_single()),
		 * so save them away before making the call:
		 */
		data_flags = data->flags;

		data->func(data->info);

		/*
		 * Unlocked CSDs are valid through generic_exec_single():
		 */
		if (data_flags & CSD_FLAG_LOCK)
			csd_unlock(data);
	}
}

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run the function on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	struct call_single_data d = {
		.flags = 0,
	};
	unsigned long flags;
	int this_cpu;
	int err = 0;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	if (cpu == this_cpu) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	} else {
		if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
			struct call_single_data *data = &d;

			if (!wait)
				data = &__get_cpu_var(csd_data);

			csd_lock(data);

			data->func = func;
			data->info = info;
			generic_exec_single(cpu, data, wait);
		} else {
			err = -ENXIO;	/* CPU not online */
		}
	}

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
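
/*
 * Illustrative usage sketch, not part of the original file: read a value on
 * a specific remote CPU and wait for the result.  The names
 * example_remote_arg, example_remote_read() and example_read_on_cpu() are
 * hypothetical.
 */
struct example_remote_arg {
	unsigned long val;
};

static void example_remote_read(void *info)
{
	struct example_remote_arg *arg = info;

	/* Runs on the target CPU, in IPI context with interrupts disabled. */
	arg->val = raw_smp_processor_id();
}

static int __maybe_unused example_read_on_cpu(int cpu, unsigned long *val)
{
	struct example_remote_arg arg = { 0 };
	int err;

	/* wait=1: do not return until the target CPU has run the function. */
	err = smp_call_function_single(cpu, example_remote_read, &arg, 1);
	if (!err)
		*val = arg.val;
	return err;
}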

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 * Note that @wait will be implicitly turned on in case of allocation failures,
 * since we fall back to on-stack allocation.
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
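
/*
 * Illustrative sketch, not part of the original file: run a quick probe on
 * a "nearby" CPU from a caller-supplied mask, relying on the selection
 * preference documented above.  example_probe() and example_probe_near()
 * are hypothetical.
 */
static void example_probe(void *info)
{
	/* Report which CPU the probe actually ran on. */
	*(int *)info = raw_smp_processor_id();
}

static int __maybe_unused example_probe_near(const struct cpumask *dev_cpus)
{
	int ran_on = -1;
	int err;

	/* Prefers this CPU, then a same-node CPU, then any online CPU. */
	err = smp_call_function_any(dev_cpus, example_probe, &ran_on, 1);
	return err ? err : ran_on;
}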

/**
 * __smp_call_function_single(): Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @data: Pre-allocated and setup data structure
 * @wait: If true, wait until function has completed on specified CPU.
 *
 * Like smp_call_function_single(), but allow caller to pass in a
 * pre-allocated data structure. Useful for embedding @data inside
 * other structures, for instance.
 */
void __smp_call_function_single(int cpu, struct call_single_data *data,
				int wait)
{
	unsigned int this_cpu;
	unsigned long flags;

	this_cpu = get_cpu();
	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
		     && !oops_in_progress);

	if (cpu == this_cpu) {
		local_irq_save(flags);
		data->func(data->info);
		local_irq_restore(flags);
	} else {
		csd_lock(data);
		generic_exec_single(cpu, data, wait);
	}
	put_cpu();
}
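
/*
 * Illustrative sketch, not part of the original file: embed the
 * call_single_data in a larger per-object structure so that nothing needs
 * to be allocated at call time, as the comment above suggests.  struct
 * example_remote_work and its helpers are hypothetical.
 */
struct example_remote_work {
	struct call_single_data csd;
	unsigned long payload;
};

static void example_remote_work_fn(void *info)
{
	struct example_remote_work *w = info;

	/* Consume w->payload on the target CPU. */
	(void)w->payload;
}

static void __maybe_unused example_queue_remote_work(int cpu,
					struct example_remote_work *w)
{
	w->csd.func = example_remote_work_fn;
	w->csd.info = w;
	w->csd.flags = 0;

	/* wait=0: return as soon as the request is queued and the IPI sent. */
	__smp_call_function_single(cpu, &w->csd, 0);
}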

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	struct call_function_data *data;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress && !early_boot_irqs_disabled);

	/* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus?  We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	data = &__get_cpu_var(cfd_data);

	cpumask_and(data->cpumask, mask, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, data->cpumask);

	/* Some callers race with other cpus changing the passed mask */
	if (unlikely(!cpumask_weight(data->cpumask)))
		return;

	/*
	 * After we put an entry into the list, data->cpumask
	 * may be cleared again when another CPU sends another IPI for
	 * a SMP function call, so data->cpumask will be zero.
	 */
	cpumask_copy(data->cpumask_ipi, data->cpumask);

	for_each_cpu(cpu, data->cpumask) {
		struct call_single_data *csd = per_cpu_ptr(data->csd, cpu);
		struct call_single_queue *dst =
					&per_cpu(call_single_queue, cpu);
		unsigned long flags;

		csd_lock(csd);
		csd->func = func;
		csd->info = info;

		raw_spin_lock_irqsave(&dst->lock, flags);
		list_add_tail(&csd->list, &dst->list);
		raw_spin_unlock_irqrestore(&dst->lock, flags);
	}

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(data->cpumask_ipi);

	if (wait) {
		for_each_cpu(cpu, data->cpumask) {
			struct call_single_data *csd =
					per_cpu_ptr(data->csd, cpu);
			csd_lock_wait(csd);
		}
	}
}
EXPORT_SYMBOL(smp_call_function_many);
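
/*
 * Illustrative sketch, not part of the original file: bump a per-CPU event
 * count on every online CPU in a caller-supplied mask.  The per-CPU
 * variable example_event_count and example_bump_mask() are hypothetical.
 */
static DEFINE_PER_CPU(unsigned long, example_event_count);

static void example_bump(void *info)
{
	__this_cpu_inc(example_event_count);
}

static void __maybe_unused example_bump_mask(const struct cpumask *mask)
{
	/*
	 * smp_call_function_many() requires preemption to be disabled and
	 * only IPIs *other* CPUs, so run the callback locally by hand,
	 * mirroring IPI context by disabling interrupts around it.  The
	 * on_each_cpu_mask() helper further down wraps exactly this pattern.
	 */
	preempt_disable();
	smp_call_function_many(mask, example_bump, NULL, true);
	if (cpumask_test_cpu(smp_processor_id(), mask)) {
		local_irq_disable();
		example_bump(NULL);
		local_irq_enable();
	}
	preempt_enable();
}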

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
}
EXPORT_SYMBOL(smp_call_function);
#endif /* USE_GENERIC_SMP_HELPERS */

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);


/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* this is hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	get_option(&str, &nr_cpus);
	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);

/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	unsigned int cpu;

	idle_threads_init();

	/* FIXME: This should be done in userspace --RR */
	for_each_present_cpu(cpu) {
		if (num_online_cpus() >= setup_max_cpus)
			break;
		if (!cpu_online(cpu))
			cpu_up(cpu);
	}

	/* Any cleanup work */
	printk(KERN_INFO "Brought up %ld CPUs\n", (long)num_online_cpus());
	smp_cpus_done(setup_max_cpus);
}

/*
 * Call a function on all processors.  May be used during early boot while
 * early_boot_irqs_disabled is set, which is why local_irq_save/restore()
 * is used here rather than local_irq_disable/enable().
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	unsigned long flags;
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
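
/*
 * Illustrative sketch, not part of the original file: reset a per-CPU
 * statistic on every online CPU, including the local one.  The per-CPU
 * variable example_stat and example_reset_all_stats() are hypothetical.
 */
static DEFINE_PER_CPU(unsigned long, example_stat);

static void example_reset_stat(void *unused)
{
	__this_cpu_write(example_stat, 0);
}

static void __maybe_unused example_reset_all_stats(void)
{
	/* wait=1: every CPU has run example_reset_stat() when this returns. */
	on_each_cpu(example_reset_stat, NULL, 1);
}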

/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
			void *info, bool wait)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		local_irq_disable();
		func(info);
		local_irq_enable();
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);
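
/*
 * Illustrative sketch, not part of the original file: run per-CPU flush
 * work on the CPUs a driver tracks in its own mask, local CPU included.
 * example_flush_cache() and example_flush_drv_cpus() are hypothetical.
 */
static void example_flush_cache(void *info)
{
	/* Per-CPU flush work; must be fast and non-blocking. */
}

static void __maybe_unused example_flush_drv_cpus(const struct cpumask *drv_cpus)
{
	/* Only the online subset of drv_cpus is IPI'd; wait for completion. */
	on_each_cpu_mask(drv_cpus, example_flush_cache, NULL, true);
}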

/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 * @gfp_flags:	GFP flags to use when allocating the cpumask
 *		used internally by the function.
 *
 * The function might sleep if the GFP flags indicate that a
 * non-atomic allocation is allowed.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
			smp_call_func_t func, void *info, bool wait,
			gfp_t gfp_flags)
{
	cpumask_var_t cpus;
	int cpu, ret;

	might_sleep_if(gfp_flags & __GFP_WAIT);

	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info))
				cpumask_set_cpu(cpu, cpus);
		on_each_cpu_mask(cpus, func, info, wait);
		preempt_enable();
		free_cpumask_var(cpus);
	} else {
		/*
		 * No free cpumask, bother. No matter, we'll
		 * just have to IPI them one by one.
		 */
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info)) {
				ret = smp_call_function_single(cpu, func,
								info, wait);
				WARN_ON_ONCE(ret);
			}
		preempt_enable();
	}
}
EXPORT_SYMBOL(on_each_cpu_cond);
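
/*
 * Illustrative sketch, not part of the original file: only IPI the CPUs
 * whose per-CPU "pending" count is non-zero.  The per-CPU variable
 * example_pending and the example_* helpers are hypothetical.
 */
static DEFINE_PER_CPU(unsigned long, example_pending);

static bool example_has_pending(int cpu, void *info)
{
	/* Called with preemption disabled; decides whether @cpu gets an IPI. */
	return per_cpu(example_pending, cpu) != 0;
}

static void example_flush(void *info)
{
	__this_cpu_write(example_pending, 0);
}

static void __maybe_unused example_flush_pending(void)
{
	/* May sleep while allocating the cpumask, since GFP_KERNEL allows it. */
	on_each_cpu_cond(example_has_pending, example_flush, NULL, true,
			 GFP_KERNEL);
}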

static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);