/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/padata.txt for API documentation.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>

#define MAX_OBJ_NUM 1000

static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
	int cpu, target_cpu;

	target_cpu = cpumask_first(pd->cpumask.pcpu);
	for (cpu = 0; cpu < cpu_index; cpu++)
		target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

	return target_cpu;
}

static int padata_cpu_hash(struct parallel_data *pd)
{
	int cpu_index;

	/*
	 * Hash the sequence numbers to the cpus by taking
	 * seq_nr mod. number of cpus in use.
	 */

	spin_lock(&pd->seq_lock);
	cpu_index = pd->seq_nr % cpumask_weight(pd->cpumask.pcpu);
	pd->seq_nr++;
	spin_unlock(&pd->seq_lock);

	return padata_index_to_cpu(pd, cpu_index);
}

static void padata_parallel_worker(struct work_struct *parallel_work)
{
	struct padata_parallel_queue *pqueue;
	struct parallel_data *pd;
	struct padata_instance *pinst;
	LIST_HEAD(local_list);

	local_bh_disable();
	pqueue = container_of(parallel_work,
			      struct padata_parallel_queue, work);
	pd = pqueue->pd;
	pinst = pd->pinst;

	spin_lock(&pqueue->parallel.lock);
	list_replace_init(&pqueue->parallel.list, &local_list);
	spin_unlock(&pqueue->parallel.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->parallel(padata);
	}

	local_bh_enable();
}

/**
 * padata_do_parallel - padata parallelization function
 *
 * @pinst: padata instance
 * @padata: object to be parallelized
 * @cb_cpu: cpu the serialization callback function will run on,
 *          must be in the serial cpumask of padata (i.e. cpumask.cbcpu).
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 */
int padata_do_parallel(struct padata_instance *pinst,
		       struct padata_priv *padata, int cb_cpu)
{
	int target_cpu, err;
	struct padata_parallel_queue *queue;
	struct parallel_data *pd;

	rcu_read_lock_bh();

	pd = rcu_dereference(pinst->pd);

	err = -EINVAL;
	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
		goto out;

	if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu))
		goto out;

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
		goto out;

	err = 0;
	atomic_inc(&pd->refcnt);
	padata->pd = pd;
	padata->cb_cpu = cb_cpu;

	target_cpu = padata_cpu_hash(pd);
	queue = per_cpu_ptr(pd->pqueue, target_cpu);

	spin_lock(&queue->parallel.lock);
	list_add_tail(&padata->list, &queue->parallel.list);
	spin_unlock(&queue->parallel.lock);

	queue_work_on(target_cpu, pinst->wq, &queue->work);

out:
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(padata_do_parallel);
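
/*
 * Usage sketch (illustrative only; my_request, my_parallel, my_serial,
 * my_submit and my_pinst are hypothetical caller-side names, not part of
 * padata): a user embeds struct padata_priv in its own request, fills in
 * the parallel/serial callbacks and submits the object with
 * padata_do_parallel().
 *
 *	struct my_request {
 *		struct padata_priv padata;
 *		void *data;
 *	};
 *
 *	static void my_parallel(struct padata_priv *padata)
 *	{
 *		struct my_request *req = container_of(padata,
 *						      struct my_request, padata);
 *
 *		// do the cpu-intensive work on req->data here (BHs are off)
 *		padata_do_serial(padata);	// hand back for serialization
 *	}
 *
 *	static int my_submit(struct my_request *req, int cb_cpu)
 *	{
 *		req->padata.parallel = my_parallel;
 *		req->padata.serial = my_serial;
 *
 *		return padata_do_parallel(my_pinst, &req->padata, cb_cpu);
 *	}
 *
 * A return value of -EBUSY means the instance is resetting or already has
 * MAX_OBJ_NUM objects in flight; callers typically fall back to processing
 * the request synchronously in that case.
 */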

/*
 * padata_get_next - Get the next object that needs serialization.
 *
 * Return values are:
 *
 * A pointer to the control struct of the next object that needs
 * serialization, if present in one of the percpu reorder queues.
 *
 * NULL, if all percpu reorder queues are empty.
 *
 * -EINPROGRESS, if the next object that needs serialization will
 *  be parallel processed by another cpu and is not yet present in
 *  the cpu's reorder queue.
 *
 * -ENODATA, if this cpu has to do the parallel processing for
 *  the next object.
 */
static struct padata_priv *padata_get_next(struct parallel_data *pd)
{
	int cpu, num_cpus;
	unsigned int next_nr, next_index;
	struct padata_parallel_queue *next_queue;
	struct padata_priv *padata;
	struct padata_list *reorder;

	num_cpus = cpumask_weight(pd->cpumask.pcpu);

	/*
	 * Calculate the percpu reorder queue and the sequence
	 * number of the next object.
	 */
	next_nr = pd->processed;
	next_index = next_nr % num_cpus;
	cpu = padata_index_to_cpu(pd, next_index);
	next_queue = per_cpu_ptr(pd->pqueue, cpu);

	padata = NULL;

	reorder = &next_queue->reorder;

	if (!list_empty(&reorder->list)) {
		padata = list_entry(reorder->list.next,
				    struct padata_priv, list);

		spin_lock(&reorder->lock);
		list_del_init(&padata->list);
		atomic_dec(&pd->reorder_objects);
		spin_unlock(&reorder->lock);

		pd->processed++;

		goto out;
	}

	if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) {
		padata = ERR_PTR(-ENODATA);
		goto out;
	}

	padata = ERR_PTR(-EINPROGRESS);
out:
	return padata;
}

static void padata_reorder(struct parallel_data *pd)
{
	int cb_cpu;
	struct padata_priv *padata;
	struct padata_serial_queue *squeue;
	struct padata_instance *pinst = pd->pinst;

	/*
	 * We need to ensure that only one cpu can work on dequeueing of
	 * the reorder queue at a time. Calculating in which percpu reorder
	 * queue the next object will arrive takes some time. A spinlock
	 * would be highly contended. Also it is not clear in which order
	 * the objects arrive at the reorder queues. So a cpu could wait to
	 * get the lock just to notice that there is nothing to do at the
	 * moment. Therefore we use a trylock and let the holder of the lock
	 * care for all the objects enqueued while it holds the lock.
	 */
	if (!spin_trylock_bh(&pd->lock))
		return;

	while (1) {
		padata = padata_get_next(pd);

		/*
		 * All reorder queues are empty, or the next object that needs
		 * serialization is parallel processed by another cpu and is
		 * still on its way to the cpu's reorder queue, nothing to
		 * do for now.
		 */
		if (!padata || PTR_ERR(padata) == -EINPROGRESS)
			break;

		/*
		 * This cpu has to do the parallel processing of the next
		 * object. It's waiting in the cpu's parallelization queue,
		 * so exit immediately.
		 */
		if (PTR_ERR(padata) == -ENODATA) {
			del_timer(&pd->timer);
			spin_unlock_bh(&pd->lock);
			return;
		}

		cb_cpu = padata->cb_cpu;
		squeue = per_cpu_ptr(pd->squeue, cb_cpu);

		spin_lock(&squeue->serial.lock);
		list_add_tail(&padata->list, &squeue->serial.list);
		spin_unlock(&squeue->serial.lock);

		queue_work_on(cb_cpu, pinst->wq, &squeue->work);
	}

	spin_unlock_bh(&pd->lock);

	/*
	 * The next object that needs serialization might have arrived at
	 * the reorder queues in the meantime; we will be called again
	 * from the timer function if no one else cares for it.
	 */
	if (atomic_read(&pd->reorder_objects)
			&& !(pinst->flags & PADATA_RESET))
		mod_timer(&pd->timer, jiffies + HZ);
	else
		del_timer(&pd->timer);

	return;
}

static void padata_reorder_timer(unsigned long arg)
{
	struct parallel_data *pd = (struct parallel_data *)arg;

	padata_reorder(pd);
}

static void padata_serial_worker(struct work_struct *serial_work)
{
	struct padata_serial_queue *squeue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);

	local_bh_disable();
	squeue = container_of(serial_work, struct padata_serial_queue, work);
	pd = squeue->pd;

	spin_lock(&squeue->serial.lock);
	list_replace_init(&squeue->serial.list, &local_list);
	spin_unlock(&squeue->serial.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		atomic_dec(&pd->refcnt);
	}
	local_bh_enable();
}

/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
	int cpu;
	struct padata_parallel_queue *pqueue;
	struct parallel_data *pd;

	pd = padata->pd;

	cpu = get_cpu();
	pqueue = per_cpu_ptr(pd->pqueue, cpu);

	spin_lock(&pqueue->reorder.lock);
	atomic_inc(&pd->reorder_objects);
	list_add_tail(&padata->list, &pqueue->reorder.list);
	spin_unlock(&pqueue->reorder.lock);

	put_cpu();

	padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);
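
/*
 * Sketch of the matching serial side, continuing the hypothetical
 * my_request example above padata_do_parallel() (my_complete is likewise
 * hypothetical): the serial callback runs on the cb_cpu passed to
 * padata_do_parallel(), in submission order and with BHs off.
 *
 *	static void my_serial(struct padata_priv *padata)
 *	{
 *		struct my_request *req = container_of(padata,
 *						      struct my_request, padata);
 *
 *		// req is handed back in the order it was submitted in;
 *		// complete it here.
 *		my_complete(req);
 *	}
 */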

static int padata_setup_cpumasks(struct parallel_data *pd,
				 const struct cpumask *pcpumask,
				 const struct cpumask *cbcpumask)
{
	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
		return -ENOMEM;

	cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pd->cpumask.pcpu);
		return -ENOMEM;
	}

	cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask);
	return 0;
}

static void __padata_list_init(struct padata_list *pd_list)
{
	INIT_LIST_HEAD(&pd_list->list);
	spin_lock_init(&pd_list->lock);
}

/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
	int cpu;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		squeue->pd = pd;
		__padata_list_init(&squeue->serial);
		INIT_WORK(&squeue->work, padata_serial_worker);
	}
}

/* Initialize all percpu queues used by parallel workers */
static void padata_init_pqueues(struct parallel_data *pd)
{
	int cpu_index, cpu;
	struct padata_parallel_queue *pqueue;

	cpu_index = 0;
	for_each_cpu(cpu, pd->cpumask.pcpu) {
		pqueue = per_cpu_ptr(pd->pqueue, cpu);
		pqueue->pd = pd;
		pqueue->cpu_index = cpu_index;
		cpu_index++;

		__padata_list_init(&pqueue->reorder);
		__padata_list_init(&pqueue->parallel);
		INIT_WORK(&pqueue->work, padata_parallel_worker);
		atomic_set(&pqueue->num_obj, 0);
	}
}

/* Allocate and initialize the internal cpumask dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
					     const struct cpumask *pcpumask,
					     const struct cpumask *cbcpumask)
{
	struct parallel_data *pd;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->pqueue = alloc_percpu(struct padata_parallel_queue);
	if (!pd->pqueue)
		goto err_free_pd;

	pd->squeue = alloc_percpu(struct padata_serial_queue);
	if (!pd->squeue)
		goto err_free_pqueue;
	if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0)
		goto err_free_squeue;

	padata_init_pqueues(pd);
	padata_init_squeues(pd);
	setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
	pd->seq_nr = 0;
	atomic_set(&pd->reorder_objects, 0);
	atomic_set(&pd->refcnt, 0);
	pd->pinst = pinst;
	spin_lock_init(&pd->lock);

	return pd;

err_free_squeue:
	free_percpu(pd->squeue);
err_free_pqueue:
	free_percpu(pd->pqueue);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}

static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask.pcpu);
	free_cpumask_var(pd->cpumask.cbcpu);
	free_percpu(pd->pqueue);
	free_percpu(pd->squeue);
	kfree(pd);
}

/* Flush all objects out of the padata queues. */
static void padata_flush_queues(struct parallel_data *pd)
{
	int cpu;
	struct padata_parallel_queue *pqueue;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.pcpu) {
		pqueue = per_cpu_ptr(pd->pqueue, cpu);
		flush_work(&pqueue->work);
	}

	del_timer_sync(&pd->timer);

	if (atomic_read(&pd->reorder_objects))
		padata_reorder(pd);

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		flush_work(&squeue->work);
	}

	BUG_ON(atomic_read(&pd->refcnt) != 0);
}

static void __padata_start(struct padata_instance *pinst)
{
	pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
	if (!(pinst->flags & PADATA_INIT))
		return;

	pinst->flags &= ~PADATA_INIT;

	synchronize_rcu();

	get_online_cpus();
	padata_flush_queues(pinst->pd);
	put_online_cpus();
}

/* Replace the internal control structure with a new one. */
static void padata_replace(struct padata_instance *pinst,
			   struct parallel_data *pd_new)
{
	struct parallel_data *pd_old = pinst->pd;
	int notification_mask = 0;

	pinst->flags |= PADATA_RESET;

	rcu_assign_pointer(pinst->pd, pd_new);

	synchronize_rcu();

	if (!cpumask_equal(pd_old->cpumask.pcpu, pd_new->cpumask.pcpu))
		notification_mask |= PADATA_CPU_PARALLEL;
	if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu))
		notification_mask |= PADATA_CPU_SERIAL;

	padata_flush_queues(pd_old);
	padata_free_pd(pd_old);

	if (notification_mask)
		blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
					     notification_mask,
					     &pd_new->cpumask);

	pinst->flags &= ~PADATA_RESET;
}

/**
 * padata_register_cpumask_notifier - Registers a notifier that will be called
 *                             if either pcpu or cbcpu or both cpumasks change.
 *
 * @pinst: A pointer to the padata instance
 * @nblock: A pointer to the notifier block.
 */
int padata_register_cpumask_notifier(struct padata_instance *pinst,
				     struct notifier_block *nblock)
{
	return blocking_notifier_chain_register(&pinst->cpumask_change_notifier,
						nblock);
}
EXPORT_SYMBOL(padata_register_cpumask_notifier);

/**
 * padata_unregister_cpumask_notifier - Unregisters cpumask notifier
 *        registered earlier  using padata_register_cpumask_notifier
 *
 * @pinst: A pointer to the padata instance.
 * @nblock: A pointer to the notifier block.
 */
int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
				       struct notifier_block *nblock)
{
	return blocking_notifier_chain_unregister(
		&pinst->cpumask_change_notifier,
		nblock);
}
EXPORT_SYMBOL(padata_unregister_cpumask_notifier);
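
/*
 * Sketch of a cpumask change notifier (my_cpumask_change and my_nblock are
 * hypothetical): the chain is invoked with a mask of PADATA_CPU_PARALLEL
 * and/or PADATA_CPU_SERIAL, and data points to the new struct padata_cpumask.
 *
 *	static int my_cpumask_change(struct notifier_block *nblock,
 *				     unsigned long mask, void *data)
 *	{
 *		if (mask & PADATA_CPU_PARALLEL)
 *			pr_debug("parallel cpumask changed\n");
 *		if (mask & PADATA_CPU_SERIAL)
 *			pr_debug("serial cpumask changed\n");
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nblock = {
 *		.notifier_call = my_cpumask_change,
 *	};
 *
 *	padata_register_cpumask_notifier(pinst, &my_nblock);
 */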


/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
				    const struct cpumask *cpumask)
{
	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
		pinst->flags |= PADATA_INVALID;
		return false;
	}

	pinst->flags &= ~PADATA_INVALID;
	return true;
}

static int __padata_set_cpumasks(struct padata_instance *pinst,
				 cpumask_var_t pcpumask,
				 cpumask_var_t cbcpumask)
{
	int valid;
	struct parallel_data *pd;

	valid = padata_validate_cpumask(pinst, pcpumask);
	if (!valid) {
		__padata_stop(pinst);
		goto out_replace;
	}

	valid = padata_validate_cpumask(pinst, cbcpumask);
	if (!valid)
		__padata_stop(pinst);

out_replace:
	pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
	if (!pd)
		return -ENOMEM;

	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	padata_replace(pinst, pd);

	if (valid)
		__padata_start(pinst);

	return 0;
}

/**
 * padata_set_cpumasks - Set both parallel and serial cpumasks. The first
 *                       one is used by parallel workers and the second one
 *                       by the workers doing serialization.
 *
 * @pinst: padata instance
 * @pcpumask: the cpumask to use for parallel workers
 * @cbcpumask: the cpumask to use for serial workers
 */
int padata_set_cpumasks(struct padata_instance *pinst, cpumask_var_t pcpumask,
			cpumask_var_t cbcpumask)
{
	int err;

	mutex_lock(&pinst->lock);
	get_online_cpus();

	err = __padata_set_cpumasks(pinst, pcpumask, cbcpumask);

	put_online_cpus();
	mutex_unlock(&pinst->lock);

	return err;

}
EXPORT_SYMBOL(padata_set_cpumasks);

/**
 * padata_set_cpumask - Set the cpumask corresponding to @cpumask_type to
 *                      the value given by @cpumask.
 *
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL, selecting the
 *                serial or parallel cpumask respectively.
 * @cpumask: the cpumask to use
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
		       cpumask_var_t cpumask)
{
	struct cpumask *serial_mask, *parallel_mask;
	int err = -EINVAL;

	mutex_lock(&pinst->lock);
	get_online_cpus();

	switch (cpumask_type) {
	case PADATA_CPU_PARALLEL:
		serial_mask = pinst->cpumask.cbcpu;
		parallel_mask = cpumask;
		break;
	case PADATA_CPU_SERIAL:
		parallel_mask = pinst->cpumask.pcpu;
		serial_mask = cpumask;
		break;
	default:
		goto out;
	}

	err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
	put_online_cpus();
	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_set_cpumask);
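
/*
 * Sketch: restricting the parallel workers to a caller-chosen cpu set
 * (pinst and the choice of cpumask_of_node(0) are illustrative):
 *
 *	cpumask_var_t new_mask;
 *	int err;
 *
 *	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	cpumask_copy(new_mask, cpumask_of_node(0));
 *	err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, new_mask);
 *	free_cpumask_var(new_mask);
 *
 * padata_set_cpumask() copies the mask into the instance, so the caller may
 * free its copy as soon as the call returns.
 */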

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
		pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
				     pinst->cpumask.cbcpu);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);

		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_start(pinst);
	}

	return 0;
}

/**
 * padata_add_cpu - add a cpu to one or both (parallel and serial)
 *                  padata cpumasks.
 *
 * @pinst: padata instance
 * @cpu: cpu to add
 * @mask: bitmask of flags specifying to which cpumask @cpu should be added.
 *        The @mask may be any combination of the following flags:
 *          PADATA_CPU_SERIAL   - serial cpumask
 *          PADATA_CPU_PARALLEL - parallel cpumask
 */

int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask)
{
	int err;

	if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
		return -EINVAL;

	mutex_lock(&pinst->lock);

	get_online_cpus();
	if (mask & PADATA_CPU_SERIAL)
		cpumask_set_cpu(cpu, pinst->cpumask.cbcpu);
	if (mask & PADATA_CPU_PARALLEL)
		cpumask_set_cpu(cpu, pinst->cpumask.pcpu);

	err = __padata_add_cpu(pinst, cpu);
	put_online_cpus();

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_add_cpu);
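
/*
 * Sketch (pinst is hypothetical): make cpu 3 available to both the
 * parallel and the serial workers of an instance:
 *
 *	err = padata_add_cpu(pinst, 3, PADATA_CPU_PARALLEL | PADATA_CPU_SERIAL);
 */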

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd = NULL;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {

		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_stop(pinst);

		pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
				     pinst->cpumask.cbcpu);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);

		cpumask_clear_cpu(cpu, pd->cpumask.cbcpu);
		cpumask_clear_cpu(cpu, pd->cpumask.pcpu);
	}

	return 0;
}

/**
 * padata_remove_cpu - remove a cpu from one or both (serial and parallel)
 *                     padata cpumasks.
 *
 * @pinst: padata instance
 * @cpu: cpu to remove
 * @mask: bitmask specifying from which cpumask @cpu should be removed
 *        The @mask may be any combination of the following flags:
 *          PADATA_CPU_SERIAL   - serial cpumask
 *          PADATA_CPU_PARALLEL - parallel cpumask
 */
int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask)
{
	int err;

	if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
		return -EINVAL;

	mutex_lock(&pinst->lock);

	get_online_cpus();
	if (mask & PADATA_CPU_SERIAL)
		cpumask_clear_cpu(cpu, pinst->cpumask.cbcpu);
	if (mask & PADATA_CPU_PARALLEL)
		cpumask_clear_cpu(cpu, pinst->cpumask.pcpu);

	err = __padata_remove_cpu(pinst, cpu);
	put_online_cpus();

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_remove_cpu);

/**
 * padata_start - start the parallel processing
 *
 * @pinst: padata instance to start
 */
int padata_start(struct padata_instance *pinst)
{
	int err = 0;

	mutex_lock(&pinst->lock);

	if (pinst->flags & PADATA_INVALID)
		err = -EINVAL;

	__padata_start(pinst);

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_start);

/**
 * padata_stop - stop the parallel processing
 *
 * @pinst: padata instance to stop
 */
void padata_stop(struct padata_instance *pinst)
{
	mutex_lock(&pinst->lock);
	__padata_stop(pinst);
	mutex_unlock(&pinst->lock);
}
EXPORT_SYMBOL(padata_stop);

#ifdef CONFIG_HOTPLUG_CPU

static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
		cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}


static int padata_cpu_callback(struct notifier_block *nfb,
			       unsigned long action, void *hcpu)
{
	int err;
	struct padata_instance *pinst;
	int cpu = (unsigned long)hcpu;

	pinst = container_of(nfb, struct padata_instance, cpu_notifier);

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		if (!pinst_has_cpu(pinst, cpu))
			break;
		mutex_lock(&pinst->lock);
		err = __padata_add_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
		if (err)
			return notifier_from_errno(err);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		if (!pinst_has_cpu(pinst, cpu))
			break;
		mutex_lock(&pinst->lock);
		err = __padata_remove_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
		if (err)
			return notifier_from_errno(err);
		break;

	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!pinst_has_cpu(pinst, cpu))
			break;
		mutex_lock(&pinst->lock);
		__padata_remove_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);

	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		if (!pinst_has_cpu(pinst, cpu))
			break;
		mutex_lock(&pinst->lock);
		__padata_add_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
	}

	return NOTIFY_OK;
}
#endif

static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
	unregister_hotcpu_notifier(&pinst->cpu_notifier);
#endif

	padata_stop(pinst);
	padata_free_pd(pinst->pd);
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
	kfree(pinst);
}

#define kobj2pinst(_kobj)					\
	container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)					\
	container_of(_attr, struct padata_sysfs_entry, attr)

static void padata_sysfs_release(struct kobject *kobj)
{
	struct padata_instance *pinst = kobj2pinst(kobj);
	__padata_free(pinst);
}

struct padata_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
	ssize_t (*store)(struct padata_instance *, struct attribute *,
			 const char *, size_t);
};

static ssize_t show_cpumask(struct padata_instance *pinst,
			    struct attribute *attr,  char *buf)
{
	struct cpumask *cpumask;
	ssize_t len;

	mutex_lock(&pinst->lock);
	if (!strcmp(attr->name, "serial_cpumask"))
		cpumask = pinst->cpumask.cbcpu;
	else
		cpumask = pinst->cpumask.pcpu;

	len = bitmap_scnprintf(buf, PAGE_SIZE, cpumask_bits(cpumask),
			       nr_cpu_ids);
	if (PAGE_SIZE - len < 2)
		len = -EINVAL;
	else
		len += sprintf(buf + len, "\n");

	mutex_unlock(&pinst->lock);
	return len;
}

static ssize_t store_cpumask(struct padata_instance *pinst,
			     struct attribute *attr,
			     const char *buf, size_t count)
{
	cpumask_var_t new_cpumask;
	ssize_t ret;
	int mask_type;

	if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
		return -ENOMEM;

	ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
			   nr_cpumask_bits);
	if (ret < 0)
		goto out;

	mask_type = !strcmp(attr->name, "serial_cpumask") ?
		PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
	ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
	if (!ret)
		ret = count;

out:
	free_cpumask_var(new_cpumask);
	return ret;
}

#define PADATA_ATTR_RW(_name, _show_name, _store_name)		\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)		\
	static struct padata_sysfs_entry _name##_attr = \
		__ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);

/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
static struct attribute *padata_default_attrs[] = {
	&serial_cpumask_attr.attr,
	&parallel_cpumask_attr.attr,
	NULL,
};

static ssize_t padata_sysfs_show(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->show)
		ret = pentry->show(pinst, attr, buf);

	return ret;
}

static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->store)
		ret = pentry->store(pinst, attr, buf, count);

	return ret;
}

static const struct sysfs_ops padata_sysfs_ops = {
	.show = padata_sysfs_show,
	.store = padata_sysfs_store,
};

static struct kobj_type padata_attr_type = {
	.sysfs_ops = &padata_sysfs_ops,
	.default_attrs = padata_default_attrs,
	.release = padata_sysfs_release,
};

/**
 * padata_alloc_possible - Allocate and initialize padata instance.
 *                         Use the cpu_possible_mask for serial and
 *                         parallel workers.
 *
 * @wq: workqueue to use for the allocated padata instance
 */
struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq)
{
	return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask);
}
EXPORT_SYMBOL(padata_alloc_possible);

/**
 * padata_alloc - allocate and initialize a padata instance and specify
 *                cpumasks for serial and parallel workers.
 *
 * @wq: workqueue to use for the allocated padata instance
 * @pcpumask: cpumask that will be used for padata parallelization
 * @cbcpumask: cpumask that will be used for padata serialization
 */
struct padata_instance *padata_alloc(struct workqueue_struct *wq,
				     const struct cpumask *pcpumask,
				     const struct cpumask *cbcpumask)
{
	struct padata_instance *pinst;
	struct parallel_data *pd = NULL;

	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
	if (!pinst)
		goto err;

	get_online_cpus();
	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
		goto err_free_inst;
	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pinst->cpumask.pcpu);
		goto err_free_inst;
	}
	if (!padata_validate_cpumask(pinst, pcpumask) ||
	    !padata_validate_cpumask(pinst, cbcpumask))
		goto err_free_masks;

	pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
	if (!pd)
		goto err_free_masks;

	rcu_assign_pointer(pinst->pd, pd);

	pinst->wq = wq;

	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	pinst->flags = 0;

#ifdef CONFIG_HOTPLUG_CPU
	pinst->cpu_notifier.notifier_call = padata_cpu_callback;
	pinst->cpu_notifier.priority = 0;
	register_hotcpu_notifier(&pinst->cpu_notifier);
#endif

	put_online_cpus();

	BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier);
	kobject_init(&pinst->kobj, &padata_attr_type);
	mutex_init(&pinst->lock);

	return pinst;

err_free_masks:
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
err_free_inst:
	kfree(pinst);
	put_online_cpus();
err:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc);
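
/*
 * Instance lifecycle sketch (the workqueue name and error handling are
 * illustrative): allocate a workqueue, create an instance on the possible
 * cpus, start it, and tear everything down in reverse order when done.
 *
 *	struct workqueue_struct *wq;
 *	struct padata_instance *pinst;
 *
 *	wq = alloc_workqueue("my_padata", WQ_CPU_INTENSIVE, 1);
 *	if (!wq)
 *		return -ENOMEM;
 *
 *	pinst = padata_alloc_possible(wq);
 *	if (!pinst) {
 *		destroy_workqueue(wq);
 *		return -ENOMEM;
 *	}
 *
 *	padata_start(pinst);
 *
 *	// ... padata_do_parallel()/padata_do_serial() as shown above ...
 *
 *	padata_stop(pinst);
 *	padata_free(pinst);
 *	destroy_workqueue(wq);
 */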

/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
	kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);