// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
#include "pmem.h"
#include "nd.h"

static DEFINE_IDA(dimm_ida);

static bool noblk;
module_param(noblk, bool, 0444);
MODULE_PARM_DESC(noblk, "force disable BLK / local alias support");

/*
 * Retrieve the bus and dimm handle and return whether this dimm
 * supports get_config_data commands
 */
int nvdimm_check_config_data(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->cmd_mask ||
	    !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
		if (test_bit(NDD_LABELING, &nvdimm->flags))
			return -ENXIO;
		else
			return -ENOTTY;
	}

	return 0;
}

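/* Sanity check that the drvdata exists and that the DIMM accepts config-data transfers */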
static int validate_dimm(struct nvdimm_drvdata *ndd)
{
	int rc;

	if (!ndd)
		return -EINVAL;

	rc = nvdimm_check_config_data(ndd->dev);
	if (rc)
		dev_dbg(ndd->dev, "%ps: %s error: %d\n",
				__builtin_return_address(0), __func__, rc);
	return rc;
}

/**
 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
 * @ndd: dimm to initialize
 */
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
	struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc;
	int rc = validate_dimm(ndd);
	int cmd_rc = 0;

	if (rc)
		return rc;

	if (cmd->config_size)
		return 0; /* already valid */

	memset(cmd, 0, sizeof(*cmd));
	nd_desc = nvdimm_bus->nd_desc;
	rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
			ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

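/*
 * Read 'len' bytes of the DIMM label/config area, starting at 'offset',
 * into 'buf' by issuing ND_CMD_GET_CONFIG_DATA in max_xfer-sized chunks.
 */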
int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
			   size_t offset, size_t len)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nd_cmd_get_config_data_hdr *cmd;
	size_t max_cmd_size, buf_offset;

	if (rc)
		return rc;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	for (buf_offset = 0; len;
	     len -= cmd->in_length, buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);

		cmd_size = sizeof(*cmd) + cmd->in_length;

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_GET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}

		/* out_buf should be valid, copy it into our output buffer */
		memcpy(buf + buf_offset, cmd->out_buf, cmd->in_length);
	}
	kvfree(cmd);

	return rc;
}

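/*
 * Write 'len' bytes from 'buf' to the DIMM label/config area at 'offset',
 * issuing ND_CMD_SET_CONFIG_DATA in max_xfer-sized chunks; each command's
 * status is returned in the last 4 bytes of its buffer.
 */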
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
		void *buf, size_t len)
{
	size_t max_cmd_size, buf_offset;
	struct nd_cmd_set_config_hdr *cmd;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	if (rc)
		return rc;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	for (buf_offset = 0; len; len -= cmd->in_length,
			buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);
		memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);

		/* status is output in the last 4-bytes of the command buffer */
		cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}
	}
	kvfree(cmd);

	return rc;
}

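/* Flag the DIMM as carrying namespace labels (NDD_LABELING) */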
void nvdimm_set_labeling(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_LABELING, &nvdimm->flags);
}

void nvdimm_set_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_LOCKED, &nvdimm->flags);
}

void nvdimm_clear_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	clear_bit(NDD_LOCKED, &nvdimm->flags);
}

static void nvdimm_release(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	ida_simple_remove(&dimm_ida, nvdimm->id);
	kfree(nvdimm);
}

struct nvdimm *to_nvdimm(struct device *dev)
{
	struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);

	WARN_ON(!is_nvdimm(dev));
	return nvdimm;
}
EXPORT_SYMBOL_GPL(to_nvdimm);

struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr)
{
	struct nd_region *nd_region = &ndbr->nd_region;
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];

	return nd_mapping->nvdimm;
}
EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm);

unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr)
{
	/* pmem mapping properties are private to libnvdimm */
	return ARCH_MEMREMAP_PMEM;
}
EXPORT_SYMBOL_GPL(nd_blk_memremap_flags);

struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
	struct nvdimm *nvdimm = nd_mapping->nvdimm;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	return dev_get_drvdata(&nvdimm->dev);
}
EXPORT_SYMBOL(to_ndd);

void nvdimm_drvdata_release(struct kref *kref)
{
	struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
	struct device *dev = ndd->dev;
	struct resource *res, *_r;

	dev_dbg(dev, "trace\n");
	nvdimm_bus_lock(dev);
	for_each_dpa_resource_safe(ndd, res, _r)
		nvdimm_free_dpa(ndd, res);
	nvdimm_bus_unlock(dev);

	kvfree(ndd->data);
	kfree(ndd);
	put_device(dev);
}

void get_ndd(struct nvdimm_drvdata *ndd)
{
	kref_get(&ndd->kref);
}

void put_ndd(struct nvdimm_drvdata *ndd)
{
	if (ndd)
		kref_put(&ndd->kref, nvdimm_drvdata_release);
}

const char *nvdimm_name(struct nvdimm *nvdimm)
{
	return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);

struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
{
	return &nvdimm->dev.kobj;
}
EXPORT_SYMBOL_GPL(nvdimm_kobj);

unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
{
	return nvdimm->cmd_mask;
}
EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);

void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
	if (nvdimm)
		return nvdimm->provider_data;
	return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_provider_data);

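/* sysfs 'commands': space-separated names of the commands this DIMM supports */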
static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	int cmd, len = 0;

	if (!nvdimm->cmd_mask)
		return sprintf(buf, "\n");

	for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%s%s%s\n",
			test_bit(NDD_ALIASING, &nvdimm->flags) ? "alias " : "",
			test_bit(NDD_LABELING, &nvdimm->flags) ? "label " : "",
			test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	/*
	 * The state may be in the process of changing, userspace should
	 * quiesce probing if it wants a static answer
	 */
	nvdimm_bus_lock(dev);
	nvdimm_bus_unlock(dev);
	return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
			? "active" : "idle");
}
static DEVICE_ATTR_RO(state);

static ssize_t available_slots_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
	ssize_t rc;
	u32 nfree;

	if (!ndd)
		return -ENXIO;

	nvdimm_bus_lock(dev);
	nfree = nd_label_nfree(ndd);
	if (nfree - 1 > nfree) {
		dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
		nfree = 0;
	} else
		nfree--;
	rc = sprintf(buf, "%d\n", nfree);
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(available_slots);

__weak ssize_t security_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
		return sprintf(buf, "disabled\n");
	if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags))
		return sprintf(buf, "unlocked\n");
	if (test_bit(NVDIMM_SECURITY_LOCKED, &nvdimm->sec.flags))
		return sprintf(buf, "locked\n");
	if (test_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags))
		return sprintf(buf, "overwrite\n");
	return -ENOTTY;
}

static ssize_t frozen_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%d\n", test_bit(NVDIMM_SECURITY_FROZEN,
				&nvdimm->sec.flags));
}
static DEVICE_ATTR_RO(frozen);

static ssize_t security_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	ssize_t rc;

	/*
	 * Require all userspace triggered security management to be
	 * done while probing is idle and the DIMM is not in active use
	 * in any region.
	 */
	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = nvdimm_security_store(dev, buf, len);
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RW(security);

static struct attribute *nvdimm_attributes[] = {
	&dev_attr_state.attr,
	&dev_attr_flags.attr,
	&dev_attr_commands.attr,
	&dev_attr_available_slots.attr,
	&dev_attr_security.attr,
	&dev_attr_frozen.attr,
	NULL,
};

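/*
 * Hide the security attributes when the DIMM reports no security state, and
 * demote 'security' to read-only when no state-mutation ops are provided.
 */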
static umode_t nvdimm_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (a != &dev_attr_security.attr && a != &dev_attr_frozen.attr)
		return a->mode;
	if (!nvdimm->sec.flags)
		return 0;

	if (a == &dev_attr_security.attr) {
		/* Are there any state mutation ops (make writable)? */
		if (nvdimm->sec.ops->freeze || nvdimm->sec.ops->disable
				|| nvdimm->sec.ops->change_key
				|| nvdimm->sec.ops->erase
				|| nvdimm->sec.ops->overwrite)
			return a->mode;
		return 0444;
	}

	if (nvdimm->sec.ops->freeze)
		return a->mode;
	return 0;
}

static const struct attribute_group nvdimm_attribute_group = {
	.attrs = nvdimm_attributes,
	.is_visible = nvdimm_visible,
};

static const struct attribute_group *nvdimm_attribute_groups[] = {
	&nd_device_attribute_group,
	&nvdimm_attribute_group,
	NULL,
};

static const struct device_type nvdimm_device_type = {
	.name = "nvdimm",
	.release = nvdimm_release,
	.groups = nvdimm_attribute_groups,
};

bool is_nvdimm(struct device *dev)
{
	return dev->type == &nvdimm_device_type;
}

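/*
 * Allocate an nvdimm, assign it an "nmem%d" id, and register it as a child
 * device of the bus.  Security state is probed before device_add() so that
 * the security attributes have correct visibility from the start.
 */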
struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
		void *provider_data, const struct attribute_group **groups,
		unsigned long flags, unsigned long cmd_mask, int num_flush,
		struct resource *flush_wpq, const char *dimm_id,
		const struct nvdimm_security_ops *sec_ops)
{
	struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
	struct device *dev;

	if (!nvdimm)
		return NULL;

	nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
	if (nvdimm->id < 0) {
		kfree(nvdimm);
		return NULL;
	}

	nvdimm->dimm_id = dimm_id;
	nvdimm->provider_data = provider_data;
	if (noblk)
		flags |= 1 << NDD_NOBLK;
	nvdimm->flags = flags;
	nvdimm->cmd_mask = cmd_mask;
	nvdimm->num_flush = num_flush;
	nvdimm->flush_wpq = flush_wpq;
	atomic_set(&nvdimm->busy, 0);
	dev = &nvdimm->dev;
	dev_set_name(dev, "nmem%d", nvdimm->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = &nvdimm_device_type;
	dev->devt = MKDEV(nvdimm_major, nvdimm->id);
	dev->groups = groups;
	nvdimm->sec.ops = sec_ops;
	nvdimm->sec.overwrite_tmo = 0;
	INIT_DELAYED_WORK(&nvdimm->dwork, nvdimm_security_overwrite_query);
	/*
	 * Security state must be initialized before device_add() for
	 * attribute visibility.
	 */
	/* get security state and extended (master) state */
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
	nd_device_register(dev);

	return nvdimm;
}
EXPORT_SYMBOL_GPL(__nvdimm_create);

static void shutdown_security_notify(void *data)
{
	struct nvdimm *nvdimm = data;

	sysfs_put(nvdimm->sec.overwrite_state);
}

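/*
 * Pin the 'security' sysfs dirent so later security state transitions (e.g.
 * overwrite completion) can be signalled to userspace; the reference is
 * dropped via the devm action when the device goes away.
 */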
int nvdimm_security_setup_events(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->sec.flags || !nvdimm->sec.ops
			|| !nvdimm->sec.ops->overwrite)
		return 0;
	nvdimm->sec.overwrite_state = sysfs_get_dirent(dev->kobj.sd, "security");
	if (!nvdimm->sec.overwrite_state)
		return -ENOMEM;

	return devm_add_action_or_reset(dev, shutdown_security_notify, nvdimm);
}
EXPORT_SYMBOL_GPL(nvdimm_security_setup_events);

int nvdimm_in_overwrite(struct nvdimm *nvdimm)
{
	return test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
}
EXPORT_SYMBOL_GPL(nvdimm_in_overwrite);

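/*
 * Freeze the DIMM's security state.  Refuses while an overwrite is in
 * flight and refreshes the cached security flags afterwards.
 */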
int nvdimm_security_freeze(struct nvdimm *nvdimm)
{
	int rc;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->freeze)
		return -EOPNOTSUPP;

	if (!nvdimm->sec.flags)
		return -EIO;

	if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
		dev_warn(&nvdimm->dev, "Overwrite operation in progress.\n");
		return -EBUSY;
	}

	rc = nvdimm->sec.ops->freeze(nvdimm);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);

	return rc;
}

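/*
 * Per-DIMM allocation granularity: the region alignment divided evenly
 * across the region's mappings.
 */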
static unsigned long dpa_align(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;

	if (dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev),
				"bus lock required for capacity provision\n"))
		return 0;
	if (dev_WARN_ONCE(dev, !nd_region->ndr_mappings || nd_region->align
				% nd_region->ndr_mappings,
				"invalid region align %#lx mappings: %d\n",
				nd_region->align, nd_region->ndr_mappings))
		return 0;
	return nd_region->align / nd_region->ndr_mappings;
}

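/*
 * device_for_each_child() callback: for each PMEM region that maps this
 * DIMM, push the candidate BLK start address past any aliased pmem
 * allocations, since BLK capacity is only valid after all aliased PMEM.
 */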
int alias_dpa_busy(struct device *dev, void *data)
{
	resource_size_t map_end, blk_start, new;
	struct blk_alloc_info *info = data;
	struct nd_mapping *nd_mapping;
	struct nd_region *nd_region;
	struct nvdimm_drvdata *ndd;
	struct resource *res;
	unsigned long align;
	int i;

	if (!is_memory(dev))
		return 0;

	nd_region = to_nd_region(dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping  = &nd_region->mapping[i];
		if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
			break;
	}

	if (i >= nd_region->ndr_mappings)
		return 0;

	ndd = to_ndd(nd_mapping);
	map_end = nd_mapping->start + nd_mapping->size - 1;
	blk_start = nd_mapping->start;

	/*
	 * In the allocation case ->res is set to free space that we are
	 * looking to validate against PMEM aliasing collision rules
	 * (i.e. BLK is allocated after all aliased PMEM).
	 */
	if (info->res) {
		if (info->res->start >= nd_mapping->start
				&& info->res->start < map_end)
			/* pass */;
		else
			return 0;
	}

 retry:
	/*
	 * Find the free dpa from the end of the last pmem allocation to
	 * the end of the interleave-set mapping.
	 */
	align = dpa_align(nd_region);
	if (!align)
		return 0;

	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end;

		if (strncmp(res->name, "pmem", 4) != 0)
			continue;

		start = ALIGN_DOWN(res->start, align);
		end = ALIGN(res->end + 1, align) - 1;
		if ((start >= blk_start && start < map_end)
				|| (end >= blk_start && end <= map_end)) {
			new = max(blk_start, min(map_end, end) + 1);
			if (new != blk_start) {
				blk_start = new;
				goto retry;
			}
		}
	}

	/* update the free space range with the probed blk_start */
	if (info->res && blk_start > info->res->start) {
		info->res->start = max(info->res->start, blk_start);
		if (info->res->start > info->res->end)
			info->res->end = info->res->start - 1;
		return 1;
	}

	info->available -= blk_start - nd_mapping->start;

	return 0;
}

/**
 * nd_blk_available_dpa - account the unused dpa of BLK region
 * @nd_region: region whose unused BLK capacity is being accounted
 *
 * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges, but
 * we arrange for them to never start at a lower dpa than the last
 * PMEM allocation in an aliased region.
 */
resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct blk_alloc_info info = {
		.nd_mapping = nd_mapping,
		.available = nd_mapping->size,
		.res = NULL,
	};
	struct resource *res;
	unsigned long align;

	if (!ndd)
		return 0;

	device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);

	/* now account for busy blk allocations in unaliased dpa */
	align = dpa_align(nd_region);
	if (!align)
		return 0;
	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end, size;

		if (strncmp(res->name, "blk", 3) != 0)
			continue;
		start = ALIGN_DOWN(res->start, align);
		end = ALIGN(res->end + 1, align) - 1;
		size = end - start + 1;
		if (size >= info.available)
			return 0;
		info.available -= size;
	}

	return info.available;
}

/**
 * nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max
 *			   contiguous unallocated dpa range.
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 */
resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
					   struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nvdimm_bus *nvdimm_bus;
	resource_size_t max = 0;
	struct resource *res;
	unsigned long align;

	/* if a dimm is disabled the available capacity is zero */
	if (!ndd)
		return 0;

	align = dpa_align(nd_region);
	if (!align)
		return 0;

	nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm))
		return 0;
	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end;

		if (strcmp(res->name, "pmem-reserve") != 0)
			continue;
		/* trim free space relative to current alignment setting */
		start = ALIGN(res->start, align);
		end = ALIGN_DOWN(res->end + 1, align) - 1;
		if (end < start)
			continue;
		if (end - start + 1 > max)
			max = end - start + 1;
	}
	release_free_pmem(nvdimm_bus, nd_mapping);
	return max;
}

/**
 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
 * @nd_mapping: container of dpa-resource-root + labels
 * @nd_region: constrain available space check to this reference region
 * @overlap: calculate available space assuming this level of overlap
 *
 * Validate that a PMEM label, if present, aligns with the start of an
 * interleave set and truncate the available size at the lowest BLK
 * overlap point.
 *
 * The expectation is that this routine is called multiple times as it
 * probes for the largest BLK encroachment for any single member DIMM of
 * the interleave set.  Once that value is determined the PMEM-limit for
 * the set can be established.
 */
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, resource_size_t *overlap)
{
	resource_size_t map_start, map_end, busy = 0, available, blk_start;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res;
	const char *reason;
	unsigned long align;

	if (!ndd)
		return 0;

	align = dpa_align(nd_region);
	if (!align)
		return 0;

	map_start = nd_mapping->start;
	map_end = map_start + nd_mapping->size - 1;
	blk_start = max(map_start, map_end + 1 - *overlap);
	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end;

		start = ALIGN_DOWN(res->start, align);
		end = ALIGN(res->end + 1, align) - 1;
		if (start >= map_start && start < map_end) {
			if (strncmp(res->name, "blk", 3) == 0)
				blk_start = min(blk_start,
						max(map_start, start));
			else if (end > map_end) {
				reason = "misaligned to iset";
				goto err;
			} else
				busy += end - start + 1;
		} else if (end >= map_start && end <= map_end) {
			if (strncmp(res->name, "blk", 3) == 0) {
				/*
				 * If a BLK allocation overlaps the start of
				 * PMEM the entire interleave set may now only
				 * be used for BLK.
				 */
				blk_start = map_start;
			} else
				busy += end - start + 1;
		} else if (map_start > start && map_start < end) {
			/* total eclipse of the mapping */
			busy += nd_mapping->size;
			blk_start = map_start;
		}
	}

	*overlap = map_end + 1 - blk_start;
	available = blk_start - map_start;
	if (busy < available)
		return ALIGN_DOWN(available - busy, align);
	return 0;

 err:
	nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason);
	return 0;
}

void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
{
	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	kfree(res->name);
	__release_region(&ndd->dpa, res->start, resource_size(res));
}

struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, resource_size_t start,
		resource_size_t n)
{
	char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
	struct resource *res;

	if (!name)
		return NULL;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	res = __request_region(&ndd->dpa, start, n, name, 0);
	if (!res)
		kfree(name);
	return res;
}

/**
 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
 * @ndd: container of dpa-resource-root + labels
 * @label_id: dpa resource name of the form {pmem|blk}-<human readable uuid>
 */
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id)
{
	resource_size_t allocated = 0;
	struct resource *res;

	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id->id) == 0)
			allocated += resource_size(res);

	return allocated;
}

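/* device_for_each_child() callback used by nvdimm_bus_check_dimm_count() */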
static int count_dimms(struct device *dev, void *c)
{
	int *count = c;

	if (is_nvdimm(dev))
		(*count)++;
	return 0;
}

int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
{
	int count = 0;
	/* Flush any possible dimm registration failures */
	nd_synchronize();

	device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
	dev_dbg(&nvdimm_bus->dev, "count: %d\n", count);
	if (count != dimm_count)
		return -ENXIO;
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);

void __exit nvdimm_devs_exit(void)
{
	ida_destroy(&dimm_ida);
}