Merge tag 'libnvdimm-for-4.9' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm

Pull libnvdimm updates from Dan Williams:
 "Aside from the recently added pmem sub-division support these have
  been in -next for several releases with no reported issues. The sub-
  division support was included in next-20161010 with no reported
  issues. It passes all unit tests including new tests for all the new
  functionality below.

  Summary:

   - PMEM sub-division support: Allow a single PMEM region to be divided
     into multiple namespaces. Originally, ~2 years ago, it was thought
     that partitions of a /dev/pmemX block device could handle
     sub-allocations of persistent memory for different use cases. With
     the decision to not support DAX mappings of raw block-devices, and
     the genesis of device-dax, the need for having multiple
     pmem-namespaces per region has grown.

   - Device-DAX unified inode: In support of dynamic resizing of a
     device-dax instance, the kernel arranges for all mappings of a
     device-dax node to share the same inode. This allows unmap /
     truncate / invalidation events to affect all instances of the
     device similar to the behavior of mmap on block devices.

   - Hardware error scrubbing reworks: The original address-range-scrub
     and badblocks tracking solution allowed clearing entries at the
     individual namespace level, but it failed to clear the internal
     list of media errors maintained at the bus level. The result was
     that the next scrub or namespace disable/re-enable event would
     restore the cleared badblocks; that is now fixed. The v4.8
     kernel introduced an auto-scrub-on-machine-check behavior to
     repopulate the badblocks list. Now, in v4.9, the auto-scrub
     behavior can be disabled so that only the error reported in the
     machine-check is added to the list (a usage sketch of the new
     sysfs control follows this message).

   - DIMM health-event notification support: ACPI 6.1 defines a
     notification event code that can be sent to ACPI NVDIMM devices. A
     poll(2)-capable file descriptor for these events can be obtained
     from the nmemX/nfit/flags sysfs attribute of a libnvdimm memory
     device (a poll(2) sketch follows this message).

   - Miscellaneous fixes: NVDIMM-N probe error, device-dax build error,
     and a change to deduplicate the flush hint list so the memory
     controller is not flushed more than necessary"
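
A minimal userspace sketch of the new hw_error_scrub control mentioned above; the sysfs path is illustrative (it assumes the nfit attribute group of a first nvdimm bus at /sys/bus/nd/devices/ndbus0/nfit/), and "0"/"1" correspond to HW_ERROR_SCRUB_OFF/HW_ERROR_SCRUB_ON in the diff below:

/* Sketch: select the hw_error_scrub mode; path and device name are assumptions. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        const char *path = "/sys/bus/nd/devices/ndbus0/nfit/hw_error_scrub";
        const char *val = (argc > 1) ? argv[1] : "0";   /* "0" = list-only, "1" = full scrub */
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror("open hw_error_scrub");
                return 1;
        }
        if (write(fd, val, strlen(val)) < 0) {
                perror("write hw_error_scrub");
                close(fd);
                return 1;
        }
        close(fd);
        return 0;
}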
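
A minimal sketch of waiting for a DIMM health event via the poll(2)-capable nmemX/nfit/flags attribute; the nmem0 path is an assumption for a first memory device. sysfs_notify_dirent() wakes pollers with POLLERR/POLLPRI, so the attribute is read once to arm the wait and re-read after poll() returns:

/* Sketch: block until a health event fires, then print the updated flags. */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[64];
        struct pollfd pfd;
        int fd = open("/sys/bus/nd/devices/nmem0/nfit/flags", O_RDONLY);

        if (fd < 0) {
                perror("open nmem0/nfit/flags");
                return 1;
        }
        read(fd, buf, sizeof(buf));             /* initial read arms the notification */

        pfd.fd = fd;
        pfd.events = POLLERR | POLLPRI;
        if (poll(&pfd, 1, -1) > 0) {
                ssize_t n;

                lseek(fd, 0, SEEK_SET);         /* rewind and re-read the new value */
                n = read(fd, buf, sizeof(buf) - 1);
                if (n > 0) {
                        buf[n] = '\0';
                        printf("health event, flags: %s", buf);
                }
        }
        close(fd);
        return 0;
}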

* tag 'libnvdimm-for-4.9' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm: (39 commits)
  /dev/dax: fix Kconfig dependency build breakage
  dax: use correct dev_t value
  dax: convert devm_create_dax_dev to PTR_ERR
  libnvdimm, namespace: allow creation of multiple pmem-namespaces per region
  libnvdimm, namespace: lift single pmem limit in scan_labels()
  libnvdimm, namespace: filter out of range labels in scan_labels()
  libnvdimm, namespace: enable allocation of multiple pmem namespaces
  libnvdimm, namespace: update label implementation for multi-pmem
  libnvdimm, namespace: expand pmem device naming scheme for multi-pmem
  libnvdimm, region: update nd_region_available_dpa() for multi-pmem support
  libnvdimm, namespace: sort namespaces by dpa at init
  libnvdimm, namespace: allow multiple pmem-namespaces per region at scan time
  tools/testing/nvdimm: support for sub-dividing a pmem region
  libnvdimm, namespace: unify blk and pmem label scanning
  libnvdimm, namespace: refactor uuid_show() into a namespace_to_uuid() helper
  libnvdimm, label: convert label tracking to a linked list
  libnvdimm, region: move region-mapping input-paramters to nd_mapping_desc
  nvdimm: reduce duplicated wpq flushes
  libnvdimm: clear the internal poison_list when clearing badblocks
  pmem: reduce kmap_atomic sections to the memcpys only
  ...
torvalds committed Oct 11, 2016
2 parents f29135b + e476f94 commit d09ba13
Showing 27 changed files with 1,940 additions and 751 deletions.
210 changes: 180 additions & 30 deletions drivers/acpi/nfit/core.c
@@ -886,6 +886,58 @@ static ssize_t revision_show(struct device *dev,
}
static DEVICE_ATTR_RO(revision);

static ssize_t hw_error_scrub_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

return sprintf(buf, "%d\n", acpi_desc->scrub_mode);
}

/*
* The 'hw_error_scrub' attribute can have the following values written to it:
* '0': Switch to the default mode where an exception will only insert
* the address of the memory error into the poison and badblocks lists.
* '1': Enable a full scrub to happen if an exception for a memory error is
* received.
*/
static ssize_t hw_error_scrub_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct nvdimm_bus_descriptor *nd_desc;
ssize_t rc;
long val;

rc = kstrtol(buf, 0, &val);
if (rc)
return rc;

device_lock(dev);
nd_desc = dev_get_drvdata(dev);
if (nd_desc) {
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

switch (val) {
case HW_ERROR_SCRUB_ON:
acpi_desc->scrub_mode = HW_ERROR_SCRUB_ON;
break;
case HW_ERROR_SCRUB_OFF:
acpi_desc->scrub_mode = HW_ERROR_SCRUB_OFF;
break;
default:
rc = -EINVAL;
break;
}
}
device_unlock(dev);
if (rc)
return rc;
return size;
}
static DEVICE_ATTR_RW(hw_error_scrub);

/*
* This shows the number of full Address Range Scrubs that have been
* completed since driver load time. Userspace can wait on this using
@@ -958,6 +1010,7 @@ static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n)
static struct attribute *acpi_nfit_attributes[] = {
&dev_attr_revision.attr,
&dev_attr_scrub.attr,
&dev_attr_hw_error_scrub.attr,
NULL,
};

@@ -1256,6 +1309,44 @@ static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
return NULL;
}

void __acpi_nvdimm_notify(struct device *dev, u32 event)
{
struct nfit_mem *nfit_mem;
struct acpi_nfit_desc *acpi_desc;

dev_dbg(dev->parent, "%s: %s: event: %d\n", dev_name(dev), __func__,
event);

if (event != NFIT_NOTIFY_DIMM_HEALTH) {
dev_dbg(dev->parent, "%s: unknown event: %d\n", dev_name(dev),
event);
return;
}

acpi_desc = dev_get_drvdata(dev->parent);
if (!acpi_desc)
return;

/*
* If we successfully retrieved acpi_desc, then we know nfit_mem data
* is still valid.
*/
nfit_mem = dev_get_drvdata(dev);
if (nfit_mem && nfit_mem->flags_attr)
sysfs_notify_dirent(nfit_mem->flags_attr);
}
EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify);

static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data)
{
struct acpi_device *adev = data;
struct device *dev = &adev->dev;

device_lock(dev->parent);
__acpi_nvdimm_notify(dev, event);
device_unlock(dev->parent);
}

static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
struct nfit_mem *nfit_mem, u32 device_handle)
{
@@ -1280,6 +1371,13 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
return force_enable_dimms ? 0 : -ENODEV;
}

if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm->handle,
ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify, adev_dimm))) {
dev_err(dev, "%s: notification registration failed\n",
dev_name(&adev_dimm->dev));
return -ENXIO;
}

/*
* Until standardization materializes we need to consider 4
* different command sets. Note, that checking for function0 (bit0)
@@ -1318,18 +1416,41 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
return 0;
}

static void shutdown_dimm_notify(void *data)
{
struct acpi_nfit_desc *acpi_desc = data;
struct nfit_mem *nfit_mem;

mutex_lock(&acpi_desc->init_mutex);
/*
* Clear out the nfit_mem->flags_attr and shut down dimm event
* notifications.
*/
list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
struct acpi_device *adev_dimm = nfit_mem->adev;

if (nfit_mem->flags_attr) {
sysfs_put(nfit_mem->flags_attr);
nfit_mem->flags_attr = NULL;
}
if (adev_dimm)
acpi_remove_notify_handler(adev_dimm->handle,
ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify);
}
mutex_unlock(&acpi_desc->init_mutex);
}

static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{
struct nfit_mem *nfit_mem;
int dimm_count = 0;
int dimm_count = 0, rc;
struct nvdimm *nvdimm;

list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
struct acpi_nfit_flush_address *flush;
unsigned long flags = 0, cmd_mask;
struct nvdimm *nvdimm;
u32 device_handle;
u16 mem_flags;
int rc;

device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
@@ -1382,7 +1503,30 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)

}

return nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
rc = nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
if (rc)
return rc;

/*
* Now that dimms are successfully registered, and async registration
* is flushed, attempt to enable event notification.
*/
list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
struct kernfs_node *nfit_kernfs;

nvdimm = nfit_mem->nvdimm;
nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit");
if (nfit_kernfs)
nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs,
"flags");
sysfs_put(nfit_kernfs);
if (!nfit_mem->flags_attr)
dev_warn(acpi_desc->dev, "%s: notifications disabled\n",
nvdimm_name(nvdimm));
}

return devm_add_action_or_reset(acpi_desc->dev, shutdown_dimm_notify,
acpi_desc);
}

static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
@@ -1491,9 +1635,9 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
if (!info)
return -ENOMEM;
for (i = 0; i < nr; i++) {
struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
struct nfit_set_info_map *map = &info->mapping[i];
struct nvdimm *nvdimm = nd_mapping->nvdimm;
struct nvdimm *nvdimm = mapping->nvdimm;
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
spa->range_index, i);
@@ -1917,7 +2061,7 @@ static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
}

static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc,
struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc,
struct acpi_nfit_memory_map *memdev,
struct nfit_spa *nfit_spa)
{
@@ -1934,26 +2078,26 @@ static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
return -ENODEV;
}

nd_mapping->nvdimm = nvdimm;
mapping->nvdimm = nvdimm;
switch (nfit_spa_type(spa)) {
case NFIT_SPA_PM:
case NFIT_SPA_VOLATILE:
nd_mapping->start = memdev->address;
nd_mapping->size = memdev->region_size;
mapping->start = memdev->address;
mapping->size = memdev->region_size;
break;
case NFIT_SPA_DCR:
nfit_mem = nvdimm_provider_data(nvdimm);
if (!nfit_mem || !nfit_mem->bdw) {
dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
spa->range_index, nvdimm_name(nvdimm));
} else {
nd_mapping->size = nfit_mem->bdw->capacity;
nd_mapping->start = nfit_mem->bdw->start_address;
mapping->size = nfit_mem->bdw->capacity;
mapping->start = nfit_mem->bdw->start_address;
ndr_desc->num_lanes = nfit_mem->bdw->windows;
blk_valid = 1;
}

ndr_desc->nd_mapping = nd_mapping;
ndr_desc->mapping = mapping;
ndr_desc->num_mappings = blk_valid;
ndbr_desc = to_blk_region_desc(ndr_desc);
ndbr_desc->enable = acpi_nfit_blk_region_enable;
@@ -1979,7 +2123,7 @@ static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa)
static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
struct nfit_spa *nfit_spa)
{
static struct nd_mapping nd_mappings[ND_MAX_MAPPINGS];
static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS];
struct acpi_nfit_system_address *spa = nfit_spa->spa;
struct nd_blk_region_desc ndbr_desc;
struct nd_region_desc *ndr_desc;
@@ -1998,7 +2142,7 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
}

memset(&res, 0, sizeof(res));
memset(&nd_mappings, 0, sizeof(nd_mappings));
memset(&mappings, 0, sizeof(mappings));
memset(&ndbr_desc, 0, sizeof(ndbr_desc));
res.start = spa->address;
res.end = res.start + spa->length - 1;
@@ -2014,7 +2158,7 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,

list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
struct nd_mapping *nd_mapping;
struct nd_mapping_desc *mapping;

if (memdev->range_index != spa->range_index)
continue;
@@ -2023,14 +2167,14 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
spa->range_index, ND_MAX_MAPPINGS);
return -ENXIO;
}
nd_mapping = &nd_mappings[count++];
rc = acpi_nfit_init_mapping(acpi_desc, nd_mapping, ndr_desc,
mapping = &mappings[count++];
rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc,
memdev, nfit_spa);
if (rc)
goto out;
}

ndr_desc->nd_mapping = nd_mappings;
ndr_desc->mapping = mappings;
ndr_desc->num_mappings = count;
rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
if (rc)
@@ -2678,29 +2822,30 @@ static int acpi_nfit_remove(struct acpi_device *adev)
return 0;
}

static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
{
struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);
struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
struct device *dev = &adev->dev;
union acpi_object *obj;
acpi_status status;
int ret;

dev_dbg(dev, "%s: event: %d\n", __func__, event);

device_lock(dev);
if (event != NFIT_NOTIFY_UPDATE)
return;

if (!dev->driver) {
/* dev->driver may be null if we're being removed */
dev_dbg(dev, "%s: no driver found for dev\n", __func__);
goto out_unlock;
return;
}

if (!acpi_desc) {
acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
if (!acpi_desc)
goto out_unlock;
acpi_nfit_desc_init(acpi_desc, &adev->dev);
return;
acpi_nfit_desc_init(acpi_desc, dev);
} else {
/*
* Finish previous registration before considering new
@@ -2710,10 +2855,10 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
}

/* Evaluate _FIT */
status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
status = acpi_evaluate_object(handle, "_FIT", NULL, &buf);
if (ACPI_FAILURE(status)) {
dev_err(dev, "failed to evaluate _FIT\n");
goto out_unlock;
return;
}

obj = buf.pointer;
@@ -2725,9 +2870,14 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
} else
dev_err(dev, "Invalid _FIT\n");
kfree(buf.pointer);
}
EXPORT_SYMBOL_GPL(__acpi_nfit_notify);

out_unlock:
device_unlock(dev);
static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
{
device_lock(&adev->dev);
__acpi_nfit_notify(&adev->dev, adev->handle, event);
device_unlock(&adev->dev);
}

static const struct acpi_device_id acpi_nfit_ids[] = {
