add devm_memremap_pages
This behaves like devm_memremap except that it ensures we have page
structures available that can back the region.

Signed-off-by: Christoph Hellwig <hch@lst.de>
[djbw: catch attempts to remap RAM, drop flags]
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Christoph Hellwig authored and djbw committed Aug 27, 2015
1 parent 033fbae commit 41e94a8
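
For context, here is a hedged sketch (not part of the commit) of how a driver's probe path might consume the new helper. The function name my_pmem_probe and the use of platform_get_resource() are illustrative assumptions; only devm_memremap_pages() itself comes from this patch.

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>

static int my_pmem_probe(struct platform_device *pdev)
{
	struct resource *res;
	void *addr;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	/*
	 * As with devm_memremap(), the result is managed by devres and
	 * torn down when the device is unbound.  Unlike devm_memremap(),
	 * the range is also hot-added so that every pfn in it is backed
	 * by a struct page.
	 */
	addr = devm_memremap_pages(&pdev->dev, res);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* addr can now be handed to code that expects struct pages */
	return 0;
}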
Showing 2 changed files with 73 additions and 0 deletions.
20 changes: 20 additions & 0 deletions include/linux/io.h
@@ -20,10 +20,13 @@

#include <linux/types.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <asm/io.h>
#include <asm/page.h>

struct device;
struct resource;

__visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
@@ -84,6 +87,23 @@ void *devm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags);
void devm_memunmap(struct device *dev, void *addr);

void *__devm_memremap_pages(struct device *dev, struct resource *res);

#ifdef CONFIG_ZONE_DEVICE
void *devm_memremap_pages(struct device *dev, struct resource *res);
#else
static inline void *devm_memremap_pages(struct device *dev, struct resource *res)
{
	/*
	 * Fail attempts to call devm_memremap_pages() without
	 * ZONE_DEVICE support enabled, this requires callers to fall
	 * back to plain devm_memremap() based on config
	 */
	WARN_ON_ONCE(1);
	return ERR_PTR(-ENXIO);
}
#endif

/*
* Some systems do not have legacy ISA devices.
* /dev/port is not a valid interface on these systems.
53 changes: 53 additions & 0 deletions kernel/memremap.c
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>

#ifndef ioremap_cache
/* temporary while we convert existing ioremap_cache users to memremap */
@@ -135,3 +136,55 @@ void devm_memunmap(struct device *dev, void *addr)
	memunmap(addr);
}
EXPORT_SYMBOL(devm_memunmap);

#ifdef CONFIG_ZONE_DEVICE
struct page_map {
	struct resource res;
};

static void devm_memremap_pages_release(struct device *dev, void *res)
{
	struct page_map *page_map = res;

	/* pages are dead and unused, undo the arch mapping */
	arch_remove_memory(page_map->res.start, resource_size(&page_map->res));
}

void *devm_memremap_pages(struct device *dev, struct resource *res)
{
	int is_ram = region_intersects(res->start, resource_size(res),
			"System RAM");
	struct page_map *page_map;
	int error, nid;

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
				__func__, res);
		return ERR_PTR(-ENXIO);
	}

	if (is_ram == REGION_INTERSECTS)
		return __va(res->start);

	page_map = devres_alloc(devm_memremap_pages_release,
			sizeof(*page_map), GFP_KERNEL);
	if (!page_map)
		return ERR_PTR(-ENOMEM);

	memcpy(&page_map->res, res, sizeof(*res));

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = 0;

	error = arch_add_memory(nid, res->start, resource_size(res), true);
	if (error) {
		devres_free(page_map);
		return ERR_PTR(error);
	}

	devres_add(dev, page_map);
	return __va(res->start);
}
EXPORT_SYMBOL(devm_memremap_pages);
#endif /* CONFIG_ZONE_DEVICE */
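
The !CONFIG_ZONE_DEVICE stub added to io.h above WARNs once and returns ERR_PTR(-ENXIO); per its comment, callers that can work without page structures are expected to fall back to plain devm_memremap(). A hedged sketch of that fallback, where map_region() is a hypothetical helper and MEMREMAP_WB is assumed to be available from the memremap() API that devm_memremap() wraps:

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/err.h>

static void *map_region(struct device *dev, struct resource *res)
{
	void *addr = devm_memremap_pages(dev, res);

	/* the stub returns -ENXIO when ZONE_DEVICE support is not enabled */
	if (IS_ERR(addr) && PTR_ERR(addr) == -ENXIO)
		addr = devm_memremap(dev, res->start, resource_size(res),
				MEMREMAP_WB);
	return addr;
}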
