Diffstat (limited to 'drivers')
-rw-r--r--  drivers/base/Kconfig            |   3
-rw-r--r--  drivers/base/dma-mapping.c      |  72
-rw-r--r--  drivers/base/memory.c           |  42
-rw-r--r--  drivers/base/node.c             |   3
-rw-r--r--  drivers/block/zram/zram_drv.c   | 106
-rw-r--r--  drivers/block/zram/zram_drv.h   |   6
-rw-r--r--  drivers/firmware/memmap.c       |   3
-rw-r--r--  drivers/virtio/Kconfig          |   1
-rw-r--r--  drivers/virtio/virtio_balloon.c |  76
9 files changed, 252 insertions(+), 60 deletions(-)
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 134f763d90fd..61a33f4ba608 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -252,6 +252,9 @@ config DMA_CMA
to allocate big physically-contiguous blocks of memory for use with
hardware components that do not support I/O map nor scatter-gather.
+ You can disable CMA by specifying "cma=0" on the kernel's command
+ line.
+
For more information see <include/linux/dma-contiguous.h>.
If unsure, say "n".
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 6cd08e145bfa..9e8bbdd470ca 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -10,6 +10,8 @@
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include <asm-generic/dma-coherent.h>
/*
@@ -267,3 +269,73 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
return ret;
}
EXPORT_SYMBOL(dma_common_mmap);
+
+#ifdef CONFIG_MMU
+/*
+ * remaps an array of PAGE_SIZE pages into another vm_area
+ * Cannot be used in non-sleeping contexts
+ */
+void *dma_common_pages_remap(struct page **pages, size_t size,
+ unsigned long vm_flags, pgprot_t prot,
+ const void *caller)
+{
+ struct vm_struct *area;
+
+ area = get_vm_area_caller(size, vm_flags, caller);
+ if (!area)
+ return NULL;
+
+ area->pages = pages;
+
+ if (map_vm_area(area, prot, pages)) {
+ vunmap(area->addr);
+ return NULL;
+ }
+
+ return area->addr;
+}
+
+/*
+ * remaps an allocated contiguous region into another vm_area.
+ * Cannot be used in non-sleeping contexts
+ */
+
+void *dma_common_contiguous_remap(struct page *page, size_t size,
+ unsigned long vm_flags,
+ pgprot_t prot, const void *caller)
+{
+ int i;
+ struct page **pages;
+ void *ptr;
+ unsigned long pfn;
+
+ pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
+ if (!pages)
+ return NULL;
+
+ for (i = 0, pfn = page_to_pfn(page); i < (size >> PAGE_SHIFT); i++)
+ pages[i] = pfn_to_page(pfn + i);
+
+ ptr = dma_common_pages_remap(pages, size, vm_flags, prot, caller);
+
+ kfree(pages);
+
+ return ptr;
+}
+
+/*
+ * unmaps a range previously mapped by dma_common_*_remap
+ */
+void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
+{
+ struct vm_struct *area = find_vm_area(cpu_addr);
+
+ if (!area || (area->flags & vm_flags) != vm_flags) {
+ WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
+ return;
+ }
+
+ unmap_kernel_range((unsigned long)cpu_addr, size);
+ vunmap(cpu_addr);
+}
+#endif
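
A sketch of how architecture code might consume the two helpers added above (the wrapper names and the VM_USERMAP flag choice are illustrative assumptions, not part of this patch; arm, for instance, additionally tags its areas with an arch-private VM flag so the free path can validate them):

/*
 * Hypothetical arch-side wrappers (a sketch, not taken from this patch):
 * remap a physically contiguous DMA buffer with the requested pgprot,
 * and tear the mapping down again on free.
 */
static void *arch_dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	/* VM_USERMAP is an assumed flag choice for this sketch. */
	return dma_common_contiguous_remap(page, size, VM_USERMAP, prot,
					   __builtin_return_address(0));
}

static void arch_dma_unremap(void *cpu_addr, size_t size)
{
	/* WARNs and bails out if cpu_addr was not mapped with VM_USERMAP. */
	dma_common_free_remap(cpu_addr, size, VM_USERMAP);
}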
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index a2e13e250bba..7c5d87191b28 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -373,6 +373,45 @@ static ssize_t show_phys_device(struct device *dev,
return sprintf(buf, "%d\n", mem->phys_device);
}
+#ifdef CONFIG_MEMORY_HOTREMOVE
+static ssize_t show_valid_zones(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct memory_block *mem = to_memory_block(dev);
+ unsigned long start_pfn, end_pfn;
+ unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
+ struct page *first_page;
+ struct zone *zone;
+
+ start_pfn = section_nr_to_pfn(mem->start_section_nr);
+ end_pfn = start_pfn + nr_pages;
+ first_page = pfn_to_page(start_pfn);
+
+	/* A block that contains more than one zone cannot be offlined. */
+ if (!test_pages_in_a_zone(start_pfn, end_pfn))
+ return sprintf(buf, "none\n");
+
+ zone = page_zone(first_page);
+
+ if (zone_idx(zone) == ZONE_MOVABLE - 1) {
+		/* The mem block is the last memory block of this zone. */
+ if (end_pfn == zone_end_pfn(zone))
+ return sprintf(buf, "%s %s\n",
+ zone->name, (zone + 1)->name);
+ }
+
+ if (zone_idx(zone) == ZONE_MOVABLE) {
+		/* The mem block is the first memory block of ZONE_MOVABLE. */
+ if (start_pfn == zone->zone_start_pfn)
+ return sprintf(buf, "%s %s\n",
+ zone->name, (zone - 1)->name);
+ }
+
+ return sprintf(buf, "%s\n", zone->name);
+}
+static DEVICE_ATTR(valid_zones, 0444, show_valid_zones, NULL);
+#endif
+
static DEVICE_ATTR(phys_index, 0444, show_mem_start_phys_index, NULL);
static DEVICE_ATTR(state, 0644, show_mem_state, store_mem_state);
static DEVICE_ATTR(phys_device, 0444, show_phys_device, NULL);
@@ -523,6 +562,9 @@ static struct attribute *memory_memblk_attrs[] = {
&dev_attr_state.attr,
&dev_attr_phys_device.attr,
&dev_attr_removable.attr,
+#ifdef CONFIG_MEMORY_HOTREMOVE
+ &dev_attr_valid_zones.attr,
+#endif
NULL
};
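
With the attribute wired up, userspace can ask which zone(s) a memory block could be onlined into before attempting it. A hypothetical session (block number and output are illustrative):

	$ cat /sys/devices/system/memory/memory32/valid_zones
	Normal Movable

A block whose pages span more than one zone reports "none", since such a block can never be offlined.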
diff --git a/drivers/base/node.c b/drivers/base/node.c
index d51c49c9bafa..472168cd0c97 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -289,8 +289,6 @@ static int register_node(struct node *node, int num, struct node *parent)
device_create_file(&node->dev, &dev_attr_distance);
device_create_file(&node->dev, &dev_attr_vmstat);
- scan_unevictable_register_node(node);
-
hugetlb_register_node(node);
compaction_register_node(node);
@@ -314,7 +312,6 @@ void unregister_node(struct node *node)
device_remove_file(&node->dev, &dev_attr_distance);
device_remove_file(&node->dev, &dev_attr_vmstat);
- scan_unevictable_unregister_node(node);
hugetlb_unregister_node(node); /* no-op, if memoryless node */
device_unregister(&node->dev);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index d00831c3d731..3b850164c65c 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -103,10 +103,10 @@ static ssize_t mem_used_total_show(struct device *dev,
down_read(&zram->init_lock);
if (init_done(zram))
- val = zs_get_total_size_bytes(meta->mem_pool);
+ val = zs_get_total_pages(meta->mem_pool);
up_read(&zram->init_lock);
- return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
+ return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}
static ssize_t max_comp_streams_show(struct device *dev,
@@ -122,6 +122,72 @@ static ssize_t max_comp_streams_show(struct device *dev,
return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}
+static ssize_t mem_limit_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u64 val;
+ struct zram *zram = dev_to_zram(dev);
+
+ down_read(&zram->init_lock);
+ val = zram->limit_pages;
+ up_read(&zram->init_lock);
+
+ return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
+}
+
+static ssize_t mem_limit_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ u64 limit;
+ char *tmp;
+ struct zram *zram = dev_to_zram(dev);
+
+ limit = memparse(buf, &tmp);
+ if (buf == tmp) /* no chars parsed, invalid input */
+ return -EINVAL;
+
+ down_write(&zram->init_lock);
+ zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
+ up_write(&zram->init_lock);
+
+ return len;
+}
+
+static ssize_t mem_used_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u64 val = 0;
+ struct zram *zram = dev_to_zram(dev);
+
+ down_read(&zram->init_lock);
+ if (init_done(zram))
+ val = atomic_long_read(&zram->stats.max_used_pages);
+ up_read(&zram->init_lock);
+
+ return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
+}
+
+static ssize_t mem_used_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ int err;
+ unsigned long val;
+ struct zram *zram = dev_to_zram(dev);
+ struct zram_meta *meta = zram->meta;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err || val != 0)
+ return -EINVAL;
+
+ down_read(&zram->init_lock);
+ if (init_done(zram))
+ atomic_long_set(&zram->stats.max_used_pages,
+ zs_get_total_pages(meta->mem_pool));
+ up_read(&zram->init_lock);
+
+ return len;
+}
+
static ssize_t max_comp_streams_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
@@ -434,6 +500,21 @@ out_cleanup:
return ret;
}
+static inline void update_used_max(struct zram *zram,
+ const unsigned long pages)
+{
+	unsigned long old_max, cur_max;	/* long: atomic_long_cmpxchg() traffics in longs */
+
+ old_max = atomic_long_read(&zram->stats.max_used_pages);
+
+ do {
+ cur_max = old_max;
+ if (pages > cur_max)
+ old_max = atomic_long_cmpxchg(
+ &zram->stats.max_used_pages, cur_max, pages);
+ } while (old_max != cur_max);
+}
+
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
int offset)
{
@@ -445,6 +526,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
struct zram_meta *meta = zram->meta;
struct zcomp_strm *zstrm;
bool locked = false;
+ unsigned long alloced_pages;
page = bvec->bv_page;
if (is_partial_io(bvec)) {
@@ -513,6 +595,16 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
ret = -ENOMEM;
goto out;
}
+
+ alloced_pages = zs_get_total_pages(meta->mem_pool);
+ if (zram->limit_pages && alloced_pages > zram->limit_pages) {
+ zs_free(meta->mem_pool, handle);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ update_used_max(zram, alloced_pages);
+
cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
@@ -606,6 +698,7 @@ static void zram_bio_discard(struct zram *zram, u32 index,
bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
zram_free_page(zram, index);
bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+ atomic64_inc(&zram->stats.notify_free);
index++;
n -= PAGE_SIZE;
}
@@ -617,6 +710,9 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
struct zram_meta *meta;
down_write(&zram->init_lock);
+
+ zram->limit_pages = 0;
+
if (!init_done(zram)) {
up_write(&zram->init_lock);
return;
@@ -857,6 +953,10 @@ static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);
+static DEVICE_ATTR(mem_limit, S_IRUGO | S_IWUSR, mem_limit_show,
+ mem_limit_store);
+static DEVICE_ATTR(mem_used_max, S_IRUGO | S_IWUSR, mem_used_max_show,
+ mem_used_max_store);
static DEVICE_ATTR(max_comp_streams, S_IRUGO | S_IWUSR,
max_comp_streams_show, max_comp_streams_store);
static DEVICE_ATTR(comp_algorithm, S_IRUGO | S_IWUSR,
@@ -885,6 +985,8 @@ static struct attribute *zram_disk_attrs[] = {
&dev_attr_orig_data_size.attr,
&dev_attr_compr_data_size.attr,
&dev_attr_mem_used_total.attr,
+ &dev_attr_mem_limit.attr,
+ &dev_attr_mem_used_max.attr,
&dev_attr_max_comp_streams.attr,
&dev_attr_comp_algorithm.attr,
NULL,
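
Together the two new attributes give an administrator a soft cap and a resettable high-water mark. A hypothetical session (device name and sizes are illustrative; per the kstrtoul() check above, 0 is the only value mem_used_max accepts, and writing it re-arms the watermark at the current usage):

	# echo 512M > /sys/block/zram0/mem_limit
	# cat /sys/block/zram0/mem_used_max
	41943040
	# echo 0 > /sys/block/zram0/mem_used_max

Once zs_get_total_pages() exceeds limit_pages, zram_bvec_write() frees the freshly allocated handle and fails the write with -ENOMEM.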
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index e0f725c87cc6..c6ee271317f5 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -90,6 +90,7 @@ struct zram_stats {
atomic64_t notify_free; /* no. of swap slot free notifications */
atomic64_t zero_pages; /* no. of zero filled pages */
atomic64_t pages_stored; /* no. of pages currently stored */
+	atomic_long_t max_used_pages;	/* maximum no. of pages stored */
};
struct zram_meta {
@@ -112,6 +113,11 @@ struct zram {
u64 disksize; /* bytes */
int max_comp_streams;
struct zram_stats stats;
+ /*
+ * the number of pages zram can consume for storing compressed data
+ */
+ unsigned long limit_pages;
+
char compressor[10];
};
#endif
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
index 79f18e6d9c4f..cc016c615c19 100644
--- a/drivers/firmware/memmap.c
+++ b/drivers/firmware/memmap.c
@@ -184,6 +184,9 @@ static int add_sysfs_fw_map_entry(struct firmware_map_entry *entry)
static int map_entries_nr;
static struct kset *mmap_kset;
+ if (entry->kobj.state_in_sysfs)
+ return -EEXIST;
+
if (!mmap_kset) {
mmap_kset = kset_create_and_add("memmap", NULL, firmware_kobj);
if (!mmap_kset)
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index c6683f2e396c..00b228638274 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -25,6 +25,7 @@ config VIRTIO_PCI
config VIRTIO_BALLOON
tristate "Virtio balloon driver"
depends on VIRTIO
+ select MEMORY_BALLOON
---help---
This driver supports increasing and decreasing the amount
of memory within a KVM guest.
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 25ebe8eecdb7..f893148a107b 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -59,7 +59,7 @@ struct virtio_balloon
* Each page on this list adds VIRTIO_BALLOON_PAGES_PER_PAGE
* to num_pages above.
*/
- struct balloon_dev_info *vb_dev_info;
+ struct balloon_dev_info vb_dev_info;
/* Synchronize access/update to this struct virtio_balloon elements */
struct mutex balloon_lock;
@@ -127,7 +127,7 @@ static void set_page_pfns(u32 pfns[], struct page *page)
static void fill_balloon(struct virtio_balloon *vb, size_t num)
{
- struct balloon_dev_info *vb_dev_info = vb->vb_dev_info;
+ struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info;
/* We can only do one array worth at a time. */
num = min(num, ARRAY_SIZE(vb->pfns));
@@ -163,15 +163,15 @@ static void release_pages_by_pfn(const u32 pfns[], unsigned int num)
/* Find pfns pointing at start of each page, get pages and free them. */
for (i = 0; i < num; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
struct page *page = balloon_pfn_to_page(pfns[i]);
- balloon_page_free(page);
adjust_managed_page_count(page, 1);
+ put_page(page); /* balloon reference */
}
}
static void leak_balloon(struct virtio_balloon *vb, size_t num)
{
struct page *page;
- struct balloon_dev_info *vb_dev_info = vb->vb_dev_info;
+ struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info;
/* We can only do one array worth at a time. */
num = min(num, ARRAY_SIZE(vb->pfns));
@@ -353,12 +353,11 @@ static int init_vqs(struct virtio_balloon *vb)
return 0;
}
-static const struct address_space_operations virtio_balloon_aops;
#ifdef CONFIG_BALLOON_COMPACTION
/*
* virtballoon_migratepage - perform the balloon page migration on behalf of
* a compaction thread. (called under page lock)
- * @mapping: the page->mapping which will be assigned to the new migrated page.
+ * @vb_dev_info: the balloon device
* @newpage: page that will replace the isolated page after migration finishes.
* @page : the isolated (old) page that is about to be migrated to newpage.
* @mode : compaction mode -- not used for balloon page migration.
@@ -373,17 +372,13 @@ static const struct address_space_operations virtio_balloon_aops;
* This function performs the balloon page migration task.
* Called through balloon_mapping->a_ops->migratepage
*/
-static int virtballoon_migratepage(struct address_space *mapping,
+static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
struct page *newpage, struct page *page, enum migrate_mode mode)
{
- struct balloon_dev_info *vb_dev_info = balloon_page_device(page);
- struct virtio_balloon *vb;
+ struct virtio_balloon *vb = container_of(vb_dev_info,
+ struct virtio_balloon, vb_dev_info);
unsigned long flags;
- BUG_ON(!vb_dev_info);
-
- vb = vb_dev_info->balloon_device;
-
/*
* In order to avoid lock contention while migrating pages concurrently
* to leak_balloon() or fill_balloon() we just give up the balloon_lock
@@ -395,21 +390,19 @@ static int virtballoon_migratepage(struct address_space *mapping,
if (!mutex_trylock(&vb->balloon_lock))
return -EAGAIN;
+ get_page(newpage); /* balloon reference */
+
/* balloon's page migration 1st step -- inflate "newpage" */
spin_lock_irqsave(&vb_dev_info->pages_lock, flags);
- balloon_page_insert(newpage, mapping, &vb_dev_info->pages);
+ balloon_page_insert(vb_dev_info, newpage);
vb_dev_info->isolated_pages--;
+ __count_vm_event(BALLOON_MIGRATE);
spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
set_page_pfns(vb->pfns, newpage);
tell_host(vb, vb->inflate_vq);
- /*
- * balloon's page migration 2nd step -- deflate "page"
- *
- * It's safe to delete page->lru here because this page is at
- * an isolated migration list, and this step is expected to happen here
- */
+ /* balloon's page migration 2nd step -- deflate "page" */
balloon_page_delete(page);
vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
set_page_pfns(vb->pfns, page);
@@ -417,20 +410,15 @@ static int virtballoon_migratepage(struct address_space *mapping,
mutex_unlock(&vb->balloon_lock);
- return MIGRATEPAGE_BALLOON_SUCCESS;
-}
+ put_page(page); /* balloon reference */
-/* define the balloon_mapping->a_ops callback to allow balloon page migration */
-static const struct address_space_operations virtio_balloon_aops = {
- .migratepage = virtballoon_migratepage,
-};
+ return MIGRATEPAGE_SUCCESS;
+}
#endif /* CONFIG_BALLOON_COMPACTION */
static int virtballoon_probe(struct virtio_device *vdev)
{
struct virtio_balloon *vb;
- struct address_space *vb_mapping;
- struct balloon_dev_info *vb_devinfo;
int err;
vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL);
@@ -446,30 +434,14 @@ static int virtballoon_probe(struct virtio_device *vdev)
vb->vdev = vdev;
vb->need_stats_update = 0;
- vb_devinfo = balloon_devinfo_alloc(vb);
- if (IS_ERR(vb_devinfo)) {
- err = PTR_ERR(vb_devinfo);
- goto out_free_vb;
- }
-
- vb_mapping = balloon_mapping_alloc(vb_devinfo,
- (balloon_compaction_check()) ?
- &virtio_balloon_aops : NULL);
- if (IS_ERR(vb_mapping)) {
- /*
- * IS_ERR(vb_mapping) && PTR_ERR(vb_mapping) == -EOPNOTSUPP
- * This means !CONFIG_BALLOON_COMPACTION, otherwise we get off.
- */
- err = PTR_ERR(vb_mapping);
- if (err != -EOPNOTSUPP)
- goto out_free_vb_devinfo;
- }
-
- vb->vb_dev_info = vb_devinfo;
+ balloon_devinfo_init(&vb->vb_dev_info);
+#ifdef CONFIG_BALLOON_COMPACTION
+ vb->vb_dev_info.migratepage = virtballoon_migratepage;
+#endif
err = init_vqs(vb);
if (err)
- goto out_free_vb_mapping;
+ goto out_free_vb;
vb->thread = kthread_run(balloon, vb, "vballoon");
if (IS_ERR(vb->thread)) {
@@ -481,10 +453,6 @@ static int virtballoon_probe(struct virtio_device *vdev)
out_del_vqs:
vdev->config->del_vqs(vdev);
-out_free_vb_mapping:
- balloon_mapping_free(vb_mapping);
-out_free_vb_devinfo:
- balloon_devinfo_free(vb_devinfo);
out_free_vb:
kfree(vb);
out:
@@ -510,8 +478,6 @@ static void virtballoon_remove(struct virtio_device *vdev)
kthread_stop(vb->thread);
remove_common(vb);
- balloon_mapping_free(vb->vb_dev_info->mapping);
- balloon_devinfo_free(vb->vb_dev_info);
kfree(vb);
}
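
For other balloon drivers following the same conversion, the pattern this patch lands on is: embed a struct balloon_dev_info, initialize it, and hook the migratepage callback directly. A condensed sketch of that consumer pattern (the my_* names and the num_migrated field are illustrative; only the balloon_* API and field names come from the patch):

struct my_balloon {
	struct balloon_dev_info b_dev_info;	/* embedded, no separate allocation */
	unsigned long num_migrated;		/* illustrative driver state */
};

static int my_migratepage(struct balloon_dev_info *b_dev_info,
			  struct page *newpage, struct page *page,
			  enum migrate_mode mode)
{
	struct my_balloon *b = container_of(b_dev_info,
					    struct my_balloon, b_dev_info);
	unsigned long flags;

	get_page(newpage);	/* balloon reference, as in the patch above */
	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	balloon_page_insert(b_dev_info, newpage);
	b_dev_info->isolated_pages--;
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);

	/* ... tell the device about newpage, then retire the old page ... */
	balloon_page_delete(page);
	b->num_migrated++;
	put_page(page);		/* drop the old page's balloon reference */
	return MIGRATEPAGE_SUCCESS;
}

static void my_balloon_setup(struct my_balloon *b)
{
	balloon_devinfo_init(&b->b_dev_info);
#ifdef CONFIG_BALLOON_COMPACTION
	b->b_dev_info.migratepage = my_migratepage;
#endif
}

No balloon_mapping_alloc()/balloon_devinfo_free() pairing remains; the devinfo simply lives and dies with the structure that embeds it.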