Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:

 - a few misc things

 - kexec updates

 - DMA-mapping updates to better support networking DMA operations

 - IPC updates

 - various MM changes to improve DAX fault handling

 - lots of radix-tree changes, mainly to the test suite. All leading up
   to reimplementing the IDA/IDR code to be a wrapper layer over the
   radix-tree. However the final trigger-pulling patch is held off for
   4.11.

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (114 commits)
  radix tree test suite: delete unused rcupdate.c
  radix tree test suite: add new tag check
  radix-tree: ensure counts are initialised
  radix tree test suite: cache recently freed objects
  radix tree test suite: add some more functionality
  idr: reduce the number of bits per level from 8 to 6
  rxrpc: abstract away knowledge of IDR internals
  tpm: use idr_find(), not idr_find_slowpath()
  idr: add ida_is_empty
  radix tree test suite: check multiorder iteration
  radix-tree: fix replacement for multiorder entries
  radix-tree: add radix_tree_split_preload()
  radix-tree: add radix_tree_split
  radix-tree: add radix_tree_join
  radix-tree: delete radix_tree_range_tag_if_tagged()
  radix-tree: delete radix_tree_locate_item()
  radix-tree: improve multiorder iterators
  btrfs: fix race in btrfs_free_dummy_fs_info()
  radix-tree: improve dump output
  radix-tree: make radix_tree_find_next_bit more useful
  ...
torvalds committed Dec 15, 2016
2 parents cf1b334 + e1e14ab commit a57cb1c
Showing 140 changed files with 3,428 additions and 2,218 deletions.
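
Nearly every hunk below applies the same idiom to an architecture's dma_map_ops implementation: perform the CPU cache maintenance only when the caller has not passed DMA_ATTR_SKIP_CPU_SYNC, so that callers such as the networking receive path this series targets can batch or skip those syncs themselves. The following is a condensed sketch of that shape, not code from any one architecture; arch_sync_for_device() is a placeholder standing in for whichever cache-maintenance helper each port actually calls.

#include <linux/dma-mapping.h>

/* Placeholder for the per-arch cache writeback/invalidate helper. */
void arch_sync_for_device(phys_addr_t paddr, size_t size,
			  enum dma_data_direction dir);

static dma_addr_t arch_dma_map_page(struct device *dev, struct page *page,
				    unsigned long offset, size_t size,
				    enum dma_data_direction dir,
				    unsigned long attrs)
{
	/* page_to_phys() comes from the arch headers, as in the hunks below. */
	phys_addr_t paddr = page_to_phys(page) + offset;

	/* Do the cache maintenance only when the caller did not opt out;
	 * the device-visible address is returned either way. */
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_for_device(paddr, size, dir);

	return (dma_addr_t)paddr;
}

The scatter-gather paths make the equivalent check inside (or just before) their for_each_sg() loops, and the unmap paths guard their sync-for-CPU calls the same way, as the hunks below show.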
2 changes: 1 addition & 1 deletion Documentation/filesystems/Locking
@@ -556,7 +556,7 @@ till "end_pgoff". ->map_pages() is called with page table locked and must
not block. If it's not possible to reach a page without blocking,
filesystem should skip it. Filesystem should use do_set_pte() to setup
page table entry. Pointer to entry associated with the page is passed in
"pte" field in fault_env structure. Pointers to entries for other offsets
"pte" field in vm_fault structure. Pointers to entries for other offsets
should be calculated relative to "pte".

->page_mkwrite() is called when a previously read-only pte is
5 changes: 4 additions & 1 deletion arch/arc/mm/dma.c
@@ -158,7 +158,10 @@ static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
unsigned long attrs)
{
phys_addr_t paddr = page_to_phys(page) + offset;
_dma_cache_sync(paddr, size, dir);

if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
_dma_cache_sync(paddr, size, dir);

return plat_phys_to_dma(dev, paddr);
}

16 changes: 10 additions & 6 deletions arch/arm/common/dmabounce.c
@@ -243,7 +243,8 @@ static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
}

static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
enum dma_data_direction dir)
enum dma_data_direction dir,
unsigned long attrs)
{
struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
struct safe_buffer *buf;
@@ -262,7 +263,8 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
buf->safe, buf->safe_dma_addr);

if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
if ((dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) &&
!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
__func__, ptr, buf->safe, size);
memcpy(buf->safe, ptr, size);
@@ -272,7 +274,8 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
}

static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
size_t size, enum dma_data_direction dir)
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
BUG_ON(buf->size != size);
BUG_ON(buf->direction != dir);
@@ -283,7 +286,8 @@ static inline void unmap_single(struct device *dev, struct safe_buffer *buf,

DO_STATS(dev->archdata.dmabounce->bounce_count++);

if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
if ((dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) &&
!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
void *ptr = buf->ptr;

dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
@@ -334,7 +338,7 @@ static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
return DMA_ERROR_CODE;
}

return map_single(dev, page_address(page) + offset, size, dir);
return map_single(dev, page_address(page) + offset, size, dir, attrs);
}

/*
@@ -357,7 +361,7 @@ static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t
return;
}

unmap_single(dev, buf, size, dir);
unmap_single(dev, buf, size, dir, attrs);
}

static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
7 changes: 6 additions & 1 deletion arch/avr32/mm/dma-coherent.c
@@ -146,7 +146,8 @@ static dma_addr_t avr32_dma_map_page(struct device *dev, struct page *page,
{
void *cpu_addr = page_address(page) + offset;

dma_cache_sync(dev, cpu_addr, size, direction);
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
dma_cache_sync(dev, cpu_addr, size, direction);
return virt_to_bus(cpu_addr);
}

@@ -162,6 +163,10 @@ static int avr32_dma_map_sg(struct device *dev, struct scatterlist *sglist,

sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
virt = sg_virt(sg);

if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
continue;

dma_cache_sync(dev, virt, sg->length, direction);
}

8 changes: 7 additions & 1 deletion arch/blackfin/kernel/dma-mapping.c
@@ -118,6 +118,10 @@ static int bfin_dma_map_sg(struct device *dev, struct scatterlist *sg_list,

for_each_sg(sg_list, sg, nents, i) {
sg->dma_address = (dma_addr_t) sg_virt(sg);

if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
continue;

__dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction);
}

@@ -143,7 +147,9 @@ static dma_addr_t bfin_dma_map_page(struct device *dev, struct page *page,
{
dma_addr_t handle = (dma_addr_t)(page_address(page) + offset);

_dma_sync(handle, size, dir);
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
_dma_sync(handle, size, dir);

return handle;
}

14 changes: 10 additions & 4 deletions arch/c6x/kernel/dma.c
@@ -42,14 +42,17 @@ static dma_addr_t c6x_dma_map_page(struct device *dev, struct page *page,
{
dma_addr_t handle = virt_to_phys(page_address(page) + offset);

c6x_dma_sync(handle, size, dir);
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
c6x_dma_sync(handle, size, dir);

return handle;
}

static void c6x_dma_unmap_page(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
c6x_dma_sync(handle, size, dir);
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
c6x_dma_sync(handle, size, dir);
}

static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -60,7 +63,8 @@ static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist,

for_each_sg(sglist, sg, nents, i) {
sg->dma_address = sg_phys(sg);
c6x_dma_sync(sg->dma_address, sg->length, dir);
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
c6x_dma_sync(sg->dma_address, sg->length, dir);
}

return nents;
@@ -72,9 +76,11 @@ static void c6x_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
struct scatterlist *sg;
int i;

if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
return;

for_each_sg(sglist, sg, nents, i)
c6x_dma_sync(sg_dma_address(sg), sg->length, dir);

}

static void c6x_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
14 changes: 10 additions & 4 deletions arch/frv/mb93090-mb00/pci-dma-nommu.c
@@ -109,16 +109,19 @@ static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction direction,
unsigned long attrs)
{
int i;
struct scatterlist *sg;
int i;

BUG_ON(direction == DMA_NONE);

if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
return nents;

for_each_sg(sglist, sg, nents, i) {
frv_cache_wback_inv(sg_dma_address(sg),
sg_dma_address(sg) + sg_dma_len(sg));
}

BUG_ON(direction == DMA_NONE);

return nents;
}

@@ -127,7 +130,10 @@ static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page,
enum dma_data_direction direction, unsigned long attrs)
{
BUG_ON(direction == DMA_NONE);
flush_dcache_page(page);

if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
flush_dcache_page(page);

return (dma_addr_t) page_to_phys(page) + offset;
}

9 changes: 7 additions & 2 deletions arch/frv/mb93090-mb00/pci-dma.c
@@ -40,13 +40,16 @@ static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction direction,
unsigned long attrs)
{
struct scatterlist *sg;
unsigned long dampr2;
void *vaddr;
int i;
struct scatterlist *sg;

BUG_ON(direction == DMA_NONE);

if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
return nents;

dampr2 = __get_DAMPR(2);

for_each_sg(sglist, sg, nents, i) {
@@ -70,7 +73,9 @@ static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction, unsigned long attrs)
{
flush_dcache_page(page);
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
flush_dcache_page(page);

return (dma_addr_t) page_to_phys(page) + offset;
}

6 changes: 5 additions & 1 deletion arch/hexagon/kernel/dma.c
@@ -119,6 +119,9 @@ static int hexagon_map_sg(struct device *hwdev, struct scatterlist *sg,

s->dma_length = s->length;

if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
continue;

flush_dcache_range(dma_addr_to_virt(s->dma_address),
dma_addr_to_virt(s->dma_address + s->length));
}
@@ -180,7 +183,8 @@ static dma_addr_t hexagon_map_page(struct device *dev, struct page *page,
if (!check_addr("map_single", dev, bus, size))
return bad_dma_address;

dma_sync(dma_addr_to_virt(bus), size, dir);
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
dma_sync(dma_addr_to_virt(bus), size, dir);

return bus;
}
8 changes: 7 additions & 1 deletion arch/m68k/kernel/dma.c
@@ -134,7 +134,9 @@ static dma_addr_t m68k_dma_map_page(struct device *dev, struct page *page,
{
dma_addr_t handle = page_to_phys(page) + offset;

dma_sync_single_for_device(dev, handle, size, dir);
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
dma_sync_single_for_device(dev, handle, size, dir);

return handle;
}

@@ -146,6 +148,10 @@ static int m68k_dma_map_sg(struct device *dev, struct scatterlist *sglist,

for_each_sg(sglist, sg, nents, i) {
sg->dma_address = sg_phys(sg);

if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
continue;

dma_sync_single_for_device(dev, sg->dma_address, sg->length,
dir);
}
16 changes: 13 additions & 3 deletions arch/metag/kernel/dma.c
@@ -484,16 +484,18 @@ static dma_addr_t metag_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction, unsigned long attrs)
{
dma_sync_for_device((void *)(page_to_phys(page) + offset), size,
direction);
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
dma_sync_for_device((void *)(page_to_phys(page) + offset),
size, direction);
return page_to_phys(page) + offset;
}

static void metag_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
size_t size, enum dma_data_direction direction,
unsigned long attrs)
{
dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
}

static int metag_dma_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -507,6 +509,10 @@ static int metag_dma_map_sg(struct device *dev, struct scatterlist *sglist,
BUG_ON(!sg_page(sg));

sg->dma_address = sg_phys(sg);

if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
continue;

dma_sync_for_device(sg_virt(sg), sg->length, direction);
}

@@ -525,6 +531,10 @@ static void metag_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
BUG_ON(!sg_page(sg));

sg->dma_address = sg_phys(sg);

if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
continue;

dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
}
}
10 changes: 8 additions & 2 deletions arch/microblaze/kernel/dma.c
@@ -61,6 +61,10 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
/* FIXME this part of code is untested */
for_each_sg(sgl, sg, nents, i) {
sg->dma_address = sg_phys(sg);

if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
continue;

__dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
sg->length, direction);
}
@@ -80,7 +84,8 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
enum dma_data_direction direction,
unsigned long attrs)
{
__dma_sync(page_to_phys(page) + offset, size, direction);
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
__dma_sync(page_to_phys(page) + offset, size, direction);
return page_to_phys(page) + offset;
}

@@ -95,7 +100,8 @@ static inline void dma_direct_unmap_page(struct device *dev,
* phys_to_virt is here because in __dma_sync_page is __virt_to_phys and
* dma_address is physical address
*/
__dma_sync(dma_address, size, direction);
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
__dma_sync(dma_address, size, direction);
}

static inline void
2 changes: 1 addition & 1 deletion arch/mips/loongson64/common/dma-swiotlb.c
@@ -61,7 +61,7 @@ static int loongson_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
unsigned long attrs)
{
int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, 0);
int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, attrs);
mb();

return r;
8 changes: 5 additions & 3 deletions arch/mips/mm/dma-default.c
@@ -293,7 +293,7 @@ static inline void __dma_sync(struct page *page,
static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction direction, unsigned long attrs)
{
if (cpu_needs_post_dma_flush(dev))
if (cpu_needs_post_dma_flush(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
__dma_sync(dma_addr_to_page(dev, dma_addr),
dma_addr & ~PAGE_MASK, size, direction);
plat_post_dma_flush(dev);
@@ -307,7 +308,8 @@ static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist,
struct scatterlist *sg;

for_each_sg(sglist, sg, nents, i) {
if (!plat_device_is_coherent(dev))
if (!plat_device_is_coherent(dev) &&
!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
__dma_sync(sg_page(sg), sg->offset, sg->length,
direction);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
@@ -324,7 +325,7 @@ static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction direction,
unsigned long attrs)
{
if (!plat_device_is_coherent(dev))
if (!plat_device_is_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
__dma_sync(page, offset, size, direction);

return plat_map_dma_mem_page(dev, page) + offset;
@@ -339,6 +340,7 @@ static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,

for_each_sg(sglist, sg, nhwentries, i) {
if (!plat_device_is_coherent(dev) &&
!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
direction != DMA_TO_DEVICE)
__dma_sync(sg_page(sg), sg->offset, sg->length,
direction);
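
With every map and unmap path honouring the attribute, a driver that keeps reusing the same receive buffers can map them once with DMA_ATTR_SKIP_CPU_SYNC and then sync only the bytes each packet actually touched. The sketch below is illustrative only: the rx_refill()/rx_complete()/rx_recycle() helpers and their arguments are hypothetical and not part of this commit; the dma_* calls are the stock DMA-mapping API.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Map a recycled receive buffer without the implicit full-buffer sync. */
static int rx_refill(struct device *dev, void *rx_buf, size_t buf_size,
		     dma_addr_t *addr)
{
	*addr = dma_map_single_attrs(dev, rx_buf, buf_size, DMA_FROM_DEVICE,
				     DMA_ATTR_SKIP_CPU_SYNC);
	return dma_mapping_error(dev, *addr) ? -ENOMEM : 0;
}

/* Hand just the bytes the device wrote back to the CPU. */
static void rx_complete(struct device *dev, dma_addr_t addr, size_t pkt_len)
{
	dma_sync_single_for_cpu(dev, addr, pkt_len, DMA_FROM_DEVICE);
}

/* Give the same region back to the device for the next packet. */
static void rx_recycle(struct device *dev, dma_addr_t addr, size_t pkt_len)
{
	dma_sync_single_for_device(dev, addr, pkt_len, DMA_FROM_DEVICE);
}

At teardown the buffer would be released with dma_unmap_single_attrs() and the same attribute, which is exactly the unmap path the hunks above now guard.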