841 files changed, 19594 insertions, 8628 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-pci b/Documentation/ABI/testing/sysfs-bus-pci index a3c5a6685036..6615fda0abfb 100644 --- a/Documentation/ABI/testing/sysfs-bus-pci +++ b/Documentation/ABI/testing/sysfs-bus-pci @@ -117,7 +117,7 @@ Description: What: /sys/bus/pci/devices/.../vpd Date: February 2008 -Contact: Ben Hutchings <bhutchings@solarflare.com> +Contact: Ben Hutchings <bwh@kernel.org> Description: A file named vpd in a device directory will be a binary file containing the Vital Product Data for the @@ -250,3 +250,24 @@ Description: valid. For example, writing a 2 to this file when sriov_numvfs is not 0 and not 2 already will return an error. Writing a 10 when the value of sriov_totalvfs is 8 will return an error. + +What: /sys/bus/pci/devices/.../driver_override +Date: April 2014 +Contact: Alex Williamson <alex.williamson@redhat.com> +Description: + This file allows the driver for a device to be specified which + will override standard static and dynamic ID matching. When + specified, only a driver with a name matching the value written + to driver_override will have an opportunity to bind to the + device. The override is specified by writing a string to the + driver_override file (echo pci-stub > driver_override) and + may be cleared with an empty string (echo > driver_override). + This returns the device to standard matching rules binding. + Writing to driver_override does not automatically unbind the + device from its current driver or make any attempt to + automatically load the specified driver. If no driver with a + matching name is currently loaded in the kernel, the device + will not bind to any driver. This also allows devices to + opt-out of driver binding using a driver_override name such as + "none". Only a single driver may be specified in the override, + there is no support for parsing delimiters. diff --git a/Documentation/DMA-API-HOWTO.txt b/Documentation/DMA-API-HOWTO.txt index 5e983031cc11..dcbbe3602d78 100644 --- a/Documentation/DMA-API-HOWTO.txt +++ b/Documentation/DMA-API-HOWTO.txt @@ -9,16 +9,76 @@ This is a guide to device driver writers on how to use the DMA API with example pseudo-code. For a concise description of the API, see DMA-API.txt. -Most of the 64bit platforms have special hardware that translates bus -addresses (DMA addresses) into physical addresses. This is similar to -how page tables and/or a TLB translates virtual addresses to physical -addresses on a CPU. This is needed so that e.g. PCI devices can -access with a Single Address Cycle (32bit DMA address) any page in the -64bit physical address space. Previously in Linux those 64bit -platforms had to set artificial limits on the maximum RAM size in the -system, so that the virt_to_bus() static scheme works (the DMA address -translation tables were simply filled on bootup to map each bus -address to the physical page __pa(bus_to_virt())). + CPU and DMA addresses + +There are several kinds of addresses involved in the DMA API, and it's +important to understand the differences. + +The kernel normally uses virtual addresses. Any address returned by +kmalloc(), vmalloc(), and similar interfaces is a virtual address and can +be stored in a "void *". + +The virtual memory system (TLB, page tables, etc.) translates virtual +addresses to CPU physical addresses, which are stored as "phys_addr_t" or +"resource_size_t". The kernel manages device resources like registers as +physical addresses. These are the addresses in /proc/iomem. 
The physical +address is not directly useful to a driver; it must use ioremap() to map +the space and produce a virtual address. + +I/O devices use a third kind of address: a "bus address" or "DMA address". +If a device has registers at an MMIO address, or if it performs DMA to read +or write system memory, the addresses used by the device are bus addresses. +In some systems, bus addresses are identical to CPU physical addresses, but +in general they are not. IOMMUs and host bridges can produce arbitrary +mappings between physical and bus addresses. + +Here's a picture and some examples: + + CPU CPU Bus + Virtual Physical Address + Address Address Space + Space Space + + +-------+ +------+ +------+ + | | |MMIO | Offset | | + | | Virtual |Space | applied | | + C +-------+ --------> B +------+ ----------> +------+ A + | | mapping | | by host | | + +-----+ | | | | bridge | | +--------+ + | | | | +------+ | | | | + | CPU | | | | RAM | | | | Device | + | | | | | | | | | | + +-----+ +-------+ +------+ +------+ +--------+ + | | Virtual |Buffer| Mapping | | + X +-------+ --------> Y +------+ <---------- +------+ Z + | | mapping | RAM | by IOMMU + | | | | + | | | | + +-------+ +------+ + +During the enumeration process, the kernel learns about I/O devices and +their MMIO space and the host bridges that connect them to the system. For +example, if a PCI device has a BAR, the kernel reads the bus address (A) +from the BAR and converts it to a CPU physical address (B). The address B +is stored in a struct resource and usually exposed via /proc/iomem. When a +driver claims a device, it typically uses ioremap() to map physical address +B at a virtual address (C). It can then use, e.g., ioread32(C), to access +the device registers at bus address A. + +If the device supports DMA, the driver sets up a buffer using kmalloc() or +a similar interface, which returns a virtual address (X). The virtual +memory system maps X to a physical address (Y) in system RAM. The driver +can use virtual address X to access the buffer, but the device itself +cannot because DMA doesn't go through the CPU virtual memory system. + +In some simple systems, the device can do DMA directly to physical address +Y. But in many others, there is IOMMU hardware that translates bus +addresses to physical addresses, e.g., it translates Z to Y. This is part +of the reason for the DMA API: the driver can give a virtual address X to +an interface like dma_map_single(), which sets up any required IOMMU +mapping and returns the bus address Z. The driver then tells the device to +do DMA to Z, and the IOMMU maps it to the buffer at address Y in system +RAM. So that Linux can use the dynamic DMA mapping, it needs some help from the drivers, namely it has to take into account that DMA addresses should be @@ -29,17 +89,17 @@ The following API will work of course even on platforms where no such hardware exists. Note that the DMA API works with any bus independent of the underlying -microprocessor architecture. You should use the DMA API rather than -the bus specific DMA API (e.g. pci_dma_*). +microprocessor architecture. You should use the DMA API rather than the +bus-specific DMA API, i.e., use the dma_map_*() interfaces rather than the +pci_map_*() interfaces. First of all, you should make sure #include <linux/dma-mapping.h> -is in your driver. 
This file will obtain for you the definition of the -dma_addr_t (which can hold any valid DMA address for the platform) -type which should be used everywhere you hold a DMA (bus) address -returned from the DMA mapping functions. +is in your driver, which provides the definition of dma_addr_t. This type +can hold any valid DMA or bus address for the platform and should be used +everywhere you hold a DMA address returned from the DMA mapping functions. What memory is DMA'able? @@ -123,9 +183,9 @@ Here, dev is a pointer to the device struct of your device, and mask is a bit mask describing which bits of an address your device supports. It returns zero if your card can perform DMA properly on the machine given the address mask you provided. In general, the -device struct of your device is embedded in the bus specific device -struct of your device. For example, a pointer to the device struct of -your PCI device is pdev->dev (pdev is a pointer to the PCI device +device struct of your device is embedded in the bus-specific device +struct of your device. For example, &pdev->dev is a pointer to the +device struct of a PCI device (pdev is a pointer to the PCI device struct of your device). If it returns non-zero, your device cannot perform DMA properly on @@ -147,8 +207,7 @@ exactly why. The standard 32-bit addressing device would do something like this: if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) { - printk(KERN_WARNING - "mydev: No suitable DMA available.\n"); + dev_warn(dev, "mydev: No suitable DMA available\n"); goto ignore_this_device; } @@ -170,8 +229,7 @@ all 64-bits when accessing streaming DMA: } else if (!dma_set_mask(dev, DMA_BIT_MASK(32))) { using_dac = 0; } else { - printk(KERN_WARNING - "mydev: No suitable DMA available.\n"); + dev_warn(dev, "mydev: No suitable DMA available\n"); goto ignore_this_device; } @@ -187,22 +245,20 @@ the case would look like this: using_dac = 0; consistent_using_dac = 0; } else { - printk(KERN_WARNING - "mydev: No suitable DMA available.\n"); + dev_warn(dev, "mydev: No suitable DMA available\n"); goto ignore_this_device; } -The coherent coherent mask will always be able to set the same or a -smaller mask as the streaming mask. However for the rare case that a -device driver only uses consistent allocations, one would have to -check the return value from dma_set_coherent_mask(). +The coherent mask will always be able to set the same or a smaller mask as +the streaming mask. However for the rare case that a device driver only +uses consistent allocations, one would have to check the return value from +dma_set_coherent_mask(). Finally, if your device can only drive the low 24-bits of address you might do something like: if (dma_set_mask(dev, DMA_BIT_MASK(24))) { - printk(KERN_WARNING - "mydev: 24-bit DMA addressing not available.\n"); + dev_warn(dev, "mydev: 24-bit DMA addressing not available\n"); goto ignore_this_device; } @@ -232,14 +288,14 @@ Here is pseudo-code showing how this might be done: card->playback_enabled = 1; } else { card->playback_enabled = 0; - printk(KERN_WARNING "%s: Playback disabled due to DMA limitations.\n", + dev_warn(dev, "%s: Playback disabled due to DMA limitations\n", card->name); } if (!dma_set_mask(dev, RECORD_ADDRESS_BITS)) { card->record_enabled = 1; } else { card->record_enabled = 0; - printk(KERN_WARNING "%s: Record disabled due to DMA limitations.\n", + dev_warn(dev, "%s: Record disabled due to DMA limitations\n", card->name); } @@ -331,7 +387,7 @@ context with the GFP_ATOMIC flag. 
Size is the length of the region you want to allocate, in bytes. This routine will allocate RAM for that region, so it acts similarly to -__get_free_pages (but takes size instead of a page order). If your +__get_free_pages() (but takes size instead of a page order). If your driver needs regions sized smaller than a page, you may prefer using the dma_pool interface, described below. @@ -343,11 +399,11 @@ the consistent DMA mask has been explicitly changed via dma_set_coherent_mask(). This is true of the dma_pool interface as well. -dma_alloc_coherent returns two values: the virtual address which you +dma_alloc_coherent() returns two values: the virtual address which you can use to access it from the CPU and dma_handle which you pass to the card. -The cpu return address and the DMA bus master address are both +The CPU virtual address and the DMA bus address are both guaranteed to be aligned to the smallest PAGE_SIZE order which is greater than or equal to the requested size. This invariant exists (for example) to guarantee that if you allocate a chunk @@ -359,13 +415,13 @@ To unmap and free such a DMA region, you call: dma_free_coherent(dev, size, cpu_addr, dma_handle); where dev, size are the same as in the above call and cpu_addr and -dma_handle are the values dma_alloc_coherent returned to you. +dma_handle are the values dma_alloc_coherent() returned to you. This function may not be called in interrupt context. If your driver needs lots of smaller memory regions, you can write -custom code to subdivide pages returned by dma_alloc_coherent, +custom code to subdivide pages returned by dma_alloc_coherent(), or you can use the dma_pool API to do that. A dma_pool is like -a kmem_cache, but it uses dma_alloc_coherent not __get_free_pages. +a kmem_cache, but it uses dma_alloc_coherent(), not __get_free_pages(). Also, it understands common hardware constraints for alignment, like queue heads needing to be aligned on N byte boundaries. @@ -373,37 +429,37 @@ Create a dma_pool like this: struct dma_pool *pool; - pool = dma_pool_create(name, dev, size, align, alloc); + pool = dma_pool_create(name, dev, size, align, boundary); The "name" is for diagnostics (like a kmem_cache name); dev and size are as above. The device's hardware alignment requirement for this type of data is "align" (which is expressed in bytes, and must be a power of two). If your device has no boundary crossing restrictions, -pass 0 for alloc; passing 4096 says memory allocated from this pool +pass 0 for boundary; passing 4096 says memory allocated from this pool must not cross 4KByte boundaries (but at that time it may be better to -go for dma_alloc_coherent directly instead). +use dma_alloc_coherent() directly instead). -Allocate memory from a dma pool like this: +Allocate memory from a DMA pool like this: cpu_addr = dma_pool_alloc(pool, flags, &dma_handle); -flags are SLAB_KERNEL if blocking is permitted (not in_interrupt nor -holding SMP locks), SLAB_ATOMIC otherwise. Like dma_alloc_coherent, +flags are GFP_KERNEL if blocking is permitted (not in_interrupt nor +holding SMP locks), GFP_ATOMIC otherwise. Like dma_alloc_coherent(), this returns two values, cpu_addr and dma_handle. Free memory that was allocated from a dma_pool like this: dma_pool_free(pool, cpu_addr, dma_handle); -where pool is what you passed to dma_pool_alloc, and cpu_addr and -dma_handle are the values dma_pool_alloc returned. This function +where pool is what you passed to dma_pool_alloc(), and cpu_addr and +dma_handle are the values dma_pool_alloc() returned. 
This function may be called in interrupt context. Destroy a dma_pool by calling: dma_pool_destroy(pool); -Make sure you've called dma_pool_free for all memory allocated +Make sure you've called dma_pool_free() for all memory allocated from a pool before you destroy the pool. This function may not be called in interrupt context. @@ -418,7 +474,7 @@ one of the following values: DMA_FROM_DEVICE DMA_NONE -One should provide the exact DMA direction if you know it. +You should provide the exact DMA direction if you know it. DMA_TO_DEVICE means "from main memory to the device" DMA_FROM_DEVICE means "from the device to main memory" @@ -489,14 +545,14 @@ and to unmap it: dma_unmap_single(dev, dma_handle, size, direction); You should call dma_mapping_error() as dma_map_single() could fail and return -error. Not all dma implementations support dma_mapping_error() interface. +error. Not all DMA implementations support the dma_mapping_error() interface. However, it is a good practice to call dma_mapping_error() interface, which will invoke the generic mapping error check interface. Doing so will ensure -that the mapping code will work correctly on all dma implementations without +that the mapping code will work correctly on all DMA implementations without any dependency on the specifics of the underlying implementation. Using the returned address without checking for errors could result in failures ranging from panics to silent data corruption. A couple of examples of incorrect ways -to check for errors that make assumptions about the underlying dma +to check for errors that make assumptions about the underlying DMA implementation are as follows and these are applicable to dma_map_page() as well. @@ -516,13 +572,13 @@ Incorrect example 2: goto map_error; } -You should call dma_unmap_single when the DMA activity is finished, e.g. +You should call dma_unmap_single() when the DMA activity is finished, e.g., from the interrupt which told you that the DMA transfer is done. -Using cpu pointers like this for single mappings has a disadvantage, +Using CPU pointers like this for single mappings has a disadvantage: you cannot reference HIGHMEM memory in this way. Thus, there is a -map/unmap interface pair akin to dma_{map,unmap}_single. These -interfaces deal with page/offset pairs instead of cpu pointers. +map/unmap interface pair akin to dma_{map,unmap}_single(). These +interfaces deal with page/offset pairs instead of CPU pointers. Specifically: struct device *dev = &my_dev->dev; @@ -550,7 +606,7 @@ Here, "offset" means byte offset within the given page. You should call dma_mapping_error() as dma_map_page() could fail and return error as outlined under the dma_map_single() discussion. -You should call dma_unmap_page when the DMA activity is finished, e.g. +You should call dma_unmap_page() when the DMA activity is finished, e.g., from the interrupt which told you that the DMA transfer is done. With scatterlists, you map a region gathered from several regions by: @@ -588,18 +644,16 @@ PLEASE NOTE: The 'nents' argument to the dma_unmap_sg call must be it should _NOT_ be the 'count' value _returned_ from the dma_map_sg call. -Every dma_map_{single,sg} call should have its dma_unmap_{single,sg} -counterpart, because the bus address space is a shared resource (although -in some ports the mapping is per each BUS so less devices contend for the -same bus address space) and you could render the machine unusable by eating -all bus addresses. 
+Every dma_map_{single,sg}() call should have its dma_unmap_{single,sg}() +counterpart, because the bus address space is a shared resource and +you could render the machine unusable by consuming all bus addresses. If you need to use the same streaming DMA region multiple times and touch the data in between the DMA transfers, the buffer needs to be synced -properly in order for the cpu and device to see the most uptodate and +properly in order for the CPU and device to see the most up-to-date and correct copy of the DMA buffer. -So, firstly, just map it with dma_map_{single,sg}, and after each DMA +So, firstly, just map it with dma_map_{single,sg}(), and after each DMA transfer call either: dma_sync_single_for_cpu(dev, dma_handle, size, direction); @@ -611,7 +665,7 @@ or: as appropriate. Then, if you wish to let the device get at the DMA area again, -finish accessing the data with the cpu, and then before actually +finish accessing the data with the CPU, and then before actually giving the buffer to the hardware call either: dma_sync_single_for_device(dev, dma_handle, size, direction); @@ -623,9 +677,9 @@ or: as appropriate. After the last DMA transfer call one of the DMA unmap routines -dma_unmap_{single,sg}. If you don't touch the data from the first dma_map_* -call till dma_unmap_*, then you don't have to call the dma_sync_* -routines at all. +dma_unmap_{single,sg}(). If you don't touch the data from the first +dma_map_*() call till dma_unmap_*(), then you don't have to call the +dma_sync_*() routines at all. Here is pseudo code which shows a situation in which you would need to use the dma_sync_*() interfaces. @@ -690,12 +744,12 @@ to use the dma_sync_*() interfaces. } } -Drivers converted fully to this interface should not use virt_to_bus any -longer, nor should they use bus_to_virt. Some drivers have to be changed a -little bit, because there is no longer an equivalent to bus_to_virt in the +Drivers converted fully to this interface should not use virt_to_bus() any +longer, nor should they use bus_to_virt(). Some drivers have to be changed a +little bit, because there is no longer an equivalent to bus_to_virt() in the dynamic DMA mapping scheme - you have to always store the DMA addresses -returned by the dma_alloc_coherent, dma_pool_alloc, and dma_map_single -calls (dma_map_sg stores them in the scatterlist itself if the platform +returned by the dma_alloc_coherent(), dma_pool_alloc(), and dma_map_single() +calls (dma_map_sg() stores them in the scatterlist itself if the platform supports dynamic DMA mapping in hardware) in your driver structures and/or in the card registers. @@ -709,9 +763,9 @@ as it is impossible to correctly support them. DMA address space is limited on some architectures and an allocation failure can be determined by: -- checking if dma_alloc_coherent returns NULL or dma_map_sg returns 0 +- checking if dma_alloc_coherent() returns NULL or dma_map_sg returns 0 -- checking the returned dma_addr_t of dma_map_single and dma_map_page +- checking the dma_addr_t returned from dma_map_single() and dma_map_page() by using dma_mapping_error(): dma_addr_t dma_handle; @@ -794,7 +848,7 @@ Example 2: (if buffers are allocated in a loop, unmap all mapped buffers when dma_unmap_single(array[i].dma_addr); } -Networking drivers must call dev_kfree_skb to free the socket buffer +Networking drivers must call dev_kfree_skb() to free the socket buffer and return NETDEV_TX_OK if the DMA mapping fails on the transmit hook (ndo_start_xmit). 
This means that the socket buffer is just dropped in the failure case. @@ -831,7 +885,7 @@ transform some example code. DEFINE_DMA_UNMAP_LEN(len); }; -2) Use dma_unmap_{addr,len}_set to set these values. +2) Use dma_unmap_{addr,len}_set() to set these values. Example, before: ringp->mapping = FOO; @@ -842,7 +896,7 @@ transform some example code. dma_unmap_addr_set(ringp, mapping, FOO); dma_unmap_len_set(ringp, len, BAR); -3) Use dma_unmap_{addr,len} to access these values. +3) Use dma_unmap_{addr,len}() to access these values. Example, before: dma_unmap_single(dev, ringp->mapping, ringp->len, diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt index e865279cec58..52088408668a 100644 --- a/Documentation/DMA-API.txt +++ b/Documentation/DMA-API.txt @@ -4,22 +4,26 @@ James E.J. Bottomley <James.Bottomley@HansenPartnership.com> This document describes the DMA API. For a more gentle introduction -of the API (and actual examples) see -Documentation/DMA-API-HOWTO.txt. +of the API (and actual examples), see Documentation/DMA-API-HOWTO.txt. -This API is split into two pieces. Part I describes the API. Part II -describes the extensions to the API for supporting non-consistent -memory machines. Unless you know that your driver absolutely has to -support non-consistent platforms (this is usually only legacy -platforms) you should only use the API described in part I. +This API is split into two pieces. Part I describes the basic API. +Part II describes extensions for supporting non-consistent memory +machines. Unless you know that your driver absolutely has to support +non-consistent platforms (this is usually only legacy platforms) you +should only use the API described in part I. Part I - dma_ API ------------------------------------- -To get the dma_ API, you must #include <linux/dma-mapping.h> +To get the dma_ API, you must #include <linux/dma-mapping.h>. This +provides dma_addr_t and the interfaces described below. +A dma_addr_t can hold any valid DMA or bus address for the platform. It +can be given to a device to use as a DMA source or target. A CPU cannot +reference a dma_addr_t directly because there may be translation between +its physical address space and the bus address space. -Part Ia - Using large dma-coherent buffers +Part Ia - Using large DMA-coherent buffers ------------------------------------------ void * @@ -33,20 +37,21 @@ to make sure to flush the processor's write buffers before telling devices to read that memory.) This routine allocates a region of <size> bytes of consistent memory. -It also returns a <dma_handle> which may be cast to an unsigned -integer the same width as the bus and used as the physical address -base of the region. -Returns: a pointer to the allocated region (in the processor's virtual +It returns a pointer to the allocated region (in the processor's virtual address space) or NULL if the allocation failed. +It also returns a <dma_handle> which may be cast to an unsigned integer the +same width as the bus and given to the device as the bus address base of +the region. + Note: consistent memory can be expensive on some platforms, and the minimum allocation length may be as big as a page, so you should consolidate your requests for consistent memory as much as possible. The simplest way to do that is to use the dma_pool calls (see below). 
-The flag parameter (dma_alloc_coherent only) allows the caller to -specify the GFP_ flags (see kmalloc) for the allocation (the +The flag parameter (dma_alloc_coherent() only) allows the caller to +specify the GFP_ flags (see kmalloc()) for the allocation (the implementation may choose to ignore flags that affect the location of the returned memory, like GFP_DMA). @@ -61,24 +66,24 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle) -Free the region of consistent memory you previously allocated. dev, -size and dma_handle must all be the same as those passed into the -consistent allocate. cpu_addr must be the virtual address returned by -the consistent allocate. +Free a region of consistent memory you previously allocated. dev, +size and dma_handle must all be the same as those passed into +dma_alloc_coherent(). cpu_addr must be the virtual address returned by +the dma_alloc_coherent(). Note that unlike their sibling allocation calls, these routines may only be called with IRQs enabled. -Part Ib - Using small dma-coherent buffers +Part Ib - Using small DMA-coherent buffers ------------------------------------------ To get this part of the dma_ API, you must #include <linux/dmapool.h> -Many drivers need lots of small dma-coherent memory regions for DMA +Many drivers need lots of small DMA-coherent memory regions for DMA descriptors or I/O buffers. Rather than allocating in units of a page or more using dma_alloc_coherent(), you can use DMA pools. These work -much like a struct kmem_cache, except that they use the dma-coherent allocator, +much like a struct kmem_cache, except that they use the DMA-coherent allocator, not __get_free_pages(). Also, they understand common hardware constraints for alignment, like queue heads needing to be aligned on N-byte boundaries. @@ -87,7 +92,7 @@ for alignment, like queue heads needing to be aligned on N-byte boundaries. dma_pool_create(const char *name, struct device *dev, size_t size, size_t align, size_t alloc); -The pool create() routines initialize a pool of dma-coherent buffers +dma_pool_create() initializes a pool of DMA-coherent buffers for use with a given device. It must be called in a context which can sleep. @@ -102,25 +107,26 @@ from this pool must not cross 4KByte boundaries. void *dma_pool_alloc(struct dma_pool *pool, gfp_t gfp_flags, dma_addr_t *dma_handle); -This allocates memory from the pool; the returned memory will meet the size -and alignment requirements specified at creation time. Pass GFP_ATOMIC to -prevent blocking, or if it's permitted (not in_interrupt, not holding SMP locks), -pass GFP_KERNEL to allow blocking. Like dma_alloc_coherent(), this returns -two values: an address usable by the cpu, and the dma address usable by the -pool's device. +This allocates memory from the pool; the returned memory will meet the +size and alignment requirements specified at creation time. Pass +GFP_ATOMIC to prevent blocking, or if it's permitted (not +in_interrupt, not holding SMP locks), pass GFP_KERNEL to allow +blocking. Like dma_alloc_coherent(), this returns two values: an +address usable by the CPU, and the DMA address usable by the pool's +device. void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr); This puts memory back into the pool. The pool is what was passed to -the pool allocation routine; the cpu (vaddr) and dma addresses are what +dma_pool_alloc(); the CPU (vaddr) and DMA addresses are what were returned when that routine allocated the memory being freed. 
void dma_pool_destroy(struct dma_pool *pool); -The pool destroy() routines free the resources of the pool. They must be +dma_pool_destroy() frees the resources of the pool. It must be called in a context which can sleep. Make sure you've freed all allocated memory back to the pool before you destroy it. @@ -187,9 +193,9 @@ dma_map_single(struct device *dev, void *cpu_addr, size_t size, enum dma_data_direction direction) Maps a piece of processor virtual memory so it can be accessed by the -device and returns the physical handle of the memory. +device and returns the bus address of the memory. -The direction for both api's may be converted freely by casting. +The direction for both APIs may be converted freely by casting. However the dma_ API uses a strongly typed enumerator for its direction: @@ -198,31 +204,30 @@ DMA_TO_DEVICE data is going from the memory to the device DMA_FROM_DEVICE data is coming from the device to the memory DMA_BIDIRECTIONAL direction isn't known -Notes: Not all memory regions in a machine can be mapped by this -API. Further, regions that appear to be physically contiguous in -kernel virtual space may not be contiguous as physical memory. Since -this API does not provide any scatter/gather capability, it will fail -if the user tries to map a non-physically contiguous piece of memory. -For this reason, it is recommended that memory mapped by this API be -obtained only from sources which guarantee it to be physically contiguous -(like kmalloc). - -Further, the physical address of the memory must be within the -dma_mask of the device (the dma_mask represents a bit mask of the -addressable region for the device. I.e., if the physical address of -the memory anded with the dma_mask is still equal to the physical -address, then the device can perform DMA to the memory). In order to +Notes: Not all memory regions in a machine can be mapped by this API. +Further, contiguous kernel virtual space may not be contiguous as +physical memory. Since this API does not provide any scatter/gather +capability, it will fail if the user tries to map a non-physically +contiguous piece of memory. For this reason, memory to be mapped by +this API should be obtained from sources which guarantee it to be +physically contiguous (like kmalloc). + +Further, the bus address of the memory must be within the +dma_mask of the device (the dma_mask is a bit mask of the +addressable region for the device, i.e., if the bus address of +the memory ANDed with the dma_mask is still equal to the bus +address, then the device can perform DMA to the memory). To ensure that the memory allocated by kmalloc is within the dma_mask, the driver may specify various platform-dependent flags to restrict -the physical memory range of the allocation (e.g. on x86, GFP_DMA -guarantees to be within the first 16Mb of available physical memory, +the bus address range of the allocation (e.g., on x86, GFP_DMA +guarantees to be within the first 16MB of available bus addresses, as required by ISA devices). Note also that the above constraints on physical contiguity and dma_mask may not apply if the platform has an IOMMU (a device which -supplies a physical to virtual mapping between the I/O memory bus and -the device). However, to be portable, device driver writers may *not* -assume that such an IOMMU exists. +maps an I/O bus address to a physical memory address). However, to be +portable, device driver writers may *not* assume that such an IOMMU +exists. 
Warnings: Memory coherency operates at a granularity called the cache line width. In order for memory mapped by this API to operate @@ -281,9 +286,9 @@ cache width is. int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -In some circumstances dma_map_single and dma_map_page will fail to create +In some circumstances dma_map_single() and dma_map_page() will fail to create a mapping. A driver can check for these errors by testing the returned -dma address with dma_mapping_error(). A non-zero return value means the mapping +DMA address with dma_mapping_error(). A non-zero return value means the mapping could not be created and the driver should take appropriate action (e.g. reduce current DMA mapping usage or delay and try again later). @@ -291,7 +296,7 @@ reduce current DMA mapping usage or delay and try again later). dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction) -Returns: the number of physical segments mapped (this may be shorter +Returns: the number of bus address segments mapped (this may be shorter than <nents> passed in if some elements of the scatter/gather list are physically or virtually adjacent and an IOMMU maps them with a single entry). @@ -299,7 +304,7 @@ entry). Please note that the sg cannot be mapped again if it has been mapped once. The mapping process is allowed to destroy information in the sg. -As with the other mapping interfaces, dma_map_sg can fail. When it +As with the other mapping interfaces, dma_map_sg() can fail. When it does, 0 is returned and a driver must take appropriate action. It is critical that the driver do something, in the case of a block driver aborting the request or even oopsing is better than doing nothing and @@ -335,7 +340,7 @@ must be the same as those and passed in to the scatter/gather mapping API. Note: <nents> must be the number you passed in, *not* the number of -physical entries returned. +bus address entries returned. void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, @@ -350,7 +355,7 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction) -Synchronise a single contiguous or scatter/gather mapping for the cpu +Synchronise a single contiguous or scatter/gather mapping for the CPU and device. With the sync_sg API, all the parameters must be the same as those passed into the single mapping API. With the sync_single API, you can use dma_handle and size parameters that aren't identical to @@ -391,10 +396,10 @@ The four functions above are just like the counterpart functions without the _attrs suffixes, except that they pass an optional struct dma_attrs*. -struct dma_attrs encapsulates a set of "dma attributes". For the +struct dma_attrs encapsulates a set of "DMA attributes". For the definition of struct dma_attrs see linux/dma-attrs.h. -The interpretation of dma attributes is architecture-specific, and +The interpretation of DMA attributes is architecture-specific, and each attribute should be documented in Documentation/DMA-attributes.txt. If struct dma_attrs* is NULL, the semantics of each of these @@ -458,7 +463,7 @@ Note: where the platform can return consistent memory, it will guarantee that the sync points become nops. Warning: Handling non-consistent memory is a real pain. 
You should -only ever use this API if you positively know your driver will be +only use this API if you positively know your driver will be required to work on one of the rare (usually non-PCI) architectures that simply cannot make consistent memory. @@ -492,30 +497,29 @@ continuing on for size. Again, you *must* observe the cache line boundaries when doing this. int -dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, +dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags) -Declare region of memory to be handed out by dma_alloc_coherent when +Declare region of memory to be handed out by dma_alloc_coherent() when it's asked for coherent memory for this device. -bus_addr is the physical address to which the memory is currently -assigned in the bus responding region (this will be used by the -platform to perform the mapping). +phys_addr is the CPU physical address to which the memory is currently +assigned (this will be ioremapped so the CPU can access the region). -device_addr is the physical address the device needs to be programmed -with actually to address this memory (this will be handed out as the +device_addr is the bus address the device needs to be programmed +with to actually address this memory (this will be handed out as the dma_addr_t in dma_alloc_coherent()). size is the size of the area (must be multiples of PAGE_SIZE). -flags can be or'd together and are: +flags can be ORed together and are: DMA_MEMORY_MAP - request that the memory returned from dma_alloc_coherent() be directly writable. DMA_MEMORY_IO - request that the memory returned from -dma_alloc_coherent() be addressable using read/write/memcpy_toio etc. +dma_alloc_coherent() be addressable using read()/write()/memcpy_toio() etc. One or both of these flags must be present. @@ -572,7 +576,7 @@ region is occupied. Part III - Debug drivers use of the DMA-API ------------------------------------------- -The DMA-API as described above as some constraints. DMA addresses must be +The DMA-API as described above has some constraints. DMA addresses must be released with the corresponding function with the same size for example. With the advent of hardware IOMMUs it becomes more and more important that drivers do not violate those constraints. In the worst case such a violation can @@ -690,11 +694,11 @@ architectural default. void debug_dmap_mapping_error(struct device *dev, dma_addr_t dma_addr); dma-debug interface debug_dma_mapping_error() to debug drivers that fail -to check dma mapping errors on addresses returned by dma_map_single() and +to check DMA mapping errors on addresses returned by dma_map_single() and dma_map_page() interfaces. This interface clears a flag set by debug_dma_map_page() to indicate that dma_mapping_error() has been called by the driver. When driver does unmap, debug_dma_unmap() checks the flag and if this flag is still set, prints warning message that includes call trace that leads up to the unmap. This interface can be called from dma_mapping_error() -routines to enable dma mapping error check debugging. +routines to enable DMA mapping error check debugging. 
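Before moving on, it is worth collecting the pieces the two DMA documents above describe into one place. Here is a rough sketch of mask setup plus a coherent allocation using the dma_ API; mydev and MYDEV_BUF_SIZE are made-up names, and this is an editorial illustration rather than code from the patch:

	#include <linux/dma-mapping.h>

	void *cpu_addr;
	dma_addr_t dma_handle;

	/* Check that 32-bit streaming and coherent DMA are usable. */
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
		dev_warn(dev, "mydev: No suitable DMA available\n");
		goto ignore_this_device;
	}

	/*
	 * The buffer is visible to the CPU at cpu_addr and to the
	 * device at dma_handle; both stay valid until freed.
	 */
	cpu_addr = dma_alloc_coherent(dev, MYDEV_BUF_SIZE,
				      &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		goto ignore_this_device;

	/* ... program dma_handle into the device and do the I/O ... */

	dma_free_coherent(dev, MYDEV_BUF_SIZE, cpu_addr, dma_handle);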
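For lots of small buffers, the dma_pool interface replaces hand-rolled subdivision of coherent pages. A minimal sketch, assuming 64-byte descriptors that the hardware wants 64-byte aligned, with no boundary-crossing restriction:

	#include <linux/dmapool.h>

	struct dma_pool *pool;
	void *vaddr;
	dma_addr_t dma_handle;

	/* No boundary restriction, so pass 0 for the boundary argument. */
	pool = dma_pool_create("mydev_desc", dev, 64, 64, 0);
	if (!pool)
		goto no_pool;

	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma_handle);
	if (!vaddr)
		goto no_desc;

	/* ... use vaddr from the CPU, hand dma_handle to the device ... */

	dma_pool_free(pool, vaddr, dma_handle);
	dma_pool_destroy(pool);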
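Finally, the streaming-mapping error-checking pattern that both documents insist on looks like this, where buf and len come from the surrounding (hypothetical) driver:

	dma_addr_t mapping;

	mapping = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, mapping)) {
		/* Never hand a failed mapping to the hardware. */
		goto map_error_handling;
	}

	/* ... start the transfer using 'mapping' ... */

	/* Later, typically from the DMA-done interrupt: */
	dma_unmap_single(dev, mapping, len, DMA_TO_DEVICE);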
diff --git a/Documentation/DMA-ISA-LPC.txt b/Documentation/DMA-ISA-LPC.txt index e767805b4182..b1a19835e907 100644 --- a/Documentation/DMA-ISA-LPC.txt +++ b/Documentation/DMA-ISA-LPC.txt @@ -16,7 +16,7 @@ To do ISA style DMA you need to include two headers: #include <asm/dma.h> The first is the generic DMA API used to convert virtual addresses to -physical addresses (see Documentation/DMA-API.txt for details). +bus addresses (see Documentation/DMA-API.txt for details). The second contains the routines specific to ISA DMA transfers. Since this is not present on all platforms make sure you construct your @@ -50,7 +50,7 @@ early as possible and not release it until the driver is unloaded.) Part III - Address translation ------------------------------ -To translate the virtual address to a physical use the normal DMA +To translate the virtual address to a bus address, use the normal DMA API. Do _not_ use isa_virt_to_phys() even though it does the same thing. The reason for this is that the function isa_virt_to_phys() will require a Kconfig dependency to ISA, not just ISA_DMA_API which diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl index 677a02553ec0..ba60d93c1855 100644 --- a/Documentation/DocBook/drm.tmpl +++ b/Documentation/DocBook/drm.tmpl @@ -79,7 +79,7 @@ <partintro> <para> This first part of the DRM Developer's Guide documents core DRM code, - helper libraries for writting drivers and generic userspace interfaces + helper libraries for writing drivers and generic userspace interfaces exposed by DRM drivers. </para> </partintro> @@ -459,7 +459,7 @@ char *date;</synopsis> providing a solution to every graphics memory-related problems, GEM identified common code between drivers and created a support library to share it. GEM has simpler initialization and execution requirements than - TTM, but has no video RAM management capabitilies and is thus limited to + TTM, but has no video RAM management capabilities and is thus limited to UMA devices. </para> <sect2> @@ -889,7 +889,7 @@ int (*prime_fd_to_handle)(struct drm_device *dev, vice versa. Drivers must use the kernel dma-buf buffer sharing framework to manage the PRIME file descriptors. Similar to the mode setting API PRIME is agnostic to the underlying buffer object manager, as - long as handles are 32bit unsinged integers. + long as handles are 32bit unsigned integers. </para> <para> While non-GEM drivers must implement the operations themselves, GEM @@ -2356,7 +2356,7 @@ void intel_crt_init(struct drm_device *dev) first create properties and then create and associate individual instances of those properties to objects. A property can be instantiated multiple times and associated with different objects. Values are stored in property - instances, and all other property information are stored in the propery + instances, and all other property information are stored in the property and shared between all instances of the property. </para> <para> @@ -2697,10 +2697,10 @@ int num_ioctls;</synopsis> <sect1> <title>Legacy Support Code</title> <para> - The section very brievely covers some of the old legacy support code which + The section very briefly covers some of the old legacy support code which is only used by old DRM drivers which have done a so-called shadow-attach to the underlying device instead of registering as a real driver. This - also includes some of the old generic buffer mangement and command + also includes some of the old generic buffer management and command submission code. 
Do not use any of this in new and modern drivers. </para> diff --git a/Documentation/DocBook/filesystems.tmpl b/Documentation/DocBook/filesystems.tmpl index 4f676838da06..bcdfdb9a9277 100644 --- a/Documentation/DocBook/filesystems.tmpl +++ b/Documentation/DocBook/filesystems.tmpl @@ -62,7 +62,7 @@ !Efs/mpage.c !Efs/namei.c !Efs/buffer.c -!Efs/bio.c +!Eblock/bio.c !Efs/seq_file.c !Efs/filesystems.c !Efs/fs-writeback.c diff --git a/Documentation/DocBook/media/Makefile b/Documentation/DocBook/media/Makefile index f9fd615427fb..1d27f0a1abd1 100644 --- a/Documentation/DocBook/media/Makefile +++ b/Documentation/DocBook/media/Makefile @@ -195,7 +195,7 @@ DVB_DOCUMENTED = \ # install_media_images = \ - $(Q)cp $(OBJIMGFILES) $(MEDIA_SRC_DIR)/v4l/*.svg $(MEDIA_OBJ_DIR)/media_api + $(Q)-cp $(OBJIMGFILES) $(MEDIA_SRC_DIR)/v4l/*.svg $(MEDIA_OBJ_DIR)/media_api $(MEDIA_OBJ_DIR)/%: $(MEDIA_SRC_DIR)/%.b64 $(Q)base64 -d $< >$@ diff --git a/Documentation/acpi/enumeration.txt b/Documentation/acpi/enumeration.txt index 2a1519b87177..fd786ea13a1f 100644 --- a/Documentation/acpi/enumeration.txt +++ b/Documentation/acpi/enumeration.txt @@ -296,7 +296,7 @@ specifies the path to the controller. In order to use these GPIOs in Linux we need to translate them to the corresponding Linux GPIO descriptors. There is a standard GPIO API for that and is documented in -Documentation/gpio.txt. +Documentation/gpio/. In the above example we can get the corresponding two GPIO descriptors with a code like this: diff --git a/Documentation/debugging-via-ohci1394.txt b/Documentation/debugging-via-ohci1394.txt index fa0151a712f9..5c9a567b3fac 100644 --- a/Documentation/debugging-via-ohci1394.txt +++ b/Documentation/debugging-via-ohci1394.txt @@ -25,9 +25,11 @@ using data transfer rates in the order of 10MB/s or more. With most FireWire controllers, memory access is limited to the low 4 GB of physical address space. This can be a problem on IA64 machines where memory is located mostly above that limit, but it is rarely a problem on -more common hardware such as x86, x86-64 and PowerPC. However, at least -Agere/LSI FW643e and FW643e2 controllers are known to support access to -physical addresses above 4 GB. +more common hardware such as x86, x86-64 and PowerPC. + +At least LSI FW643e and FW643e2 controllers are known to support access to +physical addresses above 4 GB, but this feature is currently not enabled by +Linux. Together with a early initialization of the OHCI-1394 controller for debugging, this facility proved most useful for examining long debugs logs in the printk @@ -101,8 +103,9 @@ Step-by-step instructions for using firescope with early OHCI initialization: compliant, they are based on TI PCILynx chips and require drivers for Win- dows operating systems. - The mentioned kernel log message contains ">4 GB phys DMA" in case of - OHCI-1394 controllers which support accesses above this limit. + The mentioned kernel log message contains the string "physUB" if the + controller implements a writable Physical Upper Bound register. This is + required for physical DMA above 4 GB (but not utilized by Linux yet). 
2) Establish a working FireWire cable connection: diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt index 05a27e9442bd..2f5173500bd9 100644 --- a/Documentation/device-mapper/thin-provisioning.txt +++ b/Documentation/device-mapper/thin-provisioning.txt @@ -309,7 +309,10 @@ ii) Status error_if_no_space|queue_if_no_space If the pool runs out of data or metadata space, the pool will either queue or error the IO destined to the data device. The - default is to queue the IO until more space is added. + default is to queue the IO until more space is added or the + 'no_space_timeout' expires. The 'no_space_timeout' dm-thin-pool + module parameter can be used to change this timeout -- it + defaults to 60 seconds but may be disabled using a value of 0. iii) Messages diff --git a/Documentation/devicetree/bindings/clock/at91-clock.txt b/Documentation/devicetree/bindings/clock/at91-clock.txt index cd5e23912888..6794cdc96d8f 100644 --- a/Documentation/devicetree/bindings/clock/at91-clock.txt +++ b/Documentation/devicetree/bindings/clock/at91-clock.txt @@ -62,7 +62,7 @@ Required properties for PMC node: - interrupt-controller : tell that the PMC is an interrupt controller. - #interrupt-cells : must be set to 1. The first cell encodes the interrupt id, and reflect the bit position in the PMC_ER/DR/SR registers. - You can use the dt macros defined in dt-bindings/clk/at91.h. + You can use the dt macros defined in dt-bindings/clock/at91.h. 0 (AT91_PMC_MOSCS) -> main oscillator ready 1 (AT91_PMC_LOCKA) -> PLL A ready 2 (AT91_PMC_LOCKB) -> PLL B ready diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt index 5992dceec7af..02a25d99ca61 100644 --- a/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt +++ b/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt @@ -43,7 +43,7 @@ Example clock-output-names = "tpu0", "mmcif1", "sdhi3", "sdhi2", "sdhi1", "sdhi0", "mmcif0"; - renesas,clock-indices = < + clock-indices = < R8A7790_CLK_TPU0 R8A7790_CLK_MMCIF1 R8A7790_CLK_SDHI3 R8A7790_CLK_SDHI2 R8A7790_CLK_SDHI1 R8A7790_CLK_SDHI0 R8A7790_CLK_MMCIF0 diff --git a/Documentation/devicetree/bindings/dma/ti-edma.txt b/Documentation/devicetree/bindings/dma/ti-edma.txt index 9fbbdb783a72..68ff2137bae7 100644 --- a/Documentation/devicetree/bindings/dma/ti-edma.txt +++ b/Documentation/devicetree/bindings/dma/ti-edma.txt @@ -29,6 +29,6 @@ edma: edma@49000000 { dma-channels = <64>; ti,edma-regions = <4>; ti,edma-slots = <256>; - ti,edma-xbar-event-map = <1 12 - 2 13>; + ti,edma-xbar-event-map = /bits/ 16 <1 12 + 2 13>; }; diff --git a/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt b/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt index f61cef74a212..941a26aa4322 100644 --- a/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt +++ b/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt @@ -21,6 +21,12 @@ Required Properties: GPIO_ACTIVE_HIGH and GPIO_ACTIVE_LOW flags are supported. - gpio-ranges: Range of pins managed by the GPIO controller. +Optional properties: + + - clocks: Must contain a reference to the functional clock. The property is + mandatory if the hardware implements a controllable functional clock for + the GPIO instance. + Please refer to gpio.txt in this directory for details of gpio-ranges property and the common GPIO bindings used by client devices. 
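A note on the ti,edma-xbar-event-map hunk above: the /bits/ 16 annotation stores the (event, channel) pairs as 16-bit cells, which is what a driver reading the property as u16 values expects. Roughly, and purely as an illustration (this is not the actual edma driver code, and the array size is made up):

	u16 xbar_map[4];	/* two (event, channel) pairs */
	int ret;

	ret = of_property_read_u16_array(np, "ti,edma-xbar-event-map",
					 xbar_map, ARRAY_SIZE(xbar_map));
	if (ret)
		return ret;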
diff --git a/Documentation/devicetree/bindings/hsi/client-devices.txt b/Documentation/devicetree/bindings/hsi/client-devices.txt new file mode 100644 index 000000000000..104c9a3e57a4 --- /dev/null +++ b/Documentation/devicetree/bindings/hsi/client-devices.txt @@ -0,0 +1,44 @@ +Each HSI port is supposed to have one child node, which +represents the remote device connected to the HSI port. The +following properties are standardized for HSI clients: + +Required HSI configuration properties: + +- hsi-channel-ids: A list of channel ids + +- hsi-rx-mode: Receiver Bit transmission mode ("stream" or "frame") +- hsi-tx-mode: Transmitter Bit transmission mode ("stream" or "frame") +- hsi-mode: May be used instead of hsi-rx-mode and hsi-tx-mode if + the transmission mode is the same for receiver and + transmitter +- hsi-speed-kbps: Max bit transmission speed in kbit/s +- hsi-flow: RX flow type ("synchronized" or "pipeline") +- hsi-arb-mode: Arbitration mode for TX frame ("round-robin", "priority") + +Optional HSI configuration properties: + +- hsi-channel-names: A list with one name per channel specified in the + hsi-channel-ids property + + +Device Tree node example for an HSI client: + +hsi-controller { + hsi-port { + modem: hsi-client { + compatible = "nokia,n900-modem"; + + hsi-channel-ids = <0>, <1>, <2>, <3>; + hsi-channel-names = "mcsaab-control", + "speech-control", + "speech-data", + "mcsaab-data"; + hsi-speed-kbps = <55000>; + hsi-mode = "frame"; + hsi-flow = "synchronized"; + hsi-arb-mode = "round-robin"; + + /* more client specific properties */ + }; + }; +}; diff --git a/Documentation/devicetree/bindings/hsi/nokia-modem.txt b/Documentation/devicetree/bindings/hsi/nokia-modem.txt new file mode 100644 index 000000000000..8a979780452b --- /dev/null +++ b/Documentation/devicetree/bindings/hsi/nokia-modem.txt @@ -0,0 +1,57 @@ +Nokia modem client bindings + +The Nokia modem HSI client follows the common HSI client binding +and inherits all required properties.
The following additional +properties are needed by the Nokia modem HSI client: + +Required properties: +- compatible: Should be one of + "nokia,n900-modem" +- hsi-channel-names: Should contain the following strings + "mcsaab-control" + "speech-control" + "speech-data" + "mcsaab-data" +- gpios: Should provide a GPIO handler for each GPIO listed in + gpio-names +- gpio-names: Should contain the following strings + "cmt_apeslpx" + "cmt_rst_rq" + "cmt_en" + "cmt_rst" + "cmt_bsi" +- interrupts: Should be IRQ handle for modem's reset indication + +Example: + +&ssi_port { + modem: hsi-client { + compatible = "nokia,n900-modem"; + + pinctrl-names = "default"; + pinctrl-0 = <&modem_pins>; + + hsi-channel-ids = <0>, <1>, <2>, <3>; + hsi-channel-names = "mcsaab-control", + "speech-control", + "speech-data", + "mcsaab-data"; + hsi-speed-kbps = <55000>; + hsi-mode = "frame"; + hsi-flow = "synchronized"; + hsi-arb-mode = "round-robin"; + + interrupts-extended = <&gpio3 8 IRQ_TYPE_EDGE_FALLING>; /* 72 */ + + gpios = <&gpio3 6 GPIO_ACTIVE_HIGH>, /* 70 */ + <&gpio3 9 GPIO_ACTIVE_HIGH>, /* 73 */ + <&gpio3 10 GPIO_ACTIVE_HIGH>, /* 74 */ + <&gpio3 11 GPIO_ACTIVE_HIGH>, /* 75 */ + <&gpio5 29 GPIO_ACTIVE_HIGH>; /* 157 */ + gpio-names = "cmt_apeslpx", + "cmt_rst_rq", + "cmt_en", + "cmt_rst", + "cmt_bsi"; + }; +}; diff --git a/Documentation/devicetree/bindings/hsi/omap-ssi.txt b/Documentation/devicetree/bindings/hsi/omap-ssi.txt new file mode 100644 index 000000000000..f26625e42693 --- /dev/null +++ b/Documentation/devicetree/bindings/hsi/omap-ssi.txt @@ -0,0 +1,97 @@ +OMAP SSI controller bindings + +OMAP Synchronous Serial Interface (SSI) controller implements a legacy +variant of MIPI's High Speed Synchronous Serial Interface (HSI). + +Required properties: +- compatible: Should include "ti,omap3-ssi". +- reg-names: Contains the values "sys" and "gdd" (in this order). +- reg: Contains a matching register specifier for each entry + in reg-names. +- interrupt-names: Contains the value "gdd_mpu". +- interrupts: Contains matching interrupt information for each entry + in interrupt-names. +- ranges: Represents the bus address mapping between the main + controller node and the child nodes below. +- clock-names: Must include the following entries: + "ssi_ssr_fck": The OMAP clock of that name + "ssi_sst_fck": The OMAP clock of that name + "ssi_ick": The OMAP clock of that name +- clocks: Contains a matching clock specifier for each entry in + clock-names. +- #address-cells: Should be set to <1> +- #size-cells: Should be set to <1> + +Each port is represented as a sub-node of the ti,omap3-ssi device. + +Required Port sub-node properties: +- compatible: Should be set to the following value + ti,omap3-ssi-port (applicable to OMAP34xx devices) +- reg-names: Contains the values "tx" and "rx" (in this order). +- reg: Contains a matching register specifier for each entry + in reg-names. +- interrupt-parent Should be a phandle for the interrupt controller +- interrupts: Should contain interrupt specifiers for mpu interrupts + 0 and 1 (in this order). +- ti,ssi-cawake-gpio: Defines which GPIO pin is used to signify CAWAKE + events for the port. This is an optional board-specific + property. If it's missing the port will not be + enabled. 
+ +Example for Nokia N900: + +ssi-controller@48058000 { + compatible = "ti,omap3-ssi"; + + /* needed until hwmod is updated to use the compatible string */ + ti,hwmods = "ssi"; + + reg = <0x48058000 0x1000>, + <0x48059000 0x1000>; + reg-names = "sys", + "gdd"; + + interrupts = <55>; + interrupt-names = "gdd_mpu"; + + clocks = <&ssi_ssr_fck>, + <&ssi_sst_fck>, + <&ssi_ick>; + clock-names = "ssi_ssr_fck", + "ssi_sst_fck", + "ssi_ick"; + + #address-cells = <1>; + #size-cells = <1>; + ranges; + + ssi-port@4805a000 { + compatible = "ti,omap3-ssi-port"; + + reg = <0x4805a000 0x800>, + <0x4805a800 0x800>; + reg-names = "tx", + "rx"; + + interrupt-parent = <&intc>; + interrupts = <67>, + <68>; + + ti,ssi-cawake-gpio = <&gpio5 23 GPIO_ACTIVE_HIGH>; /* 151 */ + } + + ssi-port@4805a000 { + compatible = "ti,omap3-ssi-port"; + + reg = <0x4805b000 0x800>, + <0x4805b800 0x800>; + reg-names = "tx", + "rx"; + + interrupt-parent = <&intc>; + interrupts = <69>, + <70>; + + status = "disabled"; /* second port is not used on N900 */ + } +} diff --git a/Documentation/devicetree/bindings/mmc/mmci.txt b/Documentation/devicetree/bindings/mmc/mmci.txt index 2b584cae352a..03796cf2d3e7 100644 --- a/Documentation/devicetree/bindings/mmc/mmci.txt +++ b/Documentation/devicetree/bindings/mmc/mmci.txt @@ -4,12 +4,58 @@ The ARM PrimeCell MMCI PL180 and PL181 provides an interface for reading and writing to MultiMedia and SD cards alike. This file documents differences between the core properties described -by mmc.txt and the properties used by the mmci driver. +by mmc.txt and the properties used by the mmci driver. Using "st" as +the prefix for a property, indicates support by the ST Micro variant. Required properties: - compatible : contains "arm,pl18x", "arm,primecell". -- arm,primecell-periphid : contains the PrimeCell Peripheral ID. +- vmmc-supply : phandle to the regulator device tree node, mentioned + as the VCC/VDD supply in the eMMC/SD specs. Optional properties: -- mmc-cap-mmc-highspeed : indicates whether MMC is high speed capable -- mmc-cap-sd-highspeed : indicates whether SD is high speed capable +- arm,primecell-periphid : contains the PrimeCell Peripheral ID, it overrides + the ID provided by the HW +- vqmmc-supply : phandle to the regulator device tree node, mentioned + as the VCCQ/VDD_IO supply in the eMMC/SD specs. +- st,sig-dir-dat0 : bus signal direction pin used for DAT[0]. +- st,sig-dir-dat2 : bus signal direction pin used for DAT[2]. +- st,sig-dir-dat31 : bus signal direction pin used for DAT[3] and DAT[1]. +- st,sig-dir-dat74 : bus signal direction pin used for DAT[4] to DAT[7]. +- st,sig-dir-cmd : cmd signal direction pin used for CMD. +- st,sig-pin-fbclk : feedback clock signal pin used. + +Deprecated properties: +- mmc-cap-mmc-highspeed : indicates whether MMC is high speed capable. +- mmc-cap-sd-highspeed : indicates whether SD is high speed capable. 
+ +Example: + +sdi0_per1@80126000 { + compatible = "arm,pl18x", "arm,primecell"; + reg = <0x80126000 0x1000>; + interrupts = <0 60 IRQ_TYPE_LEVEL_HIGH>; + + dmas = <&dma 29 0 0x2>, /* Logical - DevToMem */ + <&dma 29 0 0x0>; /* Logical - MemToDev */ + dma-names = "rx", "tx"; + + clocks = <&prcc_kclk 1 5>, <&prcc_pclk 1 5>; + clock-names = "sdi", "apb_pclk"; + + max-frequency = <100000000>; + bus-width = <4>; + cap-sd-highspeed; + cap-mmc-highspeed; + cd-gpios = <&gpio2 31 0x4>; // 95 + st,sig-dir-dat0; + st,sig-dir-dat2; + st,sig-dir-cmd; + st,sig-pin-fbclk; + + vmmc-supply = <&ab8500_ldo_aux3_reg>; + vqmmc-supply = <&vmmci>; + + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&sdi0_default_mode>; + pinctrl-1 = <&sdi0_sleep_mode>; +}; diff --git a/Documentation/devicetree/bindings/net/mdio-gpio.txt b/Documentation/devicetree/bindings/net/mdio-gpio.txt index c79bab025369..8dbcf8295c6c 100644 --- a/Documentation/devicetree/bindings/net/mdio-gpio.txt +++ b/Documentation/devicetree/bindings/net/mdio-gpio.txt @@ -14,7 +14,7 @@ node. Example: aliases { - mdio-gpio0 = <&mdio0>; + mdio-gpio0 = &mdio0; }; mdio0: mdio { diff --git a/Documentation/devicetree/bindings/pci/host-generic-pci.txt b/Documentation/devicetree/bindings/pci/host-generic-pci.txt new file mode 100644 index 000000000000..f0b0436807b4 --- /dev/null +++ b/Documentation/devicetree/bindings/pci/host-generic-pci.txt @@ -0,0 +1,100 @@ +* Generic PCI host controller + +Firmware-initialised PCI host controllers and PCI emulations, such as the +virtio-pci implementations found in kvmtool and other para-virtualised +systems, do not require driver support for complexities such as regulator +and clock management. In fact, the controller may not even require the +configuration of a control interface by the operating system, instead +presenting a set of fixed windows describing a subset of IO, Memory and +Configuration Spaces. + +Such a controller can be described purely in terms of the standardized device +tree bindings communicated in pci.txt: + + +Properties of the host controller node: + +- compatible : Must be "pci-host-cam-generic" or "pci-host-ecam-generic" + depending on the layout of configuration space (CAM vs + ECAM respectively). + +- device_type : Must be "pci". + +- ranges : As described in IEEE Std 1275-1994, but must provide + at least a definition of non-prefetchable memory. One + or both of prefetchable Memory and IO Space may also + be provided. + +- bus-range : Optional property (also described in IEEE Std 1275-1994) + to indicate the range of bus numbers for this controller. + If absent, defaults to <0 255> (i.e. all buses). + +- #address-cells : Must be 3. + +- #size-cells : Must be 2. + +- reg : The Configuration Space base address and size, as accessed + from the parent bus. + + +Properties of the /chosen node: + +- linux,pci-probe-only + : Optional property which takes a single-cell argument. + If '0', then Linux will assign devices in its usual manner, + otherwise it will not try to assign devices and instead use + them as they are configured already. + +Configuration Space is assumed to be memory-mapped (as opposed to being +accessed via an ioport) and laid out with a direct correspondence to the +geography of a PCI bus address by concatenating the various components to +form an offset. 
+ +For CAM, this 24-bit offset is: + + cfg_offset(bus, device, function, register) = + bus << 16 | device << 11 | function << 8 | register + +Whilst ECAM extends this by 4 bits to accommodate 4k of function space: + + cfg_offset(bus, device, function, register) = + bus << 20 | device << 15 | function << 12 | register + +Interrupt mapping is exactly as described in `Open Firmware Recommended +Practice: Interrupt Mapping' and requires the following properties: + +- #interrupt-cells : Must be 1 + +- interrupt-map : <see aforementioned specification> + +- interrupt-map-mask : <see aforementioned specification> + + +Example: + +pci { + compatible = "pci-host-cam-generic"; + device_type = "pci"; + #address-cells = <3>; + #size-cells = <2>; + bus-range = <0x0 0x1>; + + // CPU_PHYSICAL(2) SIZE(2) + reg = <0x0 0x40000000 0x0 0x1000000>; + + // BUS_ADDRESS(3) CPU_PHYSICAL(2) SIZE(2) + ranges = <0x01000000 0x0 0x01000000 0x0 0x01000000 0x0 0x00010000>, + <0x02000000 0x0 0x41000000 0x0 0x41000000 0x0 0x3f000000>; + + + #interrupt-cells = <0x1>; + + // PCI_DEVICE(3) INT#(1) CONTROLLER(PHANDLE) CONTROLLER_DATA(3) + interrupt-map = < 0x0 0x0 0x0 0x1 &gic 0x0 0x4 0x1 + 0x800 0x0 0x0 0x1 &gic 0x0 0x5 0x1 + 0x1000 0x0 0x0 0x1 &gic 0x0 0x6 0x1 + 0x1800 0x0 0x0 0x1 &gic 0x0 0x7 0x1>; + + // PCI_DEVICE(3) INT#(1) + interrupt-map-mask = <0xf800 0x0 0x0 0x7>; +}; diff --git a/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt b/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt new file mode 100644 index 000000000000..d8ef5bf50f11 --- /dev/null +++ b/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt @@ -0,0 +1,66 @@ +Renesas AHB to PCI bridge +------------------------- + +This is the bridge used internally to connect the USB controllers to the +AHB. There is one bridge instance per USB port connected to the internal +OHCI and EHCI controllers. + +Required properties: +- compatible: "renesas,pci-r8a7790" for the R8A7790 SoC; + "renesas,pci-r8a7791" for the R8A7791 SoC. +- reg: A list of physical regions to access the device: the first is + the operational registers for the OHCI/EHCI controllers and the + second is for the bridge configuration and control registers. +- interrupts: interrupt for the device. +- clocks: The reference to the device clock. +- bus-range: The PCI bus number range; as this is a single bus, the range + should be specified as the same value twice. +- #address-cells: must be 3. +- #size-cells: must be 2. +- #interrupt-cells: must be 1. +- interrupt-map: standard property used to define the mapping of the PCI + interrupts to the GIC interrupts. +- interrupt-map-mask: standard property that helps to define the interrupt + mapping.
+ +Example SoC configuration: + + pci0: pci@ee090000 { + compatible = "renesas,pci-r8a7790"; + clocks = <&mstp7_clks R8A7790_CLK_EHCI>; + reg = <0x0 0xee090000 0x0 0xc00>, + <0x0 0xee080000 0x0 0x1100>; + interrupts = <0 108 IRQ_TYPE_LEVEL_HIGH>; + status = "disabled"; + + bus-range = <0 0>; + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <1>; + interrupt-map-mask = <0xff00 0 0 0x7>; + interrupt-map = <0x0000 0 0 1 &gic 0 108 IRQ_TYPE_LEVEL_HIGH + 0x0800 0 0 1 &gic 0 108 IRQ_TYPE_LEVEL_HIGH + 0x1000 0 0 2 &gic 0 108 IRQ_TYPE_LEVEL_HIGH>; + + pci@0,1 { + reg = <0x800 0 0 0 0>; + device_type = "pci"; + phys = <&usbphy 0 0>; + phy-names = "usb"; + }; + + pci@0,2 { + reg = <0x1000 0 0 0 0>; + device_type = "pci"; + phys = <&usbphy 0 0>; + phy-names = "usb"; + }; + }; + +Example board setup: + +&pci0 { + status = "okay"; + pinctrl-0 = <&usb0_pins>; + pinctrl-names = "default"; +}; diff --git a/Documentation/devicetree/bindings/pci/rcar-pci.txt b/Documentation/devicetree/bindings/pci/rcar-pci.txt new file mode 100644 index 000000000000..29d3b989d3b0 --- /dev/null +++ b/Documentation/devicetree/bindings/pci/rcar-pci.txt @@ -0,0 +1,47 @@ +* Renesas RCar PCIe interface + +Required properties: +- compatible: should contain one of the following + "renesas,pcie-r8a7779", "renesas,pcie-r8a7790", "renesas,pcie-r8a7791" +- reg: base address and length of the pcie controller registers. +- #address-cells: set to <3> +- #size-cells: set to <2> +- bus-range: PCI bus numbers covered +- device_type: set to "pci" +- ranges: ranges for the PCI memory and I/O regions. +- dma-ranges: ranges for the inbound memory regions. +- interrupts: two interrupt sources for MSI interrupts, followed by interrupt + source for hardware related interrupts (e.g. link speed change). +- #interrupt-cells: set to <1> +- interrupt-map-mask and interrupt-map: standard PCI properties + to define the mapping of the PCIe interface to interrupt + numbers. +- clocks: from common clock binding: clock specifiers for the PCIe controller + and PCIe bus clocks. +- clock-names: from common clock binding: should be "pcie" and "pcie_bus". 
+ +Example: + +SoC specific DT Entry: + + pcie: pcie@fe000000 { + compatible = "renesas,pcie-r8a7791"; + reg = <0 0xfe000000 0 0x80000>; + #address-cells = <3>; + #size-cells = <2>; + bus-range = <0x00 0xff>; + device_type = "pci"; + ranges = <0x01000000 0 0x00000000 0 0xfe100000 0 0x00100000 + 0x02000000 0 0xfe200000 0 0xfe200000 0 0x00200000 + 0x02000000 0 0x30000000 0 0x30000000 0 0x08000000 + 0x42000000 0 0x38000000 0 0x38000000 0 0x08000000>; + dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x40000000 + 0x42000000 2 0x00000000 2 0x00000000 0 0x40000000>; + interrupts = <0 116 4>, <0 117 4>, <0 118 4>; + #interrupt-cells = <1>; + interrupt-map-mask = <0 0 0 0>; + interrupt-map = <0 0 0 0 &gic 0 116 4>; + clocks = <&mstp3_clks R8A7791_CLK_PCIE>, <&pcie_bus_clk>; + clock-names = "pcie", "pcie_bus"; + status = "disabled"; + }; diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt index 4f7897e99cba..10b8c5d2c797 100644 --- a/Documentation/driver-model/devres.txt +++ b/Documentation/driver-model/devres.txt @@ -308,3 +308,10 @@ SLAVE DMA ENGINE SPI devm_spi_register_master() + +GPIO + devm_gpiod_get() + devm_gpiod_get_index() + devm_gpiod_get_optional() + devm_gpiod_get_index_optional() + devm_gpiod_put() diff --git a/Documentation/email-clients.txt b/Documentation/email-clients.txt index e9f5daccbd02..4e30ebaa9e5b 100644 --- a/Documentation/email-clients.txt +++ b/Documentation/email-clients.txt @@ -201,20 +201,15 @@ To beat some sense out of the internal editor, do this: - Edit your Thunderbird config settings so that it won't use format=flowed. Go to "edit->preferences->advanced->config editor" to bring up the - thunderbird's registry editor, and set "mailnews.send_plaintext_flowed" to - "false". + thunderbird's registry editor. -- Disable HTML Format: Set "mail.identity.id1.compose_html" to "false". +- Set "mailnews.send_plaintext_flowed" to "false" -- Enable "preformat" mode: Set "editor.quotesPreformatted" to "true". +- Set "mailnews.wraplength" from "72" to "0" -- Enable UTF8: Set "prefs.converted-to-utf8" to "true". +- "View" > "Message Body As" > "Plain Text" -- Install the "toggle wordwrap" extension. Download the file from: - https://addons.mozilla.org/thunderbird/addon/2351/ - Then go to "tools->add ons", select "install" at the bottom of the screen, - and browse to where you saved the .xul file. This adds an "Enable - Wordwrap" entry under the Options menu of the message composer. +- "View" > "Character Encoding" > "Unicode (UTF-8)" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ TkRat (GUI) diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt index 8b9cd8eb3f91..264bcde0c51c 100644 --- a/Documentation/filesystems/proc.txt +++ b/Documentation/filesystems/proc.txt @@ -1245,8 +1245,9 @@ second). The meanings of the columns are as follows, from left to right: The "intr" line gives counts of interrupts serviced since boot time, for each of the possible system interrupts. The first column is the total of all -interrupts serviced; each subsequent column is the total for that particular -interrupt. +interrupts serviced including unnumbered architecture specific interrupts; +each subsequent column is the total for that particular numbered interrupt. +Unnumbered interrupts are not shown, only summed into the total. The "ctxt" line gives the total number of context switches across all CPUs. 
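Given the "intr" semantics documented above, the per-interrupt columns need not sum to the first field; the shortfall is the unnumbered architecture-specific interrupts. A minimal userspace sketch of that arithmetic follows. It assumes only the /proc/stat layout described in the hunk above; the program itself is illustrative and not part of any patch in this series.

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	int main(void)
	{
		static char line[1 << 16];	/* the "intr" line can be long */
		unsigned long long total, numbered = 0;
		FILE *f = fopen("/proc/stat", "r");
		char *p, *end;

		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f)) {
			if (strncmp(line, "intr ", 5))
				continue;
			/* first column: all interrupts, numbered or not */
			p = line + 5;
			total = strtoull(p, &p, 10);
			/* remaining columns: numbered interrupts only */
			while (*p && *p != '\n') {
				numbered += strtoull(p, &end, 10);
				if (end == p)
					break;
				p = end;
			}
			printf("total %llu, numbered %llu, unnumbered %llu\n",
			       total, numbered, total - numbered);
			break;
		}
		fclose(f);
		return 0;
	}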
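In the same spirit, the devm_gpiod_* entries added to the devres list a few hunks up are consumed like any other managed resource. A hedged sketch of a consumer driver follows; the "foo" platform device and its "reset" GPIO line are invented for illustration, and the two-argument devm_gpiod_get() reflects the API as listed at this point.

	#include <linux/err.h>
	#include <linux/gpio/consumer.h>
	#include <linux/module.h>
	#include <linux/platform_device.h>

	static int foo_probe(struct platform_device *pdev)
	{
		struct gpio_desc *reset;

		/* Managed: the descriptor is released on driver detach */
		reset = devm_gpiod_get(&pdev->dev, "reset");
		if (IS_ERR(reset))
			return PTR_ERR(reset);

		/* Drive the (invented) reset line high */
		return gpiod_direction_output(reset, 1);
	}

	static struct platform_driver foo_driver = {
		.probe	= foo_probe,
		.driver	= { .name = "foo" },
	};
	module_platform_driver(foo_driver);

	MODULE_LICENSE("GPL");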
diff --git a/Documentation/gpio/driver.txt b/Documentation/gpio/driver.txt index f73cc7b5dc85..fa9a0a8b3734 100644 --- a/Documentation/gpio/driver.txt +++ b/Documentation/gpio/driver.txt @@ -73,6 +73,65 @@ The IRQ portions of the GPIO block are implemented using an irqchip, using the header <linux/irq.h>. So basically such a driver is utilizing two sub- systems simultaneously: gpio and irq. +GPIO irqchips usually fall into one of two categories: + +* CHAINED GPIO irqchips: these are usually the type that is embedded on + an SoC. This means that there is a fast IRQ handler for the GPIOs that + gets called in a chain from the parent IRQ handler, most typically the + system interrupt controller. This means the GPIO irqchip is registered + using irq_set_chained_handler() or the corresponding + gpiochip_set_chained_irqchip() helper function, and the GPIO irqchip + handler will be called immediately from the parent irqchip, while + holding the IRQs disabled. The GPIO irqchip will then end up calling + something like this sequence in its interrupt handler: + + static irqreturn_t tc3589x_gpio_irq(int irq, void *data) + chained_irq_enter(...); + generic_handle_irq(...); + chained_irq_exit(...); + + Chained GPIO irqchips typically can NOT set the .can_sleep flag on + struct gpio_chip, as everything happens directly in the callbacks. + +* NESTED THREADED GPIO irqchips: these are off-chip GPIO expanders and any + other GPIO irqchip residing on the other side of a sleeping bus. Such + drivers need slow bus traffic to read out the IRQ status and similar, + and that traffic may in turn incur other IRQs, so they cannot be handled + in a fast IRQ handler with IRQs disabled. Instead they need to spawn a + thread and then mask the parent IRQ line until the interrupt is handled + by the driver. Such a driver will typically call something like + this in its interrupt handler: + + static irqreturn_t tc3589x_gpio_irq(int irq, void *data) + ... + handle_nested_irq(irq); + + The hallmark of threaded GPIO irqchips is that they set the .can_sleep + flag on struct gpio_chip to true, indicating that this chip may sleep + when accessing the GPIOs. + +To help out in handling the set-up and management of GPIO irqchips and the +associated irqdomain and resource allocation callbacks, the gpiolib has +some helpers that can be enabled by selecting the GPIOLIB_IRQCHIP Kconfig +symbol: + +* gpiochip_irqchip_add(): adds an irqchip to a gpiochip. It will pass + the struct gpio_chip* for the chip to all IRQ callbacks, so the driver + needs to embed the gpio_chip in its state container and obtain a pointer + to the container using container_of(). + (See Documentation/driver-model/design-patterns.txt) + +* gpiochip_set_chained_irqchip(): sets up a chained irq handler for a + gpio_chip from a parent IRQ and passes the struct gpio_chip* as handler + data. (Notice handler data, since the irqchip data is likely used by the + parent irqchip!) This is for the chained type of chip. + +To use the helpers, please keep the following in mind: + +- Make sure to assign all relevant members of the struct gpio_chip so that + the irqchip can initialize. E.g. .dev and .can_sleep shall be set up + properly. + It is legal for any IRQ consumer to request an IRQ from any irqchip no matter if that is a combined GPIO+IRQ driver.
The basic premise is that gpio_chip and irq_chip are orthogonal, and offering their services independent of each diff --git a/Documentation/hsi.txt b/Documentation/hsi.txt new file mode 100644 index 000000000000..6ac6cd51852a --- /dev/null +++ b/Documentation/hsi.txt @@ -0,0 +1,75 @@ +HSI - High-speed Synchronous Serial Interface + +1. Introduction +~~~~~~~~~~~~~~~ + +High Speed Synchronous Interface (HSI) is a full-duplex, low-latency protocol +that is optimized for die-level interconnect between an Application Processor +and a Baseband chipset. It was specified by the MIPI Alliance in 2003 and +has been implemented by multiple vendors since then. + +The HSI interface supports full duplex communication over multiple channels +(typically 8) and is capable of reaching speeds up to 200 Mbit/s. + +The serial protocol uses two signals, DATA and FLAG, as combined data and clock +signals, and an additional READY signal for flow control. An additional WAKE +signal can be used to wake up the chips from standby modes. The signals are +commonly prefixed by AC for signals going from the application die to the +cellular die and CA for signals going the other way around. + ++------------+ +---------------+ +| Cellular | | Application | +| Die | | Die | +| | - - - - - - CAWAKE - - - - - - >| | +| T|------------ CADATA ------------>|R | +| X|------------ CAFLAG ------------>|X | +| |<----------- ACREADY ------------| | +| | | | +| | | | +| |< - - - - - ACWAKE - - - - - - -| | +| R|<----------- ACDATA -------------|T | +| X|<----------- ACFLAG -------------|X | +| |------------ CAREADY ----------->| | +| | | | +| | | | ++------------+ +---------------+ + +2. HSI Subsystem in Linux +~~~~~~~~~~~~~~~~~~~~~~~~~ + +In the Linux kernel, the hsi subsystem is supposed to be used for HSI devices. +The hsi subsystem contains drivers for hsi controllers including support for +multi-port controllers and provides a generic API for using the HSI ports. + +It also contains HSI client drivers, which make use of the generic API to +implement a protocol used on the HSI interface. These client drivers can +use an arbitrary number of channels. + +3. hsi-char Device +~~~~~~~~~~~~~~~~~~ + +Each port automatically registers a generic client driver called hsi_char, +which provides a character device for userspace representing the HSI port. +It can be used to communicate via HSI from userspace. Userspace may +configure the hsi_char device using the following ioctl commands: + +* HSC_RESET + - flush the HSI port + +* HSC_SET_PM + - enable or disable the client. + +* HSC_SEND_BREAK + - send break + +* HSC_SET_RX + - set RX configuration + +* HSC_GET_RX + - get RX configuration + +* HSC_SET_TX + - set TX configuration + +* HSC_GET_TX + - get TX configuration diff --git a/Documentation/hwmon/sysfs-interface b/Documentation/hwmon/sysfs-interface index 79f8257dd790..2cc95ad46604 100644 --- a/Documentation/hwmon/sysfs-interface +++ b/Documentation/hwmon/sysfs-interface @@ -327,6 +327,13 @@ temp[1-*]_max_hyst from the max value. RW +temp[1-*]_min_hyst + Temperature hysteresis value for min limit. + Unit: millidegree Celsius + Must be reported as an absolute temperature, NOT a delta + from the min value. + RW + temp[1-*]_input Temperature input value. Unit: millidegree Celsius RO @@ -362,6 +369,13 @@ temp[1-*]_lcrit Temperature critical min value, typically lower than Unit: millidegree Celsius RW +temp[1-*]_lcrit_hyst + Temperature hysteresis value for critical min limit.
+ Unit: millidegree Celsius + Must be reported as an absolute temperature, NOT a delta + from the critical min value. + RW + temp[1-*]_offset Temperature offset which is added to the temperature reading by the chip. diff --git a/Documentation/java.txt b/Documentation/java.txt index e6a723281547..418020584ccc 100644 --- a/Documentation/java.txt +++ b/Documentation/java.txt @@ -188,6 +188,9 @@ shift #define CP_METHODREF 10 #define CP_INTERFACEMETHODREF 11 #define CP_NAMEANDTYPE 12 +#define CP_METHODHANDLE 15 +#define CP_METHODTYPE 16 +#define CP_INVOKEDYNAMIC 18 /* Define some commonly used error messages */ @@ -242,14 +245,19 @@ void skip_constant(FILE *classfile, u_int16_t *cur) break; case CP_CLASS: case CP_STRING: + case CP_METHODTYPE: seekerr = fseek(classfile, 2, SEEK_CUR); break; + case CP_METHODHANDLE: + seekerr = fseek(classfile, 3, SEEK_CUR); + break; case CP_INTEGER: case CP_FLOAT: case CP_FIELDREF: case CP_METHODREF: case CP_INTERFACEMETHODREF: case CP_NAMEANDTYPE: + case CP_INVOKEDYNAMIC: seekerr = fseek(classfile, 4, SEEK_CUR); break; case CP_LONG: diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 43842177b771..7da289ee0589 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -892,7 +892,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. (mmio) or 32-bit (mmio32). The options are the same as for ttyS, above. - earlyprintk= [X86,SH,BLACKFIN,ARM] + earlyprintk= [X86,SH,BLACKFIN,ARM,M68k] earlyprintk=vga earlyprintk=efi earlyprintk=xen @@ -2218,10 +2218,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted. noreplace-smp [X86-32,SMP] Don't replace SMP instructions with UP alternatives - nordrand [X86] Disable the direct use of the RDRAND - instruction even if it is supported by the - processor. RDRAND is still available to user - space applications. + nordrand [X86] Disable kernel use of the RDRAND and + RDSEED instructions even if they are supported + by the processor. RDRAND and RDSEED are still + available to user space applications. noresume [SWSUSP] Disables resume and restores original swap space. diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt index 81f940f4e884..e3ba753cb714 100644 --- a/Documentation/networking/filter.txt +++ b/Documentation/networking/filter.txt @@ -277,7 +277,7 @@ Possible BPF extensions are shown in the following table: mark skb->mark queue skb->queue_mapping hatype skb->dev->type - rxhash skb->rxhash + rxhash skb->hash cpu raw_smp_processor_id() vlan_tci vlan_tx_tag_get(skb) vlan_pr vlan_tx_tag_present(skb) diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt index 6fea79efb4cb..38112d512f47 100644 --- a/Documentation/networking/packet_mmap.txt +++ b/Documentation/networking/packet_mmap.txt @@ -578,7 +578,7 @@ processes. This also works in combination with mmap(2) on packet sockets. 
Currently implemented fanout policies are: - - PACKET_FANOUT_HASH: schedule to socket by skb's rxhash + - PACKET_FANOUT_HASH: schedule to socket by skb's packet hash - PACKET_FANOUT_LB: schedule to socket by round-robin - PACKET_FANOUT_CPU: schedule to socket by CPU packet arrives on - PACKET_FANOUT_RND: schedule to socket by random selection diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index a9380ba54c8e..b4f53653c106 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -2126,7 +2126,7 @@ into the hash PTE second double word). 4.75 KVM_IRQFD Capability: KVM_CAP_IRQFD -Architectures: x86 +Architectures: x86 s390 Type: vm ioctl Parameters: struct kvm_irqfd (in) Returns: 0 on success, -1 on error diff --git a/MAINTAINERS b/MAINTAINERS index 51ebb779c5f3..b82ebe761b30 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -537,7 +537,7 @@ L: linux-alpha@vger.kernel.org F: arch/alpha/ ALTERA TRIPLE SPEED ETHERNET DRIVER -M: Vince Bridgers <vbridgers2013@gmail.com +M: Vince Bridgers <vbridgers2013@gmail.com> L: netdev@vger.kernel.org L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers) S: Maintained @@ -1893,14 +1893,15 @@ L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/broadcom/bnx2x/ -BROADCOM BCM281XX/BCM11XXX ARM ARCHITECTURE +BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE M: Christian Daudt <bcm@fixthebug.org> M: Matt Porter <mporter@linaro.org> L: bcm-kernel-feedback-list@broadcom.com -T: git git://git.github.com/broadcom/bcm11351 +T: git git://github.com/broadcom/mach-bcm S: Maintained F: arch/arm/mach-bcm/ F: arch/arm/boot/dts/bcm113* +F: arch/arm/boot/dts/bcm216* F: arch/arm/boot/dts/bcm281* F: arch/arm/configs/bcm_defconfig F: drivers/mmc/host/sdhci_bcm_kona.c @@ -2245,12 +2246,6 @@ L: linux-usb@vger.kernel.org S: Maintained F: drivers/usb/host/ohci-ep93xx.c -CIRRUS LOGIC CS4270 SOUND DRIVER -M: Timur Tabi <timur@tabi.org> -L: alsa-devel@alsa-project.org (moderated for non-subscribers) -S: Odd Fixes -F: sound/soc/codecs/cs4270* - CIRRUS LOGIC AUDIO CODEC DRIVERS M: Brian Austin <brian.austin@cirrus.com> M: Paul Handrigan <Paul.Handrigan@cirrus.com> @@ -4209,9 +4204,11 @@ S: Maintained F: fs/hpfs/ HSI SUBSYSTEM -M: Sebastian Reichel <sre@debian.org> +M: Sebastian Reichel <sre@kernel.org> +T: git git://git.kernel.org/pub/scm/linux/kernel/git/sre/linux-hsi.git S: Maintained F: Documentation/ABI/testing/sysfs-bus-hsi +F: Documentation/hsi.txt F: drivers/hsi/ F: include/linux/hsi/ F: include/uapi/linux/hsi/ @@ -4818,6 +4815,14 @@ L: linux-kernel@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core F: kernel/irq/ + +IRQCHIP DRIVERS +M: Thomas Gleixner <tglx@linutronix.de> +M: Jason Cooper <jason@lakedaemon.net> +L: linux-kernel@vger.kernel.org +S: Maintained +T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core +T: git git://git.infradead.org/users/jcooper/linux.git irqchip/core F: drivers/irqchip/ IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY) @@ -5490,15 +5495,15 @@ F: Documentation/hwmon/ltc4261 F: drivers/hwmon/ltc4261.c LTP (Linux Test Project) -M: Shubham Goyal <shubham@linux.vnet.ibm.com> M: Mike Frysinger <vapier@gentoo.org> M: Cyril Hrubis <chrubis@suse.cz> -M: Caspar Zhang <caspar@casparzhang.com> M: Wanlong Gao <gaowanlong@cn.fujitsu.com> +M: Jan Stancek <jstancek@redhat.com> +M: Stanislav Kholmanskikh <stanislav.kholmanskikh@oracle.com> +M: Alexey Kodanev <alexey.kodanev@oracle.com> L: 
ltp-list@lists.sourceforge.net (subscribers-only) -W: http://ltp.sourceforge.net/ +W: http://linux-test-project.github.io/ T: git git://github.com/linux-test-project/ltp.git -T: git git://ltp.git.sourceforge.net/gitroot/ltp/ltp-dev S: Maintained M32R ARCHITECTURE @@ -6415,6 +6420,7 @@ F: drivers/usb/*/*omap* F: arch/arm/*omap*/usb* OMAP GPIO DRIVER +M: Javier Martinez Canillas <javier@dowhile0.org> M: Santosh Shilimkar <santosh.shilimkar@ti.com> M: Kevin Hilman <khilman@deeprootsystems.com> L: linux-omap@vger.kernel.org @@ -6511,10 +6517,10 @@ T: git git://openrisc.net/~jonas/linux F: arch/openrisc/ OPENVSWITCH -M: Jesse Gross <jesse@nicira.com> +M: Pravin Shelar <pshelar@nicira.com> L: dev@openvswitch.org W: http://openvswitch.org -T: git git://git.kernel.org/pub/scm/linux/kernel/git/jesse/openvswitch.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/pshelar/openvswitch.git S: Maintained F: net/openvswitch/ @@ -6704,6 +6710,7 @@ F: Documentation/PCI/ F: drivers/pci/ F: include/linux/pci* F: arch/x86/pci/ +F: arch/x86/kernel/quirks.c PCI DRIVER FOR IMX6 M: Richard Zhu <r65037@freescale.com> @@ -6751,6 +6758,14 @@ L: linux-pci@vger.kernel.org S: Maintained F: drivers/pci/host/*designware* +PCI DRIVER FOR GENERIC OF HOSTS +M: Will Deacon <will.deacon@arm.com> +L: linux-pci@vger.kernel.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Maintained +F: Documentation/devicetree/bindings/pci/host-generic-pci.txt +F: drivers/pci/host/pci-host-generic.c + PCMCIA SUBSYSTEM P: Linux PCMCIA Team L: linux-pcmcia@lists.infradead.org @@ -7402,6 +7417,14 @@ F: drivers/rpmsg/ F: Documentation/rpmsg.txt F: include/linux/rpmsg.h +RESET CONTROLLER FRAMEWORK +M: Philipp Zabel <p.zabel@pengutronix.de> +S: Maintained +F: drivers/reset/ +F: Documentation/devicetree/bindings/reset/ +F: include/linux/reset.h +F: include/linux/reset-controller.h + RFKILL M: Johannes Berg <johannes@sipsolutions.net> L: linux-wireless@vger.kernel.org @@ -9107,6 +9130,9 @@ F: arch/um/os-Linux/drivers/ TURBOCHANNEL SUBSYSTEM M: "Maciej W. Rozycki" <macro@linux-mips.org> +M: Ralf Baechle <ralf@linux-mips.org> +L: linux-mips@linux-mips.org +Q: http://patchwork.linux-mips.org/project/linux-mips/list/ S: Maintained F: drivers/tc/ F: include/linux/tc.h @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 15 SUBLEVEL = 0 -EXTRAVERSION = -rc5 +EXTRAVERSION = -rc8 NAME = Shuffling Zombie Juror # *DOCUMENTATION* diff --git a/arch/alpha/include/asm/pci.h b/arch/alpha/include/asm/pci.h index d01afb78919c..f7f680f7457d 100644 --- a/arch/alpha/include/asm/pci.h +++ b/arch/alpha/include/asm/pci.h @@ -59,11 +59,6 @@ struct pci_controller { extern void pcibios_set_master(struct pci_dev *dev); -extern inline void pcibios_penalize_isa_irq(int irq, int active) -{ - /* We don't do dynamic PCI IRQ allocation */ -} - /* IOMMU controls. */ /* The PCI address space does not equal the physical memory address space. 
diff --git a/arch/arm/boot/dts/am335x-boneblack.dts b/arch/arm/boot/dts/am335x-boneblack.dts index 6b71ad95a5cf..305975d3f531 100644 --- a/arch/arm/boot/dts/am335x-boneblack.dts +++ b/arch/arm/boot/dts/am335x-boneblack.dts @@ -26,7 +26,6 @@ pinctrl-0 = <&emmc_pins>; bus-width = <8>; status = "okay"; - ti,vcc-aux-disable-is-sleep; }; &am33xx_pinmux { diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi index cb6811e5ae5a..7ad75b4e0663 100644 --- a/arch/arm/boot/dts/am33xx.dtsi +++ b/arch/arm/boot/dts/am33xx.dtsi @@ -144,7 +144,7 @@ compatible = "ti,edma3"; ti,hwmods = "tpcc", "tptc0", "tptc1", "tptc2"; reg = <0x49000000 0x10000>, - <0x44e10f90 0x10>; + <0x44e10f90 0x40>; interrupts = <12 13 14>; #dma-cells = <1>; dma-channels = <64>; diff --git a/arch/arm/boot/dts/am3517.dtsi b/arch/arm/boot/dts/am3517.dtsi index 788391f91684..5a452fdd7c5d 100644 --- a/arch/arm/boot/dts/am3517.dtsi +++ b/arch/arm/boot/dts/am3517.dtsi @@ -62,5 +62,21 @@ }; }; +&iva { + status = "disabled"; +}; + +&mailbox { + status = "disabled"; +}; + +&mmu_isp { + status = "disabled"; +}; + +&smartreflex_mpu_iva { + status = "disabled"; +}; + /include/ "am35xx-clocks.dtsi" /include/ "omap36xx-am35xx-omap3430es2plus-clocks.dtsi" diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi index d1f8707ff1df..d547009148da 100644 --- a/arch/arm/boot/dts/am4372.dtsi +++ b/arch/arm/boot/dts/am4372.dtsi @@ -67,11 +67,15 @@ }; ocp { - compatible = "simple-bus"; + compatible = "ti,am4372-l3-noc", "simple-bus"; #address-cells = <1>; #size-cells = <1>; ranges; ti,hwmods = "l3_main"; + reg = <0x44000000 0x400000 + 0x44800000 0x400000>; + interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>; prcm: prcm@44df0000 { compatible = "ti,am4-prcm"; diff --git a/arch/arm/boot/dts/am437x-gp-evm.dts b/arch/arm/boot/dts/am437x-gp-evm.dts index df8798e8bd25..a055f7f0f14a 100644 --- a/arch/arm/boot/dts/am437x-gp-evm.dts +++ b/arch/arm/boot/dts/am437x-gp-evm.dts @@ -117,6 +117,11 @@ status = "okay"; }; +&gpio5 { + status = "okay"; + ti,no-reset-on-init; +}; + &mmc1 { status = "okay"; vmmc-supply = <&vmmcsd_fixed>; diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts index 167dbc8494de..2836328b90c8 100644 --- a/arch/arm/boot/dts/am43x-epos-evm.dts +++ b/arch/arm/boot/dts/am43x-epos-evm.dts @@ -341,7 +341,7 @@ }; partition@9 { label = "NAND.file-system"; - reg = <0x00800000 0x1F600000>; + reg = <0x00a00000 0x1f600000>; }; }; }; diff --git a/arch/arm/boot/dts/armada-370-db.dts b/arch/arm/boot/dts/armada-370-db.dts index 82f238a9063f..3383c4b66803 100644 --- a/arch/arm/boot/dts/armada-370-db.dts +++ b/arch/arm/boot/dts/armada-370-db.dts @@ -67,6 +67,7 @@ i2c@11000 { pinctrl-0 = <&i2c0_pins>; pinctrl-names = "default"; + clock-frequency = <100000>; status = "okay"; audio_codec: audio-codec@4a { compatible = "cirrus,cs42l51"; diff --git a/arch/arm/boot/dts/armada-375-db.dts b/arch/arm/boot/dts/armada-375-db.dts index 9378d3136b41..0451124e8ebf 100644 --- a/arch/arm/boot/dts/armada-375-db.dts +++ b/arch/arm/boot/dts/armada-375-db.dts @@ -79,6 +79,11 @@ }; }; + sata@a0000 { + status = "okay"; + nr-ports = <2>; + }; + nand: nand@d0000 { pinctrl-0 = <&nand_pins>; pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/armada-380.dtsi b/arch/arm/boot/dts/armada-380.dtsi index 068031f0f263..6d0f03c98ee9 100644 --- a/arch/arm/boot/dts/armada-380.dtsi +++ b/arch/arm/boot/dts/armada-380.dtsi @@ -99,7 +99,7 @@ pcie@3,0 { device_type = "pci"; assigned-addresses = 
<0x82000800 0 0x44000 0 0x2000>; - reg = <0x1000 0 0 0 0>; + reg = <0x1800 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; #interrupt-cells = <1>; diff --git a/arch/arm/boot/dts/armada-385.dtsi b/arch/arm/boot/dts/armada-385.dtsi index e2919f02e1d4..da801964a257 100644 --- a/arch/arm/boot/dts/armada-385.dtsi +++ b/arch/arm/boot/dts/armada-385.dtsi @@ -110,7 +110,7 @@ pcie@3,0 { device_type = "pci"; assigned-addresses = <0x82000800 0 0x44000 0 0x2000>; - reg = <0x1000 0 0 0 0>; + reg = <0x1800 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; #interrupt-cells = <1>; @@ -131,7 +131,7 @@ pcie@4,0 { device_type = "pci"; assigned-addresses = <0x82000800 0 0x48000 0 0x2000>; - reg = <0x1000 0 0 0 0>; + reg = <0x2000 0 0 0 0>; #address-cells = <3>; #size-cells = <2>; #interrupt-cells = <1>; diff --git a/arch/arm/boot/dts/armada-xp-db.dts b/arch/arm/boot/dts/armada-xp-db.dts index 448373c4b0e5..90f0bf6f9271 100644 --- a/arch/arm/boot/dts/armada-xp-db.dts +++ b/arch/arm/boot/dts/armada-xp-db.dts @@ -49,7 +49,7 @@ /* Device Bus parameters are required */ /* Read parameters */ - devbus,bus-width = <8>; + devbus,bus-width = <16>; devbus,turn-off-ps = <60000>; devbus,badr-skew-ps = <0>; devbus,acc-first-ps = <124000>; diff --git a/arch/arm/boot/dts/armada-xp-gp.dts b/arch/arm/boot/dts/armada-xp-gp.dts index 61bda687f782..0c756421ae6a 100644 --- a/arch/arm/boot/dts/armada-xp-gp.dts +++ b/arch/arm/boot/dts/armada-xp-gp.dts @@ -59,7 +59,7 @@ /* Device Bus parameters are required */ /* Read parameters */ - devbus,bus-width = <8>; + devbus,bus-width = <16>; devbus,turn-off-ps = <60000>; devbus,badr-skew-ps = <0>; devbus,acc-first-ps = <124000>; @@ -146,22 +146,22 @@ ethernet@70000 { status = "okay"; phy = <&phy0>; - phy-mode = "rgmii-id"; + phy-mode = "qsgmii"; }; ethernet@74000 { status = "okay"; phy = <&phy1>; - phy-mode = "rgmii-id"; + phy-mode = "qsgmii"; }; ethernet@30000 { status = "okay"; phy = <&phy2>; - phy-mode = "rgmii-id"; + phy-mode = "qsgmii"; }; ethernet@34000 { status = "okay"; phy = <&phy3>; - phy-mode = "rgmii-id"; + phy-mode = "qsgmii"; }; /* Front-side USB slot */ diff --git a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts index 985948ce67b3..5d42feb31049 100644 --- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts +++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts @@ -39,7 +39,7 @@ /* Device Bus parameters are required */ /* Read parameters */ - devbus,bus-width = <8>; + devbus,bus-width = <16>; devbus,turn-off-ps = <60000>; devbus,badr-skew-ps = <0>; devbus,acc-first-ps = <124000>; diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts index ce1375595e5f..4537259ce529 100644 --- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts +++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts @@ -34,7 +34,7 @@ }; spi0: spi@f0004000 { - cs-gpios = <&pioD 13 0>; + cs-gpios = <&pioD 13 0>, <0>, <0>, <&pioD 16 0>; status = "okay"; }; @@ -79,7 +79,7 @@ }; spi1: spi@f8008000 { - cs-gpios = <&pioC 25 0>, <0>, <0>, <&pioD 16 0>; + cs-gpios = <&pioC 25 0>; status = "okay"; }; diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi index 366fc2cbcd64..c0e0eae16a27 100644 --- a/arch/arm/boot/dts/at91sam9260.dtsi +++ b/arch/arm/boot/dts/at91sam9260.dtsi @@ -641,7 +641,7 @@ trigger@3 { reg = <3>; trigger-name = "external"; - trigger-value = <0x13>; + trigger-value = <0xd>; trigger-external; }; }; diff --git a/arch/arm/boot/dts/at91sam9261.dtsi b/arch/arm/boot/dts/at91sam9261.dtsi 
index e21dda0e8986..3be973e9889a 100644 --- a/arch/arm/boot/dts/at91sam9261.dtsi +++ b/arch/arm/boot/dts/at91sam9261.dtsi @@ -10,7 +10,7 @@ #include <dt-bindings/pinctrl/at91.h> #include <dt-bindings/interrupt-controller/irq.h> #include <dt-bindings/gpio/gpio.h> -#include <dt-bindings/clk/at91.h> +#include <dt-bindings/clock/at91.h> / { model = "Atmel AT91SAM9261 family SoC"; diff --git a/arch/arm/boot/dts/at91sam9rl.dtsi b/arch/arm/boot/dts/at91sam9rl.dtsi index 63e1784d272c..92a52faebef7 100644 --- a/arch/arm/boot/dts/at91sam9rl.dtsi +++ b/arch/arm/boot/dts/at91sam9rl.dtsi @@ -8,7 +8,7 @@ #include "skeleton.dtsi" #include <dt-bindings/pinctrl/at91.h> -#include <dt-bindings/clk/at91.h> +#include <dt-bindings/clock/at91.h> #include <dt-bindings/interrupt-controller/irq.h> #include <dt-bindings/gpio/gpio.h> diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index 149b55099935..ab01f2d0e590 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@ -99,13 +99,13 @@ * hierarchy. */ ocp { - compatible = "ti,omap4-l3-noc", "simple-bus"; + compatible = "ti,dra7-l3-noc", "simple-bus"; #address-cells = <1>; #size-cells = <1>; ranges; ti,hwmods = "l3_main_1", "l3_main_2"; - reg = <0x44000000 0x2000>, - <0x44800000 0x3000>; + reg = <0x44000000 0x1000000>, + <0x45000000 0x1000>; interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>; diff --git a/arch/arm/boot/dts/exynos4412-trats2.dts b/arch/arm/boot/dts/exynos4412-trats2.dts index 9583563dd0ef..8a558b7ac999 100644 --- a/arch/arm/boot/dts/exynos4412-trats2.dts +++ b/arch/arm/boot/dts/exynos4412-trats2.dts @@ -503,7 +503,7 @@ status = "okay"; ak8975@0c { - compatible = "ak,ak8975"; + compatible = "asahi-kasei,ak8975"; reg = <0x0c>; gpios = <&gpj0 7 0>; }; diff --git a/arch/arm/boot/dts/exynos5250-arndale.dts b/arch/arm/boot/dts/exynos5250-arndale.dts index 090f9830b129..cde19c818667 100644 --- a/arch/arm/boot/dts/exynos5250-arndale.dts +++ b/arch/arm/boot/dts/exynos5250-arndale.dts @@ -107,6 +107,7 @@ regulator-name = "VDD_IOPERI_1.8V"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; + regulator-always-on; op_mode = <1>; }; diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi index 418f2506aaf0..b69fbcb7dcb8 100644 --- a/arch/arm/boot/dts/exynos5420.dtsi +++ b/arch/arm/boot/dts/exynos5420.dtsi @@ -382,7 +382,7 @@ spi_0: spi@12d20000 { compatible = "samsung,exynos4210-spi"; reg = <0x12d20000 0x100>; - interrupts = <0 66 0>; + interrupts = <0 68 0>; dmas = <&pdma0 5 &pdma0 4>; dma-names = "tx", "rx"; @@ -398,7 +398,7 @@ spi_1: spi@12d30000 { compatible = "samsung,exynos4210-spi"; reg = <0x12d30000 0x100>; - interrupts = <0 67 0>; + interrupts = <0 69 0>; dmas = <&pdma1 5 &pdma1 4>; dma-names = "tx", "rx"; @@ -414,7 +414,7 @@ spi_2: spi@12d40000 { compatible = "samsung,exynos4210-spi"; reg = <0x12d40000 0x100>; - interrupts = <0 68 0>; + interrupts = <0 70 0>; dmas = <&pdma0 7 &pdma0 6>; dma-names = "tx", "rx"; diff --git a/arch/arm/boot/dts/imx53-mba53.dts b/arch/arm/boot/dts/imx53-mba53.dts index 7c8c12969892..a3431d784870 100644 --- a/arch/arm/boot/dts/imx53-mba53.dts +++ b/arch/arm/boot/dts/imx53-mba53.dts @@ -244,7 +244,7 @@ &tve { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_vga_sync_1>; - i2c-ddc-bus = <&i2c3>; + ddc-i2c-bus = <&i2c3>; fsl,tve-mode = "vga"; fsl,hsync-pin = <4>; fsl,vsync-pin = <6>; diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi index 9c2bff2252d0..6a1bf4ff83d5 100644 --- 
a/arch/arm/boot/dts/imx53.dtsi +++ b/arch/arm/boot/dts/imx53.dtsi @@ -115,7 +115,7 @@ #address-cells = <1>; #size-cells = <0>; compatible = "fsl,imx53-ipu"; - reg = <0x18000000 0x080000000>; + reg = <0x18000000 0x08000000>; interrupts = <11 10>; clocks = <&clks IMX5_CLK_IPU_GATE>, <&clks IMX5_CLK_IPU_DI0_GATE>, diff --git a/arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts b/arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts index 32c6fb4a1162..b939f4f52d16 100644 --- a/arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts +++ b/arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts @@ -30,6 +30,16 @@ bootargs = "console=ttyS0,115200n8 earlyprintk"; }; + mbus { + pcie-controller { + status = "okay"; + + pcie@1,0 { + status = "okay"; + }; + }; + }; + ocp@f1000000 { pinctrl@10000 { pmx_usb_led: pmx-usb-led { @@ -73,14 +83,6 @@ ehci@50000 { status = "okay"; }; - - pcie-controller { - status = "okay"; - - pcie@1,0 { - status = "okay"; - }; - }; }; gpio-leds { diff --git a/arch/arm/boot/dts/kirkwood-nsa310-common.dtsi b/arch/arm/boot/dts/kirkwood-nsa310-common.dtsi index aa78c2d11fe7..e2cc85cc3b87 100644 --- a/arch/arm/boot/dts/kirkwood-nsa310-common.dtsi +++ b/arch/arm/boot/dts/kirkwood-nsa310-common.dtsi @@ -4,6 +4,16 @@ / { model = "ZyXEL NSA310"; + mbus { + pcie-controller { + status = "okay"; + + pcie@1,0 { + status = "okay"; + }; + }; + }; + ocp@f1000000 { pinctrl: pinctrl@10000 { @@ -26,14 +36,6 @@ status = "okay"; nr-ports = <2>; }; - - pcie-controller { - status = "okay"; - - pcie@1,0 { - status = "okay"; - }; - }; }; gpio_poweroff { diff --git a/arch/arm/boot/dts/kirkwood-t5325.dts b/arch/arm/boot/dts/kirkwood-t5325.dts index 7d1c7677a18f..0bd70d928c69 100644 --- a/arch/arm/boot/dts/kirkwood-t5325.dts +++ b/arch/arm/boot/dts/kirkwood-t5325.dts @@ -127,11 +127,6 @@ i2c@11000 { status = "okay"; - - alc5621: alc5621@1a { - compatible = "realtek,alc5621"; - reg = <0x1a>; - }; }; serial@12000 { diff --git a/arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi b/arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi index f577b7df9a29..521c587acaee 100644 --- a/arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi +++ b/arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi @@ -24,11 +24,10 @@ compatible = "smsc,lan9221", "smsc,lan9115"; bank-width = <2>; gpmc,mux-add-data; - gpmc,cs-on-ns = <0>; - gpmc,cs-rd-off-ns = <186>; - gpmc,cs-wr-off-ns = <186>; - gpmc,adv-on-ns = <12>; - gpmc,adv-rd-off-ns = <48>; + gpmc,cs-on-ns = <1>; + gpmc,cs-rd-off-ns = <180>; + gpmc,cs-wr-off-ns = <180>; + gpmc,adv-rd-off-ns = <18>; gpmc,adv-wr-off-ns = <48>; gpmc,oe-on-ns = <54>; gpmc,oe-off-ns = <168>; @@ -36,12 +35,10 @@ gpmc,we-off-ns = <168>; gpmc,rd-cycle-ns = <186>; gpmc,wr-cycle-ns = <186>; - gpmc,access-ns = <114>; - gpmc,page-burst-access-ns = <6>; - gpmc,bus-turnaround-ns = <12>; - gpmc,cycle2cycle-delay-ns = <18>; - gpmc,wr-data-mux-bus-ns = <90>; - gpmc,wr-access-ns = <186>; + gpmc,access-ns = <144>; + gpmc,page-burst-access-ns = <24>; + gpmc,bus-turnaround-ns = <90>; + gpmc,cycle2cycle-delay-ns = <90>; gpmc,cycle2cycle-samecsen; gpmc,cycle2cycle-diffcsen; vddvario-supply = <&vddvario>; diff --git a/arch/arm/boot/dts/omap2.dtsi b/arch/arm/boot/dts/omap2.dtsi index 22f35ea142c1..8f8c07da4ac1 100644 --- a/arch/arm/boot/dts/omap2.dtsi +++ b/arch/arm/boot/dts/omap2.dtsi @@ -71,13 +71,6 @@ interrupts = <58>; }; - mailbox: mailbox@48094000 { - compatible = "ti,omap2-mailbox"; - ti,hwmods = "mailbox"; - reg = <0x48094000 0x200>; - interrupts = <26>; - }; - intc: interrupt-controller@1 { compatible = "ti,omap2-intc"; interrupt-controller; diff --git 
a/arch/arm/boot/dts/omap2420.dtsi b/arch/arm/boot/dts/omap2420.dtsi index 85b1fb014c43..2d9979835f24 100644 --- a/arch/arm/boot/dts/omap2420.dtsi +++ b/arch/arm/boot/dts/omap2420.dtsi @@ -125,6 +125,14 @@ dma-names = "tx", "rx"; }; + mailbox: mailbox@48094000 { + compatible = "ti,omap2-mailbox"; + reg = <0x48094000 0x200>; + interrupts = <26>, <34>; + interrupt-names = "dsp", "iva"; + ti,hwmods = "mailbox"; + }; + timer1: timer@48028000 { compatible = "ti,omap2420-timer"; reg = <0x48028000 0x400>; diff --git a/arch/arm/boot/dts/omap2430.dtsi b/arch/arm/boot/dts/omap2430.dtsi index d09697dab55e..42d2c61c9e2d 100644 --- a/arch/arm/boot/dts/omap2430.dtsi +++ b/arch/arm/boot/dts/omap2430.dtsi @@ -216,6 +216,13 @@ dma-names = "tx", "rx"; }; + mailbox: mailbox@48094000 { + compatible = "ti,omap2-mailbox"; + reg = <0x48094000 0x200>; + interrupts = <26>; + ti,hwmods = "mailbox"; + }; + timer1: timer@49018000 { compatible = "ti,omap2420-timer"; reg = <0x49018000 0x400>; diff --git a/arch/arm/boot/dts/omap3-cm-t3x30.dtsi b/arch/arm/boot/dts/omap3-cm-t3x30.dtsi index d00055809e31..25ba08331d88 100644 --- a/arch/arm/boot/dts/omap3-cm-t3x30.dtsi +++ b/arch/arm/boot/dts/omap3-cm-t3x30.dtsi @@ -10,18 +10,6 @@ cpu0-supply = <&vcc>; }; }; - - vddvario: regulator-vddvario { - compatible = "regulator-fixed"; - regulator-name = "vddvario"; - regulator-always-on; - }; - - vdd33a: regulator-vdd33a { - compatible = "regulator-fixed"; - regulator-name = "vdd33a"; - regulator-always-on; - }; }; &omap3_pmx_core { @@ -35,58 +23,34 @@ hsusb0_pins: pinmux_hsusb0_pins { pinctrl-single,pins = < - OMAP3_CORE1_IOPAD(0x21a0, PIN_OUTPUT | MUX_MODE0) /* hsusb0_clk.hsusb0_clk */ - OMAP3_CORE1_IOPAD(0x21a2, PIN_OUTPUT | MUX_MODE0) /* hsusb0_stp.hsusb0_stp */ - OMAP3_CORE1_IOPAD(0x21a4, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_dir.hsusb0_dir */ - OMAP3_CORE1_IOPAD(0x21a6, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_nxt.hsusb0_nxt */ - OMAP3_CORE1_IOPAD(0x21a8, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data0.hsusb2_data0 */ - OMAP3_CORE1_IOPAD(0x21aa, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data1.hsusb0_data1 */ - OMAP3_CORE1_IOPAD(0x21ac, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data2.hsusb0_data2 */ - OMAP3_CORE1_IOPAD(0x21ae, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data7.hsusb0_data3 */ - OMAP3_CORE1_IOPAD(0x21b0, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data7.hsusb0_data4 */ - OMAP3_CORE1_IOPAD(0x21b2, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data7.hsusb0_data5 */ - OMAP3_CORE1_IOPAD(0x21b4, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data7.hsusb0_data6 */ - OMAP3_CORE1_IOPAD(0x21b6, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data7.hsusb0_data7 */ + OMAP3_CORE1_IOPAD(0x21a2, PIN_OUTPUT | MUX_MODE0) /* hsusb0_clk.hsusb0_clk */ + OMAP3_CORE1_IOPAD(0x21a4, PIN_OUTPUT | MUX_MODE0) /* hsusb0_stp.hsusb0_stp */ + OMAP3_CORE1_IOPAD(0x21a6, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_dir.hsusb0_dir */ + OMAP3_CORE1_IOPAD(0x21a8, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_nxt.hsusb0_nxt */ + OMAP3_CORE1_IOPAD(0x21aa, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data0.hsusb2_data0 */ + OMAP3_CORE1_IOPAD(0x21ac, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data1.hsusb0_data1 */ + OMAP3_CORE1_IOPAD(0x21ae, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data2.hsusb0_data2 */ + OMAP3_CORE1_IOPAD(0x21b0, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data7.hsusb0_data3 */ + OMAP3_CORE1_IOPAD(0x21b2, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data7.hsusb0_data4 */ + OMAP3_CORE1_IOPAD(0x21b4, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data7.hsusb0_data5 */ + 
OMAP3_CORE1_IOPAD(0x21b6, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data7.hsusb0_data6 */ + OMAP3_CORE1_IOPAD(0x21b8, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data7.hsusb0_data7 */ >; }; }; +#include "omap-gpmc-smsc911x.dtsi" + &gpmc { ranges = <5 0 0x2c000000 0x01000000>; - smsc1: ethernet@5,0 { + smsc1: ethernet@gpmc { compatible = "smsc,lan9221", "smsc,lan9115"; pinctrl-names = "default"; pinctrl-0 = <&smsc1_pins>; interrupt-parent = <&gpio6>; interrupts = <3 IRQ_TYPE_LEVEL_LOW>; reg = <5 0 0xff>; - bank-width = <2>; - gpmc,mux-add-data; - gpmc,cs-on-ns = <0>; - gpmc,cs-rd-off-ns = <186>; - gpmc,cs-wr-off-ns = <186>; - gpmc,adv-on-ns = <12>; - gpmc,adv-rd-off-ns = <48>; - gpmc,adv-wr-off-ns = <48>; - gpmc,oe-on-ns = <54>; - gpmc,oe-off-ns = <168>; - gpmc,we-on-ns = <54>; - gpmc,we-off-ns = <168>; - gpmc,rd-cycle-ns = <186>; - gpmc,wr-cycle-ns = <186>; - gpmc,access-ns = <114>; - gpmc,page-burst-access-ns = <6>; - gpmc,bus-turnaround-ns = <12>; - gpmc,cycle2cycle-delay-ns = <18>; - gpmc,wr-data-mux-bus-ns = <90>; - gpmc,wr-access-ns = <186>; - gpmc,cycle2cycle-samecsen; - gpmc,cycle2cycle-diffcsen; - vddvario-supply = <&vddvario>; - vdd33a-supply = <&vdd33a>; - reg-io-width = <4>; - smsc,save-mac-address; }; }; diff --git a/arch/arm/boot/dts/omap3-igep.dtsi b/arch/arm/boot/dts/omap3-igep.dtsi index b97736d98a64..e2d163bf0619 100644 --- a/arch/arm/boot/dts/omap3-igep.dtsi +++ b/arch/arm/boot/dts/omap3-igep.dtsi @@ -107,7 +107,7 @@ >; }; - smsc911x_pins: pinmux_smsc911x_pins { + smsc9221_pins: pinmux_smsc9221_pins { pinctrl-single,pins = < 0x1a2 (PIN_INPUT | MUX_MODE4) /* mcspi1_cs2.gpio_176 */ >; diff --git a/arch/arm/boot/dts/omap3-igep0020.dts b/arch/arm/boot/dts/omap3-igep0020.dts index 7abd64f6ae21..b22caaaf774b 100644 --- a/arch/arm/boot/dts/omap3-igep0020.dts +++ b/arch/arm/boot/dts/omap3-igep0020.dts @@ -10,7 +10,7 @@ */ #include "omap3-igep.dtsi" -#include "omap-gpmc-smsc911x.dtsi" +#include "omap-gpmc-smsc9221.dtsi" / { model = "IGEPv2 (TI OMAP AM/DM37x)"; @@ -248,7 +248,7 @@ ethernet@gpmc { pinctrl-names = "default"; - pinctrl-0 = <&smsc911x_pins>; + pinctrl-0 = <&smsc9221_pins>; reg = <5 0 0xff>; interrupt-parent = <&gpio6>; interrupts = <16 IRQ_TYPE_LEVEL_LOW>; diff --git a/arch/arm/boot/dts/omap3-sb-t35.dtsi b/arch/arm/boot/dts/omap3-sb-t35.dtsi index 7909c51b05a5..d59e3de1441e 100644 --- a/arch/arm/boot/dts/omap3-sb-t35.dtsi +++ b/arch/arm/boot/dts/omap3-sb-t35.dtsi @@ -2,20 +2,6 @@ * Common support for CompuLab SB-T35 used on SBC-T3530, SBC-T3517 and SBC-T3730 */ -/ { - vddvario_sb_t35: regulator-vddvario-sb-t35 { - compatible = "regulator-fixed"; - regulator-name = "vddvario"; - regulator-always-on; - }; - - vdd33a_sb_t35: regulator-vdd33a-sb-t35 { - compatible = "regulator-fixed"; - regulator-name = "vdd33a"; - regulator-always-on; - }; -}; - &omap3_pmx_core { smsc2_pins: pinmux_smsc2_pins { pinctrl-single,pins = < @@ -37,11 +23,10 @@ reg = <4 0 0xff>; bank-width = <2>; gpmc,mux-add-data; - gpmc,cs-on-ns = <0>; - gpmc,cs-rd-off-ns = <186>; - gpmc,cs-wr-off-ns = <186>; - gpmc,adv-on-ns = <12>; - gpmc,adv-rd-off-ns = <48>; + gpmc,cs-on-ns = <1>; + gpmc,cs-rd-off-ns = <180>; + gpmc,cs-wr-off-ns = <180>; + gpmc,adv-rd-off-ns = <18>; gpmc,adv-wr-off-ns = <48>; gpmc,oe-on-ns = <54>; gpmc,oe-off-ns = <168>; @@ -49,16 +34,14 @@ gpmc,we-off-ns = <168>; gpmc,rd-cycle-ns = <186>; gpmc,wr-cycle-ns = <186>; - gpmc,access-ns = <114>; - gpmc,page-burst-access-ns = <6>; - gpmc,bus-turnaround-ns = <12>; - gpmc,cycle2cycle-delay-ns = <18>; - gpmc,wr-data-mux-bus-ns = <90>; - 
gpmc,wr-access-ns = <186>; + gpmc,access-ns = <144>; + gpmc,page-burst-access-ns = <24>; + gpmc,bus-turnaround-ns = <90>; + gpmc,cycle2cycle-delay-ns = <90>; gpmc,cycle2cycle-samecsen; gpmc,cycle2cycle-diffcsen; - vddvario-supply = <&vddvario_sb_t35>; - vdd33a-supply = <&vdd33a_sb_t35>; + vddvario-supply = <&vddvario>; + vdd33a-supply = <&vdd33a>; reg-io-width = <4>; smsc,save-mac-address; }; diff --git a/arch/arm/boot/dts/omap3-sbc-t3517.dts b/arch/arm/boot/dts/omap3-sbc-t3517.dts index 024c9c6c682d..42189b65d393 100644 --- a/arch/arm/boot/dts/omap3-sbc-t3517.dts +++ b/arch/arm/boot/dts/omap3-sbc-t3517.dts @@ -8,6 +8,19 @@ / { model = "CompuLab SBC-T3517 with CM-T3517"; compatible = "compulab,omap3-sbc-t3517", "compulab,omap3-cm-t3517", "ti,am3517", "ti,omap3"; + + /* Only one GPMC smsc9220 on SBC-T3517, CM-T3517 uses am35x Ethernet */ + vddvario: regulator-vddvario-sb-t35 { + compatible = "regulator-fixed"; + regulator-name = "vddvario"; + regulator-always-on; + }; + + vdd33a: regulator-vdd33a-sb-t35 { + compatible = "regulator-fixed"; + regulator-name = "vdd33a"; + regulator-always-on; + }; }; &omap3_pmx_core { diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi index acb9019dc437..4231191ade06 100644 --- a/arch/arm/boot/dts/omap3.dtsi +++ b/arch/arm/boot/dts/omap3.dtsi @@ -61,7 +61,7 @@ ti,hwmods = "mpu"; }; - iva { + iva: iva { compatible = "ti,iva2.2"; ti,hwmods = "iva"; diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi index f8c9855ce587..36b4312a5e0d 100644 --- a/arch/arm/boot/dts/omap5.dtsi +++ b/arch/arm/boot/dts/omap5.dtsi @@ -630,6 +630,13 @@ status = "disabled"; }; + mailbox: mailbox@4a0f4000 { + compatible = "ti,omap4-mailbox"; + reg = <0x4a0f4000 0x200>; + interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>; + ti,hwmods = "mailbox"; + }; + timer1: timer@4ae18000 { compatible = "ti,omap5430-timer"; reg = <0x4ae18000 0x80>; diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi index eabcfdbb403a..a106b0872910 100644 --- a/arch/arm/boot/dts/sama5d3.dtsi +++ b/arch/arm/boot/dts/sama5d3.dtsi @@ -13,7 +13,7 @@ #include <dt-bindings/pinctrl/at91.h> #include <dt-bindings/interrupt-controller/irq.h> #include <dt-bindings/gpio/gpio.h> -#include <dt-bindings/clk/at91.h> +#include <dt-bindings/clock/at91.h> / { model = "Atmel SAMA5D3 family SoC"; diff --git a/arch/arm/boot/dts/sama5d3_mci2.dtsi b/arch/arm/boot/dts/sama5d3_mci2.dtsi index b029fe7ef17a..1b02208ea6ff 100644 --- a/arch/arm/boot/dts/sama5d3_mci2.dtsi +++ b/arch/arm/boot/dts/sama5d3_mci2.dtsi @@ -9,7 +9,7 @@ #include <dt-bindings/pinctrl/at91.h> #include <dt-bindings/interrupt-controller/irq.h> -#include <dt-bindings/clk/at91.h> +#include <dt-bindings/clock/at91.h> / { ahb { diff --git a/arch/arm/boot/dts/sama5d3_tcb1.dtsi b/arch/arm/boot/dts/sama5d3_tcb1.dtsi index 382b04431f66..02848453ca0c 100644 --- a/arch/arm/boot/dts/sama5d3_tcb1.dtsi +++ b/arch/arm/boot/dts/sama5d3_tcb1.dtsi @@ -9,7 +9,7 @@ #include <dt-bindings/pinctrl/at91.h> #include <dt-bindings/interrupt-controller/irq.h> -#include <dt-bindings/clk/at91.h> +#include <dt-bindings/clock/at91.h> / { aliases { diff --git a/arch/arm/boot/dts/sama5d3_uart.dtsi b/arch/arm/boot/dts/sama5d3_uart.dtsi index a9fa75e41652..7a8d4c6115f7 100644 --- a/arch/arm/boot/dts/sama5d3_uart.dtsi +++ b/arch/arm/boot/dts/sama5d3_uart.dtsi @@ -9,7 +9,7 @@ #include <dt-bindings/pinctrl/at91.h> #include <dt-bindings/interrupt-controller/irq.h> -#include <dt-bindings/clk/at91.h> +#include <dt-bindings/clock/at91.h> / { aliases { diff 
--git a/arch/arm/boot/dts/ste-ccu8540.dts b/arch/arm/boot/dts/ste-ccu8540.dts index 7f3baf51a3a9..32dd55e5f4e6 100644 --- a/arch/arm/boot/dts/ste-ccu8540.dts +++ b/arch/arm/boot/dts/ste-ccu8540.dts @@ -18,6 +18,7 @@ compatible = "st-ericsson,ccu8540", "st-ericsson,u8540"; memory@0 { + device_type = "memory"; reg = <0x20000000 0x1f000000>, <0xc0000000 0x3f000000>; }; diff --git a/arch/arm/boot/dts/ste-ccu9540.dts b/arch/arm/boot/dts/ste-ccu9540.dts index 229508750890..651c56d400a4 100644 --- a/arch/arm/boot/dts/ste-ccu9540.dts +++ b/arch/arm/boot/dts/ste-ccu9540.dts @@ -38,8 +38,8 @@ arm,primecell-periphid = <0x10480180>; max-frequency = <100000000>; bus-width = <4>; - mmc-cap-sd-highspeed; - mmc-cap-mmc-highspeed; + cap-sd-highspeed; + cap-mmc-highspeed; vmmc-supply = <&ab8500_ldo_aux3_reg>; cd-gpios = <&gpio7 6 0x4>; // 230 @@ -63,7 +63,7 @@ arm,primecell-periphid = <0x10480180>; max-frequency = <100000000>; bus-width = <8>; - mmc-cap-mmc-highspeed; + cap-mmc-highspeed; vmmc-supply = <&ab8500_ldo_aux2_reg>; status = "okay"; diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi index 6cb9b68e2188..bf8f0eddc2c0 100644 --- a/arch/arm/boot/dts/ste-href.dtsi +++ b/arch/arm/boot/dts/ste-href.dtsi @@ -116,8 +116,15 @@ arm,primecell-periphid = <0x10480180>; max-frequency = <100000000>; bus-width = <4>; - mmc-cap-sd-highspeed; - mmc-cap-mmc-highspeed; + cap-sd-highspeed; + cap-mmc-highspeed; + sd-uhs-sdr12; + sd-uhs-sdr25; + full-pwr-cycle; + st,sig-dir-dat0; + st,sig-dir-dat2; + st,sig-dir-cmd; + st,sig-pin-fbclk; vmmc-supply = <&ab8500_ldo_aux3_reg>; vqmmc-supply = <&vmmci>; pinctrl-names = "default", "sleep"; @@ -132,6 +139,7 @@ arm,primecell-periphid = <0x10480180>; max-frequency = <100000000>; bus-width = <4>; + non-removable; pinctrl-names = "default", "sleep"; pinctrl-0 = <&sdi1_default_mode>; pinctrl-1 = <&sdi1_sleep_mode>; @@ -144,7 +152,9 @@ arm,primecell-periphid = <0x10480180>; max-frequency = <100000000>; bus-width = <8>; - mmc-cap-mmc-highspeed; + cap-mmc-highspeed; + non-removable; + vmmc-supply = <&db8500_vsmps2_reg>; pinctrl-names = "default", "sleep"; pinctrl-0 = <&sdi2_default_mode>; pinctrl-1 = <&sdi2_sleep_mode>; @@ -157,7 +167,8 @@ arm,primecell-periphid = <0x10480180>; max-frequency = <100000000>; bus-width = <8>; - mmc-cap-mmc-highspeed; + cap-mmc-highspeed; + non-removable; vmmc-supply = <&ab8500_ldo_aux2_reg>; pinctrl-names = "default", "sleep"; pinctrl-0 = <&sdi4_default_mode>; diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi index 5acc0449676a..d316c955bd5f 100644 --- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi +++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi @@ -840,8 +840,8 @@ interrupts = <22>; max-frequency = <48000000>; bus-width = <4>; - mmc-cap-mmc-highspeed; - mmc-cap-sd-highspeed; + cap-mmc-highspeed; + cap-sd-highspeed; cd-gpios = <&gpio3 15 0x1>; cd-inverted; pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts index a2f632d0be2a..474ef83229cd 100644 --- a/arch/arm/boot/dts/ste-snowball.dts +++ b/arch/arm/boot/dts/ste-snowball.dts @@ -156,7 +156,7 @@ arm,primecell-periphid = <0x10480180>; max-frequency = <100000000>; bus-width = <4>; - mmc-cap-mmc-highspeed; + cap-mmc-highspeed; vmmc-supply = <&ab8500_ldo_aux3_reg>; vqmmc-supply = <&vmmci>; pinctrl-names = "default", "sleep"; @@ -195,7 +195,7 @@ arm,primecell-periphid = <0x10480180>; max-frequency = <100000000>; bus-width = <8>; - mmc-cap-mmc-highspeed; + cap-mmc-highspeed; 
vmmc-supply = <&ab8500_ldo_aux2_reg>; pinctrl-names = "default", "sleep"; pinctrl-0 = <&sdi4_default_mode>; diff --git a/arch/arm/boot/dts/ste-u300.dts b/arch/arm/boot/dts/ste-u300.dts index 6fe688e9e4da..82a661677e97 100644 --- a/arch/arm/boot/dts/ste-u300.dts +++ b/arch/arm/boot/dts/ste-u300.dts @@ -442,8 +442,8 @@ clock-names = "apb_pclk", "mclk"; max-frequency = <24000000>; bus-width = <4>; // SD-card slot - mmc-cap-mmc-highspeed; - mmc-cap-sd-highspeed; + cap-mmc-highspeed; + cap-sd-highspeed; cd-gpios = <&gpio 12 0x4>; cd-inverted; vmmc-supply = <&ab3100_ldo_g_reg>; diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi index 32efc105df83..aba1c8a3f388 100644 --- a/arch/arm/boot/dts/sun7i-a20.dtsi +++ b/arch/arm/boot/dts/sun7i-a20.dtsi @@ -87,7 +87,7 @@ pll4: clk@01c20018 { #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-pll1-clk"; + compatible = "allwinner,sun7i-a20-pll4-clk"; reg = <0x01c20018 0x4>; clocks = <&osc24M>; clock-output-names = "pll4"; @@ -109,6 +109,14 @@ clock-output-names = "pll6_sata", "pll6_other", "pll6"; }; + pll8: clk@01c20040 { + #clock-cells = <0>; + compatible = "allwinner,sun7i-a20-pll4-clk"; + reg = <0x01c20040 0x4>; + clocks = <&osc24M>; + clock-output-names = "pll8"; + }; + cpu: cpu@01c20054 { #clock-cells = <0>; compatible = "allwinner,sun4i-a10-cpu-clk"; @@ -805,9 +813,9 @@ status = "disabled"; }; - i2c4: i2c@01c2bc00 { + i2c4: i2c@01c2c000 { compatible = "allwinner,sun4i-i2c"; - reg = <0x01c2bc00 0x400>; + reg = <0x01c2c000 0x400>; interrupts = <0 89 4>; clocks = <&apb1_gates 15>; clock-frequency = <100000>; diff --git a/arch/arm/common/bL_switcher.c b/arch/arm/common/bL_switcher.c index f01c0ee0c87e..490f3dced749 100644 --- a/arch/arm/common/bL_switcher.c +++ b/arch/arm/common/bL_switcher.c @@ -433,8 +433,12 @@ static void bL_switcher_restore_cpus(void) { int i; - for_each_cpu(i, &bL_switcher_removed_logical_cpus) - cpu_up(i); + for_each_cpu(i, &bL_switcher_removed_logical_cpus) { + struct device *cpu_dev = get_cpu_device(i); + int ret = device_online(cpu_dev); + if (ret) + dev_err(cpu_dev, "switcher: unable to restore CPU\n"); + } } static int bL_switcher_halve_cpus(void) @@ -521,7 +525,7 @@ static int bL_switcher_halve_cpus(void) continue; } - ret = cpu_down(i); + ret = device_offline(get_cpu_device(i)); if (ret) { bL_switcher_restore_cpus(); return ret; diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c index 41bca32409fc..5339009b3c0c 100644 --- a/arch/arm/common/edma.c +++ b/arch/arm/common/edma.c @@ -1423,55 +1423,38 @@ EXPORT_SYMBOL(edma_clear_event); #if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DMADEVICES) -static int edma_of_read_u32_to_s16_array(const struct device_node *np, - const char *propname, s16 *out_values, - size_t sz) +static int edma_xbar_event_map(struct device *dev, struct device_node *node, + struct edma_soc_info *pdata, size_t sz) { - int ret; - - ret = of_property_read_u16_array(np, propname, out_values, sz); - if (ret) - return ret; - - /* Terminate it */ - *out_values++ = -1; - *out_values++ = -1; - - return 0; -} - -static int edma_xbar_event_map(struct device *dev, - struct device_node *node, - struct edma_soc_info *pdata, int len) -{ - int ret, i; + const char pname[] = "ti,edma-xbar-event-map"; struct resource res; void __iomem *xbar; - const s16 (*xbar_chans)[2]; + s16 (*xbar_chans)[2]; + size_t nelm = sz / sizeof(s16); u32 shift, offset, mux; + int ret, i; - xbar_chans = devm_kzalloc(dev, - len/sizeof(s16) + 2*sizeof(s16), - GFP_KERNEL); + xbar_chans = devm_kzalloc(dev, 
(nelm + 2) * sizeof(s16), GFP_KERNEL); if (!xbar_chans) return -ENOMEM; ret = of_address_to_resource(node, 1, &res); if (ret) - return -EIO; + return -ENOMEM; xbar = devm_ioremap(dev, res.start, resource_size(&res)); if (!xbar) return -ENOMEM; - ret = edma_of_read_u32_to_s16_array(node, - "ti,edma-xbar-event-map", - (s16 *)xbar_chans, - len/sizeof(u32)); + ret = of_property_read_u16_array(node, pname, (u16 *)xbar_chans, nelm); if (ret) return -EIO; - for (i = 0; xbar_chans[i][0] != -1; i++) { + /* Invalidate last entry for the other user of this mess */ + nelm >>= 1; + xbar_chans[nelm][0] = xbar_chans[nelm][1] = -1; + + for (i = 0; i < nelm; i++) { shift = (xbar_chans[i][1] & 0x03) << 3; offset = xbar_chans[i][1] & 0xfffffffc; mux = readl(xbar + offset); @@ -1480,8 +1463,7 @@ static int edma_xbar_event_map(struct device *dev, writel(mux, (xbar + offset)); } - pdata->xbar_chans = xbar_chans; - + pdata->xbar_chans = (const s16 (*)[2]) xbar_chans; return 0; } diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig index b5df4a511b0a..81ba78eaf54a 100644 --- a/arch/arm/configs/sunxi_defconfig +++ b/arch/arm/configs/sunxi_defconfig @@ -37,7 +37,7 @@ CONFIG_SUN4I_EMAC=y # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SMSC is not set -# CONFIG_NET_VENDOR_STMICRO is not set +CONFIG_STMMAC_ETH=y # CONFIG_NET_VENDOR_WIZNET is not set # CONFIG_WLAN is not set CONFIG_SERIAL_8250=y diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h index 680a83e94467..7e95d8535e24 100644 --- a/arch/arm/include/asm/pci.h +++ b/arch/arm/include/asm/pci.h @@ -31,11 +31,6 @@ static inline int pci_proc_domain(struct pci_bus *bus) } #endif /* CONFIG_PCI_DOMAINS */ -static inline void pcibios_penalize_isa_irq(int irq, int active) -{ - /* We don't do dynamic PCI IRQ allocation */ -} - /* * The PCI address space does equal the physical memory address space. 
* The networking and block device layers use this boolean for bounce diff --git a/arch/arm/include/asm/trusted_foundations.h b/arch/arm/include/asm/trusted_foundations.h index b5f7705abcb0..624e1d436c6c 100644 --- a/arch/arm/include/asm/trusted_foundations.h +++ b/arch/arm/include/asm/trusted_foundations.h @@ -54,7 +54,9 @@ static inline void register_trusted_foundations( */ pr_err("No support for Trusted Foundations, continuing in degraded mode.\n"); pr_err("Secondary processors as well as CPU PM will be disabled.\n"); +#if IS_ENABLED(CONFIG_SMP) setup_max_cpus = 0; +#endif cpu_idle_poll_ctrl(true); } diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 12c3a5decc60..75d95799b6e6 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -171,8 +171,9 @@ extern int __put_user_8(void *, unsigned long long); #define __put_user_check(x,p) \ ({ \ unsigned long __limit = current_thread_info()->addr_limit - 1; \ + const typeof(*(p)) __user *__tmp_p = (p); \ register const typeof(*(p)) __r2 asm("r2") = (x); \ - register const typeof(*(p)) __user *__p asm("r0") = (p);\ + register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \ register unsigned long __l asm("r1") = __limit; \ register int __e asm("r0"); \ switch (sizeof(*(__p))) { \ diff --git a/arch/arm/include/asm/xen/hypercall.h b/arch/arm/include/asm/xen/hypercall.h index 7704e28c3483..712b50e0a6dc 100644 --- a/arch/arm/include/asm/xen/hypercall.h +++ b/arch/arm/include/asm/xen/hypercall.h @@ -34,6 +34,7 @@ #define _ASM_ARM_XEN_HYPERCALL_H #include <xen/interface/xen.h> +#include <xen/interface/sched.h> long privcmd_call(unsigned call, unsigned long a1, unsigned long a2, unsigned long a3, @@ -48,6 +49,16 @@ int HYPERVISOR_memory_op(unsigned int cmd, void *arg); int HYPERVISOR_physdev_op(int cmd, void *arg); int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args); int HYPERVISOR_tmem_op(void *arg); +int HYPERVISOR_multicall(struct multicall_entry *calls, uint32_t nr); + +static inline int +HYPERVISOR_suspend(unsigned long start_info_mfn) +{ + struct sched_shutdown r = { .reason = SHUTDOWN_suspend }; + + /* start_info_mfn is unused on ARM */ + return HYPERVISOR_sched_op(SCHEDOP_shutdown, &r); +} static inline void MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va, @@ -63,9 +74,4 @@ MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req, BUG(); } -static inline int -HYPERVISOR_multicall(void *call_list, int nr_calls) -{ - BUG(); -} #endif /* _ASM_ARM_XEN_HYPERCALL_H */ diff --git a/arch/arm/include/asm/xen/interface.h b/arch/arm/include/asm/xen/interface.h index 1151188bcd83..50066006e6bd 100644 --- a/arch/arm/include/asm/xen/interface.h +++ b/arch/arm/include/asm/xen/interface.h @@ -40,6 +40,8 @@ typedef uint64_t xen_pfn_t; #define PRI_xen_pfn "llx" typedef uint64_t xen_ulong_t; #define PRI_xen_ulong "llx" +typedef int64_t xen_long_t; +#define PRI_xen_long "llx" /* Guest handles for primitive C types. 
*/ __DEFINE_GUEST_HANDLE(uchar, unsigned char); __DEFINE_GUEST_HANDLE(uint, unsigned int); diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h index cf4f3e867395..ded062f9b358 100644 --- a/arch/arm/include/asm/xen/page.h +++ b/arch/arm/include/asm/xen/page.h @@ -77,7 +77,6 @@ static inline xpaddr_t machine_to_phys(xmaddr_t machine) } /* VIRT <-> MACHINE conversion */ #define virt_to_machine(v) (phys_to_machine(XPADDR(__pa(v)))) -#define virt_to_pfn(v) (PFN_DOWN(__pa(v))) #define virt_to_mfn(v) (pfn_to_mfn(virt_to_pfn(v))) #define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT)) diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c index 16d43cd45619..17a26c17f7f5 100644 --- a/arch/arm/kernel/bios32.c +++ b/arch/arm/kernel/bios32.c @@ -545,6 +545,18 @@ void pci_common_init_dev(struct device *parent, struct hw_pci *hw) */ pci_bus_add_devices(bus); } + + list_for_each_entry(sys, &head, node) { + struct pci_bus *bus = sys->bus; + + /* Configure PCI Express settings */ + if (bus && !pci_has_flag(PCI_PROBE_ONLY)) { + struct pci_bus *child; + + list_for_each_entry(child, &bus->children, node) + pcie_bus_configure_settings(child); + } + } } #ifndef CONFIG_PCI_HOST_ITE8152 diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S index 1420725142ca..efb208de75ec 100644 --- a/arch/arm/kernel/entry-header.S +++ b/arch/arm/kernel/entry-header.S @@ -132,6 +132,10 @@ orrne r5, V7M_xPSR_FRAMEPTRALIGN biceq r5, V7M_xPSR_FRAMEPTRALIGN + @ ensure bit 0 is cleared in the PC, otherwise behaviour is + @ unpredictable + bic r4, #1 + @ write basic exception frame stmdb r2!, {r1, r3-r5} ldmia sp, {r1, r3-r5} diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c index 3c217694ebec..cb791ac6a003 100644 --- a/arch/arm/kernel/unwind.c +++ b/arch/arm/kernel/unwind.c @@ -285,7 +285,7 @@ static int unwind_exec_pop_r4_to_rN(struct unwind_ctrl_block *ctrl, if (unwind_pop_register(ctrl, &vsp, reg)) return -URC_FAILURE; - if (insn & 0x80) + if (insn & 0x8) if (unwind_pop_register(ctrl, &vsp, 14)) return -URC_FAILURE; diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c index a0282928e9c1..7cd6f19945ed 100644 --- a/arch/arm/mach-at91/at91sam9260_devices.c +++ b/arch/arm/mach-at91/at91sam9260_devices.c @@ -1308,19 +1308,19 @@ static struct platform_device at91_adc_device = { static struct at91_adc_trigger at91_adc_triggers[] = { [0] = { .name = "timer-counter-0", - .value = AT91_ADC_TRGSEL_TC0 | AT91_ADC_TRGEN, + .value = 0x1, }, [1] = { .name = "timer-counter-1", - .value = AT91_ADC_TRGSEL_TC1 | AT91_ADC_TRGEN, + .value = 0x3, }, [2] = { .name = "timer-counter-2", - .value = AT91_ADC_TRGSEL_TC2 | AT91_ADC_TRGEN, + .value = 0x5, }, [3] = { .name = "external", - .value = AT91_ADC_TRGSEL_EXTERNAL | AT91_ADC_TRGEN, + .value = 0xd, .is_external = true, }, }; diff --git a/arch/arm/mach-imx/devices/platform-ipu-core.c b/arch/arm/mach-imx/devices/platform-ipu-core.c index fc4dd7cedc11..6bd7c3f37ac0 100644 --- a/arch/arm/mach-imx/devices/platform-ipu-core.c +++ b/arch/arm/mach-imx/devices/platform-ipu-core.c @@ -77,7 +77,7 @@ struct platform_device *__init imx_alloc_mx3_camera( pdev = platform_device_alloc("mx3-camera", 0); if (!pdev) - goto err; + return ERR_PTR(-ENOMEM); pdev->dev.dma_mask = kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL); if (!pdev->dev.dma_mask) diff --git a/arch/arm/mach-lpc32xx/phy3250.c b/arch/arm/mach-lpc32xx/phy3250.c index 34932e0e31fa..7858d5b6f6ce 100644 --- a/arch/arm/mach-lpc32xx/phy3250.c 
+++ b/arch/arm/mach-lpc32xx/phy3250.c @@ -202,9 +202,6 @@ static struct mmci_platform_data lpc32xx_mmci_data = { .ocr_mask = MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33 | MMC_VDD_33_34, .ios_handler = mmc_handle_ios, - .dma_filter = NULL, - /* No DMA for now since AMBA PL080 dmaengine driver only does scatter - * gather, and the MMCI driver doesn't do it this way */ }; static struct lpc32xx_slc_platform_data lpc32xx_slc_data = { diff --git a/arch/arm/mach-msm/board-trout-gpio.c b/arch/arm/mach-msm/board-trout-gpio.c index 87e1d01edecc..2c25050209ce 100644 --- a/arch/arm/mach-msm/board-trout-gpio.c +++ b/arch/arm/mach-msm/board-trout-gpio.c @@ -89,7 +89,7 @@ static int trout_gpio_to_irq(struct gpio_chip *chip, unsigned offset) .base = base_gpio, \ .ngpio = 8, \ }, \ - .reg = (void *) reg_num + TROUT_CPLD_BASE, \ + .reg = reg_num + TROUT_CPLD_BASE, \ .shadow = shadow_val, \ } diff --git a/arch/arm/mach-msm/board-trout.c b/arch/arm/mach-msm/board-trout.c index 015d544aa017..5edfbd904d06 100644 --- a/arch/arm/mach-msm/board-trout.c +++ b/arch/arm/mach-msm/board-trout.c @@ -78,7 +78,7 @@ static void __init trout_init(void) static struct map_desc trout_io_desc[] __initdata = { { - .virtual = TROUT_CPLD_BASE, + .virtual = (unsigned long)TROUT_CPLD_BASE, .pfn = __phys_to_pfn(TROUT_CPLD_START), .length = TROUT_CPLD_SIZE, .type = MT_DEVICE_NONSHARED diff --git a/arch/arm/mach-msm/board-trout.h b/arch/arm/mach-msm/board-trout.h index b2379ede43bc..adb757abbb92 100644 --- a/arch/arm/mach-msm/board-trout.h +++ b/arch/arm/mach-msm/board-trout.h @@ -58,7 +58,7 @@ #define TROUT_4_TP_LS_EN 19 #define TROUT_5_TP_LS_EN 1 -#define TROUT_CPLD_BASE 0xE8100000 +#define TROUT_CPLD_BASE IOMEM(0xE8100000) #define TROUT_CPLD_START 0x98000000 #define TROUT_CPLD_SIZE SZ_4K diff --git a/arch/arm/mach-mvebu/mvebu-soc-id.c b/arch/arm/mach-mvebu/mvebu-soc-id.c index f3d4cf53f746..09520e19b78e 100644 --- a/arch/arm/mach-mvebu/mvebu-soc-id.c +++ b/arch/arm/mach-mvebu/mvebu-soc-id.c @@ -108,7 +108,18 @@ static int __init mvebu_soc_id_init(void) iounmap(pci_base); res_ioremap: - clk_disable_unprepare(clk); + /* + * If the PCIe unit is actually enabled and we have PCI + * support in the kernel, we intentionally do not release the + * reference to the clock. We want to keep it running since + * the bootloader does some PCIe link configuration that the + * kernel is for now unable to do, and gating the clock would + * make us lose this precious configuration.
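A note on the idiom at work here: in the common clock framework, a clock stays ungated for as long as someone holds a prepare/enable reference, so deliberately never dropping that reference is all it takes to pin the clock on. A minimal sketch of the pattern, with hypothetical names (example_pin_boot_clock() is not part of this patch):

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/of.h>

	/* Sketch: keep a bootloader-configured clock running forever. */
	static int example_pin_boot_clock(struct device_node *np)
	{
		struct clk *clk = of_clk_get(np, 0);
		int ret;

		if (IS_ERR(clk))
			return PTR_ERR(clk);

		ret = clk_prepare_enable(clk);
		if (ret) {
			clk_put(clk);
			return ret;
		}

		/*
		 * Intentionally no clk_disable_unprepare()/clk_put() here:
		 * holding the reference keeps the clock ungated, so the
		 * bootloader's configuration survives.
		 */
		return 0;
	}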
+ */ + if (!of_device_is_available(child) || !IS_ENABLED(CONFIG_PCI_MVEBU)) { + clk_disable_unprepare(clk); + clk_put(clk); + } clk_err: of_node_put(child); diff --git a/arch/arm/mach-omap2/board-flash.c b/arch/arm/mach-omap2/board-flash.c index 84cc1482e584..e87f2a83d6bf 100644 --- a/arch/arm/mach-omap2/board-flash.c +++ b/arch/arm/mach-omap2/board-flash.c @@ -142,7 +142,7 @@ __init board_nand_init(struct mtd_partition *nand_parts, u8 nr_parts, u8 cs, board_nand_data.nr_parts = nr_parts; board_nand_data.devsize = nand_type; - board_nand_data.ecc_opt = OMAP_ECC_BCH8_CODE_HW; + board_nand_data.ecc_opt = OMAP_ECC_HAM1_CODE_HW; gpmc_nand_init(&board_nand_data, gpmc_t); } #endif /* CONFIG_MTD_NAND_OMAP2 || CONFIG_MTD_NAND_OMAP2_MODULE */ diff --git a/arch/arm/mach-omap2/cclock3xxx_data.c b/arch/arm/mach-omap2/cclock3xxx_data.c index 8f5121b89688..eb8c75ec3b1a 100644 --- a/arch/arm/mach-omap2/cclock3xxx_data.c +++ b/arch/arm/mach-omap2/cclock3xxx_data.c @@ -456,7 +456,8 @@ static struct clk_hw_omap dpll4_m5x2_ck_hw = { .clkdm_name = "dpll4_clkdm", }; -DEFINE_STRUCT_CLK(dpll4_m5x2_ck, dpll4_m5x2_ck_parent_names, dpll4_m5x2_ck_ops); +DEFINE_STRUCT_CLK_FLAGS(dpll4_m5x2_ck, dpll4_m5x2_ck_parent_names, + dpll4_m5x2_ck_ops, CLK_SET_RATE_PARENT); static struct clk dpll4_m5x2_ck_3630 = { .name = "dpll4_m5x2_ck", diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c index 01fc710c8181..2498ab025fa2 100644 --- a/arch/arm/mach-omap2/cpuidle44xx.c +++ b/arch/arm/mach-omap2/cpuidle44xx.c @@ -14,6 +14,7 @@ #include <linux/cpuidle.h> #include <linux/cpu_pm.h> #include <linux/export.h> +#include <linux/clockchips.h> #include <asm/cpuidle.h> #include <asm/proc-fns.h> @@ -83,6 +84,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev, { struct idle_statedata *cx = state_ptr + index; u32 mpuss_can_lose_context = 0; + int cpu_id = smp_processor_id(); /* * CPU0 has to wait and stay ON until CPU1 is OFF state. @@ -110,6 +112,8 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev, mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) && (cx->mpu_logic_state == PWRDM_POWER_OFF); + clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id); + /* * Call idle CPU PM enter notifier chain so that * VFP and per CPU interrupt context is saved. @@ -165,6 +169,8 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev, if (dev->cpu == 0 && mpuss_can_lose_context) cpu_cluster_pm_exit(); + clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id); + fail: cpuidle_coupled_parallel_barrier(dev, &abort_barrier); cpu_done[dev->cpu] = false; @@ -172,6 +178,16 @@ fail: return index; } +/* + * For each cpu, set up the broadcast timer because local timers + * stop for the states above C1.
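For context on the clockevents_notify() calls added above: any idle state in which the local timer may stop has to be bracketed by the broadcast enter/exit notifications so that an always-on device can deliver the wakeup (BROADCAST_ON, handled by the setup function just below, registers each CPU with that device). A sketch against the 3.15-era clockchips interface; example_enter_deep_state() and the low-power entry step are illustrative only:

	#include <linux/clockchips.h>
	#include <linux/smp.h>

	static void example_enter_deep_state(void)
	{
		int cpu = smp_processor_id();

		/* Hand wakeup duty to the always-on broadcast device. */
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

		/* ... platform-specific entry into a timer-stopping C-state ... */

		/* Switch back to the per-CPU tick on wakeup. */
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
	}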
+ */ +static void omap_setup_broadcast_timer(void *arg) +{ + int cpu = smp_processor_id(); + clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu); +} + static struct cpuidle_driver omap4_idle_driver = { .name = "omap4_idle", .owner = THIS_MODULE, @@ -189,8 +205,7 @@ static struct cpuidle_driver omap4_idle_driver = { /* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */ .exit_latency = 328 + 440, .target_residency = 960, - .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED | - CPUIDLE_FLAG_TIMER_STOP, + .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED, .enter = omap_enter_idle_coupled, .name = "C2", .desc = "CPUx OFF, MPUSS CSWR", @@ -199,8 +214,7 @@ static struct cpuidle_driver omap4_idle_driver = { /* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */ .exit_latency = 460 + 518, .target_residency = 1100, - .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED | - CPUIDLE_FLAG_TIMER_STOP, + .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED, .enter = omap_enter_idle_coupled, .name = "C3", .desc = "CPUx OFF, MPUSS OSWR", @@ -231,5 +245,8 @@ int __init omap4_idle_init(void) if (!cpu_clkdm[0] || !cpu_clkdm[1]) return -ENODEV; + /* Configure the broadcast timer on each cpu */ + on_each_cpu(omap_setup_broadcast_timer, NULL, 1); + return cpuidle_register(&omap4_idle_driver, cpu_online_mask); } diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S index 75e92952c18e..4993d4bfe9b2 100644 --- a/arch/arm/mach-omap2/omap-headsmp.S +++ b/arch/arm/mach-omap2/omap-headsmp.S @@ -1,7 +1,7 @@ /* * Secondary CPU startup routine source file. * - * Copyright (C) 2009 Texas Instruments, Inc. + * Copyright (C) 2009-2014 Texas Instruments, Inc. * * Author: * Santosh Shilimkar <santosh.shilimkar@ti.com> @@ -28,7 +28,7 @@ * code. This routine also provides a holding flag into which * secondary core is held until we're ready for it to initialise. * The primary core will update this flag using a hardware -+ * register AuxCoreBoot0. + * register AuxCoreBoot0. */ ENTRY(omap5_secondary_startup) wait: ldr r2, =AUX_CORE_BOOT0_PA @ read from AuxCoreBoot0 @@ -39,7 +39,7 @@ wait: ldr r2, =AUX_CORE_BOOT0_PA @ read from AuxCoreBoot0 cmp r0, r4 bne wait b secondary_startup -END(omap5_secondary_startup) +ENDPROC(omap5_secondary_startup) /* * OMAP4 specific entry point for secondary CPU to jump from ROM * code. This routine also provides a holding flag into which diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c index 892317294fdc..e829664e6a6c 100644 --- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c @@ -895,7 +895,7 @@ static struct omap_hwmod omap54xx_mcpdm_hwmod = { * current exception. 
*/ - .flags = HWMOD_EXT_OPT_MAIN_CLK, + .flags = HWMOD_EXT_OPT_MAIN_CLK | HWMOD_SWSUP_SIDLE, .main_clk = "pad_clks_ck", .prcm = { .omap4 = { diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c index a8ec16787170..43d03fbf4c0b 100644 --- a/arch/arm/mach-omap2/timer.c +++ b/arch/arm/mach-omap2/timer.c @@ -361,7 +361,7 @@ static void __init omap2_gp_clockevent_init(int gptimer_id, /* Clocksource code */ static struct omap_dm_timer clksrc; -static bool use_gptimer_clksrc; +static bool use_gptimer_clksrc __initdata; /* * clocksource diff --git a/arch/arm/mach-orion5x/common.h b/arch/arm/mach-orion5x/common.h index f565f9944af2..7548db2bfb8a 100644 --- a/arch/arm/mach-orion5x/common.h +++ b/arch/arm/mach-orion5x/common.h @@ -21,7 +21,7 @@ struct mv_sata_platform_data; #define ORION_MBUS_DEVBUS_BOOT_ATTR 0x0f #define ORION_MBUS_DEVBUS_TARGET(cs) 0x01 #define ORION_MBUS_DEVBUS_ATTR(cs) (~(1 << cs)) -#define ORION_MBUS_SRAM_TARGET 0x00 +#define ORION_MBUS_SRAM_TARGET 0x09 #define ORION_MBUS_SRAM_ATTR 0x00 /* diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c index 486063db2a2f..da4163ff31a2 100644 --- a/arch/arm/mach-shmobile/board-armadillo800eva.c +++ b/arch/arm/mach-shmobile/board-armadillo800eva.c @@ -1017,7 +1017,7 @@ static struct asoc_simple_card_info fsi2_hdmi_info = { .platform = "sh_fsi2", .cpu_dai = { .name = "fsib-dai", - .fmt = SND_SOC_DAIFMT_CBM_CFM, + .fmt = SND_SOC_DAIFMT_CBS_CFS, }, .codec_dai = { .name = "sh_mobile_hdmi-hifi", diff --git a/arch/arm/mach-ux500/Makefile b/arch/arm/mach-ux500/Makefile index de544aabf292..9741de956b3e 100644 --- a/arch/arm/mach-ux500/Makefile +++ b/arch/arm/mach-ux500/Makefile @@ -5,8 +5,7 @@ obj-y := cpu.o id.o timer.o pm.o obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o obj-$(CONFIG_UX500_SOC_DB8500) += cpu-db8500.o -obj-$(CONFIG_MACH_MOP500) += board-mop500-sdi.o \ - board-mop500-regulators.o \ +obj-$(CONFIG_MACH_MOP500) += board-mop500-regulators.o \ board-mop500-audio.o obj-$(CONFIG_SMP) += platsmp.o headsmp.o obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o diff --git a/arch/arm/mach-ux500/board-mop500-sdi.c b/arch/arm/mach-ux500/board-mop500-sdi.c deleted file mode 100644 index fcbf3a13a539..000000000000 --- a/arch/arm/mach-ux500/board-mop500-sdi.c +++ /dev/null @@ -1,166 +0,0 @@ -/* - * Copyright (C) ST-Ericsson SA 2010 - * - * Author: Hanumath Prasad <hanumath.prasad@stericsson.com> - * License terms: GNU General Public License (GPL) version 2 - */ - -#include <linux/kernel.h> -#include <linux/gpio.h> -#include <linux/amba/bus.h> -#include <linux/amba/mmci.h> -#include <linux/mmc/host.h> -#include <linux/platform_device.h> -#include <linux/platform_data/dma-ste-dma40.h> - -#include <asm/mach-types.h> - -#include "db8500-regs.h" -#include "board-mop500.h" -#include "ste-dma40-db8500.h" - -/* - * v2 has a new version of this block that need to be forced, the number found - * in hardware is incorrect - */ -#define U8500_SDI_V2_PERIPHID 0x10480180 - -/* - * SDI 0 (MicroSD slot) - */ - -#ifdef CONFIG_STE_DMA40 -struct stedma40_chan_cfg mop500_sdi0_dma_cfg_rx = { - .mode = STEDMA40_MODE_LOGICAL, - .dir = DMA_DEV_TO_MEM, - .dev_type = DB8500_DMA_DEV29_SD_MM0, -}; - -static struct stedma40_chan_cfg mop500_sdi0_dma_cfg_tx = { - .mode = STEDMA40_MODE_LOGICAL, - .dir = DMA_MEM_TO_DEV, - .dev_type = DB8500_DMA_DEV29_SD_MM0, -}; -#endif - -struct mmci_platform_data mop500_sdi0_data = { - .f_max = 100000000, - .capabilities = MMC_CAP_4_BIT_DATA | - MMC_CAP_SD_HIGHSPEED | - MMC_CAP_MMC_HIGHSPEED | 
- MMC_CAP_ERASE | - MMC_CAP_UHS_SDR12 | - MMC_CAP_UHS_SDR25, - .gpio_wp = -1, - .sigdir = MCI_ST_FBCLKEN | - MCI_ST_CMDDIREN | - MCI_ST_DATA0DIREN | - MCI_ST_DATA2DIREN, -#ifdef CONFIG_STE_DMA40 - .dma_filter = stedma40_filter, - .dma_rx_param = &mop500_sdi0_dma_cfg_rx, - .dma_tx_param = &mop500_sdi0_dma_cfg_tx, -#endif -}; - -/* - * SDI1 (SDIO WLAN) - */ -#ifdef CONFIG_STE_DMA40 -static struct stedma40_chan_cfg sdi1_dma_cfg_rx = { - .mode = STEDMA40_MODE_LOGICAL, - .dir = DMA_DEV_TO_MEM, - .dev_type = DB8500_DMA_DEV32_SD_MM1, -}; - -static struct stedma40_chan_cfg sdi1_dma_cfg_tx = { - .mode = STEDMA40_MODE_LOGICAL, - .dir = DMA_MEM_TO_DEV, - .dev_type = DB8500_DMA_DEV32_SD_MM1, -}; -#endif - -struct mmci_platform_data mop500_sdi1_data = { - .ocr_mask = MMC_VDD_29_30, - .f_max = 100000000, - .capabilities = MMC_CAP_4_BIT_DATA | - MMC_CAP_NONREMOVABLE, - .gpio_cd = -1, - .gpio_wp = -1, -#ifdef CONFIG_STE_DMA40 - .dma_filter = stedma40_filter, - .dma_rx_param = &sdi1_dma_cfg_rx, - .dma_tx_param = &sdi1_dma_cfg_tx, -#endif -}; - -/* - * SDI 2 (POP eMMC, not on DB8500ed) - */ - -#ifdef CONFIG_STE_DMA40 -struct stedma40_chan_cfg mop500_sdi2_dma_cfg_rx = { - .mode = STEDMA40_MODE_LOGICAL, - .dir = DMA_DEV_TO_MEM, - .dev_type = DB8500_DMA_DEV28_SD_MM2, -}; - -static struct stedma40_chan_cfg mop500_sdi2_dma_cfg_tx = { - .mode = STEDMA40_MODE_LOGICAL, - .dir = DMA_MEM_TO_DEV, - .dev_type = DB8500_DMA_DEV28_SD_MM2, -}; -#endif - -struct mmci_platform_data mop500_sdi2_data = { - .ocr_mask = MMC_VDD_165_195, - .f_max = 100000000, - .capabilities = MMC_CAP_4_BIT_DATA | - MMC_CAP_8_BIT_DATA | - MMC_CAP_NONREMOVABLE | - MMC_CAP_MMC_HIGHSPEED | - MMC_CAP_ERASE | - MMC_CAP_CMD23, - .gpio_cd = -1, - .gpio_wp = -1, -#ifdef CONFIG_STE_DMA40 - .dma_filter = stedma40_filter, - .dma_rx_param = &mop500_sdi2_dma_cfg_rx, - .dma_tx_param = &mop500_sdi2_dma_cfg_tx, -#endif -}; - -/* - * SDI 4 (on-board eMMC) - */ - -#ifdef CONFIG_STE_DMA40 -struct stedma40_chan_cfg mop500_sdi4_dma_cfg_rx = { - .mode = STEDMA40_MODE_LOGICAL, - .dir = DMA_DEV_TO_MEM, - .dev_type = DB8500_DMA_DEV42_SD_MM4, -}; - -static struct stedma40_chan_cfg mop500_sdi4_dma_cfg_tx = { - .mode = STEDMA40_MODE_LOGICAL, - .dir = DMA_MEM_TO_DEV, - .dev_type = DB8500_DMA_DEV42_SD_MM4, -}; -#endif - -struct mmci_platform_data mop500_sdi4_data = { - .f_max = 100000000, - .capabilities = MMC_CAP_4_BIT_DATA | - MMC_CAP_8_BIT_DATA | - MMC_CAP_NONREMOVABLE | - MMC_CAP_MMC_HIGHSPEED | - MMC_CAP_ERASE | - MMC_CAP_CMD23, - .gpio_cd = -1, - .gpio_wp = -1, -#ifdef CONFIG_STE_DMA40 - .dma_filter = stedma40_filter, - .dma_rx_param = &mop500_sdi4_dma_cfg_rx, - .dma_tx_param = &mop500_sdi4_dma_cfg_tx, -#endif -}; diff --git a/arch/arm/mach-ux500/board-mop500.h b/arch/arm/mach-ux500/board-mop500.h index 32cc0d8d8a0e..7c7b0adca582 100644 --- a/arch/arm/mach-ux500/board-mop500.h +++ b/arch/arm/mach-ux500/board-mop500.h @@ -8,12 +8,7 @@ #define __BOARD_MOP500_H #include <linux/platform_data/asoc-ux500-msp.h> -#include <linux/amba/mmci.h> -extern struct mmci_platform_data mop500_sdi0_data; -extern struct mmci_platform_data mop500_sdi1_data; -extern struct mmci_platform_data mop500_sdi2_data; -extern struct mmci_platform_data mop500_sdi4_data; extern struct msp_i2s_platform_data msp0_platform_data; extern struct msp_i2s_platform_data msp1_platform_data; extern struct msp_i2s_platform_data msp2_platform_data; diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c index 8820f602fcd2..fa308f07fae5 100644 --- a/arch/arm/mach-ux500/cpu-db8500.c +++ 
b/arch/arm/mach-ux500/cpu-db8500.c @@ -146,10 +146,6 @@ static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = { /* Requires call-back bindings. */ OF_DEV_AUXDATA("arm,cortex-a9-pmu", 0, "arm-pmu", &db8500_pmu_platdata), /* Requires DMA bindings. */ - OF_DEV_AUXDATA("arm,pl18x", 0x80126000, "sdi0", &mop500_sdi0_data), - OF_DEV_AUXDATA("arm,pl18x", 0x80118000, "sdi1", &mop500_sdi1_data), - OF_DEV_AUXDATA("arm,pl18x", 0x80005000, "sdi2", &mop500_sdi2_data), - OF_DEV_AUXDATA("arm,pl18x", 0x80114000, "sdi4", &mop500_sdi4_data), OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80123000, "ux500-msp-i2s.0", &msp0_platform_data), OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80124000, diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S index 0c93588fcb91..1ca37c72f12f 100644 --- a/arch/arm/mm/proc-v7m.S +++ b/arch/arm/mm/proc-v7m.S @@ -123,6 +123,11 @@ __v7m_setup: mov pc, lr ENDPROC(__v7m_setup) + .align 2 +__v7m_setup_stack: + .space 4 * 8 @ 8 registers +__v7m_setup_stack_top: + define_processor_functions v7m, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1 .section ".rodata" @@ -152,6 +157,3 @@ __v7m_proc_info: .long nop_cache_fns @ proc_info_list.cache .size __v7m_proc_info, . - __v7m_proc_info -__v7m_setup_stack: - .space 4 * 8 @ 8 registers -__v7m_setup_stack_top: diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c index 5f5b975887fc..b5608b1f9fbd 100644 --- a/arch/arm/plat-omap/dma.c +++ b/arch/arm/plat-omap/dma.c @@ -70,6 +70,7 @@ static u32 errata; static struct omap_dma_global_context_registers { u32 dma_irqenable_l0; + u32 dma_irqenable_l1; u32 dma_ocp_sysconfig; u32 dma_gcr; } omap_dma_global_context; @@ -1973,10 +1974,17 @@ static struct irqaction omap24xx_dma_irq; /*----------------------------------------------------------------------------*/ +/* + * Note that we are currently using only IRQENABLE_L0 and L1. + * As the DSP may be using IRQENABLE_L2 and L3, let's not + * touch those for now. + */ void omap_dma_global_context_save(void) { omap_dma_global_context.dma_irqenable_l0 = p->dma_read(IRQENABLE_L0, 0); + omap_dma_global_context.dma_irqenable_l1 = + p->dma_read(IRQENABLE_L1, 0); omap_dma_global_context.dma_ocp_sysconfig = p->dma_read(OCP_SYSCONFIG, 0); omap_dma_global_context.dma_gcr = p->dma_read(GCR, 0); @@ -1991,6 +1999,8 @@ void omap_dma_global_context_restore(void) OCP_SYSCONFIG, 0); p->dma_write(omap_dma_global_context.dma_irqenable_l0, IRQENABLE_L0, 0); + p->dma_write(omap_dma_global_context.dma_irqenable_l1, + IRQENABLE_L1, 0); if (IS_DMA_ERRATA(DMA_ROMCODE_BUG)) p->dma_write(0x3 , IRQSTATUS_L0, 0); diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c index b96723e258a0..1e632430570b 100644 --- a/arch/arm/xen/enlighten.c +++ b/arch/arm/xen/enlighten.c @@ -339,6 +339,14 @@ static int __init xen_pm_init(void) } late_initcall(xen_pm_init); + +/* empty stubs */ +void xen_arch_pre_suspend(void) { } +void xen_arch_post_suspend(int suspend_cancelled) { } +void xen_timer_resume(void) { } +void xen_arch_resume(void) { } + + /* In the hypervisor.S file. 
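With HYPERVISOR_multicall now backed by a real hypercall (see the hypercall.S and export hunks nearby) rather than the old BUG() stub, a guest can batch several operations into a single trap. A rough sketch, assuming the usual multicall_entry layout of op, result and args[]; the double-yield batch is purely illustrative:

	#include <xen/interface/xen.h>
	#include <xen/interface/sched.h>

	static int example_yield_twice(void)
	{
		struct multicall_entry mc[2];
		int i;

		for (i = 0; i < 2; i++) {
			mc[i].op = __HYPERVISOR_sched_op;
			mc[i].args[0] = SCHEDOP_yield;
			mc[i].args[1] = 0;
		}

		/*
		 * One guest/hypervisor transition executes both entries;
		 * per-entry status comes back in mc[i].result.
		 */
		return HYPERVISOR_multicall(mc, 2);
	}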
*/ EXPORT_SYMBOL_GPL(HYPERVISOR_event_channel_op); EXPORT_SYMBOL_GPL(HYPERVISOR_grant_table_op); @@ -350,4 +358,5 @@ EXPORT_SYMBOL_GPL(HYPERVISOR_memory_op); EXPORT_SYMBOL_GPL(HYPERVISOR_physdev_op); EXPORT_SYMBOL_GPL(HYPERVISOR_vcpu_op); EXPORT_SYMBOL_GPL(HYPERVISOR_tmem_op); +EXPORT_SYMBOL_GPL(HYPERVISOR_multicall); EXPORT_SYMBOL_GPL(privcmd_call); diff --git a/arch/arm/xen/hypercall.S b/arch/arm/xen/hypercall.S index d1cf7b7c2200..44e3a5f10c4c 100644 --- a/arch/arm/xen/hypercall.S +++ b/arch/arm/xen/hypercall.S @@ -89,6 +89,7 @@ HYPERCALL2(memory_op); HYPERCALL2(physdev_op); HYPERCALL3(vcpu_op); HYPERCALL1(tmem_op); +HYPERCALL2(multicall); ENTRY(privcmd_call) stmdb sp!, {r4} diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index e94f9458aa6f..993bce527b85 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -138,6 +138,7 @@ static inline void *phys_to_virt(phys_addr_t x) #define __pa(x) __virt_to_phys((unsigned long)(x)) #define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x))) #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) +#define virt_to_pfn(x) __phys_to_pfn(__virt_to_phys(x)) /* * virt_to_page(k) convert a _valid_ virtual address to struct page * diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 90c811f05a2e..7b1c67a0b485 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -266,7 +266,7 @@ static inline pmd_t pte_pmd(pte_t pte) #define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK)) -#define set_pmd_at(mm, addr, pmdp, pmd) set_pmd(pmdp, pmd) +#define set_pmd_at(mm, addr, pmdp, pmd) set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd)) static inline int has_transparent_hugepage(void) { diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c index 473e5dbf8f39..0f08dfd69ebc 100644 --- a/arch/arm64/kernel/irq.c +++ b/arch/arm64/kernel/irq.c @@ -97,11 +97,15 @@ static bool migrate_one_irq(struct irq_desc *desc) if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity)) return false; - if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { - affinity = cpu_online_mask; + if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) ret = true; - } + /* + * when using forced irq_set_affinity we must ensure that the cpu + * being offlined is not present in the affinity mask, it may be + * selected as the target CPU otherwise + */ + affinity = cpu_online_mask; c = irq_data_get_irq_chip(d); if (!c->irq_set_affinity) pr_debug("IRQ%u: unable to set affinity\n", d->irq); diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index 5e9aec358306..31eb959e9aa8 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -51,7 +51,11 @@ int pmd_huge(pmd_t pmd) int pud_huge(pud_t pud) { +#ifndef __PAGETABLE_PMD_FOLDED return !(pud_val(pud) & PUD_TABLE_BIT); +#else + return 0; +#endif } int pmd_huge_support(void) diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S index 531342ec4bcf..8bbe9401f4f0 100644 --- a/arch/arm64/xen/hypercall.S +++ b/arch/arm64/xen/hypercall.S @@ -80,6 +80,7 @@ HYPERCALL2(memory_op); HYPERCALL2(physdev_op); HYPERCALL3(vcpu_op); HYPERCALL1(tmem_op); +HYPERCALL2(multicall); ENTRY(privcmd_call) mov x16, x0 diff --git a/arch/blackfin/include/asm/pci.h b/arch/blackfin/include/asm/pci.h index 74352c4597d9..c737909fba47 100644 --- a/arch/blackfin/include/asm/pci.h +++ b/arch/blackfin/include/asm/pci.h @@ -10,9 +10,4 @@ #define PCIBIOS_MIN_IO 
0x00001000 #define PCIBIOS_MIN_MEM 0x10000000 -static inline void pcibios_penalize_isa_irq(int irq) -{ - /* We don't do dynamic PCI IRQ allocation */ -} - #endif /* _ASM_BFIN_PCI_H */ diff --git a/arch/cris/include/asm/pci.h b/arch/cris/include/asm/pci.h index f666734926d5..cc2399c175e9 100644 --- a/arch/cris/include/asm/pci.h +++ b/arch/cris/include/asm/pci.h @@ -20,7 +20,6 @@ void pcibios_config_init(void); struct pci_bus * pcibios_scan_root(int bus); void pcibios_set_master(struct pci_dev *dev); -void pcibios_penalize_isa_irq(int irq); struct irq_routing_table *pcibios_get_irq_routing_table(void); int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq); diff --git a/arch/frv/include/asm/pci.h b/arch/frv/include/asm/pci.h index ef03baf5d89d..2035a4d3f9b9 100644 --- a/arch/frv/include/asm/pci.h +++ b/arch/frv/include/asm/pci.h @@ -24,8 +24,6 @@ struct pci_dev; extern void pcibios_set_master(struct pci_dev *dev); -extern void pcibios_penalize_isa_irq(int irq); - #ifdef CONFIG_MMU extern void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle); extern void consistent_free(void *vaddr); diff --git a/arch/frv/mb93090-mb00/pci-irq.c b/arch/frv/mb93090-mb00/pci-irq.c index c677b9d81d30..1c35c93f942b 100644 --- a/arch/frv/mb93090-mb00/pci-irq.c +++ b/arch/frv/mb93090-mb00/pci-irq.c @@ -55,10 +55,6 @@ void __init pcibios_fixup_irqs(void) } } -void __init pcibios_penalize_isa_irq(int irq) -{ -} - void pcibios_enable_irq(struct pci_dev *dev) { pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h index 7d41cc089822..52af5ed9f60b 100644 --- a/arch/ia64/include/asm/pci.h +++ b/arch/ia64/include/asm/pci.h @@ -50,12 +50,6 @@ struct pci_dev; extern unsigned long ia64_max_iommu_merge_mask; #define PCI_DMA_BUS_IS_PHYS (ia64_max_iommu_merge_mask == ~0UL) -static inline void -pcibios_penalize_isa_irq (int irq, int active) -{ - /* We don't do dynamic PCI IRQ allocation */ -} - #include <asm-generic/pci-dma-compat.h> #ifdef CONFIG_PCI diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h index ae763d8bf55a..fb13dc5e8f8c 100644 --- a/arch/ia64/include/asm/unistd.h +++ b/arch/ia64/include/asm/unistd.h @@ -11,7 +11,7 @@ -#define NR_syscalls 314 /* length of syscall table */ +#define NR_syscalls 315 /* length of syscall table */ /* * The following defines stop scripts/checksyscalls.sh from complaining about diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h index 715e85f858de..7de0a2d65da4 100644 --- a/arch/ia64/include/uapi/asm/unistd.h +++ b/arch/ia64/include/uapi/asm/unistd.h @@ -327,5 +327,6 @@ #define __NR_finit_module 1335 #define __NR_sched_setattr 1336 #define __NR_sched_getattr 1337 +#define __NR_renameat2 1338 #endif /* _UAPI_ASM_IA64_UNISTD_H */ diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index fa8d61a312a7..ba3d03503e84 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S @@ -1775,6 +1775,7 @@ sys_call_table: data8 sys_finit_module // 1335 data8 sys_sched_setattr data8 sys_sched_getattr + data8 sys_renameat2 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */ diff --git a/arch/ia64/pci/fixup.c b/arch/ia64/pci/fixup.c index eee069a0b539..1fe9aa5068ea 100644 --- a/arch/ia64/pci/fixup.c +++ b/arch/ia64/pci/fixup.c @@ -49,9 +49,7 @@ static void pci_fixup_video(struct pci_dev *pdev) * type BRIDGE, or CARDBUS. 
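The pci_is_bridge() helper used in the simplified check below is, going by the open-coded test it replaces, a readability wrapper around the same header-type comparison; presumably something along these lines (a reconstruction, not part of this diff):

	static inline bool pci_is_bridge(struct pci_dev *dev)
	{
		return dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
		       dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
	}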
Host to PCI controllers use * PCI header type NORMAL. */ - if (bridge - &&((bridge->hdr_type == PCI_HEADER_TYPE_BRIDGE) - ||(bridge->hdr_type == PCI_HEADER_TYPE_CARDBUS))) { + if (bridge && (pci_is_bridge(bridge))) { pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &config); if (!(config & PCI_BRIDGE_CTL_VGA)) diff --git a/arch/m68k/Kconfig.debug b/arch/m68k/Kconfig.debug index 229682721240..64776d7ac199 100644 --- a/arch/m68k/Kconfig.debug +++ b/arch/m68k/Kconfig.debug @@ -12,12 +12,17 @@ config BOOTPARAM_STRING config EARLY_PRINTK bool "Early printk" - depends on MVME16x || MAC + depends on !(SUN3 || M68360 || M68000 || COLDFIRE) help Write kernel log output directly to a serial port. + Where implemented, output goes to the framebuffer as well. + PROM console functionality on Sun 3x is not affected by this option. + + Pass "earlyprintk" on the kernel command line to get a + boot console. This is useful for kernel debugging when your machine crashes very - early before the console code is initialized. + early, i.e. before the normal console driver is loaded. You should normally say N here, unless you want to debug such a crash. if !MMU diff --git a/arch/m68k/amiga/amisound.c b/arch/m68k/amiga/amisound.c index 2559eefc6aff..90a60d758f8b 100644 --- a/arch/m68k/amiga/amisound.c +++ b/arch/m68k/amiga/amisound.c @@ -51,7 +51,7 @@ void __init amiga_init_sound(void) snd_data = amiga_chip_alloc_res(sizeof(sine_data), &beep_res); if (!snd_data) { - printk (KERN_CRIT "amiga init_sound: failed to allocate chipmem\n"); + pr_crit("amiga init_sound: failed to allocate chipmem\n"); return; } memcpy (snd_data, sine_data, sizeof(sine_data)); diff --git a/arch/m68k/amiga/config.c b/arch/m68k/amiga/config.c index 9625b7132227..01693df7f2f6 100644 --- a/arch/m68k/amiga/config.c +++ b/arch/m68k/amiga/config.c @@ -183,7 +183,7 @@ int __init amiga_parse_bootinfo(const struct bi_record *record) dev->boardaddr = be32_to_cpu(cd->cd_BoardAddr); dev->boardsize = be32_to_cpu(cd->cd_BoardSize); } else - printk("amiga_parse_bootinfo: too many AutoConfig devices\n"); + pr_warn("amiga_parse_bootinfo: too many AutoConfig devices\n"); #endif /* CONFIG_ZORRO */ break; @@ -209,9 +209,9 @@ static void __init amiga_identify(void) memset(&amiga_hw_present, 0, sizeof(amiga_hw_present)); - printk("Amiga hardware found: "); + pr_info("Amiga hardware found: "); if (amiga_model >= AMI_500 && amiga_model <= AMI_DRACO) { - printk("[%s] ", amiga_models[amiga_model-AMI_500]); + pr_cont("[%s] ", amiga_models[amiga_model-AMI_500]); strcat(amiga_model_name, amiga_models[amiga_model-AMI_500]); } @@ -322,7 +322,7 @@ static void __init amiga_identify(void) #define AMIGAHW_ANNOUNCE(name, str) \ if (AMIGAHW_PRESENT(name)) \ - printk(str) + pr_cont(str) AMIGAHW_ANNOUNCE(AMI_VIDEO, "VIDEO "); AMIGAHW_ANNOUNCE(AMI_BLITTER, "BLITTER "); @@ -354,8 +354,8 @@ static void __init amiga_identify(void) AMIGAHW_ANNOUNCE(MAGIC_REKICK, "MAGIC_REKICK "); AMIGAHW_ANNOUNCE(PCMCIA, "PCMCIA "); if (AMIGAHW_PRESENT(ZORRO)) - printk("ZORRO%s ", AMIGAHW_PRESENT(ZORRO3) ? "3" : ""); - printk("\n"); + pr_cont("ZORRO%s ", AMIGAHW_PRESENT(ZORRO3) ? 
"3" : ""); + pr_cont("\n"); #undef AMIGAHW_ANNOUNCE } @@ -424,7 +424,7 @@ void __init config_amiga(void) if (m68k_memory[i].addr < 16*1024*1024) { if (i == 0) { /* don't cut off the branch we're sitting on */ - printk("Warning: kernel runs in Zorro II memory\n"); + pr_warn("Warning: kernel runs in Zorro II memory\n"); continue; } disabled_z2mem += m68k_memory[i].size; @@ -435,8 +435,8 @@ void __init config_amiga(void) } } if (disabled_z2mem) - printk("%dK of Zorro II memory will not be used as system memory\n", - disabled_z2mem>>10); + pr_info("%dK of Zorro II memory will not be used as system memory\n", + disabled_z2mem>>10); } /* request all RAM */ @@ -475,7 +475,7 @@ static void __init amiga_sched_init(irq_handler_t timer_routine) jiffy_ticks = DIV_ROUND_CLOSEST(amiga_eclock, HZ); if (request_resource(&mb_resources._ciab, &sched_res)) - printk("Cannot allocate ciab.ta{lo,hi}\n"); + pr_warn("Cannot allocate ciab.ta{lo,hi}\n"); ciab.cra &= 0xC0; /* turn off timer A, continuous mode, from Eclk */ ciab.talo = jiffy_ticks % 256; ciab.tahi = jiffy_ticks / 256; diff --git a/arch/m68k/apollo/config.c b/arch/m68k/apollo/config.c index 9268c0f96376..6e62d66c396e 100644 --- a/arch/m68k/apollo/config.c +++ b/arch/m68k/apollo/config.c @@ -65,8 +65,8 @@ int __init apollo_parse_bootinfo(const struct bi_record *record) static void __init dn_setup_model(void) { - printk("Apollo hardware found: "); - printk("[%s]\n", apollo_models[apollo_model - APOLLO_DN3000]); + pr_info("Apollo hardware found: [%s]\n", + apollo_models[apollo_model - APOLLO_DN3000]); switch(apollo_model) { case APOLLO_UNKNOWN: @@ -197,8 +197,10 @@ void dn_sched_init(irq_handler_t timer_routine) *(volatile unsigned char *)(pica+1)&=(~8); #if 0 - printk("*(0x10803) %02x\n",*(volatile unsigned char *)(apollo_timer + 0x3)); - printk("*(0x10803) %02x\n",*(volatile unsigned char *)(apollo_timer + 0x3)); + pr_info("*(0x10803) %02x\n", + *(volatile unsigned char *)(apollo_timer + 0x3)); + pr_info("*(0x10803) %02x\n", + *(volatile unsigned char *)(apollo_timer + 0x3)); #endif if (request_irq(IRQ_APOLLO, dn_timer_int, 0, "time", timer_routine)) @@ -236,12 +238,10 @@ int dn_dummy_hwclk(int op, struct rtc_time *t) { } -int dn_dummy_set_clock_mmss(unsigned long nowtime) { - - printk("set_clock_mmss\n"); - - return 0; - +int dn_dummy_set_clock_mmss(unsigned long nowtime) +{ + pr_info("set_clock_mmss\n"); + return 0; } void dn_dummy_reset(void) { diff --git a/arch/m68k/atari/stram.c b/arch/m68k/atari/stram.c index 0810c8d56e59..5f8cb5a234d9 100644 --- a/arch/m68k/atari/stram.c +++ b/arch/m68k/atari/stram.c @@ -47,6 +47,7 @@ static struct resource stram_pool = { static unsigned long pool_size = 1024*1024; +static unsigned long stram_virt_offset; static int __init atari_stram_setup(char *arg) { @@ -67,14 +68,12 @@ early_param("stram_pool", atari_stram_setup); void __init atari_stram_init(void) { int i; - void *stram_start; /* * determine whether kernel code resides in ST-RAM * (then ST-RAM is the first memory block at virtual 0x0) */ - stram_start = phys_to_virt(0); - kernel_in_stram = (stram_start == 0); + kernel_in_stram = (m68k_memory[0].addr == 0); for (i = 0; i < m68k_num_memory; ++i) { if (m68k_memory[i].addr == 0) { @@ -89,24 +88,62 @@ void __init atari_stram_init(void) /* * This function is called from setup_arch() to reserve the pages needed for - * ST-RAM management. + * ST-RAM management, if the kernel resides in ST-RAM. 
*/ void __init atari_stram_reserve_pages(void *start_mem) { - /* - * always reserve first page of ST-RAM, the first 2 KiB are - * supervisor-only! - */ - if (!kernel_in_stram) - reserve_bootmem(0, PAGE_SIZE, BOOTMEM_DEFAULT); + if (kernel_in_stram) { + pr_debug("atari_stram pool: kernel in ST-RAM, using alloc_bootmem!\n"); + stram_pool.start = (resource_size_t)alloc_bootmem_low_pages(pool_size); + stram_pool.end = stram_pool.start + pool_size - 1; + request_resource(&iomem_resource, &stram_pool); + stram_virt_offset = 0; + pr_debug("atari_stram pool: size = %lu bytes, resource = %pR\n", + pool_size, &stram_pool); + pr_debug("atari_stram pool: stram_virt_offset = %lx\n", + stram_virt_offset); + } +} - stram_pool.start = (resource_size_t)alloc_bootmem_low_pages(pool_size); - stram_pool.end = stram_pool.start + pool_size - 1; - request_resource(&iomem_resource, &stram_pool); - pr_debug("atari_stram pool: size = %lu bytes, resource = %pR\n", - pool_size, &stram_pool); +/* + * This function is called as arch initcall to reserve the pages needed for + * ST-RAM management, if the kernel does not reside in ST-RAM. + */ +int __init atari_stram_map_pages(void) +{ + if (!kernel_in_stram) { + /* + * Skip page 0, as the first 2 KiB are supervisor-only! + */ + pr_debug("atari_stram pool: kernel not in ST-RAM, using ioremap!\n"); + stram_pool.start = PAGE_SIZE; + stram_pool.end = stram_pool.start + pool_size - 1; + request_resource(&iomem_resource, &stram_pool); + stram_virt_offset = (unsigned long) ioremap(stram_pool.start, + resource_size(&stram_pool)) - stram_pool.start; + pr_debug("atari_stram pool: size = %lu bytes, resource = %pR\n", + pool_size, &stram_pool); + pr_debug("atari_stram pool: stram_virt_offset = %lx\n", + stram_virt_offset); + } + return 0; +} +arch_initcall(atari_stram_map_pages); + + +void *atari_stram_to_virt(unsigned long phys) +{ + return (void *)(phys + stram_virt_offset); +} +EXPORT_SYMBOL(atari_stram_to_virt); + + +unsigned long atari_stram_to_phys(void *virt) +{ + return (unsigned long)(virt - stram_virt_offset); } +EXPORT_SYMBOL(atari_stram_to_phys); void *atari_stram_alloc(unsigned long size, const char *owner) @@ -134,14 +171,14 @@ void *atari_stram_alloc(unsigned long size, const char *owner) } pr_debug("atari_stram_alloc: returning %pR\n", res); - return (void *)res->start; + return atari_stram_to_virt(res->start); } EXPORT_SYMBOL(atari_stram_alloc); void atari_stram_free(void *addr) { - unsigned long start = (unsigned long)addr; + unsigned long start = atari_stram_to_phys(addr); struct resource *res; unsigned long size; diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig index 96da4963d14b..d7eac833a94f 100644 --- a/arch/m68k/configs/amiga_defconfig +++ b/arch/m68k/configs/amiga_defconfig @@ -159,6 +159,7 @@ CONFIG_IP_SET_BITMAP_IP=m CONFIG_IP_SET_BITMAP_IPMAC=m CONFIG_IP_SET_BITMAP_PORT=m CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m @@ -227,6 +228,7 @@ CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y CONFIG_BATMAN_ADV_NC=y +CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set @@ -279,6 +281,7 @@ CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m CONFIG_DM_CACHE=m +CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m CONFIG_DM_RAID=m CONFIG_DM_ZERO=m @@ -305,7 +308,6 @@ CONFIG_VETH=m CONFIG_A2065=y CONFIG_ARIADNE=y # CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_CADENCE is not
set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_CIRRUS is not set # CONFIG_NET_VENDOR_HP is not set @@ -315,6 +317,7 @@ CONFIG_ARIADNE=y CONFIG_HYDRA=y CONFIG_APNE=y CONFIG_ZORRO8390=y +# CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SMSC is not set # CONFIG_NET_VENDOR_STMICRO is not set diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig index 1b8739f50cbf..650ee75de6cd 100644 --- a/arch/m68k/configs/apollo_defconfig +++ b/arch/m68k/configs/apollo_defconfig @@ -157,6 +157,7 @@ CONFIG_IP_SET_BITMAP_IP=m CONFIG_IP_SET_BITMAP_IPMAC=m CONFIG_IP_SET_BITMAP_PORT=m CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m @@ -225,6 +226,7 @@ CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y CONFIG_BATMAN_ADV_NC=y +CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set @@ -261,6 +263,7 @@ CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m CONFIG_DM_CACHE=m +CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m CONFIG_DM_RAID=m CONFIG_DM_ZERO=m @@ -284,12 +287,12 @@ CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m # CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set # CONFIG_NET_VENDOR_VIA is not set diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig index 6ea4e91f0caa..3142e69342fa 100644 --- a/arch/m68k/configs/atari_defconfig +++ b/arch/m68k/configs/atari_defconfig @@ -156,6 +156,7 @@ CONFIG_IP_SET_BITMAP_IP=m CONFIG_IP_SET_BITMAP_IPMAC=m CONFIG_IP_SET_BITMAP_PORT=m CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m @@ -224,6 +225,7 @@ CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y CONFIG_BATMAN_ADV_NC=y +CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set @@ -269,6 +271,7 @@ CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m CONFIG_DM_CACHE=m +CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m CONFIG_DM_RAID=m CONFIG_DM_ZERO=m @@ -293,11 +296,11 @@ CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m CONFIG_ATARILANCE=y # CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set # CONFIG_NET_VENDOR_VIA is not set diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig index e5a12739ff2d..0daa8a172f30 100644 --- a/arch/m68k/configs/bvme6000_defconfig +++ b/arch/m68k/configs/bvme6000_defconfig @@ -155,6 +155,7 @@ CONFIG_IP_SET_BITMAP_IP=m CONFIG_IP_SET_BITMAP_IPMAC=m CONFIG_IP_SET_BITMAP_PORT=m CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m @@ -223,6 +224,7 @@ CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y CONFIG_BATMAN_ADV_NC=y 
+CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set @@ -260,6 +262,7 @@ CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m CONFIG_DM_CACHE=m +CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m CONFIG_DM_RAID=m CONFIG_DM_ZERO=m @@ -283,12 +286,12 @@ CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m # CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set CONFIG_BVME6000_NET=y # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set # CONFIG_NET_VENDOR_VIA is not set diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig index 8936d7fb0f0f..88af78f7bad9 100644 --- a/arch/m68k/configs/hp300_defconfig +++ b/arch/m68k/configs/hp300_defconfig @@ -157,6 +157,7 @@ CONFIG_IP_SET_BITMAP_IP=m CONFIG_IP_SET_BITMAP_IPMAC=m CONFIG_IP_SET_BITMAP_PORT=m CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m @@ -225,6 +226,7 @@ CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y CONFIG_BATMAN_ADV_NC=y +CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set @@ -261,6 +263,7 @@ CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m CONFIG_DM_CACHE=m +CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m CONFIG_DM_RAID=m CONFIG_DM_ZERO=m @@ -285,12 +288,12 @@ CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m CONFIG_HPLANCE=y # CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set # CONFIG_NET_VENDOR_VIA is not set diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig index be5342cca25b..66f915574a85 100644 --- a/arch/m68k/configs/mac_defconfig +++ b/arch/m68k/configs/mac_defconfig @@ -156,6 +156,7 @@ CONFIG_IP_SET_BITMAP_IP=m CONFIG_IP_SET_BITMAP_IPMAC=m CONFIG_IP_SET_BITMAP_PORT=m CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m @@ -227,6 +228,7 @@ CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y CONFIG_BATMAN_ADV_NC=y +CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set @@ -270,6 +272,7 @@ CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m CONFIG_DM_CACHE=m +CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m CONFIG_DM_RAID=m CONFIG_DM_ZERO=m @@ -301,7 +304,6 @@ CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m CONFIG_MACMACE=y # CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set CONFIG_MAC89x0=y # CONFIG_NET_VENDOR_INTEL is not set @@ -309,6 +311,7 @@ CONFIG_MAC89x0=y # CONFIG_NET_VENDOR_MICREL is not set CONFIG_MACSONIC=y CONFIG_MAC8390=y +# CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SMSC is not set # CONFIG_NET_VENDOR_STMICRO is not set diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig index f27194ade167..5eaa49924fa6 100644 --- 
a/arch/m68k/configs/multi_defconfig +++ b/arch/m68k/configs/multi_defconfig @@ -165,6 +165,7 @@ CONFIG_IP_SET_BITMAP_IP=m CONFIG_IP_SET_BITMAP_IPMAC=m CONFIG_IP_SET_BITMAP_PORT=m CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m @@ -236,6 +237,7 @@ CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y CONFIG_BATMAN_ADV_NC=y +CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set @@ -302,6 +304,7 @@ CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m CONFIG_DM_CACHE=m +CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m CONFIG_DM_RAID=m CONFIG_DM_ZERO=m @@ -340,7 +343,6 @@ CONFIG_MVME147_NET=y CONFIG_SUN3LANCE=y CONFIG_MACMACE=y # CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set CONFIG_MAC89x0=y # CONFIG_NET_VENDOR_HP is not set @@ -354,6 +356,7 @@ CONFIG_MAC8390=y CONFIG_NE2000=m CONFIG_APNE=y CONFIG_ZORRO8390=y +# CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set # CONFIG_NET_VENDOR_VIA is not set diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig index c3887603c1db..324d0b4d8351 100644 --- a/arch/m68k/configs/mvme147_defconfig +++ b/arch/m68k/configs/mvme147_defconfig @@ -154,6 +154,7 @@ CONFIG_IP_SET_BITMAP_IP=m CONFIG_IP_SET_BITMAP_IPMAC=m CONFIG_IP_SET_BITMAP_PORT=m CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m @@ -222,6 +223,7 @@ CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y CONFIG_BATMAN_ADV_NC=y +CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set @@ -259,6 +261,7 @@ CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m CONFIG_DM_CACHE=m +CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m CONFIG_DM_RAID=m CONFIG_DM_ZERO=m @@ -283,12 +286,12 @@ CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m CONFIG_MVME147_NET=y # CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set # CONFIG_NET_VENDOR_VIA is not set diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig index f7ff784d05ac..f0cb4338952e 100644 --- a/arch/m68k/configs/mvme16x_defconfig +++ b/arch/m68k/configs/mvme16x_defconfig @@ -155,6 +155,7 @@ CONFIG_IP_SET_BITMAP_IP=m CONFIG_IP_SET_BITMAP_IPMAC=m CONFIG_IP_SET_BITMAP_PORT=m CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m @@ -223,6 +224,7 @@ CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y CONFIG_BATMAN_ADV_NC=y +CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set @@ -260,6 +262,7 @@ CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m CONFIG_DM_CACHE=m +CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m CONFIG_DM_RAID=m CONFIG_DM_ZERO=m @@ -283,12 +286,12 @@ CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m # CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set 
CONFIG_MVME16x_NET=y # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set # CONFIG_NET_VENDOR_VIA is not set diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig index f0c72ab037be..d6cf0880c463 100644 --- a/arch/m68k/configs/q40_defconfig +++ b/arch/m68k/configs/q40_defconfig @@ -155,6 +155,7 @@ CONFIG_IP_SET_BITMAP_IP=m CONFIG_IP_SET_BITMAP_IPMAC=m CONFIG_IP_SET_BITMAP_PORT=m CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m @@ -223,6 +224,7 @@ CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y CONFIG_BATMAN_ADV_NC=y +CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set @@ -266,6 +268,7 @@ CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m CONFIG_DM_CACHE=m +CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m CONFIG_DM_RAID=m CONFIG_DM_ZERO=m @@ -291,7 +294,6 @@ CONFIG_VETH=m # CONFIG_NET_VENDOR_3COM is not set # CONFIG_NET_VENDOR_AMD is not set # CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_CIRRUS is not set # CONFIG_NET_VENDOR_HP is not set @@ -299,6 +301,7 @@ CONFIG_VETH=m # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set CONFIG_NE2000=m +# CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SMSC is not set # CONFIG_NET_VENDOR_STMICRO is not set diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig index 7bca0f464521..f4e88d1c7472 100644 --- a/arch/m68k/configs/sun3_defconfig +++ b/arch/m68k/configs/sun3_defconfig @@ -152,6 +152,7 @@ CONFIG_IP_SET_BITMAP_IP=m CONFIG_IP_SET_BITMAP_IPMAC=m CONFIG_IP_SET_BITMAP_PORT=m CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m @@ -220,6 +221,7 @@ CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y CONFIG_BATMAN_ADV_NC=y +CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set @@ -257,6 +259,7 @@ CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m CONFIG_DM_CACHE=m +CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m CONFIG_DM_RAID=m CONFIG_DM_ZERO=m @@ -281,11 +284,11 @@ CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m CONFIG_SUN3LANCE=y # CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_CADENCE is not set CONFIG_SUN3_82586=y # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set # CONFIG_NET_VENDOR_SUN is not set diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig index 317f3e1fec95..49f4032c1ad6 100644 --- a/arch/m68k/configs/sun3x_defconfig +++ b/arch/m68k/configs/sun3x_defconfig @@ -152,6 +152,7 @@ CONFIG_IP_SET_BITMAP_IP=m CONFIG_IP_SET_BITMAP_IPMAC=m CONFIG_IP_SET_BITMAP_PORT=m CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m CONFIG_IP_SET_HASH_IPPORT=m CONFIG_IP_SET_HASH_IPPORTIP=m CONFIG_IP_SET_HASH_IPPORTNET=m @@ -220,6 +221,7 @@ CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m CONFIG_BATMAN_ADV_DAT=y CONFIG_BATMAN_ADV_NC=y +CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m 
CONFIG_NET_MPLS_GSO=m # CONFIG_WIRELESS is not set @@ -257,6 +259,7 @@ CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m CONFIG_DM_CACHE=m +CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m CONFIG_DM_RAID=m CONFIG_DM_ZERO=m @@ -281,12 +284,12 @@ CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m CONFIG_SUN3LANCE=y # CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set # CONFIG_NET_VENDOR_VIA is not set diff --git a/arch/m68k/hp300/config.c b/arch/m68k/hp300/config.c index 2e5a787ea11b..a9befe65adc4 100644 --- a/arch/m68k/hp300/config.c +++ b/arch/m68k/hp300/config.c @@ -87,7 +87,7 @@ int __init hp300_parse_bootinfo(const struct bi_record *record) /* serial port address: ignored here */ break; - default: + default: unknown = 1; } @@ -262,11 +262,12 @@ void __init config_hp300(void) #endif mach_max_dma_address = 0xffffffff; - if (hp300_model >= HP_330 && hp300_model <= HP_433S && hp300_model != HP_350) { - printk(KERN_INFO "Detected HP9000 model %s\n", hp300_models[hp300_model-HP_320]); + if (hp300_model >= HP_330 && hp300_model <= HP_433S && + hp300_model != HP_350) { + pr_info("Detected HP9000 model %s\n", + hp300_models[hp300_model-HP_320]); strcat(hp300_model_name, hp300_models[hp300_model-HP_320]); - } - else { + } else { panic("Unknown HP9000 Model"); } #ifdef CONFIG_SERIAL_8250_CONSOLE diff --git a/arch/m68k/include/asm/atari_stram.h b/arch/m68k/include/asm/atari_stram.h index 62e27598af91..4e771c22d6a9 100644 --- a/arch/m68k/include/asm/atari_stram.h +++ b/arch/m68k/include/asm/atari_stram.h @@ -8,6 +8,8 @@ /* public interface */ void *atari_stram_alloc(unsigned long size, const char *owner); void atari_stram_free(void *); +void *atari_stram_to_virt(unsigned long phys); +unsigned long atari_stram_to_phys(void *); /* functions called internally by other parts of the kernel */ void atari_stram_init(void); diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index 9d38b73989eb..33afa56ad47a 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h @@ -4,7 +4,7 @@ #include <uapi/asm/unistd.h> -#define NR_syscalls 351 +#define NR_syscalls 352 #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_OLD_STAT diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h index b932dd470041..9cd82fbc7817 100644 --- a/arch/m68k/include/uapi/asm/unistd.h +++ b/arch/m68k/include/uapi/asm/unistd.h @@ -356,5 +356,6 @@ #define __NR_finit_module 348 #define __NR_sched_setattr 349 #define __NR_sched_getattr 350 +#define __NR_renameat2 351 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */ diff --git a/arch/m68k/kernel/Makefile b/arch/m68k/kernel/Makefile index 2d5d9be16273..e47778f8588d 100644 --- a/arch/m68k/kernel/Makefile +++ b/arch/m68k/kernel/Makefile @@ -25,3 +25,5 @@ obj-$(CONFIG_HAS_DMA) += dma.o obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o obj-$(CONFIG_BOOTINFO_PROC) += bootinfo_proc.o +obj-$(CONFIG_EARLY_PRINTK) += early_printk.o + diff --git a/arch/m68k/kernel/early_printk.c b/arch/m68k/kernel/early_printk.c new file mode 100644 index 000000000000..ff9708d71921 --- /dev/null +++ b/arch/m68k/kernel/early_printk.c @@ -0,0 +1,67 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * 
License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2014 Finn Thain + */ + +#include <linux/kernel.h> +#include <linux/console.h> +#include <linux/init.h> +#include <linux/string.h> +#include <asm/setup.h> + +extern void mvme16x_cons_write(struct console *co, + const char *str, unsigned count); + +asmlinkage void __init debug_cons_nputs(const char *s, unsigned n); + +static void __ref debug_cons_write(struct console *c, + const char *s, unsigned n) +{ +#if !(defined(CONFIG_SUN3) || defined(CONFIG_M68360) || \ + defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)) + if (MACH_IS_MVME16x) + mvme16x_cons_write(c, s, n); + else + debug_cons_nputs(s, n); +#endif +} + +static struct console early_console_instance = { + .name = "debug", + .write = debug_cons_write, + .flags = CON_PRINTBUFFER | CON_BOOT, + .index = -1 +}; + +static int __init setup_early_printk(char *buf) +{ + if (early_console || buf) + return 0; + + early_console = &early_console_instance; + register_console(early_console); + + return 0; +} +early_param("earlyprintk", setup_early_printk); + +/* + * debug_cons_nputs() defined in arch/m68k/kernel/head.S cannot be called + * after init sections are discarded (for platforms that use it). + */ +#if !(defined(CONFIG_SUN3) || defined(CONFIG_M68360) || \ + defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)) + +static int __init unregister_early_console(void) +{ + if (!early_console || MACH_IS_MVME16x) + return 0; + + return unregister_console(early_console); +} +late_initcall(unregister_early_console); + +#endif diff --git a/arch/m68k/kernel/head.S b/arch/m68k/kernel/head.S index 3ab329b88521..dbb118e1a4e0 100644 --- a/arch/m68k/kernel/head.S +++ b/arch/m68k/kernel/head.S @@ -153,7 +153,7 @@ * ------------ * The console is also able to be turned off. The console in head.S * is specifically for debugging and can be very useful. It is surrounded by - * #ifdef CONSOLE/#endif clauses so it doesn't have to ship in known-good + * #ifdef / #endif clauses so it doesn't have to ship in known-good * kernels. It's basic algorithm is to determine the size of the screen * (in height/width and bit depth) and then use that information for * displaying an 8x8 font or an 8x16 (widthxheight). I prefer the 8x8 for @@ -198,9 +198,8 @@ * CONFIG_xxx: These are the obvious machine configuration defines created * during configuration. These are defined in autoconf.h. * - * CONSOLE: There is support for head.S console in this file. This - * console can talk to a Mac frame buffer, but could easily be extrapolated - * to extend it to support other platforms. + * CONSOLE_DEBUG: Only supports a Mac frame buffer but could easily be + * extended to support other platforms. * * TEST_MMU: This is a test harness for running on any given machine but * getting an MMU dump for another class of machine. The classes of machines @@ -222,7 +221,7 @@ * MMU_PRINT: There is a routine built into head.S that can display the * MMU data structures. It outputs its result through the serial_putc * interface. So where ever that winds up driving data, that's where the - * mmu struct will appear. On the Macintosh that's typically the console. + * mmu struct will appear. * * SERIAL_DEBUG: There are a series of putc() macro statements * scattered through out the code to give progress of status to the @@ -250,8 +249,8 @@ * USE_MFP: Use the ST-MFP port (Modem1) for serial debug. * * Macintosh constants: - * MAC_USE_SCC_A: Use SCC port A (modem) for serial debug and early console. 
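The new early_printk.c above is an instance of the generic boot-console pattern:
a console flagged CON_BOOT is registered from an early_param hook, and the core
console code drops it automatically once a real console registers (the explicit
late_initcall is only needed because debug_cons_nputs lives in init memory). A
minimal sketch of the same pattern, with my_putc standing in for any platform
output primitive (hypothetical name):

#include <linux/console.h>
#include <linux/init.h>

extern void my_putc(char c);	/* hypothetical platform routine */

static void my_cons_write(struct console *c, const char *s, unsigned n)
{
	while (n--)
		my_putc(*s++);
}

static struct console my_boot_console = {
	.name	= "debug",
	.write	= my_cons_write,
	.flags	= CON_PRINTBUFFER | CON_BOOT,	/* auto-unregistered later */
	.index	= -1,
};

static int __init my_setup_early_printk(char *buf)
{
	if (!early_console) {
		early_console = &my_boot_console;
		register_console(early_console);
	}
	return 0;
}
early_param("earlyprintk", my_setup_early_printk);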
- * MAC_USE_SCC_B: Use SCC port B (printer) for serial debug and early console. + * MAC_USE_SCC_A: Use SCC port A (modem) for serial debug. + * MAC_USE_SCC_B: Use SCC port B (printer) for serial debug. */ #include <linux/linkage.h> @@ -268,27 +267,17 @@ #include <asm/pgtable.h> #include <asm/page.h> #include <asm/asm-offsets.h> - #ifdef CONFIG_MAC - -#include <asm/machw.h> - -#ifdef CONFIG_FRAMEBUFFER_CONSOLE -#define CONSOLE +# include <asm/machw.h> #endif #ifdef CONFIG_EARLY_PRINTK -#define SERIAL_DEBUG -#else -#undef SERIAL_DEBUG +# define SERIAL_DEBUG +# if defined(CONFIG_MAC) && defined(CONFIG_FONT_SUPPORT) +# define CONSOLE_DEBUG +# endif #endif -#else /* !CONFIG_MAC */ - -#define SERIAL_DEBUG - -#endif /* !CONFIG_MAC */ - #undef MMU_PRINT #undef MMU_NOCACHE_KERNEL #undef DEBUG @@ -303,6 +292,7 @@ .globl kernel_pg_dir .globl availmem +.globl m68k_init_mapped_size .globl m68k_pgtable_cachemode .globl m68k_supervisor_cachemode #ifdef CONFIG_MVME16x @@ -480,22 +470,21 @@ func_define serial_putc,1 func_define console_putc,1 func_define console_init -func_define console_put_stats func_define console_put_penguin func_define console_plot_pixel,3 func_define console_scroll .macro putc ch -#if defined(CONSOLE) || defined(SERIAL_DEBUG) +#if defined(CONSOLE_DEBUG) || defined(SERIAL_DEBUG) pea \ch #endif -#ifdef CONSOLE +#ifdef CONSOLE_DEBUG func_call console_putc #endif #ifdef SERIAL_DEBUG func_call serial_putc #endif -#if defined(CONSOLE) || defined(SERIAL_DEBUG) +#if defined(CONSOLE_DEBUG) || defined(SERIAL_DEBUG) addql #4,%sp #endif .endm @@ -515,7 +504,7 @@ func_define putn,1 .endm .macro puts string -#if defined(CONSOLE) || defined(SERIAL_DEBUG) +#if defined(CONSOLE_DEBUG) || defined(SERIAL_DEBUG) __INITDATA .Lstr\@: .string "\string" @@ -651,11 +640,9 @@ ENTRY(__start) lea %pc@(L(mac_rowbytes)),%a1 movel %a0@,%a1@ -#ifdef SERIAL_DEBUG get_bi_record BI_MAC_SCCBASE lea %pc@(L(mac_sccbase)),%a1 movel %a0@,%a1@ -#endif L(test_notmac): #endif /* CONFIG_MAC */ @@ -885,13 +872,12 @@ L(nothp): */ #ifdef CONFIG_MAC is_not_mac(L(nocon)) -# ifdef CONSOLE +# ifdef CONSOLE_DEBUG console_init # ifdef CONFIG_LOGO console_put_penguin # endif /* CONFIG_LOGO */ - console_put_stats -# endif /* CONSOLE */ +# endif /* CONSOLE_DEBUG */ L(nocon): #endif /* CONFIG_MAC */ @@ -922,10 +908,21 @@ L(nocon): * * This block of code does what's necessary to map in the various kinds * of machines for execution of Linux. - * First map the first 4 MB of kernel code & data + * First map the first 4, 8, or 16 MB of kernel code & data */ - mmu_map #PAGE_OFFSET,%pc@(L(phys_kernel_start)),#4*1024*1024,\ + get_bi_record BI_MEMCHUNK + movel %a0@(4),%d0 + movel #16*1024*1024,%d1 + cmpl %d0,%d1 + jls 1f + lsrl #1,%d1 + cmpl %d0,%d1 + jls 1f + lsrl #1,%d1 +1: + movel %d1,m68k_init_mapped_size + mmu_map #PAGE_OFFSET,%pc@(L(phys_kernel_start)),%d1,\ %pc@(m68k_supervisor_cachemode) putc 'C' @@ -1396,15 +1393,13 @@ L(mmu_fixup_done): andl L(mac_videobase),%d0 addl #VIDEOMEMBASE,%d0 movel %d0,L(mac_videobase) -#if defined(CONSOLE) +#ifdef CONSOLE_DEBUG movel %pc@(L(phys_kernel_start)),%d0 subl #PAGE_OFFSET,%d0 subl %d0,L(console_font) subl %d0,L(console_font_data) #endif -#ifdef SERIAL_DEBUG orl #0x50000000,L(mac_sccbase) -#endif 1: #endif @@ -2734,7 +2729,12 @@ func_return get_new_page */ #ifdef CONFIG_MAC +/* You may define either or both of these. 
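The BI_MEMCHUNK logic above replaces the fixed 4 MB initial mapping: it picks
the largest of 16, 8, or 4 MB that still fits in the first memory chunk and
records the choice in m68k_init_mapped_size for mm/motorola.c. The same
computation in C (a sketch of the assembly, not in-tree code):

static unsigned long pick_init_mapped_size(unsigned long chunk_size)
{
	unsigned long size = 16UL << 20;	/* try 16 MB first */

	if (chunk_size < size)
		size >>= 1;			/* fall back to 8 MB */
	if (chunk_size < size)
		size >>= 1;			/* 4 MB, the old fixed value */
	return size;
}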
*/ +#define MAC_USE_SCC_A /* Modem port */ +#define MAC_USE_SCC_B /* Printer port */ +#if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B) +/* Initialisation table for SCC with 3.6864 MHz PCLK */ L(scc_initable_mac): .byte 4,0x44 /* x16, 1 stopbit, no parity */ .byte 3,0xc0 /* receiver: 8 bpc */ @@ -2748,6 +2748,7 @@ L(scc_initable_mac): .byte -1 .even #endif +#endif /* CONFIG_MAC */ #ifdef CONFIG_ATARI /* #define USE_PRINTER */ @@ -2756,14 +2757,12 @@ L(scc_initable_mac): #define USE_MFP #if defined(USE_SCC_A) || defined(USE_SCC_B) -#define USE_SCC -/* Initialisation table for SCC */ -L(scc_initable): - .byte 9,12 /* Reset */ +/* Initialisation table for SCC with 7.9872 MHz PCLK */ +/* PCLK == 8.0539 gives baud == 9680.1 */ +L(scc_initable_atari): .byte 4,0x44 /* x16, 1 stopbit, no parity */ .byte 3,0xc0 /* receiver: 8 bpc */ .byte 5,0xe2 /* transmitter: 8 bpc, assert dtr/rts */ - .byte 9,0 /* no interrupts */ .byte 10,0 /* NRZ */ .byte 11,0x50 /* use baud rate generator */ .byte 12,24,13,0 /* 9600 baud */ @@ -2812,7 +2811,7 @@ LMFP_UDR = 0xfffa2f */ /* - * Initialize serial port hardware for 9600/8/1 + * Initialize serial port hardware */ func_start serial_init,%d0/%d1/%a0/%a1 /* @@ -2822,7 +2821,7 @@ func_start serial_init,%d0/%d1/%a0/%a1 * d0 = boot info offset * CONFIG_ATARI * a0 = address of SCC - * a1 = Liobase address/address of scc_initable + * a1 = Liobase address/address of scc_initable_atari * d0 = init data for serial port * CONFIG_MAC * a0 = address of SCC @@ -2843,6 +2842,7 @@ func_start serial_init,%d0/%d1/%a0/%a1 | movew #61,CUSTOMBASE+C_SERPER-ZTWOBASE 1: #endif + #ifdef CONFIG_ATARI is_not_atari(4f) movel %pc@(L(iobase)),%a1 @@ -2857,9 +2857,21 @@ func_start serial_init,%d0/%d1/%a0/%a1 moveb %a1@(LPSG_READ),%d0 bset #5,%d0 moveb %d0,%a1@(LPSG_WRITE) -#elif defined(USE_SCC) +#elif defined(USE_SCC_A) || defined(USE_SCC_B) lea %a1@(LSCC_CTRL),%a0 - lea %pc@(L(scc_initable)),%a1 + /* Reset SCC register pointer */ + moveb %a0@,%d0 + /* Reset SCC device: write register pointer then register value */ + moveb #9,%a0@ + moveb #0xc0,%a0@ + /* Wait for 5 PCLK cycles, which is about 63 CPU cycles */ + /* 5 / 7.9872 MHz = approx. 0.63 us = 63 / 100 MHz */ + movel #32,%d0 +2: + subq #1,%d0 + jne 2b + /* Initialize channel */ + lea %pc@(L(scc_initable_atari)),%a1 2: moveb %a1@+,%d0 jmi 3f moveb %d0,%a0@ @@ -2877,21 +2889,14 @@ func_start serial_init,%d0/%d1/%a0/%a1 jra L(serial_init_done) 4: #endif + #ifdef CONFIG_MAC is_not_mac(L(serial_init_not_mac)) - -#ifdef SERIAL_DEBUG - -/* You may define either or both of these. 
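The Atari path now resets the SCC explicitly rather than keeping reset and
interrupt-mask entries in the table: select write register 9, write the
hardware-reset command, busy-wait long enough for the chip to recover, then
stream (register, value) pairs until the -1 terminator. In C the sequence would
look roughly like this (a sketch; scc_ctrl is a hypothetical pointer to the SCC
control port):

#include <linux/types.h>

static const u8 scc_init_table[] = {
	4, 0x44,	/* x16 clock, 1 stop bit, no parity */
	3, 0xc0,	/* receiver: 8 bits per char */
	5, 0xe2,	/* transmitter: 8 bits, assert DTR/RTS */
	10, 0,		/* NRZ */
	11, 0x50,	/* use the baud rate generator */
	12, 24, 13, 0,	/* 9600 baud */
	0xff		/* terminator (.byte -1 above) */
};

static void scc_reset_and_init(volatile u8 *scc_ctrl)
{
	const u8 *p;

	*scc_ctrl = 9;		/* select WR9 ... */
	*scc_ctrl = 0xc0;	/* ... and force a hardware reset */
	/* the assembly burns ~63 CPU cycles here to cover 5 PCLK cycles */
	for (p = scc_init_table; *p != 0xff; p += 2) {
		*scc_ctrl = p[0];	/* register number */
		*scc_ctrl = p[1];	/* register value */
	}
}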
*/ -#define MAC_USE_SCC_A /* Modem port */ -#define MAC_USE_SCC_B /* Printer port */ - +#if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B) #define mac_scc_cha_b_ctrl_offset 0x0 #define mac_scc_cha_a_ctrl_offset 0x2 #define mac_scc_cha_b_data_offset 0x4 #define mac_scc_cha_a_data_offset 0x6 - -#if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B) movel %pc@(L(mac_sccbase)),%a0 /* Reset SCC register pointer */ moveb %a0@(mac_scc_cha_a_ctrl_offset),%d0 @@ -2905,7 +2910,6 @@ func_start serial_init,%d0/%d1/%a0/%a1 subq #1,%d0 jne 5b #endif - #ifdef MAC_USE_SCC_A /* Initialize channel A */ lea %pc@(L(scc_initable_mac)),%a1 @@ -2916,7 +2920,6 @@ func_start serial_init,%d0/%d1/%a0/%a1 jra 5b 6: #endif /* MAC_USE_SCC_A */ - #ifdef MAC_USE_SCC_B /* Initialize channel B */ lea %pc@(L(scc_initable_mac)),%a1 @@ -2927,9 +2930,6 @@ func_start serial_init,%d0/%d1/%a0/%a1 jra 7b 8: #endif /* MAC_USE_SCC_B */ - -#endif /* SERIAL_DEBUG */ - jra L(serial_init_done) L(serial_init_not_mac): #endif /* CONFIG_MAC */ @@ -2959,6 +2959,15 @@ L(serial_init_not_mac): 2: #endif +#ifdef CONFIG_MVME16x + is_not_mvme16x(L(serial_init_not_mvme16x)) + moveb #0x10,M167_PCSCCMICR + moveb #0x10,M167_PCSCCTICR + moveb #0x10,M167_PCSCCRICR + jra L(serial_init_done) +L(serial_init_not_mvme16x): +#endif + #ifdef CONFIG_APOLLO /* We count on the PROM initializing SIO1 */ #endif @@ -2998,27 +3007,19 @@ func_start serial_putc,%d0/%d1/%a0/%a1 #ifdef CONFIG_MAC is_not_mac(5f) - -#ifdef SERIAL_DEBUG - #if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B) movel %pc@(L(mac_sccbase)),%a1 #endif - #ifdef MAC_USE_SCC_A 3: btst #2,%a1@(mac_scc_cha_a_ctrl_offset) jeq 3b moveb %d0,%a1@(mac_scc_cha_a_data_offset) #endif /* MAC_USE_SCC_A */ - #ifdef MAC_USE_SCC_B 4: btst #2,%a1@(mac_scc_cha_b_ctrl_offset) jeq 4b moveb %d0,%a1@(mac_scc_cha_b_data_offset) #endif /* MAC_USE_SCC_B */ - -#endif /* SERIAL_DEBUG */ - jra L(serial_putc_done) 5: #endif /* CONFIG_MAC */ @@ -3039,7 +3040,7 @@ func_start serial_putc,%d0/%d1/%a0/%a1 nop bset #5,%d0 moveb %d0,%a1@(LPSG_WRITE) -#elif defined(USE_SCC) +#elif defined(USE_SCC_A) || defined(USE_SCC_B) 3: btst #2,%a1@(LSCC_CTRL) jeq 3b moveb %d0,%a1@(LSCC_DATA) @@ -3195,7 +3196,7 @@ func_start puts,%d0/%a0 movel ARG1,%a0 jra 2f 1: -#ifdef CONSOLE +#ifdef CONSOLE_DEBUG console_putc %d0 #endif #ifdef SERIAL_DEBUG @@ -3224,7 +3225,7 @@ func_start putn,%d0-%d2 jls 2f addb #'A'-('9'+1),%d2 2: -#ifdef CONSOLE +#ifdef CONSOLE_DEBUG console_putc %d2 #endif #ifdef SERIAL_DEBUG @@ -3234,21 +3235,19 @@ func_start putn,%d0-%d2 func_return putn -#ifdef CONFIG_MAC +#ifdef CONFIG_EARLY_PRINTK /* - * mac_early_print - * * This routine takes its parameters on the stack. It then * turns around and calls the internal routines. This routine * is used by the boot console. * * The calling parameters are: - * void mac_early_print(const char *str, unsigned length); + * void debug_cons_nputs(const char *str, unsigned length) * * This routine does NOT understand variable arguments only * simple strings! 
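serial_putc polls the SCC before each character: bit 2 of read register 0 is
"transmit buffer empty", so the loop spins until the previous byte has drained
(the btst #2 above) and only then writes the data register. The equivalent C
poll, using the channel A offsets from the Mac layout above (a sketch):

#include <linux/types.h>

#define SCC_CHA_A_CTRL	0x2	/* mac_scc_cha_a_ctrl_offset */
#define SCC_CHA_A_DATA	0x6	/* mac_scc_cha_a_data_offset */
#define SCC_TX_EMPTY	0x04	/* RR0 bit 2 */

static void scc_putc(volatile u8 *scc, u8 ch)
{
	while (!(scc[SCC_CHA_A_CTRL] & SCC_TX_EMPTY))
		;	/* wait for the transmitter to drain */
	scc[SCC_CHA_A_DATA] = ch;
}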
*/ -ENTRY(mac_early_print) +ENTRY(debug_cons_nputs) moveml %d0/%d1/%a0,%sp@- movew %sr,%sp@- ori #0x0700,%sr @@ -3256,7 +3255,7 @@ ENTRY(mac_early_print) movel %sp@(22),%d1 /* fetch parameter */ jra 2f 1: -#ifdef CONSOLE +#ifdef CONSOLE_DEBUG console_putc %d0 #endif #ifdef SERIAL_DEBUG @@ -3270,7 +3269,7 @@ ENTRY(mac_early_print) movew %sp@+,%sr moveml %sp@+,%d0/%d1/%a0 rts -#endif /* CONFIG_MAC */ +#endif /* CONFIG_EARLY_PRINTK */ #if defined(CONFIG_HP300) || defined(CONFIG_APOLLO) func_start set_leds,%d0/%a0 @@ -3292,7 +3291,7 @@ func_start set_leds,%d0/%a0 func_return set_leds #endif -#ifdef CONSOLE +#ifdef CONSOLE_DEBUG /* * For continuity, see the data alignment * to which this structure is tied. @@ -3396,43 +3395,6 @@ L(console_clear_loop): 1: func_return console_init -func_start console_put_stats,%a0/%d7 - /* - * Some of the register usage that follows - * a0 = pointer to boot_info - * d7 = value of boot_info fields - */ - puts "\nMacLinux\n" - -#ifdef SERIAL_DEBUG - puts "\n vidaddr:" - putn %pc@(L(mac_videobase)) /* video addr. */ - - puts "\n _stext:" - lea %pc@(_stext),%a0 - putn %a0 - - puts "\nbootinfo:" - lea %pc@(_end),%a0 - putn %a0 - - puts "\n cpuid:" - putn %pc@(L(cputype)) - -# ifdef CONFIG_MAC - puts "\n sccbase:" - putn %pc@(L(mac_sccbase)) -# endif -# ifdef MMU_PRINT - putc '\n' - jbsr mmu_print_machine_cpu_types -# endif -#endif /* SERIAL_DEBUG */ - - putc '\n' - -func_return console_put_stats - #ifdef CONFIG_LOGO func_start console_put_penguin,%a0-%a1/%d0-%d7 /* @@ -3774,12 +3736,15 @@ L(white_16): L(console_plot_pixel_exit): func_return console_plot_pixel -#endif /* CONSOLE */ +#endif /* CONSOLE_DEBUG */ __INITDATA .align 4 +m68k_init_mapped_size: + .long 0 + #if defined(CONFIG_ATARI) || defined(CONFIG_AMIGA) || \ defined(CONFIG_HP300) || defined(CONFIG_APOLLO) L(custom): @@ -3787,7 +3752,7 @@ L(iobase): .long 0 #endif -#if defined(CONSOLE) +#ifdef CONSOLE_DEBUG L(console_globals): .long 0 /* cursor column */ .long 0 /* cursor row */ @@ -3798,7 +3763,7 @@ L(console_font): .long 0 /* pointer to console font (struct font_desc) */ L(console_font_data): .long 0 /* pointer to console font data */ -#endif /* CONSOLE */ +#endif /* CONSOLE_DEBUG */ #if defined(MMU_PRINT) L(mmu_print_data): @@ -3838,7 +3803,9 @@ M167_CYIER = 0xfff45011 M167_CYLICR = 0xfff45026 M167_CYTEOIR = 0xfff45085 M167_CYTDR = 0xfff450f8 +M167_PCSCCMICR = 0xfff4201d M167_PCSCCTICR = 0xfff4201e +M167_PCSCCRICR = 0xfff4201f M167_PCTPIACKR = 0xfff42025 #endif @@ -3856,10 +3823,8 @@ L(mac_dimensions): .long 0 L(mac_rowbytes): .long 0 -#ifdef SERIAL_DEBUG L(mac_sccbase): .long 0 -#endif #endif /* CONFIG_MAC */ #if defined (CONFIG_APOLLO) diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S index b6223dc41d82..501e10212789 100644 --- a/arch/m68k/kernel/syscalltable.S +++ b/arch/m68k/kernel/syscalltable.S @@ -371,4 +371,5 @@ ENTRY(sys_call_table) .long sys_finit_module .long sys_sched_setattr .long sys_sched_getattr /* 350 */ + .long sys_renameat2 diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c index 982c3fe73c4a..a471eab1a4dd 100644 --- a/arch/m68k/mac/config.c +++ b/arch/m68k/mac/config.c @@ -71,31 +71,6 @@ static void mac_get_model(char *str); static void mac_identify(void); static void mac_report_hardware(void); -#ifdef CONFIG_EARLY_PRINTK -asmlinkage void __init mac_early_print(const char *s, unsigned n); - -static void __init mac_early_cons_write(struct console *con, - const char *s, unsigned n) -{ - mac_early_print(s, n); -} - -static struct console __initdata 
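debug_cons_nputs above is plain assembly but callable from C: it fetches both
parameters from the stack, raises the interrupt priority level to 7 around the
output loop (ori #0x0700,%sr, restored afterwards), and hands each byte to
console_putc and/or serial_putc. In C terms it behaves roughly like this
(pseudo-code sketch; debug_putc is a hypothetical stand-in for the putc macro):

static void debug_cons_nputs_equiv(const char *s, unsigned n)
{
	unsigned long flags;

	local_irq_save(flags);		/* ori #0x0700,%sr */
	while (n--)
		debug_putc(*s++);	/* console_putc and/or serial_putc */
	local_irq_restore(flags);	/* movew %sp@+,%sr */
}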
mac_early_cons = { - .name = "early", - .write = mac_early_cons_write, - .flags = CON_PRINTBUFFER | CON_BOOT, - .index = -1 -}; - -int __init mac_unregister_early_cons(void) -{ - /* mac_early_print can't be used after init sections are discarded */ - return unregister_console(&mac_early_cons); -} - -late_initcall(mac_unregister_early_cons); -#endif - static void __init mac_sched_init(irq_handler_t vector) { via_init_clock(vector); @@ -190,10 +165,6 @@ void __init config_mac(void) mach_beep = mac_mksound; #endif -#ifdef CONFIG_EARLY_PRINTK - register_console(&mac_early_cons); -#endif - /* * Determine hardware present */ diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c index 7d4024432163..b958916e5eac 100644 --- a/arch/m68k/mm/motorola.c +++ b/arch/m68k/mm/motorola.c @@ -45,7 +45,7 @@ EXPORT_SYMBOL(mm_cachebits); #endif /* size of memory already mapped in head.S */ -#define INIT_MAPPED_SIZE (4UL<<20) +extern __initdata unsigned long m68k_init_mapped_size; extern unsigned long availmem; @@ -271,10 +271,12 @@ void __init paging_init(void) */ addr = m68k_memory[0].addr; size = m68k_memory[0].size; - free_bootmem_node(NODE_DATA(0), availmem, min(INIT_MAPPED_SIZE, size) - (availmem - addr)); + free_bootmem_node(NODE_DATA(0), availmem, + min(m68k_init_mapped_size, size) - (availmem - addr)); map_node(0); - if (size > INIT_MAPPED_SIZE) - free_bootmem_node(NODE_DATA(0), addr + INIT_MAPPED_SIZE, size - INIT_MAPPED_SIZE); + if (size > m68k_init_mapped_size) + free_bootmem_node(NODE_DATA(0), addr + m68k_init_mapped_size, + size - m68k_init_mapped_size); for (i = 1; i < m68k_num_memory; i++) map_node(i); diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c index eab7d342757e..a53803cc66cd 100644 --- a/arch/m68k/mvme16x/config.c +++ b/arch/m68k/mvme16x/config.c @@ -213,7 +213,7 @@ static void __init mvme16x_init_IRQ (void) #define CySCRH (0x22) #define CyTFTC (0x80) -static void cons_write(struct console *co, const char *str, unsigned count) +void mvme16x_cons_write(struct console *co, const char *str, unsigned count) { volatile unsigned char *base_addr = (u_char *)CD2401_ADDR; volatile u_char sink; @@ -268,20 +268,6 @@ static void cons_write(struct console *co, const char *str, unsigned count) base_addr[CyIER] = ier; } -static struct console cons_info = -{ - .name = "sercon", - .write = cons_write, - .flags = CON_PRINTBUFFER | CON_BOOT, - .index = -1, -}; - -static void __init mvme16x_early_console(void) -{ - register_console(&cons_info); - - printk(KERN_INFO "MVME16x: early console registered\n"); -} #endif void __init config_mvme16x(void) @@ -336,16 +322,6 @@ void __init config_mvme16x(void) else { mvme16x_config = MVME16x_CONFIG_GOT_LP | MVME16x_CONFIG_GOT_CD2401; - - /* Dont allow any interrupts from the CD2401 until the interrupt */ - /* handlers are installed */ - - pcc2chip[PccSCCMICR] = 0x10; - pcc2chip[PccSCCTICR] = 0x10; - pcc2chip[PccSCCRICR] = 0x10; -#ifdef CONFIG_EARLY_PRINTK - mvme16x_early_console(); -#endif } } diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h index 5d6b4b407dda..2d6f0de77325 100644 --- a/arch/metag/include/asm/barrier.h +++ b/arch/metag/include/asm/barrier.h @@ -15,6 +15,7 @@ static inline void wr_fence(void) volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_FENCE; barrier(); *flushptr = 0; + barrier(); } #else /* CONFIG_METAG_META21 */ @@ -35,6 +36,7 @@ static inline void wr_fence(void) *flushptr = 0; *flushptr = 0; *flushptr = 0; + barrier(); } #endif /* !CONFIG_METAG_META21 */ @@ -68,6 +70,7 @@ 
static inline void fence(void) volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_ATOMIC_UNLOCK; barrier(); *flushptr = 0; + barrier(); } #define smp_mb() fence() #define smp_rmb() fence() diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h index f16477d1f571..a8a37477c66e 100644 --- a/arch/metag/include/asm/processor.h +++ b/arch/metag/include/asm/processor.h @@ -22,6 +22,8 @@ /* Add an extra page of padding at the top of the stack for the guard page. */ #define STACK_TOP (TASK_SIZE - PAGE_SIZE) #define STACK_TOP_MAX STACK_TOP +/* Maximum virtual space for stack */ +#define STACK_SIZE_MAX (CONFIG_MAX_STACK_SIZE_MB*1024*1024) /* This decides where the kernel will search for a free chunk of vm * space during mmap's. diff --git a/arch/metag/include/uapi/asm/Kbuild b/arch/metag/include/uapi/asm/Kbuild index 84e09feb4d54..ab78be2b6eb0 100644 --- a/arch/metag/include/uapi/asm/Kbuild +++ b/arch/metag/include/uapi/asm/Kbuild @@ -4,11 +4,11 @@ include include/uapi/asm-generic/Kbuild.asm header-y += byteorder.h header-y += ech.h header-y += ptrace.h -header-y += resource.h header-y += sigcontext.h header-y += siginfo.h header-y += swab.h header-y += unistd.h generic-y += mman.h +generic-y += resource.h generic-y += setup.h diff --git a/arch/metag/include/uapi/asm/resource.h b/arch/metag/include/uapi/asm/resource.h deleted file mode 100644 index 526d23cc3054..000000000000 --- a/arch/metag/include/uapi/asm/resource.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef _UAPI_METAG_RESOURCE_H -#define _UAPI_METAG_RESOURCE_H - -#define _STK_LIM_MAX (1 << 28) -#include <asm-generic/resource.h> - -#endif /* _UAPI_METAG_RESOURCE_H */ diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h index 935f9bec414a..335524040fff 100644 --- a/arch/microblaze/include/asm/pci.h +++ b/arch/microblaze/include/asm/pci.h @@ -44,11 +44,6 @@ struct pci_dev; */ #define pcibios_assign_all_busses() 0 -static inline void pcibios_penalize_isa_irq(int irq, int active) -{ - /* We don't do dynamic PCI IRQ allocation */ -} - #ifdef CONFIG_PCI extern void set_pci_dma_ops(struct dma_map_ops *dma_ops); extern struct dma_map_ops *get_pci_dma_ops(void); diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c index 70996cc66aa2..a59de1bc1ce0 100644 --- a/arch/microblaze/pci/pci-common.c +++ b/arch/microblaze/pci/pci-common.c @@ -168,26 +168,6 @@ struct pci_controller *pci_find_hose_for_OF_device(struct device_node *node) return NULL; } -static ssize_t pci_show_devspec(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct pci_dev *pdev; - struct device_node *np; - - pdev = to_pci_dev(dev); - np = pci_device_to_OF_node(pdev); - if (np == NULL || np->full_name == NULL) - return 0; - return sprintf(buf, "%s", np->full_name); -} -static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL); - -/* Add sysfs properties */ -int pcibios_add_platform_entries(struct pci_dev *pdev) -{ - return device_create_file(&pdev->dev, &dev_attr_devspec); -} - void pcibios_set_master(struct pci_dev *dev) { /* No special bus mastering setup handling */ diff --git a/arch/mips/Makefile b/arch/mips/Makefile index 1a5b4032cb66..60a359cfa328 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -151,7 +151,7 @@ cflags-$(CONFIG_CPU_NEVADA) += $(call cc-option,-march=rm5200,-march=r5000) \ -Wa,--trap cflags-$(CONFIG_CPU_RM7000) += $(call cc-option,-march=rm7000,-march=r5000) \ -Wa,--trap -cflags-$(CONFIG_CPU_SB1) += $(call cc-option,-march=sb1,-march=r5000) \ 
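The metag barrier.h change is purely about the compiler: *flushptr is volatile,
but the compiler was still free to move ordinary memory accesses past the flush
store, so a barrier() is added after it (one already sits before). That matters
for the classic publish/consume pattern, where per the definitions above
smp_mb() and smp_rmb() map to fence() (a sketch with hypothetical variables,
using the era's ACCESS_ONCE):

#include <linux/compiler.h>
#include <asm/barrier.h>

static int data, ready;

static void producer(int v)
{
	data = v;
	smp_wmb();			/* "ready = 1" must not sink above this */
	ACCESS_ONCE(ready) = 1;
}

static int consumer(void)
{
	if (ACCESS_ONCE(ready)) {
		smp_rmb();		/* fence() on this platform */
		return data;		/* guaranteed to see v */
	}
	return -1;
}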
+cflags-$(CONFIG_CPU_SB1) += $(call cc-option,-march=sb1 -mno-mdmx -mno-mips3d,-march=r5000) \ -Wa,--trap cflags-$(CONFIG_CPU_R8000) += -march=r8000 -Wa,--trap cflags-$(CONFIG_CPU_R10000) += $(call cc-option,-march=r10000,-march=r8000) \ diff --git a/arch/mips/dec/ecc-berr.c b/arch/mips/dec/ecc-berr.c index 5abf4e894216..2a66e908f6a9 100644 --- a/arch/mips/dec/ecc-berr.c +++ b/arch/mips/dec/ecc-berr.c @@ -21,6 +21,7 @@ #include <asm/addrspace.h> #include <asm/bootinfo.h> #include <asm/cpu.h> +#include <asm/cpu-type.h> #include <asm/irq_regs.h> #include <asm/processor.h> #include <asm/ptrace.h> diff --git a/arch/mips/dec/kn02xa-berr.c b/arch/mips/dec/kn02xa-berr.c index f434b759e3b9..ec606363b806 100644 --- a/arch/mips/dec/kn02xa-berr.c +++ b/arch/mips/dec/kn02xa-berr.c @@ -19,6 +19,7 @@ #include <linux/types.h> #include <asm/addrspace.h> +#include <asm/cpu-type.h> #include <asm/irq_regs.h> #include <asm/ptrace.h> #include <asm/traps.h> diff --git a/arch/mips/dec/prom/Makefile b/arch/mips/dec/prom/Makefile index 064ae7a76bdc..ae73e42ac20b 100644 --- a/arch/mips/dec/prom/Makefile +++ b/arch/mips/dec/prom/Makefile @@ -6,4 +6,3 @@ lib-y += init.o memory.o cmdline.o identify.o console.o lib-$(CONFIG_32BIT) += locore.o -lib-$(CONFIG_64BIT) += call_o32.o diff --git a/arch/mips/dec/prom/call_o32.S b/arch/mips/dec/prom/call_o32.S deleted file mode 100644 index 8c8498159e43..000000000000 --- a/arch/mips/dec/prom/call_o32.S +++ /dev/null @@ -1,89 +0,0 @@ -/* - * O32 interface for the 64 (or N32) ABI. - * - * Copyright (C) 2002 Maciej W. Rozycki - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include <asm/asm.h> -#include <asm/regdef.h> - -/* Maximum number of arguments supported. Must be even! */ -#define O32_ARGC 32 -/* Number of static registers we save. */ -#define O32_STATC 11 -/* Frame size for both of the above. */ -#define O32_FRAMESZ (4 * O32_ARGC + SZREG * O32_STATC) - - .text - -/* - * O32 function call dispatcher, for interfacing 32-bit ROM routines. - * - * The standard 64 (N32) calling sequence is supported, with a0 - * holding a function pointer, a1-a7 -- its first seven arguments - * and the stack -- remaining ones (up to O32_ARGC, including a1-a7). - * Static registers, gp and fp are preserved, v0 holds a result. - * This code relies on the called o32 function for sp and ra - * restoration and thus both this dispatcher and the current stack - * have to be placed in a KSEGx (or KUSEG) address space. Any - * pointers passed have to point to addresses within one of these - * spaces as well. 
- */ -NESTED(call_o32, O32_FRAMESZ, ra) - REG_SUBU sp,O32_FRAMESZ - - REG_S ra,O32_FRAMESZ-1*SZREG(sp) - REG_S fp,O32_FRAMESZ-2*SZREG(sp) - REG_S gp,O32_FRAMESZ-3*SZREG(sp) - REG_S s7,O32_FRAMESZ-4*SZREG(sp) - REG_S s6,O32_FRAMESZ-5*SZREG(sp) - REG_S s5,O32_FRAMESZ-6*SZREG(sp) - REG_S s4,O32_FRAMESZ-7*SZREG(sp) - REG_S s3,O32_FRAMESZ-8*SZREG(sp) - REG_S s2,O32_FRAMESZ-9*SZREG(sp) - REG_S s1,O32_FRAMESZ-10*SZREG(sp) - REG_S s0,O32_FRAMESZ-11*SZREG(sp) - - move jp,a0 - - sll a0,a1,zero - sll a1,a2,zero - sll a2,a3,zero - sll a3,a4,zero - sw a5,0x10(sp) - sw a6,0x14(sp) - sw a7,0x18(sp) - - PTR_LA t0,O32_FRAMESZ(sp) - PTR_LA t1,0x1c(sp) - li t2,O32_ARGC-7 -1: - lw t3,(t0) - REG_ADDU t0,SZREG - sw t3,(t1) - REG_SUBU t2,1 - REG_ADDU t1,4 - bnez t2,1b - - jalr jp - - REG_L s0,O32_FRAMESZ-11*SZREG(sp) - REG_L s1,O32_FRAMESZ-10*SZREG(sp) - REG_L s2,O32_FRAMESZ-9*SZREG(sp) - REG_L s3,O32_FRAMESZ-8*SZREG(sp) - REG_L s4,O32_FRAMESZ-7*SZREG(sp) - REG_L s5,O32_FRAMESZ-6*SZREG(sp) - REG_L s6,O32_FRAMESZ-5*SZREG(sp) - REG_L s7,O32_FRAMESZ-4*SZREG(sp) - REG_L gp,O32_FRAMESZ-3*SZREG(sp) - REG_L fp,O32_FRAMESZ-2*SZREG(sp) - REG_L ra,O32_FRAMESZ-1*SZREG(sp) - - REG_ADDU sp,O32_FRAMESZ - jr ra -END(call_o32) diff --git a/arch/mips/fw/lib/call_o32.S b/arch/mips/fw/lib/call_o32.S index b308b2a0613e..4703fe4dbd9a 100644 --- a/arch/mips/fw/lib/call_o32.S +++ b/arch/mips/fw/lib/call_o32.S @@ -1,7 +1,7 @@ /* * O32 interface for the 64 (or N32) ABI. * - * Copyright (C) 2002 Maciej W. Rozycki + * Copyright (C) 2002, 2014 Maciej W. Rozycki * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -12,28 +12,37 @@ #include <asm/asm.h> #include <asm/regdef.h> +/* O32 register size. */ +#define O32_SZREG 4 /* Maximum number of arguments supported. Must be even! */ #define O32_ARGC 32 -/* Number of static registers we save. */ +/* Number of static registers we save. */ #define O32_STATC 11 -/* Frame size for static register */ -#define O32_FRAMESZ (SZREG * O32_STATC) -/* Frame size on new stack */ -#define O32_FRAMESZ_NEW (SZREG + 4 * O32_ARGC) +/* Argument area frame size. */ +#define O32_ARGSZ (O32_SZREG * O32_ARGC) +/* Static register save area frame size. */ +#define O32_STATSZ (SZREG * O32_STATC) +/* Stack pointer register save area frame size. */ +#define O32_SPSZ SZREG +/* Combined area frame size. */ +#define O32_FRAMESZ (O32_ARGSZ + O32_SPSZ + O32_STATSZ) +/* Switched stack frame size. */ +#define O32_NFRAMESZ (O32_ARGSZ + O32_SPSZ) .text /* * O32 function call dispatcher, for interfacing 32-bit ROM routines. * - * The standard 64 (N32) calling sequence is supported, with a0 - * holding a function pointer, a1 a new stack pointer, a2-a7 -- its - * first six arguments and the stack -- remaining ones (up to O32_ARGC, - * including a2-a7). Static registers, gp and fp are preserved, v0 holds - * a result. This code relies on the called o32 function for sp and ra - * restoration and this dispatcher has to be placed in a KSEGx (or KUSEG) - * address space. Any pointers passed have to point to addresses within - * one of these spaces as well. + * The standard 64 (N32) calling sequence is supported, with a0 holding + * a function pointer, a1 a pointer to the new stack to call the + * function with or 0 if no stack switching is requested, a2-a7 -- the + * function call's first six arguments, and the stack -- the remaining + * arguments (up to O32_ARGC, including a2-a7). Static registers, gp + * and fp are preserved, v0 holds the result. 
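The rewritten dispatcher adds optional stack switching: a1 carries either 0
(keep the current 64-bit sp) or the top of a replacement stack, below which the
dispatcher carves O32_NFRAMESZ bytes for the outgoing O32 argument area plus the
saved sp. Viewed from C, the contract is roughly (a sketch; the NULL convention
matches the DECstation wrappers below, the dedicated stack the SNI code):

#include <linux/kernel.h>	/* ARRAY_SIZE */
#include <linux/types.h>

long call_o32(long (*fn)(), void *stack, ...);

static u64 o32_stk[4096];	/* 16 KB, naturally 8-byte aligned */

static long prom_call_example(long (*prom_fn)(), long arg)
{
	long a, b;

	a = call_o32(prom_fn, NULL, arg);	/* no stack switch */
	b = call_o32(prom_fn, &o32_stk[ARRAY_SIZE(o32_stk)], arg);
	return a + b;
}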
This code relies on the + * called o32 function for sp and ra restoration and this dispatcher has + * to be placed in a KSEGx (or KUSEG) address space. Any pointers + * passed have to point to addresses within one of these spaces as well. */ NESTED(call_o32, O32_FRAMESZ, ra) REG_SUBU sp,O32_FRAMESZ @@ -51,32 +60,36 @@ NESTED(call_o32, O32_FRAMESZ, ra) REG_S s0,O32_FRAMESZ-11*SZREG(sp) move jp,a0 - REG_SUBU s0,a1,O32_FRAMESZ_NEW - REG_S sp,O32_FRAMESZ_NEW-1*SZREG(s0) + + move fp,sp + beqz a1,0f + REG_SUBU fp,a1,O32_NFRAMESZ +0: + REG_S sp,O32_NFRAMESZ-1*SZREG(fp) sll a0,a2,zero sll a1,a3,zero sll a2,a4,zero sll a3,a5,zero - sw a6,0x10(s0) - sw a7,0x14(s0) + sw a6,4*O32_SZREG(fp) + sw a7,5*O32_SZREG(fp) PTR_LA t0,O32_FRAMESZ(sp) - PTR_LA t1,0x18(s0) + PTR_LA t1,6*O32_SZREG(fp) li t2,O32_ARGC-6 1: lw t3,(t0) REG_ADDU t0,SZREG sw t3,(t1) REG_SUBU t2,1 - REG_ADDU t1,4 + REG_ADDU t1,O32_SZREG bnez t2,1b - move sp,s0 + move sp,fp jalr jp - REG_L sp,O32_FRAMESZ_NEW-1*SZREG(sp) + REG_L sp,O32_NFRAMESZ-1*SZREG(sp) REG_L s0,O32_FRAMESZ-11*SZREG(sp) REG_L s1,O32_FRAMESZ-10*SZREG(sp) diff --git a/arch/mips/fw/sni/sniprom.c b/arch/mips/fw/sni/sniprom.c index 2c2cb182af4e..6aa264b9856a 100644 --- a/arch/mips/fw/sni/sniprom.c +++ b/arch/mips/fw/sni/sniprom.c @@ -40,7 +40,8 @@ #ifdef CONFIG_64BIT -static u8 o32_stk[16384]; +/* O32 stack has to be 8-byte aligned. */ +static u64 o32_stk[4096]; #define O32_STK &o32_stk[sizeof(o32_stk)] #define __PROM_O32(fun, arg) fun arg __asm__(#fun); \ diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h index dc2135be2a3a..ff2707ab3295 100644 --- a/arch/mips/include/asm/cpu-info.h +++ b/arch/mips/include/asm/cpu-info.h @@ -39,14 +39,14 @@ struct cache_desc { #define MIPS_CACHE_PINDEX 0x00000020 /* Physically indexed cache */ struct cpuinfo_mips { - unsigned int udelay_val; - unsigned int asid_cache; + unsigned long asid_cache; /* * Capability and feature descriptor structure for MIPS CPU */ unsigned long options; unsigned long ases; + unsigned int udelay_val; unsigned int processor_id; unsigned int fpu_id; unsigned int msa_id; diff --git a/arch/mips/include/asm/dec/prom.h b/arch/mips/include/asm/dec/prom.h index c0ead6313845..b59a2103b61a 100644 --- a/arch/mips/include/asm/dec/prom.h +++ b/arch/mips/include/asm/dec/prom.h @@ -113,31 +113,31 @@ extern int (*__pmax_close)(int); #define __DEC_PROM_O32(fun, arg) fun arg __asm__(#fun); \ __asm__(#fun " = call_o32") -int __DEC_PROM_O32(_rex_bootinit, (int (*)(void))); -int __DEC_PROM_O32(_rex_bootread, (int (*)(void))); -int __DEC_PROM_O32(_rex_getbitmap, (int (*)(memmap *), memmap *)); +int __DEC_PROM_O32(_rex_bootinit, (int (*)(void), void *)); +int __DEC_PROM_O32(_rex_bootread, (int (*)(void), void *)); +int __DEC_PROM_O32(_rex_getbitmap, (int (*)(memmap *), void *, memmap *)); unsigned long *__DEC_PROM_O32(_rex_slot_address, - (unsigned long *(*)(int), int)); -void *__DEC_PROM_O32(_rex_gettcinfo, (void *(*)(void))); -int __DEC_PROM_O32(_rex_getsysid, (int (*)(void))); -void __DEC_PROM_O32(_rex_clear_cache, (void (*)(void))); - -int __DEC_PROM_O32(_prom_getchar, (int (*)(void))); -char *__DEC_PROM_O32(_prom_getenv, (char *(*)(char *), char *)); -int __DEC_PROM_O32(_prom_printf, (int (*)(char *, ...), char *, ...)); - - -#define rex_bootinit() _rex_bootinit(__rex_bootinit) -#define rex_bootread() _rex_bootread(__rex_bootread) -#define rex_getbitmap(x) _rex_getbitmap(__rex_getbitmap, x) -#define rex_slot_address(x) _rex_slot_address(__rex_slot_address, x) -#define rex_gettcinfo() 
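Each __DEC_PROM_O32 wrapper here is a linker-level alias: the declared prototype
carries the real argument list (function pointer, stack pointer, then the O32
arguments) while the asm statements bind the symbol to call_o32. Expanded for
one wrapper (a sketch of what the preprocessor produces):

/* int __DEC_PROM_O32(_rex_getsysid, (int (*)(void), void *)); becomes: */
int _rex_getsysid(int (*)(void), void *) __asm__("_rex_getsysid");
__asm__("_rex_getsysid = call_o32");

/* so rex_getsysid() effectively calls call_o32(__rex_getsysid, NULL) */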
_rex_gettcinfo(__rex_gettcinfo) -#define rex_getsysid() _rex_getsysid(__rex_getsysid) -#define rex_clear_cache() _rex_clear_cache(__rex_clear_cache) - -#define prom_getchar() _prom_getchar(__prom_getchar) -#define prom_getenv(x) _prom_getenv(__prom_getenv, x) -#define prom_printf(x...) _prom_printf(__prom_printf, x) + (unsigned long *(*)(int), void *, int)); +void *__DEC_PROM_O32(_rex_gettcinfo, (void *(*)(void), void *)); +int __DEC_PROM_O32(_rex_getsysid, (int (*)(void), void *)); +void __DEC_PROM_O32(_rex_clear_cache, (void (*)(void), void *)); + +int __DEC_PROM_O32(_prom_getchar, (int (*)(void), void *)); +char *__DEC_PROM_O32(_prom_getenv, (char *(*)(char *), void *, char *)); +int __DEC_PROM_O32(_prom_printf, (int (*)(char *, ...), void *, char *, ...)); + + +#define rex_bootinit() _rex_bootinit(__rex_bootinit, NULL) +#define rex_bootread() _rex_bootread(__rex_bootread, NULL) +#define rex_getbitmap(x) _rex_getbitmap(__rex_getbitmap, NULL, x) +#define rex_slot_address(x) _rex_slot_address(__rex_slot_address, NULL, x) +#define rex_gettcinfo() _rex_gettcinfo(__rex_gettcinfo, NULL) +#define rex_getsysid() _rex_getsysid(__rex_getsysid, NULL) +#define rex_clear_cache() _rex_clear_cache(__rex_clear_cache, NULL) + +#define prom_getchar() _prom_getchar(__prom_getchar, NULL) +#define prom_getenv(x) _prom_getenv(__prom_getenv, NULL, x) +#define prom_printf(x...) _prom_printf(__prom_printf, NULL, x) #else /* !CONFIG_64BIT */ diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h index 12d6842962be..974b0e308963 100644 --- a/arch/mips/include/asm/pci.h +++ b/arch/mips/include/asm/pci.h @@ -73,11 +73,6 @@ extern unsigned long PCIBIOS_MIN_MEM; extern void pcibios_set_master(struct pci_dev *dev); -static inline void pcibios_penalize_isa_irq(int irq, int active) -{ - /* We don't do dynamic PCI IRQ allocation */ -} - #define HAVE_PCI_MMAP extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, diff --git a/arch/mips/include/asm/rm9k-ocd.h b/arch/mips/include/asm/rm9k-ocd.h deleted file mode 100644 index b0b80d9ecf96..000000000000 --- a/arch/mips/include/asm/rm9k-ocd.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright (C) 2004 by Basler Vision Technologies AG - * Author: Thomas Koeller <thomas.koeller@baslerweb.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#if !defined(_ASM_RM9K_OCD_H) -#define _ASM_RM9K_OCD_H - -#include <linux/types.h> -#include <linux/spinlock.h> -#include <asm/io.h> - -extern volatile void __iomem * const ocd_base; -extern volatile void __iomem * const titan_base; - -#define ocd_addr(__x__) (ocd_base + (__x__)) -#define titan_addr(__x__) (titan_base + (__x__)) -#define scram_addr(__x__) (scram_base + (__x__)) - -/* OCD register access */ -#define ocd_readl(__offs__) __raw_readl(ocd_addr(__offs__)) -#define ocd_readw(__offs__) __raw_readw(ocd_addr(__offs__)) -#define ocd_readb(__offs__) __raw_readb(ocd_addr(__offs__)) -#define ocd_writel(__val__, __offs__) \ - __raw_writel((__val__), ocd_addr(__offs__)) -#define ocd_writew(__val__, __offs__) \ - __raw_writew((__val__), ocd_addr(__offs__)) -#define ocd_writeb(__val__, __offs__) \ - __raw_writeb((__val__), ocd_addr(__offs__)) - -/* TITAN register access - 32 bit-wide only */ -#define titan_readl(__offs__) __raw_readl(titan_addr(__offs__)) -#define titan_writel(__val__, __offs__) \ - __raw_writel((__val__), titan_addr(__offs__)) - -/* Protect access to shared TITAN registers */ -extern spinlock_t titan_lock; -extern int titan_irqflags; -#define lock_titan_regs() spin_lock_irqsave(&titan_lock, titan_irqflags) -#define unlock_titan_regs() spin_unlock_irqrestore(&titan_lock, titan_irqflags) - -#endif /* !defined(_ASM_RM9K_OCD_H) */ diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h index c6e9cd2bca8d..17960fe7a8ce 100644 --- a/arch/mips/include/asm/syscall.h +++ b/arch/mips/include/asm/syscall.h @@ -133,6 +133,8 @@ static inline int syscall_get_arch(void) #ifdef CONFIG_64BIT if (!test_thread_flag(TIF_32BIT_REGS)) arch |= __AUDIT_ARCH_64BIT; + if (test_thread_flag(TIF_32BIT_ADDR)) + arch |= __AUDIT_ARCH_CONVENTION_MIPS64_N32; #endif #if defined(__LITTLE_ENDIAN) arch |= __AUDIT_ARCH_LE; diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h index df6e775f3fef..3125797f2a88 100644 --- a/arch/mips/include/uapi/asm/inst.h +++ b/arch/mips/include/uapi/asm/inst.h @@ -484,13 +484,13 @@ enum MIPS6e_i8_func { * Damn ... 
bitfields depend from byteorder :-( */ #ifdef __MIPSEB__ -#define BITFIELD_FIELD(field, more) \ +#define __BITFIELD_FIELD(field, more) \ field; \ more #elif defined(__MIPSEL__) -#define BITFIELD_FIELD(field, more) \ +#define __BITFIELD_FIELD(field, more) \ more \ field; @@ -499,112 +499,112 @@ enum MIPS6e_i8_func { #endif struct j_format { - BITFIELD_FIELD(unsigned int opcode : 6, /* Jump format */ - BITFIELD_FIELD(unsigned int target : 26, + __BITFIELD_FIELD(unsigned int opcode : 6, /* Jump format */ + __BITFIELD_FIELD(unsigned int target : 26, ;)) }; struct i_format { /* signed immediate format */ - BITFIELD_FIELD(unsigned int opcode : 6, - BITFIELD_FIELD(unsigned int rs : 5, - BITFIELD_FIELD(unsigned int rt : 5, - BITFIELD_FIELD(signed int simmediate : 16, + __BITFIELD_FIELD(unsigned int opcode : 6, + __BITFIELD_FIELD(unsigned int rs : 5, + __BITFIELD_FIELD(unsigned int rt : 5, + __BITFIELD_FIELD(signed int simmediate : 16, ;)))) }; struct u_format { /* unsigned immediate format */ - BITFIELD_FIELD(unsigned int opcode : 6, - BITFIELD_FIELD(unsigned int rs : 5, - BITFIELD_FIELD(unsigned int rt : 5, - BITFIELD_FIELD(unsigned int uimmediate : 16, + __BITFIELD_FIELD(unsigned int opcode : 6, + __BITFIELD_FIELD(unsigned int rs : 5, + __BITFIELD_FIELD(unsigned int rt : 5, + __BITFIELD_FIELD(unsigned int uimmediate : 16, ;)))) }; struct c_format { /* Cache (>= R6000) format */ - BITFIELD_FIELD(unsigned int opcode : 6, - BITFIELD_FIELD(unsigned int rs : 5, - BITFIELD_FIELD(unsigned int c_op : 3, - BITFIELD_FIELD(unsigned int cache : 2, - BITFIELD_FIELD(unsigned int simmediate : 16, + __BITFIELD_FIELD(unsigned int opcode : 6, + __BITFIELD_FIELD(unsigned int rs : 5, + __BITFIELD_FIELD(unsigned int c_op : 3, + __BITFIELD_FIELD(unsigned int cache : 2, + __BITFIELD_FIELD(unsigned int simmediate : 16, ;))))) }; struct r_format { /* Register format */ - BITFIELD_FIELD(unsigned int opcode : 6, - BITFIELD_FIELD(unsigned int rs : 5, - BITFIELD_FIELD(unsigned int rt : 5, - BITFIELD_FIELD(unsigned int rd : 5, - BITFIELD_FIELD(unsigned int re : 5, - BITFIELD_FIELD(unsigned int func : 6, + __BITFIELD_FIELD(unsigned int opcode : 6, + __BITFIELD_FIELD(unsigned int rs : 5, + __BITFIELD_FIELD(unsigned int rt : 5, + __BITFIELD_FIELD(unsigned int rd : 5, + __BITFIELD_FIELD(unsigned int re : 5, + __BITFIELD_FIELD(unsigned int func : 6, ;)))))) }; struct p_format { /* Performance counter format (R10000) */ - BITFIELD_FIELD(unsigned int opcode : 6, - BITFIELD_FIELD(unsigned int rs : 5, - BITFIELD_FIELD(unsigned int rt : 5, - BITFIELD_FIELD(unsigned int rd : 5, - BITFIELD_FIELD(unsigned int re : 5, - BITFIELD_FIELD(unsigned int func : 6, + __BITFIELD_FIELD(unsigned int opcode : 6, + __BITFIELD_FIELD(unsigned int rs : 5, + __BITFIELD_FIELD(unsigned int rt : 5, + __BITFIELD_FIELD(unsigned int rd : 5, + __BITFIELD_FIELD(unsigned int re : 5, + __BITFIELD_FIELD(unsigned int func : 6, ;)))))) }; struct f_format { /* FPU register format */ - BITFIELD_FIELD(unsigned int opcode : 6, - BITFIELD_FIELD(unsigned int : 1, - BITFIELD_FIELD(unsigned int fmt : 4, - BITFIELD_FIELD(unsigned int rt : 5, - BITFIELD_FIELD(unsigned int rd : 5, - BITFIELD_FIELD(unsigned int re : 5, - BITFIELD_FIELD(unsigned int func : 6, + __BITFIELD_FIELD(unsigned int opcode : 6, + __BITFIELD_FIELD(unsigned int : 1, + __BITFIELD_FIELD(unsigned int fmt : 4, + __BITFIELD_FIELD(unsigned int rt : 5, + __BITFIELD_FIELD(unsigned int rd : 5, + __BITFIELD_FIELD(unsigned int re : 5, + __BITFIELD_FIELD(unsigned int func : 6, ;))))))) }; struct ma_format { /* FPU 
multiply and add format (MIPS IV) */ - BITFIELD_FIELD(unsigned int opcode : 6, - BITFIELD_FIELD(unsigned int fr : 5, - BITFIELD_FIELD(unsigned int ft : 5, - BITFIELD_FIELD(unsigned int fs : 5, - BITFIELD_FIELD(unsigned int fd : 5, - BITFIELD_FIELD(unsigned int func : 4, - BITFIELD_FIELD(unsigned int fmt : 2, + __BITFIELD_FIELD(unsigned int opcode : 6, + __BITFIELD_FIELD(unsigned int fr : 5, + __BITFIELD_FIELD(unsigned int ft : 5, + __BITFIELD_FIELD(unsigned int fs : 5, + __BITFIELD_FIELD(unsigned int fd : 5, + __BITFIELD_FIELD(unsigned int func : 4, + __BITFIELD_FIELD(unsigned int fmt : 2, ;))))))) }; struct b_format { /* BREAK and SYSCALL */ - BITFIELD_FIELD(unsigned int opcode : 6, - BITFIELD_FIELD(unsigned int code : 20, - BITFIELD_FIELD(unsigned int func : 6, + __BITFIELD_FIELD(unsigned int opcode : 6, + __BITFIELD_FIELD(unsigned int code : 20, + __BITFIELD_FIELD(unsigned int func : 6, ;))) }; struct ps_format { /* MIPS-3D / paired single format */ - BITFIELD_FIELD(unsigned int opcode : 6, - BITFIELD_FIELD(unsigned int rs : 5, - BITFIELD_FIELD(unsigned int ft : 5, - BITFIELD_FIELD(unsigned int fs : 5, - BITFIELD_FIELD(unsigned int fd : 5, - BITFIELD_FIELD(unsigned int func : 6, + __BITFIELD_FIELD(unsigned int opcode : 6, + __BITFIELD_FIELD(unsigned int rs : 5, + __BITFIELD_FIELD(unsigned int ft : 5, + __BITFIELD_FIELD(unsigned int fs : 5, + __BITFIELD_FIELD(unsigned int fd : 5, + __BITFIELD_FIELD(unsigned int func : 6, ;)))))) }; struct v_format { /* MDMX vector format */ - BITFIELD_FIELD(unsigned int opcode : 6, - BITFIELD_FIELD(unsigned int sel : 4, - BITFIELD_FIELD(unsigned int fmt : 1, - BITFIELD_FIELD(unsigned int vt : 5, - BITFIELD_FIELD(unsigned int vs : 5, - BITFIELD_FIELD(unsigned int vd : 5, - BITFIELD_FIELD(unsigned int func : 6, + __BITFIELD_FIELD(unsigned int opcode : 6, + __BITFIELD_FIELD(unsigned int sel : 4, + __BITFIELD_FIELD(unsigned int fmt : 1, + __BITFIELD_FIELD(unsigned int vt : 5, + __BITFIELD_FIELD(unsigned int vs : 5, + __BITFIELD_FIELD(unsigned int vd : 5, + __BITFIELD_FIELD(unsigned int func : 6, ;))))))) }; struct spec3_format { /* SPEC3 */ - BITFIELD_FIELD(unsigned int opcode:6, - BITFIELD_FIELD(unsigned int rs:5, - BITFIELD_FIELD(unsigned int rt:5, - BITFIELD_FIELD(signed int simmediate:9, - BITFIELD_FIELD(unsigned int func:7, + __BITFIELD_FIELD(unsigned int opcode:6, + __BITFIELD_FIELD(unsigned int rs:5, + __BITFIELD_FIELD(unsigned int rt:5, + __BITFIELD_FIELD(signed int simmediate:9, + __BITFIELD_FIELD(unsigned int func:7, ;))))) }; @@ -616,141 +616,141 @@ struct spec3_format { /* SPEC3 */ * if it is MIPS32 instruction re-encoded for use in the microMIPS ASE. 
*/ struct fb_format { /* FPU branch format (MIPS32) */ - BITFIELD_FIELD(unsigned int opcode : 6, - BITFIELD_FIELD(unsigned int bc : 5, - BITFIELD_FIELD(unsigned int cc : 3, - BITFIELD_FIELD(unsigned int flag : 2, - BITFIELD_FIELD(signed int simmediate : 16, + __BITFIELD_FIELD(unsigned int opcode : 6, + __BITFIELD_FIELD(unsigned int bc : 5, + __BITFIELD_FIELD(unsigned int cc : 3, + __BITFIELD_FIELD(unsigned int flag : 2, + __BITFIELD_FIELD(signed int simmediate : 16, ;))))) }; struct fp0_format { /* FPU multiply and add format (MIPS32) */ - BITFIELD_FIELD(unsigned int opcode : 6, - BITFIELD_FIELD(unsigned int fmt : 5, - BITFIELD_FIELD(unsigned int ft : 5, - BITFIELD_FIELD(unsigned int fs : 5, - BITFIELD_FIELD(unsigned int fd : 5, - BITFIELD_FIELD(unsigned int func : 6, + __BITFIELD_FIELD(unsigned int opcode : 6, + __BITFIELD_FIELD(unsigned int fmt : 5, + __BITFIELD_FIELD(unsigned int ft : 5, + __BITFIELD_FIELD(unsigned int fs : 5, + __BITFIELD_FIELD(unsigned int fd : 5, + __BITFIELD_FIELD(unsigned int func : 6, ;)))))) }; struct mm_fp0_format { /* FPU multipy and add format (microMIPS) */ - BITFIELD_FIELD(unsigned int opcode : 6, - BITFIELD_FIELD(unsigned int ft : 5, - BITFIELD_FIELD(unsigned int fs : 5, - BITFIELD_FIELD(unsigned int fd : 5, - BITFIELD_FIELD(unsigned int fmt : 3, - BITFIELD_FIELD(unsigned int op : 2, - BITFIELD_FIELD(unsigned int func : 6, + __BITFIELD_FIELD(unsigned int opcode : 6, + __BITFIELD_FIELD(unsigned int ft : 5, + __BITFIELD_FIELD(unsigned int fs : 5, + __BITFIELD_FIELD(unsigned int fd : 5, + __BITFIELD_FIELD(unsigned int fmt : 3, + __BITFIELD_FIELD(unsigned int op : 2, + __BITFIELD_FIELD(unsigned int func : 6, ;))))))) }; struct fp1_format { /* FPU mfc1 and cfc1 format (MIPS32) */ - BITFIELD_FIELD(unsigned int opcode : 6, - BITFIELD_FIELD(unsigned int op : 5, - BITFIELD_FIELD(unsigned int rt : 5, - BITFIELD_FIELD(unsigned int fs : 5, - BITFIELD_FIELD(unsigned int fd : 5, - BITFIELD_FIELD(unsigned int func : 6, + __BITFIELD_FIELD(unsigned int opcode : 6, + __BITFIELD_FIELD(unsigned int op : 5, + __BITFIELD_FIELD(unsigned int rt : 5, + __BITFIELD_FIELD(unsigned int fs : 5, + __BITFIELD_FIELD(unsigned int fd : 5, + __BITFIELD_FIELD(unsigned int func : 6, ;)))))) }; struct mm_fp1_format { /* FPU mfc1 and cfc1 format (microMIPS) */ - BITFIELD_FIELD(unsigned int opcode : 6, - BITFIELD_FIELD(unsigned int rt : 5, - BITFIELD_FIELD(unsigned int fs : 5, - BITFIELD_FIELD(unsigned int fmt : 2, - BITFIELD_FIELD(unsigned int op : 8, - BITFIELD_FIELD(unsigned int func : 6, + __BITFIELD_FIELD(unsigned int opcode : 6, + __BITFIELD_FIELD(unsigned int rt : 5, + __BITFIELD_FIELD(unsigned int fs : 5, + __BITFIELD_FIELD(unsigned int fmt : 2, + __BITFIELD_FIELD(unsigned int op : 8, + __BITFIELD_FIELD(unsigned int func : 6, ;)))))) }; struct mm_fp2_format { /* FPU movt and movf format (microMIPS) */ - BITFIELD_FIELD(unsigned int opcode : 6, - BITFIELD_FIELD(unsigned int fd : 5, - BITFIELD_FIELD(unsigned int fs : 5, - BITFIELD_FIELD(unsigned int cc : 3, - BITFIELD_FIELD(unsigned int zero : 2, - BITFIELD_FIELD(unsigned int fmt : 2, - BITFIELD_FIELD(unsigned int op : 3, - BITFIELD_FIELD(unsigned int func : 6, + __BITFIELD_FIELD(unsigned int opcode : 6, + __BITFIELD_FIELD(unsigned int fd : 5, + __BITFIELD_FIELD(unsigned int fs : 5, + __BITFIELD_FIELD(unsigned int cc : 3, + __BITFIELD_FIELD(unsigned int zero : 2, + __BITFIELD_FIELD(unsigned int fmt : 2, + __BITFIELD_FIELD(unsigned int op : 3, + __BITFIELD_FIELD(unsigned int func : 6, ;)))))))) }; struct mm_fp3_format { /* FPU abs and neg 
format (microMIPS) */ - BITFIELD_FIELD(unsigned int opcode : 6, - BITFIELD_FIELD(unsigned int rt : 5, - BITFIELD_FIELD(unsigned int fs : 5, - BITFIELD_FIELD(unsigned int fmt : 3, - BITFIELD_FIELD(unsigned int op : 7, - BITFIELD_FIELD(unsigned int func : 6, + __BITFIELD_FIELD(unsigned int opcode : 6, + __BITFIELD_FIELD(unsigned int rt : 5, + __BITFIELD_FIELD(unsigned int fs : 5, + __BITFIELD_FIELD(unsigned int fmt : 3, + __BITFIELD_FIELD(unsigned int op : 7, + __BITFIELD_FIELD(unsigned int func : 6, ;)))))) }; struct mm_fp4_format { /* FPU c.cond format (microMIPS) */ - BITFIELD_FIELD(unsigned int opcode : 6, - BITFIELD_FIELD(unsigned int rt : 5, - BITFIELD_FIELD(unsigned int fs : 5, - BITFIELD_FIELD(unsigned int cc : 3, - BITFIELD_FIELD(unsigned int fmt : 3, - BITFIELD_FIELD(unsigned int cond : 4, - BITFIELD_FIELD(unsigned int func : 6, + __BITFIELD_FIELD(unsigned int opcode : 6, + __BITFIELD_FIELD(unsigned int rt : 5, + __BITFIELD_FIELD(unsigned int fs : 5, + __BITFIELD_FIELD(unsigned int cc : 3, + __BITFIELD_FIELD(unsigned int fmt : 3, + __BITFIELD_FIELD(unsigned int cond : 4, + __BITFIELD_FIELD(unsigned int func : 6, ;))))))) }; struct mm_fp5_format { /* FPU lwxc1 and swxc1 format (microMIPS) */ - BITFIELD_FIELD(unsigned int opcode : 6, - BITFIELD_FIELD(unsigned int index : 5, - BITFIELD_FIELD(unsigned int base : 5, - BITFIELD_FIELD(unsigned int fd : 5, - BITFIELD_FIELD(unsigned int op : 5, - BITFIELD_FIELD(unsigned int func : 6, + __BITFIELD_FIELD(unsigned int opcode : 6, + __BITFIELD_FIELD(unsigned int index : 5, + __BITFIELD_FIELD(unsigned int base : 5, + __BITFIELD_FIELD(unsigned int fd : 5, + __BITFIELD_FIELD(unsigned int op : 5, + __BITFIELD_FIELD(unsigned int func : 6, ;)))))) }; struct fp6_format { /* FPU madd and msub format (MIPS IV) */ - BITFIELD_FIELD(unsigned int opcode : 6, - BITFIELD_FIELD(unsigned int fr : 5, - BITFIELD_FIELD(unsigned int ft : 5, - BITFIELD_FIELD(unsigned int fs : 5, - BITFIELD_FIELD(unsigned int fd : 5, - BITFIELD_FIELD(unsigned int func : 6, + __BITFIELD_FIELD(unsigned int opcode : 6, + __BITFIELD_FIELD(unsigned int fr : 5, + __BITFIELD_FIELD(unsigned int ft : 5, + __BITFIELD_FIELD(unsigned int fs : 5, + __BITFIELD_FIELD(unsigned int fd : 5, + __BITFIELD_FIELD(unsigned int func : 6, ;)))))) }; struct mm_fp6_format { /* FPU madd and msub format (microMIPS) */ - BITFIELD_FIELD(unsigned int opcode : 6, - BITFIELD_FIELD(unsigned int ft : 5, - BITFIELD_FIELD(unsigned int fs : 5, - BITFIELD_FIELD(unsigned int fd : 5, - BITFIELD_FIELD(unsigned int fr : 5, - BITFIELD_FIELD(unsigned int func : 6, + __BITFIELD_FIELD(unsigned int opcode : 6, + __BITFIELD_FIELD(unsigned int ft : 5, + __BITFIELD_FIELD(unsigned int fs : 5, + __BITFIELD_FIELD(unsigned int fd : 5, + __BITFIELD_FIELD(unsigned int fr : 5, + __BITFIELD_FIELD(unsigned int func : 6, ;)))))) }; struct mm_i_format { /* Immediate format (microMIPS) */ - BITFIELD_FIELD(unsigned int opcode : 6, - BITFIELD_FIELD(unsigned int rt : 5, - BITFIELD_FIELD(unsigned int rs : 5, - BITFIELD_FIELD(signed int simmediate : 16, + __BITFIELD_FIELD(unsigned int opcode : 6, + __BITFIELD_FIELD(unsigned int rt : 5, + __BITFIELD_FIELD(unsigned int rs : 5, + __BITFIELD_FIELD(signed int simmediate : 16, ;)))) }; struct mm_m_format { /* Multi-word load/store format (microMIPS) */ - BITFIELD_FIELD(unsigned int opcode : 6, - BITFIELD_FIELD(unsigned int rd : 5, - BITFIELD_FIELD(unsigned int base : 5, - BITFIELD_FIELD(unsigned int func : 4, - BITFIELD_FIELD(signed int simmediate : 12, + __BITFIELD_FIELD(unsigned int opcode : 6, + 
__BITFIELD_FIELD(unsigned int rd : 5, + __BITFIELD_FIELD(unsigned int base : 5, + __BITFIELD_FIELD(unsigned int func : 4, + __BITFIELD_FIELD(signed int simmediate : 12, ;))))) }; struct mm_x_format { /* Scaled indexed load format (microMIPS) */ - BITFIELD_FIELD(unsigned int opcode : 6, - BITFIELD_FIELD(unsigned int index : 5, - BITFIELD_FIELD(unsigned int base : 5, - BITFIELD_FIELD(unsigned int rd : 5, - BITFIELD_FIELD(unsigned int func : 11, + __BITFIELD_FIELD(unsigned int opcode : 6, + __BITFIELD_FIELD(unsigned int index : 5, + __BITFIELD_FIELD(unsigned int base : 5, + __BITFIELD_FIELD(unsigned int rd : 5, + __BITFIELD_FIELD(unsigned int func : 11, ;))))) }; @@ -758,51 +758,51 @@ struct mm_x_format { /* Scaled indexed load format (microMIPS) */ * microMIPS instruction formats (16-bit length) */ struct mm_b0_format { /* Unconditional branch format (microMIPS) */ - BITFIELD_FIELD(unsigned int opcode : 6, - BITFIELD_FIELD(signed int simmediate : 10, - BITFIELD_FIELD(unsigned int : 16, /* Ignored */ + __BITFIELD_FIELD(unsigned int opcode : 6, + __BITFIELD_FIELD(signed int simmediate : 10, + __BITFIELD_FIELD(unsigned int : 16, /* Ignored */ ;))) }; struct mm_b1_format { /* Conditional branch format (microMIPS) */ - BITFIELD_FIELD(unsigned int opcode : 6, - BITFIELD_FIELD(unsigned int rs : 3, - BITFIELD_FIELD(signed int simmediate : 7, - BITFIELD_FIELD(unsigned int : 16, /* Ignored */ + __BITFIELD_FIELD(unsigned int opcode : 6, + __BITFIELD_FIELD(unsigned int rs : 3, + __BITFIELD_FIELD(signed int simmediate : 7, + __BITFIELD_FIELD(unsigned int : 16, /* Ignored */ ;)))) }; struct mm16_m_format { /* Multi-word load/store format */ - BITFIELD_FIELD(unsigned int opcode : 6, - BITFIELD_FIELD(unsigned int func : 4, - BITFIELD_FIELD(unsigned int rlist : 2, - BITFIELD_FIELD(unsigned int imm : 4, - BITFIELD_FIELD(unsigned int : 16, /* Ignored */ + __BITFIELD_FIELD(unsigned int opcode : 6, + __BITFIELD_FIELD(unsigned int func : 4, + __BITFIELD_FIELD(unsigned int rlist : 2, + __BITFIELD_FIELD(unsigned int imm : 4, + __BITFIELD_FIELD(unsigned int : 16, /* Ignored */ ;))))) }; struct mm16_rb_format { /* Signed immediate format */ - BITFIELD_FIELD(unsigned int opcode : 6, - BITFIELD_FIELD(unsigned int rt : 3, - BITFIELD_FIELD(unsigned int base : 3, - BITFIELD_FIELD(signed int simmediate : 4, - BITFIELD_FIELD(unsigned int : 16, /* Ignored */ + __BITFIELD_FIELD(unsigned int opcode : 6, + __BITFIELD_FIELD(unsigned int rt : 3, + __BITFIELD_FIELD(unsigned int base : 3, + __BITFIELD_FIELD(signed int simmediate : 4, + __BITFIELD_FIELD(unsigned int : 16, /* Ignored */ ;))))) }; struct mm16_r3_format { /* Load from global pointer format */ - BITFIELD_FIELD(unsigned int opcode : 6, - BITFIELD_FIELD(unsigned int rt : 3, - BITFIELD_FIELD(signed int simmediate : 7, - BITFIELD_FIELD(unsigned int : 16, /* Ignored */ + __BITFIELD_FIELD(unsigned int opcode : 6, + __BITFIELD_FIELD(unsigned int rt : 3, + __BITFIELD_FIELD(signed int simmediate : 7, + __BITFIELD_FIELD(unsigned int : 16, /* Ignored */ ;)))) }; struct mm16_r5_format { /* Load/store from stack pointer format */ - BITFIELD_FIELD(unsigned int opcode : 6, - BITFIELD_FIELD(unsigned int rt : 5, - BITFIELD_FIELD(signed int simmediate : 5, - BITFIELD_FIELD(unsigned int : 16, /* Ignored */ + __BITFIELD_FIELD(unsigned int opcode : 6, + __BITFIELD_FIELD(unsigned int rt : 5, + __BITFIELD_FIELD(signed int simmediate : 5, + __BITFIELD_FIELD(unsigned int : 16, /* Ignored */ ;)))) }; @@ -810,57 +810,57 @@ struct mm16_r5_format { /* Load/store from stack pointer format */ * 
MIPS16e instruction formats (16-bit length) */ struct m16e_rr { - BITFIELD_FIELD(unsigned int opcode : 5, - BITFIELD_FIELD(unsigned int rx : 3, - BITFIELD_FIELD(unsigned int nd : 1, - BITFIELD_FIELD(unsigned int l : 1, - BITFIELD_FIELD(unsigned int ra : 1, - BITFIELD_FIELD(unsigned int func : 5, + __BITFIELD_FIELD(unsigned int opcode : 5, + __BITFIELD_FIELD(unsigned int rx : 3, + __BITFIELD_FIELD(unsigned int nd : 1, + __BITFIELD_FIELD(unsigned int l : 1, + __BITFIELD_FIELD(unsigned int ra : 1, + __BITFIELD_FIELD(unsigned int func : 5, ;)))))) }; struct m16e_jal { - BITFIELD_FIELD(unsigned int opcode : 5, - BITFIELD_FIELD(unsigned int x : 1, - BITFIELD_FIELD(unsigned int imm20_16 : 5, - BITFIELD_FIELD(signed int imm25_21 : 5, + __BITFIELD_FIELD(unsigned int opcode : 5, + __BITFIELD_FIELD(unsigned int x : 1, + __BITFIELD_FIELD(unsigned int imm20_16 : 5, + __BITFIELD_FIELD(signed int imm25_21 : 5, ;)))) }; struct m16e_i64 { - BITFIELD_FIELD(unsigned int opcode : 5, - BITFIELD_FIELD(unsigned int func : 3, - BITFIELD_FIELD(unsigned int imm : 8, + __BITFIELD_FIELD(unsigned int opcode : 5, + __BITFIELD_FIELD(unsigned int func : 3, + __BITFIELD_FIELD(unsigned int imm : 8, ;))) }; struct m16e_ri64 { - BITFIELD_FIELD(unsigned int opcode : 5, - BITFIELD_FIELD(unsigned int func : 3, - BITFIELD_FIELD(unsigned int ry : 3, - BITFIELD_FIELD(unsigned int imm : 5, + __BITFIELD_FIELD(unsigned int opcode : 5, + __BITFIELD_FIELD(unsigned int func : 3, + __BITFIELD_FIELD(unsigned int ry : 3, + __BITFIELD_FIELD(unsigned int imm : 5, ;)))) }; struct m16e_ri { - BITFIELD_FIELD(unsigned int opcode : 5, - BITFIELD_FIELD(unsigned int rx : 3, - BITFIELD_FIELD(unsigned int imm : 8, + __BITFIELD_FIELD(unsigned int opcode : 5, + __BITFIELD_FIELD(unsigned int rx : 3, + __BITFIELD_FIELD(unsigned int imm : 8, ;))) }; struct m16e_rri { - BITFIELD_FIELD(unsigned int opcode : 5, - BITFIELD_FIELD(unsigned int rx : 3, - BITFIELD_FIELD(unsigned int ry : 3, - BITFIELD_FIELD(unsigned int imm : 5, + __BITFIELD_FIELD(unsigned int opcode : 5, + __BITFIELD_FIELD(unsigned int rx : 3, + __BITFIELD_FIELD(unsigned int ry : 3, + __BITFIELD_FIELD(unsigned int imm : 5, ;)))) }; struct m16e_i8 { - BITFIELD_FIELD(unsigned int opcode : 5, - BITFIELD_FIELD(unsigned int func : 3, - BITFIELD_FIELD(unsigned int imm : 8, + __BITFIELD_FIELD(unsigned int opcode : 5, + __BITFIELD_FIELD(unsigned int func : 3, + __BITFIELD_FIELD(unsigned int imm : 8, ;))) }; diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h index d6e154a9e6a5..5805414777e0 100644 --- a/arch/mips/include/uapi/asm/unistd.h +++ b/arch/mips/include/uapi/asm/unistd.h @@ -371,16 +371,17 @@ #define __NR_finit_module (__NR_Linux + 348) #define __NR_sched_setattr (__NR_Linux + 349) #define __NR_sched_getattr (__NR_Linux + 350) +#define __NR_renameat2 (__NR_Linux + 351) /* * Offset of the last Linux o32 flavoured syscall */ -#define __NR_Linux_syscalls 350 +#define __NR_Linux_syscalls 351 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ #define __NR_O32_Linux 4000 -#define __NR_O32_Linux_syscalls 350 +#define __NR_O32_Linux_syscalls 351 #if _MIPS_SIM == _MIPS_SIM_ABI64 @@ -699,16 +700,17 @@ #define __NR_getdents64 (__NR_Linux + 308) #define __NR_sched_setattr (__NR_Linux + 309) #define __NR_sched_getattr (__NR_Linux + 310) +#define __NR_renameat2 (__NR_Linux + 311) /* * Offset of the last Linux 64-bit flavoured syscall */ -#define __NR_Linux_syscalls 310 +#define __NR_Linux_syscalls 311 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ #define __NR_64_Linux 5000 -#define 
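The blanket rename above exists because inst.h is a uapi header: a bare
BITFIELD_FIELD leaks into userspace and can collide with application macros,
while the double-underscore form sits in the implementation-reserved namespace.
The macro itself is unchanged: it emits the fields in opposite order depending
on byte order, so the opcode always lands in the most-significant bits. For
example:

struct j_format {
	__BITFIELD_FIELD(unsigned int opcode : 6,
	__BITFIELD_FIELD(unsigned int target : 26,
	;))
};

/*
 * big-endian expansion:		little-endian expansion:
 *	unsigned int opcode : 6;	unsigned int target : 26;
 *	unsigned int target : 26;	unsigned int opcode : 6;
 */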
__NR_64_Linux_syscalls 310 +#define __NR_64_Linux_syscalls 311 #if _MIPS_SIM == _MIPS_SIM_NABI32 @@ -1031,15 +1033,16 @@ #define __NR_finit_module (__NR_Linux + 312) #define __NR_sched_setattr (__NR_Linux + 313) #define __NR_sched_getattr (__NR_Linux + 314) +#define __NR_renameat2 (__NR_Linux + 315) /* * Offset of the last N32 flavoured syscall */ -#define __NR_Linux_syscalls 314 +#define __NR_Linux_syscalls 315 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ #define __NR_N32_Linux 6000 -#define __NR_N32_Linux_syscalls 314 +#define __NR_N32_Linux_syscalls 315 #endif /* _UAPI_ASM_UNISTD_H */ diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index 4d78bf445a9c..76122ff5cb5e 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -317,7 +317,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, if (regs->regs[insn.i_format.rs] == regs->regs[insn.i_format.rt]) { epc = epc + 4 + (insn.i_format.simmediate << 2); - if (insn.i_format.rt == beql_op) + if (insn.i_format.opcode == beql_op) ret = BRANCH_LIKELY_TAKEN; } else epc += 8; @@ -329,7 +329,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, if (regs->regs[insn.i_format.rs] != regs->regs[insn.i_format.rt]) { epc = epc + 4 + (insn.i_format.simmediate << 2); - if (insn.i_format.rt == bnel_op) + if (insn.i_format.opcode == bnel_op) ret = BRANCH_LIKELY_TAKEN; } else epc += 8; @@ -341,7 +341,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, /* rt field assumed to be zero */ if ((long)regs->regs[insn.i_format.rs] <= 0) { epc = epc + 4 + (insn.i_format.simmediate << 2); - if (insn.i_format.rt == bnel_op) + if (insn.i_format.opcode == blezl_op) ret = BRANCH_LIKELY_TAKEN; } else epc += 8; @@ -353,7 +353,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, /* rt field assumed to be zero */ if ((long)regs->regs[insn.i_format.rs] > 0) { epc = epc + 4 + (insn.i_format.simmediate << 2); - if (insn.i_format.rt == bnel_op) + if (insn.i_format.opcode == bgtzl_op) ret = BRANCH_LIKELY_TAKEN; } else epc += 8; diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index e40971b51d2f..037a44d962f3 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -124,14 +124,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "kscratch registers\t: %d\n", hweight8(cpu_data[n].kscratch_mask)); seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core); -#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) - if (cpu_has_mipsmt) { - seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id); -#if defined(CONFIG_MIPS_MT_SMTC) - seq_printf(m, "TC\t\t\t: %d\n", cpu_data[n].tc_id); -#endif - } -#endif + sprintf(fmt, "VCE%%c exceptions\t\t: %s\n", cpu_has_vce ? 
"%u" : "not available"); seq_printf(m, fmt, 'D', vced_count); diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 71f85f427034..f639ccd5060c 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -163,7 +163,7 @@ int ptrace_get_watch_regs(struct task_struct *child, enum pt_watch_style style; int i; - if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0) + if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0) return -EIO; if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs))) return -EIO; @@ -177,14 +177,14 @@ int ptrace_get_watch_regs(struct task_struct *child, #endif __put_user(style, &addr->style); - __put_user(current_cpu_data.watch_reg_use_cnt, + __put_user(boot_cpu_data.watch_reg_use_cnt, &addr->WATCH_STYLE.num_valid); - for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) { + for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) { __put_user(child->thread.watch.mips3264.watchlo[i], &addr->WATCH_STYLE.watchlo[i]); __put_user(child->thread.watch.mips3264.watchhi[i] & 0xfff, &addr->WATCH_STYLE.watchhi[i]); - __put_user(current_cpu_data.watch_reg_masks[i], + __put_user(boot_cpu_data.watch_reg_masks[i], &addr->WATCH_STYLE.watch_masks[i]); } for (; i < 8; i++) { @@ -204,12 +204,12 @@ int ptrace_set_watch_regs(struct task_struct *child, unsigned long lt[NUM_WATCH_REGS]; u16 ht[NUM_WATCH_REGS]; - if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0) + if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0) return -EIO; if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs))) return -EIO; /* Check the values. */ - for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) { + for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) { __get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]); #ifdef CONFIG_32BIT if (lt[i] & __UA_LIMIT) @@ -228,7 +228,7 @@ int ptrace_set_watch_regs(struct task_struct *child, return -EINVAL; } /* Install them. 
*/ - for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) { + for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) { if (lt[i] & 7) watch_active = 1; child->thread.watch.mips3264.watchlo[i] = lt[i]; diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index fdc70b400442..3245474f19d5 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -577,3 +577,4 @@ EXPORT(sys_call_table) PTR sys_finit_module PTR sys_sched_setattr PTR sys_sched_getattr /* 4350 */ + PTR sys_renameat2 diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index dd99c3285aea..be2fedd4ae33 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -430,4 +430,5 @@ EXPORT(sys_call_table) PTR sys_getdents64 PTR sys_sched_setattr PTR sys_sched_getattr /* 5310 */ + PTR sys_renameat2 .size sys_call_table,.-sys_call_table diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index f68d2f4f0090..c1dbcda4b816 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -423,4 +423,5 @@ EXPORT(sysn32_call_table) PTR sys_finit_module PTR sys_sched_setattr PTR sys_sched_getattr + PTR sys_renameat2 /* 6315 */ .size sysn32_call_table,.-sysn32_call_table diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 70f6acecd928..f1343ccd7ed7 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -556,4 +556,5 @@ EXPORT(sys32_call_table) PTR sys_finit_module PTR sys_sched_setattr PTR sys_sched_getattr /* 4350 */ + PTR sys_renameat2 .size sys32_call_table,.-sys32_call_table diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 074e857ced28..8119ac2fdfc9 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -1545,7 +1545,7 @@ asmlinkage void cache_parity_error(void) reg_val & (1<<30) ? "secondary" : "primary", reg_val & (1<<31) ? "data" : "insn"); if (cpu_has_mips_r2 && - ((current_cpu_data.processor_id && 0xff0000) == PRID_COMP_MIPS)) { + ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { pr_err("Error bits: %s%s%s%s%s%s%s%s\n", reg_val & (1<<29) ? "ED " : "", reg_val & (1<<28) ? "ET " : "", @@ -1585,7 +1585,7 @@ asmlinkage void do_ftlb(void) /* For the moment, report the problem and hang. 
*/ if (cpu_has_mips_r2 && - ((current_cpu_data.processor_id && 0xff0000) == PRID_COMP_MIPS)) { + ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { pr_err("FTLB error exception, cp0_ecc=0x%08x:\n", read_c0_ecc()); pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc()); diff --git a/arch/mips/lantiq/dts/easy50712.dts b/arch/mips/lantiq/dts/easy50712.dts index fac1f5b178eb..143b8a37b5e4 100644 --- a/arch/mips/lantiq/dts/easy50712.dts +++ b/arch/mips/lantiq/dts/easy50712.dts @@ -8,6 +8,7 @@ }; memory@0 { + device_type = "memory"; reg = <0x0 0x2000000>; }; diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S index 2e4825e48388..9901237563c5 100644 --- a/arch/mips/lib/csum_partial.S +++ b/arch/mips/lib/csum_partial.S @@ -56,14 +56,20 @@ #define UNIT(unit) ((unit)*NBYTES) #define ADDC(sum,reg) \ + .set push; \ + .set noat; \ ADD sum, reg; \ sltu v1, sum, reg; \ ADD sum, v1; \ + .set pop #define ADDC32(sum,reg) \ + .set push; \ + .set noat; \ addu sum, reg; \ sltu v1, sum, reg; \ addu sum, v1; \ + .set pop #define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3) \ LOAD _t0, (offset + UNIT(0))(src); \ @@ -710,6 +716,8 @@ LEAF(csum_partial) ADDC(sum, t2) .Ldone\@: /* fold checksum */ + .set push + .set noat #ifdef USE_DOUBLE dsll32 v1, sum, 0 daddu sum, v1 @@ -732,6 +740,7 @@ LEAF(csum_partial) or sum, sum, t0 1: #endif + .set pop .set reorder ADDC32(sum, psum) jr ra diff --git a/arch/mips/lib/delay.c b/arch/mips/lib/delay.c index 44713af15a62..705cfb7c1a74 100644 --- a/arch/mips/lib/delay.c +++ b/arch/mips/lib/delay.c @@ -6,7 +6,7 @@ * Copyright (C) 1994 by Waldorf Electronics * Copyright (C) 1995 - 2000, 01, 03 by Ralf Baechle * Copyright (C) 1999, 2000 Silicon Graphics, Inc. - * Copyright (C) 2007 Maciej W. Rozycki + * Copyright (C) 2007, 2014 Maciej W. 
Rozycki */ #include <linux/module.h> #include <linux/param.h> @@ -15,6 +15,12 @@ #include <asm/compiler.h> #include <asm/war.h> +#ifndef CONFIG_CPU_DADDI_WORKAROUNDS +#define GCC_DADDI_IMM_ASM() "I" +#else +#define GCC_DADDI_IMM_ASM() "r" +#endif + void __delay(unsigned long loops) { __asm__ __volatile__ ( @@ -22,13 +28,13 @@ void __delay(unsigned long loops) " .align 3 \n" "1: bnez %0, 1b \n" #if BITS_PER_LONG == 32 - " subu %0, 1 \n" + " subu %0, %1 \n" #else - " dsubu %0, 1 \n" + " dsubu %0, %1 \n" #endif " .set reorder \n" : "=r" (loops) - : "0" (loops)); + : GCC_DADDI_IMM_ASM() (1), "0" (loops)); } EXPORT_SYMBOL(__delay); diff --git a/arch/mips/lib/strncpy_user.S b/arch/mips/lib/strncpy_user.S index d3301cd1e9a5..3c32baf8b494 100644 --- a/arch/mips/lib/strncpy_user.S +++ b/arch/mips/lib/strncpy_user.S @@ -35,7 +35,6 @@ LEAF(__strncpy_from_\func\()_asm) bnez v0, .Lfault\@ FEXPORT(__strncpy_from_\func\()_nocheck_asm) - .set noreorder move t0, zero move v1, a1 .ifeqs "\func","kernel" @@ -45,21 +44,21 @@ FEXPORT(__strncpy_from_\func\()_nocheck_asm) .endif PTR_ADDIU v1, 1 R10KCBARRIER(0(ra)) + sb v0, (a0) beqz v0, 2f - sb v0, (a0) PTR_ADDIU t0, 1 + PTR_ADDIU a0, 1 bne t0, a2, 1b - PTR_ADDIU a0, 1 2: PTR_ADDU v0, a1, t0 xor v0, a1 bltz v0, .Lfault\@ - nop + move v0, t0 jr ra # return n - move v0, t0 END(__strncpy_from_\func\()_asm) -.Lfault\@: jr ra - li v0, -EFAULT +.Lfault\@: + li v0, -EFAULT + jr ra .section __ex_table,"a" PTR 1b, .Lfault\@ diff --git a/arch/mips/loongson/Kconfig b/arch/mips/loongson/Kconfig index 7397be226a06..603d79a95f47 100644 --- a/arch/mips/loongson/Kconfig +++ b/arch/mips/loongson/Kconfig @@ -64,7 +64,6 @@ config LEMOTE_MACH3A bool "Lemote Loongson 3A family machines" select ARCH_SPARSEMEM_ENABLE select GENERIC_ISA_DMA_SUPPORT_BROKEN - select GENERIC_HARDIRQS_NO__DO_IRQ select BOOT_ELF32 select BOARD_SCACHE select CSRC_R4K diff --git a/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c b/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c index c639b9db0012..12c75db23420 100644 --- a/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c +++ b/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c @@ -27,8 +27,7 @@ #include <cs5536/cs5536_mfgpt.h> -DEFINE_SPINLOCK(mfgpt_lock); -EXPORT_SYMBOL(mfgpt_lock); +static DEFINE_RAW_SPINLOCK(mfgpt_lock); static u32 mfgpt_base; @@ -55,7 +54,7 @@ EXPORT_SYMBOL(enable_mfgpt0_counter); static void init_mfgpt_timer(enum clock_event_mode mode, struct clock_event_device *evt) { - spin_lock(&mfgpt_lock); + raw_spin_lock(&mfgpt_lock); switch (mode) { case CLOCK_EVT_MODE_PERIODIC: @@ -79,7 +78,7 @@ static void init_mfgpt_timer(enum clock_event_mode mode, /* Nothing to do here */ break; } - spin_unlock(&mfgpt_lock); + raw_spin_unlock(&mfgpt_lock); } static struct clock_event_device mfgpt_clockevent = { @@ -157,7 +156,7 @@ static cycle_t mfgpt_read(struct clocksource *cs) static int old_count; static u32 old_jifs; - spin_lock_irqsave(&mfgpt_lock, flags); + raw_spin_lock_irqsave(&mfgpt_lock, flags); /* * Although our caller may have the read side of xtime_lock, * this is now a seqlock, and we are cheating in this routine @@ -191,7 +190,7 @@ static cycle_t mfgpt_read(struct clocksource *cs) old_count = count; old_jifs = jifs; - spin_unlock_irqrestore(&mfgpt_lock, flags); + raw_spin_unlock_irqrestore(&mfgpt_lock, flags); return (cycle_t) (jifs * COMPARE) + count; } diff --git a/arch/mips/loongson/lemote-2f/clock.c b/arch/mips/loongson/lemote-2f/clock.c index e1f427f4f5f3..67dd94ef28e6 100644 --- a/arch/mips/loongson/lemote-2f/clock.c +++ 
b/arch/mips/loongson/lemote-2f/clock.c @@ -91,6 +91,7 @@ EXPORT_SYMBOL(clk_put); int clk_set_rate(struct clk *clk, unsigned long rate) { + unsigned int rate_khz = rate / 1000; int ret = 0; int regval; int i; @@ -111,10 +112,10 @@ int clk_set_rate(struct clk *clk, unsigned long rate) if (loongson2_clockmod_table[i].frequency == CPUFREQ_ENTRY_INVALID) continue; - if (rate == loongson2_clockmod_table[i].frequency) + if (rate_khz == loongson2_clockmod_table[i].frequency) break; } - if (rate != loongson2_clockmod_table[i].frequency) + if (rate_khz != loongson2_clockmod_table[i].frequency) return -ENOTSUPP; clk->rate = rate; diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c index 58033c44690d..b611102e23b5 100644 --- a/arch/mips/mm/page.c +++ b/arch/mips/mm/page.c @@ -273,7 +273,7 @@ void build_clear_page(void) uasm_i_ori(&buf, A2, A0, off); if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x()) - uasm_i_lui(&buf, AT, 0xa000); + uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000)); off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size) * cache_line_size : 0; @@ -424,7 +424,7 @@ void build_copy_page(void) uasm_i_ori(&buf, A2, A0, off); if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x()) - uasm_i_lui(&buf, AT, 0xa000); + uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000)); off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) * cache_line_size : 0; diff --git a/arch/mips/mm/tlb-funcs.S b/arch/mips/mm/tlb-funcs.S index 30a494db99c2..a5427c6e9757 100644 --- a/arch/mips/mm/tlb-funcs.S +++ b/arch/mips/mm/tlb-funcs.S @@ -16,8 +16,10 @@ #define FASTPATH_SIZE 128 +EXPORT(tlbmiss_handler_setup_pgd_start) LEAF(tlbmiss_handler_setup_pgd) - .space 16 * 4 +1: j 1b /* Dummy, will be replaced. */ + .space 64 END(tlbmiss_handler_setup_pgd) EXPORT(tlbmiss_handler_setup_pgd_end) diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index ee88367ab3ad..f99ec587b151 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -1422,16 +1422,17 @@ static void build_r4000_tlb_refill_handler(void) extern u32 handle_tlbl[], handle_tlbl_end[]; extern u32 handle_tlbs[], handle_tlbs_end[]; extern u32 handle_tlbm[], handle_tlbm_end[]; -extern u32 tlbmiss_handler_setup_pgd[], tlbmiss_handler_setup_pgd_end[]; +extern u32 tlbmiss_handler_setup_pgd_start[], tlbmiss_handler_setup_pgd[]; +extern u32 tlbmiss_handler_setup_pgd_end[]; static void build_setup_pgd(void) { const int a0 = 4; const int __maybe_unused a1 = 5; const int __maybe_unused a2 = 6; - u32 *p = tlbmiss_handler_setup_pgd; + u32 *p = tlbmiss_handler_setup_pgd_start; const int tlbmiss_handler_setup_pgd_size = - tlbmiss_handler_setup_pgd_end - tlbmiss_handler_setup_pgd; + tlbmiss_handler_setup_pgd_end - tlbmiss_handler_setup_pgd_start; #ifndef CONFIG_MIPS_PGD_C0_CONTEXT long pgdc = (long)pgd_current; #endif diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c index 6d0f4ab3632d..f2364e419682 100644 --- a/arch/mips/mti-malta/malta-memory.c +++ b/arch/mips/mti-malta/malta-memory.c @@ -27,7 +27,7 @@ unsigned long physical_memsize = 0L; fw_memblock_t * __init fw_getmdesc(int eva) { char *memsize_str, *ememsize_str __maybe_unused = NULL, *ptr; - unsigned long memsize, ememsize __maybe_unused = 0; + unsigned long memsize = 0, ememsize __maybe_unused = 0; static char cmdline[COMMAND_LINE_SIZE] __initdata; int tmp; diff --git a/arch/mips/pci/pci-rc32434.c b/arch/mips/pci/pci-rc32434.c index b128cb973ebe..7f6ce6d734c0 100644 --- a/arch/mips/pci/pci-rc32434.c +++ b/arch/mips/pci/pci-rc32434.c @@ -53,7 +53,6 @@ static 
struct resource rc32434_res_pci_mem1 = { .start = 0x50000000, .end = 0x5FFFFFFF, .flags = IORESOURCE_MEM, - .parent = &rc32434_res_pci_mem1, .sibling = NULL, .child = &rc32434_res_pci_mem2 }; diff --git a/arch/mips/ralink/dts/mt7620a_eval.dts b/arch/mips/ralink/dts/mt7620a_eval.dts index 35eb874ab7f1..709f58132f5c 100644 --- a/arch/mips/ralink/dts/mt7620a_eval.dts +++ b/arch/mips/ralink/dts/mt7620a_eval.dts @@ -7,6 +7,7 @@ model = "Ralink MT7620A evaluation board"; memory@0 { + device_type = "memory"; reg = <0x0 0x2000000>; }; diff --git a/arch/mips/ralink/dts/rt2880_eval.dts b/arch/mips/ralink/dts/rt2880_eval.dts index 322d7002595b..0a685db093d4 100644 --- a/arch/mips/ralink/dts/rt2880_eval.dts +++ b/arch/mips/ralink/dts/rt2880_eval.dts @@ -7,6 +7,7 @@ model = "Ralink RT2880 evaluation board"; memory@0 { + device_type = "memory"; reg = <0x8000000 0x2000000>; }; diff --git a/arch/mips/ralink/dts/rt3052_eval.dts b/arch/mips/ralink/dts/rt3052_eval.dts index 0ac73ea28198..ec9e9a035541 100644 --- a/arch/mips/ralink/dts/rt3052_eval.dts +++ b/arch/mips/ralink/dts/rt3052_eval.dts @@ -7,6 +7,7 @@ model = "Ralink RT3052 evaluation board"; memory@0 { + device_type = "memory"; reg = <0x0 0x2000000>; }; diff --git a/arch/mips/ralink/dts/rt3883_eval.dts b/arch/mips/ralink/dts/rt3883_eval.dts index 2fa6b330bf4f..e8df21a5d10d 100644 --- a/arch/mips/ralink/dts/rt3883_eval.dts +++ b/arch/mips/ralink/dts/rt3883_eval.dts @@ -7,6 +7,7 @@ model = "Ralink RT3883 evaluation board"; memory@0 { + device_type = "memory"; reg = <0x0 0x2000000>; }; diff --git a/arch/mn10300/include/asm/pci.h b/arch/mn10300/include/asm/pci.h index 166323824683..5f70af25c7d0 100644 --- a/arch/mn10300/include/asm/pci.h +++ b/arch/mn10300/include/asm/pci.h @@ -48,7 +48,6 @@ extern void unit_pci_init(void); #define PCIBIOS_MIN_MEM 0xB8000000 void pcibios_set_master(struct pci_dev *dev); -void pcibios_penalize_isa_irq(int irq); /* Dynamic DMA mapping stuff. * i386 has everything mapped statically. diff --git a/arch/mn10300/unit-asb2305/pci-irq.c b/arch/mn10300/unit-asb2305/pci-irq.c index 77439da04671..fcb28ceb824d 100644 --- a/arch/mn10300/unit-asb2305/pci-irq.c +++ b/arch/mn10300/unit-asb2305/pci-irq.c @@ -40,10 +40,6 @@ void __init pcibios_fixup_irqs(void) } } -void __init pcibios_penalize_isa_irq(int irq) -{ -} - void pcibios_enable_irq(struct pci_dev *dev) { pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 1faefed32749..108d48e652af 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -22,6 +22,7 @@ config PARISC select GENERIC_SMP_IDLE_THREAD select GENERIC_STRNCPY_FROM_USER select SYSCTL_ARCH_UNALIGN_ALLOW + select SYSCTL_EXCEPTION_TRACE select HAVE_MOD_ARCH_SPECIFIC select VIRT_TO_BUS select MODULES_USE_ELF_RELA diff --git a/arch/parisc/include/asm/pci.h b/arch/parisc/include/asm/pci.h index 465154076d23..20df2b04fc09 100644 --- a/arch/parisc/include/asm/pci.h +++ b/arch/parisc/include/asm/pci.h @@ -215,11 +215,6 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev, } #endif -static inline void pcibios_penalize_isa_irq(int irq, int active) -{ - /* We don't need to penalize isa irq's */ -} - static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) { return channel ? 
15 : 14; diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h index 198a86feb574..d951c9681ab3 100644 --- a/arch/parisc/include/asm/processor.h +++ b/arch/parisc/include/asm/processor.h @@ -55,6 +55,11 @@ #define STACK_TOP TASK_SIZE #define STACK_TOP_MAX DEFAULT_TASK_SIZE +/* Allow bigger stacks for 64-bit processes */ +#define STACK_SIZE_MAX (USER_WIDE_MODE \ + ? (1 << 30) /* 1 GB */ \ + : (CONFIG_MAX_STACK_SIZE_MB*1024*1024)) + #endif #ifndef __ASSEMBLY__ diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h index 265ae5190b0a..47e0e21d2272 100644 --- a/arch/parisc/include/uapi/asm/unistd.h +++ b/arch/parisc/include/uapi/asm/unistd.h @@ -829,8 +829,9 @@ #define __NR_sched_setattr (__NR_Linux + 334) #define __NR_sched_getattr (__NR_Linux + 335) #define __NR_utimes (__NR_Linux + 336) +#define __NR_renameat2 (__NR_Linux + 337) -#define __NR_Linux_syscalls (__NR_utimes + 1) +#define __NR_Linux_syscalls (__NR_renameat2 + 1) #define __IGNORE_select /* newselect */ diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c index 31ffa9b55322..e1ffea2f9a0b 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c @@ -72,10 +72,10 @@ static unsigned long mmap_upper_limit(void) { unsigned long stack_base; - /* Limit stack size to 1GB - see setup_arg_pages() in fs/exec.c */ + /* Limit stack size - see setup_arg_pages() in fs/exec.c */ stack_base = rlimit_max(RLIMIT_STACK); - if (stack_base > (1 << 30)) - stack_base = 1 << 30; + if (stack_base > STACK_SIZE_MAX) + stack_base = STACK_SIZE_MAX; return PAGE_ALIGN(STACK_TOP - stack_base); } diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index a63bb179f79a..838786011037 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -589,10 +589,13 @@ cas_nocontend: # endif /* ENABLE_LWS_DEBUG */ + rsm PSW_SM_I, %r0 /* Disable interrupts */ + /* COW breaks can cause contention on UP systems */ LDCW 0(%sr2,%r20), %r28 /* Try to acquire the lock */ cmpb,<>,n %r0, %r28, cas_action /* Did we get it? */ cas_wouldblock: ldo 2(%r0), %r28 /* 2nd case */ + ssm PSW_SM_I, %r0 b lws_exit /* Contended... 
*/ ldo -EAGAIN(%r0), %r21 /* Spin in userspace */ @@ -619,15 +622,17 @@ cas_action: stw %r1, 4(%sr2,%r20) #endif /* The load and store could fail */ -1: ldw 0(%sr3,%r26), %r28 +1: ldw,ma 0(%sr3,%r26), %r28 sub,<> %r28, %r25, %r0 -2: stw %r24, 0(%sr3,%r26) +2: stw,ma %r24, 0(%sr3,%r26) /* Free lock */ - stw %r20, 0(%sr2,%r20) + stw,ma %r20, 0(%sr2,%r20) #if ENABLE_LWS_DEBUG /* Clear thread register indicator */ stw %r0, 4(%sr2,%r20) #endif + /* Enable interrupts */ + ssm PSW_SM_I, %r0 /* Return to userspace, set no error */ b lws_exit copy %r0, %r21 @@ -639,6 +644,7 @@ cas_action: #if ENABLE_LWS_DEBUG stw %r0, 4(%sr2,%r20) #endif + ssm PSW_SM_I, %r0 b lws_exit ldo -EFAULT(%r0),%r21 /* set errno */ nop diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index 83ead0ea127d..c5fa7a697fba 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S @@ -432,6 +432,7 @@ ENTRY_SAME(sched_setattr) ENTRY_SAME(sched_getattr) /* 335 */ ENTRY_COMP(utimes) + ENTRY_SAME(renameat2) /* Nothing yet */ diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index 1cd1d0c83b6d..47ee620d15d2 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -25,6 +25,7 @@ #include <linux/interrupt.h> #include <linux/console.h> #include <linux/bug.h> +#include <linux/ratelimit.h> #include <asm/assembly.h> #include <asm/uaccess.h> @@ -42,9 +43,6 @@ #include "../math-emu/math-emu.h" /* for handle_fpe() */ -#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */ - /* dumped to the console via printk) */ - #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) DEFINE_SPINLOCK(pa_dbit_lock); #endif @@ -160,6 +158,17 @@ void show_regs(struct pt_regs *regs) } } +static DEFINE_RATELIMIT_STATE(_hppa_rs, + DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); + +#define parisc_printk_ratelimited(critical, regs, fmt, ...) { \ + if ((critical || show_unhandled_signals) && __ratelimit(&_hppa_rs)) { \ + printk(fmt, ##__VA_ARGS__); \ + show_regs(regs); \ + } \ +} + + static void do_show_stack(struct unwind_frame_info *info) { int i = 1; @@ -229,12 +238,10 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err) if (err == 0) return; /* STFU */ - printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n", + parisc_printk_ratelimited(1, regs, + KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n", current->comm, task_pid_nr(current), str, err, regs->iaoq[0]); -#ifdef PRINT_USER_FAULTS - /* XXX for debugging only */ - show_regs(regs); -#endif + return; } @@ -321,14 +328,11 @@ static void handle_break(struct pt_regs *regs) (tt == BUG_TRAP_TYPE_NONE) ? 
9 : 0); } -#ifdef PRINT_USER_FAULTS - if (unlikely(iir != GDB_BREAK_INSN)) { - printk(KERN_DEBUG "break %d,%d: pid=%d command='%s'\n", + if (unlikely(iir != GDB_BREAK_INSN)) + parisc_printk_ratelimited(0, regs, + KERN_DEBUG "break %d,%d: pid=%d command='%s'\n", iir & 31, (iir>>13) & ((1<<13)-1), task_pid_nr(current), current->comm); - show_regs(regs); - } -#endif /* send standard GDB signal */ handle_gdb_break(regs, TRAP_BRKPT); @@ -758,11 +762,9 @@ void notrace handle_interruption(int code, struct pt_regs *regs) default: if (user_mode(regs)) { -#ifdef PRINT_USER_FAULTS - printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n", - task_pid_nr(current), current->comm); - show_regs(regs); -#endif + parisc_printk_ratelimited(0, regs, KERN_DEBUG + "handle_interruption() pid=%d command='%s'\n", + task_pid_nr(current), current->comm); /* SIGBUS, for lack of a better one. */ si.si_signo = SIGBUS; si.si_code = BUS_OBJERR; @@ -779,16 +781,10 @@ void notrace handle_interruption(int code, struct pt_regs *regs) if (user_mode(regs)) { if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) { -#ifdef PRINT_USER_FAULTS - if (fault_space == 0) - printk(KERN_DEBUG "User Fault on Kernel Space "); - else - printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ", - code); - printk(KERN_CONT "pid=%d command='%s'\n", - task_pid_nr(current), current->comm); - show_regs(regs); -#endif + parisc_printk_ratelimited(0, regs, KERN_DEBUG + "User fault %d on space 0x%08lx, pid=%d command='%s'\n", + code, fault_space, + task_pid_nr(current), current->comm); si.si_signo = SIGSEGV; si.si_errno = 0; si.si_code = SEGV_MAPERR; diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index 747550762f3c..3ca9c1131cfe 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -19,10 +19,6 @@ #include <asm/uaccess.h> #include <asm/traps.h> -#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */ - /* dumped to the console via printk) */ - - /* Various important other fields */ #define bit22set(x) (x & 0x00000200) #define bits23_25set(x) (x & 0x000001c0) @@ -34,6 +30,8 @@ DEFINE_PER_CPU(struct exception_data, exception_data); +int show_unhandled_signals = 1; + /* * parisc_acctyp(unsigned int inst) -- * Given a PA-RISC memory access instruction, determine if the @@ -173,6 +171,32 @@ int fixup_exception(struct pt_regs *regs) return 0; } +/* + * Print out info about fatal segfaults, if the show_unhandled_signals + * sysctl is set: + */ +static inline void +show_signal_msg(struct pt_regs *regs, unsigned long code, + unsigned long address, struct task_struct *tsk, + struct vm_area_struct *vma) +{ + if (!unhandled_signal(tsk, SIGSEGV)) + return; + + if (!printk_ratelimit()) + return; + + pr_warn("\n"); + pr_warn("do_page_fault() command='%s' type=%lu address=0x%08lx", + tsk->comm, code, address); + print_vma_addr(KERN_CONT " in ", regs->iaoq[0]); + if (vma) + pr_warn(" vm_start = 0x%08lx, vm_end = 0x%08lx\n", + vma->vm_start, vma->vm_end); + + show_regs(regs); +} + void do_page_fault(struct pt_regs *regs, unsigned long code, unsigned long address) { @@ -270,16 +294,8 @@ bad_area: if (user_mode(regs)) { struct siginfo si; -#ifdef PRINT_USER_FAULTS - printk(KERN_DEBUG "\n"); - printk(KERN_DEBUG "do_page_fault() pid=%d command='%s' type=%lu address=0x%08lx\n", - task_pid_nr(tsk), tsk->comm, code, address); - if (vma) { - printk(KERN_DEBUG "vm_start = 0x%08lx, vm_end = 0x%08lx\n", - vma->vm_start, vma->vm_end); - } - show_regs(regs); -#endif + show_signal_msg(regs, code, address, 
tsk, vma); + switch (code) { case 15: /* Data TLB miss fault/Data page fault */ /* send SIGSEGV when outside of vma */ diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 4c0cedf4e2c7..ce4c68a4a823 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -150,7 +150,9 @@ endif CFLAGS-$(CONFIG_TUNE_CELL) += $(call cc-option,-mtune=cell) -KBUILD_CPPFLAGS += -Iarch/$(ARCH) +asinstr := $(call as-instr,lis 9$(comma)foo@high,-DHAVE_AS_ATHIGH=1) + +KBUILD_CPPFLAGS += -Iarch/$(ARCH) $(asinstr) KBUILD_AFLAGS += -Iarch/$(ARCH) KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y) CPP = $(CC) -E $(KBUILD_CFLAGS) diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h index 95145a15c708..1b0739bc14b5 100644 --- a/arch/powerpc/include/asm/pci.h +++ b/arch/powerpc/include/asm/pci.h @@ -46,11 +46,6 @@ struct pci_dev; #define pcibios_assign_all_busses() \ (pci_has_flag(PCI_REASSIGN_ALL_BUS)) -static inline void pcibios_penalize_isa_irq(int irq, int active) -{ - /* We don't do dynamic PCI IRQ allocation */ -} - #define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) { diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 6586a40a46ce..cded7c1278ef 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -318,11 +318,16 @@ n: addi reg,reg,(name - 0b)@l; #ifdef __powerpc64__ +#ifdef HAVE_AS_ATHIGH +#define __AS_ATHIGH high +#else +#define __AS_ATHIGH h +#endif #define LOAD_REG_IMMEDIATE(reg,expr) \ lis reg,(expr)@highest; \ ori reg,reg,(expr)@higher; \ rldicr reg,reg,32,31; \ - oris reg,reg,(expr)@h; \ + oris reg,reg,(expr)@__AS_ATHIGH; \ ori reg,reg,(expr)@l; #define LOAD_REG_ADDR(reg,name) \ diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h index d0e784e0ff48..521790330672 100644 --- a/arch/powerpc/include/asm/sections.h +++ b/arch/powerpc/include/asm/sections.h @@ -39,6 +39,17 @@ static inline int overlaps_kernel_text(unsigned long start, unsigned long end) (unsigned long)_stext < end; } +static inline int overlaps_kvm_tmp(unsigned long start, unsigned long end) +{ +#ifdef CONFIG_KVM_GUEST + extern char kvm_tmp[]; + return start < (unsigned long)kvm_tmp && + (unsigned long)&kvm_tmp[1024 * 1024] < end; +#else + return 0; +#endif +} + #undef dereference_function_descriptor static inline void *dereference_function_descriptor(void *ptr) { diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index 3ddf70276706..ea4dc3a89c1f 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h @@ -361,3 +361,4 @@ SYSCALL(finit_module) SYSCALL(ni_syscall) /* sys_kcmp */ SYSCALL_SPU(sched_setattr) SYSCALL_SPU(sched_getattr) +SYSCALL_SPU(renameat2) diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index 4494f029b632..9b892bbd9d84 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h @@ -12,7 +12,7 @@ #include <uapi/asm/unistd.h> -#define __NR_syscalls 357 +#define __NR_syscalls 358 #define __NR__exit __NR_exit #define NR_syscalls __NR_syscalls diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h index 881bf2e2560d..2d526f7b48da 100644 --- a/arch/powerpc/include/uapi/asm/unistd.h +++ b/arch/powerpc/include/uapi/asm/unistd.h @@ -379,5 +379,6 @@ #define __NR_kcmp 354 #define __NR_sched_setattr 355 #define __NR_sched_getattr 356 +#define 
__NR_renameat2 357 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c index 6a0175297b0d..dd8695f6cb6d 100644 --- a/arch/powerpc/kernel/kvm.c +++ b/arch/powerpc/kernel/kvm.c @@ -74,7 +74,7 @@ #define KVM_INST_MTSRIN 0x7c0001e4 static bool kvm_patching_worked = true; -static char kvm_tmp[1024 * 1024]; +char kvm_tmp[1024 * 1024]; static int kvm_tmp_index; static inline void kvm_patch_ins(u32 *inst, u32 new_inst) diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index 59d229a2a3e0..879b3aacac32 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c @@ -237,7 +237,7 @@ static void wake_offline_cpus(void) if (!cpu_online(cpu)) { printk(KERN_INFO "kexec: Waking offline cpu %d.\n", cpu); - cpu_up(cpu); + WARN_ON(cpu_up(cpu)); } } } diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index d9476c1fc959..24d342e91790 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -201,26 +201,6 @@ struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node) return NULL; } -static ssize_t pci_show_devspec(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct pci_dev *pdev; - struct device_node *np; - - pdev = to_pci_dev (dev); - np = pci_device_to_OF_node(pdev); - if (np == NULL || np->full_name == NULL) - return 0; - return sprintf(buf, "%s", np->full_name); -} -static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL); - -/* Add sysfs properties */ -int pcibios_add_platform_entries(struct pci_dev *pdev) -{ - return device_create_file(&pdev->dev, &dev_attr_devspec); -} - /* * Reads the interrupt pin to determine if interrupt is use by card. 
* If the interrupt is used, then gets the interrupt line from the diff --git a/arch/powerpc/kernel/pci-hotplug.c b/arch/powerpc/kernel/pci-hotplug.c index c1e17ae68a08..5b789177aa29 100644 --- a/arch/powerpc/kernel/pci-hotplug.c +++ b/arch/powerpc/kernel/pci-hotplug.c @@ -98,8 +98,7 @@ void pcibios_add_pci_devices(struct pci_bus * bus) max = bus->busn_res.start; for (pass = 0; pass < 2; pass++) { list_for_each_entry(dev, &bus->devices, bus_list) { - if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || - dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) + if (pci_is_bridge(dev)) max = pci_scan_bridge(bus, dev, max, pass); } diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c index 83c26d829991..059e244484fe 100644 --- a/arch/powerpc/kernel/pci_of_scan.c +++ b/arch/powerpc/kernel/pci_of_scan.c @@ -362,8 +362,7 @@ static void __of_scan_bus(struct device_node *node, struct pci_bus *bus, /* Now scan child busses */ list_for_each_entry(dev, &bus->devices, bus_list) { - if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || - dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) { + if (pci_is_bridge(dev)) { of_scan_pci_bridge(dev); } } diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 122a580f7322..7e711bdcc6da 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -813,9 +813,6 @@ static void __init clocksource_init(void) static int decrementer_set_next_event(unsigned long evt, struct clock_event_device *dev) { - /* Don't adjust the decrementer if some irq work is pending */ - if (test_irq_work_pending()) - return 0; __get_cpu_var(decrementers_next_tb) = get_tb_or_rtc() + evt; set_dec(evt); diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 94e597e6f15c..7af190a266b3 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -886,7 +886,7 @@ static int kvmppc_book3s_init(void) r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); if (r) return r; -#ifdef CONFIG_KVM_BOOK3S_32 +#ifdef CONFIG_KVM_BOOK3S_32_HANDLER r = kvmppc_book3s_init_pr(); #endif return r; @@ -895,7 +895,7 @@ static int kvmppc_book3s_init(void) static void kvmppc_book3s_exit(void) { -#ifdef CONFIG_KVM_BOOK3S_32 +#ifdef CONFIG_KVM_BOOK3S_32_HANDLER kvmppc_book3s_exit_pr(); #endif kvm_exit(); @@ -905,7 +905,7 @@ module_init(kvmppc_book3s_init); module_exit(kvmppc_book3s_exit); /* On 32bit this is our one and only kernel module */ -#ifdef CONFIG_KVM_BOOK3S_32 +#ifdef CONFIG_KVM_BOOK3S_32_HANDLER MODULE_ALIAS_MISCDEV(KVM_MINOR); MODULE_ALIAS("devname:kvm"); #endif diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index 1d6c56ad5b60..8fcc36306a02 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -234,7 +234,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, pte_size = psize; pte = lookup_linux_pte_and_update(pgdir, hva, writing, &pte_size); - if (pte_present(pte)) { + if (pte_present(pte) && !pte_numa(pte)) { if (writing && !pte_write(pte)) /* make the actual HPTE be read-only */ ptel = hpte_make_readonly(ptel); diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index b031f932c0cc..07c8b5b0f9d2 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1323,6 +1323,110 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) mr r3, r9 bl kvmppc_save_fp +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM +BEGIN_FTR_SECTION + b 2f +END_FTR_SECTION_IFCLR(CPU_FTR_TM) + /* Turn on TM. 
*/ + mfmsr r8 + li r0, 1 + rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG + mtmsrd r8 + + ld r5, VCPU_MSR(r9) + rldicl. r5, r5, 64 - MSR_TS_S_LG, 62 + beq 1f /* TM not active in guest. */ + + li r3, TM_CAUSE_KVM_RESCHED + + /* Clear the MSR RI since r1, r13 are all going to be foobar. */ + li r5, 0 + mtmsrd r5, 1 + + /* All GPRs are volatile at this point. */ + TRECLAIM(R3) + + /* Temporarily store r13 and r9 so we have some regs to play with */ + SET_SCRATCH0(r13) + GET_PACA(r13) + std r9, PACATMSCRATCH(r13) + ld r9, HSTATE_KVM_VCPU(r13) + + /* Get a few more GPRs free. */ + std r29, VCPU_GPRS_TM(29)(r9) + std r30, VCPU_GPRS_TM(30)(r9) + std r31, VCPU_GPRS_TM(31)(r9) + + /* Save away PPR and DSCR soon so don't run with user values. */ + mfspr r31, SPRN_PPR + HMT_MEDIUM + mfspr r30, SPRN_DSCR + ld r29, HSTATE_DSCR(r13) + mtspr SPRN_DSCR, r29 + + /* Save all but r9, r13 & r29-r31 */ + reg = 0 + .rept 29 + .if (reg != 9) && (reg != 13) + std reg, VCPU_GPRS_TM(reg)(r9) + .endif + reg = reg + 1 + .endr + /* ... now save r13 */ + GET_SCRATCH0(r4) + std r4, VCPU_GPRS_TM(13)(r9) + /* ... and save r9 */ + ld r4, PACATMSCRATCH(r13) + std r4, VCPU_GPRS_TM(9)(r9) + + /* Reload stack pointer and TOC. */ + ld r1, HSTATE_HOST_R1(r13) + ld r2, PACATOC(r13) + + /* Set MSR RI now we have r1 and r13 back. */ + li r5, MSR_RI + mtmsrd r5, 1 + + /* Save away checkpinted SPRs. */ + std r31, VCPU_PPR_TM(r9) + std r30, VCPU_DSCR_TM(r9) + mflr r5 + mfcr r6 + mfctr r7 + mfspr r8, SPRN_AMR + mfspr r10, SPRN_TAR + std r5, VCPU_LR_TM(r9) + stw r6, VCPU_CR_TM(r9) + std r7, VCPU_CTR_TM(r9) + std r8, VCPU_AMR_TM(r9) + std r10, VCPU_TAR_TM(r9) + + /* Restore r12 as trap number. */ + lwz r12, VCPU_TRAP(r9) + + /* Save FP/VSX. */ + addi r3, r9, VCPU_FPRS_TM + bl .store_fp_state + addi r3, r9, VCPU_VRS_TM + bl .store_vr_state + mfspr r6, SPRN_VRSAVE + stw r6, VCPU_VRSAVE_TM(r9) +1: + /* + * We need to save these SPRs after the treclaim so that the software + * error code is recorded correctly in the TEXASR. Also the user may + * change these outside of a transaction, so they must always be + * context switched. + */ + mfspr r5, SPRN_TFHAR + mfspr r6, SPRN_TFIAR + mfspr r7, SPRN_TEXASR + std r5, VCPU_TFHAR(r9) + std r6, VCPU_TFIAR(r9) + std r7, VCPU_TEXASR(r9) +2: +#endif + /* Increment yield count if they have a VPA */ ld r8, VCPU_VPA(r9) /* do they have a VPA? 
*/ cmpdi r8, 0 diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index c5c052a9729c..02f1defd8bb9 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -1153,7 +1153,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm, goto free_vcpu; vcpu->arch.book3s = vcpu_book3s; -#ifdef CONFIG_KVM_BOOK3S_32 +#ifdef CONFIG_KVM_BOOK3S_32_HANDLER vcpu->arch.shadow_vcpu = kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL); if (!vcpu->arch.shadow_vcpu) @@ -1198,7 +1198,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm, uninit_vcpu: kvm_vcpu_uninit(vcpu); free_shadow_vcpu: -#ifdef CONFIG_KVM_BOOK3S_32 +#ifdef CONFIG_KVM_BOOK3S_32_HANDLER kfree(vcpu->arch.shadow_vcpu); free_vcpu3s: #endif @@ -1215,7 +1215,7 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu) free_page((unsigned long)vcpu->arch.shared & PAGE_MASK); kvm_vcpu_uninit(vcpu); -#ifdef CONFIG_KVM_BOOK3S_32 +#ifdef CONFIG_KVM_BOOK3S_32_HANDLER kfree(vcpu->arch.shadow_vcpu); #endif vfree(vcpu_book3s); diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index d766d6ee33fe..06ba83b036d3 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -207,6 +207,10 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend, if (overlaps_kernel_text(vaddr, vaddr + step)) tprot &= ~HPTE_R_N; + /* Make kvm guest trampolines executable */ + if (overlaps_kvm_tmp(vaddr, vaddr + step)) + tprot &= ~HPTE_R_N; + /* * If relocatable, check if it overlaps interrupt vectors that * are copied down to real 0. For relocatable kernel diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c index 253fefe3d1a0..5b51079f3e3b 100644 --- a/arch/powerpc/platforms/powernv/eeh-ioda.c +++ b/arch/powerpc/platforms/powernv/eeh-ioda.c @@ -549,7 +549,8 @@ static int ioda_eeh_reset(struct eeh_pe *pe, int option) ret = ioda_eeh_phb_reset(hose, option); } else { bus = eeh_pe_bus_get(pe); - if (pci_is_root_bus(bus)) + if (pci_is_root_bus(bus) || + pci_is_root_bus(bus->parent)) ret = ioda_eeh_root_reset(hose, option); else ret = ioda_eeh_bridge_reset(hose, bus->self, option); diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c index cf3c0089bef2..23223cd63e54 100644 --- a/arch/s390/crypto/aes_s390.c +++ b/arch/s390/crypto/aes_s390.c @@ -820,6 +820,9 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func, else memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE); spin_unlock(&ctrblk_lock); + } else { + if (!nbytes) + memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE); } /* * final block may be < AES_BLOCK_SIZE, copy only nbytes diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c index 0a5aac8a9412..7acb77f7ef1a 100644 --- a/arch/s390/crypto/des_s390.c +++ b/arch/s390/crypto/des_s390.c @@ -429,6 +429,9 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, long func, else memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE); spin_unlock(&ctrblk_lock); + } else { + if (!nbytes) + memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE); } /* final block may be < DES_BLOCK_SIZE, copy only nbytes */ if (nbytes) { diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index 2583466f576b..79b5f0783a30 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h @@ -120,6 +120,8 @@ static inline bool zdev_enabled(struct zpci_dev *zdev) return (zdev->fh & (1UL << 31)) ? 
true : false; } +extern const struct attribute_group *zpci_attr_groups[]; + /* ----------------------------------------------------------------------------- Prototypes ----------------------------------------------------------------------------- */ @@ -166,10 +168,6 @@ static inline void zpci_exit_slot(struct zpci_dev *zdev) {} struct zpci_dev *get_zdev(struct pci_dev *); struct zpci_dev *get_zdev_by_fid(u32); -/* sysfs */ -int zpci_sysfs_add_device(struct device *); -void zpci_sysfs_remove_device(struct device *); - /* DMA */ int zpci_dma_init(void); void zpci_dma_exit(void); diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index b3ecb8f5b6ce..9ae6664ff08c 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -158,6 +158,7 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_ONE_REG: case KVM_CAP_ENABLE_CAP: case KVM_CAP_S390_CSS_SUPPORT: + case KVM_CAP_IRQFD: case KVM_CAP_IOEVENTFD: case KVM_CAP_DEVICE_CTRL: case KVM_CAP_ENABLE_CAP_VM: diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 452d3ebd9d0f..e9f8fa9337fe 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -811,7 +811,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize, return NULL; memset(header, 0, sz); header->pages = sz / PAGE_SIZE; - hole = sz - (bpfsize + sizeof(*header)); + hole = min(sz - (bpfsize + sizeof(*header)), PAGE_SIZE - sizeof(*header)); /* Insert random number of illegal instructions before BPF code * and make sure the first instruction starts at an even address. */ diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 1df1d29ac81d..bdf02570d1df 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -530,11 +530,6 @@ static void zpci_unmap_resources(struct zpci_dev *zdev) } } -int pcibios_add_platform_entries(struct pci_dev *pdev) -{ - return zpci_sysfs_add_device(&pdev->dev); -} - static int __init zpci_irq_init(void) { int rc; @@ -671,6 +666,7 @@ int pcibios_add_device(struct pci_dev *pdev) int i; zdev->pdev = pdev; + pdev->dev.groups = zpci_attr_groups; zpci_map_resources(zdev); for (i = 0; i < PCI_BAR_COUNT; i++) { diff --git a/arch/s390/pci/pci_sysfs.c b/arch/s390/pci/pci_sysfs.c index ab4a91393005..b56a3958f1a7 100644 --- a/arch/s390/pci/pci_sysfs.c +++ b/arch/s390/pci/pci_sysfs.c @@ -72,36 +72,18 @@ static ssize_t store_recover(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR(recover, S_IWUSR, NULL, store_recover); -static struct device_attribute *zpci_dev_attrs[] = { - &dev_attr_function_id, - &dev_attr_function_handle, - &dev_attr_pchid, - &dev_attr_pfgid, - &dev_attr_recover, +static struct attribute *zpci_dev_attrs[] = { + &dev_attr_function_id.attr, + &dev_attr_function_handle.attr, + &dev_attr_pchid.attr, + &dev_attr_pfgid.attr, + &dev_attr_recover.attr, + NULL, +}; +static struct attribute_group zpci_attr_group = { + .attrs = zpci_dev_attrs, +}; +const struct attribute_group *zpci_attr_groups[] = { + &zpci_attr_group, NULL, }; - -int zpci_sysfs_add_device(struct device *dev) -{ - int i, rc = 0; - - for (i = 0; zpci_dev_attrs[i]; i++) { - rc = device_create_file(dev, zpci_dev_attrs[i]); - if (rc) - goto error; - } - return 0; - -error: - while (--i >= 0) - device_remove_file(dev, zpci_dev_attrs[i]); - return rc; -} - -void zpci_sysfs_remove_device(struct device *dev) -{ - int i; - - for (i = 0; zpci_dev_attrs[i]; i++) - device_remove_file(dev, zpci_dev_attrs[i]); -} diff --git a/arch/sh/drivers/pci/fixups-dreamcast.c 
b/arch/sh/drivers/pci/fixups-dreamcast.c index d6cde700e316..1d1c5a227e50 100644 --- a/arch/sh/drivers/pci/fixups-dreamcast.c +++ b/arch/sh/drivers/pci/fixups-dreamcast.c @@ -31,6 +31,8 @@ static void gapspci_fixup_resources(struct pci_dev *dev) { struct pci_channel *p = dev->sysdata; + struct resource res; + struct pci_bus_region region; printk(KERN_NOTICE "PCI: Fixing up device %s\n", pci_name(dev)); @@ -50,11 +52,21 @@ static void gapspci_fixup_resources(struct pci_dev *dev) /* * Redirect dma memory allocations to special memory window. + * + * If this GAPSPCI region were mapped by a BAR, the CPU + * phys_addr_t would be pci_resource_start(), and the bus + * address would be pci_bus_address(pci_resource_start()). + * But apparently there's no BAR mapping it, so we just + * "know" its CPU address is GAPSPCI_DMA_BASE. */ + res.start = GAPSPCI_DMA_BASE; + res.end = GAPSPCI_DMA_BASE + GAPSPCI_DMA_SIZE - 1; + res.flags = IORESOURCE_MEM; + pcibios_resource_to_bus(dev->bus, &region, &res); BUG_ON(!dma_declare_coherent_memory(&dev->dev, - GAPSPCI_DMA_BASE, - GAPSPCI_DMA_BASE, - GAPSPCI_DMA_SIZE, + res.start, + region.start, + resource_size(&res), DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE)); break; diff --git a/arch/sh/include/asm/pci.h b/arch/sh/include/asm/pci.h index bff96c2e7d25..5b4511552998 100644 --- a/arch/sh/include/asm/pci.h +++ b/arch/sh/include/asm/pci.h @@ -70,11 +70,6 @@ extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, enum pci_mmap_state mmap_state, int write_combine); extern void pcibios_set_master(struct pci_dev *dev); -static inline void pcibios_penalize_isa_irq(int irq, int active) -{ - /* We don't do dynamic PCI IRQ allocation */ -} - /* Dynamic DMA mapping stuff. * SuperH has everything mapped statically like x86. */ diff --git a/arch/sparc/include/asm/pci_32.h b/arch/sparc/include/asm/pci_32.h index dc503297481f..53e9b4987db0 100644 --- a/arch/sparc/include/asm/pci_32.h +++ b/arch/sparc/include/asm/pci_32.h @@ -16,11 +16,6 @@ #define PCI_IRQ_NONE 0xffffffff -static inline void pcibios_penalize_isa_irq(int irq, int active) -{ - /* We don't do dynamic PCI IRQ allocation */ -} - /* Dynamic DMA mapping stuff. */ #define PCI_DMA_BUS_IS_PHYS (0) diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h index 1633b718d3bc..c6c7396e7627 100644 --- a/arch/sparc/include/asm/pci_64.h +++ b/arch/sparc/include/asm/pci_64.h @@ -16,11 +16,6 @@ #define PCI_IRQ_NONE 0xffffffff -static inline void pcibios_penalize_isa_irq(int irq, int active) -{ - /* We don't do dynamic PCI IRQ allocation */ -} - /* The PCI address space does not equal the physical memory * address space. The networking and block device layers use * this boolean for bounce buffer decisions. diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index fde5abaac0cc..1a49ffdf9da9 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -24,7 +24,8 @@ /* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB). * The page copy blockops can use 0x6000000 to 0x8000000. - * The TSB is mapped in the 0x8000000 to 0xa000000 range. + * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range. + * The 4M TSB is mapped in the 0x8400000 to 0x8800000 range. * The PROM resides in an area spanning 0xf0000000 to 0x100000000. * The vmalloc area spans 0x100000000 to 0x200000000. * Since modules need to be in the lowest 32-bits of the address space, @@ -33,7 +34,8 @@ * 0x400000000. 
*/ #define TLBTEMP_BASE _AC(0x0000000006000000,UL) -#define TSBMAP_BASE _AC(0x0000000008000000,UL) +#define TSBMAP_8K_BASE _AC(0x0000000008000000,UL) +#define TSBMAP_4M_BASE _AC(0x0000000008400000,UL) #define MODULES_VADDR _AC(0x0000000010000000,UL) #define MODULES_LEN _AC(0x00000000e0000000,UL) #define MODULES_END _AC(0x00000000f0000000,UL) diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index 1555bbcae1ee..857ad77df9c0 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c @@ -543,8 +543,7 @@ static void pci_of_scan_bus(struct pci_pbm_info *pbm, printk("PCI: dev header type: %x\n", dev->hdr_type); - if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || - dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) + if (pci_is_bridge(dev)) of_scan_pci_bridge(pbm, child, dev); } } diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c index a364000ca1aa..7f41d40b7e6e 100644 --- a/arch/sparc/kernel/sysfs.c +++ b/arch/sparc/kernel/sysfs.c @@ -151,7 +151,7 @@ static ssize_t store_mmustat_enable(struct device *s, size_t count) { unsigned long val, err; - int ret = sscanf(buf, "%ld", &val); + int ret = sscanf(buf, "%lu", &val); if (ret != 1) return -EINVAL; diff --git a/arch/sparc/lib/NG2memcpy.S b/arch/sparc/lib/NG2memcpy.S index 2c20ad63ddbf..30eee6e8a81b 100644 --- a/arch/sparc/lib/NG2memcpy.S +++ b/arch/sparc/lib/NG2memcpy.S @@ -236,6 +236,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ */ VISEntryHalf + membar #Sync alignaddr %o1, %g0, %g0 add %o1, (64 - 1), %o4 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index a8ff0d1a3b69..4ced3fc66130 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c @@ -281,18 +281,6 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs) show_regs(regs); } -static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs, - unsigned long addr) -{ - static int times; - - if (times++ < 10) - printk(KERN_ERR "FAULT[%s:%d]: 32-bit process " - "reports 64-bit fault address [%lx]\n", - current->comm, current->pid, addr); - show_regs(regs); -} - asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) { enum ctx_state prev_state = exception_enter(); @@ -322,10 +310,8 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) goto intr_or_no_mm; } } - if (unlikely((address >> 32) != 0)) { - bogus_32bit_fault_address(regs, address); + if (unlikely((address >> 32) != 0)) goto intr_or_no_mm; - } } if (regs->tstate & TSTATE_PRIV) { diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c index f5d506fdddad..fe19b81acc09 100644 --- a/arch/sparc/mm/tsb.c +++ b/arch/sparc/mm/tsb.c @@ -133,7 +133,19 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign mm->context.tsb_block[tsb_idx].tsb_nentries = tsb_bytes / sizeof(struct tsb); - base = TSBMAP_BASE; + switch (tsb_idx) { + case MM_TSB_BASE: + base = TSBMAP_8K_BASE; + break; +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) + case MM_TSB_HUGE: + base = TSBMAP_4M_BASE; + break; +#endif + default: + BUG(); + } + tte = pgprot_val(PAGE_KERNEL_LOCKED); tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb); BUG_ON(tsb_paddr & (tsb_bytes - 1UL)); diff --git a/arch/unicore32/include/asm/pci.h b/arch/unicore32/include/asm/pci.h index f5e108f4a151..654407e98619 100644 --- a/arch/unicore32/include/asm/pci.h +++ b/arch/unicore32/include/asm/pci.h @@ -18,11 +18,6 @@ #include <asm-generic/pci.h> #include <mach/hardware.h> /* for PCIBIOS_MIN_* */ -static inline void pcibios_penalize_isa_irq(int 
irq, int active) -{ - /* We don't do dynamic PCI IRQ allocation */ -} - #ifdef CONFIG_PCI static inline void pci_dma_burst_advice(struct pci_dev *pdev, enum pci_dma_burst_strategy *strat, diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h index a8091216963b..68c05398bba9 100644 --- a/arch/x86/include/asm/hugetlb.h +++ b/arch/x86/include/asm/hugetlb.h @@ -52,6 +52,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { + ptep_clear_flush(vma, addr, ptep); } static inline int huge_pte_none(pte_t pte) diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h index 8de6d9cf3b95..678205195ae1 100644 --- a/arch/x86/include/asm/page_64_types.h +++ b/arch/x86/include/asm/page_64_types.h @@ -1,7 +1,7 @@ #ifndef _ASM_X86_PAGE_64_DEFS_H #define _ASM_X86_PAGE_64_DEFS_H -#define THREAD_SIZE_ORDER 1 +#define THREAD_SIZE_ORDER 2 #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) #define CURRENT_MASK (~(THREAD_SIZE - 1)) diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h index 96ae4f4040bb..0892ea0e683f 100644 --- a/arch/x86/include/asm/pci.h +++ b/arch/x86/include/asm/pci.h @@ -68,7 +68,6 @@ void pcibios_config_init(void); void pcibios_scan_root(int bus); void pcibios_set_master(struct pci_dev *dev); -void pcibios_penalize_isa_irq(int irq, int active); struct irq_routing_table *pcibios_get_irq_routing_table(void); int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq); diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h index e709884d0ef9..ca08a27b90b3 100644 --- a/arch/x86/include/asm/xen/hypercall.h +++ b/arch/x86/include/asm/xen/hypercall.h @@ -343,7 +343,7 @@ HYPERVISOR_memory_op(unsigned int cmd, void *arg) } static inline int -HYPERVISOR_multicall(void *call_list, int nr_calls) +HYPERVISOR_multicall(void *call_list, uint32_t nr_calls) { return _hypercall2(int, multicall, call_list, nr_calls); } diff --git a/arch/x86/include/asm/xen/interface.h b/arch/x86/include/asm/xen/interface.h index fd9cb7695b5f..3400dbaec3c3 100644 --- a/arch/x86/include/asm/xen/interface.h +++ b/arch/x86/include/asm/xen/interface.h @@ -54,6 +54,9 @@ typedef unsigned long xen_pfn_t; #define PRI_xen_pfn "lx" typedef unsigned long xen_ulong_t; #define PRI_xen_ulong "lx" +typedef long xen_long_t; +#define PRI_xen_long "lx" + /* Guest handles for primitive C types. */ __DEFINE_GUEST_HANDLE(uchar, unsigned char); __DEFINE_GUEST_HANDLE(uint, unsigned int); diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index 9fa8aa051f54..76164e173a24 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c @@ -10,6 +10,8 @@ * * Copyright 2002 Andi Kleen, SuSE Labs. 
*/ +#define pr_fmt(fmt) "AGP: " fmt + #include <linux/kernel.h> #include <linux/types.h> #include <linux/init.h> @@ -75,14 +77,13 @@ static u32 __init allocate_aperture(void) addr = memblock_find_in_range(GART_MIN_ADDR, GART_MAX_ADDR, aper_size, aper_size); if (!addr) { - printk(KERN_ERR - "Cannot allocate aperture memory hole (%lx,%uK)\n", - addr, aper_size>>10); + pr_err("Cannot allocate aperture memory hole [mem %#010lx-%#010lx] (%uKB)\n", + addr, addr + aper_size - 1, aper_size >> 10); return 0; } memblock_reserve(addr, aper_size); - printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n", - aper_size >> 10, addr); + pr_info("Mapping aperture over RAM [mem %#010lx-%#010lx] (%uKB)\n", + addr, addr + aper_size - 1, aper_size >> 10); register_nosave_region(addr >> PAGE_SHIFT, (addr+aper_size) >> PAGE_SHIFT); @@ -126,10 +127,11 @@ static u32 __init read_agp(int bus, int slot, int func, int cap, u32 *order) u64 aper; u32 old_order; - printk(KERN_INFO "AGP bridge at %02x:%02x:%02x\n", bus, slot, func); + pr_info("pci 0000:%02x:%02x:%02x: AGP bridge\n", bus, slot, func); apsizereg = read_pci_config_16(bus, slot, func, cap + 0x14); if (apsizereg == 0xffffffff) { - printk(KERN_ERR "APSIZE in AGP bridge unreadable\n"); + pr_err("pci 0000:%02x:%02x.%d: APSIZE unreadable\n", + bus, slot, func); return 0; } @@ -153,16 +155,18 @@ static u32 __init read_agp(int bus, int slot, int func, int cap, u32 *order) * On some sick chips, APSIZE is 0. It means it wants 4G * so let double check that order, and lets trust AMD NB settings: */ - printk(KERN_INFO "Aperture from AGP @ %Lx old size %u MB\n", - aper, 32 << old_order); + pr_info("pci 0000:%02x:%02x.%d: AGP aperture [bus addr %#010Lx-%#010Lx] (old size %uMB)\n", + bus, slot, func, aper, aper + (32ULL << (old_order + 20)) - 1, + 32 << old_order); if (aper + (32ULL<<(20 + *order)) > 0x100000000ULL) { - printk(KERN_INFO "Aperture size %u MB (APSIZE %x) is not right, using settings from NB\n", - 32 << *order, apsizereg); + pr_info("pci 0000:%02x:%02x.%d: AGP aperture size %uMB (APSIZE %#x) is not right, using settings from NB\n", + bus, slot, func, 32 << *order, apsizereg); *order = old_order; } - printk(KERN_INFO "Aperture from AGP @ %Lx size %u MB (APSIZE %x)\n", - aper, 32 << *order, apsizereg); + pr_info("pci 0000:%02x:%02x.%d: AGP aperture [bus addr %#010Lx-%#010Lx] (%uMB, APSIZE %#x)\n", + bus, slot, func, aper, aper + (32ULL << (*order + 20)) - 1, + 32 << *order, apsizereg); if (!aperture_valid(aper, (32*1024*1024) << *order, 32<<20)) return 0; @@ -218,7 +222,7 @@ static u32 __init search_agp_bridge(u32 *order, int *valid_agp) } } } - printk(KERN_INFO "No AGP bridge found\n"); + pr_info("No AGP bridge found\n"); return 0; } @@ -310,7 +314,8 @@ void __init early_gart_iommu_check(void) if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) { /* reserve it, so we can reuse it in second kernel */ - printk(KERN_INFO "update e820 for GART\n"); + pr_info("e820: reserve [mem %#010Lx-%#010Lx] for GART\n", + aper_base, aper_base + aper_size - 1); e820_add_region(aper_base, aper_size, E820_RESERVED); update_e820(); } @@ -354,7 +359,7 @@ int __init gart_iommu_hole_init(void) !early_pci_allowed()) return -ENODEV; - printk(KERN_INFO "Checking aperture...\n"); + pr_info("Checking aperture...\n"); if (!fallback_aper_force) agp_aper_base = search_agp_bridge(&agp_aper_order, &valid_agp); @@ -395,8 +400,9 @@ int __init gart_iommu_hole_init(void) aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff; aper_base <<= 25; - 
printk(KERN_INFO "Node %d: aperture @ %Lx size %u MB\n", - node, aper_base, aper_size >> 20); + pr_info("Node %d: aperture [bus addr %#010Lx-%#010Lx] (%uMB)\n", + node, aper_base, aper_base + aper_size - 1, + aper_size >> 20); node++; if (!aperture_valid(aper_base, aper_size, 64<<20)) { @@ -407,9 +413,9 @@ int __init gart_iommu_hole_init(void) if (!no_iommu && max_pfn > MAX_DMA32_PFN && !printed_gart_size_msg) { - printk(KERN_ERR "you are using iommu with agp, but GART size is less than 64M\n"); - printk(KERN_ERR "please increase GART size in your BIOS setup\n"); - printk(KERN_ERR "if BIOS doesn't have that option, contact your HW vendor!\n"); + pr_err("you are using iommu with agp, but GART size is less than 64MB\n"); + pr_err("please increase GART size in your BIOS setup\n"); + pr_err("if BIOS doesn't have that option, contact your HW vendor!\n"); printed_gart_size_msg = 1; } } else { @@ -446,13 +452,10 @@ out: force_iommu || valid_agp || fallback_aper_force) { - printk(KERN_INFO - "Your BIOS doesn't leave a aperture memory hole\n"); - printk(KERN_INFO - "Please enable the IOMMU option in the BIOS setup\n"); - printk(KERN_INFO - "This costs you %d MB of RAM\n", - 32 << fallback_aper_order); + pr_info("Your BIOS doesn't leave a aperture memory hole\n"); + pr_info("Please enable the IOMMU option in the BIOS setup\n"); + pr_info("This costs you %dMB of RAM\n", + 32 << fallback_aper_order); aper_order = fallback_aper_order; aper_alloc = allocate_aperture(); diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index aa333d966886..adb02aa62af5 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -169,7 +169,6 @@ static struct event_constraint intel_slm_event_constraints[] __read_mostly = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ - FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF */ FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */ EVENT_CONSTRAINT_END }; diff --git a/arch/x86/kernel/cpu/rdrand.c b/arch/x86/kernel/cpu/rdrand.c index 384df5105fbc..136ac74dee82 100644 --- a/arch/x86/kernel/cpu/rdrand.c +++ b/arch/x86/kernel/cpu/rdrand.c @@ -27,6 +27,7 @@ static int __init x86_rdrand_setup(char *s) { setup_clear_cpu_cap(X86_FEATURE_RDRAND); + setup_clear_cpu_cap(X86_FEATURE_RDSEED); return 1; } __setup("nordrand", x86_rdrand_setup); diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index af1d14a9ebda..dcbbaa165bde 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c @@ -20,6 +20,8 @@ #include <asm/mmu_context.h> #include <asm/syscalls.h> +int sysctl_ldt16 = 0; + #ifdef CONFIG_SMP static void flush_ldt(void *current_mm) { @@ -234,7 +236,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) * IRET leaking the high bits of the kernel stack address. 
*/ #ifdef CONFIG_X86_64 - if (!ldt_info.seg_32bit) { + if (!ldt_info.seg_32bit && !sysctl_ldt16) { error = -EINVAL; goto out_unlock; } diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 33e8c028842f..138ceffc6377 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -7778,7 +7778,8 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) exec_control = vmcs12->pin_based_vm_exec_control; exec_control |= vmcs_config.pin_based_exec_ctrl; - exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER; + exec_control &= ~(PIN_BASED_VMX_PREEMPTION_TIMER | + PIN_BASED_POSTED_INTR); vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control); vmx->nested.preemption_timer_expired = false; @@ -7815,7 +7816,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) if (!vmx->rdtscp_enabled) exec_control &= ~SECONDARY_EXEC_RDTSCP; /* Take the following fields only from vmcs12 */ - exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; + exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | + SECONDARY_EXEC_APIC_REGISTER_VIRT); if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) exec_control |= vmcs12->secondary_vm_exec_control; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index b6c0bacca9bd..20316c67b824 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -106,6 +106,8 @@ EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz); static u32 tsc_tolerance_ppm = 250; module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR); +static bool backwards_tsc_observed = false; + #define KVM_NR_SHARED_MSRS 16 struct kvm_shared_msrs_global { @@ -1486,7 +1488,8 @@ static void pvclock_update_vm_gtod_copy(struct kvm *kvm) &ka->master_kernel_ns, &ka->master_cycle_now); - ka->use_master_clock = host_tsc_clocksource & vcpus_matched; + ka->use_master_clock = host_tsc_clocksource && vcpus_matched + && !backwards_tsc_observed; if (ka->use_master_clock) atomic_set(&kvm_guest_has_master_clock, 1); @@ -6945,6 +6948,7 @@ int kvm_arch_hardware_enable(void *garbage) */ if (backwards_tsc) { u64 delta_cyc = max_tsc - local_tsc; + backwards_tsc_observed = true; list_for_each_entry(kvm, &vm_list, vm_list) { kvm_for_each_vcpu(i, vcpu, kvm) { vcpu->arch.tsc_offset_adjustment += delta_cyc; diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index dc017735bb91..6d5663a599a7 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -171,7 +171,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen, memset(header, 0xcc, sz); /* fill whole space with int3 instructions */ header->pages = sz / PAGE_SIZE; - hole = sz - (proglen + sizeof(*header)); + hole = min(sz - (proglen + sizeof(*header)), PAGE_SIZE - sizeof(*header)); /* insert a random number of int3 instructions before BPF code */ *image_ptr = &header->image[prandom_u32() % hole]; diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index 01edac6c5e18..5075371ab593 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c @@ -489,8 +489,12 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) } node = acpi_get_node(device->handle); - if (node == NUMA_NO_NODE) + if (node == NUMA_NO_NODE) { node = x86_pci_root_bus_node(busnum); + if (node != 0 && node != NUMA_NO_NODE) + dev_info(&device->dev, FW_BUG "no _PXM; falling back to node %d from hardware (may be inconsistent with ACPI node numbers)\n", + node); + } if (node != NUMA_NO_NODE && !node_online(node)) node = NUMA_NO_NODE; diff --git a/arch/x86/pci/amd_bus.c 
b/arch/x86/pci/amd_bus.c index e88f4c53d7f6..c20d2cc7ef64 100644 --- a/arch/x86/pci/amd_bus.c +++ b/arch/x86/pci/amd_bus.c @@ -11,27 +11,33 @@ #include "bus_numa.h" -/* - * This discovers the pcibus <-> node mapping on AMD K8. - * also get peer root bus resource for io,mmio - */ +#define AMD_NB_F0_NODE_ID 0x60 +#define AMD_NB_F0_UNIT_ID 0x64 +#define AMD_NB_F1_CONFIG_MAP_REG 0xe0 + +#define RANGE_NUM 16 +#define AMD_NB_F1_CONFIG_MAP_RANGES 4 -struct pci_hostbridge_probe { +struct amd_hostbridge { u32 bus; u32 slot; - u32 vendor; u32 device; }; -static struct pci_hostbridge_probe pci_probes[] __initdata = { - { 0, 0x18, PCI_VENDOR_ID_AMD, 0x1100 }, - { 0, 0x18, PCI_VENDOR_ID_AMD, 0x1200 }, - { 0xff, 0, PCI_VENDOR_ID_AMD, 0x1200 }, - { 0, 0x18, PCI_VENDOR_ID_AMD, 0x1300 }, +/* + * IMPORTANT NOTE: + * hb_probes[] and early_root_info_init() is in maintenance mode. + * It only supports K8, Fam10h, Fam11h, and Fam15h_00h-0fh . + * Future processor will rely on information in ACPI. + */ +static struct amd_hostbridge hb_probes[] __initdata = { + { 0, 0x18, 0x1100 }, /* K8 */ + { 0, 0x18, 0x1200 }, /* Family10h */ + { 0xff, 0, 0x1200 }, /* Family10h */ + { 0, 0x18, 0x1300 }, /* Family11h */ + { 0, 0x18, 0x1600 }, /* Family15h */ }; -#define RANGE_NUM 16 - static struct pci_root_info __init *find_pci_root_info(int node, int link) { struct pci_root_info *info; @@ -45,12 +51,12 @@ static struct pci_root_info __init *find_pci_root_info(int node, int link) } /** - * early_fill_mp_bus_to_node() + * early_root_info_init() * called before pcibios_scan_root and pci_scan_bus - * fills the mp_bus_to_cpumask array based according to the LDT Bus Number - * Registers found in the K8 northbridge + * fills the mp_bus_to_cpumask array based according + * to the LDT Bus Number Registers found in the northbridge. */ -static int __init early_fill_mp_bus_info(void) +static int __init early_root_info_init(void) { int i; unsigned bus; @@ -75,19 +81,21 @@ static int __init early_fill_mp_bus_info(void) return -1; found = false; - for (i = 0; i < ARRAY_SIZE(pci_probes); i++) { + for (i = 0; i < ARRAY_SIZE(hb_probes); i++) { u32 id; u16 device; u16 vendor; - bus = pci_probes[i].bus; - slot = pci_probes[i].slot; + bus = hb_probes[i].bus; + slot = hb_probes[i].slot; id = read_pci_config(bus, slot, 0, PCI_VENDOR_ID); - vendor = id & 0xffff; device = (id>>16) & 0xffff; - if (pci_probes[i].vendor == vendor && - pci_probes[i].device == device) { + + if (vendor != PCI_VENDOR_ID_AMD) + continue; + + if (hb_probes[i].device == device) { found = true; break; } @@ -96,10 +104,16 @@ static int __init early_fill_mp_bus_info(void) if (!found) return 0; - for (i = 0; i < 4; i++) { + /* + * We should learn topology and routing information from _PXM and + * _CRS methods in the ACPI namespace. We extract node numbers + * here to work around BIOSes that don't supply _PXM. + */ + for (i = 0; i < AMD_NB_F1_CONFIG_MAP_RANGES; i++) { int min_bus; int max_bus; - reg = read_pci_config(bus, slot, 1, 0xe0 + (i << 2)); + reg = read_pci_config(bus, slot, 1, + AMD_NB_F1_CONFIG_MAP_REG + (i << 2)); /* Check if that register is enabled for bus range */ if ((reg & 7) != 3) @@ -113,10 +127,21 @@ static int __init early_fill_mp_bus_info(void) info = alloc_pci_root_info(min_bus, max_bus, node, link); } + /* + * The following code extracts routing information for use on old + * systems where Linux doesn't automatically use host bridge _CRS + * methods (or when the user specifies "pci=nocrs"). 
+ * + * We only do this through Fam11h, because _CRS should be enough on + * newer systems. + */ + if (boot_cpu_data.x86 > 0x11) + return 0; + /* get the default node and link for left over res */ - reg = read_pci_config(bus, slot, 0, 0x60); + reg = read_pci_config(bus, slot, 0, AMD_NB_F0_NODE_ID); def_node = (reg >> 8) & 0x07; - reg = read_pci_config(bus, slot, 0, 0x64); + reg = read_pci_config(bus, slot, 0, AMD_NB_F0_UNIT_ID); def_link = (reg >> 8) & 0x03; memset(range, 0, sizeof(range)); @@ -363,7 +388,7 @@ static int __init pci_io_ecs_init(void) int cpu; /* assume all cpus from fam10h have IO ECS */ - if (boot_cpu_data.x86 < 0x10) + if (boot_cpu_data.x86 < 0x10) return 0; /* Try the PCI method first. */ @@ -387,7 +412,7 @@ static int __init amd_postcore_init(void) if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) return 0; - early_fill_mp_bus_info(); + early_root_info_init(); pci_io_ecs_init(); return 0; diff --git a/arch/x86/pci/broadcom_bus.c b/arch/x86/pci/broadcom_bus.c index 614392ced7d6..bb461cfd01ab 100644 --- a/arch/x86/pci/broadcom_bus.c +++ b/arch/x86/pci/broadcom_bus.c @@ -60,8 +60,8 @@ static void __init cnb20le_res(u8 bus, u8 slot, u8 func) word1 = read_pci_config_16(bus, slot, func, 0xc4); word2 = read_pci_config_16(bus, slot, func, 0xc6); if (word1 != word2) { - res.start = (word1 << 16) | 0x0000; - res.end = (word2 << 16) | 0xffff; + res.start = ((resource_size_t) word1 << 16) | 0x0000; + res.end = ((resource_size_t) word2 << 16) | 0xffff; res.flags = IORESOURCE_MEM | IORESOURCE_PREFETCH; update_res(info, res.start, res.end, res.flags, 0); } diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index 94ae9ae9574f..b5e60268d93f 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c @@ -6,6 +6,7 @@ #include <linux/dmi.h> #include <linux/pci.h> #include <linux/vgaarb.h> +#include <asm/hpet.h> #include <asm/pci_x86.h> static void pci_fixup_i450nx(struct pci_dev *d) @@ -337,9 +338,7 @@ static void pci_fixup_video(struct pci_dev *pdev) * type BRIDGE, or CARDBUS. Host to PCI controllers use * PCI header type NORMAL. */ - if (bridge - && ((bridge->hdr_type == PCI_HEADER_TYPE_BRIDGE) - || (bridge->hdr_type == PCI_HEADER_TYPE_CARDBUS))) { + if (bridge && (pci_is_bridge(bridge))) { pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &config); if (!(config & PCI_BRIDGE_CTL_VGA)) @@ -526,6 +525,19 @@ static void sb600_disable_hpet_bar(struct pci_dev *dev) } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATI, 0x4385, sb600_disable_hpet_bar); +#ifdef CONFIG_HPET_TIMER +static void sb600_hpet_quirk(struct pci_dev *dev) +{ + struct resource *r = &dev->resource[1]; + + if (r->flags & IORESOURCE_MEM && r->start == hpet_address) { + r->flags |= IORESOURCE_PCI_FIXED; + dev_info(&dev->dev, "reg 0x14 contains HPET; making it immovable\n"); + } +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, 0x4385, sb600_hpet_quirk); +#endif + /* * Twinhead H12Y needs us to block out a region otherwise we map devices * there and any access kills the box. 
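The sb600_hpet_quirk above works in tandem with the i386.c hunk that follows: a header fixup sets IORESOURCE_PCI_FIXED on the BAR, and the resource-claim pass then refuses to relocate it. As a hedged illustration of that pattern (not part of the patch), here is a minimal sketch for an imaginary device; the vendor/device IDs, function name, and choice of BAR 0 are all invented for the example:

#include <linux/pci.h>

/* Hypothetical quirk: pin BAR 0 of an imaginary 1234:5678 device so the
 * later claim/assign pass leaves its firmware-programmed address alone. */
static void example_fixed_bar_quirk(struct pci_dev *dev)
{
	struct resource *r = &dev->resource[0];	/* BAR 0, illustrative choice */

	if (r->flags & IORESOURCE_MEM) {
		r->flags |= IORESOURCE_PCI_FIXED;
		dev_info(&dev->dev, "BAR 0 marked immovable\n");
	}
}
DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, example_fixed_bar_quirk);

The key point is ordering: the fixup runs at header-probe time, before pcibios_allocate_dev_resources() decides whether a claimed-but-unassignable BAR may be cleared and re-assigned.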
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c index db6b1ab43255..a19ed92e74e4 100644 --- a/arch/x86/pci/i386.c +++ b/arch/x86/pci/i386.c @@ -271,11 +271,16 @@ static void pcibios_allocate_dev_resources(struct pci_dev *dev, int pass) "BAR %d: reserving %pr (d=%d, p=%d)\n", idx, r, disabled, pass); if (pci_claim_resource(dev, idx) < 0) { - /* We'll assign a new address later */ - pcibios_save_fw_addr(dev, - idx, r->start); - r->end -= r->start; - r->start = 0; + if (r->flags & IORESOURCE_PCI_FIXED) { + dev_info(&dev->dev, "BAR %d %pR is immovable\n", + idx, r); + } else { + /* We'll assign a new address later */ + pcibios_save_fw_addr(dev, + idx, r->start); + r->end -= r->start; + r->start = 0; + } } } } @@ -356,6 +361,12 @@ static int __init pcibios_assign_resources(void) return 0; } +/** + * called in fs_initcall (one below subsys_initcall), + * give a chance for motherboard reserve resources + */ +fs_initcall(pcibios_assign_resources); + void pcibios_resource_survey_bus(struct pci_bus *bus) { dev_printk(KERN_DEBUG, &bus->dev, "Allocating resources\n"); @@ -392,12 +403,6 @@ void __init pcibios_resource_survey(void) ioapic_insert_resources(); } -/** - * called in fs_initcall (one below subsys_initcall), - * give a chance for motherboard reserve resources - */ -fs_initcall(pcibios_assign_resources); - static const struct vm_operations_struct pci_mmap_ops = { .access = generic_access_phys, }; diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c index 00348980a3a6..e1f220e3ca68 100644 --- a/arch/x86/vdso/vdso32-setup.c +++ b/arch/x86/vdso/vdso32-setup.c @@ -39,6 +39,7 @@ #ifdef CONFIG_X86_64 #define vdso_enabled sysctl_vsyscall32 #define arch_setup_additional_pages syscall32_setup_pages +extern int sysctl_ldt16; #endif /* @@ -249,6 +250,13 @@ static struct ctl_table abi_table2[] = { .mode = 0644, .proc_handler = proc_dointvec }, + { + .procname = "ldt16", + .data = &sysctl_ldt16, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec + }, {} }; diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index c34bfc4bbe7f..f17b29210ac4 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -1339,6 +1339,7 @@ xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr) static struct notifier_block xen_panic_block = { .notifier_call= xen_panic_event, + .priority = INT_MIN }; int xen_panic_handler_init(void) diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 86e02eabb640..6f6e15d28466 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -2510,6 +2510,95 @@ void __init xen_hvm_init_mmu_ops(void) } #endif +#ifdef CONFIG_XEN_PVH +/* + * Map foreign gfn (fgfn), to local pfn (lpfn). This for the user + * space creating new guest on pvh dom0 and needing to map domU pages. 
+ */ +static int xlate_add_to_p2m(unsigned long lpfn, unsigned long fgfn, + unsigned int domid) +{ + int rc, err = 0; + xen_pfn_t gpfn = lpfn; + xen_ulong_t idx = fgfn; + + struct xen_add_to_physmap_range xatp = { + .domid = DOMID_SELF, + .foreign_domid = domid, + .size = 1, + .space = XENMAPSPACE_gmfn_foreign, + }; + set_xen_guest_handle(xatp.idxs, &idx); + set_xen_guest_handle(xatp.gpfns, &gpfn); + set_xen_guest_handle(xatp.errs, &err); + + rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp); + if (rc < 0) + return rc; + return err; +} + +static int xlate_remove_from_p2m(unsigned long spfn, int count) +{ + struct xen_remove_from_physmap xrp; + int i, rc; + + for (i = 0; i < count; i++) { + xrp.domid = DOMID_SELF; + xrp.gpfn = spfn+i; + rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp); + if (rc) + break; + } + return rc; +} + +struct xlate_remap_data { + unsigned long fgfn; /* foreign domain's gfn */ + pgprot_t prot; + domid_t domid; + int index; + struct page **pages; +}; + +static int xlate_map_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr, + void *data) +{ + int rc; + struct xlate_remap_data *remap = data; + unsigned long pfn = page_to_pfn(remap->pages[remap->index++]); + pte_t pteval = pte_mkspecial(pfn_pte(pfn, remap->prot)); + + rc = xlate_add_to_p2m(pfn, remap->fgfn, remap->domid); + if (rc) + return rc; + native_set_pte(ptep, pteval); + + return 0; +} + +static int xlate_remap_gfn_range(struct vm_area_struct *vma, + unsigned long addr, unsigned long mfn, + int nr, pgprot_t prot, unsigned domid, + struct page **pages) +{ + int err; + struct xlate_remap_data pvhdata; + + BUG_ON(!pages); + + pvhdata.fgfn = mfn; + pvhdata.prot = prot; + pvhdata.domid = domid; + pvhdata.index = 0; + pvhdata.pages = pages; + err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT, + xlate_map_pte_fn, &pvhdata); + flush_tlb_all(); + return err; +} +#endif + #define REMAP_BATCH_SIZE 16 struct remap_data { @@ -2522,7 +2611,7 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr, void *data) { struct remap_data *rmd = data; - pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot)); + pte_t pte = pte_mkspecial(mfn_pte(rmd->mfn++, rmd->prot)); rmd->mmu_update->ptr = virt_to_machine(ptep).maddr; rmd->mmu_update->val = pte_val_ma(pte); @@ -2544,13 +2633,18 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma, unsigned long range; int err = 0; - if (xen_feature(XENFEAT_auto_translated_physmap)) - return -EINVAL; - - prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP); - BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO))); + if (xen_feature(XENFEAT_auto_translated_physmap)) { +#ifdef CONFIG_XEN_PVH + /* We need to update the local page tables and the xen HAP */ + return xlate_remap_gfn_range(vma, addr, mfn, nr, prot, + domid, pages); +#else + return -EINVAL; +#endif + } + rmd.mfn = mfn; rmd.prot = prot; @@ -2588,6 +2682,25 @@ int xen_unmap_domain_mfn_range(struct vm_area_struct *vma, if (!pages || !xen_feature(XENFEAT_auto_translated_physmap)) return 0; +#ifdef CONFIG_XEN_PVH + while (numpgs--) { + /* + * The mmu has already cleaned up the process mmu + * resources at this point (lookup_address will return + * NULL). 
+ */ + unsigned long pfn = page_to_pfn(pages[numpgs]); + + xlate_remove_from_p2m(pfn, 1); + } + /* + * We don't need to flush tlbs because as part of + * xlate_remove_from_p2m, the hypervisor will do tlb flushes + * after removing the p2m entries from the EPT/NPT + */ + return 0; +#else return -EINVAL; +#endif } EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range); diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 85e5d78c9874..9bb3d82ffec8 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -36,7 +36,7 @@ * pfn_to_mfn(0xc0000)=0xc0000 * * The benefit of this is, that we can assume for non-RAM regions (think - * PCI BARs, or ACPI spaces), we can create mappings easily b/c we + * PCI BARs, or ACPI spaces), we can create mappings easily because we * get the PFN value to match the MFN. * * For this to work efficiently we have one new page p2m_identity and @@ -60,7 +60,7 @@ * There is also a digram of the P2M at the end that can help. * Imagine your E820 looking as so: * - * 1GB 2GB + * 1GB 2GB 4GB * /-------------------+---------\/----\ /----------\ /---+-----\ * | System RAM | Sys RAM ||ACPI| | reserved | | Sys RAM | * \-------------------+---------/\----/ \----------/ \---+-----/ @@ -77,9 +77,8 @@ * of the PFN and the end PFN (263424 and 512256 respectively). The first step * is to reserve_brk a top leaf page if the p2m[1] is missing. The top leaf page * covers 512^2 of page estate (1GB) and in case the start or end PFN is not - * aligned on 512^2*PAGE_SIZE (1GB) we loop on aligned 1GB PFNs from start pfn - * to end pfn. We reserve_brk top leaf pages if they are missing (means they - * point to p2m_mid_missing). + * aligned on 512^2*PAGE_SIZE (1GB) we reserve_brk new middle and leaf pages as + * required to split any existing p2m_mid_missing middle pages. * * With the E820 example above, 263424 is not 1GB aligned so we allocate a * reserve_brk page which will cover the PFNs estate from 0x40000 to 0x80000. @@ -88,7 +87,7 @@ * Next stage is to determine if we need to do a more granular boundary check * on the 4MB (or 2MB depending on architecture) off the start and end pfn's. * We check if the start pfn and end pfn violate that boundary check, and if - * so reserve_brk a middle (p2m[x][y]) leaf page. This way we have a much finer + * so reserve_brk a (p2m[x][y]) leaf page. This way we have a much finer * granularity of setting which PFNs are missing and which ones are identity. * In our example 263424 and 512256 both fail the check so we reserve_brk two * pages. Populate them with INVALID_P2M_ENTRY (so they both have "missing" @@ -102,9 +101,10 @@ * * The next step is to walk from the start pfn to the end pfn setting * the IDENTITY_FRAME_BIT on each PFN. This is done in set_phys_range_identity. - * If we find that the middle leaf is pointing to p2m_missing we can swap it - * over to p2m_identity - this way covering 4MB (or 2MB) PFN space. At this - * point we do not need to worry about boundary aligment (so no need to + * If we find that the middle entry is pointing to p2m_missing we can swap it + * over to p2m_identity - this way covering 4MB (or 2MB) PFN space (and + * similarly swapping p2m_mid_missing for p2m_mid_identity for larger regions). + * At this point we do not need to worry about boundary aligment (so no need to * reserve_brk a middle page, figure out which PFNs are "missing" and which * ones are identity), as that has been done earlier. 
If we find that the * middle leaf is not occupied by p2m_identity or p2m_missing, we dereference @@ -118,6 +118,9 @@ * considered missing). In our case, p2m[1][2][0->255] and p2m[1][488][257->511] * contain the INVALID_P2M_ENTRY value and are considered "missing." * + * Finally, the region beyond the end of of the E820 (4 GB in this example) + * is set to be identity (in case there are MMIO regions placed here). + * * This is what the p2m ends up looking (for the E820 above) with this * fabulous drawing: * @@ -129,21 +132,27 @@ * |-----| \ | [p2m_identity]+\\ | .... | * | 2 |--\ \-------------------->| ... | \\ \----------------/ * |-----| \ \---------------/ \\ - * | 3 |\ \ \\ p2m_identity - * |-----| \ \-------------------->/---------------\ /-----------------\ - * | .. +->+ | [p2m_identity]+-->| ~0, ~0, ~0, ... | - * \-----/ / | [p2m_identity]+-->| ..., ~0 | - * / /---------------\ | .... | \-----------------/ - * / | IDENTITY[@0] | /-+-[x], ~0, ~0.. | - * / | IDENTITY[@256]|<----/ \---------------/ - * / | ~0, ~0, .... | - * | \---------------/ - * | - * p2m_mid_missing p2m_missing - * /-----------------\ /------------\ - * | [p2m_missing] +---->| ~0, ~0, ~0 | - * | [p2m_missing] +---->| ..., ~0 | - * \-----------------/ \------------/ + * | 3 |-\ \ \\ p2m_identity [1] + * |-----| \ \-------------------->/---------------\ /-----------------\ + * | .. |\ | | [p2m_identity]+-->| ~0, ~0, ~0, ... | + * \-----/ | | | [p2m_identity]+-->| ..., ~0 | + * | | | .... | \-----------------/ + * | | +-[x], ~0, ~0.. +\ + * | | \---------------/ \ + * | | \-> /---------------\ + * | V p2m_mid_missing p2m_missing | IDENTITY[@0] | + * | /-----------------\ /------------\ | IDENTITY[@256]| + * | | [p2m_missing] +---->| ~0, ~0, ...| | ~0, ~0, .... | + * | | [p2m_missing] +---->| ..., ~0 | \---------------/ + * | | ... | \------------/ + * | \-----------------/ + * | + * | p2m_mid_identity + * | /-----------------\ + * \-->| [p2m_identity] +---->[1] + * | [p2m_identity] +---->[1] + * | ... | + * \-----------------/ * * where ~0 is INVALID_P2M_ENTRY. IDENTITY is (PFN | IDENTITY_BIT) */ @@ -187,13 +196,15 @@ static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE); static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE); static RESERVE_BRK_ARRAY(unsigned long, p2m_identity, P2M_PER_PAGE); +static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_identity, P2M_MID_PER_PAGE); +static RESERVE_BRK_ARRAY(unsigned long, p2m_mid_identity_mfn, P2M_MID_PER_PAGE); RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE))); RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE))); /* We might hit two boundary violations at the start and end, at max each * boundary violation will require three middle nodes. */ -RESERVE_BRK(p2m_mid_identity, PAGE_SIZE * 2 * 3); +RESERVE_BRK(p2m_mid_extra, PAGE_SIZE * 2 * 3); /* When we populate back during bootup, the amount of pages can vary. The * max we have is seen is 395979, but that does not mean it can't be more. 
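To make the tree structure described above concrete, here is a hedged sketch of the three-level walk these tables support. It uses the index helpers and shared pages that really exist in p2m.c, but it is only the shape of the lookup, not the actual get_phys_to_machine() body (which also consults the MFN copies and handles the missing/boundary cases):

/* Illustrative only: p2m_top, p2m_identity and the index helpers are
 * real p2m.c symbols; error handling and mid-level checks are elided. */
static unsigned long p2m_lookup_sketch(unsigned long pfn)
{
	unsigned topidx = p2m_top_index(pfn);	/* selects a middle page (1GB) */
	unsigned mididx = p2m_mid_index(pfn);	/* selects a leaf page (4MB)  */
	unsigned idx    = p2m_index(pfn);	/* entry within the leaf       */

	/* The shared identity pages mark whole 4MB (leaf) or 1GB (middle)
	 * ranges as 1:1 without allocating per-range tables. */
	if (p2m_top[topidx][mididx] == p2m_identity)
		return IDENTITY_FRAME(pfn);

	return p2m_top[topidx][mididx][idx];	/* ~0 means "missing" */
}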
@@ -242,20 +253,20 @@ static void p2m_top_mfn_p_init(unsigned long **top) top[i] = p2m_mid_missing_mfn; } -static void p2m_mid_init(unsigned long **mid) +static void p2m_mid_init(unsigned long **mid, unsigned long *leaf) { unsigned i; for (i = 0; i < P2M_MID_PER_PAGE; i++) - mid[i] = p2m_missing; + mid[i] = leaf; } -static void p2m_mid_mfn_init(unsigned long *mid) +static void p2m_mid_mfn_init(unsigned long *mid, unsigned long *leaf) { unsigned i; for (i = 0; i < P2M_MID_PER_PAGE; i++) - mid[i] = virt_to_mfn(p2m_missing); + mid[i] = virt_to_mfn(leaf); } static void p2m_init(unsigned long *p2m) @@ -286,7 +297,9 @@ void __ref xen_build_mfn_list_list(void) /* Pre-initialize p2m_top_mfn to be completely missing */ if (p2m_top_mfn == NULL) { p2m_mid_missing_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE); - p2m_mid_mfn_init(p2m_mid_missing_mfn); + p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing); + p2m_mid_identity_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE); + p2m_mid_mfn_init(p2m_mid_identity_mfn, p2m_identity); p2m_top_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE); p2m_top_mfn_p_init(p2m_top_mfn_p); @@ -295,7 +308,8 @@ void __ref xen_build_mfn_list_list(void) p2m_top_mfn_init(p2m_top_mfn); } else { /* Reinitialise, mfn's all change after migration */ - p2m_mid_mfn_init(p2m_mid_missing_mfn); + p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing); + p2m_mid_mfn_init(p2m_mid_identity_mfn, p2m_identity); } for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += P2M_PER_PAGE) { @@ -327,7 +341,7 @@ void __ref xen_build_mfn_list_list(void) * it too late. */ mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE); - p2m_mid_mfn_init(mid_mfn_p); + p2m_mid_mfn_init(mid_mfn_p, p2m_missing); p2m_top_mfn_p[topidx] = mid_mfn_p; } @@ -365,16 +379,17 @@ void __init xen_build_dynamic_phys_to_machine(void) p2m_missing = extend_brk(PAGE_SIZE, PAGE_SIZE); p2m_init(p2m_missing); + p2m_identity = extend_brk(PAGE_SIZE, PAGE_SIZE); + p2m_init(p2m_identity); p2m_mid_missing = extend_brk(PAGE_SIZE, PAGE_SIZE); - p2m_mid_init(p2m_mid_missing); + p2m_mid_init(p2m_mid_missing, p2m_missing); + p2m_mid_identity = extend_brk(PAGE_SIZE, PAGE_SIZE); + p2m_mid_init(p2m_mid_identity, p2m_identity); p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE); p2m_top_init(p2m_top); - p2m_identity = extend_brk(PAGE_SIZE, PAGE_SIZE); - p2m_init(p2m_identity); - /* * The domain builder gives us a pre-constructed p2m array in * mfn_list for all the pages initially given to us, so we just @@ -386,7 +401,7 @@ void __init xen_build_dynamic_phys_to_machine(void) if (p2m_top[topidx] == p2m_mid_missing) { unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE); - p2m_mid_init(mid); + p2m_mid_init(mid, p2m_missing); p2m_top[topidx] = mid; } @@ -492,7 +507,7 @@ unsigned long get_phys_to_machine(unsigned long pfn) unsigned topidx, mididx, idx; if (unlikely(pfn >= MAX_P2M_PFN)) - return INVALID_P2M_ENTRY; + return IDENTITY_FRAME(pfn); topidx = p2m_top_index(pfn); mididx = p2m_mid_index(pfn); @@ -545,7 +560,7 @@ static bool alloc_p2m(unsigned long pfn) if (!mid) return false; - p2m_mid_init(mid); + p2m_mid_init(mid, p2m_missing); if (cmpxchg(top_p, p2m_mid_missing, mid) != p2m_mid_missing) free_p2m_page(mid); @@ -565,7 +580,7 @@ static bool alloc_p2m(unsigned long pfn) if (!mid_mfn) return false; - p2m_mid_mfn_init(mid_mfn); + p2m_mid_mfn_init(mid_mfn, p2m_missing); missing_mfn = virt_to_mfn(p2m_mid_missing_mfn); mid_mfn_mfn = virt_to_mfn(mid_mfn); @@ -596,7 +611,7 @@ static bool alloc_p2m(unsigned long pfn) return true; } -static bool __init early_alloc_p2m_middle(unsigned long pfn, bool 
check_boundary) +static bool __init early_alloc_p2m(unsigned long pfn, bool check_boundary) { unsigned topidx, mididx, idx; unsigned long *p2m; @@ -638,7 +653,7 @@ static bool __init early_alloc_p2m_middle(unsigned long pfn, bool check_boundary return true; } -static bool __init early_alloc_p2m(unsigned long pfn) +static bool __init early_alloc_p2m_middle(unsigned long pfn) { unsigned topidx = p2m_top_index(pfn); unsigned long *mid_mfn_p; @@ -649,7 +664,7 @@ static bool __init early_alloc_p2m(unsigned long pfn) if (mid == p2m_mid_missing) { mid = extend_brk(PAGE_SIZE, PAGE_SIZE); - p2m_mid_init(mid); + p2m_mid_init(mid, p2m_missing); p2m_top[topidx] = mid; @@ -658,12 +673,12 @@ static bool __init early_alloc_p2m(unsigned long pfn) /* And the save/restore P2M tables.. */ if (mid_mfn_p == p2m_mid_missing_mfn) { mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE); - p2m_mid_mfn_init(mid_mfn_p); + p2m_mid_mfn_init(mid_mfn_p, p2m_missing); p2m_top_mfn_p[topidx] = mid_mfn_p; p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p); /* Note: we don't set mid_mfn_p[midix] here, - * look in early_alloc_p2m_middle */ + * look in early_alloc_p2m() */ } return true; } @@ -739,7 +754,7 @@ found: /* This shouldn't happen */ if (WARN_ON(p2m_top[topidx] == p2m_mid_missing)) - early_alloc_p2m(set_pfn); + early_alloc_p2m_middle(set_pfn); if (WARN_ON(p2m_top[topidx][mididx] != p2m_missing)) return false; @@ -754,13 +769,13 @@ found: bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn) { if (unlikely(!__set_phys_to_machine(pfn, mfn))) { - if (!early_alloc_p2m(pfn)) + if (!early_alloc_p2m_middle(pfn)) return false; if (early_can_reuse_p2m_middle(pfn, mfn)) return __set_phys_to_machine(pfn, mfn); - if (!early_alloc_p2m_middle(pfn, false /* boundary crossover OK!*/)) + if (!early_alloc_p2m(pfn, false /* boundary crossover OK!*/)) return false; if (!__set_phys_to_machine(pfn, mfn)) @@ -769,12 +784,30 @@ bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn) return true; } + +static void __init early_split_p2m(unsigned long pfn) +{ + unsigned long mididx, idx; + + mididx = p2m_mid_index(pfn); + idx = p2m_index(pfn); + + /* + * Allocate new middle and leaf pages if this pfn lies in the + * middle of one. + */ + if (mididx || idx) + early_alloc_p2m_middle(pfn); + if (idx) + early_alloc_p2m(pfn, false); +} + unsigned long __init set_phys_range_identity(unsigned long pfn_s, unsigned long pfn_e) { unsigned long pfn; - if (unlikely(pfn_s >= MAX_P2M_PFN || pfn_e >= MAX_P2M_PFN)) + if (unlikely(pfn_s >= MAX_P2M_PFN)) return 0; if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) @@ -783,19 +816,30 @@ unsigned long __init set_phys_range_identity(unsigned long pfn_s, if (pfn_s > pfn_e) return 0; - for (pfn = (pfn_s & ~(P2M_MID_PER_PAGE * P2M_PER_PAGE - 1)); - pfn < ALIGN(pfn_e, (P2M_MID_PER_PAGE * P2M_PER_PAGE)); - pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE) - { - WARN_ON(!early_alloc_p2m(pfn)); - } + if (pfn_e > MAX_P2M_PFN) + pfn_e = MAX_P2M_PFN; - early_alloc_p2m_middle(pfn_s, true); - early_alloc_p2m_middle(pfn_e, true); + early_split_p2m(pfn_s); + early_split_p2m(pfn_e); + + for (pfn = pfn_s; pfn < pfn_e;) { + unsigned topidx = p2m_top_index(pfn); + unsigned mididx = p2m_mid_index(pfn); - for (pfn = pfn_s; pfn < pfn_e; pfn++) if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn))) break; + pfn++; + + /* + * If the PFN was set to a middle or leaf identity + * page the remainder must also be identity, so skip + * ahead to the next middle or leaf entry. 
+ */ + if (p2m_top[topidx] == p2m_mid_identity) + pfn = ALIGN(pfn, P2M_MID_PER_PAGE * P2M_PER_PAGE); + else if (p2m_top[topidx][mididx] == p2m_identity) + pfn = ALIGN(pfn, P2M_PER_PAGE); + } if (!WARN((pfn - pfn_s) != (pfn_e - pfn_s), "Identity mapping failed. We are %ld short of 1-1 mappings!\n", @@ -825,8 +869,22 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn) /* For sparse holes were the p2m leaf has real PFN along with * PCI holes, stick in the PFN as the MFN value. + * + * set_phys_range_identity() will have allocated new middle + * and leaf pages as required so an existing p2m_mid_missing + * or p2m_missing mean that whole range will be identity so + * these can be switched to p2m_mid_identity or p2m_identity. */ if (mfn != INVALID_P2M_ENTRY && (mfn & IDENTITY_FRAME_BIT)) { + if (p2m_top[topidx] == p2m_mid_identity) + return true; + + if (p2m_top[topidx] == p2m_mid_missing) { + WARN_ON(cmpxchg(&p2m_top[topidx], p2m_mid_missing, + p2m_mid_identity) != p2m_mid_missing); + return true; + } + if (p2m_top[topidx][mididx] == p2m_identity) return true; diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 0982233b9b84..210426a26cc0 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -89,10 +89,10 @@ static void __init xen_add_extra_mem(u64 start, u64 size) for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) { unsigned long mfn = pfn_to_mfn(pfn); - if (WARN(mfn == pfn, "Trying to over-write 1-1 mapping (pfn: %lx)\n", pfn)) + if (WARN_ONCE(mfn == pfn, "Trying to over-write 1-1 mapping (pfn: %lx)\n", pfn)) continue; - WARN(mfn != INVALID_P2M_ENTRY, "Trying to remove %lx which has %lx mfn!\n", - pfn, mfn); + WARN_ONCE(mfn != INVALID_P2M_ENTRY, "Trying to remove %lx which has %lx mfn!\n", + pfn, mfn); __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); } @@ -469,6 +469,15 @@ char * __init xen_memory_setup(void) } /* + * Set the rest as identity mapped, in case PCI BARs are + * located here. + * + * PFNs above MAX_P2M_PFN are considered identity mapped as + * well. + */ + set_phys_range_identity(map[i-1].addr / PAGE_SIZE, ~0ul); + + /* * In domU, the ISA region is normal, usable memory, but we * reserve ISA memory anyway because too many things poke * about in there. 
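The set_phys_range_identity() call added above leans on the identity-frame encoding the p2m comment summarizes as "IDENTITY is (PFN | IDENTITY_BIT)": an identity entry stores the PFN itself with a high tag bit set, which is exactly the test __set_phys_to_machine() performs. A minimal sketch of how a consumer recognizes and strips the tag (the real IDENTITY_FRAME_BIT/INVALID_P2M_ENTRY definitions live in asm/xen/page.h; the helper names here are made up):

/* Sketch of the encoding only; not part of the patch. */
static inline bool sketch_mfn_is_identity(unsigned long mfn)
{
	return mfn != INVALID_P2M_ENTRY && (mfn & IDENTITY_FRAME_BIT);
}

static inline unsigned long sketch_identity_to_pfn(unsigned long mfn)
{
	return mfn & ~IDENTITY_FRAME_BIT;	/* drop the tag; PFN == MFN */
}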
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c index 45329c8c226e..c4df9dbd63b7 100644 --- a/arch/x86/xen/suspend.c +++ b/arch/x86/xen/suspend.c @@ -12,8 +12,10 @@ #include "xen-ops.h" #include "mmu.h" -void xen_arch_pre_suspend(void) +static void xen_pv_pre_suspend(void) { + xen_mm_pin_all(); + xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn); xen_start_info->console.domU.mfn = mfn_to_pfn(xen_start_info->console.domU.mfn); @@ -26,7 +28,7 @@ void xen_arch_pre_suspend(void) BUG(); } -void xen_arch_hvm_post_suspend(int suspend_cancelled) +static void xen_hvm_post_suspend(int suspend_cancelled) { #ifdef CONFIG_XEN_PVHVM int cpu; @@ -41,7 +43,7 @@ void xen_arch_hvm_post_suspend(int suspend_cancelled) #endif } -void xen_arch_post_suspend(int suspend_cancelled) +static void xen_pv_post_suspend(int suspend_cancelled) { xen_build_mfn_list_list(); @@ -60,6 +62,21 @@ void xen_arch_post_suspend(int suspend_cancelled) xen_vcpu_restore(); } + xen_mm_unpin_all(); +} + +void xen_arch_pre_suspend(void) +{ + if (xen_pv_domain()) + xen_pv_pre_suspend(); +} + +void xen_arch_post_suspend(int cancelled) +{ + if (xen_pv_domain()) + xen_pv_post_suspend(cancelled); + else + xen_hvm_post_suspend(cancelled); } static void xen_vcpu_notify_restore(void *data) diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index 1cb6f4c37300..c834d4b231f0 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h @@ -31,6 +31,8 @@ void xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn); void xen_reserve_top(void); extern unsigned long xen_max_p2m_pfn; +void xen_mm_pin_all(void); +void xen_mm_unpin_all(void); void xen_set_pat(u64); char * __init xen_memory_setup(void); diff --git a/arch/xtensa/include/asm/pci.h b/arch/xtensa/include/asm/pci.h index 614be031a79a..5d52dc43dfe7 100644 --- a/arch/xtensa/include/asm/pci.h +++ b/arch/xtensa/include/asm/pci.h @@ -22,11 +22,6 @@ extern struct pci_controller* pcibios_alloc_controller(void); -static inline void pcibios_penalize_isa_irq(int irq) -{ - /* We don't do dynamic PCI IRQ allocation */ -} - /* Assume some values. 
(We should revise them, if necessary) */ #define PCIBIOS_MIN_IO 0x2000 diff --git a/block/Makefile b/block/Makefile index 20645e88fb57..a2ce6ac935ec 100644 --- a/block/Makefile +++ b/block/Makefile @@ -2,13 +2,15 @@ # Makefile for the kernel block layer # -obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \ +obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \ blk-flush.o blk-settings.o blk-ioc.o blk-map.o \ blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \ blk-iopoll.o blk-lib.o blk-mq.o blk-mq-tag.o \ blk-mq-sysfs.o blk-mq-cpu.o blk-mq-cpumap.o ioctl.o \ - genhd.o scsi_ioctl.o partition-generic.o partitions/ + genhd.o scsi_ioctl.o partition-generic.o ioprio.o \ + partitions/ +obj-$(CONFIG_BOUNCE) += bounce.o obj-$(CONFIG_BLK_DEV_BSG) += bsg.o obj-$(CONFIG_BLK_DEV_BSGLIB) += bsg-lib.o obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o @@ -20,3 +22,4 @@ obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o obj-$(CONFIG_BLK_DEV_INTEGRITY) += blk-integrity.o obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o +obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o diff --git a/fs/bio-integrity.c b/block/bio-integrity.c index 1c2ce0c87711..9e241063a616 100644 --- a/fs/bio-integrity.c +++ b/block/bio-integrity.c @@ -617,7 +617,7 @@ int bioset_integrity_create(struct bio_set *bs, int pool_size) if (!bs->bio_integrity_pool) return -1; - bs->bvec_integrity_pool = biovec_create_pool(bs, pool_size); + bs->bvec_integrity_pool = biovec_create_pool(pool_size); if (!bs->bvec_integrity_pool) { mempool_destroy(bs->bio_integrity_pool); return -1; diff --git a/fs/bio.c b/block/bio.c index 6f0362b77806..96d28eee8a1e 100644 --- a/fs/bio.c +++ b/block/bio.c @@ -305,6 +305,8 @@ static void bio_chain_endio(struct bio *bio, int error) /** * bio_chain - chain bio completions + * @bio: the target bio + * @parent: the @bio's parent bio * * The caller won't have a bi_end_io called when @bio completes - instead, * @parent's bi_end_io won't be called until both @parent and @bio have @@ -1011,8 +1013,7 @@ static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio, bio->bi_private = bmd; } -static struct bio_map_data *bio_alloc_map_data(int nr_segs, - unsigned int iov_count, +static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count, gfp_t gfp_mask) { if (iov_count > UIO_MAXIOV) @@ -1154,7 +1155,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q, if (offset) nr_pages++; - bmd = bio_alloc_map_data(nr_pages, iov_count, gfp_mask); + bmd = bio_alloc_map_data(iov_count, gfp_mask); if (!bmd) return ERR_PTR(-ENOMEM); @@ -1859,7 +1860,7 @@ EXPORT_SYMBOL_GPL(bio_trim); * create memory pools for biovec's in a bio_set. * use the global biovec slabs created for general use. 
*/ -mempool_t *biovec_create_pool(struct bio_set *bs, int pool_entries) +mempool_t *biovec_create_pool(int pool_entries) { struct biovec_slab *bp = bvec_slabs + BIOVEC_MAX_IDX; @@ -1922,7 +1923,7 @@ struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad) if (!bs->bio_pool) goto bad; - bs->bvec_pool = biovec_create_pool(bs, pool_size); + bs->bvec_pool = biovec_create_pool(pool_size); if (!bs->bvec_pool) goto bad; diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index e4a4145926f6..1039fb9ff5f5 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -451,7 +451,20 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css, struct blkcg_gq *blkg; int i; - mutex_lock(&blkcg_pol_mutex); + /* + * XXX: We invoke cgroup_add/rm_cftypes() under blkcg_pol_mutex + * which ends up putting cgroup's internal cgroup_tree_mutex under + * it; however, cgroup_tree_mutex is nested above cgroup file + * active protection and grabbing blkcg_pol_mutex from a cgroup + * file operation creates a possible circular dependency. cgroup + * internal locking is planned to go through further simplification + * and this issue should go away soon. For now, let's trylock + * blkcg_pol_mutex and restart the write on failure. + * + * http://lkml.kernel.org/g/5363C04B.4010400@oracle.com + */ + if (!mutex_trylock(&blkcg_pol_mutex)) + return restart_syscall(); spin_lock_irq(&blkcg->lock); /* diff --git a/block/blk-core.c b/block/blk-core.c index a0e3096c4bb5..40d654861c33 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -146,8 +146,8 @@ void blk_dump_rq_flags(struct request *rq, char *msg) printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n", (unsigned long long)blk_rq_pos(rq), blk_rq_sectors(rq), blk_rq_cur_sectors(rq)); - printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n", - rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq)); + printk(KERN_INFO " bio %p, biotail %p, len %u\n", + rq->bio, rq->biotail, blk_rq_bytes(rq)); if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { printk(KERN_INFO " cdb: "); @@ -251,8 +251,10 @@ void blk_sync_queue(struct request_queue *q) struct blk_mq_hw_ctx *hctx; int i; - queue_for_each_hw_ctx(q, hctx, i) - cancel_delayed_work_sync(&hctx->delayed_work); + queue_for_each_hw_ctx(q, hctx, i) { + cancel_delayed_work_sync(&hctx->run_work); + cancel_delayed_work_sync(&hctx->delay_work); + } } else { cancel_delayed_work_sync(&q->delay_work); } @@ -574,12 +576,9 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) if (!q) return NULL; - if (percpu_counter_init(&q->mq_usage_counter, 0)) - goto fail_q; - q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask); if (q->id < 0) - goto fail_c; + goto fail_q; q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; @@ -637,8 +636,6 @@ fail_bdi: bdi_destroy(&q->backing_dev_info); fail_id: ida_simple_remove(&blk_queue_ida, q->id); -fail_c: - percpu_counter_destroy(&q->mq_usage_counter); fail_q: kmem_cache_free(blk_requestq_cachep, q); return NULL; @@ -846,6 +843,47 @@ static void freed_request(struct request_list *rl, unsigned int flags) __freed_request(rl, sync ^ 1); } +int blk_update_nr_requests(struct request_queue *q, unsigned int nr) +{ + struct request_list *rl; + + spin_lock_irq(q->queue_lock); + q->nr_requests = nr; + blk_queue_congestion_threshold(q); + + /* congestion isn't cgroup aware and follows root blkcg for now */ + rl = &q->root_rl; + + if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q)) + blk_set_queue_congested(q, BLK_RW_SYNC); + else if (rl->count[BLK_RW_SYNC] 
< queue_congestion_off_threshold(q)) + blk_clear_queue_congested(q, BLK_RW_SYNC); + + if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q)) + blk_set_queue_congested(q, BLK_RW_ASYNC); + else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q)) + blk_clear_queue_congested(q, BLK_RW_ASYNC); + + blk_queue_for_each_rl(rl, q) { + if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { + blk_set_rl_full(rl, BLK_RW_SYNC); + } else { + blk_clear_rl_full(rl, BLK_RW_SYNC); + wake_up(&rl->wait[BLK_RW_SYNC]); + } + + if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { + blk_set_rl_full(rl, BLK_RW_ASYNC); + } else { + blk_clear_rl_full(rl, BLK_RW_ASYNC); + wake_up(&rl->wait[BLK_RW_ASYNC]); + } + } + + spin_unlock_irq(q->queue_lock); + return 0; +} + /* * Determine if elevator data should be initialized when allocating the * request associated with @bio. @@ -1135,7 +1173,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw, struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) { if (q->mq_ops) - return blk_mq_alloc_request(q, rw, gfp_mask); + return blk_mq_alloc_request(q, rw, gfp_mask, false); else return blk_old_get_request(q, rw, gfp_mask); } @@ -1231,12 +1269,15 @@ static void add_acct_request(struct request_queue *q, struct request *rq, static void part_round_stats_single(int cpu, struct hd_struct *part, unsigned long now) { + int inflight; + if (now == part->stamp) return; - if (part_in_flight(part)) { + inflight = part_in_flight(part); + if (inflight) { __part_stat_add(cpu, part, time_in_queue, - part_in_flight(part) * (now - part->stamp)); + inflight * (now - part->stamp)); __part_stat_add(cpu, part, io_ticks, (now - part->stamp)); } part->stamp = now; @@ -1360,7 +1401,6 @@ void blk_add_request_payload(struct request *rq, struct page *page, rq->__data_len = rq->resid_len = len; rq->nr_phys_segments = 1; - rq->buffer = bio_data(bio); } EXPORT_SYMBOL_GPL(blk_add_request_payload); @@ -1402,12 +1442,6 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req, bio->bi_next = req->bio; req->bio = bio; - /* - * may not be valid. if the low level driver said - * it didn't need a bounce buffer then it better - * not touch req->buffer either... - */ - req->buffer = bio_data(bio); req->__sector = bio->bi_iter.bi_sector; req->__data_len += bio->bi_iter.bi_size; req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); @@ -1432,6 +1466,8 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req, * added on the elevator at this point. In addition, we don't have * reliable access to the elevator outside queue lock. Only check basic * merging parameters without querying the elevator. + * + * Caller must ensure !blk_queue_nomerges(q) beforehand. */ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, unsigned int *request_count) @@ -1441,9 +1477,6 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, bool ret = false; struct list_head *plug_list; - if (blk_queue_nomerges(q)) - goto out; - plug = current->plug; if (!plug) goto out; @@ -1522,7 +1555,8 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio) * Check if we can merge with the plugged list before grabbing * any locks. 
*/ - if (blk_attempt_plug_merge(q, bio, &request_count)) + if (!blk_queue_nomerges(q) && + blk_attempt_plug_merge(q, bio, &request_count)) return; spin_lock_irq(q->queue_lock); @@ -1654,7 +1688,7 @@ static int __init fail_make_request_debugfs(void) struct dentry *dir = fault_create_debugfs_attr("fail_make_request", NULL, &fail_make_request); - return IS_ERR(dir) ? PTR_ERR(dir) : 0; + return PTR_ERR_OR_ZERO(dir); } late_initcall(fail_make_request_debugfs); @@ -2434,7 +2468,6 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) } req->__data_len -= total_bytes; - req->buffer = bio_data(req->bio); /* update sector only for requests with clear definition of sector */ if (req->cmd_type == REQ_TYPE_FS) @@ -2503,7 +2536,7 @@ EXPORT_SYMBOL_GPL(blk_unprep_request); /* * queue lock must be held */ -static void blk_finish_request(struct request *req, int error) +void blk_finish_request(struct request *req, int error) { if (blk_rq_tagged(req)) blk_queue_end_tag(req->q, req); @@ -2529,6 +2562,7 @@ static void blk_finish_request(struct request *req, int error) __blk_put_request(req->q, req); } } +EXPORT_SYMBOL(blk_finish_request); /** * blk_end_bidi_request - Complete a bidi request @@ -2752,10 +2786,9 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq, /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */ rq->cmd_flags |= bio->bi_rw & REQ_WRITE; - if (bio_has_data(bio)) { + if (bio_has_data(bio)) rq->nr_phys_segments = bio_phys_segments(q, bio); - rq->buffer = bio_data(bio); - } + rq->__data_len = bio->bi_iter.bi_size; rq->bio = rq->biotail = bio; @@ -2831,7 +2864,7 @@ EXPORT_SYMBOL_GPL(blk_rq_unprep_clone); /* * Copy attributes of the original request to the clone request. - * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied. + * The actual data parts (e.g. ->cmd, ->sense) are not copied. */ static void __blk_rq_prep_clone(struct request *dst, struct request *src) { @@ -2857,7 +2890,7 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src) * * Description: * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq. - * The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense) + * The actual data parts of @rq_src (e.g. ->cmd, ->sense) * are not copied, and copying such parts is the caller's responsibility. * Also, pages which the original bios are pointing to are not copied * and the cloned bios just point same pages. 
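Since blk_rq_prep_clone() deliberately leaves the "actual data parts" to its caller (and, after this series, there is no ->buffer to carry along at all), a request-based stacking driver copies ->cmd and ->sense itself; device-mapper's request path does something along these lines. A hedged sketch of that contract, with an invented function name:

/* Hypothetical caller: clone rq and copy the command parts that
 * blk_rq_prep_clone() intentionally does not touch. */
static int sketch_setup_clone(struct request *clone, struct request *rq,
			      struct bio_set *bs)
{
	int r;

	r = blk_rq_prep_clone(clone, rq, bs, GFP_ATOMIC, NULL, NULL);
	if (r)
		return r;

	/* data parts are the caller's responsibility */
	clone->cmd = rq->cmd;
	clone->cmd_len = rq->cmd_len;
	clone->sense = rq->sense;

	return 0;
}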
@@ -2904,20 +2937,25 @@ free_and_out: } EXPORT_SYMBOL_GPL(blk_rq_prep_clone); -int kblockd_schedule_work(struct request_queue *q, struct work_struct *work) +int kblockd_schedule_work(struct work_struct *work) { return queue_work(kblockd_workqueue, work); } EXPORT_SYMBOL(kblockd_schedule_work); -int kblockd_schedule_delayed_work(struct request_queue *q, - struct delayed_work *dwork, unsigned long delay) +int kblockd_schedule_delayed_work(struct delayed_work *dwork, + unsigned long delay) { return queue_delayed_work(kblockd_workqueue, dwork, delay); } EXPORT_SYMBOL(kblockd_schedule_delayed_work); -#define PLUG_MAGIC 0x91827364 +int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, + unsigned long delay) +{ + return queue_delayed_work_on(cpu, kblockd_workqueue, dwork, delay); +} +EXPORT_SYMBOL(kblockd_schedule_delayed_work_on); /** * blk_start_plug - initialize blk_plug and track it inside the task_struct @@ -2937,7 +2975,6 @@ void blk_start_plug(struct blk_plug *plug) { struct task_struct *tsk = current; - plug->magic = PLUG_MAGIC; INIT_LIST_HEAD(&plug->list); INIT_LIST_HEAD(&plug->mq_list); INIT_LIST_HEAD(&plug->cb_list); @@ -3034,8 +3071,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) LIST_HEAD(list); unsigned int depth; - BUG_ON(plug->magic != PLUG_MAGIC); - flush_plug_callbacks(plug, from_schedule); if (!list_empty(&plug->mq_list)) diff --git a/block/blk-flush.c b/block/blk-flush.c index 43e6b4755e9a..ff87c664b7df 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -130,21 +130,13 @@ static void blk_flush_restore_request(struct request *rq) blk_clear_rq_complete(rq); } -static void mq_flush_run(struct work_struct *work) -{ - struct request *rq; - - rq = container_of(work, struct request, mq_flush_work); - - memset(&rq->csd, 0, sizeof(rq->csd)); - blk_mq_insert_request(rq, false, true, false); -} - static bool blk_flush_queue_rq(struct request *rq, bool add_front) { if (rq->q->mq_ops) { - INIT_WORK(&rq->mq_flush_work, mq_flush_run); - kblockd_schedule_work(rq->q, &rq->mq_flush_work); + struct request_queue *q = rq->q; + + blk_mq_add_to_requeue_list(rq, add_front); + blk_mq_kick_requeue_list(q); return false; } else { if (add_front) @@ -231,8 +223,10 @@ static void flush_end_io(struct request *flush_rq, int error) struct request *rq, *n; unsigned long flags = 0; - if (q->mq_ops) + if (q->mq_ops) { spin_lock_irqsave(&q->mq_flush_lock, flags); + q->flush_rq->cmd_flags = 0; + } running = &q->flush_queue[q->flush_running_idx]; BUG_ON(q->flush_pending_idx == q->flush_running_idx); @@ -306,23 +300,9 @@ static bool blk_kick_flush(struct request_queue *q) */ q->flush_pending_idx ^= 1; - if (q->mq_ops) { - struct blk_mq_ctx *ctx = first_rq->mq_ctx; - struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu); - - blk_mq_rq_init(hctx, q->flush_rq); - q->flush_rq->mq_ctx = ctx; - - /* - * Reuse the tag value from the fist waiting request, - * with blk-mq the tag is generated during request - * allocation and drivers can rely on it being inside - * the range they asked for. 
- */ - q->flush_rq->tag = first_rq->tag; - } else { - blk_rq_init(q, q->flush_rq); - } + blk_rq_init(q, q->flush_rq); + if (q->mq_ops) + blk_mq_clone_flush_request(q->flush_rq, first_rq); q->flush_rq->cmd_type = REQ_TYPE_FS; q->flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ; diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c index c11d24e379e2..d828b44a404b 100644 --- a/block/blk-iopoll.c +++ b/block/blk-iopoll.c @@ -64,12 +64,12 @@ EXPORT_SYMBOL(__blk_iopoll_complete); * iopoll handler will not be invoked again before blk_iopoll_sched_prep() * is called. **/ -void blk_iopoll_complete(struct blk_iopoll *iopoll) +void blk_iopoll_complete(struct blk_iopoll *iop) { unsigned long flags; local_irq_save(flags); - __blk_iopoll_complete(iopoll); + __blk_iopoll_complete(iop); local_irq_restore(flags); } EXPORT_SYMBOL(blk_iopoll_complete); diff --git a/block/blk-lib.c b/block/blk-lib.c index 97a733cf3d5f..8411be3c19d3 100644 --- a/block/blk-lib.c +++ b/block/blk-lib.c @@ -226,8 +226,8 @@ EXPORT_SYMBOL(blkdev_issue_write_same); * Generate and issue number of bios with zerofiled pages. */ -int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, - sector_t nr_sects, gfp_t gfp_mask) +static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, + sector_t nr_sects, gfp_t gfp_mask) { int ret; struct bio *bio; diff --git a/block/blk-map.c b/block/blk-map.c index f7b22bc21518..f890d4345b0c 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -155,7 +155,6 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq, if (!bio_flagged(bio, BIO_USER_MAPPED)) rq->cmd_flags |= REQ_COPY_USER; - rq->buffer = NULL; return 0; unmap_rq: blk_rq_unmap_user(bio); @@ -238,7 +237,6 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, blk_queue_bounce(q, &bio); bio_get(bio); blk_rq_bio_prep(q, rq, bio); - rq->buffer = NULL; return 0; } EXPORT_SYMBOL(blk_rq_map_user_iov); @@ -325,7 +323,6 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, } blk_queue_bounce(q, &rq->bio); - rq->buffer = NULL; return 0; } EXPORT_SYMBOL(blk_rq_map_kern); diff --git a/block/blk-merge.c b/block/blk-merge.c index 6c583f9c5b65..b3bf0df0f4c2 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -13,7 +13,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q, struct bio *bio) { struct bio_vec bv, bvprv = { NULL }; - int cluster, high, highprv = 1; + int cluster, high, highprv = 1, no_sg_merge; unsigned int seg_size, nr_phys_segs; struct bio *fbio, *bbio; struct bvec_iter iter; @@ -35,12 +35,21 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q, cluster = blk_queue_cluster(q); seg_size = 0; nr_phys_segs = 0; + no_sg_merge = test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags); + high = 0; for_each_bio(bio) { bio_for_each_segment(bv, bio, iter) { /* + * If SG merging is disabled, each bio vector is + * a segment + */ + if (no_sg_merge) + goto new_segment; + + /* * the trick here is making sure that a high page is - * never considered part of another segment, since that - * might change with the bounce page. + * never considered part of another segment, since + * that might change with the bounce page. 
*/ high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q); if (!high && !highprv && cluster) { @@ -84,11 +93,16 @@ void blk_recalc_rq_segments(struct request *rq) void blk_recount_segments(struct request_queue *q, struct bio *bio) { - struct bio *nxt = bio->bi_next; + if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags)) + bio->bi_phys_segments = bio->bi_vcnt; + else { + struct bio *nxt = bio->bi_next; + + bio->bi_next = NULL; + bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio); + bio->bi_next = nxt; + } - bio->bi_next = NULL; - bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio); - bio->bi_next = nxt; bio->bi_flags |= (1 << BIO_SEG_VALID); } EXPORT_SYMBOL(blk_recount_segments); diff --git a/block/blk-mq-cpu.c b/block/blk-mq-cpu.c index 136ef8643bba..bb3ed488f7b5 100644 --- a/block/blk-mq-cpu.c +++ b/block/blk-mq-cpu.c @@ -1,3 +1,8 @@ +/* + * CPU notifier helper code for blk-mq + * + * Copyright (C) 2013-2014 Jens Axboe + */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> @@ -18,14 +23,18 @@ static int blk_mq_main_cpu_notify(struct notifier_block *self, { unsigned int cpu = (unsigned long) hcpu; struct blk_mq_cpu_notifier *notify; + int ret = NOTIFY_OK; raw_spin_lock(&blk_mq_cpu_notify_lock); - list_for_each_entry(notify, &blk_mq_cpu_notify_list, list) - notify->notify(notify->data, action, cpu); + list_for_each_entry(notify, &blk_mq_cpu_notify_list, list) { + ret = notify->notify(notify->data, action, cpu); + if (ret != NOTIFY_OK) + break; + } raw_spin_unlock(&blk_mq_cpu_notify_lock); - return NOTIFY_OK; + return ret; } void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier) @@ -45,7 +54,7 @@ void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier) } void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier, - void (*fn)(void *, unsigned long, unsigned int), + int (*fn)(void *, unsigned long, unsigned int), void *data) { notifier->notify = fn; diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c index 097921329619..1065d7c65fa1 100644 --- a/block/blk-mq-cpumap.c +++ b/block/blk-mq-cpumap.c @@ -1,3 +1,8 @@ +/* + * CPU <-> hardware queue mapping helpers + * + * Copyright (C) 2013-2014 Jens Axboe + */ #include <linux/kernel.h> #include <linux/threads.h> #include <linux/module.h> @@ -80,19 +85,35 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues) return 0; } -unsigned int *blk_mq_make_queue_map(struct blk_mq_reg *reg) +unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set) { unsigned int *map; /* If cpus are offline, map them to first hctx */ map = kzalloc_node(sizeof(*map) * num_possible_cpus(), GFP_KERNEL, - reg->numa_node); + set->numa_node); if (!map) return NULL; - if (!blk_mq_update_queue_map(map, reg->nr_hw_queues)) + if (!blk_mq_update_queue_map(map, set->nr_hw_queues)) return map; kfree(map); return NULL; } + +/* + * We have no quick way of doing reverse lookups. This is only used at + * queue init time, so runtime isn't important. 
+ */ +int blk_mq_hw_queue_to_node(unsigned int *mq_map, unsigned int index) +{ + int i; + + for_each_possible_cpu(i) { + if (index == mq_map[i]) + return cpu_to_node(i); + } + + return NUMA_NO_NODE; +} diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c index b0ba264b0522..ed5217867555 100644 --- a/block/blk-mq-sysfs.c +++ b/block/blk-mq-sysfs.c @@ -203,59 +203,24 @@ static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx, return ret; } -static ssize_t blk_mq_hw_sysfs_ipi_show(struct blk_mq_hw_ctx *hctx, char *page) -{ - ssize_t ret; - - spin_lock(&hctx->lock); - ret = sprintf(page, "%u\n", !!(hctx->flags & BLK_MQ_F_SHOULD_IPI)); - spin_unlock(&hctx->lock); - - return ret; -} - -static ssize_t blk_mq_hw_sysfs_ipi_store(struct blk_mq_hw_ctx *hctx, - const char *page, size_t len) +static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page) { - struct blk_mq_ctx *ctx; - unsigned long ret; - unsigned int i; - - if (kstrtoul(page, 10, &ret)) { - pr_err("blk-mq-sysfs: invalid input '%s'\n", page); - return -EINVAL; - } - - spin_lock(&hctx->lock); - if (ret) - hctx->flags |= BLK_MQ_F_SHOULD_IPI; - else - hctx->flags &= ~BLK_MQ_F_SHOULD_IPI; - spin_unlock(&hctx->lock); - - hctx_for_each_ctx(hctx, ctx, i) - ctx->ipi_redirect = !!ret; - - return len; + return blk_mq_tag_sysfs_show(hctx->tags, page); } -static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page) +static ssize_t blk_mq_hw_sysfs_active_show(struct blk_mq_hw_ctx *hctx, char *page) { - return blk_mq_tag_sysfs_show(hctx->tags, page); + return sprintf(page, "%u\n", atomic_read(&hctx->nr_active)); } static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page) { - unsigned int i, queue_num, first = 1; + unsigned int i, first = 1; ssize_t ret = 0; blk_mq_disable_hotplug(); - for_each_online_cpu(i) { - queue_num = hctx->queue->mq_map[i]; - if (queue_num != hctx->queue_num) - continue; - + for_each_cpu(i, hctx->cpumask) { if (first) ret += sprintf(ret + page, "%u", i); else @@ -307,15 +272,14 @@ static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = { .attr = {.name = "dispatched", .mode = S_IRUGO }, .show = blk_mq_hw_sysfs_dispatched_show, }; +static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_active = { + .attr = {.name = "active", .mode = S_IRUGO }, + .show = blk_mq_hw_sysfs_active_show, +}; static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = { .attr = {.name = "pending", .mode = S_IRUGO }, .show = blk_mq_hw_sysfs_rq_list_show, }; -static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_ipi = { - .attr = {.name = "ipi_redirect", .mode = S_IRUGO | S_IWUSR}, - .show = blk_mq_hw_sysfs_ipi_show, - .store = blk_mq_hw_sysfs_ipi_store, -}; static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = { .attr = {.name = "tags", .mode = S_IRUGO }, .show = blk_mq_hw_sysfs_tags_show, @@ -330,9 +294,9 @@ static struct attribute *default_hw_ctx_attrs[] = { &blk_mq_hw_sysfs_run.attr, &blk_mq_hw_sysfs_dispatched.attr, &blk_mq_hw_sysfs_pending.attr, - &blk_mq_hw_sysfs_ipi.attr, &blk_mq_hw_sysfs_tags.attr, &blk_mq_hw_sysfs_cpus.attr, + &blk_mq_hw_sysfs_active.attr, NULL, }; @@ -363,6 +327,42 @@ static struct kobj_type blk_mq_hw_ktype = { .release = blk_mq_sysfs_release, }; +static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx) +{ + struct blk_mq_ctx *ctx; + int i; + + if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP)) + return; + + hctx_for_each_ctx(hctx, ctx, i) + kobject_del(&ctx->kobj); + + kobject_del(&hctx->kobj); +} + 
+static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx) +{ + struct request_queue *q = hctx->queue; + struct blk_mq_ctx *ctx; + int i, ret; + + if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP)) + return 0; + + ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num); + if (ret) + return ret; + + hctx_for_each_ctx(hctx, ctx, i) { + ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu); + if (ret) + break; + } + + return ret; +} + void blk_mq_unregister_disk(struct gendisk *disk) { struct request_queue *q = disk->queue; @@ -371,11 +371,11 @@ void blk_mq_unregister_disk(struct gendisk *disk) int i, j; queue_for_each_hw_ctx(q, hctx, i) { - hctx_for_each_ctx(hctx, ctx, j) { - kobject_del(&ctx->kobj); + blk_mq_unregister_hctx(hctx); + + hctx_for_each_ctx(hctx, ctx, j) kobject_put(&ctx->kobj); - } - kobject_del(&hctx->kobj); + kobject_put(&hctx->kobj); } @@ -386,15 +386,30 @@ void blk_mq_unregister_disk(struct gendisk *disk) kobject_put(&disk_to_dev(disk)->kobj); } +static void blk_mq_sysfs_init(struct request_queue *q) +{ + struct blk_mq_hw_ctx *hctx; + struct blk_mq_ctx *ctx; + int i, j; + + kobject_init(&q->mq_kobj, &blk_mq_ktype); + + queue_for_each_hw_ctx(q, hctx, i) { + kobject_init(&hctx->kobj, &blk_mq_hw_ktype); + + hctx_for_each_ctx(hctx, ctx, j) + kobject_init(&ctx->kobj, &blk_mq_ctx_ktype); + } +} + int blk_mq_register_disk(struct gendisk *disk) { struct device *dev = disk_to_dev(disk); struct request_queue *q = disk->queue; struct blk_mq_hw_ctx *hctx; - struct blk_mq_ctx *ctx; - int ret, i, j; + int ret, i; - kobject_init(&q->mq_kobj, &blk_mq_ktype); + blk_mq_sysfs_init(q); ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq"); if (ret < 0) @@ -403,20 +418,10 @@ int blk_mq_register_disk(struct gendisk *disk) kobject_uevent(&q->mq_kobj, KOBJ_ADD); queue_for_each_hw_ctx(q, hctx, i) { - kobject_init(&hctx->kobj, &blk_mq_hw_ktype); - ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", i); + hctx->flags |= BLK_MQ_F_SYSFS_UP; + ret = blk_mq_register_hctx(hctx); if (ret) break; - - if (!hctx->nr_ctx) - continue; - - hctx_for_each_ctx(hctx, ctx, j) { - kobject_init(&ctx->kobj, &blk_mq_ctx_ktype); - ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu); - if (ret) - break; - } } if (ret) { @@ -426,3 +431,26 @@ int blk_mq_register_disk(struct gendisk *disk) return 0; } + +void blk_mq_sysfs_unregister(struct request_queue *q) +{ + struct blk_mq_hw_ctx *hctx; + int i; + + queue_for_each_hw_ctx(q, hctx, i) + blk_mq_unregister_hctx(hctx); +} + +int blk_mq_sysfs_register(struct request_queue *q) +{ + struct blk_mq_hw_ctx *hctx; + int i, ret = 0; + + queue_for_each_hw_ctx(q, hctx, i) { + ret = blk_mq_register_hctx(hctx); + if (ret) + break; + } + + return ret; +} diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 83ae96c51a27..d90c4aeb7dd3 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -1,78 +1,345 @@ +/* + * Fast and scalable bitmap tagging variant. Uses sparser bitmaps spread + * over multiple cachelines to avoid ping-pong between multiple submitters + * or submitter and completer. Uses rolling wakeups to avoid falling off + * the scaling cliff when we run out of tags and have to start putting + * submitters to sleep. + * + * Uses active queue tracking to support fairer distribution of tags + * between multiple submitters when a shared tag map is used.
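+ * + * For example, a 256-tag map shared by four active queues caps each + * queue at max((256 + 3) / 4, 4) == 64 in-flight tags before its + * submitters must wait (see hctx_may_queue() below).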
+ * + * Copyright (C) 2013-2014 Jens Axboe + */ #include <linux/kernel.h> #include <linux/module.h> -#include <linux/percpu_ida.h> +#include <linux/random.h> #include <linux/blk-mq.h> #include "blk.h" #include "blk-mq.h" #include "blk-mq-tag.h" +static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt) +{ + int i; + + for (i = 0; i < bt->map_nr; i++) { + struct blk_align_bitmap *bm = &bt->map[i]; + int ret; + + ret = find_first_zero_bit(&bm->word, bm->depth); + if (ret < bm->depth) + return true; + } + + return false; +} + +bool blk_mq_has_free_tags(struct blk_mq_tags *tags) +{ + if (!tags) + return true; + + return bt_has_free_tags(&tags->bitmap_tags); +} + +static inline void bt_index_inc(unsigned int *index) +{ + *index = (*index + 1) & (BT_WAIT_QUEUES - 1); +} + /* - * Per tagged queue (tag address space) map + * If a previously inactive queue goes active, bump the active user count. */ -struct blk_mq_tags { - unsigned int nr_tags; - unsigned int nr_reserved_tags; - unsigned int nr_batch_move; - unsigned int nr_max_cache; +bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) +{ + if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) && + !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) + atomic_inc(&hctx->tags->active_queues); - struct percpu_ida free_tags; - struct percpu_ida reserved_tags; -}; + return true; +} -void blk_mq_wait_for_tags(struct blk_mq_tags *tags) +/* + * Wakeup all potentially sleeping on normal (non-reserved) tags + */ +static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags) { - int tag = blk_mq_get_tag(tags, __GFP_WAIT, false); - blk_mq_put_tag(tags, tag); + struct blk_mq_bitmap_tags *bt; + int i, wake_index; + + bt = &tags->bitmap_tags; + wake_index = bt->wake_index; + for (i = 0; i < BT_WAIT_QUEUES; i++) { + struct bt_wait_state *bs = &bt->bs[wake_index]; + + if (waitqueue_active(&bs->wait)) + wake_up(&bs->wait); + + bt_index_inc(&wake_index); + } } -bool blk_mq_has_free_tags(struct blk_mq_tags *tags) +/* + * If a previously busy queue goes inactive, potential waiters could now + * be allowed to queue. Wake them up and check. + */ +void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) +{ + struct blk_mq_tags *tags = hctx->tags; + + if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) + return; + + atomic_dec(&tags->active_queues); + + blk_mq_tag_wakeup_all(tags); +} + +/* + * For shared tag users, we track the number of currently active users + * and attempt to provide a fair share of the tag depth for each of them. + */ +static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx, + struct blk_mq_bitmap_tags *bt) +{ + unsigned int depth, users; + + if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED)) + return true; + if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) + return true; + + /* + * Don't try dividing an ant + */ + if (bt->depth == 1) + return true; + + users = atomic_read(&hctx->tags->active_queues); + if (!users) + return true; + + /* + * Allow at least some tags + */ + depth = max((bt->depth + users - 1) / users, 4U); + return atomic_read(&hctx->nr_active) < depth; +} + +static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag) { - return !tags || - percpu_ida_free_tags(&tags->free_tags, nr_cpu_ids) != 0; + int tag, org_last_tag, end; + + org_last_tag = last_tag; + end = bm->depth; + do { +restart: + tag = find_next_zero_bit(&bm->word, end, last_tag); + if (unlikely(tag >= end)) { + /* + * We started with an offset, start from 0 to + * exhaust the map. 
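+ * (e.g. with a depth of 64 and an initial offset of 10, we scan + * bits [10, 64) first, then wrap and scan [0, 10))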
+ */ + if (org_last_tag && last_tag) { + end = last_tag; + last_tag = 0; + goto restart; + } + return -1; + } + last_tag = tag + 1; + } while (test_and_set_bit_lock(tag, &bm->word)); + + return tag; } -static unsigned int __blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp) +/* + * Straightforward bitmap tag implementation, where each bit is a tag + * (cleared == free, and set == busy). The small twist is using per-cpu + * last_tag caches, which blk-mq stores in the blk_mq_ctx software queue + * contexts. This enables us to drastically limit the space searched, + * without dirtying an extra shared cacheline like we would if we stored + * the cache value inside the shared blk_mq_bitmap_tags structure. On top + * of that, each word of tags is in a separate cacheline. This means that + * multiple users will tend to stick to different cachelines, at least + * until the map is exhausted. + */ +static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt, + unsigned int *tag_cache) { + unsigned int last_tag, org_last_tag; + int index, i, tag; + + if (!hctx_may_queue(hctx, bt)) + return -1; + + last_tag = org_last_tag = *tag_cache; + index = TAG_TO_INDEX(bt, last_tag); + + for (i = 0; i < bt->map_nr; i++) { + tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag)); + if (tag != -1) { + tag += (index << bt->bits_per_word); + goto done; + } + + last_tag = 0; + if (++index >= bt->map_nr) + index = 0; + } + + *tag_cache = 0; + return -1; + + /* + * Only update the cache from the allocation path, if we ended + * up using the specific cached tag. + */ +done: + if (tag == org_last_tag) { + last_tag = tag + 1; + if (last_tag >= bt->depth - 1) + last_tag = 0; + + *tag_cache = last_tag; + } + + return tag; } +static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt, + struct blk_mq_hw_ctx *hctx) +{ + struct bt_wait_state *bs; + + if (!hctx) + return &bt->bs[0]; + + bs = &bt->bs[hctx->wait_index]; + bt_index_inc(&hctx->wait_index); + return bs; +} + +static int bt_get(struct blk_mq_bitmap_tags *bt, struct blk_mq_hw_ctx *hctx, + unsigned int *last_tag, gfp_t gfp) +{ + struct bt_wait_state *bs; + DEFINE_WAIT(wait); int tag; - tag = percpu_ida_alloc(&tags->free_tags, (gfp & __GFP_WAIT) ? - TASK_UNINTERRUPTIBLE : TASK_RUNNING); - if (tag < 0) - return BLK_MQ_TAG_FAIL; - return tag + tags->nr_reserved_tags; + tag = __bt_get(hctx, bt, last_tag); + if (tag != -1) + return tag; + + if (!(gfp & __GFP_WAIT)) + return -1; + + bs = bt_wait_ptr(bt, hctx); + do { + bool was_empty; + + was_empty = list_empty(&wait.task_list); + prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE); + + tag = __bt_get(hctx, bt, last_tag); + if (tag != -1) + break; + + if (was_empty) + atomic_set(&bs->wait_cnt, bt->wake_cnt); + + io_schedule(); + } while (1); + + finish_wait(&bs->wait, &wait); + return tag; +} + +static unsigned int __blk_mq_get_tag(struct blk_mq_tags *tags, + struct blk_mq_hw_ctx *hctx, + unsigned int *last_tag, gfp_t gfp) +{ + int tag; + + tag = bt_get(&tags->bitmap_tags, hctx, last_tag, gfp); + if (tag >= 0) + return tag + tags->nr_reserved_tags; + + return BLK_MQ_TAG_FAIL; } static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_tags *tags, gfp_t gfp) { - int tag; + int tag, zero = 0; if (unlikely(!tags->nr_reserved_tags)) { WARN_ON_ONCE(1); return BLK_MQ_TAG_FAIL; } - tag = percpu_ida_alloc(&tags->reserved_tags, (gfp & __GFP_WAIT) ? 
- TASK_UNINTERRUPTIBLE : TASK_RUNNING); + tag = bt_get(&tags->breserved_tags, NULL, &zero, gfp); if (tag < 0) return BLK_MQ_TAG_FAIL; + return tag; } -unsigned int blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp, bool reserved) +unsigned int blk_mq_get_tag(struct blk_mq_hw_ctx *hctx, unsigned int *last_tag, + gfp_t gfp, bool reserved) { if (!reserved) - return __blk_mq_get_tag(tags, gfp); + return __blk_mq_get_tag(hctx->tags, hctx, last_tag, gfp); - return __blk_mq_get_reserved_tag(tags, gfp); + return __blk_mq_get_reserved_tag(hctx->tags, gfp); +} + +static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt) +{ + int i, wake_index; + + wake_index = bt->wake_index; + for (i = 0; i < BT_WAIT_QUEUES; i++) { + struct bt_wait_state *bs = &bt->bs[wake_index]; + + if (waitqueue_active(&bs->wait)) { + if (wake_index != bt->wake_index) + bt->wake_index = wake_index; + + return bs; + } + + bt_index_inc(&wake_index); + } + + return NULL; +} + +static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag) +{ + const int index = TAG_TO_INDEX(bt, tag); + struct bt_wait_state *bs; + + /* + * The unlock memory barrier needs to order access to req in the + * free path against clearing the tag bit + */ + clear_bit_unlock(TAG_TO_BIT(bt, tag), &bt->map[index].word); + + bs = bt_wake_ptr(bt); + if (bs && atomic_dec_and_test(&bs->wait_cnt)) { + atomic_set(&bs->wait_cnt, bt->wake_cnt); + bt_index_inc(&bt->wake_index); + wake_up(&bs->wait); + } } static void __blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag) { BUG_ON(tag >= tags->nr_tags); - percpu_ida_free(&tags->free_tags, tag - tags->nr_reserved_tags); + bt_clear_tag(&tags->bitmap_tags, tag); } static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags, @@ -80,22 +347,43 @@ static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags, { BUG_ON(tag >= tags->nr_reserved_tags); - percpu_ida_free(&tags->reserved_tags, tag); + bt_clear_tag(&tags->breserved_tags, tag); } -void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag) +void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag, + unsigned int *last_tag) { - if (tag >= tags->nr_reserved_tags) - __blk_mq_put_tag(tags, tag); - else + struct blk_mq_tags *tags = hctx->tags; + + if (tag >= tags->nr_reserved_tags) { + const int real_tag = tag - tags->nr_reserved_tags; + + __blk_mq_put_tag(tags, real_tag); + *last_tag = real_tag; + } else __blk_mq_put_reserved_tag(tags, tag); } -static int __blk_mq_tag_iter(unsigned id, void *data) +static void bt_for_each_free(struct blk_mq_bitmap_tags *bt, + unsigned long *free_map, unsigned int off) { - unsigned long *tag_map = data; - __set_bit(id, tag_map); - return 0; + int i; + + for (i = 0; i < bt->map_nr; i++) { + struct blk_align_bitmap *bm = &bt->map[i]; + int bit = 0; + + do { + bit = find_next_zero_bit(&bm->word, bm->depth, bit); + if (bit >= bm->depth) + break; + + __set_bit(bit + off, free_map); + bit++; + } while (1); + + off += (1 << bt->bits_per_word); + } } void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, @@ -109,21 +397,128 @@ void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, if (!tag_map) return; - percpu_ida_for_each_free(&tags->free_tags, __blk_mq_tag_iter, tag_map); + bt_for_each_free(&tags->bitmap_tags, tag_map, tags->nr_reserved_tags); if (tags->nr_reserved_tags) - percpu_ida_for_each_free(&tags->reserved_tags, __blk_mq_tag_iter, - tag_map); + bt_for_each_free(&tags->breserved_tags, tag_map, 0); fn(data, tag_map); kfree(tag_map); } +EXPORT_SYMBOL(blk_mq_tag_busy_iter); + +static unsigned int 
bt_unused_tags(struct blk_mq_bitmap_tags *bt) +{ + unsigned int i, used; + + for (i = 0, used = 0; i < bt->map_nr; i++) { + struct blk_align_bitmap *bm = &bt->map[i]; + + used += bitmap_weight(&bm->word, bm->depth); + } + + return bt->depth - used; +} + +static void bt_update_count(struct blk_mq_bitmap_tags *bt, + unsigned int depth) +{ + unsigned int tags_per_word = 1U << bt->bits_per_word; + unsigned int map_depth = depth; + + if (depth) { + int i; + + for (i = 0; i < bt->map_nr; i++) { + bt->map[i].depth = min(map_depth, tags_per_word); + map_depth -= bt->map[i].depth; + } + } + + bt->wake_cnt = BT_WAIT_BATCH; + if (bt->wake_cnt > depth / 4) + bt->wake_cnt = max(1U, depth / 4); + + bt->depth = depth; +} + +static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth, + int node, bool reserved) +{ + int i; + + bt->bits_per_word = ilog2(BITS_PER_LONG); + + /* + * Depth can be zero for reserved tags, that's not a failure + * condition. + */ + if (depth) { + unsigned int nr, tags_per_word; + + tags_per_word = (1 << bt->bits_per_word); + + /* + * If the tag space is small, shrink the number of tags + * per word so we spread over a few cachelines, at least. + * If less than 4 tags, just forget about it, it's not + * going to work optimally anyway. + */ + if (depth >= 4) { + while (tags_per_word * 4 > depth) { + bt->bits_per_word--; + tags_per_word = (1 << bt->bits_per_word); + } + } + + nr = ALIGN(depth, tags_per_word) / tags_per_word; + bt->map = kzalloc_node(nr * sizeof(struct blk_align_bitmap), + GFP_KERNEL, node); + if (!bt->map) + return -ENOMEM; + + bt->map_nr = nr; + } + + bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL); + if (!bt->bs) { + kfree(bt->map); + return -ENOMEM; + } + + for (i = 0; i < BT_WAIT_QUEUES; i++) + init_waitqueue_head(&bt->bs[i].wait); + + bt_update_count(bt, depth); + return 0; +} + +static void bt_free(struct blk_mq_bitmap_tags *bt) +{ + kfree(bt->map); + kfree(bt->bs); +} + +static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags, + int node) +{ + unsigned int depth = tags->nr_tags - tags->nr_reserved_tags; + + if (bt_alloc(&tags->bitmap_tags, depth, node, false)) + goto enomem; + if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true)) + goto enomem; + + return tags; +enomem: + bt_free(&tags->bitmap_tags); + kfree(tags); + return NULL; +} struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags, unsigned int reserved_tags, int node) { - unsigned int nr_tags, nr_cache; struct blk_mq_tags *tags; - int ret; if (total_tags > BLK_MQ_TAG_MAX) { pr_err("blk-mq: tag depth too large\n"); @@ -134,73 +529,59 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags, if (!tags) return NULL; - nr_tags = total_tags - reserved_tags; - nr_cache = nr_tags / num_possible_cpus(); - - if (nr_cache < BLK_MQ_TAG_CACHE_MIN) - nr_cache = BLK_MQ_TAG_CACHE_MIN; - else if (nr_cache > BLK_MQ_TAG_CACHE_MAX) - nr_cache = BLK_MQ_TAG_CACHE_MAX; - tags->nr_tags = total_tags; tags->nr_reserved_tags = reserved_tags; - tags->nr_max_cache = nr_cache; - tags->nr_batch_move = max(1u, nr_cache / 2); - ret = __percpu_ida_init(&tags->free_tags, tags->nr_tags - - tags->nr_reserved_tags, - tags->nr_max_cache, - tags->nr_batch_move); - if (ret) - goto err_free_tags; + return blk_mq_init_bitmap_tags(tags, node); +} - if (reserved_tags) { - /* - * With max_cahe and batch set to 1, the allocator fallbacks to - * no cached. It's fine reserved tags allocation is slow. 
- */ - ret = __percpu_ida_init(&tags->reserved_tags, reserved_tags, - 1, 1); - if (ret) - goto err_reserved_tags; - } +void blk_mq_free_tags(struct blk_mq_tags *tags) +{ + bt_free(&tags->bitmap_tags); + bt_free(&tags->breserved_tags); + kfree(tags); +} - return tags; +void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag) +{ + unsigned int depth = tags->nr_tags - tags->nr_reserved_tags; -err_reserved_tags: - percpu_ida_destroy(&tags->free_tags); -err_free_tags: - kfree(tags); - return NULL; + *tag = prandom_u32() % depth; } -void blk_mq_free_tags(struct blk_mq_tags *tags) +int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth) { - percpu_ida_destroy(&tags->free_tags); - percpu_ida_destroy(&tags->reserved_tags); - kfree(tags); + tdepth -= tags->nr_reserved_tags; + if (tdepth > tags->nr_tags) + return -EINVAL; + + /* + * Don't need (or can't) update reserved tags here, they remain + * static and should never need resizing. + */ + bt_update_count(&tags->bitmap_tags, tdepth); + blk_mq_tag_wakeup_all(tags); + return 0; } ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page) { char *orig_page = page; - unsigned int cpu; + unsigned int free, res; if (!tags) return 0; - page += sprintf(page, "nr_tags=%u, reserved_tags=%u, batch_move=%u," - " max_cache=%u\n", tags->nr_tags, tags->nr_reserved_tags, - tags->nr_batch_move, tags->nr_max_cache); + page += sprintf(page, "nr_tags=%u, reserved_tags=%u, " + "bits_per_word=%u\n", + tags->nr_tags, tags->nr_reserved_tags, + tags->bitmap_tags.bits_per_word); - page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", - percpu_ida_free_tags(&tags->free_tags, nr_cpu_ids), - percpu_ida_free_tags(&tags->reserved_tags, nr_cpu_ids)); + free = bt_unused_tags(&tags->bitmap_tags); + res = bt_unused_tags(&tags->breserved_tags); - for_each_possible_cpu(cpu) { - page += sprintf(page, " cpu%02u: nr_free=%u\n", cpu, - percpu_ida_free_tags(&tags->free_tags, cpu)); - } + page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res); + page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues)); return page - orig_page; } diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index 947ba2c6148e..c959de58d2a5 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -1,17 +1,59 @@ #ifndef INT_BLK_MQ_TAG_H #define INT_BLK_MQ_TAG_H -struct blk_mq_tags; +#include "blk-mq.h" + +enum { + BT_WAIT_QUEUES = 8, + BT_WAIT_BATCH = 8, +}; + +struct bt_wait_state { + atomic_t wait_cnt; + wait_queue_head_t wait; +} ____cacheline_aligned_in_smp; + +#define TAG_TO_INDEX(bt, tag) ((tag) >> (bt)->bits_per_word) +#define TAG_TO_BIT(bt, tag) ((tag) & ((1 << (bt)->bits_per_word) - 1)) + +struct blk_mq_bitmap_tags { + unsigned int depth; + unsigned int wake_cnt; + unsigned int bits_per_word; + + unsigned int map_nr; + struct blk_align_bitmap *map; + + unsigned int wake_index; + struct bt_wait_state *bs; +}; + +/* + * Tag address space map. 
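+ * Holds the regular and reserved tag bitmaps, the tag -> request + * lookup table (rqs) and the backing pages the requests were + * allocated from.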
+ */ +struct blk_mq_tags { + unsigned int nr_tags; + unsigned int nr_reserved_tags; + + atomic_t active_queues; + + struct blk_mq_bitmap_tags bitmap_tags; + struct blk_mq_bitmap_tags breserved_tags; + + struct request **rqs; + struct list_head page_list; +}; + extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node); extern void blk_mq_free_tags(struct blk_mq_tags *tags); -extern unsigned int blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp, bool reserved); -extern void blk_mq_wait_for_tags(struct blk_mq_tags *tags); -extern void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag); -extern void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, void (*fn)(void *data, unsigned long *), void *data); +extern unsigned int blk_mq_get_tag(struct blk_mq_hw_ctx *hctx, unsigned int *last_tag, gfp_t gfp, bool reserved); +extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag, unsigned int *last_tag); extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags); extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page); +extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag); +extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth); enum { BLK_MQ_TAG_CACHE_MIN = 1, @@ -24,4 +66,23 @@ enum { BLK_MQ_TAG_MAX = BLK_MQ_TAG_FAIL - 1, }; +extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *); +extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *); + +static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) +{ + if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) + return false; + + return __blk_mq_tag_busy(hctx); +} + +static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) +{ + if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) + return; + + __blk_mq_tag_idle(hctx); +} + #endif diff --git a/block/blk-mq.c b/block/blk-mq.c index 1d2a9bdbee57..0f5879c42dcd 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1,3 +1,9 @@ +/* + * Block multiqueue core code + * + * Copyright (C) 2013-2014 Jens Axboe + * Copyright (C) 2013-2014 Christoph Hellwig + */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/backing-dev.h> @@ -56,38 +62,40 @@ static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx) { unsigned int i; - for (i = 0; i < hctx->nr_ctx_map; i++) - if (hctx->ctx_map[i]) + for (i = 0; i < hctx->ctx_map.map_size; i++) + if (hctx->ctx_map.map[i].word) return true; return false; } +static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx, + struct blk_mq_ctx *ctx) +{ + return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word]; +} + +#define CTX_TO_BIT(hctx, ctx) \ + ((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1)) + /* * Mark this ctx as having pending work in this hardware queue */ static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx) { - if (!test_bit(ctx->index_hw, hctx->ctx_map)) - set_bit(ctx->index_hw, hctx->ctx_map); + struct blk_align_bitmap *bm = get_bm(hctx, ctx); + + if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word)) + set_bit(CTX_TO_BIT(hctx, ctx), &bm->word); } -static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx, - gfp_t gfp, bool reserved) +static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx, + struct blk_mq_ctx *ctx) { - struct request *rq; - unsigned int tag; + struct blk_align_bitmap *bm = get_bm(hctx, ctx); - tag = blk_mq_get_tag(hctx->tags, gfp, reserved); - if (tag != BLK_MQ_TAG_FAIL) { - rq = hctx->rqs[tag]; - rq->tag = tag; - - return 
rq; - } - - return NULL; + clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word); } static int blk_mq_queue_enter(struct request_queue *q) @@ -186,78 +194,95 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx, if (blk_queue_io_stat(q)) rw_flags |= REQ_IO_STAT; + INIT_LIST_HEAD(&rq->queuelist); + /* csd/requeue_work/fifo_time is initialized before use */ + rq->q = q; rq->mq_ctx = ctx; - rq->cmd_flags = rw_flags; - rq->start_time = jiffies; + rq->cmd_flags |= rw_flags; + /* do not touch atomic flags, it needs atomic ops against the timer */ + rq->cpu = -1; + INIT_HLIST_NODE(&rq->hash); + RB_CLEAR_NODE(&rq->rb_node); + rq->rq_disk = NULL; + rq->part = NULL; +#ifdef CONFIG_BLK_CGROUP + rq->rl = NULL; set_start_time_ns(rq); + rq->io_start_time_ns = 0; +#endif + rq->nr_phys_segments = 0; +#if defined(CONFIG_BLK_DEV_INTEGRITY) + rq->nr_integrity_segments = 0; +#endif + rq->special = NULL; + /* tag was already set */ + rq->errors = 0; + + rq->extra_len = 0; + rq->sense_len = 0; + rq->resid_len = 0; + rq->sense = NULL; + + INIT_LIST_HEAD(&rq->timeout_list); + rq->end_io = NULL; + rq->end_io_data = NULL; + rq->next_rq = NULL; + ctx->rq_dispatched[rw_is_sync(rw_flags)]++; } -static struct request *blk_mq_alloc_request_pinned(struct request_queue *q, - int rw, gfp_t gfp, - bool reserved) +static struct request * +__blk_mq_alloc_request(struct request_queue *q, struct blk_mq_hw_ctx *hctx, + struct blk_mq_ctx *ctx, int rw, gfp_t gfp, bool reserved) { struct request *rq; + unsigned int tag; - do { - struct blk_mq_ctx *ctx = blk_mq_get_ctx(q); - struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu); + tag = blk_mq_get_tag(hctx, &ctx->last_tag, gfp, reserved); + if (tag != BLK_MQ_TAG_FAIL) { + rq = hctx->tags->rqs[tag]; - rq = __blk_mq_alloc_request(hctx, gfp & ~__GFP_WAIT, reserved); - if (rq) { - blk_mq_rq_ctx_init(q, ctx, rq, rw); - break; + rq->cmd_flags = 0; + if (blk_mq_tag_busy(hctx)) { + rq->cmd_flags = REQ_MQ_INFLIGHT; + atomic_inc(&hctx->nr_active); } - blk_mq_put_ctx(ctx); - if (!(gfp & __GFP_WAIT)) - break; - - __blk_mq_run_hw_queue(hctx); - blk_mq_wait_for_tags(hctx->tags); - } while (1); + rq->tag = tag; + blk_mq_rq_ctx_init(q, ctx, rq, rw); + return rq; + } - return rq; + return NULL; } -struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp) +struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, + bool reserved) { + struct blk_mq_ctx *ctx; + struct blk_mq_hw_ctx *hctx; struct request *rq; if (blk_mq_queue_enter(q)) return NULL; - rq = blk_mq_alloc_request_pinned(q, rw, gfp, false); - if (rq) - blk_mq_put_ctx(rq->mq_ctx); - return rq; -} - -struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, - gfp_t gfp) -{ - struct request *rq; + ctx = blk_mq_get_ctx(q); + hctx = q->mq_ops->map_queue(q, ctx->cpu); - if (blk_mq_queue_enter(q)) - return NULL; + rq = __blk_mq_alloc_request(q, hctx, ctx, rw, gfp & ~__GFP_WAIT, + reserved); + if (!rq && (gfp & __GFP_WAIT)) { + __blk_mq_run_hw_queue(hctx); + blk_mq_put_ctx(ctx); - rq = blk_mq_alloc_request_pinned(q, rw, gfp, true); - if (rq) - blk_mq_put_ctx(rq->mq_ctx); + ctx = blk_mq_get_ctx(q); + hctx = q->mq_ops->map_queue(q, ctx->cpu); + rq = __blk_mq_alloc_request(q, hctx, ctx, rw, gfp, reserved); + } + blk_mq_put_ctx(ctx); return rq; } -EXPORT_SYMBOL(blk_mq_alloc_reserved_request); - -/* - * Re-init and set pdu, if we have it - */ -void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq) -{ - blk_rq_init(hctx->queue, rq); - - if 
(hctx->cmd_size) - rq->special = blk_mq_rq_to_pdu(rq); -} +EXPORT_SYMBOL(blk_mq_alloc_request); static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, struct request *rq) @@ -265,9 +290,11 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx, const int tag = rq->tag; struct request_queue *q = rq->q; - blk_mq_rq_init(hctx, rq); - blk_mq_put_tag(hctx->tags, tag); + if (rq->cmd_flags & REQ_MQ_INFLIGHT) + atomic_dec(&hctx->nr_active); + clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags); + blk_mq_put_tag(hctx, tag, &ctx->last_tag); blk_mq_queue_exit(q); } @@ -283,20 +310,47 @@ void blk_mq_free_request(struct request *rq) __blk_mq_free_request(hctx, ctx, rq); } -bool blk_mq_end_io_partial(struct request *rq, int error, unsigned int nr_bytes) +/* + * Clone all relevant state from a request that has been put on hold in + * the flush state machine into the preallocated flush request that hangs + * off the request queue. + * + * For a driver the flush request should be invisible, that's why we are + * impersonating the original request here. + */ +void blk_mq_clone_flush_request(struct request *flush_rq, + struct request *orig_rq) { - if (blk_update_request(rq, error, blk_rq_bytes(rq))) - return true; + struct blk_mq_hw_ctx *hctx = + orig_rq->q->mq_ops->map_queue(orig_rq->q, orig_rq->mq_ctx->cpu); + + flush_rq->mq_ctx = orig_rq->mq_ctx; + flush_rq->tag = orig_rq->tag; + memcpy(blk_mq_rq_to_pdu(flush_rq), blk_mq_rq_to_pdu(orig_rq), + hctx->cmd_size); +} +inline void __blk_mq_end_io(struct request *rq, int error) +{ blk_account_io_done(rq); - if (rq->end_io) + if (rq->end_io) { rq->end_io(rq, error); - else + } else { + if (unlikely(blk_bidi_rq(rq))) + blk_mq_free_request(rq->next_rq); blk_mq_free_request(rq); - return false; + } +} +EXPORT_SYMBOL(__blk_mq_end_io); + +void blk_mq_end_io(struct request *rq, int error) +{ + if (blk_update_request(rq, error, blk_rq_bytes(rq))) + BUG(); + __blk_mq_end_io(rq, error); } -EXPORT_SYMBOL(blk_mq_end_io_partial); +EXPORT_SYMBOL(blk_mq_end_io); static void __blk_mq_complete_request_remote(void *data) { @@ -305,18 +359,22 @@ static void __blk_mq_complete_request_remote(void *data) rq->q->softirq_done_fn(rq); } -void __blk_mq_complete_request(struct request *rq) +static void blk_mq_ipi_complete_request(struct request *rq) { struct blk_mq_ctx *ctx = rq->mq_ctx; + bool shared = false; int cpu; - if (!ctx->ipi_redirect) { + if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) { rq->q->softirq_done_fn(rq); return; } cpu = get_cpu(); - if (cpu != ctx->cpu && cpu_online(ctx->cpu)) { + if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags)) + shared = cpus_share_cache(cpu, ctx->cpu); + + if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) { rq->csd.func = __blk_mq_complete_request_remote; rq->csd.info = rq; rq->csd.flags = 0; @@ -327,6 +385,16 @@ void __blk_mq_complete_request(struct request *rq) put_cpu(); } +void __blk_mq_complete_request(struct request *rq) +{ + struct request_queue *q = rq->q; + + if (!q->softirq_done_fn) + blk_mq_end_io(rq, rq->errors); + else + blk_mq_ipi_complete_request(rq); +} + /** * blk_mq_complete_request - end I/O on a request * @rq: the request being processed @@ -337,7 +405,9 @@ void __blk_mq_complete_request(struct request *rq) **/ void blk_mq_complete_request(struct request *rq) { - if (unlikely(blk_should_fake_timeout(rq->q))) + struct request_queue *q = rq->q; + + if (unlikely(blk_should_fake_timeout(q))) return; if (!blk_mark_rq_complete(rq)) __blk_mq_complete_request(rq); @@ -350,13 
+420,31 @@ static void blk_mq_start_request(struct request *rq, bool last) trace_block_rq_issue(q, rq); + rq->resid_len = blk_rq_bytes(rq); + if (unlikely(blk_bidi_rq(rq))) + rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq); + /* * Just mark start time and set the started bit. Due to memory * ordering, we know we'll see the correct deadline as long as - * REQ_ATOMIC_STARTED is seen. + * REQ_ATOM_STARTED is seen. Use the default queue timeout, + * unless one has been set in the request. + */ + if (!rq->timeout) + rq->deadline = jiffies + q->rq_timeout; + else + rq->deadline = jiffies + rq->timeout; + + /* + * Mark us as started and clear complete. Complete might have been + * set if requeue raced with timeout, which then marked it as + * complete. So be sure to clear complete again when we start + * the request, otherwise we'll ignore the completion event. */ - rq->deadline = jiffies + q->rq_timeout; - set_bit(REQ_ATOM_STARTED, &rq->atomic_flags); + if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) + set_bit(REQ_ATOM_STARTED, &rq->atomic_flags); + if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags)) + clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags); if (q->dma_drain_size && blk_rq_bytes(rq)) { /* @@ -378,7 +466,7 @@ static void blk_mq_start_request(struct request *rq, bool last) rq->cmd_flags |= REQ_END; } -static void blk_mq_requeue_request(struct request *rq) +static void __blk_mq_requeue_request(struct request *rq) { struct request_queue *q = rq->q; @@ -391,6 +479,86 @@ static void blk_mq_requeue_request(struct request *rq) rq->nr_phys_segments--; } +void blk_mq_requeue_request(struct request *rq) +{ + __blk_mq_requeue_request(rq); + blk_clear_rq_complete(rq); + + BUG_ON(blk_queued_rq(rq)); + blk_mq_add_to_requeue_list(rq, true); +} +EXPORT_SYMBOL(blk_mq_requeue_request); + +static void blk_mq_requeue_work(struct work_struct *work) +{ + struct request_queue *q = + container_of(work, struct request_queue, requeue_work); + LIST_HEAD(rq_list); + struct request *rq, *next; + unsigned long flags; + + spin_lock_irqsave(&q->requeue_lock, flags); + list_splice_init(&q->requeue_list, &rq_list); + spin_unlock_irqrestore(&q->requeue_lock, flags); + + list_for_each_entry_safe(rq, next, &rq_list, queuelist) { + if (!(rq->cmd_flags & REQ_SOFTBARRIER)) + continue; + + rq->cmd_flags &= ~REQ_SOFTBARRIER; + list_del_init(&rq->queuelist); + blk_mq_insert_request(rq, true, false, false); + } + + while (!list_empty(&rq_list)) { + rq = list_entry(rq_list.next, struct request, queuelist); + list_del_init(&rq->queuelist); + blk_mq_insert_request(rq, false, false, false); + } + + blk_mq_run_queues(q, false); +} + +void blk_mq_add_to_requeue_list(struct request *rq, bool at_head) +{ + struct request_queue *q = rq->q; + unsigned long flags; + + /* + * We abuse this flag that is otherwise used by the I/O scheduler to + * request head insertion from the workqueue. 
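+ * The flag is cleared again in blk_mq_requeue_work() before the + * request is re-inserted at the head of the queue.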
+ */ + BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER); + + spin_lock_irqsave(&q->requeue_lock, flags); + if (at_head) { + rq->cmd_flags |= REQ_SOFTBARRIER; + list_add(&rq->queuelist, &q->requeue_list); + } else { + list_add_tail(&rq->queuelist, &q->requeue_list); + } + spin_unlock_irqrestore(&q->requeue_lock, flags); +} +EXPORT_SYMBOL(blk_mq_add_to_requeue_list); + +void blk_mq_kick_requeue_list(struct request_queue *q) +{ + kblockd_schedule_work(&q->requeue_work); +} +EXPORT_SYMBOL(blk_mq_kick_requeue_list); + +struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx, unsigned int tag) +{ + struct request_queue *q = hctx->queue; + + if ((q->flush_rq->cmd_flags & REQ_FLUSH_SEQ) && + q->flush_rq->tag == tag) + return q->flush_rq; + + return hctx->tags->rqs[tag]; +} +EXPORT_SYMBOL(blk_mq_tag_to_rq); + struct blk_mq_timeout_data { struct blk_mq_hw_ctx *hctx; unsigned long *next; @@ -412,12 +580,13 @@ static void blk_mq_timeout_check(void *__data, unsigned long *free_tags) do { struct request *rq; - tag = find_next_zero_bit(free_tags, hctx->queue_depth, tag); - if (tag >= hctx->queue_depth) + tag = find_next_zero_bit(free_tags, hctx->tags->nr_tags, tag); + if (tag >= hctx->tags->nr_tags) break; - rq = hctx->rqs[tag++]; - + rq = blk_mq_tag_to_rq(hctx, tag++); + if (rq->q != hctx->queue) + continue; if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) continue; @@ -442,6 +611,28 @@ static void blk_mq_hw_ctx_check_timeout(struct blk_mq_hw_ctx *hctx, blk_mq_tag_busy_iter(hctx->tags, blk_mq_timeout_check, &data); } +static enum blk_eh_timer_return blk_mq_rq_timed_out(struct request *rq) +{ + struct request_queue *q = rq->q; + + /* + * We know that complete is set at this point. If STARTED isn't set + * anymore, then the request isn't active and the "timeout" should + * just be ignored. This can happen due to the bitflag ordering. + * Timeout first checks if STARTED is set, and if it is, assumes + * the request is active. But if we race with completion, then + * both flags will get cleared. So check here again, and ignore + * a timeout event with a request that isn't active. 
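+ * (i.e. the timeout code saw STARTED set, but the request completed + * and cleared both flags before we re-checked here)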
+ */ + if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) + return BLK_EH_NOT_HANDLED; + + if (!q->mq_ops->timeout) + return BLK_EH_RESET_TIMER; + + return q->mq_ops->timeout(rq); +} + static void blk_mq_rq_timer(unsigned long data) { struct request_queue *q = (struct request_queue *) data; @@ -449,11 +640,24 @@ static void blk_mq_rq_timer(unsigned long data) unsigned long next = 0; int i, next_set = 0; - queue_for_each_hw_ctx(q, hctx, i) + queue_for_each_hw_ctx(q, hctx, i) { + /* + * If no software queues are currently mapped to this + * hardware queue, there's nothing to check + */ + if (!hctx->nr_ctx || !hctx->tags) + continue; + blk_mq_hw_ctx_check_timeout(hctx, &next, &next_set); + } - if (next_set) - mod_timer(&q->timeout, round_jiffies_up(next)); + if (next_set) { + next = blk_rq_timeout(round_jiffies_up(next)); + mod_timer(&q->timeout, next); + } else { + queue_for_each_hw_ctx(q, hctx, i) + blk_mq_tag_idle(hctx); + } } /* @@ -495,9 +699,38 @@ static bool blk_mq_attempt_merge(struct request_queue *q, return false; } -void blk_mq_add_timer(struct request *rq) +/* + * Process software queues that have been marked busy, splicing them + * to the for-dispatch list + */ +static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list) { - __blk_add_timer(rq, NULL); + struct blk_mq_ctx *ctx; + int i; + + for (i = 0; i < hctx->ctx_map.map_size; i++) { + struct blk_align_bitmap *bm = &hctx->ctx_map.map[i]; + unsigned int off, bit; + + if (!bm->word) + continue; + + bit = 0; + off = i * hctx->ctx_map.bits_per_word; + do { + bit = find_next_bit(&bm->word, bm->depth, bit); + if (bit >= bm->depth) + break; + + ctx = hctx->ctxs[bit + off]; + clear_bit(bit, &bm->word); + spin_lock(&ctx->lock); + list_splice_tail_init(&ctx->rq_list, list); + spin_unlock(&ctx->lock); + + bit++; + } while (1); + } } /* @@ -509,10 +742,11 @@ void blk_mq_add_timer(struct request *rq) static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) { struct request_queue *q = hctx->queue; - struct blk_mq_ctx *ctx; struct request *rq; LIST_HEAD(rq_list); - int bit, queued; + int queued; + + WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)); if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state))) return; @@ -522,15 +756,7 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) /* * Touch any software queue that has pending entries. */ - for_each_set_bit(bit, hctx->ctx_map, hctx->nr_ctx) { - clear_bit(bit, hctx->ctx_map); - ctx = hctx->ctxs[bit]; - BUG_ON(bit != ctx->index_hw); - - spin_lock(&ctx->lock); - list_splice_tail_init(&ctx->rq_list, &rq_list); - spin_unlock(&ctx->lock); - } + flush_busy_ctxs(hctx, &rq_list); /* * If we have previous entries on our dispatch list, grab them @@ -544,13 +770,9 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) } /* - * Delete and return all entries from our dispatch list - */ - queued = 0; - - /* * Now process all the entries, sending them to the driver. 
*/ + queued = 0; while (!list_empty(&rq_list)) { int ret; @@ -565,13 +787,8 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) queued++; continue; case BLK_MQ_RQ_QUEUE_BUSY: - /* - * FIXME: we should have a mechanism to stop the queue - * like blk_stop_queue, otherwise we will waste cpu - * time - */ list_add(&rq->queuelist, &rq_list); - blk_mq_requeue_request(rq); + __blk_mq_requeue_request(rq); break; default: pr_err("blk-mq: bad return on queue: %d\n", ret); @@ -601,17 +818,44 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) } } +/* + * It'd be great if the workqueue API had a way to pass + * in a mask and had some smarts for more clever placement. + * For now we just round-robin here, switching for every + * BLK_MQ_CPU_WORK_BATCH queued items. + */ +static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx) +{ + int cpu = hctx->next_cpu; + + if (--hctx->next_cpu_batch <= 0) { + int next_cpu; + + next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask); + if (next_cpu >= nr_cpu_ids) + next_cpu = cpumask_first(hctx->cpumask); + + hctx->next_cpu = next_cpu; + hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; + } + + return cpu; +} + void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) { if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state))) return; - if (!async) + if (!async && cpumask_test_cpu(smp_processor_id(), hctx->cpumask)) __blk_mq_run_hw_queue(hctx); + else if (hctx->queue->nr_hw_queues == 1) + kblockd_schedule_delayed_work(&hctx->run_work, 0); else { - struct request_queue *q = hctx->queue; + unsigned int cpu; - kblockd_schedule_delayed_work(q, &hctx->delayed_work, 0); + cpu = blk_mq_hctx_next_cpu(hctx); + kblockd_schedule_delayed_work_on(cpu, &hctx->run_work, 0); } } @@ -626,14 +870,17 @@ void blk_mq_run_queues(struct request_queue *q, bool async) test_bit(BLK_MQ_S_STOPPED, &hctx->state)) continue; + preempt_disable(); blk_mq_run_hw_queue(hctx, async); + preempt_enable(); } } EXPORT_SYMBOL(blk_mq_run_queues); void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx) { - cancel_delayed_work(&hctx->delayed_work); + cancel_delayed_work(&hctx->run_work); + cancel_delayed_work(&hctx->delay_work); set_bit(BLK_MQ_S_STOPPED, &hctx->state); } EXPORT_SYMBOL(blk_mq_stop_hw_queue); @@ -651,11 +898,25 @@ EXPORT_SYMBOL(blk_mq_stop_hw_queues); void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx) { clear_bit(BLK_MQ_S_STOPPED, &hctx->state); + + preempt_disable(); __blk_mq_run_hw_queue(hctx); + preempt_enable(); } EXPORT_SYMBOL(blk_mq_start_hw_queue); -void blk_mq_start_stopped_hw_queues(struct request_queue *q) +void blk_mq_start_hw_queues(struct request_queue *q) +{ + struct blk_mq_hw_ctx *hctx; + int i; + + queue_for_each_hw_ctx(q, hctx, i) + blk_mq_start_hw_queue(hctx); +} +EXPORT_SYMBOL(blk_mq_start_hw_queues); + + +void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async) { struct blk_mq_hw_ctx *hctx; int i; @@ -665,19 +926,47 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q) continue; clear_bit(BLK_MQ_S_STOPPED, &hctx->state); - blk_mq_run_hw_queue(hctx, true); + preempt_disable(); + blk_mq_run_hw_queue(hctx, async); + preempt_enable(); } } EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues); -static void blk_mq_work_fn(struct work_struct *work) +static void blk_mq_run_work_fn(struct work_struct *work) { struct blk_mq_hw_ctx *hctx; - hctx = container_of(work, struct blk_mq_hw_ctx, delayed_work.work); + hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work); + __blk_mq_run_hw_queue(hctx); } +static void 
blk_mq_delay_work_fn(struct work_struct *work) +{ + struct blk_mq_hw_ctx *hctx; + + hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work); + + if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state)) + __blk_mq_run_hw_queue(hctx); +} + +void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs) +{ + unsigned long tmo = msecs_to_jiffies(msecs); + + if (hctx->queue->nr_hw_queues == 1) + kblockd_schedule_delayed_work(&hctx->delay_work, tmo); + else { + unsigned int cpu; + + cpu = blk_mq_hctx_next_cpu(hctx); + kblockd_schedule_delayed_work_on(cpu, &hctx->delay_work, tmo); + } +} +EXPORT_SYMBOL(blk_mq_delay_queue); + static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, bool at_head) { @@ -689,12 +978,13 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, list_add(&rq->queuelist, &ctx->rq_list); else list_add_tail(&rq->queuelist, &ctx->rq_list); + blk_mq_hctx_mark_pending(hctx, ctx); /* * We do this early, to ensure we are on the right CPU. */ - blk_mq_add_timer(rq); + blk_add_timer(rq); } void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue, @@ -719,10 +1009,10 @@ void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue, spin_unlock(&ctx->lock); } - blk_mq_put_ctx(current_ctx); - if (run_queue) blk_mq_run_hw_queue(hctx, async); + + blk_mq_put_ctx(current_ctx); } static void blk_mq_insert_requests(struct request_queue *q, @@ -758,9 +1048,8 @@ static void blk_mq_insert_requests(struct request_queue *q, } spin_unlock(&ctx->lock); - blk_mq_put_ctx(current_ctx); - blk_mq_run_hw_queue(hctx, from_schedule); + blk_mq_put_ctx(current_ctx); } static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b) @@ -823,24 +1112,169 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) static void blk_mq_bio_to_request(struct request *rq, struct bio *bio) { init_request_from_bio(rq, bio); - blk_account_io_start(rq, 1); + + if (blk_do_io_stat(rq)) { + rq->start_time = jiffies; + blk_account_io_start(rq, 1); + } } -static void blk_mq_make_request(struct request_queue *q, struct bio *bio) +static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx, + struct blk_mq_ctx *ctx, + struct request *rq, struct bio *bio) +{ + struct request_queue *q = hctx->queue; + + if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE)) { + blk_mq_bio_to_request(rq, bio); + spin_lock(&ctx->lock); +insert_rq: + __blk_mq_insert_request(hctx, rq, false); + spin_unlock(&ctx->lock); + return false; + } else { + spin_lock(&ctx->lock); + if (!blk_mq_attempt_merge(q, ctx, bio)) { + blk_mq_bio_to_request(rq, bio); + goto insert_rq; + } + + spin_unlock(&ctx->lock); + __blk_mq_free_request(hctx, ctx, rq); + return true; + } +} + +struct blk_map_ctx { + struct blk_mq_hw_ctx *hctx; + struct blk_mq_ctx *ctx; +}; + +static struct request *blk_mq_map_request(struct request_queue *q, + struct bio *bio, + struct blk_map_ctx *data) { struct blk_mq_hw_ctx *hctx; struct blk_mq_ctx *ctx; + struct request *rq; + int rw = bio_data_dir(bio); + + if (unlikely(blk_mq_queue_enter(q))) { + bio_endio(bio, -EIO); + return NULL; + } + + ctx = blk_mq_get_ctx(q); + hctx = q->mq_ops->map_queue(q, ctx->cpu); + + if (rw_is_sync(bio->bi_rw)) + rw |= REQ_SYNC; + + trace_block_getrq(q, bio, rw); + rq = __blk_mq_alloc_request(q, hctx, ctx, rw, GFP_ATOMIC, false); + if (unlikely(!rq)) { + __blk_mq_run_hw_queue(hctx); + blk_mq_put_ctx(ctx); + trace_block_sleeprq(q, bio, rw); + + ctx = blk_mq_get_ctx(q); + hctx = q->mq_ops->map_queue(q, 
ctx->cpu); + rq = __blk_mq_alloc_request(q, hctx, ctx, rw, + __GFP_WAIT|GFP_ATOMIC, false); + } + + hctx->queued++; + data->hctx = hctx; + data->ctx = ctx; + return rq; +} + +/* + * Multiple hardware queue variant. This will not use per-process plugs, + * but will attempt to bypass the hctx queueing if we can go straight to + * hardware for SYNC IO. + */ +static void blk_mq_make_request(struct request_queue *q, struct bio *bio) +{ const int is_sync = rw_is_sync(bio->bi_rw); const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA); - int rw = bio_data_dir(bio); + struct blk_map_ctx data; struct request *rq; + + blk_queue_bounce(q, &bio); + + if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { + bio_endio(bio, -EIO); + return; + } + + rq = blk_mq_map_request(q, bio, &data); + if (unlikely(!rq)) + return; + + if (unlikely(is_flush_fua)) { + blk_mq_bio_to_request(rq, bio); + blk_insert_flush(rq); + goto run_queue; + } + + if (is_sync) { + int ret; + + blk_mq_bio_to_request(rq, bio); + blk_mq_start_request(rq, true); + blk_add_timer(rq); + + /* + * For OK queue, we are done. For error, kill it. Any other + * error (busy), just add it to our list as we previously + * would have done + */ + ret = q->mq_ops->queue_rq(data.hctx, rq); + if (ret == BLK_MQ_RQ_QUEUE_OK) + goto done; + else { + __blk_mq_requeue_request(rq); + + if (ret == BLK_MQ_RQ_QUEUE_ERROR) { + rq->errors = -EIO; + blk_mq_end_io(rq, rq->errors); + goto done; + } + } + } + + if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) { + /* + * For a SYNC request, send it to the hardware immediately. For + * an ASYNC request, just ensure that we run it later on. The + * latter allows for merging opportunities and more efficient + * dispatching. + */ +run_queue: + blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua); + } +done: + blk_mq_put_ctx(data.ctx); +} + +/* + * Single hardware queue variant. This will attempt to use any per-process + * plug for merging and IO deferral. + */ +static void blk_sq_make_request(struct request_queue *q, struct bio *bio) +{ + const int is_sync = rw_is_sync(bio->bi_rw); + const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA); unsigned int use_plug, request_count = 0; + struct blk_map_ctx data; + struct request *rq; /* * If we have multiple hardware queues, just go directly to * one of those for sync IO. 
*/ - use_plug = !is_flush_fua && ((q->nr_hw_queues == 1) || !is_sync); + use_plug = !is_flush_fua && !is_sync; blk_queue_bounce(q, &bio); @@ -849,37 +1283,14 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio) return; } - if (use_plug && blk_attempt_plug_merge(q, bio, &request_count)) + if (use_plug && !blk_queue_nomerges(q) && + blk_attempt_plug_merge(q, bio, &request_count)) return; - if (blk_mq_queue_enter(q)) { - bio_endio(bio, -EIO); - return; - } - - ctx = blk_mq_get_ctx(q); - hctx = q->mq_ops->map_queue(q, ctx->cpu); - - if (is_sync) - rw |= REQ_SYNC; - trace_block_getrq(q, bio, rw); - rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false); - if (likely(rq)) - blk_mq_rq_ctx_init(q, ctx, rq, rw); - else { - blk_mq_put_ctx(ctx); - trace_block_sleeprq(q, bio, rw); - rq = blk_mq_alloc_request_pinned(q, rw, __GFP_WAIT|GFP_ATOMIC, - false); - ctx = rq->mq_ctx; - hctx = q->mq_ops->map_queue(q, ctx->cpu); - } - - hctx->queued++; + rq = blk_mq_map_request(q, bio, &data); if (unlikely(is_flush_fua)) { blk_mq_bio_to_request(rq, bio); - blk_mq_put_ctx(ctx); blk_insert_flush(rq); goto run_queue; } @@ -901,31 +1312,23 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio) trace_block_plug(q); } list_add_tail(&rq->queuelist, &plug->mq_list); - blk_mq_put_ctx(ctx); + blk_mq_put_ctx(data.ctx); return; } } - spin_lock(&ctx->lock); - - if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) && - blk_mq_attempt_merge(q, ctx, bio)) - __blk_mq_free_request(hctx, ctx, rq); - else { - blk_mq_bio_to_request(rq, bio); - __blk_mq_insert_request(hctx, rq, false); + if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) { + /* + * For a SYNC request, send it to the hardware immediately. For + * an ASYNC request, just ensure that we run it later on. The + * latter allows for merging opportunities and more efficient + * dispatching. + */ +run_queue: + blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua); } - spin_unlock(&ctx->lock); - blk_mq_put_ctx(ctx); - - /* - * For a SYNC request, send it to the hardware immediately. For an - * ASYNC request, just ensure that we run it later on. The latter - * allows for merging opportunities and more efficient dispatching. 
- */ -run_queue: - blk_mq_run_hw_queue(hctx, !is_sync || is_flush_fua); + blk_mq_put_ctx(data.ctx); } /* @@ -937,32 +1340,153 @@ struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu) } EXPORT_SYMBOL(blk_mq_map_queue); -struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_reg *reg, - unsigned int hctx_index) +static void blk_mq_free_rq_map(struct blk_mq_tag_set *set, + struct blk_mq_tags *tags, unsigned int hctx_idx) { - return kmalloc_node(sizeof(struct blk_mq_hw_ctx), - GFP_KERNEL | __GFP_ZERO, reg->numa_node); + struct page *page; + + if (tags->rqs && set->ops->exit_request) { + int i; + + for (i = 0; i < tags->nr_tags; i++) { + if (!tags->rqs[i]) + continue; + set->ops->exit_request(set->driver_data, tags->rqs[i], + hctx_idx, i); + } + } + + while (!list_empty(&tags->page_list)) { + page = list_first_entry(&tags->page_list, struct page, lru); + list_del_init(&page->lru); + __free_pages(page, page->private); + } + + kfree(tags->rqs); + + blk_mq_free_tags(tags); } -EXPORT_SYMBOL(blk_mq_alloc_single_hw_queue); -void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *hctx, - unsigned int hctx_index) +static size_t order_to_size(unsigned int order) { - kfree(hctx); + return (size_t)PAGE_SIZE << order; } -EXPORT_SYMBOL(blk_mq_free_single_hw_queue); -static void blk_mq_hctx_notify(void *data, unsigned long action, - unsigned int cpu) +static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set, + unsigned int hctx_idx) +{ + struct blk_mq_tags *tags; + unsigned int i, j, entries_per_page, max_order = 4; + size_t rq_size, left; + + tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags, + set->numa_node); + if (!tags) + return NULL; + + INIT_LIST_HEAD(&tags->page_list); + + tags->rqs = kmalloc_node(set->queue_depth * sizeof(struct request *), + GFP_KERNEL, set->numa_node); + if (!tags->rqs) { + blk_mq_free_tags(tags); + return NULL; + } + + /* + * rq_size is the size of the request plus driver payload, rounded + * to the cacheline size + */ + rq_size = round_up(sizeof(struct request) + set->cmd_size, + cache_line_size()); + left = rq_size * set->queue_depth; + + for (i = 0; i < set->queue_depth; ) { + int this_order = max_order; + struct page *page; + int to_do; + void *p; + + while (left < order_to_size(this_order - 1) && this_order) + this_order--; + + do { + page = alloc_pages_node(set->numa_node, GFP_KERNEL, + this_order); + if (page) + break; + if (!this_order--) + break; + if (order_to_size(this_order) < rq_size) + break; + } while (1); + + if (!page) + goto fail; + + page->private = this_order; + list_add_tail(&page->lru, &tags->page_list); + + p = page_address(page); + entries_per_page = order_to_size(this_order) / rq_size; + to_do = min(entries_per_page, set->queue_depth - i); + left -= to_do * rq_size; + for (j = 0; j < to_do; j++) { + tags->rqs[i] = p; + if (set->ops->init_request) { + if (set->ops->init_request(set->driver_data, + tags->rqs[i], hctx_idx, i, + set->numa_node)) + goto fail; + } + + p += rq_size; + i++; + } + } + + return tags; + +fail: + pr_warn("%s: failed to allocate requests\n", __func__); + blk_mq_free_rq_map(set, tags, hctx_idx); + return NULL; +} + +static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap) +{ + kfree(bitmap->map); +} + +static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node) +{ + unsigned int bpw = 8, total, num_maps, i; + + bitmap->bits_per_word = bpw; + + num_maps = ALIGN(nr_cpu_ids, bpw) / bpw; + bitmap->map = kzalloc_node(num_maps * sizeof(struct blk_align_bitmap), + 
GFP_KERNEL, node); + if (!bitmap->map) + return -ENOMEM; + + bitmap->map_size = num_maps; + + total = nr_cpu_ids; + for (i = 0; i < num_maps; i++) { + bitmap->map[i].depth = min(total, bitmap->bits_per_word); + total -= bitmap->map[i].depth; + } + + return 0; +} + +static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu) { - struct blk_mq_hw_ctx *hctx = data; struct request_queue *q = hctx->queue; struct blk_mq_ctx *ctx; LIST_HEAD(tmp); - if (action != CPU_DEAD && action != CPU_DEAD_FROZEN) - return; - /* * Move ctx entries to new CPU, if this one is going away. */ @@ -971,12 +1495,12 @@ static void blk_mq_hctx_notify(void *data, unsigned long action, spin_lock(&ctx->lock); if (!list_empty(&ctx->rq_list)) { list_splice_init(&ctx->rq_list, &tmp); - clear_bit(ctx->index_hw, hctx->ctx_map); + blk_mq_hctx_clear_pending(hctx, ctx); } spin_unlock(&ctx->lock); if (list_empty(&tmp)) - return; + return NOTIFY_OK; ctx = blk_mq_get_ctx(q); spin_lock(&ctx->lock); @@ -993,210 +1517,103 @@ static void blk_mq_hctx_notify(void *data, unsigned long action, blk_mq_hctx_mark_pending(hctx, ctx); spin_unlock(&ctx->lock); - blk_mq_put_ctx(ctx); blk_mq_run_hw_queue(hctx, true); + blk_mq_put_ctx(ctx); + return NOTIFY_OK; } -static int blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx, - int (*init)(void *, struct blk_mq_hw_ctx *, - struct request *, unsigned int), - void *data) +static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu) { - unsigned int i; - int ret = 0; - - for (i = 0; i < hctx->queue_depth; i++) { - struct request *rq = hctx->rqs[i]; - - ret = init(data, hctx, rq, i); - if (ret) - break; - } - - return ret; -} + struct request_queue *q = hctx->queue; + struct blk_mq_tag_set *set = q->tag_set; -int blk_mq_init_commands(struct request_queue *q, - int (*init)(void *, struct blk_mq_hw_ctx *, - struct request *, unsigned int), - void *data) -{ - struct blk_mq_hw_ctx *hctx; - unsigned int i; - int ret = 0; + if (set->tags[hctx->queue_num]) + return NOTIFY_OK; - queue_for_each_hw_ctx(q, hctx, i) { - ret = blk_mq_init_hw_commands(hctx, init, data); - if (ret) - break; - } + set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num); + if (!set->tags[hctx->queue_num]) + return NOTIFY_STOP; - return ret; + hctx->tags = set->tags[hctx->queue_num]; + return NOTIFY_OK; } -EXPORT_SYMBOL(blk_mq_init_commands); -static void blk_mq_free_hw_commands(struct blk_mq_hw_ctx *hctx, - void (*free)(void *, struct blk_mq_hw_ctx *, - struct request *, unsigned int), - void *data) +static int blk_mq_hctx_notify(void *data, unsigned long action, + unsigned int cpu) { - unsigned int i; + struct blk_mq_hw_ctx *hctx = data; - for (i = 0; i < hctx->queue_depth; i++) { - struct request *rq = hctx->rqs[i]; + if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) + return blk_mq_hctx_cpu_offline(hctx, cpu); + else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) + return blk_mq_hctx_cpu_online(hctx, cpu); - free(data, hctx, rq, i); - } + return NOTIFY_OK; } -void blk_mq_free_commands(struct request_queue *q, - void (*free)(void *, struct blk_mq_hw_ctx *, - struct request *, unsigned int), - void *data) +static void blk_mq_exit_hw_queues(struct request_queue *q, + struct blk_mq_tag_set *set, int nr_queue) { struct blk_mq_hw_ctx *hctx; unsigned int i; - queue_for_each_hw_ctx(q, hctx, i) - blk_mq_free_hw_commands(hctx, free, data); -} -EXPORT_SYMBOL(blk_mq_free_commands); + queue_for_each_hw_ctx(q, hctx, i) { + if (i == nr_queue) + break; -static void blk_mq_free_rq_map(struct 
blk_mq_hw_ctx *hctx) -{ - struct page *page; + if (set->ops->exit_hctx) + set->ops->exit_hctx(hctx, i); - while (!list_empty(&hctx->page_list)) { - page = list_first_entry(&hctx->page_list, struct page, lru); - list_del_init(&page->lru); - __free_pages(page, page->private); + blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); + kfree(hctx->ctxs); + blk_mq_free_bitmap(&hctx->ctx_map); } - kfree(hctx->rqs); - - if (hctx->tags) - blk_mq_free_tags(hctx->tags); -} - -static size_t order_to_size(unsigned int order) -{ - size_t ret = PAGE_SIZE; - - while (order--) - ret *= 2; - - return ret; } -static int blk_mq_init_rq_map(struct blk_mq_hw_ctx *hctx, - unsigned int reserved_tags, int node) +static void blk_mq_free_hw_queues(struct request_queue *q, + struct blk_mq_tag_set *set) { - unsigned int i, j, entries_per_page, max_order = 4; - size_t rq_size, left; - - INIT_LIST_HEAD(&hctx->page_list); - - hctx->rqs = kmalloc_node(hctx->queue_depth * sizeof(struct request *), - GFP_KERNEL, node); - if (!hctx->rqs) - return -ENOMEM; - - /* - * rq_size is the size of the request plus driver payload, rounded - * to the cacheline size - */ - rq_size = round_up(sizeof(struct request) + hctx->cmd_size, - cache_line_size()); - left = rq_size * hctx->queue_depth; - - for (i = 0; i < hctx->queue_depth;) { - int this_order = max_order; - struct page *page; - int to_do; - void *p; - - while (left < order_to_size(this_order - 1) && this_order) - this_order--; - - do { - page = alloc_pages_node(node, GFP_KERNEL, this_order); - if (page) - break; - if (!this_order--) - break; - if (order_to_size(this_order) < rq_size) - break; - } while (1); - - if (!page) - break; - - page->private = this_order; - list_add_tail(&page->lru, &hctx->page_list); - - p = page_address(page); - entries_per_page = order_to_size(this_order) / rq_size; - to_do = min(entries_per_page, hctx->queue_depth - i); - left -= to_do * rq_size; - for (j = 0; j < to_do; j++) { - hctx->rqs[i] = p; - blk_mq_rq_init(hctx, hctx->rqs[i]); - p += rq_size; - i++; - } - } - - if (i < (reserved_tags + BLK_MQ_TAG_MIN)) - goto err_rq_map; - else if (i != hctx->queue_depth) { - hctx->queue_depth = i; - pr_warn("%s: queue depth set to %u because of low memory\n", - __func__, i); - } + struct blk_mq_hw_ctx *hctx; + unsigned int i; - hctx->tags = blk_mq_init_tags(hctx->queue_depth, reserved_tags, node); - if (!hctx->tags) { -err_rq_map: - blk_mq_free_rq_map(hctx); - return -ENOMEM; + queue_for_each_hw_ctx(q, hctx, i) { + free_cpumask_var(hctx->cpumask); + kfree(hctx); } - - return 0; } static int blk_mq_init_hw_queues(struct request_queue *q, - struct blk_mq_reg *reg, void *driver_data) + struct blk_mq_tag_set *set) { struct blk_mq_hw_ctx *hctx; - unsigned int i, j; + unsigned int i; /* * Initialize hardware queues */ queue_for_each_hw_ctx(q, hctx, i) { - unsigned int num_maps; int node; node = hctx->numa_node; if (node == NUMA_NO_NODE) - node = hctx->numa_node = reg->numa_node; + node = hctx->numa_node = set->numa_node; - INIT_DELAYED_WORK(&hctx->delayed_work, blk_mq_work_fn); + INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn); + INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn); spin_lock_init(&hctx->lock); INIT_LIST_HEAD(&hctx->dispatch); hctx->queue = q; hctx->queue_num = i; - hctx->flags = reg->flags; - hctx->queue_depth = reg->queue_depth; - hctx->cmd_size = reg->cmd_size; + hctx->flags = set->flags; + hctx->cmd_size = set->cmd_size; blk_mq_init_cpu_notifier(&hctx->cpu_notifier, blk_mq_hctx_notify, hctx); 
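The rq-map hunks above preallocate every request up front: rq_size is sizeof(struct request) plus the driver's cmd_size, rounded up to a cache line, and requests are then packed into the largest page order (max_order = 4 here) that memory allows, falling back to smaller orders. A back-of-the-envelope sketch of that packing as standalone C; the sizes below (REQUEST_SIZE, the cmd_size value, the cache-line width) are made-up assumptions, not values from this patch:

    #include <stdio.h>
    #include <stddef.h>

    #define PAGE_SIZE	4096UL		/* assumed 4K pages */
    #define CACHE_LINE	64UL		/* assumed cache_line_size() */
    #define REQUEST_SIZE	384UL		/* stand-in for sizeof(struct request) */

    static size_t order_to_size(unsigned int order)
    {
    	return (size_t)PAGE_SIZE << order;	/* same shift as the patched helper */
    }

    int main(void)
    {
    	size_t cmd_size = 256;			/* hypothetical driver payload */
    	size_t rq_size = (REQUEST_SIZE + cmd_size + CACHE_LINE - 1) &
    				~(CACHE_LINE - 1);
    	unsigned int order = 4;			/* max_order in the patch */

    	printf("rq_size=%zu bytes, one order-%u block holds %zu requests\n",
    	       rq_size, order, order_to_size(order) / rq_size);
    	return 0;
    }

With these assumed sizes, a 64 KB order-4 block holds 102 cache-aligned requests, which is why the allocator prefers high orders before falling back.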
blk_mq_register_cpu_notifier(&hctx->cpu_notifier); - if (blk_mq_init_rq_map(hctx, reg->reserved_tags, node)) - break; + hctx->tags = set->tags[i]; /* * Allocate space for all possible cpus to avoid allocation in @@ -1207,17 +1624,13 @@ static int blk_mq_init_hw_queues(struct request_queue *q, if (!hctx->ctxs) break; - num_maps = ALIGN(nr_cpu_ids, BITS_PER_LONG) / BITS_PER_LONG; - hctx->ctx_map = kzalloc_node(num_maps * sizeof(unsigned long), - GFP_KERNEL, node); - if (!hctx->ctx_map) + if (blk_mq_alloc_bitmap(&hctx->ctx_map, node)) break; - hctx->nr_ctx_map = num_maps; hctx->nr_ctx = 0; - if (reg->ops->init_hctx && - reg->ops->init_hctx(hctx, driver_data, i)) + if (set->ops->init_hctx && + set->ops->init_hctx(hctx, set->driver_data, i)) break; } @@ -1227,17 +1640,7 @@ static int blk_mq_init_hw_queues(struct request_queue *q, /* * Init failed */ - queue_for_each_hw_ctx(q, hctx, j) { - if (i == j) - break; - - if (reg->ops->exit_hctx) - reg->ops->exit_hctx(hctx, j); - - blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); - blk_mq_free_rq_map(hctx); - kfree(hctx->ctxs); - } + blk_mq_exit_hw_queues(q, set, i); return 1; } @@ -1258,12 +1661,13 @@ static void blk_mq_init_cpu_queues(struct request_queue *q, __ctx->queue = q; /* If the cpu isn't online, the cpu is mapped to first hctx */ - hctx = q->mq_ops->map_queue(q, i); - hctx->nr_ctx++; - if (!cpu_online(i)) continue; + hctx = q->mq_ops->map_queue(q, i); + cpumask_set_cpu(i, hctx->cpumask); + hctx->nr_ctx++; + /* * Set local node, IFF we have more than one hw queue. If * not, we remain on the home node of the device @@ -1280,6 +1684,7 @@ static void blk_mq_map_swqueue(struct request_queue *q) struct blk_mq_ctx *ctx; queue_for_each_hw_ctx(q, hctx, i) { + cpumask_clear(hctx->cpumask); hctx->nr_ctx = 0; } @@ -1288,115 +1693,208 @@ static void blk_mq_map_swqueue(struct request_queue *q) */ queue_for_each_ctx(q, ctx, i) { /* If the cpu isn't online, the cpu is mapped to first hctx */ + if (!cpu_online(i)) + continue; + hctx = q->mq_ops->map_queue(q, i); + cpumask_set_cpu(i, hctx->cpumask); ctx->index_hw = hctx->nr_ctx; hctx->ctxs[hctx->nr_ctx++] = ctx; } + + queue_for_each_hw_ctx(q, hctx, i) { + /* + * If no software queues are mapped to this hardware queue, + * disable it and free the request entries + */ + if (!hctx->nr_ctx) { + struct blk_mq_tag_set *set = q->tag_set; + + if (set->tags[i]) { + blk_mq_free_rq_map(set, set->tags[i], i); + set->tags[i] = NULL; + hctx->tags = NULL; + } + continue; + } + + /* + * Initialize batch roundrobin counts + */ + hctx->next_cpu = cpumask_first(hctx->cpumask); + hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; + } } -struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg, - void *driver_data) +static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set) { - struct blk_mq_hw_ctx **hctxs; - struct blk_mq_ctx *ctx; + struct blk_mq_hw_ctx *hctx; struct request_queue *q; + bool shared; int i; - if (!reg->nr_hw_queues || - !reg->ops->queue_rq || !reg->ops->map_queue || - !reg->ops->alloc_hctx || !reg->ops->free_hctx) - return ERR_PTR(-EINVAL); + if (set->tag_list.next == set->tag_list.prev) + shared = false; + else + shared = true; + + list_for_each_entry(q, &set->tag_list, tag_set_list) { + blk_mq_freeze_queue(q); - if (!reg->queue_depth) - reg->queue_depth = BLK_MQ_MAX_DEPTH; - else if (reg->queue_depth > BLK_MQ_MAX_DEPTH) { - pr_err("blk-mq: queuedepth too large (%u)\n", reg->queue_depth); - reg->queue_depth = BLK_MQ_MAX_DEPTH; + queue_for_each_hw_ctx(q, hctx, i) { + if (shared) + hctx->flags |=
BLK_MQ_F_TAG_SHARED; + else + hctx->flags &= ~BLK_MQ_F_TAG_SHARED; + } + blk_mq_unfreeze_queue(q); } +} - if (reg->queue_depth < (reg->reserved_tags + BLK_MQ_TAG_MIN)) - return ERR_PTR(-EINVAL); +static void blk_mq_del_queue_tag_set(struct request_queue *q) +{ + struct blk_mq_tag_set *set = q->tag_set; + + blk_mq_freeze_queue(q); + + mutex_lock(&set->tag_list_lock); + list_del_init(&q->tag_set_list); + blk_mq_update_tag_set_depth(set); + mutex_unlock(&set->tag_list_lock); + + blk_mq_unfreeze_queue(q); +} + +static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set, + struct request_queue *q) +{ + q->tag_set = set; + + mutex_lock(&set->tag_list_lock); + list_add_tail(&q->tag_set_list, &set->tag_list); + blk_mq_update_tag_set_depth(set); + mutex_unlock(&set->tag_list_lock); +} + +struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) +{ + struct blk_mq_hw_ctx **hctxs; + struct blk_mq_ctx *ctx; + struct request_queue *q; + unsigned int *map; + int i; ctx = alloc_percpu(struct blk_mq_ctx); if (!ctx) return ERR_PTR(-ENOMEM); - hctxs = kmalloc_node(reg->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL, - reg->numa_node); + hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL, + set->numa_node); if (!hctxs) goto err_percpu; - for (i = 0; i < reg->nr_hw_queues; i++) { - hctxs[i] = reg->ops->alloc_hctx(reg, i); + map = blk_mq_make_queue_map(set); + if (!map) + goto err_map; + + for (i = 0; i < set->nr_hw_queues; i++) { + int node = blk_mq_hw_queue_to_node(map, i); + + hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx), + GFP_KERNEL, node); if (!hctxs[i]) goto err_hctxs; - hctxs[i]->numa_node = NUMA_NO_NODE; + if (!zalloc_cpumask_var(&hctxs[i]->cpumask, GFP_KERNEL)) + goto err_hctxs; + + atomic_set(&hctxs[i]->nr_active, 0); + hctxs[i]->numa_node = node; hctxs[i]->queue_num = i; } - q = blk_alloc_queue_node(GFP_KERNEL, reg->numa_node); + q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node); if (!q) goto err_hctxs; - q->mq_map = blk_mq_make_queue_map(reg); - if (!q->mq_map) + if (percpu_counter_init(&q->mq_usage_counter, 0)) goto err_map; setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q); blk_queue_rq_timeout(q, 30000); q->nr_queues = nr_cpu_ids; - q->nr_hw_queues = reg->nr_hw_queues; + q->nr_hw_queues = set->nr_hw_queues; + q->mq_map = map; q->queue_ctx = ctx; q->queue_hw_ctx = hctxs; - q->mq_ops = reg->ops; + q->mq_ops = set->ops; q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; + if (!(set->flags & BLK_MQ_F_SG_MERGE)) + q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE; + q->sg_reserved_size = INT_MAX; - blk_queue_make_request(q, blk_mq_make_request); - blk_queue_rq_timed_out(q, reg->ops->timeout); - if (reg->timeout) - blk_queue_rq_timeout(q, reg->timeout); + INIT_WORK(&q->requeue_work, blk_mq_requeue_work); + INIT_LIST_HEAD(&q->requeue_list); + spin_lock_init(&q->requeue_lock); + + if (q->nr_hw_queues > 1) + blk_queue_make_request(q, blk_mq_make_request); + else + blk_queue_make_request(q, blk_sq_make_request); + + blk_queue_rq_timed_out(q, blk_mq_rq_timed_out); + if (set->timeout) + blk_queue_rq_timeout(q, set->timeout); + + /* + * Do this after blk_queue_make_request() overrides it... 
+ */ + q->nr_requests = set->queue_depth; - if (reg->ops->complete) - blk_queue_softirq_done(q, reg->ops->complete); + if (set->ops->complete) + blk_queue_softirq_done(q, set->ops->complete); blk_mq_init_flush(q); - blk_mq_init_cpu_queues(q, reg->nr_hw_queues); + blk_mq_init_cpu_queues(q, set->nr_hw_queues); - q->flush_rq = kzalloc(round_up(sizeof(struct request) + reg->cmd_size, - cache_line_size()), GFP_KERNEL); + q->flush_rq = kzalloc(round_up(sizeof(struct request) + + set->cmd_size, cache_line_size()), + GFP_KERNEL); if (!q->flush_rq) goto err_hw; - if (blk_mq_init_hw_queues(q, reg, driver_data)) + if (blk_mq_init_hw_queues(q, set)) goto err_flush_rq; - blk_mq_map_swqueue(q); - mutex_lock(&all_q_mutex); list_add_tail(&q->all_q_node, &all_q_list); mutex_unlock(&all_q_mutex); + blk_mq_add_queue_tag_set(set, q); + + blk_mq_map_swqueue(q); + return q; err_flush_rq: kfree(q->flush_rq); err_hw: - kfree(q->mq_map); -err_map: blk_cleanup_queue(q); err_hctxs: - for (i = 0; i < reg->nr_hw_queues; i++) { + kfree(map); + for (i = 0; i < set->nr_hw_queues; i++) { if (!hctxs[i]) break; - reg->ops->free_hctx(hctxs[i], i); + free_cpumask_var(hctxs[i]->cpumask); + kfree(hctxs[i]); } +err_map: kfree(hctxs); err_percpu: free_percpu(ctx); @@ -1406,18 +1904,14 @@ EXPORT_SYMBOL(blk_mq_init_queue); void blk_mq_free_queue(struct request_queue *q) { - struct blk_mq_hw_ctx *hctx; - int i; + struct blk_mq_tag_set *set = q->tag_set; - queue_for_each_hw_ctx(q, hctx, i) { - kfree(hctx->ctx_map); - kfree(hctx->ctxs); - blk_mq_free_rq_map(hctx); - blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier); - if (q->mq_ops->exit_hctx) - q->mq_ops->exit_hctx(hctx, i); - q->mq_ops->free_hctx(hctx, i); - } + blk_mq_del_queue_tag_set(q); + + blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); + blk_mq_free_hw_queues(q, set); + + percpu_counter_destroy(&q->mq_usage_counter); free_percpu(q->queue_ctx); kfree(q->queue_hw_ctx); @@ -1437,6 +1931,8 @@ static void blk_mq_queue_reinit(struct request_queue *q) { blk_mq_freeze_queue(q); + blk_mq_sysfs_unregister(q); + blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues); /* @@ -1447,6 +1943,8 @@ static void blk_mq_queue_reinit(struct request_queue *q) blk_mq_map_swqueue(q); + blk_mq_sysfs_register(q); + blk_mq_unfreeze_queue(q); } @@ -1456,10 +1954,10 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb, struct request_queue *q; /* - * Before new mapping is established, hotadded cpu might already start - * handling requests. This doesn't break anything as we map offline - * CPUs to first hardware queue. We will re-init queue below to get - * optimal settings. + * Before new mappings are established, hotadded cpu might already + * start handling requests. This doesn't break anything as we map + * offline CPUs to first hardware queue. We will re-init the queue + * below to get optimal settings. 
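The blk_mq_init_queue() rework above, together with the tag-set helpers that follow, replaces the old struct blk_mq_reg flow: a driver now fills in a struct blk_mq_tag_set, calls blk_mq_alloc_tag_set() once to preallocate tags and requests, and hands the set to blk_mq_init_queue(), which can share those tags across queues. A minimal kernel-style sketch of that flow under stated assumptions; my_queue_rq, struct my_cmd, and the depth and queue counts are illustrative placeholders, not code from this patch:

    #include <linux/err.h>
    #include <linux/blk-mq.h>

    struct my_cmd {				/* hypothetical per-request payload */
    	u32 flags;
    };

    static int my_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
    {
    	/* issue rq to hardware here */
    	return BLK_MQ_RQ_QUEUE_OK;
    }

    static struct blk_mq_ops my_mq_ops = {
    	.queue_rq	= my_queue_rq,
    	.map_queue	= blk_mq_map_queue,	/* default CPU -> hctx mapping */
    };

    static struct blk_mq_tag_set my_set;

    static struct request_queue *my_create_queue(void)
    {
    	struct request_queue *q;

    	my_set.ops		= &my_mq_ops;
    	my_set.nr_hw_queues	= 1;
    	my_set.queue_depth	= 64;
    	my_set.numa_node	= NUMA_NO_NODE;
    	my_set.cmd_size		= sizeof(struct my_cmd);
    	my_set.flags		= BLK_MQ_F_SHOULD_MERGE;

    	if (blk_mq_alloc_tag_set(&my_set))	/* preallocates tags + requests */
    		return NULL;

    	q = blk_mq_init_queue(&my_set);		/* queue attaches to the set's tags */
    	if (IS_ERR(q)) {
    		blk_mq_free_tag_set(&my_set);
    		return NULL;
    	}
    	return q;
    }

Note how teardown mirrors setup: blk_mq_free_queue() detaches the queue from the set, and blk_mq_free_tag_set() releases the preallocated request maps.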
*/ if (action != CPU_DEAD && action != CPU_DEAD_FROZEN && action != CPU_ONLINE && action != CPU_ONLINE_FROZEN) @@ -1472,6 +1970,81 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb, return NOTIFY_OK; } +int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) +{ + int i; + + if (!set->nr_hw_queues) + return -EINVAL; + if (!set->queue_depth || set->queue_depth > BLK_MQ_MAX_DEPTH) + return -EINVAL; + if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) + return -EINVAL; + + if (!set->nr_hw_queues || !set->ops->queue_rq || !set->ops->map_queue) + return -EINVAL; + + + set->tags = kmalloc_node(set->nr_hw_queues * + sizeof(struct blk_mq_tags *), + GFP_KERNEL, set->numa_node); + if (!set->tags) + goto out; + + for (i = 0; i < set->nr_hw_queues; i++) { + set->tags[i] = blk_mq_init_rq_map(set, i); + if (!set->tags[i]) + goto out_unwind; + } + + mutex_init(&set->tag_list_lock); + INIT_LIST_HEAD(&set->tag_list); + + return 0; + +out_unwind: + while (--i >= 0) + blk_mq_free_rq_map(set, set->tags[i], i); +out: + return -ENOMEM; +} +EXPORT_SYMBOL(blk_mq_alloc_tag_set); + +void blk_mq_free_tag_set(struct blk_mq_tag_set *set) +{ + int i; + + for (i = 0; i < set->nr_hw_queues; i++) { + if (set->tags[i]) + blk_mq_free_rq_map(set, set->tags[i], i); + } + + kfree(set->tags); +} +EXPORT_SYMBOL(blk_mq_free_tag_set); + +int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) +{ + struct blk_mq_tag_set *set = q->tag_set; + struct blk_mq_hw_ctx *hctx; + int i, ret; + + if (!set || nr > set->queue_depth) + return -EINVAL; + + ret = 0; + queue_for_each_hw_ctx(q, hctx, i) { + ret = blk_mq_tag_update_depth(hctx->tags, nr); + if (ret) + break; + } + + if (!ret) + q->nr_requests = nr; + + return ret; +} + void blk_mq_disable_hotplug(void) { mutex_lock(&all_q_mutex); diff --git a/block/blk-mq.h b/block/blk-mq.h index ebbe6bac9d61..de7b3bbd5bd6 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -1,6 +1,8 @@ #ifndef INT_BLK_MQ_H #define INT_BLK_MQ_H +struct blk_mq_tag_set; + struct blk_mq_ctx { struct { spinlock_t lock; @@ -9,7 +11,8 @@ struct blk_mq_ctx { unsigned int cpu; unsigned int index_hw; - unsigned int ipi_redirect; + + unsigned int last_tag ____cacheline_aligned_in_smp; /* incremented at dispatch time */ unsigned long rq_dispatched[2]; @@ -20,21 +23,23 @@ struct blk_mq_ctx { struct request_queue *queue; struct kobject kobj; -}; +} ____cacheline_aligned_in_smp; void __blk_mq_complete_request(struct request *rq); void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); void blk_mq_init_flush(struct request_queue *q); void blk_mq_drain_queue(struct request_queue *q); void blk_mq_free_queue(struct request_queue *q); -void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq); +void blk_mq_clone_flush_request(struct request *flush_rq, + struct request *orig_rq); +int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); /* * CPU hotplug helpers */ struct blk_mq_cpu_notifier; void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier, - void (*fn)(void *, unsigned long, unsigned int), + int (*fn)(void *, unsigned long, unsigned int), void *data); void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier); void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier); @@ -45,10 +50,23 @@ void blk_mq_disable_hotplug(void); /* * CPU -> queue mappings */ -struct blk_mq_reg; -extern unsigned int *blk_mq_make_queue_map(struct blk_mq_reg *reg); +extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set); extern 
int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues); +extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int); -void blk_mq_add_timer(struct request *rq); +/* + * sysfs helpers + */ +extern int blk_mq_sysfs_register(struct request_queue *q); +extern void blk_mq_sysfs_unregister(struct request_queue *q); + +/* + * Basic implementation of sparser bitmap, allowing the user to spread + * the bits over more cachelines. + */ +struct blk_align_bitmap { + unsigned long word; + unsigned long depth; +} ____cacheline_aligned_in_smp; #endif diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 7500f876dae4..23321fbab293 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -48,11 +48,10 @@ static ssize_t queue_requests_show(struct request_queue *q, char *page) static ssize_t queue_requests_store(struct request_queue *q, const char *page, size_t count) { - struct request_list *rl; unsigned long nr; - int ret; + int ret, err; - if (!q->request_fn) + if (!q->request_fn && !q->mq_ops) return -EINVAL; ret = queue_var_store(&nr, page, count); @@ -62,40 +61,14 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count) if (nr < BLKDEV_MIN_RQ) nr = BLKDEV_MIN_RQ; - spin_lock_irq(q->queue_lock); - q->nr_requests = nr; - blk_queue_congestion_threshold(q); - - /* congestion isn't cgroup aware and follows root blkcg for now */ - rl = &q->root_rl; - - if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q)) - blk_set_queue_congested(q, BLK_RW_SYNC); - else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q)) - blk_clear_queue_congested(q, BLK_RW_SYNC); - - if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q)) - blk_set_queue_congested(q, BLK_RW_ASYNC); - else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q)) - blk_clear_queue_congested(q, BLK_RW_ASYNC); - - blk_queue_for_each_rl(rl, q) { - if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { - blk_set_rl_full(rl, BLK_RW_SYNC); - } else { - blk_clear_rl_full(rl, BLK_RW_SYNC); - wake_up(&rl->wait[BLK_RW_SYNC]); - } - - if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { - blk_set_rl_full(rl, BLK_RW_ASYNC); - } else { - blk_clear_rl_full(rl, BLK_RW_ASYNC); - wake_up(&rl->wait[BLK_RW_ASYNC]); - } - } + if (q->request_fn) + err = blk_update_nr_requests(q, nr); + else + err = blk_mq_update_nr_requests(q, nr); + + if (err) + return err; - spin_unlock_irq(q->queue_lock); return ret; } @@ -544,8 +517,6 @@ static void blk_release_queue(struct kobject *kobj) if (q->queue_tags) __blk_queue_free_tags(q); - percpu_counter_destroy(&q->mq_usage_counter); - if (q->mq_ops) blk_mq_free_queue(q); diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 033745cd7fba..9353b4683359 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -744,7 +744,7 @@ static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw, static bool throtl_slice_used(struct throtl_grp *tg, bool rw) { if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw])) - return 0; + return false; return 1; } @@ -842,7 +842,7 @@ static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio, if (tg->io_disp[rw] + 1 <= io_allowed) { if (wait) *wait = 0; - return 1; + return true; } /* Calc approx time to dispatch */ @@ -880,7 +880,7 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio, if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) { if (wait) *wait = 0; - return 1; + return true; } /* Calc approx time to dispatch */ @@ -923,7 +923,7 @@ static bool 
tg_may_dispatch(struct throtl_grp *tg, struct bio *bio, if (tg->bps[rw] == -1 && tg->iops[rw] == -1) { if (wait) *wait = 0; - return 1; + return true; } /* @@ -1258,7 +1258,7 @@ out_unlock: * of throtl_data->service_queue. Those bio's are ready and issued by this * function. */ -void blk_throtl_dispatch_work_fn(struct work_struct *work) +static void blk_throtl_dispatch_work_fn(struct work_struct *work) { struct throtl_data *td = container_of(work, struct throtl_data, dispatch_work); diff --git a/block/blk-timeout.c b/block/blk-timeout.c index d96f7061c6fd..95a09590ccfd 100644 --- a/block/blk-timeout.c +++ b/block/blk-timeout.c @@ -96,11 +96,7 @@ static void blk_rq_timed_out(struct request *req) __blk_complete_request(req); break; case BLK_EH_RESET_TIMER: - if (q->mq_ops) - blk_mq_add_timer(req); - else - blk_add_timer(req); - + blk_add_timer(req); blk_clear_rq_complete(req); break; case BLK_EH_NOT_HANDLED: @@ -170,7 +166,26 @@ void blk_abort_request(struct request *req) } EXPORT_SYMBOL_GPL(blk_abort_request); -void __blk_add_timer(struct request *req, struct list_head *timeout_list) +unsigned long blk_rq_timeout(unsigned long timeout) +{ + unsigned long maxt; + + maxt = round_jiffies_up(jiffies + BLK_MAX_TIMEOUT); + if (time_after(timeout, maxt)) + timeout = maxt; + + return timeout; +} + +/** + * blk_add_timer - Start timeout timer for a single request + * @req: request that is about to start running. + * + * Notes: + * Each request has its own timer, and as it is added to the queue, we + * set up the timer. When the request completes, we cancel the timer. + */ +void blk_add_timer(struct request *req) { struct request_queue *q = req->q; unsigned long expiry; @@ -188,32 +203,29 @@ void __blk_add_timer(struct request *req, struct list_head *timeout_list) req->timeout = q->rq_timeout; req->deadline = jiffies + req->timeout; - if (timeout_list) - list_add_tail(&req->timeout_list, timeout_list); + if (!q->mq_ops) + list_add_tail(&req->timeout_list, &req->q->timeout_list); /* * If the timer isn't already pending or this timeout is earlier * than an existing one, modify the timer. Round up to next nearest * second. */ - expiry = round_jiffies_up(req->deadline); + expiry = blk_rq_timeout(round_jiffies_up(req->deadline)); if (!timer_pending(&q->timeout) || - time_before(expiry, q->timeout.expires)) - mod_timer(&q->timeout, expiry); + time_before(expiry, q->timeout.expires)) { + unsigned long diff = q->timeout.expires - expiry; -} + /* + * Due to added timer slack to group timers, the timer + * will often be a little in front of what we asked for. + * So apply some tolerance here too, otherwise we keep + * modifying the timer because expires for value X + * will be X + something. + */ + if (!timer_pending(&q->timeout) || (diff >= HZ / 2)) + mod_timer(&q->timeout, expiry); + } -/** - * blk_add_timer - Start timeout timer for a single request - * @req: request that is about to start running. - * - * Notes: - * Each request has its own timer, and as it is added to the queue, we - * set up the timer. When the request completes, we cancel the timer. 
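The timeout rework above does two things: blk_rq_timeout() caps how far ahead the queue timer may be armed (BLK_MAX_TIMEOUT, defined as 5 * HZ in the blk.h hunk below), and blk_add_timer() only calls mod_timer() when the new expiry beats the pending one by at least HZ / 2, so timer slack cannot cause endless re-arming. A standalone sketch of the same clamp-and-tolerance logic with jiffies and HZ mocked; the real helpers additionally use round_jiffies_up() and time_after():

    #include <stdio.h>

    #define HZ		250UL			/* mocked tick rate */
    #define BLK_MAX_TIMEOUT	(5 * HZ)	/* matches the blk.h hunk */

    static unsigned long jiffies = 100000;	/* mocked clock */

    static unsigned long blk_rq_timeout(unsigned long timeout)
    {
    	unsigned long maxt = jiffies + BLK_MAX_TIMEOUT;

    	return timeout > maxt ? maxt : timeout;	/* never arm > 5s ahead */
    }

    int main(void)
    {
    	unsigned long expiry = blk_rq_timeout(jiffies + 60 * HZ);
    	unsigned long pending = expiry + HZ;	/* timer already armed here */

    	printf("driver asked +%lu jiffies, clamped to +%lu\n",
    	       60 * HZ, expiry - jiffies);

    	/* re-arm only when the improvement is at least HZ / 2 */
    	if (pending - expiry >= HZ / 2)
    		printf("mod_timer(): new expiry wins by %lu jiffies\n",
    		       pending - expiry);
    	return 0;
    }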
- */ -void blk_add_timer(struct request *req) -{ - __blk_add_timer(req, &req->q->timeout_list); } - diff --git a/block/blk.h b/block/blk.h index 1d880f1f957f..45385e9abf6f 100644 --- a/block/blk.h +++ b/block/blk.h @@ -9,6 +9,9 @@ /* Number of requests a "batching" process may submit */ #define BLK_BATCH_REQ 32 +/* Max future timer expiry for timeouts */ +#define BLK_MAX_TIMEOUT (5 * HZ) + extern struct kmem_cache *blk_requestq_cachep; extern struct kmem_cache *request_cachep; extern struct kobj_type blk_queue_ktype; @@ -37,9 +40,9 @@ bool __blk_end_bidi_request(struct request *rq, int error, void blk_rq_timed_out_timer(unsigned long data); void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout, unsigned int *next_set); -void __blk_add_timer(struct request *req, struct list_head *timeout_list); +unsigned long blk_rq_timeout(unsigned long timeout); +void blk_add_timer(struct request *req); void blk_delete_timer(struct request *); -void blk_add_timer(struct request *); bool bio_attempt_front_merge(struct request_queue *q, struct request *req, @@ -185,6 +188,8 @@ static inline int queue_congestion_off_threshold(struct request_queue *q) return q->nr_congestion_off; } +extern int blk_update_nr_requests(struct request_queue *, unsigned int); + /* * Contribute to IO statistics IFF: * diff --git a/mm/bounce.c b/block/bounce.c index 523918b8c6dc..523918b8c6dc 100644 --- a/mm/bounce.c +++ b/block/bounce.c diff --git a/block/bsg.c b/block/bsg.c index 420a5a9f1b23..e5214c148096 100644 --- a/block/bsg.c +++ b/block/bsg.c @@ -1008,7 +1008,7 @@ int bsg_register_queue(struct request_queue *q, struct device *parent, /* * we need a proper transport to send commands, not a stacked device */ - if (!q->request_fn) + if (!queue_is_rq_based(q)) return 0; bcd = &q->bsg_dev; diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index e0985f1955e7..22dffebc7c73 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -908,7 +908,7 @@ static inline void cfq_schedule_dispatch(struct cfq_data *cfqd) { if (cfqd->busy_queues) { cfq_log(cfqd, "schedule dispatch"); - kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work); + kblockd_schedule_work(&cfqd->unplug_work); } } @@ -4460,7 +4460,7 @@ out_free: static ssize_t cfq_var_show(unsigned int var, char *page) { - return sprintf(page, "%d\n", var); + return sprintf(page, "%u\n", var); } static ssize_t diff --git a/fs/ioprio.c b/block/ioprio.c index e50170ca7c33..e50170ca7c33 100644 --- a/fs/ioprio.c +++ b/block/ioprio.c diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c index 26487972ac54..9c28a5b38042 100644 --- a/block/scsi_ioctl.c +++ b/block/scsi_ioctl.c @@ -205,10 +205,6 @@ int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm) if (capable(CAP_SYS_RAWIO)) return 0; - /* if there's no filter set, assume we're filtering everything out */ - if (!filter) - return -EPERM; - /* Anybody who can open the device can do a read-safe command */ if (test_bit(cmd[0], filter->read_ok)) return 0; diff --git a/drivers/Makefile b/drivers/Makefile index d05d81b19b50..7183b6af5dac 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -119,7 +119,7 @@ obj-$(CONFIG_SGI_SN) += sn/ obj-y += firmware/ obj-$(CONFIG_CRYPTO) += crypto/ obj-$(CONFIG_SUPERH) += sh/ -obj-$(CONFIG_ARCH_SHMOBILE_LEGACY) += sh/ +obj-$(CONFIG_ARCH_SHMOBILE) += sh/ ifndef CONFIG_ARCH_USES_GETTIMEOFFSET obj-y += clocksource/ endif diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index ab686b310100..a34a22841002 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ 
-47,6 +47,23 @@ config ACPI_SLEEP depends on SUSPEND || HIBERNATION default y +config ACPI_PROCFS_POWER + bool "Deprecated power /proc/acpi directories" + depends on PROC_FS + help + For backwards compatibility, this option allows + deprecated power /proc/acpi/ directories to exist, even when + they have been replaced by functions in /sys. + The deprecated directories (and their replacements) include: + /proc/acpi/battery/* (/sys/class/power_supply/*) + /proc/acpi/ac_adapter/* (/sys/class/power_supply/*) + This option has no effect on /proc/acpi/ directories + and functions, which do not yet exist in /sys. + This option, together with the proc directories, will be + deleted in the future. + + Say N to delete power /proc/acpi/ directories that have moved to /sys/ + config ACPI_EC_DEBUGFS tristate "EC read/write access through /sys/kernel/debug/ec" default n diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 0331f91d56e6..bce34afadcd0 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -47,6 +47,7 @@ acpi-y += sysfs.o acpi-$(CONFIG_X86) += acpi_cmos_rtc.o acpi-$(CONFIG_DEBUG_FS) += debugfs.o acpi-$(CONFIG_ACPI_NUMA) += numa.o +acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o ifdef CONFIG_ACPI_VIDEO acpi-y += video_detect.o endif diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c index 2c01c1da29ce..c67f6f5ad611 100644 --- a/drivers/acpi/ac.c +++ b/drivers/acpi/ac.c @@ -52,11 +52,39 @@ MODULE_AUTHOR("Paul Diefenbaugh"); MODULE_DESCRIPTION("ACPI AC Adapter Driver"); MODULE_LICENSE("GPL"); +static int acpi_ac_add(struct acpi_device *device); +static int acpi_ac_remove(struct acpi_device *device); +static void acpi_ac_notify(struct acpi_device *device, u32 event); + +static const struct acpi_device_id ac_device_ids[] = { + {"ACPI0003", 0}, + {"", 0}, +}; +MODULE_DEVICE_TABLE(acpi, ac_device_ids); + +#ifdef CONFIG_PM_SLEEP +static int acpi_ac_resume(struct device *dev); +#endif +static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume); + static int ac_sleep_before_get_state_ms; +static struct acpi_driver acpi_ac_driver = { + .name = "ac", + .class = ACPI_AC_CLASS, + .ids = ac_device_ids, + .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS, + .ops = { + .add = acpi_ac_add, + .remove = acpi_ac_remove, + .notify = acpi_ac_notify, + }, + .drv.pm = &acpi_ac_pm, +}; + struct acpi_ac { struct power_supply charger; - struct platform_device *pdev; + struct acpi_device * device; unsigned long long state; struct notifier_block battery_nb; }; @@ -69,10 +97,12 @@ struct acpi_ac { static int acpi_ac_get_state(struct acpi_ac *ac) { - acpi_status status; - acpi_handle handle = ACPI_HANDLE(&ac->pdev->dev); + acpi_status status = AE_OK; + + if (!ac) + return -EINVAL; - status = acpi_evaluate_integer(handle, "_PSR", NULL, + status = acpi_evaluate_integer(ac->device->handle, "_PSR", NULL, &ac->state); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, @@ -117,10 +147,9 @@ static enum power_supply_property ac_props[] = { Driver Model -------------------------------------------------------------------------- */ -static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data) +static void acpi_ac_notify(struct acpi_device *device, u32 event) { - struct acpi_ac *ac = data; - struct acpi_device *adev; + struct acpi_ac *ac = acpi_driver_data(device); if (!ac) return; @@ -143,11 +172,10 @@ static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data) msleep(ac_sleep_before_get_state_ms); acpi_ac_get_state(ac); - adev = ACPI_COMPANION(&ac->pdev->dev); - 
acpi_bus_generate_netlink_event(adev->pnp.device_class, - dev_name(&ac->pdev->dev), - event, (u32) ac->state); - acpi_notifier_call_chain(adev, event, (u32) ac->state); + acpi_bus_generate_netlink_event(device->pnp.device_class, + dev_name(&device->dev), event, + (u32) ac->state); + acpi_notifier_call_chain(device, event, (u32) ac->state); kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE); } @@ -192,49 +220,39 @@ static struct dmi_system_id ac_dmi_table[] = { {}, }; -static int acpi_ac_probe(struct platform_device *pdev) +static int acpi_ac_add(struct acpi_device *device) { int result = 0; struct acpi_ac *ac = NULL; - struct acpi_device *adev; - if (!pdev) - return -EINVAL; - adev = ACPI_COMPANION(&pdev->dev); - if (!adev) - return -ENODEV; + if (!device) + return -EINVAL; ac = kzalloc(sizeof(struct acpi_ac), GFP_KERNEL); if (!ac) return -ENOMEM; - strcpy(acpi_device_name(adev), ACPI_AC_DEVICE_NAME); - strcpy(acpi_device_class(adev), ACPI_AC_CLASS); - ac->pdev = pdev; - platform_set_drvdata(pdev, ac); + ac->device = device; + strcpy(acpi_device_name(device), ACPI_AC_DEVICE_NAME); + strcpy(acpi_device_class(device), ACPI_AC_CLASS); + device->driver_data = ac; result = acpi_ac_get_state(ac); if (result) goto end; - ac->charger.name = acpi_device_bid(adev); + ac->charger.name = acpi_device_bid(device); ac->charger.type = POWER_SUPPLY_TYPE_MAINS; ac->charger.properties = ac_props; ac->charger.num_properties = ARRAY_SIZE(ac_props); ac->charger.get_property = get_ac_property; - result = power_supply_register(&pdev->dev, &ac->charger); + result = power_supply_register(&ac->device->dev, &ac->charger); if (result) goto end; - result = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev), - ACPI_ALL_NOTIFY, acpi_ac_notify_handler, ac); - if (result) { - power_supply_unregister(&ac->charger); - goto end; - } printk(KERN_INFO PREFIX "%s [%s] (%s)\n", - acpi_device_name(adev), acpi_device_bid(adev), + acpi_device_name(device), acpi_device_bid(device), ac->state ? 
"on-line" : "off-line"); ac->battery_nb.notifier_call = acpi_ac_battery_notify; @@ -256,7 +274,7 @@ static int acpi_ac_resume(struct device *dev) if (!dev) return -EINVAL; - ac = platform_get_drvdata(to_platform_device(dev)); + ac = acpi_driver_data(to_acpi_device(dev)); if (!ac) return -EINVAL; @@ -270,19 +288,17 @@ static int acpi_ac_resume(struct device *dev) #else #define acpi_ac_resume NULL #endif -static SIMPLE_DEV_PM_OPS(acpi_ac_pm_ops, NULL, acpi_ac_resume); -static int acpi_ac_remove(struct platform_device *pdev) +static int acpi_ac_remove(struct acpi_device *device) { - struct acpi_ac *ac; + struct acpi_ac *ac = NULL; + - if (!pdev) + if (!device || !acpi_driver_data(device)) return -EINVAL; - acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev), - ACPI_ALL_NOTIFY, acpi_ac_notify_handler); + ac = acpi_driver_data(device); - ac = platform_get_drvdata(pdev); if (ac->charger.dev) power_supply_unregister(&ac->charger); unregister_acpi_notifier(&ac->battery_nb); @@ -292,23 +308,6 @@ static int acpi_ac_remove(struct platform_device *pdev) return 0; } -static const struct acpi_device_id acpi_ac_match[] = { - { "ACPI0003", 0 }, - { } -}; -MODULE_DEVICE_TABLE(acpi, acpi_ac_match); - -static struct platform_driver acpi_ac_driver = { - .probe = acpi_ac_probe, - .remove = acpi_ac_remove, - .driver = { - .name = "acpi-ac", - .owner = THIS_MODULE, - .pm = &acpi_ac_pm_ops, - .acpi_match_table = ACPI_PTR(acpi_ac_match), - }, -}; - static int __init acpi_ac_init(void) { int result; @@ -316,7 +315,7 @@ static int __init acpi_ac_init(void) if (acpi_disabled) return -ENODEV; - result = platform_driver_register(&acpi_ac_driver); + result = acpi_bus_register_driver(&acpi_ac_driver); if (result < 0) return -ENODEV; @@ -325,7 +324,7 @@ static int __init acpi_ac_init(void) static void __exit acpi_ac_exit(void) { - platform_driver_unregister(&acpi_ac_driver); + acpi_bus_unregister_driver(&acpi_ac_driver); } module_init(acpi_ac_init); module_exit(acpi_ac_exit); diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c index dbfe49e5fd63..1d4950388fa1 100644 --- a/drivers/acpi/acpi_platform.c +++ b/drivers/acpi/acpi_platform.c @@ -29,7 +29,6 @@ ACPI_MODULE_NAME("platform"); static const struct acpi_device_id acpi_platform_device_ids[] = { { "PNP0D40" }, - { "ACPI0003" }, { "VPC2004" }, { "BCM4752" }, diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index b06f5f55ada9..52c81c49cc7d 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c @@ -405,7 +405,6 @@ static int acpi_processor_add(struct acpi_device *device, goto err; pr->dev = dev; - dev->offline = pr->flags.need_hotplug_init; /* Trigger the processor driver's .probe() if present. */ if (device_attach(dev) >= 0) diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h index 49bbc71fad54..a08a448068dd 100644 --- a/drivers/acpi/acpica/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h @@ -141,9 +141,9 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE); * address. Although ACPICA adheres to the ACPI specification which * requires the use of the corresponding 64-bit address if it is non-zero, * some machines have been found to have a corrupted non-zero 64-bit - * address. Default is FALSE, do not favor the 32-bit addresses. + * address. Default is TRUE, favor the 32-bit addresses. */ -ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, FALSE); +ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, TRUE); /* * Optionally truncate I/O addresses to 16 bits. 
Provides compatibility diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c index a4702eee91a8..9fb85f38de90 100644 --- a/drivers/acpi/acpica/tbutils.c +++ b/drivers/acpi/acpica/tbutils.c @@ -461,6 +461,7 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address) u32 table_count; struct acpi_table_header *table; acpi_physical_address address; + acpi_physical_address rsdt_address; u32 length; u8 *table_entry; acpi_status status; @@ -488,11 +489,14 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address) * as per the ACPI specification. */ address = (acpi_physical_address) rsdp->xsdt_physical_address; + rsdt_address = + (acpi_physical_address) rsdp->rsdt_physical_address; table_entry_size = ACPI_XSDT_ENTRY_SIZE; } else { /* Root table is an RSDT (32-bit physical addresses) */ address = (acpi_physical_address) rsdp->rsdt_physical_address; + rsdt_address = address; table_entry_size = ACPI_RSDT_ENTRY_SIZE; } @@ -515,8 +519,7 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address) /* Fall back to the RSDT */ - address = - (acpi_physical_address) rsdp->rsdt_physical_address; + address = rsdt_address; table_entry_size = ACPI_RSDT_ENTRY_SIZE; } } diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 9a2c63b20050..6e7b2a12860d 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -36,6 +36,12 @@ #include <linux/suspend.h> #include <asm/unaligned.h> +#ifdef CONFIG_ACPI_PROCFS_POWER +#include <linux/proc_fs.h> +#include <linux/seq_file.h> +#include <asm/uaccess.h> +#endif + #include <linux/acpi.h> #include <linux/power_supply.h> @@ -64,6 +70,19 @@ static unsigned int cache_time = 1000; module_param(cache_time, uint, 0644); MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); +#ifdef CONFIG_ACPI_PROCFS_POWER +extern struct proc_dir_entry *acpi_lock_battery_dir(void); +extern void *acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir); + +enum acpi_battery_files { + info_tag = 0, + state_tag, + alarm_tag, + ACPI_BATTERY_NUMFILES, +}; + +#endif + static const struct acpi_device_id battery_device_ids[] = { {"PNP0C0A", 0}, {"", 0}, @@ -299,6 +318,14 @@ static enum power_supply_property energy_battery_props[] = { POWER_SUPPLY_PROP_SERIAL_NUMBER, }; +#ifdef CONFIG_ACPI_PROCFS_POWER +inline char *acpi_battery_units(struct acpi_battery *battery) +{ + return (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) ? + "mA" : "mW"; +} +#endif + /* -------------------------------------------------------------------------- Battery Management -------------------------------------------------------------------------- */ @@ -717,6 +744,279 @@ static void acpi_battery_refresh(struct acpi_battery *battery) } /* -------------------------------------------------------------------------- + FS Interface (/proc) + -------------------------------------------------------------------------- */ + +#ifdef CONFIG_ACPI_PROCFS_POWER +static struct proc_dir_entry *acpi_battery_dir; + +static int acpi_battery_print_info(struct seq_file *seq, int result) +{ + struct acpi_battery *battery = seq->private; + + if (result) + goto end; + + seq_printf(seq, "present: %s\n", + acpi_battery_present(battery) ? 
"yes" : "no"); + if (!acpi_battery_present(battery)) + goto end; + if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN) + seq_printf(seq, "design capacity: unknown\n"); + else + seq_printf(seq, "design capacity: %d %sh\n", + battery->design_capacity, + acpi_battery_units(battery)); + + if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN) + seq_printf(seq, "last full capacity: unknown\n"); + else + seq_printf(seq, "last full capacity: %d %sh\n", + battery->full_charge_capacity, + acpi_battery_units(battery)); + + seq_printf(seq, "battery technology: %srechargeable\n", + (!battery->technology)?"non-":""); + + if (battery->design_voltage == ACPI_BATTERY_VALUE_UNKNOWN) + seq_printf(seq, "design voltage: unknown\n"); + else + seq_printf(seq, "design voltage: %d mV\n", + battery->design_voltage); + seq_printf(seq, "design capacity warning: %d %sh\n", + battery->design_capacity_warning, + acpi_battery_units(battery)); + seq_printf(seq, "design capacity low: %d %sh\n", + battery->design_capacity_low, + acpi_battery_units(battery)); + seq_printf(seq, "cycle count: %i\n", battery->cycle_count); + seq_printf(seq, "capacity granularity 1: %d %sh\n", + battery->capacity_granularity_1, + acpi_battery_units(battery)); + seq_printf(seq, "capacity granularity 2: %d %sh\n", + battery->capacity_granularity_2, + acpi_battery_units(battery)); + seq_printf(seq, "model number: %s\n", battery->model_number); + seq_printf(seq, "serial number: %s\n", battery->serial_number); + seq_printf(seq, "battery type: %s\n", battery->type); + seq_printf(seq, "OEM info: %s\n", battery->oem_info); + end: + if (result) + seq_printf(seq, "ERROR: Unable to read battery info\n"); + return result; +} + +static int acpi_battery_print_state(struct seq_file *seq, int result) +{ + struct acpi_battery *battery = seq->private; + + if (result) + goto end; + + seq_printf(seq, "present: %s\n", + acpi_battery_present(battery) ? "yes" : "no"); + if (!acpi_battery_present(battery)) + goto end; + + seq_printf(seq, "capacity state: %s\n", + (battery->state & 0x04) ? 
"critical" : "ok"); + if ((battery->state & 0x01) && (battery->state & 0x02)) + seq_printf(seq, + "charging state: charging/discharging\n"); + else if (battery->state & 0x01) + seq_printf(seq, "charging state: discharging\n"); + else if (battery->state & 0x02) + seq_printf(seq, "charging state: charging\n"); + else + seq_printf(seq, "charging state: charged\n"); + + if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN) + seq_printf(seq, "present rate: unknown\n"); + else + seq_printf(seq, "present rate: %d %s\n", + battery->rate_now, acpi_battery_units(battery)); + + if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN) + seq_printf(seq, "remaining capacity: unknown\n"); + else + seq_printf(seq, "remaining capacity: %d %sh\n", + battery->capacity_now, acpi_battery_units(battery)); + if (battery->voltage_now == ACPI_BATTERY_VALUE_UNKNOWN) + seq_printf(seq, "present voltage: unknown\n"); + else + seq_printf(seq, "present voltage: %d mV\n", + battery->voltage_now); + end: + if (result) + seq_printf(seq, "ERROR: Unable to read battery state\n"); + + return result; +} + +static int acpi_battery_print_alarm(struct seq_file *seq, int result) +{ + struct acpi_battery *battery = seq->private; + + if (result) + goto end; + + if (!acpi_battery_present(battery)) { + seq_printf(seq, "present: no\n"); + goto end; + } + seq_printf(seq, "alarm: "); + if (!battery->alarm) + seq_printf(seq, "unsupported\n"); + else + seq_printf(seq, "%u %sh\n", battery->alarm, + acpi_battery_units(battery)); + end: + if (result) + seq_printf(seq, "ERROR: Unable to read battery alarm\n"); + return result; +} + +static ssize_t acpi_battery_write_alarm(struct file *file, + const char __user * buffer, + size_t count, loff_t * ppos) +{ + int result = 0; + char alarm_string[12] = { '\0' }; + struct seq_file *m = file->private_data; + struct acpi_battery *battery = m->private; + + if (!battery || (count > sizeof(alarm_string) - 1)) + return -EINVAL; + if (!acpi_battery_present(battery)) { + result = -ENODEV; + goto end; + } + if (copy_from_user(alarm_string, buffer, count)) { + result = -EFAULT; + goto end; + } + alarm_string[count] = '\0'; + battery->alarm = simple_strtol(alarm_string, NULL, 0); + result = acpi_battery_set_alarm(battery); + end: + if (!result) + return count; + return result; +} + +typedef int(*print_func)(struct seq_file *seq, int result); + +static print_func acpi_print_funcs[ACPI_BATTERY_NUMFILES] = { + acpi_battery_print_info, + acpi_battery_print_state, + acpi_battery_print_alarm, +}; + +static int acpi_battery_read(int fid, struct seq_file *seq) +{ + struct acpi_battery *battery = seq->private; + int result = acpi_battery_update(battery); + return acpi_print_funcs[fid](seq, result); +} + +#define DECLARE_FILE_FUNCTIONS(_name) \ +static int acpi_battery_read_##_name(struct seq_file *seq, void *offset) \ +{ \ + return acpi_battery_read(_name##_tag, seq); \ +} \ +static int acpi_battery_##_name##_open_fs(struct inode *inode, struct file *file) \ +{ \ + return single_open(file, acpi_battery_read_##_name, PDE_DATA(inode)); \ +} + +DECLARE_FILE_FUNCTIONS(info); +DECLARE_FILE_FUNCTIONS(state); +DECLARE_FILE_FUNCTIONS(alarm); + +#undef DECLARE_FILE_FUNCTIONS + +#define FILE_DESCRIPTION_RO(_name) \ + { \ + .name = __stringify(_name), \ + .mode = S_IRUGO, \ + .ops = { \ + .open = acpi_battery_##_name##_open_fs, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ + .owner = THIS_MODULE, \ + }, \ + } + +#define FILE_DESCRIPTION_RW(_name) \ + { \ + .name = __stringify(_name), \ + .mode = 
S_IFREG | S_IRUGO | S_IWUSR, \ + .ops = { \ + .open = acpi_battery_##_name##_open_fs, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .write = acpi_battery_write_##_name, \ + .release = single_release, \ + .owner = THIS_MODULE, \ + }, \ + } + +static const struct battery_file { + struct file_operations ops; + umode_t mode; + const char *name; +} acpi_battery_file[] = { + FILE_DESCRIPTION_RO(info), + FILE_DESCRIPTION_RO(state), + FILE_DESCRIPTION_RW(alarm), +}; + +#undef FILE_DESCRIPTION_RO +#undef FILE_DESCRIPTION_RW + +static int acpi_battery_add_fs(struct acpi_device *device) +{ + struct proc_dir_entry *entry = NULL; + int i; + + printk(KERN_WARNING PREFIX "Deprecated procfs I/F for battery is loaded," + " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n"); + if (!acpi_device_dir(device)) { + acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), + acpi_battery_dir); + if (!acpi_device_dir(device)) + return -ENODEV; + } + + for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) { + entry = proc_create_data(acpi_battery_file[i].name, + acpi_battery_file[i].mode, + acpi_device_dir(device), + &acpi_battery_file[i].ops, + acpi_driver_data(device)); + if (!entry) + return -ENODEV; + } + return 0; +} + +static void acpi_battery_remove_fs(struct acpi_device *device) +{ + int i; + if (!acpi_device_dir(device)) + return; + for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) + remove_proc_entry(acpi_battery_file[i].name, + acpi_device_dir(device)); + + remove_proc_entry(acpi_device_bid(device), acpi_battery_dir); + acpi_device_dir(device) = NULL; +} + +#endif + +/* -------------------------------------------------------------------------- Driver Interface -------------------------------------------------------------------------- */ @@ -790,6 +1090,15 @@ static int acpi_battery_add(struct acpi_device *device) result = acpi_battery_update(battery); if (result) goto fail; +#ifdef CONFIG_ACPI_PROCFS_POWER + result = acpi_battery_add_fs(device); +#endif + if (result) { +#ifdef CONFIG_ACPI_PROCFS_POWER + acpi_battery_remove_fs(device); +#endif + goto fail; + } printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n", ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device), @@ -816,6 +1125,9 @@ static int acpi_battery_remove(struct acpi_device *device) return -EINVAL; battery = acpi_driver_data(device); unregister_pm_notifier(&battery->pm_nb); +#ifdef CONFIG_ACPI_PROCFS_POWER + acpi_battery_remove_fs(device); +#endif sysfs_remove_battery(battery); mutex_destroy(&battery->lock); mutex_destroy(&battery->sysfs_lock); @@ -866,7 +1178,19 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie) if (dmi_check_system(bat_dmi_table)) battery_bix_broken_package = 1; - acpi_bus_register_driver(&acpi_battery_driver); + +#ifdef CONFIG_ACPI_PROCFS_POWER + acpi_battery_dir = acpi_lock_battery_dir(); + if (!acpi_battery_dir) + return; +#endif + if (acpi_bus_register_driver(&acpi_battery_driver) < 0) { +#ifdef CONFIG_ACPI_PROCFS_POWER + acpi_unlock_battery_dir(acpi_battery_dir); +#endif + return; + } + return; } static int __init acpi_battery_init(void) @@ -878,6 +1202,9 @@ static int __init acpi_battery_init(void) static void __exit acpi_battery_exit(void) { acpi_bus_unregister_driver(&acpi_battery_driver); +#ifdef CONFIG_ACPI_PROCFS_POWER + acpi_unlock_battery_dir(acpi_battery_dir); +#endif } module_init(acpi_battery_init); diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c index afec4526c48a..3d8413d02a97 100644 --- a/drivers/acpi/blacklist.c +++ b/drivers/acpi/blacklist.c @@ -314,6 +314,14 
@@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = { DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"), }, }, + { + .callback = dmi_disable_osi_win8, + .ident = "Dell Inspiron 7737", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7737"), + }, + }, /* * BIOS invocation of _OSI(Linux) is almost always a BIOS bug. @@ -374,6 +382,19 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = { DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T500"), }, }, + /* + * Without this quirk, this EEEpc exports a non-working WMI interface; + * with it, it exports a working "good old" eeepc_laptop interface, + * fixing both brightness control and rfkill. + */ + { + .callback = dmi_enable_osi_linux, + .ident = "Asus EEE PC 1015PX", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "1015PX"), + }, + }, {} }; diff --git a/drivers/acpi/cm_sbs.c b/drivers/acpi/cm_sbs.c new file mode 100644 index 000000000000..6c9ee68e46fb --- /dev/null +++ b/drivers/acpi/cm_sbs.c @@ -0,0 +1,105 @@ +/* + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/acpi.h> +#include <linux/types.h> +#include <linux/proc_fs.h> +#include <linux/seq_file.h> +#include <acpi/acpi_bus.h> +#include <acpi/acpi_drivers.h> + +#define PREFIX "ACPI: " + +ACPI_MODULE_NAME("cm_sbs"); +#define ACPI_AC_CLASS "ac_adapter" +#define ACPI_BATTERY_CLASS "battery" +#define _COMPONENT ACPI_SBS_COMPONENT +static struct proc_dir_entry *acpi_ac_dir; +static struct proc_dir_entry *acpi_battery_dir; + +static DEFINE_MUTEX(cm_sbs_mutex); + +static int lock_ac_dir_cnt; +static int lock_battery_dir_cnt; + +struct proc_dir_entry *acpi_lock_ac_dir(void) +{ + mutex_lock(&cm_sbs_mutex); + if (!acpi_ac_dir) + acpi_ac_dir = proc_mkdir(ACPI_AC_CLASS, acpi_root_dir); + if (acpi_ac_dir) { + lock_ac_dir_cnt++; + } else { + printk(KERN_ERR PREFIX + "Cannot create %s\n", ACPI_AC_CLASS); + } + mutex_unlock(&cm_sbs_mutex); + return acpi_ac_dir; +} +EXPORT_SYMBOL(acpi_lock_ac_dir); + +void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir_param) +{ + mutex_lock(&cm_sbs_mutex); + if (acpi_ac_dir_param) + lock_ac_dir_cnt--; + if (lock_ac_dir_cnt == 0 && acpi_ac_dir_param && acpi_ac_dir) { + remove_proc_entry(ACPI_AC_CLASS, acpi_root_dir); + acpi_ac_dir = NULL; + } + mutex_unlock(&cm_sbs_mutex); +} +EXPORT_SYMBOL(acpi_unlock_ac_dir); + +struct proc_dir_entry *acpi_lock_battery_dir(void) +{ + mutex_lock(&cm_sbs_mutex); + if (!acpi_battery_dir) { + acpi_battery_dir = + proc_mkdir(ACPI_BATTERY_CLASS, acpi_root_dir); + } + if (acpi_battery_dir) { + lock_battery_dir_cnt++; + } else { + printk(KERN_ERR PREFIX + "Cannot create %s\n", ACPI_BATTERY_CLASS); + } + mutex_unlock(&cm_sbs_mutex); + return acpi_battery_dir; +} +EXPORT_SYMBOL(acpi_lock_battery_dir); + +void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir_param) +{ + mutex_lock(&cm_sbs_mutex); + if (acpi_battery_dir_param) + lock_battery_dir_cnt--; + if (lock_battery_dir_cnt == 0 && acpi_battery_dir_param + && acpi_battery_dir) { + remove_proc_entry(ACPI_BATTERY_CLASS, acpi_root_dir); + acpi_battery_dir = NULL; + } + mutex_unlock(&cm_sbs_mutex); + return; +} +EXPORT_SYMBOL(acpi_unlock_battery_dir); diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index c1e31a41f949..25bbc55dca89 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c @@ -1278,8 +1278,8 @@ static int __init acpi_thermal_init(void) static void __exit acpi_thermal_exit(void) { - destroy_workqueue(acpi_thermal_pm_queue); acpi_bus_unregister_driver(&acpi_thermal_driver); + destroy_workqueue(acpi_thermal_pm_queue); return; } diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 8b6990e417ec..f8bc5a755dda 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -457,10 +457,10 @@ static struct dmi_system_id video_dmi_table[] __initdata = { }, { .callback = video_set_use_native_backlight, - .ident = "ThinkPad T430s", + .ident = "ThinkPad T430 and T430s", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430s"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430"), }, }, { @@ -472,7 +472,7 @@ static struct dmi_system_id video_dmi_table[] __initdata = { }, }, { - .callback = video_set_use_native_backlight, + .callback = video_set_use_native_backlight, .ident = "ThinkPad X1 Carbon", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), @@ -500,7 +500,7 @@ static struct dmi_system_id video_dmi_table[] __initdata = { 
.ident = "Dell Inspiron 7520", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_VERSION, "Inspiron 7520"), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7520"), }, }, { @@ -513,6 +513,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = { }, { .callback = video_set_use_native_backlight, + .ident = "Acer Aspire 5742G", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5742G"), + }, + }, + { + .callback = video_set_use_native_backlight, .ident = "Acer Aspire V5-431", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index c2706047337f..0033fafc470b 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -815,7 +815,7 @@ config PATA_AT32 config PATA_AT91 tristate "PATA support for AT91SAM9260" - depends on ARM && ARCH_AT91 + depends on ARM && SOC_AT91SAM9 help This option enables support for IDE devices on the Atmel AT91SAM9260 SoC. diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 71e15b73513d..60707814a84b 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -1115,6 +1115,17 @@ static bool ahci_broken_online(struct pci_dev *pdev) return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff); } +static bool ahci_broken_devslp(struct pci_dev *pdev) +{ + /* device with broken DEVSLP but still showing SDS capability */ + static const struct pci_device_id ids[] = { + { PCI_VDEVICE(INTEL, 0x0f23)}, /* Valleyview SoC */ + {} + }; + + return pci_match_id(ids, pdev); +} + #ifdef CONFIG_ATA_ACPI static void ahci_gtf_filter_workaround(struct ata_host *host) { @@ -1364,6 +1375,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar]; + /* must set flag prior to save config in order to take effect */ + if (ahci_broken_devslp(pdev)) + hpriv->flags |= AHCI_HFLAG_NO_DEVSLP; + /* save initial config */ ahci_pci_save_initial_config(pdev, hpriv); diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index b5eb886da226..af63c75c2001 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h @@ -236,6 +236,7 @@ enum { port start (wait until error-handling stage) */ AHCI_HFLAG_MULTI_MSI = (1 << 16), /* multiple PCI MSIs */ + AHCI_HFLAG_NO_DEVSLP = (1 << 17), /* no device sleep */ /* ap->flags bits */ diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c index 497c7abe1c7d..8befeb69eeb1 100644 --- a/drivers/ata/ahci_imx.c +++ b/drivers/ata/ahci_imx.c @@ -29,9 +29,25 @@ #include "ahci.h" enum { - PORT_PHY_CTL = 0x178, /* Port0 PHY Control */ - PORT_PHY_CTL_PDDQ_LOC = 0x100000, /* PORT_PHY_CTL bits */ - HOST_TIMER1MS = 0xe0, /* Timer 1-ms */ + /* Timer 1-ms Register */ + IMX_TIMER1MS = 0x00e0, + /* Port0 PHY Control Register */ + IMX_P0PHYCR = 0x0178, + IMX_P0PHYCR_TEST_PDDQ = 1 << 20, + IMX_P0PHYCR_CR_READ = 1 << 19, + IMX_P0PHYCR_CR_WRITE = 1 << 18, + IMX_P0PHYCR_CR_CAP_DATA = 1 << 17, + IMX_P0PHYCR_CR_CAP_ADDR = 1 << 16, + /* Port0 PHY Status Register */ + IMX_P0PHYSR = 0x017c, + IMX_P0PHYSR_CR_ACK = 1 << 18, + IMX_P0PHYSR_CR_DATA_OUT = 0xffff << 0, + /* Lane0 Output Status Register */ + IMX_LANE0_OUT_STAT = 0x2003, + IMX_LANE0_OUT_STAT_RX_PLL_STATE = 1 << 1, + /* Clock Reset Register */ + IMX_CLOCK_RESET = 0x7f3f, + IMX_CLOCK_RESET_RESET = 1 << 0, }; enum ahci_imx_type { @@ -54,9 +70,149 @@ MODULE_PARM_DESC(hotplug, "AHCI IMX hot-plug support (0=Don't support, 1=support static void ahci_imx_host_stop(struct ata_host *host); +static int imx_phy_crbit_assert(void __iomem 
*mmio, u32 bit, bool assert) +{ + int timeout = 10; + u32 crval; + u32 srval; + + /* Assert or deassert the bit */ + crval = readl(mmio + IMX_P0PHYCR); + if (assert) + crval |= bit; + else + crval &= ~bit; + writel(crval, mmio + IMX_P0PHYCR); + + /* Wait for the cr_ack signal */ + do { + srval = readl(mmio + IMX_P0PHYSR); + if ((assert ? srval : ~srval) & IMX_P0PHYSR_CR_ACK) + break; + usleep_range(100, 200); + } while (--timeout); + + return timeout ? 0 : -ETIMEDOUT; +} + +static int imx_phy_reg_addressing(u16 addr, void __iomem *mmio) +{ + u32 crval = addr; + int ret; + + /* Supply the address on cr_data_in */ + writel(crval, mmio + IMX_P0PHYCR); + + /* Assert the cr_cap_addr signal */ + ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_ADDR, true); + if (ret) + return ret; + + /* Deassert cr_cap_addr */ + ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_ADDR, false); + if (ret) + return ret; + + return 0; +} + +static int imx_phy_reg_write(u16 val, void __iomem *mmio) +{ + u32 crval = val; + int ret; + + /* Supply the data on cr_data_in */ + writel(crval, mmio + IMX_P0PHYCR); + + /* Assert the cr_cap_data signal */ + ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_DATA, true); + if (ret) + return ret; + + /* Deassert cr_cap_data */ + ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_DATA, false); + if (ret) + return ret; + + if (val & IMX_CLOCK_RESET_RESET) { + /* + * In case we're resetting the phy, it's unable to acknowledge, + * so we return immediately here. + */ + crval |= IMX_P0PHYCR_CR_WRITE; + writel(crval, mmio + IMX_P0PHYCR); + goto out; + } + + /* Assert the cr_write signal */ + ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_WRITE, true); + if (ret) + return ret; + + /* Deassert cr_write */ + ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_WRITE, false); + if (ret) + return ret; + +out: + return 0; +} + +static int imx_phy_reg_read(u16 *val, void __iomem *mmio) +{ + int ret; + + /* Assert the cr_read signal */ + ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_READ, true); + if (ret) + return ret; + + /* Capture the data from cr_data_out[] */ + *val = readl(mmio + IMX_P0PHYSR) & IMX_P0PHYSR_CR_DATA_OUT; + + /* Deassert cr_read */ + ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_READ, false); + if (ret) + return ret; + + return 0; +} + +static int imx_sata_phy_reset(struct ahci_host_priv *hpriv) +{ + void __iomem *mmio = hpriv->mmio; + int timeout = 10; + u16 val; + int ret; + + /* Reset SATA PHY by setting RESET bit of PHY register CLOCK_RESET */ + ret = imx_phy_reg_addressing(IMX_CLOCK_RESET, mmio); + if (ret) + return ret; + ret = imx_phy_reg_write(IMX_CLOCK_RESET_RESET, mmio); + if (ret) + return ret; + + /* Wait for PHY RX_PLL to be stable */ + do { + usleep_range(100, 200); + ret = imx_phy_reg_addressing(IMX_LANE0_OUT_STAT, mmio); + if (ret) + return ret; + ret = imx_phy_reg_read(&val, mmio); + if (ret) + return ret; + if (val & IMX_LANE0_OUT_STAT_RX_PLL_STATE) + break; + } while (--timeout); + + return timeout ? 
0 : -ETIMEDOUT; +} + static int imx_sata_enable(struct ahci_host_priv *hpriv) { struct imx_ahci_priv *imxpriv = hpriv->plat_data; + struct device *dev = &imxpriv->ahci_pdev->dev; int ret; if (imxpriv->no_device) @@ -101,6 +257,14 @@ static int imx_sata_enable(struct ahci_host_priv *hpriv) regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13, IMX6Q_GPR13_SATA_MPLL_CLK_EN, IMX6Q_GPR13_SATA_MPLL_CLK_EN); + + usleep_range(100, 200); + + ret = imx_sata_phy_reset(hpriv); + if (ret) { + dev_err(dev, "failed to reset phy: %d\n", ret); + goto disable_regulator; + } } usleep_range(1000, 2000); @@ -156,8 +320,8 @@ static void ahci_imx_error_handler(struct ata_port *ap) * without full reset once the pddq mode is enabled making it * impossible to use as part of libata LPM. */ - reg_val = readl(mmio + PORT_PHY_CTL); - writel(reg_val | PORT_PHY_CTL_PDDQ_LOC, mmio + PORT_PHY_CTL); + reg_val = readl(mmio + IMX_P0PHYCR); + writel(reg_val | IMX_P0PHYCR_TEST_PDDQ, mmio + IMX_P0PHYCR); imx_sata_disable(hpriv); imxpriv->no_device = true; } @@ -217,6 +381,7 @@ static int imx_ahci_probe(struct platform_device *pdev) if (!imxpriv) return -ENOMEM; + imxpriv->ahci_pdev = pdev; imxpriv->no_device = false; imxpriv->first_time = true; imxpriv->type = (enum ahci_imx_type)of_id->data; @@ -248,7 +413,7 @@ static int imx_ahci_probe(struct platform_device *pdev) /* * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL, - * and IP vendor specific register HOST_TIMER1MS. + * and IP vendor specific register IMX_TIMER1MS. * Configure CAP_SSS (support staggered spin up). * Implement the port0. * Get the ahb clock rate, and configure the TIMER1MS register. @@ -265,7 +430,7 @@ static int imx_ahci_probe(struct platform_device *pdev) } reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000; - writel(reg_val, hpriv->mmio + HOST_TIMER1MS); + writel(reg_val, hpriv->mmio + IMX_TIMER1MS); ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info, 0, 0); if (ret) diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 6bd4f660b4e1..b9861453fc81 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -452,6 +452,13 @@ void ahci_save_initial_config(struct device *dev, cap &= ~HOST_CAP_SNTF; } + if ((cap2 & HOST_CAP2_SDS) && (hpriv->flags & AHCI_HFLAG_NO_DEVSLP)) { + dev_info(dev, + "controller can't do DEVSLP, turning off\n"); + cap2 &= ~HOST_CAP2_SDS; + cap2 &= ~HOST_CAP2_SADM; + } + if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) { dev_info(dev, "controller can do FBS, turning on CAP_FBS\n"); cap |= HOST_CAP_FBS; diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 943cc8b83e59..ea83828bfea9 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -6314,6 +6314,8 @@ int ata_host_activate(struct ata_host *host, int irq, static void ata_port_detach(struct ata_port *ap) { unsigned long flags; + struct ata_link *link; + struct ata_device *dev; if (!ap->ops->error_handler) goto skip_eh; @@ -6333,6 +6335,13 @@ static void ata_port_detach(struct ata_port *ap) cancel_delayed_work_sync(&ap->hotplug_task); skip_eh: + /* clean up zpodd on port removal */ + ata_for_each_link(link, ap, HOST_FIRST) { + ata_for_each_dev(dev, link, ALL) { + if (zpodd_dev_enabled(dev)) + zpodd_exit(dev); + } + } if (ap->pmp_link) { int i; for (i = 0; i < SATA_PMP_MAX_PORTS; i++) diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c index bc256b641027..7d6e84a51424 100644 --- a/drivers/base/dma-coherent.c +++ b/drivers/base/dma-coherent.c @@ -10,13 +10,13 @@ struct dma_coherent_mem {
void *virt_base; dma_addr_t device_base; - phys_addr_t pfn_base; + unsigned long pfn_base; int size; int flags; unsigned long *bitmap; }; -int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, +int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags) { void __iomem *mem_base = NULL; @@ -32,7 +32,7 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */ - mem_base = ioremap(bus_addr, size); + mem_base = ioremap(phys_addr, size); if (!mem_base) goto out; @@ -45,7 +45,7 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, dev->dma_mem->virt_base = mem_base; dev->dma_mem->device_base = device_addr; - dev->dma_mem->pfn_base = PFN_DOWN(bus_addr); + dev->dma_mem->pfn_base = PFN_DOWN(phys_addr); dev->dma_mem->size = pages; dev->dma_mem->flags = flags; @@ -208,7 +208,7 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, *ret = -ENXIO; if (off < count && user_count <= count - off) { - unsigned pfn = mem->pfn_base + start + off; + unsigned long pfn = mem->pfn_base + start + off; *ret = remap_pfn_range(vma, vma->vm_start, pfn, user_count << PAGE_SHIFT, vma->vm_page_prot); diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c index 0ce39a33b3c2..6cd08e145bfa 100644 --- a/drivers/base/dma-mapping.c +++ b/drivers/base/dma-mapping.c @@ -175,7 +175,7 @@ static void dmam_coherent_decl_release(struct device *dev, void *res) /** * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory() * @dev: Device to declare coherent memory for - * @bus_addr: Bus address of coherent memory to be declared + * @phys_addr: Physical address of coherent memory to be declared * @device_addr: Device address of coherent memory to be declared * @size: Size of coherent memory to be declared * @flags: Flags @@ -185,7 +185,7 @@ static void dmam_coherent_decl_release(struct device *dev, void *res) * RETURNS: * 0 on success, -errno on failure. 
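
Since the first argument is now explicitly a CPU physical address, a caller can feed it the start of a memory resource directly. A minimal sketch, assuming a hypothetical platform driver whose second IORESOURCE_MEM entry is device-local SRAM (the flag choice is illustrative):

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 1);

	if (!res)
		return -ENODEV;

	/*
	 * phys_addr is where the CPU sees the memory; device_addr is where
	 * the device sees it (the two happen to coincide here). Return value
	 * as documented in the kernel-doc above; the managed variant undoes
	 * the declaration automatically when the driver detaches.
	 */
	return dmam_declare_coherent_memory(&pdev->dev, res->start, res->start,
					    resource_size(res),
					    DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE);
}
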
*/ -int dmam_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, +int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags) { void *res; @@ -195,7 +195,7 @@ int dmam_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, if (!res) return -ENOMEM; - rc = dma_declare_coherent_memory(dev, bus_addr, device_addr, size, + rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size, flags); if (rc == 0) devres_add(dev, res); diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index 748dea4f34dc..758da2287d9a 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c @@ -1406,7 +1406,7 @@ next_segment: track = block / (floppy->dtype->sects * floppy->type->sect_mult); sector = block % (floppy->dtype->sects * floppy->type->sect_mult); - data = rq->buffer + 512 * cnt; + data = bio_data(rq->bio) + 512 * cnt; #ifdef DEBUG printk("access to track %d, sector %d, with buffer at " "0x%08lx\n", track, sector, data); diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index 96b629e1f0c9..2104b1b4ccda 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c @@ -1484,7 +1484,7 @@ repeat: ReqCnt = 0; ReqCmd = rq_data_dir(fd_request); ReqBlock = blk_rq_pos(fd_request); - ReqBuffer = fd_request->buffer; + ReqBuffer = bio_data(fd_request->bio); setup_req_params( drive ); do_fd_action( drive ); @@ -1952,7 +1952,7 @@ static int __init atari_floppy_init (void) goto Enomem; } TrackBuffer = DMABuffer + 512; - PhysDMABuffer = virt_to_phys(DMABuffer); + PhysDMABuffer = atari_stram_to_phys(DMABuffer); PhysTrackBuffer = virt_to_phys(TrackBuffer); BufferDrive = BufferSide = BufferTrack = -1; diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 73894ca33956..4595c22f33f7 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -4080,7 +4080,7 @@ static void cciss_interrupt_mode(ctlr_info_t *h) goto default_int_mode; if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) { - err = pci_enable_msix(h->pdev, cciss_msix_entries, 4); + err = pci_enable_msix_exact(h->pdev, cciss_msix_entries, 4); if (!err) { h->intr[0] = cciss_msix_entries[0].vector; h->intr[1] = cciss_msix_entries[1].vector; @@ -4088,10 +4088,6 @@ static void cciss_interrupt_mode(ctlr_info_t *h) h->intr[3] = cciss_msix_entries[3].vector; h->msix_vector = 1; return; - } - if (err > 0) { - dev_warn(&h->pdev->dev, - "only %d MSI-X vectors available\n", err); } else { dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err); diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index 90ae4ba8f9ee..05a1780ffa85 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c @@ -29,7 +29,6 @@ #include <linux/drbd_limits.h> #include <linux/dynamic_debug.h> #include "drbd_int.h" -#include "drbd_wrappers.h" enum al_transaction_types { @@ -204,7 +203,7 @@ int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bd BUG_ON(!bdev->md_bdev); - drbd_dbg(device, "meta_data io: %s [%d]:%s(,%llus,%s) %pS\n", + dynamic_drbd_dbg(device, "meta_data io: %s [%d]:%s(,%llus,%s) %pS\n", current->comm, current->pid, __func__, (unsigned long long)sector, (rw & WRITE) ? 
"WRITE" : "READ", (void*)_RET_IP_ ); @@ -276,7 +275,6 @@ bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval return _al_get(device, first, true); } -static bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i) { /* for bios crossing activity log extent boundaries, @@ -846,7 +844,7 @@ void __drbd_set_in_sync(struct drbd_device *device, sector_t sector, int size, int wake_up = 0; unsigned long flags; - if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) { + if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_DISCARD_SIZE) { drbd_err(device, "drbd_set_in_sync: sector=%llus size=%d nonsense!\n", (unsigned long long)sector, size); return; @@ -920,7 +918,7 @@ int __drbd_set_out_of_sync(struct drbd_device *device, sector_t sector, int size if (size == 0) return 0; - if (size < 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) { + if (size < 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_DISCARD_SIZE) { drbd_err(device, "sector: %llus, size: %d\n", (unsigned long long)sector, size); return 0; @@ -1023,8 +1021,7 @@ int drbd_rs_begin_io(struct drbd_device *device, sector_t sector) unsigned int enr = BM_SECT_TO_EXT(sector); struct bm_extent *bm_ext; int i, sig; - int sa = 200; /* Step aside 200 times, then grab the extent and let app-IO wait. - 200 times -> 20 seconds. */ + bool sa; retry: sig = wait_event_interruptible(device->al_wait, @@ -1035,12 +1032,15 @@ retry: if (test_bit(BME_LOCKED, &bm_ext->flags)) return 0; + /* step aside only while we are above c-min-rate; unless disabled. */ + sa = drbd_rs_c_min_rate_throttle(device); + for (i = 0; i < AL_EXT_PER_BM_SECT; i++) { sig = wait_event_interruptible(device->al_wait, !_is_in_al(device, enr * AL_EXT_PER_BM_SECT + i) || - test_bit(BME_PRIORITY, &bm_ext->flags)); + (sa && test_bit(BME_PRIORITY, &bm_ext->flags))); - if (sig || (test_bit(BME_PRIORITY, &bm_ext->flags) && sa)) { + if (sig || (sa && test_bit(BME_PRIORITY, &bm_ext->flags))) { spin_lock_irq(&device->al_lock); if (lc_put(device->resync, &bm_ext->lce) == 0) { bm_ext->flags = 0; /* clears BME_NO_WRITES and eventually BME_PRIORITY */ @@ -1052,9 +1052,6 @@ retry: return -EINTR; if (schedule_timeout_interruptible(HZ/10)) return -EINTR; - if (sa && --sa == 0) - drbd_warn(device, "drbd_rs_begin_io() stepped aside for 20sec." - "Resync stalled?\n"); goto retry; } } @@ -1288,7 +1285,7 @@ void drbd_rs_failed_io(struct drbd_device *device, sector_t sector, int size) sector_t esector, nr_sectors; int wake_up = 0; - if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) { + if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_DISCARD_SIZE) { drbd_err(device, "drbd_rs_failed_io: sector=%llus size=%d nonsense!\n", (unsigned long long)sector, size); return; diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index e7093d4291f1..a76ceb344d64 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -382,6 +382,12 @@ enum { __EE_CALL_AL_COMPLETE_IO, __EE_MAY_SET_IN_SYNC, + /* is this a TRIM aka REQ_DISCARD? */ + __EE_IS_TRIM, + /* our lower level cannot handle trim, + * and we want to fall back to zeroout instead */ + __EE_IS_TRIM_USE_ZEROOUT, + /* In case a barrier failed, * we need to resubmit without the barrier flag. 
*/ __EE_RESUBMITTED, @@ -405,7 +411,9 @@ enum { }; #define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO) #define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC) -#define EE_RESUBMITTED (1<<__EE_RESUBMITTED) +#define EE_IS_TRIM (1<<__EE_IS_TRIM) +#define EE_IS_TRIM_USE_ZEROOUT (1<<__EE_IS_TRIM_USE_ZEROOUT) +#define EE_RESUBMITTED (1<<__EE_RESUBMITTED) #define EE_WAS_ERROR (1<<__EE_WAS_ERROR) #define EE_HAS_DIGEST (1<<__EE_HAS_DIGEST) #define EE_RESTART_REQUESTS (1<<__EE_RESTART_REQUESTS) @@ -579,6 +587,7 @@ struct drbd_resource { struct list_head resources; struct res_opts res_opts; struct mutex conf_update; /* mutex for ready-copy-update of net_conf and disk_conf */ + struct mutex adm_mutex; /* mutex to serialize administrative requests */ spinlock_t req_lock; unsigned susp:1; /* IO suspended by user */ @@ -609,6 +618,7 @@ struct drbd_connection { struct drbd_socket data; /* data/barrier/cstate/parameter packets */ struct drbd_socket meta; /* ping/ack (metadata) packets */ int agreed_pro_version; /* actually used protocol version */ + u32 agreed_features; unsigned long last_received; /* in jiffies, either socket */ unsigned int ko_count; @@ -814,6 +824,28 @@ struct drbd_device { struct submit_worker submit; }; +struct drbd_config_context { + /* assigned from drbd_genlmsghdr */ + unsigned int minor; + /* assigned from request attributes, if present */ + unsigned int volume; +#define VOLUME_UNSPECIFIED (-1U) + /* pointer into the request skb, + * limited lifetime! */ + char *resource_name; + struct nlattr *my_addr; + struct nlattr *peer_addr; + + /* reply buffer */ + struct sk_buff *reply_skb; + /* pointer into reply buffer */ + struct drbd_genlmsghdr *reply_dh; + /* resolved from attributes, if possible */ + struct drbd_device *device; + struct drbd_resource *resource; + struct drbd_connection *connection; +}; + static inline struct drbd_device *minor_to_device(unsigned int minor) { return (struct drbd_device *)idr_find(&drbd_devices, minor); @@ -821,7 +853,7 @@ static inline struct drbd_device *minor_to_device(unsigned int minor) static inline struct drbd_peer_device *first_peer_device(struct drbd_device *device) { - return list_first_entry(&device->peer_devices, struct drbd_peer_device, peer_devices); + return list_first_entry_or_null(&device->peer_devices, struct drbd_peer_device, peer_devices); } #define for_each_resource(resource, _resources) \ @@ -1139,6 +1171,12 @@ struct bm_extent { #define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* Header 80 only allows packets up to 32KiB data */ #define DRBD_MAX_BIO_SIZE_P95 (1U << 17) /* Protocol 95 to 99 allows bios up to 128KiB */ +/* For now, don't allow more than one activity log extent worth of data + * to be discarded in one go. 
We may need to rework drbd_al_begin_io() + * to allow for even larger discard ranges */ +#define DRBD_MAX_DISCARD_SIZE AL_EXTENT_SIZE +#define DRBD_MAX_DISCARD_SECTORS (DRBD_MAX_DISCARD_SIZE >> 9) + extern int drbd_bm_init(struct drbd_device *device); extern int drbd_bm_resize(struct drbd_device *device, sector_t sectors, int set_new_bits); extern void drbd_bm_cleanup(struct drbd_device *device); @@ -1229,9 +1267,9 @@ extern struct bio *bio_alloc_drbd(gfp_t gfp_mask); extern rwlock_t global_state_lock; extern int conn_lowest_minor(struct drbd_connection *connection); -enum drbd_ret_code drbd_create_device(struct drbd_resource *resource, unsigned int minor, int vnr); +extern enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor); extern void drbd_destroy_device(struct kref *kref); -extern void drbd_delete_device(struct drbd_device *mdev); +extern void drbd_delete_device(struct drbd_device *device); extern struct drbd_resource *drbd_create_resource(const char *name); extern void drbd_free_resource(struct drbd_resource *resource); @@ -1257,7 +1295,7 @@ extern int is_valid_ar_handle(struct drbd_request *, sector_t); /* drbd_nl.c */ -extern int drbd_msg_put_info(const char *info); +extern int drbd_msg_put_info(struct sk_buff *skb, const char *info); extern void drbd_suspend_io(struct drbd_device *device); extern void drbd_resume_io(struct drbd_device *device); extern char *ppsize(char *buf, unsigned long long size); @@ -1283,6 +1321,10 @@ extern void conn_try_outdate_peer_async(struct drbd_connection *connection); extern int drbd_khelper(struct drbd_device *device, char *cmd); /* drbd_worker.c */ +/* bi_end_io handlers */ +extern void drbd_md_io_complete(struct bio *bio, int error); +extern void drbd_peer_request_endio(struct bio *bio, int error); +extern void drbd_request_endio(struct bio *bio, int error); extern int drbd_worker(struct drbd_thread *thi); enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor); void drbd_resync_after_changed(struct drbd_device *device); @@ -1332,16 +1374,20 @@ extern int w_start_resync(struct drbd_work *, int); extern void resync_timer_fn(unsigned long data); extern void start_resync_timer_fn(unsigned long data); +extern void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req); + /* drbd_receiver.c */ extern int drbd_receiver(struct drbd_thread *thi); extern int drbd_asender(struct drbd_thread *thi); -extern int drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector); +extern bool drbd_rs_c_min_rate_throttle(struct drbd_device *device); +extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector); extern int drbd_submit_peer_request(struct drbd_device *, struct drbd_peer_request *, const unsigned, const int); extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *); extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64, sector_t, unsigned int, + bool, gfp_t) __must_hold(local); extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *, int); @@ -1401,6 +1447,37 @@ static inline void drbd_tcp_quickack(struct socket *sock) (char*)&val, sizeof(val)); } +/* sets the number of 512 byte sectors of our virtual device */ +static inline void drbd_set_my_capacity(struct drbd_device *device, + sector_t size) +{ + /* set_capacity(device->this_bdev->bd_disk, size); */ + set_capacity(device->vdisk, size); + device->this_bdev->bd_inode->i_size = (loff_t)size << 9; +} + +/* + * used to submit 
our private bio + */ +static inline void drbd_generic_make_request(struct drbd_device *device, + int fault_type, struct bio *bio) +{ + __release(local); + if (!bio->bi_bdev) { + printk(KERN_ERR "drbd%d: drbd_generic_make_request: " + "bio->bi_bdev == NULL\n", + device_to_minor(device)); + dump_stack(); + bio_endio(bio, -ENODEV); + return; + } + + if (drbd_insert_fault(device, fault_type)) + bio_endio(bio, -EIO); + else + generic_make_request(bio); +} + void drbd_bump_write_ordering(struct drbd_connection *connection, enum write_ordering_e wo); /* drbd_proc.c */ @@ -1410,6 +1487,7 @@ extern const char *drbd_conn_str(enum drbd_conns s); extern const char *drbd_role_str(enum drbd_role s); /* drbd_actlog.c */ +extern bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i); extern int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i); extern void drbd_al_begin_io_commit(struct drbd_device *device, bool delegate); extern bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i); @@ -2144,7 +2222,7 @@ static inline void drbd_md_flush(struct drbd_device *device) static inline struct drbd_connection *first_connection(struct drbd_resource *resource) { - return list_first_entry(&resource->connections, + return list_first_entry_or_null(&resource->connections, struct drbd_connection, connections); } diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 331e5cc1227d..960645c26e6f 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -1607,8 +1607,8 @@ static u32 bio_flags_to_wire(struct drbd_connection *connection, unsigned long b return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0; } -/* Used to send write requests - * R_PRIMARY -> Peer (P_DATA) +/* Used to send write or TRIM aka REQ_DISCARD requests + * R_PRIMARY -> Peer (P_DATA, P_TRIM) */ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *req) { @@ -1640,6 +1640,16 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request * dp_flags |= DP_SEND_WRITE_ACK; } p->dp_flags = cpu_to_be32(dp_flags); + + if (dp_flags & DP_DISCARD) { + struct p_trim *t = (struct p_trim*)p; + t->size = cpu_to_be32(req->i.size); + err = __send_command(peer_device->connection, device->vnr, sock, P_TRIM, sizeof(*t), NULL, 0); + goto out; + } + + /* our digest is still only over the payload. + * TRIM does not carry any payload. */ if (dgs) drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, p + 1); err = __send_command(peer_device->connection, device->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size); @@ -1675,6 +1685,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request * ... Be noisy about digest too large ... 
} */ } +out: mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */ return err; @@ -2570,6 +2581,7 @@ struct drbd_resource *drbd_create_resource(const char *name) INIT_LIST_HEAD(&resource->connections); list_add_tail_rcu(&resource->resources, &drbd_resources); mutex_init(&resource->conf_update); + mutex_init(&resource->adm_mutex); spin_lock_init(&resource->req_lock); return resource; @@ -2687,14 +2699,16 @@ static int init_submitter(struct drbd_device *device) return 0; } -enum drbd_ret_code drbd_create_device(struct drbd_resource *resource, unsigned int minor, int vnr) +enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor) { + struct drbd_resource *resource = adm_ctx->resource; struct drbd_connection *connection; struct drbd_device *device; struct drbd_peer_device *peer_device, *tmp_peer_device; struct gendisk *disk; struct request_queue *q; int id; + int vnr = adm_ctx->volume; enum drbd_ret_code err = ERR_NOMEM; device = minor_to_device(minor); @@ -2763,7 +2777,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_resource *resource, unsigned i if (id < 0) { if (id == -ENOSPC) { err = ERR_MINOR_EXISTS; - drbd_msg_put_info("requested minor exists already"); + drbd_msg_put_info(adm_ctx->reply_skb, "requested minor exists already"); } goto out_no_minor_idr; } @@ -2773,7 +2787,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_resource *resource, unsigned i if (id < 0) { if (id == -ENOSPC) { err = ERR_MINOR_EXISTS; - drbd_msg_put_info("requested minor exists already"); + drbd_msg_put_info(adm_ctx->reply_skb, "requested minor exists already"); } goto out_idr_remove_minor; } @@ -2794,7 +2808,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_resource *resource, unsigned i if (id < 0) { if (id == -ENOSPC) { err = ERR_INVALID_REQUEST; - drbd_msg_put_info("requested volume exists already"); + drbd_msg_put_info(adm_ctx->reply_skb, "requested volume exists already"); } goto out_idr_remove_from_resource; } @@ -2803,7 +2817,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_resource *resource, unsigned i if (init_submitter(device)) { err = ERR_NOMEM; - drbd_msg_put_info("unable to create submit workqueue"); + drbd_msg_put_info(adm_ctx->reply_skb, "unable to create submit workqueue"); goto out_idr_remove_vol; } diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 526414bc2cab..1b35c45c92b7 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -34,7 +34,6 @@ #include "drbd_int.h" #include "drbd_protocol.h" #include "drbd_req.h" -#include "drbd_wrappers.h" #include <asm/unaligned.h> #include <linux/drbd_limits.h> #include <linux/kthread.h> @@ -82,32 +81,6 @@ int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb); /* used blkdev_get_by_path, to claim our meta data device(s) */ static char *drbd_m_holder = "Hands off! this is DRBD's meta data device."; -/* Configuration is strictly serialized, because generic netlink message - * processing is strictly serialized by the genl_lock(). - * Which means we can use one static global drbd_config_context struct. - */ -static struct drbd_config_context { - /* assigned from drbd_genlmsghdr */ - unsigned int minor; - /* assigned from request attributes, if present */ - unsigned int volume; -#define VOLUME_UNSPECIFIED (-1U) - /* pointer into the request skb, - * limited lifetime! 
*/ - char *resource_name; - struct nlattr *my_addr; - struct nlattr *peer_addr; - - /* reply buffer */ - struct sk_buff *reply_skb; - /* pointer into reply buffer */ - struct drbd_genlmsghdr *reply_dh; - /* resolved from attributes, if possible */ - struct drbd_device *device; - struct drbd_resource *resource; - struct drbd_connection *connection; -} adm_ctx; - static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info) { genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb)))); @@ -117,9 +90,8 @@ static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info) /* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only * reason it could fail was no space in skb, and there are 4k available. */ -int drbd_msg_put_info(const char *info) +int drbd_msg_put_info(struct sk_buff *skb, const char *info) { - struct sk_buff *skb = adm_ctx.reply_skb; struct nlattr *nla; int err = -EMSGSIZE; @@ -143,42 +115,46 @@ int drbd_msg_put_info(const char *info) * and per-family private info->pointers. * But we need to stay compatible with older kernels. * If it returns successfully, adm_ctx members are valid. + * + * At this point, we still rely on the global genl_lock(). + * If we want to avoid that, and allow "genl_family.parallel_ops", we may need + * to add additional synchronization against object destruction/modification. */ #define DRBD_ADM_NEED_MINOR 1 #define DRBD_ADM_NEED_RESOURCE 2 #define DRBD_ADM_NEED_CONNECTION 4 -static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info, - unsigned flags) +static int drbd_adm_prepare(struct drbd_config_context *adm_ctx, + struct sk_buff *skb, struct genl_info *info, unsigned flags) { struct drbd_genlmsghdr *d_in = info->userhdr; const u8 cmd = info->genlhdr->cmd; int err; - memset(&adm_ctx, 0, sizeof(adm_ctx)); + memset(adm_ctx, 0, sizeof(*adm_ctx)); /* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */ if (cmd != DRBD_ADM_GET_STATUS && !capable(CAP_NET_ADMIN)) return -EPERM; - adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); - if (!adm_ctx.reply_skb) { + adm_ctx->reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!adm_ctx->reply_skb) { err = -ENOMEM; goto fail; } - adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb, + adm_ctx->reply_dh = genlmsg_put_reply(adm_ctx->reply_skb, info, &drbd_genl_family, 0, cmd); /* put of a few bytes into a fresh skb of >= 4k will always succeed. * but anyways */ - if (!adm_ctx.reply_dh) { + if (!adm_ctx->reply_dh) { err = -ENOMEM; goto fail; } - adm_ctx.reply_dh->minor = d_in->minor; - adm_ctx.reply_dh->ret_code = NO_ERROR; + adm_ctx->reply_dh->minor = d_in->minor; + adm_ctx->reply_dh->ret_code = NO_ERROR; - adm_ctx.volume = VOLUME_UNSPECIFIED; + adm_ctx->volume = VOLUME_UNSPECIFIED; if (info->attrs[DRBD_NLA_CFG_CONTEXT]) { struct nlattr *nla; /* parse and validate only */ @@ -188,111 +164,131 @@ static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info, /* It was present, and valid, * copy it over to the reply skb. 
*/ - err = nla_put_nohdr(adm_ctx.reply_skb, + err = nla_put_nohdr(adm_ctx->reply_skb, info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len, info->attrs[DRBD_NLA_CFG_CONTEXT]); if (err) goto fail; - /* and assign stuff to the global adm_ctx */ + /* and assign stuff to the adm_ctx */ nla = nested_attr_tb[__nla_type(T_ctx_volume)]; if (nla) - adm_ctx.volume = nla_get_u32(nla); + adm_ctx->volume = nla_get_u32(nla); nla = nested_attr_tb[__nla_type(T_ctx_resource_name)]; if (nla) - adm_ctx.resource_name = nla_data(nla); - adm_ctx.my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)]; - adm_ctx.peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)]; - if ((adm_ctx.my_addr && - nla_len(adm_ctx.my_addr) > sizeof(adm_ctx.connection->my_addr)) || - (adm_ctx.peer_addr && - nla_len(adm_ctx.peer_addr) > sizeof(adm_ctx.connection->peer_addr))) { + adm_ctx->resource_name = nla_data(nla); + adm_ctx->my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)]; + adm_ctx->peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)]; + if ((adm_ctx->my_addr && + nla_len(adm_ctx->my_addr) > sizeof(adm_ctx->connection->my_addr)) || + (adm_ctx->peer_addr && + nla_len(adm_ctx->peer_addr) > sizeof(adm_ctx->connection->peer_addr))) { err = -EINVAL; goto fail; } } - adm_ctx.minor = d_in->minor; - adm_ctx.device = minor_to_device(d_in->minor); - if (adm_ctx.resource_name) { - adm_ctx.resource = drbd_find_resource(adm_ctx.resource_name); + adm_ctx->minor = d_in->minor; + adm_ctx->device = minor_to_device(d_in->minor); + + /* We are protected by the global genl_lock(). + * But we may explicitly drop it/retake it in drbd_adm_set_role(), + * so make sure this object stays around. */ + if (adm_ctx->device) + kref_get(&adm_ctx->device->kref); + + if (adm_ctx->resource_name) { + adm_ctx->resource = drbd_find_resource(adm_ctx->resource_name); } - if (!adm_ctx.device && (flags & DRBD_ADM_NEED_MINOR)) { - drbd_msg_put_info("unknown minor"); + if (!adm_ctx->device && (flags & DRBD_ADM_NEED_MINOR)) { + drbd_msg_put_info(adm_ctx->reply_skb, "unknown minor"); return ERR_MINOR_INVALID; } - if (!adm_ctx.resource && (flags & DRBD_ADM_NEED_RESOURCE)) { - drbd_msg_put_info("unknown resource"); - if (adm_ctx.resource_name) + if (!adm_ctx->resource && (flags & DRBD_ADM_NEED_RESOURCE)) { + drbd_msg_put_info(adm_ctx->reply_skb, "unknown resource"); + if (adm_ctx->resource_name) return ERR_RES_NOT_KNOWN; return ERR_INVALID_REQUEST; } if (flags & DRBD_ADM_NEED_CONNECTION) { - if (adm_ctx.resource) { - drbd_msg_put_info("no resource name expected"); + if (adm_ctx->resource) { + drbd_msg_put_info(adm_ctx->reply_skb, "no resource name expected"); return ERR_INVALID_REQUEST; } - if (adm_ctx.device) { - drbd_msg_put_info("no minor number expected"); + if (adm_ctx->device) { + drbd_msg_put_info(adm_ctx->reply_skb, "no minor number expected"); return ERR_INVALID_REQUEST; } - if (adm_ctx.my_addr && adm_ctx.peer_addr) - adm_ctx.connection = conn_get_by_addrs(nla_data(adm_ctx.my_addr), - nla_len(adm_ctx.my_addr), - nla_data(adm_ctx.peer_addr), - nla_len(adm_ctx.peer_addr)); - if (!adm_ctx.connection) { - drbd_msg_put_info("unknown connection"); + if (adm_ctx->my_addr && adm_ctx->peer_addr) + adm_ctx->connection = conn_get_by_addrs(nla_data(adm_ctx->my_addr), + nla_len(adm_ctx->my_addr), + nla_data(adm_ctx->peer_addr), + nla_len(adm_ctx->peer_addr)); + if (!adm_ctx->connection) { + drbd_msg_put_info(adm_ctx->reply_skb, "unknown connection"); return ERR_INVALID_REQUEST; } } /* some more paranoia, if the request was over-determined */ - if (adm_ctx.device && adm_ctx.resource && 
- adm_ctx.device->resource != adm_ctx.resource) { + if (adm_ctx->device && adm_ctx->resource && + adm_ctx->device->resource != adm_ctx->resource) { pr_warning("request: minor=%u, resource=%s; but that minor belongs to resource %s\n", - adm_ctx.minor, adm_ctx.resource->name, - adm_ctx.device->resource->name); - drbd_msg_put_info("minor exists in different resource"); + adm_ctx->minor, adm_ctx->resource->name, + adm_ctx->device->resource->name); + drbd_msg_put_info(adm_ctx->reply_skb, "minor exists in different resource"); return ERR_INVALID_REQUEST; } - if (adm_ctx.device && - adm_ctx.volume != VOLUME_UNSPECIFIED && - adm_ctx.volume != adm_ctx.device->vnr) { + if (adm_ctx->device && + adm_ctx->volume != VOLUME_UNSPECIFIED && + adm_ctx->volume != adm_ctx->device->vnr) { pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n", - adm_ctx.minor, adm_ctx.volume, - adm_ctx.device->vnr, - adm_ctx.device->resource->name); - drbd_msg_put_info("minor exists as different volume"); + adm_ctx->minor, adm_ctx->volume, + adm_ctx->device->vnr, + adm_ctx->device->resource->name); + drbd_msg_put_info(adm_ctx->reply_skb, "minor exists as different volume"); return ERR_INVALID_REQUEST; } + /* still, provide adm_ctx->resource always, if possible. */ + if (!adm_ctx->resource) { + adm_ctx->resource = adm_ctx->device ? adm_ctx->device->resource + : adm_ctx->connection ? adm_ctx->connection->resource : NULL; + if (adm_ctx->resource) + kref_get(&adm_ctx->resource->kref); + } + return NO_ERROR; fail: - nlmsg_free(adm_ctx.reply_skb); - adm_ctx.reply_skb = NULL; + nlmsg_free(adm_ctx->reply_skb); + adm_ctx->reply_skb = NULL; return err; } -static int drbd_adm_finish(struct genl_info *info, int retcode) +static int drbd_adm_finish(struct drbd_config_context *adm_ctx, + struct genl_info *info, int retcode) { - if (adm_ctx.connection) { - kref_put(&adm_ctx.connection->kref, drbd_destroy_connection); - adm_ctx.connection = NULL; + if (adm_ctx->device) { + kref_put(&adm_ctx->device->kref, drbd_destroy_device); + adm_ctx->device = NULL; } - if (adm_ctx.resource) { - kref_put(&adm_ctx.resource->kref, drbd_destroy_resource); - adm_ctx.resource = NULL; + if (adm_ctx->connection) { + kref_put(&adm_ctx->connection->kref, &drbd_destroy_connection); + adm_ctx->connection = NULL; + } + if (adm_ctx->resource) { + kref_put(&adm_ctx->resource->kref, drbd_destroy_resource); + adm_ctx->resource = NULL; } - if (!adm_ctx.reply_skb) + if (!adm_ctx->reply_skb) return -ENOMEM; - adm_ctx.reply_dh->ret_code = retcode; - drbd_adm_send_reply(adm_ctx.reply_skb, info); + adm_ctx->reply_dh->ret_code = retcode; + drbd_adm_send_reply(adm_ctx->reply_skb, info); return 0; } @@ -426,6 +422,14 @@ static enum drbd_fencing_p highest_fencing_policy(struct drbd_connection *connec } rcu_read_unlock(); + if (fp == FP_NOT_AVAIL) { + /* IO Suspending works on the whole resource. + Do it only for one device. 
*/ + vnr = 0; + peer_device = idr_get_next(&connection->peer_devices, &vnr); + drbd_change_state(peer_device->device, CS_VERBOSE | CS_HARD, NS(susp_fen, 0)); + } + return fp; } @@ -438,12 +442,13 @@ bool conn_try_outdate_peer(struct drbd_connection *connection) char *ex_to_string; int r; + spin_lock_irq(&connection->resource->req_lock); if (connection->cstate >= C_WF_REPORT_PARAMS) { drbd_err(connection, "Expected cstate < C_WF_REPORT_PARAMS\n"); + spin_unlock_irq(&connection->resource->req_lock); return false; } - spin_lock_irq(&connection->resource->req_lock); connect_cnt = connection->connect_cnt; spin_unlock_irq(&connection->resource->req_lock); @@ -654,11 +659,11 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force) put_ldev(device); } } else { - mutex_lock(&device->resource->conf_update); + /* Called from drbd_adm_set_role only. + * We are still holding the conf_update mutex. */ nc = first_peer_device(device)->connection->net_conf; if (nc) nc->discard_my_data = 0; /* without copy; single bit op is atomic */ - mutex_unlock(&device->resource->conf_update); set_disk_ro(device->vdisk, false); if (get_ldev(device)) { @@ -700,11 +705,12 @@ static const char *from_attrs_err_to_txt(int err) int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; struct set_role_parms parms; int err; enum drbd_ret_code retcode; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) @@ -715,17 +721,22 @@ int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info) err = set_role_parms_from_attrs(&parms, info); if (err) { retcode = ERR_MANDATORY_TAG; - drbd_msg_put_info(from_attrs_err_to_txt(err)); + drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); goto out; } } + genl_unlock(); + mutex_lock(&adm_ctx.resource->adm_mutex); if (info->genlhdr->cmd == DRBD_ADM_PRIMARY) retcode = drbd_set_role(adm_ctx.device, R_PRIMARY, parms.assume_uptodate); else retcode = drbd_set_role(adm_ctx.device, R_SECONDARY, 0); + + mutex_unlock(&adm_ctx.resource->adm_mutex); + genl_lock(); out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } @@ -1104,15 +1115,18 @@ static void drbd_setup_queue_param(struct drbd_device *device, unsigned int max_ struct request_queue * const q = device->rq_queue; unsigned int max_hw_sectors = max_bio_size >> 9; unsigned int max_segments = 0; + struct request_queue *b = NULL; if (get_ldev_if_state(device, D_ATTACHING)) { - struct request_queue * const b = device->ldev->backing_bdev->bd_disk->queue; + b = device->ldev->backing_bdev->bd_disk->queue; max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9); rcu_read_lock(); max_segments = rcu_dereference(device->ldev->disk_conf)->max_bio_bvecs; rcu_read_unlock(); - put_ldev(device); + + blk_set_stacking_limits(&q->limits); + blk_queue_max_write_same_sectors(q, 0); } blk_queue_logical_block_size(q, 512); @@ -1121,8 +1135,25 @@ static void drbd_setup_queue_param(struct drbd_device *device, unsigned int max_ blk_queue_max_segments(q, max_segments ? 
max_segments : BLK_MAX_SEGMENTS); blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1); - if (get_ldev_if_state(device, D_ATTACHING)) { - struct request_queue * const b = device->ldev->backing_bdev->bd_disk->queue; + if (b) { + struct drbd_connection *connection = first_peer_device(device)->connection; + + if (blk_queue_discard(b) && + (connection->cstate < C_CONNECTED || connection->agreed_features & FF_TRIM)) { + /* For now, don't allow more than one activity log extent worth of data + * to be discarded in one go. We may need to rework drbd_al_begin_io() + * to allow for even larger discard ranges */ + q->limits.max_discard_sectors = DRBD_MAX_DISCARD_SECTORS; + + queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); + /* REALLY? Is stacking secdiscard "legal"? */ + if (blk_queue_secdiscard(b)) + queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q); + } else { + q->limits.max_discard_sectors = 0; + queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q); + queue_flag_clear_unlocked(QUEUE_FLAG_SECDISCARD, q); + } blk_queue_stack_limits(q, b); @@ -1164,8 +1195,14 @@ void drbd_reconsider_max_bio_size(struct drbd_device *device) peer = DRBD_MAX_BIO_SIZE_P95; /* drbd 8.3.8 onwards, before 8.4.0 */ else peer = DRBD_MAX_BIO_SIZE; - } + /* We may later detach and re-attach on a disconnected Primary. + * Avoid this setting to jump back in that case. + * We want to store what we know the peer DRBD can handle, + * not what the peer IO backend can handle. */ + if (peer > device->peer_max_bio_size) + device->peer_max_bio_size = peer; + } new = min(local, peer); if (device->state.role == R_PRIMARY && new < now) @@ -1258,19 +1295,21 @@ static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev) int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; enum drbd_ret_code retcode; struct drbd_device *device; struct disk_conf *new_disk_conf, *old_disk_conf; struct fifo_buffer *old_plan = NULL, *new_plan = NULL; int err, fifo_size; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) - goto out; + goto finish; device = adm_ctx.device; + mutex_lock(&adm_ctx.resource->adm_mutex); /* we also need a disk * to change the options on */ @@ -1294,7 +1333,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info) err = disk_conf_from_attrs_for_change(new_disk_conf, info); if (err && err != -ENOMSG) { retcode = ERR_MANDATORY_TAG; - drbd_msg_put_info(from_attrs_err_to_txt(err)); + drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); goto fail_unlock; } @@ -1385,12 +1424,15 @@ fail_unlock: success: put_ldev(device); out: - drbd_adm_finish(info, retcode); + mutex_unlock(&adm_ctx.resource->adm_mutex); + finish: + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; struct drbd_device *device; int err; enum drbd_ret_code retcode; @@ -1406,13 +1448,14 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) enum drbd_state_rv rv; struct net_conf *nc; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) goto finish; device = adm_ctx.device; + mutex_lock(&adm_ctx.resource->adm_mutex); conn_reconfig_start(first_peer_device(device)->connection); /* if you 
want to reconfigure, please tear down first */ @@ -1455,7 +1498,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) err = disk_conf_from_attrs(new_disk_conf, info); if (err) { retcode = ERR_MANDATORY_TAG; - drbd_msg_put_info(from_attrs_err_to_txt(err)); + drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); goto fail; } @@ -1619,7 +1662,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) } if (device->state.conn < C_CONNECTED && - device->state.role == R_PRIMARY && + device->state.role == R_PRIMARY && device->ed_uuid && (device->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) { drbd_err(device, "Can only attach to data with current UUID=%016llX\n", (unsigned long long)device->ed_uuid); @@ -1797,7 +1840,8 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE); put_ldev(device); conn_reconfig_done(first_peer_device(device)->connection); - drbd_adm_finish(info, retcode); + mutex_unlock(&adm_ctx.resource->adm_mutex); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; force_diskless_dec: @@ -1819,9 +1863,9 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) kfree(new_disk_conf); lc_destroy(resync_lru); kfree(new_plan); - + mutex_unlock(&adm_ctx.resource->adm_mutex); finish: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } @@ -1860,11 +1904,12 @@ out: * Only then we have finally detached. */ int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; enum drbd_ret_code retcode; struct detach_parms parms = { }; int err; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) @@ -1874,14 +1919,16 @@ int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info) err = detach_parms_from_attrs(&parms, info); if (err) { retcode = ERR_MANDATORY_TAG; - drbd_msg_put_info(from_attrs_err_to_txt(err)); + drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); goto out; } } + mutex_lock(&adm_ctx.resource->adm_mutex); retcode = adm_detach(adm_ctx.device, parms.force_detach); + mutex_unlock(&adm_ctx.resource->adm_mutex); out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } @@ -2055,6 +2102,7 @@ static void free_crypto(struct crypto *crypto) int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; enum drbd_ret_code retcode; struct drbd_connection *connection; struct net_conf *old_net_conf, *new_net_conf = NULL; @@ -2063,13 +2111,14 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info) int rsr; /* re-sync running */ struct crypto crypto = { }; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) - goto out; + goto finish; connection = adm_ctx.connection; + mutex_lock(&adm_ctx.resource->adm_mutex); new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL); if (!new_net_conf) { @@ -2084,7 +2133,7 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info) old_net_conf = connection->net_conf; if (!old_net_conf) { - drbd_msg_put_info("net conf missing, try connect"); + drbd_msg_put_info(adm_ctx.reply_skb, "net conf missing, try connect"); retcode = 
ERR_INVALID_REQUEST; goto fail; } @@ -2096,7 +2145,7 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info) err = net_conf_from_attrs_for_change(new_net_conf, info); if (err && err != -ENOMSG) { retcode = ERR_MANDATORY_TAG; - drbd_msg_put_info(from_attrs_err_to_txt(err)); + drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); goto fail; } @@ -2167,12 +2216,15 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info) done: conn_reconfig_done(connection); out: - drbd_adm_finish(info, retcode); + mutex_unlock(&adm_ctx.resource->adm_mutex); + finish: + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; struct drbd_peer_device *peer_device; struct net_conf *old_net_conf, *new_net_conf = NULL; struct crypto crypto = { }; @@ -2182,14 +2234,14 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info) int i; int err; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) goto out; if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) { - drbd_msg_put_info("connection endpoint(s) missing"); + drbd_msg_put_info(adm_ctx.reply_skb, "connection endpoint(s) missing"); retcode = ERR_INVALID_REQUEST; goto out; } @@ -2215,6 +2267,7 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info) } } + mutex_lock(&adm_ctx.resource->adm_mutex); connection = first_connection(adm_ctx.resource); conn_reconfig_start(connection); @@ -2235,7 +2288,7 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info) err = net_conf_from_attrs(new_net_conf, info); if (err && err != -ENOMSG) { retcode = ERR_MANDATORY_TAG; - drbd_msg_put_info(from_attrs_err_to_txt(err)); + drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); goto fail; } @@ -2284,7 +2337,8 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info) retcode = conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE); conn_reconfig_done(connection); - drbd_adm_finish(info, retcode); + mutex_unlock(&adm_ctx.resource->adm_mutex); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; fail: @@ -2292,8 +2346,9 @@ fail: kfree(new_net_conf); conn_reconfig_done(connection); + mutex_unlock(&adm_ctx.resource->adm_mutex); out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } @@ -2356,13 +2411,14 @@ static enum drbd_state_rv conn_try_disconnect(struct drbd_connection *connection int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; struct disconnect_parms parms; struct drbd_connection *connection; enum drbd_state_rv rv; enum drbd_ret_code retcode; int err; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) @@ -2374,18 +2430,20 @@ int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info) err = disconnect_parms_from_attrs(&parms, info); if (err) { retcode = ERR_MANDATORY_TAG; - drbd_msg_put_info(from_attrs_err_to_txt(err)); + drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); goto fail; } } + mutex_lock(&adm_ctx.resource->adm_mutex); rv = conn_try_disconnect(connection, parms.force_disconnect); if (rv < SS_SUCCESS) retcode = rv; /* FIXME: Type mismatch. 
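
By this point the conversion pattern is uniform: every netlink handler allocates the context on its own stack, prepares, serializes on the per-resource adm_mutex, does its work, and finishes. A condensed sketch of that shape (the handler name and the work in the middle are placeholders; the surrounding calls match the code above):

int drbd_adm_example(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mutex_lock(&adm_ctx.resource->adm_mutex);
	/* ... administrative work on adm_ctx.device, serialized per resource ... */
	mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
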
*/ else retcode = NO_ERROR; + mutex_unlock(&adm_ctx.resource->adm_mutex); fail: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } @@ -2407,6 +2465,7 @@ void resync_after_online_grow(struct drbd_device *device) int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; struct disk_conf *old_disk_conf, *new_disk_conf = NULL; struct resize_parms rs; struct drbd_device *device; @@ -2417,12 +2476,13 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info) sector_t u_size; int err; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) - goto fail; + goto finish; + mutex_lock(&adm_ctx.resource->adm_mutex); device = adm_ctx.device; if (!get_ldev(device)) { retcode = ERR_NO_DISK; @@ -2436,7 +2496,7 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info) err = resize_parms_from_attrs(&rs, info); if (err) { retcode = ERR_MANDATORY_TAG; - drbd_msg_put_info(from_attrs_err_to_txt(err)); + drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); goto fail_ldev; } } @@ -2482,7 +2542,7 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info) goto fail_ldev; } - if (device->state.conn != C_CONNECTED) { + if (device->state.conn != C_CONNECTED && !rs.resize_force) { retcode = ERR_MD_LAYOUT_CONNECTED; goto fail_ldev; } @@ -2528,7 +2588,9 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info) } fail: - drbd_adm_finish(info, retcode); + mutex_unlock(&adm_ctx.resource->adm_mutex); + finish: + drbd_adm_finish(&adm_ctx, info, retcode); return 0; fail_ldev: @@ -2538,11 +2600,12 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info) int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; enum drbd_ret_code retcode; struct res_opts res_opts; int err; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) @@ -2555,33 +2618,37 @@ int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info) err = res_opts_from_attrs(&res_opts, info); if (err && err != -ENOMSG) { retcode = ERR_MANDATORY_TAG; - drbd_msg_put_info(from_attrs_err_to_txt(err)); + drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); goto fail; } + mutex_lock(&adm_ctx.resource->adm_mutex); err = set_resource_options(adm_ctx.resource, &res_opts); if (err) { retcode = ERR_INVALID_REQUEST; if (err == -ENOMEM) retcode = ERR_NOMEM; } + mutex_unlock(&adm_ctx.resource->adm_mutex); fail: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; struct drbd_device *device; int retcode; /* enum drbd_ret_code rsp. 
enum drbd_state_rv */ - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) goto out; + mutex_lock(&adm_ctx.resource->adm_mutex); device = adm_ctx.device; /* If there is still bitmap IO pending, probably because of a previous @@ -2605,26 +2672,29 @@ int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info) } else retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_T)); drbd_resume_io(device); - + mutex_unlock(&adm_ctx.resource->adm_mutex); out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info, union drbd_state mask, union drbd_state val) { + struct drbd_config_context adm_ctx; enum drbd_ret_code retcode; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) goto out; + mutex_lock(&adm_ctx.resource->adm_mutex); retcode = drbd_request_state(adm_ctx.device, mask, val); + mutex_unlock(&adm_ctx.resource->adm_mutex); out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } @@ -2639,15 +2709,17 @@ static int drbd_bmio_set_susp_al(struct drbd_device *device) int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; int retcode; /* drbd_ret_code, drbd_state_rv */ struct drbd_device *device; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) goto out; + mutex_lock(&adm_ctx.resource->adm_mutex); device = adm_ctx.device; /* If there is still bitmap IO pending, probably because of a previous @@ -2674,40 +2746,45 @@ int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info) } else retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_S)); drbd_resume_io(device); - + mutex_unlock(&adm_ctx.resource->adm_mutex); out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; enum drbd_ret_code retcode; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) goto out; + mutex_lock(&adm_ctx.resource->adm_mutex); if (drbd_request_state(adm_ctx.device, NS(user_isp, 1)) == SS_NOTHING_TO_DO) retcode = ERR_PAUSE_IS_SET; + mutex_unlock(&adm_ctx.resource->adm_mutex); out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; union drbd_dev_state s; enum drbd_ret_code retcode; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) goto out; + mutex_lock(&adm_ctx.resource->adm_mutex); if (drbd_request_state(adm_ctx.device, NS(user_isp, 0)) == SS_NOTHING_TO_DO) { s = adm_ctx.device->state; if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) { @@ -2717,9 
+2794,9 @@ int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info) retcode = ERR_PAUSE_IS_CLEAR; } } - + mutex_unlock(&adm_ctx.resource->adm_mutex); out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } @@ -2730,15 +2807,17 @@ int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info) int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; struct drbd_device *device; int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */ - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) goto out; + mutex_lock(&adm_ctx.resource->adm_mutex); device = adm_ctx.device; if (test_bit(NEW_CUR_UUID, &device->flags)) { drbd_uuid_new_current(device); @@ -2753,9 +2832,9 @@ int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info) tl_restart(first_peer_device(device)->connection, FAIL_FROZEN_DISK_IO); } drbd_resume_io(device); - + mutex_unlock(&adm_ctx.resource->adm_mutex); out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } @@ -2931,10 +3010,11 @@ nla_put_failure: int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; enum drbd_ret_code retcode; int err; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) @@ -2946,7 +3026,7 @@ int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info) return err; } out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } @@ -3133,11 +3213,12 @@ dump: int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; enum drbd_ret_code retcode; struct timeout_parms tp; int err; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) @@ -3154,17 +3235,18 @@ int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info) return err; } out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; struct drbd_device *device; enum drbd_ret_code retcode; struct start_ov_parms parms; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) @@ -3179,10 +3261,12 @@ int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info) int err = start_ov_parms_from_attrs(&parms, info); if (err) { retcode = ERR_MANDATORY_TAG; - drbd_msg_put_info(from_attrs_err_to_txt(err)); + drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); goto out; } } + mutex_lock(&adm_ctx.resource->adm_mutex); + /* w_make_ov_request expects position to be aligned */ device->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1); device->ov_stop_sector = parms.ov_stop_sector; @@ -3193,21 +3277,24 @@ int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info) wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags)); retcode = 
drbd_request_state(device, NS(conn, C_VERIFY_S)); drbd_resume_io(device); + + mutex_unlock(&adm_ctx.resource->adm_mutex); out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; struct drbd_device *device; enum drbd_ret_code retcode; int skip_initial_sync = 0; int err; struct new_c_uuid_parms args; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) @@ -3219,11 +3306,12 @@ int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info) err = new_c_uuid_parms_from_attrs(&args, info); if (err) { retcode = ERR_MANDATORY_TAG; - drbd_msg_put_info(from_attrs_err_to_txt(err)); + drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); goto out_nolock; } } + mutex_lock(&adm_ctx.resource->adm_mutex); mutex_lock(device->state_mutex); /* Protects us against serialized state changes. */ if (!get_ldev(device)) { @@ -3268,22 +3356,24 @@ out_dec: put_ldev(device); out: mutex_unlock(device->state_mutex); + mutex_unlock(&adm_ctx.resource->adm_mutex); out_nolock: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } static enum drbd_ret_code -drbd_check_resource_name(const char *name) +drbd_check_resource_name(struct drbd_config_context *adm_ctx) { + const char *name = adm_ctx->resource_name; if (!name || !name[0]) { - drbd_msg_put_info("resource name missing"); + drbd_msg_put_info(adm_ctx->reply_skb, "resource name missing"); return ERR_MANDATORY_TAG; } /* if we want to use these in sysfs/configfs/debugfs some day, * we must not allow slashes */ if (strchr(name, '/')) { - drbd_msg_put_info("invalid resource name"); + drbd_msg_put_info(adm_ctx->reply_skb, "invalid resource name"); return ERR_INVALID_REQUEST; } return NO_ERROR; @@ -3291,11 +3381,12 @@ drbd_check_resource_name(const char *name) int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; enum drbd_ret_code retcode; struct res_opts res_opts; int err; - retcode = drbd_adm_prepare(skb, info, 0); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, 0); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) @@ -3305,48 +3396,50 @@ int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info) err = res_opts_from_attrs(&res_opts, info); if (err && err != -ENOMSG) { retcode = ERR_MANDATORY_TAG; - drbd_msg_put_info(from_attrs_err_to_txt(err)); + drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); goto out; } - retcode = drbd_check_resource_name(adm_ctx.resource_name); + retcode = drbd_check_resource_name(&adm_ctx); if (retcode != NO_ERROR) goto out; if (adm_ctx.resource) { if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) { retcode = ERR_INVALID_REQUEST; - drbd_msg_put_info("resource exists"); + drbd_msg_put_info(adm_ctx.reply_skb, "resource exists"); } /* else: still NO_ERROR */ goto out; } + /* not yet safe for genl_family.parallel_ops */ if (!conn_create(adm_ctx.resource_name, &res_opts)) retcode = ERR_NOMEM; out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; struct drbd_genlmsghdr *dh = info->userhdr; enum drbd_ret_code retcode; - retcode = drbd_adm_prepare(skb, info, 
DRBD_ADM_NEED_RESOURCE); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) goto out; if (dh->minor > MINORMASK) { - drbd_msg_put_info("requested minor out of range"); + drbd_msg_put_info(adm_ctx.reply_skb, "requested minor out of range"); retcode = ERR_INVALID_REQUEST; goto out; } if (adm_ctx.volume > DRBD_VOLUME_MAX) { - drbd_msg_put_info("requested volume id out of range"); + drbd_msg_put_info(adm_ctx.reply_skb, "requested volume id out of range"); retcode = ERR_INVALID_REQUEST; goto out; } @@ -3360,9 +3453,11 @@ int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info) goto out; } - retcode = drbd_create_device(adm_ctx.resource, dh->minor, adm_ctx.volume); + mutex_lock(&adm_ctx.resource->adm_mutex); + retcode = drbd_create_device(&adm_ctx, dh->minor); + mutex_unlock(&adm_ctx.resource->adm_mutex); out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } @@ -3383,35 +3478,40 @@ static enum drbd_ret_code adm_del_minor(struct drbd_device *device) int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; enum drbd_ret_code retcode; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) goto out; + mutex_lock(&adm_ctx.resource->adm_mutex); retcode = adm_del_minor(adm_ctx.device); + mutex_unlock(&adm_ctx.resource->adm_mutex); out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } int drbd_adm_down(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; struct drbd_resource *resource; struct drbd_connection *connection; struct drbd_device *device; int retcode; /* enum drbd_ret_code rsp. 
enum drbd_state_rv */ unsigned i; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) - goto out; + goto finish; resource = adm_ctx.resource; + mutex_lock(&resource->adm_mutex); /* demote */ for_each_connection(connection, resource) { struct drbd_peer_device *peer_device; @@ -3419,14 +3519,14 @@ int drbd_adm_down(struct sk_buff *skb, struct genl_info *info) idr_for_each_entry(&connection->peer_devices, peer_device, i) { retcode = drbd_set_role(peer_device->device, R_SECONDARY, 0); if (retcode < SS_SUCCESS) { - drbd_msg_put_info("failed to demote"); + drbd_msg_put_info(adm_ctx.reply_skb, "failed to demote"); goto out; } } retcode = conn_try_disconnect(connection, 0); if (retcode < SS_SUCCESS) { - drbd_msg_put_info("failed to disconnect"); + drbd_msg_put_info(adm_ctx.reply_skb, "failed to disconnect"); goto out; } } @@ -3435,7 +3535,7 @@ int drbd_adm_down(struct sk_buff *skb, struct genl_info *info) idr_for_each_entry(&resource->devices, device, i) { retcode = adm_detach(device, 0); if (retcode < SS_SUCCESS || retcode > NO_ERROR) { - drbd_msg_put_info("failed to detach"); + drbd_msg_put_info(adm_ctx.reply_skb, "failed to detach"); goto out; } } @@ -3453,7 +3553,7 @@ int drbd_adm_down(struct sk_buff *skb, struct genl_info *info) retcode = adm_del_minor(device); if (retcode != NO_ERROR) { /* "can not happen" */ - drbd_msg_put_info("failed to delete volume"); + drbd_msg_put_info(adm_ctx.reply_skb, "failed to delete volume"); goto out; } } @@ -3462,25 +3562,28 @@ int drbd_adm_down(struct sk_buff *skb, struct genl_info *info) synchronize_rcu(); drbd_free_resource(resource); retcode = NO_ERROR; - out: - drbd_adm_finish(info, retcode); + mutex_unlock(&resource->adm_mutex); +finish: + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; struct drbd_resource *resource; struct drbd_connection *connection; enum drbd_ret_code retcode; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) - goto out; + goto finish; resource = adm_ctx.resource; + mutex_lock(&resource->adm_mutex); for_each_connection(connection, resource) { if (connection->cstate > C_STANDALONE) { retcode = ERR_NET_CONFIGURED; @@ -3499,7 +3602,9 @@ int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info) drbd_free_resource(resource); retcode = NO_ERROR; out: - drbd_adm_finish(info, retcode); + mutex_unlock(&resource->adm_mutex); +finish: + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } diff --git a/drivers/block/drbd/drbd_nla.c b/drivers/block/drbd/drbd_nla.c index fa672b6df8d6..b2d4791498a6 100644 --- a/drivers/block/drbd/drbd_nla.c +++ b/drivers/block/drbd/drbd_nla.c @@ -1,4 +1,3 @@ -#include "drbd_wrappers.h" #include <linux/kernel.h> #include <net/netlink.h> #include <linux/drbd_genl_api.h> diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c index 2f26e8ffa45b..89736bdbbc70 100644 --- a/drivers/block/drbd/drbd_proc.c +++ b/drivers/block/drbd/drbd_proc.c @@ -116,7 +116,7 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se /* ------------------------ ~18s average ------------------------ */ i = (device->rs_last_mark + 2) % DRBD_SYNC_MARKS; dt = (jiffies 
- device->rs_mark_time[i]) / HZ; - if (dt > (DRBD_SYNC_MARK_STEP * DRBD_SYNC_MARKS)) + if (dt > 180) stalled = 1; if (!dt) diff --git a/drivers/block/drbd/drbd_protocol.h b/drivers/block/drbd/drbd_protocol.h index 3c04ec0ea333..2da9104a3851 100644 --- a/drivers/block/drbd/drbd_protocol.h +++ b/drivers/block/drbd/drbd_protocol.h @@ -54,6 +54,11 @@ enum drbd_packet { P_CONN_ST_CHG_REPLY = 0x2b, /* meta sock: Connection side state req reply */ P_RETRY_WRITE = 0x2c, /* Protocol C: retry conflicting write request */ P_PROTOCOL_UPDATE = 0x2d, /* data sock: is used in established connections */ + /* 0x2e to 0x30 reserved, used in drbd 9 */ + + /* REQ_DISCARD. We used "discard" in different contexts before, + * which is why I chose TRIM here, to disambiguate. */ + P_TRIM = 0x31, P_MAY_IGNORE = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */ P_MAX_OPT_CMD = 0x101, @@ -119,6 +124,11 @@ struct p_data { u32 dp_flags; } __packed; +struct p_trim { + struct p_data p_data; + u32 size; /* == bio->bi_size */ +} __packed; + /* * commands which share a struct: * p_block_ack: @@ -150,6 +160,8 @@ struct p_block_req { * ReportParams */ +#define FF_TRIM 1 + struct p_connection_features { u32 protocol_min; u32 feature_flags; diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 68e3992e8838..b6c8aaf4931b 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -46,9 +46,10 @@ #include "drbd_int.h" #include "drbd_protocol.h" #include "drbd_req.h" - #include "drbd_vli.h" +#define PRO_FEATURES (FF_TRIM) + struct packet_info { enum drbd_packet cmd; unsigned int size; @@ -65,7 +66,7 @@ enum finish_epoch { static int drbd_do_features(struct drbd_connection *connection); static int drbd_do_auth(struct drbd_connection *connection); static int drbd_disconnected(struct drbd_peer_device *); - +static void conn_wait_active_ee_empty(struct drbd_connection *connection); static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *, struct drbd_epoch *, enum epoch_event); static int e_end_block(struct drbd_work *, int); @@ -234,9 +235,17 @@ static void drbd_kick_lo_and_reclaim_net(struct drbd_device *device) * @retry: whether to retry, if not enough pages are available right now * * Tries to allocate number pages, first from our own page pool, then from - * the kernel, unless this allocation would exceed the max_buffers setting. + * the kernel. * Possibly retry until DRBD frees sufficient pages somewhere else. * + * If this allocation would exceed the max_buffers setting, we throttle + * allocation (schedule_timeout) to give the system some room to breathe. + * + * We do not use max-buffers as hard limit, because it could lead to + * congestion and further to a distributed deadlock during online-verify or + * (checksum based) resync, if the max-buffers, socket buffer sizes and + * resync-rate settings are mis-configured. + * * Returns a page chain linked via page->private. */ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int number, @@ -246,10 +255,8 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int struct page *page = NULL; struct net_conf *nc; DEFINE_WAIT(wait); - int mxb; + unsigned int mxb; - /* Yes, we may run up to @number over max_buffers. If we - * follow it strictly, the admin will get it wrong anyways. */ rcu_read_lock(); nc = rcu_dereference(peer_device->connection->net_conf); mxb = nc ? 
nc->max_buffers : 1000000; @@ -277,7 +284,8 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int break; } - schedule(); + if (schedule_timeout(HZ/10) == 0) + mxb = UINT_MAX; } finish_wait(&drbd_pp_wait, &wait); @@ -331,7 +339,7 @@ You must not have the req_lock: struct drbd_peer_request * drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t sector, - unsigned int data_size, gfp_t gfp_mask) __must_hold(local) + unsigned int data_size, bool has_payload, gfp_t gfp_mask) __must_hold(local) { struct drbd_device *device = peer_device->device; struct drbd_peer_request *peer_req; @@ -348,7 +356,7 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto return NULL; } - if (data_size) { + if (has_payload && data_size) { page = drbd_alloc_pages(peer_device, nr_pages, (gfp_mask & __GFP_WAIT)); if (!page) goto fail; @@ -1026,24 +1034,27 @@ randomize: if (drbd_send_protocol(connection) == -EOPNOTSUPP) return -1; + /* Prevent a race between resync-handshake and + * being promoted to Primary. + * + * Grab and release the state mutex, so we know that any current + * drbd_set_role() is finished, and any incoming drbd_set_role + * will see the STATE_SENT flag, and wait for it to be cleared. + */ + idr_for_each_entry(&connection->peer_devices, peer_device, vnr) + mutex_lock(peer_device->device->state_mutex); + set_bit(STATE_SENT, &connection->flags); + idr_for_each_entry(&connection->peer_devices, peer_device, vnr) + mutex_unlock(peer_device->device->state_mutex); + rcu_read_lock(); idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { struct drbd_device *device = peer_device->device; kref_get(&device->kref); rcu_read_unlock(); - /* Prevent a race between resync-handshake and - * being promoted to Primary. - * - * Grab and release the state mutex, so we know that any current - * drbd_set_role() is finished, and any incoming drbd_set_role - * will see the STATE_SENT flag, and wait for it to be cleared. - */ - mutex_lock(device->state_mutex); - mutex_unlock(device->state_mutex); - if (discard_my_data) set_bit(DISCARD_MY_DATA, &device->flags); else @@ -1315,6 +1326,20 @@ int drbd_submit_peer_request(struct drbd_device *device, unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT; int err = -ENOMEM; + if (peer_req->flags & EE_IS_TRIM_USE_ZEROOUT) { + /* wait for all pending IO completions, before we start + * zeroing things out. */ + conn_wait_active_ee_empty(first_peer_device(device)->connection); + if (blkdev_issue_zeroout(device->ldev->backing_bdev, + sector, ds >> 9, GFP_NOIO)) + peer_req->flags |= EE_WAS_ERROR; + drbd_endio_write_sec_final(peer_req); + return 0; + } + + if (peer_req->flags & EE_IS_TRIM) + nr_pages = 0; /* discards don't have any payload. */ + /* In most cases, we will only need one bio. 
But in case the lower * level restrictions happen to be different at this offset on this * side than those of the sending peer, we may need to submit the * request in more than one bio. @@ -1326,7 +1351,7 @@ int drbd_submit_peer_request(struct drbd_device *device, next_bio: bio = bio_alloc(GFP_NOIO, nr_pages); if (!bio) { - drbd_err(device, "submit_ee: Allocation of a bio failed\n"); + drbd_err(device, "submit_ee: Allocation of a bio failed (nr_pages=%u)\n", nr_pages); goto fail; } /* > peer_req->i.sector, unless this is the first bio */ @@ -1340,6 +1365,11 @@ next_bio: bios = bio; ++n_bios; + if (rw & REQ_DISCARD) { + bio->bi_iter.bi_size = ds; + goto submit; + } + page_chain_for_each(page) { unsigned len = min_t(unsigned, ds, PAGE_SIZE); if (!bio_add_page(bio, page, len, 0)) { @@ -1360,8 +1390,9 @@ next_bio: sector += len >> 9; --nr_pages; } - D_ASSERT(device, page == NULL); D_ASSERT(device, ds == 0); +submit: + D_ASSERT(device, page == NULL); atomic_set(&peer_req->pending_bios, n_bios); do { @@ -1490,19 +1521,21 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf * and from receive_Data */ static struct drbd_peer_request * read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector, - int data_size) __must_hold(local) + struct packet_info *pi) __must_hold(local) { struct drbd_device *device = peer_device->device; const sector_t capacity = drbd_get_capacity(device->this_bdev); struct drbd_peer_request *peer_req; struct page *page; int dgs, ds, err; + int data_size = pi->size; void *dig_in = peer_device->connection->int_dig_in; void *dig_vv = peer_device->connection->int_dig_vv; unsigned long *data; + struct p_trim *trim = (pi->cmd == P_TRIM) ? pi->data : NULL; dgs = 0; - if (peer_device->connection->peer_integrity_tfm) { + if (!trim && peer_device->connection->peer_integrity_tfm) { dgs = crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm); /* * FIXME: Receive the incoming digest into the receive buffer @@ -1514,9 +1547,15 @@ read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector, data_size -= dgs; } + if (trim) { + D_ASSERT(peer_device, data_size == 0); + data_size = be32_to_cpu(trim->size); + } + if (!expect(IS_ALIGNED(data_size, 512))) return NULL; - if (!expect(data_size <= DRBD_MAX_BIO_SIZE)) + /* prepare for larger trim requests. */ + if (!trim && !expect(data_size <= DRBD_MAX_BIO_SIZE)) return NULL; /* even though we trust our peer, @@ -1532,11 +1571,11 @@ read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector, /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD * "criss-cross" setup, that might cause write-out on some other DRBD, * which in turn might block on the other node at this very place. 
*/ - peer_req = drbd_alloc_peer_req(peer_device, id, sector, data_size, GFP_NOIO); + peer_req = drbd_alloc_peer_req(peer_device, id, sector, data_size, trim == NULL, GFP_NOIO); if (!peer_req) return NULL; - if (!data_size) + if (trim) return peer_req; ds = data_size; @@ -1676,12 +1715,12 @@ static int e_end_resync_block(struct drbd_work *w, int unused) } static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t sector, - int data_size) __releases(local) + struct packet_info *pi) __releases(local) { struct drbd_device *device = peer_device->device; struct drbd_peer_request *peer_req; - peer_req = read_in_block(peer_device, ID_SYNCER, sector, data_size); + peer_req = read_in_block(peer_device, ID_SYNCER, sector, pi); if (!peer_req) goto fail; @@ -1697,7 +1736,7 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto list_add(&peer_req->w.list, &device->sync_ee); spin_unlock_irq(&device->resource->req_lock); - atomic_add(data_size >> 9, &device->rs_sect_ev); + atomic_add(pi->size >> 9, &device->rs_sect_ev); if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0) return 0; @@ -1785,7 +1824,7 @@ static int receive_RSDataReply(struct drbd_connection *connection, struct packet /* data is submitted to disk within recv_resync_read. * corresponding put_ldev done below on error, * or in drbd_peer_request_endio. */ - err = recv_resync_read(peer_device, sector, pi->size); + err = recv_resync_read(peer_device, sector, pi); } else { if (__ratelimit(&drbd_ratelimit_state)) drbd_err(device, "Can not write resync data to local disk.\n"); @@ -2196,7 +2235,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info * */ sector = be64_to_cpu(p->sector); - peer_req = read_in_block(peer_device, p->block_id, sector, pi->size); + peer_req = read_in_block(peer_device, p->block_id, sector, pi); if (!peer_req) { put_ldev(device); return -EIO; @@ -2206,7 +2245,15 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info * dp_flags = be32_to_cpu(p->dp_flags); rw |= wire_flags_to_bio(dp_flags); - if (peer_req->pages == NULL) { + if (pi->cmd == P_TRIM) { + struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev); + peer_req->flags |= EE_IS_TRIM; + if (!blk_queue_discard(q)) + peer_req->flags |= EE_IS_TRIM_USE_ZEROOUT; + D_ASSERT(peer_device, peer_req->i.size > 0); + D_ASSERT(peer_device, rw & REQ_DISCARD); + D_ASSERT(peer_device, peer_req->pages == NULL); + } else if (peer_req->pages == NULL) { D_ASSERT(device, peer_req->i.size == 0); D_ASSERT(device, dp_flags & DP_FLUSH); } @@ -2242,7 +2289,12 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info * update_peer_seq(peer_device, peer_seq); spin_lock_irq(&device->resource->req_lock); } - list_add(&peer_req->w.list, &device->active_ee); + /* if we use the zeroout fallback code, we process synchronously + * and we wait for all pending requests, respectively wait for + * active_ee to become empty in drbd_submit_peer_request(); + * better not add ourselves here. */ + if ((peer_req->flags & EE_IS_TRIM_USE_ZEROOUT) == 0) + list_add(&peer_req->w.list, &device->active_ee); spin_unlock_irq(&device->resource->req_lock); if (device->state.conn == C_SYNC_TARGET) @@ -2313,39 +2365,45 @@ out_interrupted: * The current sync rate used here uses only the most recent two step marks, * to have a short time average so we can react faster. 
*/ -int drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector) +bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector) { - struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk; - unsigned long db, dt, dbdt; struct lc_element *tmp; - int curr_events; - int throttle = 0; - unsigned int c_min_rate; - - rcu_read_lock(); - c_min_rate = rcu_dereference(device->ldev->disk_conf)->c_min_rate; - rcu_read_unlock(); + bool throttle = true; - /* feature disabled? */ - if (c_min_rate == 0) - return 0; + if (!drbd_rs_c_min_rate_throttle(device)) + return false; spin_lock_irq(&device->al_lock); tmp = lc_find(device->resync, BM_SECT_TO_EXT(sector)); if (tmp) { struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce); - if (test_bit(BME_PRIORITY, &bm_ext->flags)) { - spin_unlock_irq(&device->al_lock); - return 0; - } + if (test_bit(BME_PRIORITY, &bm_ext->flags)) + throttle = false; /* Do not slow down if app IO is already waiting for this extent */ } spin_unlock_irq(&device->al_lock); + return throttle; +} + +bool drbd_rs_c_min_rate_throttle(struct drbd_device *device) +{ + struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk; + unsigned long db, dt, dbdt; + unsigned int c_min_rate; + int curr_events; + + rcu_read_lock(); + c_min_rate = rcu_dereference(device->ldev->disk_conf)->c_min_rate; + rcu_read_unlock(); + + /* feature disabled? */ + if (c_min_rate == 0) + return false; + curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + (int)part_stat_read(&disk->part0, sectors[1]) - atomic_read(&device->rs_sect_ev); - if (!device->rs_last_events || curr_events - device->rs_last_events > 64) { unsigned long rs_left; int i; @@ -2368,12 +2426,11 @@ int drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector) dbdt = Bit2KB(db/dt); if (dbdt > c_min_rate) - throttle = 1; + return true; } - return throttle; + return false; } - static int receive_DataRequest(struct drbd_connection *connection, struct packet_info *pi) { struct drbd_peer_device *peer_device; @@ -2436,7 +2493,8 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD * "criss-cross" setup, that might cause write-out on some other DRBD, * which in turn might block on the other node at this very place. */ - peer_req = drbd_alloc_peer_req(peer_device, p->block_id, sector, size, GFP_NOIO); + peer_req = drbd_alloc_peer_req(peer_device, p->block_id, sector, size, + true /* has real payload */, GFP_NOIO); if (!peer_req) { put_ldev(device); return -ENOMEM; @@ -3648,6 +3706,13 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info put_ldev(device); } + device->peer_max_bio_size = be32_to_cpu(p->max_bio_size); + drbd_reconsider_max_bio_size(device); + /* Leave drbd_reconsider_max_bio_size() before drbd_determine_dev_size(). + In case we cleared the QUEUE_FLAG_DISCARD from our queue in + drbd_reconsider_max_bio_size(), we can be sure that after + drbd_determine_dev_size() no REQ_DISCARDs are in the queue. 
*/ + ddsf = be16_to_cpu(p->dds_flags); if (get_ldev(device)) { dd = drbd_determine_dev_size(device, ddsf, NULL); @@ -3660,9 +3725,6 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info drbd_set_my_capacity(device, p_size); } - device->peer_max_bio_size = be32_to_cpu(p->max_bio_size); - drbd_reconsider_max_bio_size(device); - if (get_ldev(device)) { if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev)) { device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev); @@ -4423,6 +4485,7 @@ static struct data_cmd drbd_cmd_handler[] = { [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync }, [P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state }, [P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol }, + [P_TRIM] = { 0, sizeof(struct p_trim), receive_Data }, }; static void drbdd(struct drbd_connection *connection) @@ -4630,6 +4693,7 @@ static int drbd_send_features(struct drbd_connection *connection) memset(p, 0, sizeof(*p)); p->protocol_min = cpu_to_be32(PRO_VERSION_MIN); p->protocol_max = cpu_to_be32(PRO_VERSION_MAX); + p->feature_flags = cpu_to_be32(PRO_FEATURES); return conn_send_command(connection, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0); } @@ -4683,10 +4747,14 @@ static int drbd_do_features(struct drbd_connection *connection) goto incompat; connection->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max); + connection->agreed_features = PRO_FEATURES & be32_to_cpu(p->feature_flags); drbd_info(connection, "Handshake successful: " "Agreed network protocol version %d\n", connection->agreed_pro_version); + drbd_info(connection, "Agreed to%ssupport TRIM on protocol level\n", + connection->agreed_features & FF_TRIM ? " " : " not "); + return 1; incompat: @@ -4778,6 +4846,12 @@ static int drbd_do_auth(struct drbd_connection *connection) goto fail; } + if (pi.size < CHALLENGE_LEN) { + drbd_err(connection, "AuthChallenge payload too small.\n"); + rv = -1; + goto fail; + } + peers_ch = kmalloc(pi.size, GFP_NOIO); if (peers_ch == NULL) { drbd_err(connection, "kmalloc of peers_ch failed\n"); @@ -4791,6 +4865,12 @@ static int drbd_do_auth(struct drbd_connection *connection) goto fail; } + if (!memcmp(my_challenge, peers_ch, CHALLENGE_LEN)) { + drbd_err(connection, "Peer presented the same challenge!\n"); + rv = -1; + goto fail; + } + resp_size = crypto_hash_digestsize(connection->cram_hmac_tfm); response = kmalloc(resp_size, GFP_NOIO); if (response == NULL) { diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 3779c8d2875b..09803d0d5207 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -522,6 +522,13 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED); break; + case DISCARD_COMPLETED_NOTSUPP: + case DISCARD_COMPLETED_WITH_ERROR: + /* I'd rather not detach from local disk just because it + * failed a REQ_DISCARD. */ + mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED); + break; + case QUEUE_FOR_NET_READ: /* READ or READA, and * no local disk, @@ -1235,6 +1242,7 @@ void do_submit(struct work_struct *ws) if (list_empty(&incoming)) break; +skip_fast_path: wait_event(device->al_wait, prepare_al_transaction_nonblock(device, &incoming, &pending)); /* Maybe more was queued, while we prepared the transaction? * Try to stuff them into this transaction as well. 
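The two checks added to drbd_do_auth() above close a pair of protocol holes: a peer could send an AuthChallenge payload shorter than CHALLENGE_LEN, or reflect our own challenge back at us, tricking us into computing the response to our own challenge. Below is a minimal standalone sketch of that validation order; the HMAC is a stub, and every name outside the quoted error strings is illustrative rather than the driver's actual API.

#include <stdio.h>
#include <string.h>

#define CHALLENGE_LEN 64

/* Stub keyed digest for illustration only; DRBD uses the kernel
 * crypto API (CRAM-HMAC) instead. */
static void toy_hmac(const unsigned char *key, size_t key_len,
                     const unsigned char *msg, size_t msg_len,
                     unsigned char out[CHALLENGE_LEN])
{
    size_t i;
    for (i = 0; i < CHALLENGE_LEN; i++)
        out[i] = key[i % key_len] ^ msg[i % msg_len] ^ (unsigned char)i;
}

/* Validate the peer's challenge before doing any crypto with it.
 * Returns 0 if it is acceptable, -1 if it must be rejected. */
static int check_peer_challenge(const unsigned char *my_challenge,
                                const unsigned char *peers_ch,
                                size_t peers_len)
{
    if (peers_len < CHALLENGE_LEN) {
        fprintf(stderr, "AuthChallenge payload too small.\n");
        return -1;
    }
    if (!memcmp(my_challenge, peers_ch, CHALLENGE_LEN)) {
        fprintf(stderr, "Peer presented the same challenge!\n");
        return -1;
    }
    return 0;
}

int main(void)
{
    unsigned char mine[CHALLENGE_LEN] = { 1, 2, 3 };      /* our random challenge */
    unsigned char reflected[CHALLENGE_LEN] = { 1, 2, 3 }; /* attacker echoes it back */
    unsigned char peers[CHALLENGE_LEN] = { 9, 8, 7 };     /* honest peer's challenge */
    unsigned char response[CHALLENGE_LEN];

    printf("reflected challenge: %s\n",
           check_peer_challenge(mine, reflected, sizeof(reflected)) ? "rejected" : "accepted");

    if (check_peer_challenge(mine, peers, sizeof(peers)) == 0) {
        toy_hmac((const unsigned char *)"shared-secret", 13, peers, sizeof(peers), response);
        printf("honest challenge accepted, response computed\n");
    }
    return 0;
}

Rejecting the reflected challenge before computing any digest is the important part: once a response to our own challenge leaves the box, the shared secret no longer protects us.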
@@ -1273,6 +1281,25 @@ void do_submit(struct work_struct *ws) list_del_init(&req->tl_requests); drbd_send_and_submit(device, req); } + + /* If all currently hot activity log extents are kept busy by + * incoming requests, we still must not totally starve new + * requests to cold extents. In that case, prepare one request + * in blocking mode. */ + list_for_each_entry_safe(req, tmp, &incoming, tl_requests) { + list_del_init(&req->tl_requests); + req->rq_state |= RQ_IN_ACT_LOG; + if (!drbd_al_begin_io_prepare(device, &req->i)) { + /* Corresponding extent was hot after all? */ + drbd_send_and_submit(device, req); + } else { + /* Found a request to a cold extent. + * Put on "pending" list, + * and try to cumulate with more. */ + list_add(&req->tl_requests, &pending); + goto skip_fast_path; + } + } } } @@ -1326,23 +1353,35 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct return limit; } -static struct drbd_request *find_oldest_request(struct drbd_connection *connection) +static void find_oldest_requests( + struct drbd_connection *connection, + struct drbd_device *device, + struct drbd_request **oldest_req_waiting_for_peer, + struct drbd_request **oldest_req_waiting_for_disk) { - /* Walk the transfer log, - * and find the oldest not yet completed request */ struct drbd_request *r; + *oldest_req_waiting_for_peer = NULL; + *oldest_req_waiting_for_disk = NULL; list_for_each_entry(r, &connection->transfer_log, tl_requests) { - if (atomic_read(&r->completion_ref)) - return r; + const unsigned s = r->rq_state; + if (!*oldest_req_waiting_for_peer + && ((s & RQ_NET_MASK) && !(s & RQ_NET_DONE))) + *oldest_req_waiting_for_peer = r; + + if (!*oldest_req_waiting_for_disk + && (s & RQ_LOCAL_PENDING) && r->device == device) + *oldest_req_waiting_for_disk = r; + + if (*oldest_req_waiting_for_peer && *oldest_req_waiting_for_disk) + break; } - return NULL; } void request_timer_fn(unsigned long data) { struct drbd_device *device = (struct drbd_device *) data; struct drbd_connection *connection = first_peer_device(device)->connection; - struct drbd_request *req; /* oldest request */ + struct drbd_request *req_disk, *req_peer; /* oldest request */ struct net_conf *nc; unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */ unsigned long now; @@ -1366,8 +1405,8 @@ void request_timer_fn(unsigned long data) now = jiffies; spin_lock_irq(&device->resource->req_lock); - req = find_oldest_request(connection); - if (!req) { + find_oldest_requests(connection, device, &req_peer, &req_disk); + if (req_peer == NULL && req_disk == NULL) { spin_unlock_irq(&device->resource->req_lock); mod_timer(&device->request_timer, now + et); return; @@ -1389,19 +1428,26 @@ void request_timer_fn(unsigned long data) * ~198 days with 250 HZ, we have a window where the timeout would need * to expire twice (worst case) to become effective. Good enough. 
*/ - if (ent && req->rq_state & RQ_NET_PENDING && - time_after(now, req->start_time + ent) && + if (ent && req_peer && + time_after(now, req_peer->start_time + ent) && !time_in_range(now, connection->last_reconnect_jif, connection->last_reconnect_jif + ent)) { drbd_warn(device, "Remote failed to finish a request within ko-count * timeout\n"); _drbd_set_state(_NS(device, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL); } - if (dt && req->rq_state & RQ_LOCAL_PENDING && req->device == device && - time_after(now, req->start_time + dt) && + if (dt && req_disk && + time_after(now, req_disk->start_time + dt) && !time_in_range(now, device->last_reattach_jif, device->last_reattach_jif + dt)) { drbd_warn(device, "Local backing device failed to meet the disk-timeout\n"); __drbd_chk_io_error(device, DRBD_FORCE_DETACH); } - nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et; + + /* Reschedule timer for the nearest not already expired timeout. + * Fallback to now + min(effective network timeout, disk timeout). */ + ent = (ent && req_peer && time_before(now, req_peer->start_time + ent)) + ? req_peer->start_time + ent : now + et; + dt = (dt && req_disk && time_before(now, req_disk->start_time + dt)) + ? req_disk->start_time + dt : now + et; + nt = time_before(ent, dt) ? ent : dt; spin_unlock_irq(&connection->resource->req_lock); mod_timer(&device->request_timer, nt); } diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h index c684c963538e..8566cd5866b4 100644 --- a/drivers/block/drbd/drbd_req.h +++ b/drivers/block/drbd/drbd_req.h @@ -30,7 +30,6 @@ #include <linux/slab.h> #include <linux/drbd.h> #include "drbd_int.h" -#include "drbd_wrappers.h" /* The request callbacks will be called in irq context by the IDE drivers, and in Softirqs/Tasklets/BH context by the SCSI drivers, @@ -111,11 +110,14 @@ enum drbd_req_event { BARRIER_ACKED, /* in protocol A and B */ DATA_RECEIVED, /* (remote read) */ + COMPLETED_OK, READ_COMPLETED_WITH_ERROR, READ_AHEAD_COMPLETED_WITH_ERROR, WRITE_COMPLETED_WITH_ERROR, + DISCARD_COMPLETED_NOTSUPP, + DISCARD_COMPLETED_WITH_ERROR, + ABORT_DISK_IO, - COMPLETED_OK, RESEND, FAIL_FROZEN_DISK_IO, RESTART_FROZEN_DISK_IO, diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c index 1a84345a3868..a5d8aae00e04 100644 --- a/drivers/block/drbd/drbd_state.c +++ b/drivers/block/drbd/drbd_state.c @@ -54,8 +54,8 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os, static enum drbd_state_rv is_valid_state(struct drbd_device *, union drbd_state); static enum drbd_state_rv is_valid_soft_transition(union drbd_state, union drbd_state, struct drbd_connection *); static enum drbd_state_rv is_valid_transition(union drbd_state os, union drbd_state ns); -static union drbd_state sanitize_state(struct drbd_device *device, union drbd_state ns, - enum sanitize_state_warnings *warn); +static union drbd_state sanitize_state(struct drbd_device *device, union drbd_state os, + union drbd_state ns, enum sanitize_state_warnings *warn); static inline bool is_susp(union drbd_state s) { @@ -287,7 +287,7 @@ _req_st_cond(struct drbd_device *device, union drbd_state mask, spin_lock_irqsave(&device->resource->req_lock, flags); os = drbd_read_state(device); - ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL); + ns = sanitize_state(device, os, apply_mask_val(os, mask, val), NULL); rv = is_valid_transition(os, ns); if (rv >= SS_SUCCESS) rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. 
*/ @@ -333,7 +333,7 @@ drbd_req_state(struct drbd_device *device, union drbd_state mask, spin_lock_irqsave(&device->resource->req_lock, flags); os = drbd_read_state(device); - ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL); + ns = sanitize_state(device, os, apply_mask_val(os, mask, val), NULL); rv = is_valid_transition(os, ns); if (rv < SS_SUCCESS) { spin_unlock_irqrestore(&device->resource->req_lock, flags); @@ -740,8 +740,8 @@ static void print_sanitize_warnings(struct drbd_device *device, enum sanitize_st * When we lose connection, we have to set the state of the peer's disk (pdsk) * to D_UNKNOWN. This rule and many more along those lines are in this function. */ -static union drbd_state sanitize_state(struct drbd_device *device, union drbd_state ns, - enum sanitize_state_warnings *warn) +static union drbd_state sanitize_state(struct drbd_device *device, union drbd_state os, + union drbd_state ns, enum sanitize_state_warnings *warn) { enum drbd_fencing_p fp; enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max; @@ -882,11 +882,13 @@ static union drbd_state sanitize_state(struct drbd_device *device, union drbd_st } if (fp == FP_STONITH && - (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED)) + (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) && + !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED)) ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */ if (device->resource->res_opts.on_no_data == OND_SUSPEND_IO && - (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)) + (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) && + !(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE)) ns.susp_nod = 1; /* Suspend IO while no data available (no accessible data available) */ if (ns.aftr_isp || ns.peer_isp || ns.user_isp) { @@ -958,7 +960,7 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns, os = drbd_read_state(device); - ns = sanitize_state(device, ns, &ssw); + ns = sanitize_state(device, os, ns, &ssw); if (ns.i == os.i) return SS_NOTHING_TO_DO; @@ -1656,7 +1658,7 @@ conn_is_valid_transition(struct drbd_connection *connection, union drbd_state ma idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { struct drbd_device *device = peer_device->device; os = drbd_read_state(device); - ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL); + ns = sanitize_state(device, os, apply_mask_val(os, mask, val), NULL); if (flags & CS_IGN_OUTD_FAIL && ns.disk == D_OUTDATED && os.disk < D_OUTDATED) ns.disk = os.disk; @@ -1718,7 +1720,7 @@ conn_set_state(struct drbd_connection *connection, union drbd_state mask, union number_of_volumes++; os = drbd_read_state(device); ns = apply_mask_val(os, mask, val); - ns = sanitize_state(device, ns, NULL); + ns = sanitize_state(device, os, ns, NULL); if (flags & CS_IGN_OUTD_FAIL && ns.disk == D_OUTDATED && os.disk < D_OUTDATED) ns.disk = os.disk; @@ -1763,19 +1765,19 @@ conn_set_state(struct drbd_connection *connection, union drbd_state mask, union static enum drbd_state_rv _conn_rq_cond(struct drbd_connection *connection, union drbd_state mask, union drbd_state val) { - enum drbd_state_rv rv; + enum drbd_state_rv err, rv = SS_UNKNOWN_ERROR; /* continue waiting */; if (test_and_clear_bit(CONN_WD_ST_CHG_OKAY, &connection->flags)) - return SS_CW_SUCCESS; + rv = SS_CW_SUCCESS; if (test_and_clear_bit(CONN_WD_ST_CHG_FAIL, &connection->flags)) - return 
SS_CW_FAILED_BY_PEER; + rv = SS_CW_FAILED_BY_PEER; - rv = conn_is_valid_transition(connection, mask, val, 0); - if (rv == SS_SUCCESS && connection->cstate == C_WF_REPORT_PARAMS) - rv = SS_UNKNOWN_ERROR; /* continue waiting */ + err = conn_is_valid_transition(connection, mask, val, 0); + if (err == SS_SUCCESS && connection->cstate == C_WF_REPORT_PARAMS) + return rv; - return rv; + return err; } enum drbd_state_rv diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 2c4ce42c3657..d8f57b6305cd 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -118,7 +118,7 @@ static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __rele /* writes on behalf of the partner, or resync writes, * "submitted" by the receiver, final stage. */ -static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local) +void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local) { unsigned long flags = 0; struct drbd_peer_device *peer_device = peer_req->peer_device; @@ -150,7 +150,9 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __rel do_wake = list_empty(block_id == ID_SYNCER ? &device->sync_ee : &device->active_ee); - if (test_bit(__EE_WAS_ERROR, &peer_req->flags)) + /* FIXME do we want to detach for failed REQ_DISCARD? + * ((peer_req->flags & (EE_WAS_ERROR|EE_IS_TRIM)) == EE_WAS_ERROR) */ + if (peer_req->flags & EE_WAS_ERROR) __drbd_chk_io_error(device, DRBD_WRITE_ERROR); spin_unlock_irqrestore(&device->resource->req_lock, flags); @@ -176,10 +178,12 @@ void drbd_peer_request_endio(struct bio *bio, int error) struct drbd_device *device = peer_req->peer_device->device; int uptodate = bio_flagged(bio, BIO_UPTODATE); int is_write = bio_data_dir(bio) == WRITE; + int is_discard = !!(bio->bi_rw & REQ_DISCARD); if (error && __ratelimit(&drbd_ratelimit_state)) drbd_warn(device, "%s: error=%d s=%llus\n", - is_write ? "write" : "read", error, + is_write ? (is_discard ? "discard" : "write") + : "read", error, (unsigned long long)peer_req->i.sector); if (!error && !uptodate) { if (__ratelimit(&drbd_ratelimit_state)) @@ -263,7 +267,12 @@ void drbd_request_endio(struct bio *bio, int error) /* to avoid recursion in __req_mod */ if (unlikely(error)) { - what = (bio_data_dir(bio) == WRITE) + if (bio->bi_rw & REQ_DISCARD) + what = (error == -EOPNOTSUPP) + ? DISCARD_COMPLETED_NOTSUPP + : DISCARD_COMPLETED_WITH_ERROR; + else + what = (bio_data_dir(bio) == WRITE) ? WRITE_COMPLETED_WITH_ERROR : (bio_rw(bio) == READ) ? READ_COMPLETED_WITH_ERROR @@ -395,7 +404,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector, /* GFP_TRY, because if there is no memory available right now, this may * be rescheduled for later. It is "only" background resync, after all. 
*/ peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER /* unused */, sector, - size, GFP_TRY); + size, true /* has real payload */, GFP_TRY); if (!peer_req) goto defer; @@ -492,10 +501,9 @@ struct fifo_buffer *fifo_alloc(int fifo_size) return fb; } -static int drbd_rs_controller(struct drbd_device *device) +static int drbd_rs_controller(struct drbd_device *device, unsigned int sect_in) { struct disk_conf *dc; - unsigned int sect_in; /* Number of sectors that came in since the last turn */ unsigned int want; /* The number of sectors we want in the proxy */ int req_sect; /* Number of sectors to request in this turn */ int correction; /* Number of sectors more we need in the proxy*/ @@ -505,9 +513,6 @@ static int drbd_rs_controller(struct drbd_device *device) int max_sect; struct fifo_buffer *plan; - sect_in = atomic_xchg(&device->rs_sect_in, 0); /* Number of sectors that came in */ - device->rs_in_flight -= sect_in; - dc = rcu_dereference(device->ldev->disk_conf); plan = rcu_dereference(device->rs_plan_s); @@ -550,11 +555,16 @@ static int drbd_rs_controller(struct drbd_device *device) static int drbd_rs_number_requests(struct drbd_device *device) { - int number; + unsigned int sect_in; /* Number of sectors that came in since the last turn */ + int number, mxb; + + sect_in = atomic_xchg(&device->rs_sect_in, 0); + device->rs_in_flight -= sect_in; rcu_read_lock(); + mxb = drbd_get_max_buffers(device) / 2; if (rcu_dereference(device->rs_plan_s)->size) { - number = drbd_rs_controller(device) >> (BM_BLOCK_SHIFT - 9); + number = drbd_rs_controller(device, sect_in) >> (BM_BLOCK_SHIFT - 9); device->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME; } else { device->c_sync_rate = rcu_dereference(device->ldev->disk_conf)->resync_rate; @@ -562,8 +572,14 @@ static int drbd_rs_number_requests(struct drbd_device *device) } rcu_read_unlock(); - /* ignore the amount of pending requests, the resync controller should - * throttle down to incoming reply rate soon enough anyways. */ + /* Don't have more than "max-buffers"/2 in-flight. + * Otherwise we may cause the remote site to stall on drbd_alloc_pages(), + * potentially causing a distributed deadlock on congestion during + * online-verify or (checksum-based) resync, if max-buffers, + * socket buffer sizes and resync rate settings are mis-configured. */ + if (mxb - device->rs_in_flight < number) + number = mxb - device->rs_in_flight; + return number; } @@ -597,7 +613,7 @@ static int make_resync_request(struct drbd_device *device, int cancel) max_bio_size = queue_max_hw_sectors(device->rq_queue) << 9; number = drbd_rs_number_requests(device); - if (number == 0) + if (number <= 0) goto requeue; for (i = 0; i < number; i++) { @@ -647,7 +663,7 @@ next_sector: */ align = 1; rollback_i = i; - for (;;) { + while (i < number) { if (size + BM_BLOCK_SIZE > max_bio_size) break; @@ -1670,11 +1686,15 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side) } clear_bit(B_RS_H_DONE, &device->flags); - write_lock_irq(&global_state_lock); + /* req_lock: serialize with drbd_send_and_submit() and others + * global_state_lock: for stable sync-after dependencies */ + spin_lock_irq(&device->resource->req_lock); + write_lock(&global_state_lock); /* Did some connection breakage or IO error race with us? 
*/ if (device->state.conn < C_CONNECTED || !get_ldev_if_state(device, D_NEGOTIATING)) { - write_unlock_irq(&global_state_lock); + write_unlock(&global_state_lock); + spin_unlock_irq(&device->resource->req_lock); mutex_unlock(device->state_mutex); return; } @@ -1714,7 +1734,8 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side) } _drbd_pause_after(device); } - write_unlock_irq(&global_state_lock); + write_unlock(&global_state_lock); + spin_unlock_irq(&device->resource->req_lock); if (r == SS_SUCCESS) { /* reset rs_last_bcast when a resync or verify is started, @@ -1778,34 +1799,6 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side) mutex_unlock(device->state_mutex); } -/* If the resource already closed the current epoch, but we did not - * (because we have not yet seen new requests), we should send the - * corresponding barrier now. Must be checked within the same spinlock - * that is used to check for new requests. */ -static bool need_to_send_barrier(struct drbd_connection *connection) -{ - if (!connection->send.seen_any_write_yet) - return false; - - /* Skip barriers that do not contain any writes. - * This may happen during AHEAD mode. */ - if (!connection->send.current_epoch_writes) - return false; - - /* ->req_lock is held when requests are queued on - * connection->sender_work, and put into ->transfer_log. - * It is also held when ->current_tle_nr is increased. - * So either there are already new requests queued, - * and corresponding barriers will be send there. - * Or nothing new is queued yet, so the difference will be 1. - */ - if (atomic_read(&connection->current_tle_nr) != - connection->send.current_epoch_nr + 1) - return false; - - return true; -} - static bool dequeue_work_batch(struct drbd_work_queue *queue, struct list_head *work_list) { spin_lock_irq(&queue->q_lock); @@ -1864,12 +1857,22 @@ static void wait_for_work(struct drbd_connection *connection, struct list_head * spin_unlock_irq(&connection->resource->req_lock); break; } - send_barrier = need_to_send_barrier(connection); + + /* We found nothing new to do, no to-be-communicated request, + * no other work item. We may still need to close the last + * epoch. Next incoming request epoch will be connection -> + * current transfer log epoch number. If that is different + * from the epoch of the last request we communicated, it is + * safe to send the epoch separating barrier now. + */ + send_barrier = + atomic_read(&connection->current_tle_nr) != + connection->send.current_epoch_nr; spin_unlock_irq(&connection->resource->req_lock); - if (send_barrier) { - drbd_send_barrier(connection); - connection->send.current_epoch_nr++; - } + + if (send_barrier) + maybe_send_barrier(connection, + connection->send.current_epoch_nr + 1); schedule(); /* may be woken up for other things but new work, too, * e.g. if the current epoch got closed. 
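Before the drbd_wrappers.h removal below, the barrier change just made in wait_for_work() deserves a closer look: need_to_send_barrier() is gone, and the idle path now closes the previous epoch whenever the current transfer-log epoch number differs from the epoch of the last communicated request. Here is a standalone model of that invariant; the struct and its two fields are simplified stand-ins for what struct drbd_connection keeps under resource->req_lock, not the driver's real layout.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the connection state involved in the
 * barrier decision. */
struct conn_model {
    int current_tle_nr;     /* epoch new requests would be queued into */
    int last_sent_epoch_nr; /* epoch of the last communicated request */
};

/* Mirrors the idle-path decision in wait_for_work(): with no queued
 * work, the previous epoch can be closed as soon as the next request
 * would start a different epoch. */
static bool should_send_barrier(const struct conn_model *c)
{
    return c->current_tle_nr != c->last_sent_epoch_nr;
}

/* Send the epoch-separating barrier at most once, then advance the
 * last-communicated epoch so repeated idle passes stay quiet. */
static void maybe_send_barrier_model(struct conn_model *c, int epoch)
{
    if (c->last_sent_epoch_nr != epoch) {
        printf("P_BARRIER closes epoch %d\n", c->last_sent_epoch_nr);
        c->last_sent_epoch_nr = epoch;
    }
}

int main(void)
{
    struct conn_model c = { .current_tle_nr = 8, .last_sent_epoch_nr = 7 };

    if (should_send_barrier(&c))
        maybe_send_barrier_model(&c, c.last_sent_epoch_nr + 1);

    /* Both counters now agree, so a second idle pass sends nothing. */
    printf("barrier still pending: %d\n", should_send_barrier(&c));
    return 0;
}

As modeled, sending the barrier also advances the last-communicated epoch number, which is why a second pass through the idle path finds the counters equal and emits no duplicate barrier.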
diff --git a/drivers/block/drbd/drbd_wrappers.h b/drivers/block/drbd/drbd_wrappers.h deleted file mode 100644 index 3db9ebaf64f6..000000000000 --- a/drivers/block/drbd/drbd_wrappers.h +++ /dev/null @@ -1,54 +0,0 @@ -#ifndef _DRBD_WRAPPERS_H -#define _DRBD_WRAPPERS_H - -#include <linux/ctype.h> -#include <linux/mm.h> -#include "drbd_int.h" - -/* see get_sb_bdev and bd_claim */ -extern char *drbd_sec_holder; - -/* sets the number of 512 byte sectors of our virtual device */ -static inline void drbd_set_my_capacity(struct drbd_device *device, - sector_t size) -{ - /* set_capacity(device->this_bdev->bd_disk, size); */ - set_capacity(device->vdisk, size); - device->this_bdev->bd_inode->i_size = (loff_t)size << 9; -} - -#define drbd_bio_uptodate(bio) bio_flagged(bio, BIO_UPTODATE) - -/* bi_end_io handlers */ -extern void drbd_md_io_complete(struct bio *bio, int error); -extern void drbd_peer_request_endio(struct bio *bio, int error); -extern void drbd_request_endio(struct bio *bio, int error); - -/* - * used to submit our private bio - */ -static inline void drbd_generic_make_request(struct drbd_device *device, - int fault_type, struct bio *bio) -{ - __release(local); - if (!bio->bi_bdev) { - printk(KERN_ERR "drbd%d: drbd_generic_make_request: " - "bio->bi_bdev == NULL\n", - device_to_minor(device)); - dump_stack(); - bio_endio(bio, -ENODEV); - return; - } - - if (drbd_insert_fault(device, fault_type)) - bio_endio(bio, -EIO); - else - generic_make_request(bio); -} - -#ifndef __CHECKER__ -# undef __cond_lock -# define __cond_lock(x,c) (c) -#endif - -#endif diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index fa9bb742df6e..677db049f55a 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -2351,7 +2351,7 @@ static void rw_interrupt(void) } if (CT(COMMAND) != FD_READ || - raw_cmd->kernel_data == current_req->buffer) { + raw_cmd->kernel_data == bio_data(current_req->bio)) { /* transfer directly from buffer */ cont->done(1); } else if (CT(COMMAND) == FD_READ) { @@ -2640,7 +2640,7 @@ static int make_raw_rw_request(void) raw_cmd->flags &= ~FD_RAW_WRITE; raw_cmd->flags |= FD_RAW_READ; COMMAND = FM_MODE(_floppy, FD_READ); - } else if ((unsigned long)current_req->buffer < MAX_DMA_ADDRESS) { + } else if ((unsigned long)bio_data(current_req->bio) < MAX_DMA_ADDRESS) { unsigned long dma_limit; int direct, indirect; @@ -2654,13 +2654,13 @@ static int make_raw_rw_request(void) */ max_size = buffer_chain_size(); dma_limit = (MAX_DMA_ADDRESS - - ((unsigned long)current_req->buffer)) >> 9; + ((unsigned long)bio_data(current_req->bio))) >> 9; if ((unsigned long)max_size > dma_limit) max_size = dma_limit; /* 64 kb boundaries */ - if (CROSS_64KB(current_req->buffer, max_size << 9)) + if (CROSS_64KB(bio_data(current_req->bio), max_size << 9)) max_size = (K_64 - - ((unsigned long)current_req->buffer) % + ((unsigned long)bio_data(current_req->bio)) % K_64) >> 9; direct = transfer_size(ssize, max_sector, max_size) - fsector_t; /* @@ -2677,7 +2677,7 @@ static int make_raw_rw_request(void) (DP->read_track & (1 << DRS->probed_format)))))) { max_size = blk_rq_sectors(current_req); } else { - raw_cmd->kernel_data = current_req->buffer; + raw_cmd->kernel_data = bio_data(current_req->bio); raw_cmd->length = current_count_sectors << 9; if (raw_cmd->length == 0) { DPRINT("%s: zero dma transfer attempted\n", __func__); @@ -2731,7 +2731,7 @@ static int make_raw_rw_request(void) raw_cmd->length = ((raw_cmd->length - 1) | (ssize - 1)) + 1; raw_cmd->length <<= 9; if ((raw_cmd->length < current_count_sectors 
<< 9) || - (raw_cmd->kernel_data != current_req->buffer && + (raw_cmd->kernel_data != bio_data(current_req->bio) && CT(COMMAND) == FD_WRITE && (aligned_sector_t + (raw_cmd->length >> 9) > buffer_max || aligned_sector_t < buffer_min)) || @@ -2739,7 +2739,7 @@ static int make_raw_rw_request(void) raw_cmd->length <= 0 || current_count_sectors <= 0) { DPRINT("fractionary current count b=%lx s=%lx\n", raw_cmd->length, current_count_sectors); - if (raw_cmd->kernel_data != current_req->buffer) + if (raw_cmd->kernel_data != bio_data(current_req->bio)) pr_info("addr=%d, length=%ld\n", (int)((raw_cmd->kernel_data - floppy_track_buffer) >> 9), @@ -2756,7 +2756,7 @@ static int make_raw_rw_request(void) return 0; } - if (raw_cmd->kernel_data != current_req->buffer) { + if (raw_cmd->kernel_data != bio_data(current_req->bio)) { if (raw_cmd->kernel_data < floppy_track_buffer || current_count_sectors < 0 || raw_cmd->length < 0 || @@ -3812,7 +3812,7 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive) bio.bi_iter.bi_size = size; bio.bi_bdev = bdev; bio.bi_iter.bi_sector = 0; - bio.bi_flags = (1 << BIO_QUIET); + bio.bi_flags |= (1 << BIO_QUIET); bio.bi_private = &cbdata; bio.bi_end_io = floppy_rb0_cb; diff --git a/drivers/block/hd.c b/drivers/block/hd.c index bf397bf108b7..8a290c08262f 100644 --- a/drivers/block/hd.c +++ b/drivers/block/hd.c @@ -464,11 +464,11 @@ static void read_intr(void) ok_to_read: req = hd_req; - insw(HD_DATA, req->buffer, 256); + insw(HD_DATA, bio_data(req->bio), 256); #ifdef DEBUG printk("%s: read: sector %ld, remaining = %u, buffer=%p\n", req->rq_disk->disk_name, blk_rq_pos(req) + 1, - blk_rq_sectors(req) - 1, req->buffer+512); + blk_rq_sectors(req) - 1, bio_data(req->bio)+512); #endif if (hd_end_request(0, 512)) { SET_HANDLER(&read_intr); @@ -505,7 +505,7 @@ static void write_intr(void) ok_to_write: if (hd_end_request(0, 512)) { SET_HANDLER(&write_intr); - outsw(HD_DATA, req->buffer, 256); + outsw(HD_DATA, bio_data(req->bio), 256); return; } @@ -624,7 +624,7 @@ repeat: printk("%s: %sing: CHS=%d/%d/%d, sectors=%d, buffer=%p\n", req->rq_disk->disk_name, req_data_dir(req) == READ ? 
"read" : "writ", - cyl, head, sec, nsect, req->buffer); + cyl, head, sec, nsect, bio_data(req->bio)); #endif if (req->cmd_type == REQ_TYPE_FS) { switch (rq_data_dir(req)) { @@ -643,7 +643,7 @@ repeat: bad_rw_intr(); goto repeat; } - outsw(HD_DATA, req->buffer, 256); + outsw(HD_DATA, bio_data(req->bio), 256); break; default: printk("unknown hd-command\n"); diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c index eb59b1241366..e352cac707e8 100644 --- a/drivers/block/mg_disk.c +++ b/drivers/block/mg_disk.c @@ -479,7 +479,7 @@ static unsigned int mg_out(struct mg_host *host, static void mg_read_one(struct mg_host *host, struct request *req) { - u16 *buff = (u16 *)req->buffer; + u16 *buff = (u16 *)bio_data(req->bio); u32 i; for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) @@ -496,7 +496,7 @@ static void mg_read(struct request *req) mg_bad_rw_intr(host); MG_DBG("requested %d sects (from %ld), buffer=0x%p\n", - blk_rq_sectors(req), blk_rq_pos(req), req->buffer); + blk_rq_sectors(req), blk_rq_pos(req), bio_data(req->bio)); do { if (mg_wait(host, ATA_DRQ, @@ -514,7 +514,7 @@ static void mg_read(struct request *req) static void mg_write_one(struct mg_host *host, struct request *req) { - u16 *buff = (u16 *)req->buffer; + u16 *buff = (u16 *)bio_data(req->bio); u32 i; for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) @@ -534,7 +534,7 @@ static void mg_write(struct request *req) } MG_DBG("requested %d sects (from %ld), buffer=0x%p\n", - rem, blk_rq_pos(req), req->buffer); + rem, blk_rq_pos(req), bio_data(req->bio)); if (mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) { @@ -585,7 +585,7 @@ ok_to_read: mg_read_one(host, req); MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n", - blk_rq_pos(req), blk_rq_sectors(req) - 1, req->buffer); + blk_rq_pos(req), blk_rq_sectors(req) - 1, bio_data(req->bio)); /* send read confirm */ outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); @@ -624,7 +624,7 @@ ok_to_write: /* write 1 sector and set handler if remains */ mg_write_one(host, req); MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n", - blk_rq_pos(req), blk_rq_sectors(req), req->buffer); + blk_rq_pos(req), blk_rq_sectors(req), bio_data(req->bio)); host->mg_do_intr = mg_write_intr; mod_timer(&host->timer, jiffies + 3 * HZ); } diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 59c5abe32f06..abc858b3528b 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -31,6 +31,7 @@ #include <linux/module.h> #include <linux/genhd.h> #include <linux/blkdev.h> +#include <linux/blk-mq.h> #include <linux/bio.h> #include <linux/dma-mapping.h> #include <linux/idr.h> @@ -173,60 +174,34 @@ static bool mtip_check_surprise_removal(struct pci_dev *pdev) return false; /* device present */ } -/* - * Obtain an empty command slot. - * - * This function needs to be reentrant since it could be called - * at the same time on multiple CPUs. The allocation of the - * command slot must be atomic. - * - * @port Pointer to the port data structure. - * - * return value - * >= 0 Index of command slot obtained. - * -1 No command slots available. - */ -static int get_slot(struct mtip_port *port) +static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd) { - int slot, i; - unsigned int num_command_slots = port->dd->slot_groups * 32; + struct request *rq; - /* - * Try 10 times, because there is a small race here. - * that's ok, because it's still cheaper than a lock. 
- * - * Race: Since this section is not protected by lock, same bit - * could be chosen by different process contexts running in - * different processor. So instead of costly lock, we are going - * with loop. - */ - for (i = 0; i < 10; i++) { - slot = find_next_zero_bit(port->allocated, - num_command_slots, 1); - if ((slot < num_command_slots) && - (!test_and_set_bit(slot, port->allocated))) - return slot; - } - dev_warn(&port->dd->pdev->dev, "Failed to get a tag.\n"); + rq = blk_mq_alloc_request(dd->queue, 0, __GFP_WAIT, true); + return blk_mq_rq_to_pdu(rq); +} - mtip_check_surprise_removal(port->dd->pdev); - return -1; +static void mtip_put_int_command(struct driver_data *dd, struct mtip_cmd *cmd) +{ + blk_put_request(blk_mq_rq_from_pdu(cmd)); } /* - * Release a command slot. - * - * @port Pointer to the port data structure. - * @tag Tag of command to release - * - * return value - * None + * Once we add support for one hctx per mtip group, this will change a bit */ -static inline void release_slot(struct mtip_port *port, int tag) +static struct request *mtip_rq_from_tag(struct driver_data *dd, + unsigned int tag) +{ + return blk_mq_tag_to_rq(dd->queue->queue_hw_ctx[0], tag); +} + +static struct mtip_cmd *mtip_cmd_from_tag(struct driver_data *dd, + unsigned int tag) { - smp_mb__before_clear_bit(); - clear_bit(tag, port->allocated); - smp_mb__after_clear_bit(); + struct request *rq = mtip_rq_from_tag(dd, tag); + + return blk_mq_rq_to_pdu(rq); } /* @@ -248,93 +223,28 @@ static inline void release_slot(struct mtip_port *port, int tag) * None */ static void mtip_async_complete(struct mtip_port *port, - int tag, - void *data, - int status) + int tag, struct mtip_cmd *cmd, int status) { - struct mtip_cmd *cmd; - struct driver_data *dd = data; - int unaligned, cb_status = status ? -EIO : 0; - void (*func)(void *, int); + struct driver_data *dd = port->dd; + struct request *rq; if (unlikely(!dd) || unlikely(!port)) return; - cmd = &port->commands[tag]; - if (unlikely(status == PORT_IRQ_TF_ERR)) { dev_warn(&port->dd->pdev->dev, "Command tag %d failed due to TFE\n", tag); } - /* Clear the active flag */ - atomic_set(&port->commands[tag].active, 0); + /* Unmap the DMA scatter list entries */ + dma_unmap_sg(&dd->pdev->dev, cmd->sg, cmd->scatter_ents, cmd->direction); - /* Upper layer callback */ - func = cmd->async_callback; - if (likely(func && cmpxchg(&cmd->async_callback, func, 0) == func)) { + rq = mtip_rq_from_tag(dd, tag); - /* Unmap the DMA scatter list entries */ - dma_unmap_sg(&dd->pdev->dev, - cmd->sg, - cmd->scatter_ents, - cmd->direction); + if (unlikely(cmd->unaligned)) + up(&port->cmd_slot_unal); - func(cmd->async_data, cb_status); - unaligned = cmd->unaligned; - - /* Clear the allocated bit for the command */ - release_slot(port, tag); - - if (unlikely(unaligned)) - up(&port->cmd_slot_unal); - else - up(&port->cmd_slot); - } -} - -/* - * This function is called for clean the pending command in the - * command slot during the surprise removal of device and return - * error to the upper layer. - * - * @dd Pointer to the DRIVER_DATA structure. 
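The new helpers above lean on blk-mq's memory layout: with cmd_size set in the tag set, the per-command structure (the "PDU") is allocated immediately behind each preallocated request, so converting in either direction is pointer arithmetic rather than a driver-side lookup table. A sketch of the round trip, using identifiers from the patch:

static void show_pdu_round_trip(struct driver_data *dd, unsigned int tag)
{
        /* tag -> request: blk-mq owns the tag <-> request mapping now */
        struct request *rq = blk_mq_tag_to_rq(dd->queue->queue_hw_ctx[0], tag);
        /* request -> per-command PDU, laid out directly after *rq */
        struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
        /* and back again, no table needed */
        WARN_ON(blk_mq_rq_from_pdu(cmd) != rq);
}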
- * - * return value - * None - */ -static void mtip_command_cleanup(struct driver_data *dd) -{ - int tag = 0; - struct mtip_cmd *cmd; - struct mtip_port *port = dd->port; - unsigned int num_cmd_slots = dd->slot_groups * 32; - - if (!test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) - return; - - if (!port) - return; - - cmd = &port->commands[MTIP_TAG_INTERNAL]; - if (atomic_read(&cmd->active)) - if (readl(port->cmd_issue[MTIP_TAG_INTERNAL]) & - (1 << MTIP_TAG_INTERNAL)) - if (cmd->comp_func) - cmd->comp_func(port, MTIP_TAG_INTERNAL, - cmd->comp_data, -ENODEV); - - while (1) { - tag = find_next_bit(port->allocated, num_cmd_slots, tag); - if (tag >= num_cmd_slots) - break; - - cmd = &port->commands[tag]; - if (atomic_read(&cmd->active)) - mtip_async_complete(port, tag, dd, -ENODEV); - } - - set_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag); + blk_mq_end_io(rq, status ? -EIO : 0); } /* @@ -388,8 +298,6 @@ static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag) { int group = tag >> 5; - atomic_set(&port->commands[tag].active, 1); - /* guard SACT and CI registers */ spin_lock(&port->cmd_issue_lock[group]); writel((1 << MTIP_TAG_BIT(tag)), @@ -397,10 +305,6 @@ static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag) writel((1 << MTIP_TAG_BIT(tag)), port->cmd_issue[MTIP_TAG_INDEX(tag)]); spin_unlock(&port->cmd_issue_lock[group]); - - /* Set the command's timeout value.*/ - port->commands[tag].comp_time = jiffies + msecs_to_jiffies( - MTIP_NCQ_COMMAND_TIMEOUT_MS); } /* @@ -648,132 +552,13 @@ static void print_tags(struct driver_data *dd, memset(tagmap, 0, sizeof(tagmap)); for (group = SLOTBITS_IN_LONGS; group > 0; group--) - tagmap_len = sprintf(tagmap + tagmap_len, "%016lX ", + tagmap_len += sprintf(tagmap + tagmap_len, "%016lX ", tagbits[group-1]); dev_warn(&dd->pdev->dev, "%d command(s) %s: tagmap [%s]", cnt, msg, tagmap); } /* - * Called periodically to see if any read/write commands are - * taking too long to complete. - * - * @data Pointer to the PORT data structure. - * - * return value - * None - */ -static void mtip_timeout_function(unsigned long int data) -{ - struct mtip_port *port = (struct mtip_port *) data; - struct host_to_dev_fis *fis; - struct mtip_cmd *cmd; - int unaligned, tag, cmdto_cnt = 0; - unsigned int bit, group; - unsigned int num_command_slots; - unsigned long to, tagaccum[SLOTBITS_IN_LONGS]; - void (*func)(void *, int); - - if (unlikely(!port)) - return; - - if (unlikely(port->dd->sr)) - return; - - if (test_bit(MTIP_DDF_RESUME_BIT, &port->dd->dd_flag)) { - mod_timer(&port->cmd_timer, - jiffies + msecs_to_jiffies(30000)); - return; - } - /* clear the tag accumulator */ - memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long)); - num_command_slots = port->dd->slot_groups * 32; - - for (tag = 0; tag < num_command_slots; tag++) { - /* - * Skip internal command slot as it has - * its own timeout mechanism - */ - if (tag == MTIP_TAG_INTERNAL) - continue; - - if (atomic_read(&port->commands[tag].active) && - (time_after(jiffies, port->commands[tag].comp_time))) { - group = tag >> 5; - bit = tag & 0x1F; - - cmd = &port->commands[tag]; - fis = (struct host_to_dev_fis *) cmd->command; - - set_bit(tag, tagaccum); - cmdto_cnt++; - if (cmdto_cnt == 1) - set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags); - - /* - * Clear the completed bit. This should prevent - * any interrupt handlers from trying to retire - * the command. 
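The scan timer removed here duplicates machinery blk-mq supplies generically: the block core stamps each request with a deadline and calls back into the driver when it expires. Under that model the driver-side replacement is roughly a single call; a sketch, with the queue-wide value shown an assumption based on the new MTIP_NCQ_CMD_TIMEOUT_MS:

/* Let the block core watch per-request deadlines instead of a
 * hand-rolled timer that scans every slot twice a second. */
blk_queue_rq_timeout(dd->queue, msecs_to_jiffies(MTIP_NCQ_CMD_TIMEOUT_MS));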
- */ - writel(1 << bit, port->completed[group]); - - /* Clear the active flag for the command */ - atomic_set(&port->commands[tag].active, 0); - - func = cmd->async_callback; - if (func && - cmpxchg(&cmd->async_callback, func, 0) == func) { - - /* Unmap the DMA scatter list entries */ - dma_unmap_sg(&port->dd->pdev->dev, - cmd->sg, - cmd->scatter_ents, - cmd->direction); - - func(cmd->async_data, -EIO); - unaligned = cmd->unaligned; - - /* Clear the allocated bit for the command. */ - release_slot(port, tag); - - if (unaligned) - up(&port->cmd_slot_unal); - else - up(&port->cmd_slot); - } - } - } - - if (cmdto_cnt) { - print_tags(port->dd, "timed out", tagaccum, cmdto_cnt); - if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) { - mtip_device_reset(port->dd); - wake_up_interruptible(&port->svc_wait); - } - clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags); - } - - if (port->ic_pause_timer) { - to = port->ic_pause_timer + msecs_to_jiffies(1000); - if (time_after(jiffies, to)) { - if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) { - port->ic_pause_timer = 0; - clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags); - clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags); - clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags); - wake_up_interruptible(&port->svc_wait); - } - - - } - } - - /* Restart the timer */ - mod_timer(&port->cmd_timer, - jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD)); -} - -/* * Internal command completion callback function. * * This function is normally called by the driver ISR when an internal @@ -789,28 +574,19 @@ static void mtip_timeout_function(unsigned long int data) * None */ static void mtip_completion(struct mtip_port *port, - int tag, - void *data, - int status) + int tag, struct mtip_cmd *command, int status) { - struct mtip_cmd *command = &port->commands[tag]; - struct completion *waiting = data; + struct completion *waiting = command->comp_data; if (unlikely(status == PORT_IRQ_TF_ERR)) dev_warn(&port->dd->pdev->dev, "Internal command %d completed with TFE\n", tag); - command->async_callback = NULL; - command->comp_func = NULL; - complete(waiting); } static void mtip_null_completion(struct mtip_port *port, - int tag, - void *data, - int status) + int tag, struct mtip_cmd *command, int status) { - return; } static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer, @@ -842,19 +618,16 @@ static void mtip_handle_tfe(struct driver_data *dd) port = dd->port; - /* Stop the timer to prevent command timeouts. 
*/ - del_timer(&port->cmd_timer); set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags); if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) && test_bit(MTIP_TAG_INTERNAL, port->allocated)) { - cmd = &port->commands[MTIP_TAG_INTERNAL]; + cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL); dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n"); - atomic_inc(&cmd->active); /* active > 1 indicates error */ if (cmd->comp_data && cmd->comp_func) { cmd->comp_func(port, MTIP_TAG_INTERNAL, - cmd->comp_data, PORT_IRQ_TF_ERR); + cmd, PORT_IRQ_TF_ERR); } goto handle_tfe_exit; } @@ -866,6 +639,8 @@ static void mtip_handle_tfe(struct driver_data *dd) for (group = 0; group < dd->slot_groups; group++) { completed = readl(port->completed[group]); + dev_warn(&dd->pdev->dev, "g=%u, comp=%x\n", group, completed); + /* clear completed status register in the hardware.*/ writel(completed, port->completed[group]); @@ -879,15 +654,11 @@ static void mtip_handle_tfe(struct driver_data *dd) if (tag == MTIP_TAG_INTERNAL) continue; - cmd = &port->commands[tag]; + cmd = mtip_cmd_from_tag(dd, tag); if (likely(cmd->comp_func)) { set_bit(tag, tagaccum); cmd_cnt++; - atomic_set(&cmd->active, 0); - cmd->comp_func(port, - tag, - cmd->comp_data, - 0); + cmd->comp_func(port, tag, cmd, 0); } else { dev_err(&port->dd->pdev->dev, "Missing completion func for tag %d", @@ -947,11 +718,7 @@ static void mtip_handle_tfe(struct driver_data *dd) for (bit = 0; bit < 32; bit++) { reissue = 1; tag = (group << 5) + bit; - cmd = &port->commands[tag]; - - /* If the active bit is set re-issue the command */ - if (atomic_read(&cmd->active) == 0) - continue; + cmd = mtip_cmd_from_tag(dd, tag); fis = (struct host_to_dev_fis *)cmd->command; @@ -970,11 +737,9 @@ static void mtip_handle_tfe(struct driver_data *dd) tag, fail_reason != NULL ? 
fail_reason : "unknown"); - atomic_set(&cmd->active, 0); if (cmd->comp_func) { cmd->comp_func(port, tag, - cmd->comp_data, - -ENODATA); + cmd, -ENODATA); } continue; } @@ -997,14 +762,9 @@ static void mtip_handle_tfe(struct driver_data *dd) /* Retire a command that will not be reissued */ dev_warn(&port->dd->pdev->dev, "retiring tag %d\n", tag); - atomic_set(&cmd->active, 0); if (cmd->comp_func) - cmd->comp_func( - port, - tag, - cmd->comp_data, - PORT_IRQ_TF_ERR); + cmd->comp_func(port, tag, cmd, PORT_IRQ_TF_ERR); else dev_warn(&port->dd->pdev->dev, "Bad completion for tag %d\n", @@ -1017,9 +777,6 @@ handle_tfe_exit: /* clear eh_active */ clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags); wake_up_interruptible(&port->svc_wait); - - mod_timer(&port->cmd_timer, - jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD)); } /* @@ -1048,15 +805,10 @@ static inline void mtip_workq_sdbfx(struct mtip_port *port, int group, if (unlikely(tag == MTIP_TAG_INTERNAL)) continue; - command = &port->commands[tag]; - /* make internal callback */ - if (likely(command->comp_func)) { - command->comp_func( - port, - tag, - command->comp_data, - 0); - } else { + command = mtip_cmd_from_tag(dd, tag); + if (likely(command->comp_func)) + command->comp_func(port, tag, command, 0); + else { dev_dbg(&dd->pdev->dev, "Null completion for tag %d", tag); @@ -1081,16 +833,13 @@ static inline void mtip_workq_sdbfx(struct mtip_port *port, int group, static inline void mtip_process_legacy(struct driver_data *dd, u32 port_stat) { struct mtip_port *port = dd->port; - struct mtip_cmd *cmd = &port->commands[MTIP_TAG_INTERNAL]; + struct mtip_cmd *cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL); if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) && (cmd != NULL) && !(readl(port->cmd_issue[MTIP_TAG_INTERNAL]) & (1 << MTIP_TAG_INTERNAL))) { if (cmd->comp_func) { - cmd->comp_func(port, - MTIP_TAG_INTERNAL, - cmd->comp_data, - 0); + cmd->comp_func(port, MTIP_TAG_INTERNAL, cmd, 0); return; } } @@ -1103,8 +852,6 @@ static inline void mtip_process_legacy(struct driver_data *dd, u32 port_stat) */ static inline void mtip_process_errors(struct driver_data *dd, u32 port_stat) { - if (likely(port_stat & (PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR))) - mtip_handle_tfe(dd); if (unlikely(port_stat & PORT_IRQ_CONNECT)) { dev_warn(&dd->pdev->dev, @@ -1122,6 +869,12 @@ static inline void mtip_process_errors(struct driver_data *dd, u32 port_stat) dev_warn(&dd->pdev->dev, "Port stat errors %x unhandled\n", (port_stat & ~PORT_IRQ_HANDLED)); + if (mtip_check_surprise_removal(dd->pdev)) + return; + } + if (likely(port_stat & (PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR))) { + set_bit(MTIP_PF_EH_ACTIVE_BIT, &dd->port->flags); + wake_up_interruptible(&dd->port->svc_wait); } } @@ -1222,7 +975,6 @@ static irqreturn_t mtip_irq_handler(int irq, void *instance) static void mtip_issue_non_ncq_command(struct mtip_port *port, int tag) { - atomic_set(&port->commands[tag].active, 1); writel(1 << MTIP_TAG_BIT(tag), port->cmd_issue[MTIP_TAG_INDEX(tag)]); } @@ -1280,6 +1032,8 @@ static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout) unsigned int n; unsigned int active = 1; + blk_mq_stop_hw_queues(port->dd->queue); + to = jiffies + msecs_to_jiffies(timeout); do { if (test_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags) && @@ -1287,8 +1041,13 @@ static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout) msleep(20); continue; /* svc thd is actively issuing commands */ } + + msleep(100); + if (mtip_check_surprise_removal(port->dd->pdev)) + goto err_fault; if 
(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag)) - return -EFAULT; + goto err_fault; + /* * Ignore s_active bit 0 of array element 0. * This bit will always be set @@ -1299,11 +1058,13 @@ static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout) if (!active) break; - - msleep(20); } while (time_before(jiffies, to)); + blk_mq_start_stopped_hw_queues(port->dd->queue, true); return active ? -EBUSY : 0; +err_fault: + blk_mq_start_stopped_hw_queues(port->dd->queue, true); + return -EFAULT; } /* @@ -1335,10 +1096,9 @@ static int mtip_exec_internal_command(struct mtip_port *port, { struct mtip_cmd_sg *command_sg; DECLARE_COMPLETION_ONSTACK(wait); - int rv = 0, ready2go = 1; - struct mtip_cmd *int_cmd = &port->commands[MTIP_TAG_INTERNAL]; - unsigned long to; + struct mtip_cmd *int_cmd; struct driver_data *dd = port->dd; + int rv = 0; /* Make sure the buffer is 8 byte aligned. This is asic specific. */ if (buffer & 0x00000007) { @@ -1346,19 +1106,8 @@ static int mtip_exec_internal_command(struct mtip_port *port, return -EFAULT; } - to = jiffies + msecs_to_jiffies(timeout); - do { - ready2go = !test_and_set_bit(MTIP_TAG_INTERNAL, - port->allocated); - if (ready2go) - break; - mdelay(100); - } while (time_before(jiffies, to)); - if (!ready2go) { - dev_warn(&dd->pdev->dev, - "Internal cmd active. new cmd [%02X]\n", fis->command); - return -EBUSY; - } + int_cmd = mtip_get_int_command(dd); + set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags); port->ic_pause_timer = 0; @@ -1368,10 +1117,11 @@ static int mtip_exec_internal_command(struct mtip_port *port, if (atomic == GFP_KERNEL) { if (fis->command != ATA_CMD_STANDBYNOW1) { /* wait for io to complete if non atomic */ - if (mtip_quiesce_io(port, 5000) < 0) { + if (mtip_quiesce_io(port, + MTIP_QUIESCE_IO_TIMEOUT_MS) < 0) { dev_warn(&dd->pdev->dev, "Failed to quiesce IO\n"); - release_slot(port, MTIP_TAG_INTERNAL); + mtip_put_int_command(dd, int_cmd); clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags); wake_up_interruptible(&port->svc_wait); return -EBUSY; @@ -1416,9 +1166,9 @@ static int mtip_exec_internal_command(struct mtip_port *port, if (atomic == GFP_KERNEL) { /* Wait for the command to complete or timeout. */ - if (wait_for_completion_interruptible_timeout( + if ((rv = wait_for_completion_interruptible_timeout( &wait, - msecs_to_jiffies(timeout)) <= 0) { + msecs_to_jiffies(timeout))) <= 0) { if (rv == -ERESTARTSYS) { /* interrupted */ dev_err(&dd->pdev->dev, "Internal command [%02X] was interrupted after %lu ms\n", @@ -1497,8 +1247,7 @@ static int mtip_exec_internal_command(struct mtip_port *port, } exec_ic_exit: /* Clear the allocated and active bits for the internal command. 
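Note the subtle fix folded into mtip_exec_internal_command() above: the old code tested rv == -ERESTARTSYS without ever assigning the wait's return value to rv, so the signal case could never be distinguished from a timeout. The corrected pattern, spelled out in isolation:

static void wait_pattern(struct completion *wait, unsigned int timeout_ms)
{
        /* Capture the result: <0 = signal, 0 = timeout, >0 = jiffies left */
        long ret = wait_for_completion_interruptible_timeout(wait,
                                msecs_to_jiffies(timeout_ms));

        if (ret == -ERESTARTSYS) {
                /* interrupted by a signal before completion */
        } else if (ret == 0) {
                /* timed out without completing */
        } else {
                /* completed, with 'ret' jiffies to spare */
        }
}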
*/ - atomic_set(&int_cmd->active, 0); - release_slot(port, MTIP_TAG_INTERNAL); + mtip_put_int_command(dd, int_cmd); if (rv >= 0 && mtip_pause_ncq(port, fis)) { /* NCQ paused */ return rv; @@ -1529,6 +1278,37 @@ static inline void ata_swap_string(u16 *buf, unsigned int len) be16_to_cpus(&buf[i]); } +static void mtip_set_timeout(struct driver_data *dd, + struct host_to_dev_fis *fis, + unsigned int *timeout, u8 erasemode) +{ + switch (fis->command) { + case ATA_CMD_DOWNLOAD_MICRO: + *timeout = 120000; /* 2 minutes */ + break; + case ATA_CMD_SEC_ERASE_UNIT: + case 0xFC: + if (erasemode) + *timeout = ((*(dd->port->identify + 90) * 2) * 60000); + else + *timeout = ((*(dd->port->identify + 89) * 2) * 60000); + break; + case ATA_CMD_STANDBYNOW1: + *timeout = 120000; /* 2 minutes */ + break; + case 0xF7: + case 0xFA: + *timeout = 60000; /* 60 seconds */ + break; + case ATA_CMD_SMART: + *timeout = 15000; /* 15 seconds */ + break; + default: + *timeout = MTIP_IOCTL_CMD_TIMEOUT_MS; + break; + } +} + /* * Request the device identity information. * @@ -1576,7 +1356,7 @@ static int mtip_get_identify(struct mtip_port *port, void __user *user_buffer) sizeof(u16) * ATA_ID_WORDS, 0, GFP_KERNEL, - MTIP_INTERNAL_COMMAND_TIMEOUT_MS) + MTIP_INT_CMD_TIMEOUT_MS) < 0) { rv = -1; goto out; @@ -1644,6 +1424,7 @@ static int mtip_standby_immediate(struct mtip_port *port) int rv; struct host_to_dev_fis fis; unsigned long start; + unsigned int timeout; /* Build the FIS. */ memset(&fis, 0, sizeof(struct host_to_dev_fis)); @@ -1651,6 +1432,8 @@ static int mtip_standby_immediate(struct mtip_port *port) fis.opts = 1 << 7; fis.command = ATA_CMD_STANDBYNOW1; + mtip_set_timeout(port->dd, &fis, &timeout, 0); + start = jiffies; rv = mtip_exec_internal_command(port, &fis, @@ -1659,7 +1442,7 @@ static int mtip_standby_immediate(struct mtip_port *port) 0, 0, GFP_ATOMIC, - 15000); + timeout); dbg_printk(MTIP_DRV_NAME "Time taken to complete standby cmd: %d ms\n", jiffies_to_msecs(jiffies - start)); if (rv) @@ -1705,7 +1488,7 @@ static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer, sectors * ATA_SECT_SIZE, 0, GFP_ATOMIC, - MTIP_INTERNAL_COMMAND_TIMEOUT_MS); + MTIP_INT_CMD_TIMEOUT_MS); } /* @@ -1998,6 +1781,7 @@ static int exec_drive_task(struct mtip_port *port, u8 *command) { struct host_to_dev_fis fis; struct host_to_dev_fis *reply = (port->rxfis + RX_FIS_D2H_REG); + unsigned int to; /* Build the FIS. */ memset(&fis, 0, sizeof(struct host_to_dev_fis)); @@ -2011,6 +1795,8 @@ static int exec_drive_task(struct mtip_port *port, u8 *command) fis.cyl_hi = command[5]; fis.device = command[6] & ~0x10; /* Clear the dev bit*/ + mtip_set_timeout(port->dd, &fis, &to, 0); + dbg_printk(MTIP_DRV_NAME " %s: User Command: cmd %x, feat %x, nsect %x, sect %x, lcyl %x, hcyl %x, sel %x\n", __func__, command[0], @@ -2029,7 +1815,7 @@ static int exec_drive_task(struct mtip_port *port, u8 *command) 0, 0, GFP_KERNEL, - MTIP_IOCTL_COMMAND_TIMEOUT_MS) < 0) { + to) < 0) { return -1; } @@ -2069,6 +1855,7 @@ static int exec_drive_command(struct mtip_port *port, u8 *command, u8 *buf = NULL; dma_addr_t dma_addr = 0; int rv = 0, xfer_sz = command[3]; + unsigned int to; if (xfer_sz) { if (!user_buffer) @@ -2100,6 +1887,8 @@ static int exec_drive_command(struct mtip_port *port, u8 *command, fis.cyl_hi = 0xC2; } + mtip_set_timeout(port->dd, &fis, &to, 0); + if (xfer_sz) reply = (port->rxfis + RX_FIS_PIO_SETUP); else @@ -2122,7 +1911,7 @@ static int exec_drive_command(struct mtip_port *port, u8 *command, (xfer_sz ? 
ATA_SECT_SIZE * xfer_sz : 0), 0, GFP_KERNEL, - MTIP_IOCTL_COMMAND_TIMEOUT_MS) + to) < 0) { rv = -EFAULT; goto exit_drive_command; @@ -2202,36 +1991,6 @@ static unsigned int implicit_sector(unsigned char command, } return rv; } -static void mtip_set_timeout(struct driver_data *dd, - struct host_to_dev_fis *fis, - unsigned int *timeout, u8 erasemode) -{ - switch (fis->command) { - case ATA_CMD_DOWNLOAD_MICRO: - *timeout = 120000; /* 2 minutes */ - break; - case ATA_CMD_SEC_ERASE_UNIT: - case 0xFC: - if (erasemode) - *timeout = ((*(dd->port->identify + 90) * 2) * 60000); - else - *timeout = ((*(dd->port->identify + 89) * 2) * 60000); - break; - case ATA_CMD_STANDBYNOW1: - *timeout = 120000; /* 2 minutes */ - break; - case 0xF7: - case 0xFA: - *timeout = 60000; /* 60 seconds */ - break; - case ATA_CMD_SMART: - *timeout = 15000; /* 15 seconds */ - break; - default: - *timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS; - break; - } -} /* * Executes a taskfile @@ -2606,22 +2365,21 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd, * return value * None */ -static void mtip_hw_submit_io(struct driver_data *dd, sector_t sector, - int nsect, int nents, int tag, void *callback, - void *data, int dir, int unaligned) +static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq, + struct mtip_cmd *command, int nents, + struct blk_mq_hw_ctx *hctx) { struct host_to_dev_fis *fis; struct mtip_port *port = dd->port; - struct mtip_cmd *command = &port->commands[tag]; - int dma_dir = (dir == READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; - u64 start = sector; + int dma_dir = rq_data_dir(rq) == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE; + u64 start = blk_rq_pos(rq); + unsigned int nsect = blk_rq_sectors(rq); /* Map the scatter list for DMA access */ nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir); command->scatter_ents = nents; - command->unaligned = unaligned; /* * The number of retries for this command before it is * reported as a failure to the upper layers. @@ -2632,8 +2390,10 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t sector, fis = command->command; fis->type = 0x27; fis->opts = 1 << 7; - fis->command = - (dir == READ ? ATA_CMD_FPDMA_READ : ATA_CMD_FPDMA_WRITE); + if (rq_data_dir(rq) == READ) + fis->command = ATA_CMD_FPDMA_READ; + else + fis->command = ATA_CMD_FPDMA_WRITE; fis->lba_low = start & 0xFF; fis->lba_mid = (start >> 8) & 0xFF; fis->lba_hi = (start >> 16) & 0xFF; @@ -2643,14 +2403,14 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t sector, fis->device = 1 << 6; fis->features = nsect & 0xFF; fis->features_ex = (nsect >> 8) & 0xFF; - fis->sect_count = ((tag << 3) | (tag >> 5)); + fis->sect_count = ((rq->tag << 3) | (rq->tag >> 5)); fis->sect_cnt_ex = 0; fis->control = 0; fis->res2 = 0; fis->res3 = 0; fill_command_sg(dd, command, nents); - if (unaligned) + if (command->unaligned) fis->device |= 1 << 7; /* Populate the command header */ @@ -2668,81 +2428,17 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t sector, command->direction = dma_dir; /* - * Set the completion function and data for the command passed - * from the upper layer. - */ - command->async_data = data; - command->async_callback = callback; - - /* * To prevent this command from being issued * if an internal command is in progress or error handling is active. 
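With the request-based prototype above, the starting sector, length, direction and scatterlist all come straight off the struct request instead of being threaded through as separate arguments. The accessors involved, in sketch form:

static void show_io_params(struct request_queue *q, struct request *rq,
                           struct mtip_cmd *cmd)
{
        sector_t start     = blk_rq_pos(rq);     /* first 512-byte sector */
        unsigned int nsect = blk_rq_sectors(rq); /* total sectors */
        int dir            = rq_data_dir(rq);    /* READ or WRITE */
        /* build the DMA scatterlist directly from the request */
        int nents          = blk_rq_map_sg(q, rq, cmd->sg);

        pr_debug("start=%llu nsect=%u dir=%d nents=%d\n",
                 (unsigned long long)start, nsect, dir, nents);
}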
*/ if (port->flags & MTIP_PF_PAUSE_IO) { - set_bit(tag, port->cmds_to_issue); + set_bit(rq->tag, port->cmds_to_issue); set_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags); return; } /* Issue the command to the hardware */ - mtip_issue_ncq_command(port, tag); - - return; -} - -/* - * Release a command slot. - * - * @dd Pointer to the driver data structure. - * @tag Slot tag - * - * return value - * None - */ -static void mtip_hw_release_scatterlist(struct driver_data *dd, int tag, - int unaligned) -{ - struct semaphore *sem = unaligned ? &dd->port->cmd_slot_unal : - &dd->port->cmd_slot; - release_slot(dd->port, tag); - up(sem); -} - -/* - * Obtain a command slot and return its associated scatter list. - * - * @dd Pointer to the driver data structure. - * @tag Pointer to an int that will receive the allocated command - * slot tag. - * - * return value - * Pointer to the scatter list for the allocated command slot - * or NULL if no command slots are available. - */ -static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd, - int *tag, int unaligned) -{ - struct semaphore *sem = unaligned ? &dd->port->cmd_slot_unal : - &dd->port->cmd_slot; - - /* - * It is possible that, even with this semaphore, a thread - * may think that no command slots are available. Therefore, we - * need to make an attempt to get_slot(). - */ - down(sem); - *tag = get_slot(dd->port); - - if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) { - up(sem); - return NULL; - } - if (unlikely(*tag < 0)) { - up(sem); - return NULL; - } - - return dd->port->commands[*tag].sg; + mtip_issue_ncq_command(port, rq->tag); } /* @@ -3113,6 +2809,7 @@ static int mtip_free_orphan(struct driver_data *dd) if (dd->queue) { dd->queue->queuedata = NULL; blk_cleanup_queue(dd->queue); + blk_mq_free_tag_set(&dd->tags); dd->queue = NULL; } } @@ -3270,6 +2967,11 @@ static int mtip_service_thread(void *data) int ret; while (1) { + if (kthread_should_stop() || + test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags)) + goto st_out; + clear_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags); + /* * the condition is to check neither an internal command is * is in progress nor error handling is active @@ -3277,11 +2979,12 @@ static int mtip_service_thread(void *data) wait_event_interruptible(port->svc_wait, (port->flags) && !(port->flags & MTIP_PF_PAUSE_IO)); - if (kthread_should_stop()) - goto st_out; - set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags); + if (kthread_should_stop() || + test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags)) + goto st_out; + /* If I am an orphan, start self cleanup */ if (test_bit(MTIP_PF_SR_CLEANUP_BIT, &port->flags)) break; @@ -3290,6 +2993,16 @@ static int mtip_service_thread(void *data) &dd->dd_flag))) goto st_out; +restart_eh: + /* Demux bits: start with error handling */ + if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags)) { + mtip_handle_tfe(dd); + clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags); + } + + if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags)) + goto restart_eh; + if (test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) { slot = 1; /* used to restrict the loop to one iteration */ @@ -3319,16 +3032,14 @@ static int mtip_service_thread(void *data) } clear_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags); - } else if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) { + } + + if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) { if (mtip_ftl_rebuild_poll(dd) < 0) set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag); clear_bit(MTIP_PF_REBUILD_BIT, &port->flags); } - clear_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags); - 
- if (test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags)) - goto st_out; } /* wait for pci remove to exit */ @@ -3365,7 +3076,6 @@ st_out: */ static void mtip_dma_free(struct driver_data *dd) { - int i; struct mtip_port *port = dd->port; if (port->block1) @@ -3376,13 +3086,6 @@ static void mtip_dma_free(struct driver_data *dd) dmam_free_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ, port->command_list, port->command_list_dma); } - - for (i = 0; i < MTIP_MAX_COMMAND_SLOTS; i++) { - if (port->commands[i].command) - dmam_free_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ, - port->commands[i].command, - port->commands[i].command_dma); - } } /* @@ -3396,8 +3099,6 @@ static void mtip_dma_free(struct driver_data *dd) static int mtip_dma_alloc(struct driver_data *dd) { struct mtip_port *port = dd->port; - int i, rv = 0; - u32 host_cap_64 = readl(dd->mmio + HOST_CAP) & HOST_CAP_64; /* Allocate dma memory for RX Fis, Identify, and Sector Bufffer */ port->block1 = @@ -3430,41 +3131,63 @@ static int mtip_dma_alloc(struct driver_data *dd) port->smart_buf = port->block1 + AHCI_SMARTBUF_OFFSET; port->smart_buf_dma = port->block1_dma + AHCI_SMARTBUF_OFFSET; - /* Setup per command SGL DMA region */ - - /* Point the command headers at the command tables */ - for (i = 0; i < MTIP_MAX_COMMAND_SLOTS; i++) { - port->commands[i].command = - dmam_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ, - &port->commands[i].command_dma, GFP_KERNEL); - if (!port->commands[i].command) { - rv = -ENOMEM; - mtip_dma_free(dd); - return rv; - } - memset(port->commands[i].command, 0, CMD_DMA_ALLOC_SZ); - - port->commands[i].command_header = port->command_list + - (sizeof(struct mtip_cmd_hdr) * i); - port->commands[i].command_header_dma = - dd->port->command_list_dma + - (sizeof(struct mtip_cmd_hdr) * i); + return 0; +} - if (host_cap_64) - port->commands[i].command_header->ctbau = - __force_bit2int cpu_to_le32( - (port->commands[i].command_dma >> 16) >> 16); +static int mtip_hw_get_identify(struct driver_data *dd) +{ + struct smart_attr attr242; + unsigned char *buf; + int rv; - port->commands[i].command_header->ctba = - __force_bit2int cpu_to_le32( - port->commands[i].command_dma & 0xFFFFFFFF); + if (mtip_get_identify(dd->port, NULL) < 0) + return -EFAULT; - sg_init_table(port->commands[i].sg, MTIP_MAX_SG); + if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) == + MTIP_FTL_REBUILD_MAGIC) { + set_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags); + return MTIP_FTL_REBUILD_MAGIC; + } + mtip_dump_identify(dd->port); - /* Mark command as currently inactive */ - atomic_set(&dd->port->commands[i].active, 0); + /* check write protect, over temp and rebuild statuses */ + rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ, + dd->port->log_buf, + dd->port->log_buf_dma, 1); + if (rv) { + dev_warn(&dd->pdev->dev, + "Error in READ LOG EXT (10h) command\n"); + /* non-critical error, don't fail the load */ + } else { + buf = (unsigned char *)dd->port->log_buf; + if (buf[259] & 0x1) { + dev_info(&dd->pdev->dev, + "Write protect bit is set.\n"); + set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag); + } + if (buf[288] == 0xF7) { + dev_info(&dd->pdev->dev, + "Exceeded Tmax, drive in thermal shutdown.\n"); + set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag); + } + if (buf[288] == 0xBF) { + dev_info(&dd->pdev->dev, + "Drive indicates rebuild has failed.\n"); + /* TODO */ + } } - return 0; + + /* get write protect progess */ + memset(&attr242, 0, sizeof(struct smart_attr)); + if (mtip_get_smart_attr(dd->port, 242, &attr242)) + dev_warn(&dd->pdev->dev, + "Unable to check 
write protect progress\n"); + else + dev_info(&dd->pdev->dev, + "Write protect progress: %u%% (%u blocks)\n", + attr242.cur, le32_to_cpu(attr242.data)); + + return rv; } /* @@ -3481,8 +3204,6 @@ static int mtip_hw_init(struct driver_data *dd) int rv; unsigned int num_command_slots; unsigned long timeout, timetaken; - unsigned char *buf; - struct smart_attr attr242; dd->mmio = pcim_iomap_table(dd->pdev)[MTIP_ABAR]; @@ -3513,8 +3234,6 @@ static int mtip_hw_init(struct driver_data *dd) else dd->unal_qdepth = 0; - /* Counting semaphore to track command slot usage */ - sema_init(&dd->port->cmd_slot, num_command_slots - 1 - dd->unal_qdepth); sema_init(&dd->port->cmd_slot_unal, dd->unal_qdepth); /* Spinlock to prevent concurrent issue */ @@ -3599,73 +3318,16 @@ static int mtip_hw_init(struct driver_data *dd) writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN, dd->mmio + HOST_CTL); - init_timer(&dd->port->cmd_timer); init_waitqueue_head(&dd->port->svc_wait); - dd->port->cmd_timer.data = (unsigned long int) dd->port; - dd->port->cmd_timer.function = mtip_timeout_function; - mod_timer(&dd->port->cmd_timer, - jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD)); - - if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) { rv = -EFAULT; goto out3; } - if (mtip_get_identify(dd->port, NULL) < 0) { - rv = -EFAULT; - goto out3; - } - mtip_dump_identify(dd->port); - - if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) == - MTIP_FTL_REBUILD_MAGIC) { - set_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags); - return MTIP_FTL_REBUILD_MAGIC; - } - - /* check write protect, over temp and rebuild statuses */ - rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ, - dd->port->log_buf, - dd->port->log_buf_dma, 1); - if (rv) { - dev_warn(&dd->pdev->dev, - "Error in READ LOG EXT (10h) command\n"); - /* non-critical error, don't fail the load */ - } else { - buf = (unsigned char *)dd->port->log_buf; - if (buf[259] & 0x1) { - dev_info(&dd->pdev->dev, - "Write protect bit is set.\n"); - set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag); - } - if (buf[288] == 0xF7) { - dev_info(&dd->pdev->dev, - "Exceeded Tmax, drive in thermal shutdown.\n"); - set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag); - } - if (buf[288] == 0xBF) { - dev_info(&dd->pdev->dev, - "Drive is in security locked state.\n"); - set_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag); - } - } - - /* get write protect progess */ - memset(&attr242, 0, sizeof(struct smart_attr)); - if (mtip_get_smart_attr(dd->port, 242, &attr242)) - dev_warn(&dd->pdev->dev, - "Unable to check write protect progress\n"); - else - dev_info(&dd->pdev->dev, - "Write protect progress: %u%% (%u blocks)\n", - attr242.cur, le32_to_cpu(attr242.data)); return rv; out3: - del_timer_sync(&dd->port->cmd_timer); - /* Disable interrupts on the HBA. */ writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN, dd->mmio + HOST_CTL); @@ -3685,6 +3347,22 @@ out1: return rv; } +static void mtip_standby_drive(struct driver_data *dd) +{ + if (dd->sr) + return; + + /* + * Send standby immediate (E0h) to the drive so that it + * saves its state. + */ + if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags) && + !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)) + if (mtip_standby_immediate(dd->port)) + dev_warn(&dd->pdev->dev, + "STANDBY IMMEDIATE failed\n"); +} + /* * Called to deinitialize an interface. * @@ -3700,12 +3378,6 @@ static int mtip_hw_exit(struct driver_data *dd) * saves its state. 
*/ if (!dd->sr) { - if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags) && - !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)) - if (mtip_standby_immediate(dd->port)) - dev_warn(&dd->pdev->dev, - "STANDBY IMMEDIATE failed\n"); - /* de-initialize the port. */ mtip_deinit_port(dd->port); @@ -3714,8 +3386,6 @@ static int mtip_hw_exit(struct driver_data *dd) dd->mmio + HOST_CTL); } - del_timer_sync(&dd->port->cmd_timer); - /* Release the IRQ. */ irq_set_affinity_hint(dd->pdev->irq, NULL); devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd); @@ -4032,100 +3702,138 @@ static const struct block_device_operations mtip_block_ops = { * * @queue Pointer to the request queue. Unused other than to obtain * the driver data structure. - * @bio Pointer to the BIO. + * @rq Pointer to the request. * */ -static void mtip_make_request(struct request_queue *queue, struct bio *bio) +static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq) { - struct driver_data *dd = queue->queuedata; - struct scatterlist *sg; - struct bio_vec bvec; - struct bvec_iter iter; - int nents = 0; - int tag = 0, unaligned = 0; + struct driver_data *dd = hctx->queue->queuedata; + struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq); + unsigned int nents; if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) { if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) { - bio_endio(bio, -ENXIO); - return; + return -ENXIO; } if (unlikely(test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))) { - bio_endio(bio, -ENODATA); - return; + return -ENODATA; } if (unlikely(test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag) && - bio_data_dir(bio))) { - bio_endio(bio, -ENODATA); - return; - } - if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))) { - bio_endio(bio, -ENODATA); - return; - } - if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)) { - bio_endio(bio, -ENXIO); - return; + rq_data_dir(rq))) { + return -ENODATA; } + if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))) + return -ENODATA; + if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)) + return -ENXIO; } - if (unlikely(bio->bi_rw & REQ_DISCARD)) { - bio_endio(bio, mtip_send_trim(dd, bio->bi_iter.bi_sector, - bio_sectors(bio))); - return; - } + if (rq->cmd_flags & REQ_DISCARD) { + int err; - if (unlikely(!bio_has_data(bio))) { - blk_queue_flush(queue, 0); - bio_endio(bio, 0); - return; + err = mtip_send_trim(dd, blk_rq_pos(rq), blk_rq_sectors(rq)); + blk_mq_end_io(rq, err); + return 0; } - if (bio_data_dir(bio) == WRITE && bio_sectors(bio) <= 64 && - dd->unal_qdepth) { - if (bio->bi_iter.bi_sector % 8 != 0) - /* Unaligned on 4k boundaries */ - unaligned = 1; - else if (bio_sectors(bio) % 8 != 0) /* Aligned but not 4k/8k */ - unaligned = 1; + /* Create the scatter list for this request. */ + nents = blk_rq_map_sg(hctx->queue, rq, cmd->sg); + + /* Issue the read/write. */ + mtip_hw_submit_io(dd, rq, cmd, nents, hctx); + return 0; +} + +static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx, + struct request *rq) +{ + struct driver_data *dd = hctx->queue->queuedata; + struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq); + + if (!dd->unal_qdepth || rq_data_dir(rq) == READ) + return false; + + /* + * If unaligned depth must be limited on this controller, mark it + * as unaligned if the IO isn't on a 4k boundary (start of length). 
+ */ + if (blk_rq_sectors(rq) <= 64) { + if ((blk_rq_pos(rq) & 7) || (blk_rq_sectors(rq) & 7)) + cmd->unaligned = 1; } - sg = mtip_hw_get_scatterlist(dd, &tag, unaligned); - if (likely(sg != NULL)) { - blk_queue_bounce(queue, &bio); + if (cmd->unaligned && down_trylock(&dd->port->cmd_slot_unal)) + return true; - if (unlikely((bio)->bi_vcnt > MTIP_MAX_SG)) { - dev_warn(&dd->pdev->dev, - "Maximum number of SGL entries exceeded\n"); - bio_io_error(bio); - mtip_hw_release_scatterlist(dd, tag, unaligned); - return; - } + return false; +} - /* Create the scatter list for this bio. */ - bio_for_each_segment(bvec, bio, iter) { - sg_set_page(&sg[nents], - bvec.bv_page, - bvec.bv_len, - bvec.bv_offset); - nents++; - } +static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq) +{ + int ret; - /* Issue the read/write. */ - mtip_hw_submit_io(dd, - bio->bi_iter.bi_sector, - bio_sectors(bio), - nents, - tag, - bio_endio, - bio, - bio_data_dir(bio), - unaligned); - } else - bio_io_error(bio); + if (mtip_check_unal_depth(hctx, rq)) + return BLK_MQ_RQ_QUEUE_BUSY; + + ret = mtip_submit_request(hctx, rq); + if (!ret) + return BLK_MQ_RQ_QUEUE_OK; + + rq->errors = ret; + return BLK_MQ_RQ_QUEUE_ERROR; +} + +static void mtip_free_cmd(void *data, struct request *rq, + unsigned int hctx_idx, unsigned int request_idx) +{ + struct driver_data *dd = data; + struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq); + + if (!cmd->command) + return; + + dmam_free_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ, + cmd->command, cmd->command_dma); +} + +static int mtip_init_cmd(void *data, struct request *rq, unsigned int hctx_idx, + unsigned int request_idx, unsigned int numa_node) +{ + struct driver_data *dd = data; + struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq); + u32 host_cap_64 = readl(dd->mmio + HOST_CAP) & HOST_CAP_64; + + cmd->command = dmam_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ, + &cmd->command_dma, GFP_KERNEL); + if (!cmd->command) + return -ENOMEM; + + memset(cmd->command, 0, CMD_DMA_ALLOC_SZ); + + /* Point the command headers at the command tables. */ + cmd->command_header = dd->port->command_list + + (sizeof(struct mtip_cmd_hdr) * request_idx); + cmd->command_header_dma = dd->port->command_list_dma + + (sizeof(struct mtip_cmd_hdr) * request_idx); + + if (host_cap_64) + cmd->command_header->ctbau = __force_bit2int cpu_to_le32((cmd->command_dma >> 16) >> 16); + + cmd->command_header->ctba = __force_bit2int cpu_to_le32(cmd->command_dma & 0xFFFFFFFF); + + sg_init_table(cmd->sg, MTIP_MAX_SG); + return 0; } +static struct blk_mq_ops mtip_mq_ops = { + .queue_rq = mtip_queue_rq, + .map_queue = blk_mq_map_queue, + .init_request = mtip_init_cmd, + .exit_request = mtip_free_cmd, +}; + /* * Block layer initialization function. * @@ -4148,11 +3856,7 @@ static int mtip_block_initialize(struct driver_data *dd) if (dd->disk) goto skip_create_disk; /* hw init done, before rebuild */ - /* Initialize the protocol layer. */ - wait_for_rebuild = mtip_hw_init(dd); - if (wait_for_rebuild < 0) { - dev_err(&dd->pdev->dev, - "Protocol layer initialization failed\n"); + if (mtip_hw_init(dd)) { rv = -EINVAL; goto protocol_init_error; } @@ -4194,29 +3898,53 @@ static int mtip_block_initialize(struct driver_data *dd) mtip_hw_debugfs_init(dd); - /* - * if rebuild pending, start the service thread, and delay the block - * queue creation and add_disk() - */ - if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC) - goto start_service_thread; - skip_create_disk: - /* Allocate the request queue. 
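The registration sequence the conversion adopts pairs each setup call with a teardown call, and moves per-command allocation into init_request(), which runs once per preallocated request when the tag set is created rather than once per I/O. In outline (a sketch with error handling trimmed, not the patch verbatim):

static int register_mq_queue(struct driver_data *dd)
{
        dd->tags.ops = &mtip_mq_ops;
        dd->tags.nr_hw_queues = 1;
        dd->tags.queue_depth = MTIP_MAX_COMMAND_SLOTS;
        dd->tags.reserved_tags = 1;     /* keep a tag for internal commands */
        dd->tags.cmd_size = sizeof(struct mtip_cmd);
        dd->tags.numa_node = dd->numa_node;
        dd->tags.flags = BLK_MQ_F_SHOULD_MERGE;
        dd->tags.driver_data = dd;

        if (blk_mq_alloc_tag_set(&dd->tags))    /* runs init_request() */
                return -ENOMEM;

        dd->queue = blk_mq_init_queue(&dd->tags);
        if (IS_ERR(dd->queue)) {
                blk_mq_free_tag_set(&dd->tags); /* runs exit_request() */
                return PTR_ERR(dd->queue);
        }
        return 0;
}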
*/ - dd->queue = blk_alloc_queue_node(GFP_KERNEL, dd->numa_node); - if (dd->queue == NULL) { + memset(&dd->tags, 0, sizeof(dd->tags)); + dd->tags.ops = &mtip_mq_ops; + dd->tags.nr_hw_queues = 1; + dd->tags.queue_depth = MTIP_MAX_COMMAND_SLOTS; + dd->tags.reserved_tags = 1; + dd->tags.cmd_size = sizeof(struct mtip_cmd); + dd->tags.numa_node = dd->numa_node; + dd->tags.flags = BLK_MQ_F_SHOULD_MERGE; + dd->tags.driver_data = dd; + + rv = blk_mq_alloc_tag_set(&dd->tags); + if (rv) { dev_err(&dd->pdev->dev, "Unable to allocate request queue\n"); rv = -ENOMEM; goto block_queue_alloc_init_error; } - /* Attach our request function to the request queue. */ - blk_queue_make_request(dd->queue, mtip_make_request); + /* Allocate the request queue. */ + dd->queue = blk_mq_init_queue(&dd->tags); + if (IS_ERR(dd->queue)) { + dev_err(&dd->pdev->dev, + "Unable to allocate request queue\n"); + rv = -ENOMEM; + goto block_queue_alloc_init_error; + } dd->disk->queue = dd->queue; dd->queue->queuedata = dd; + /* Initialize the protocol layer. */ + wait_for_rebuild = mtip_hw_get_identify(dd); + if (wait_for_rebuild < 0) { + dev_err(&dd->pdev->dev, + "Protocol layer initialization failed\n"); + rv = -EINVAL; + goto init_hw_cmds_error; + } + + /* + * if rebuild pending, start the service thread, and delay the block + * queue creation and add_disk() + */ + if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC) + goto start_service_thread; + /* Set device limits. */ set_bit(QUEUE_FLAG_NONROT, &dd->queue->queue_flags); blk_queue_max_segments(dd->queue, MTIP_MAX_SG); @@ -4295,8 +4023,9 @@ kthread_run_error: del_gendisk(dd->disk); read_capacity_error: +init_hw_cmds_error: blk_cleanup_queue(dd->queue); - + blk_mq_free_tag_set(&dd->tags); block_queue_alloc_init_error: mtip_hw_debugfs_exit(dd); disk_index_error: @@ -4345,6 +4074,9 @@ static int mtip_block_remove(struct driver_data *dd) kobject_put(kobj); } } + + mtip_standby_drive(dd); + /* * Delete our gendisk structure. This also removes the device * from /dev @@ -4357,6 +4089,7 @@ static int mtip_block_remove(struct driver_data *dd) if (dd->disk->queue) { del_gendisk(dd->disk); blk_cleanup_queue(dd->queue); + blk_mq_free_tag_set(&dd->tags); dd->queue = NULL; } else put_disk(dd->disk); @@ -4391,6 +4124,8 @@ static int mtip_block_remove(struct driver_data *dd) */ static int mtip_block_shutdown(struct driver_data *dd) { + mtip_hw_shutdown(dd); + /* Delete our gendisk structure, and cleanup the blk queue. 
*/ if (dd->disk) { dev_info(&dd->pdev->dev, @@ -4399,6 +4134,7 @@ static int mtip_block_shutdown(struct driver_data *dd) if (dd->disk->queue) { del_gendisk(dd->disk); blk_cleanup_queue(dd->queue); + blk_mq_free_tag_set(&dd->tags); } else put_disk(dd->disk); dd->disk = NULL; @@ -4408,8 +4144,6 @@ static int mtip_block_shutdown(struct driver_data *dd) spin_lock(&rssd_index_lock); ida_remove(&rssd_index_ida, dd->index); spin_unlock(&rssd_index_lock); - - mtip_hw_shutdown(dd); return 0; } @@ -4479,6 +4213,57 @@ static DEFINE_HANDLER(5); static DEFINE_HANDLER(6); static DEFINE_HANDLER(7); +static void mtip_disable_link_opts(struct driver_data *dd, struct pci_dev *pdev) +{ + int pos; + unsigned short pcie_dev_ctrl; + + pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (pos) { + pci_read_config_word(pdev, + pos + PCI_EXP_DEVCTL, + &pcie_dev_ctrl); + if (pcie_dev_ctrl & (1 << 11) || + pcie_dev_ctrl & (1 << 4)) { + dev_info(&dd->pdev->dev, + "Disabling ERO/No-Snoop on bridge device %04x:%04x\n", + pdev->vendor, pdev->device); + pcie_dev_ctrl &= ~(PCI_EXP_DEVCTL_NOSNOOP_EN | + PCI_EXP_DEVCTL_RELAX_EN); + pci_write_config_word(pdev, + pos + PCI_EXP_DEVCTL, + pcie_dev_ctrl); + } + } +} + +static void mtip_fix_ero_nosnoop(struct driver_data *dd, struct pci_dev *pdev) +{ + /* + * This workaround is specific to AMD/ATI chipset with a PCI upstream + * device with device id 0x5aXX + */ + if (pdev->bus && pdev->bus->self) { + if (pdev->bus->self->vendor == PCI_VENDOR_ID_ATI && + ((pdev->bus->self->device & 0xff00) == 0x5a00)) { + mtip_disable_link_opts(dd, pdev->bus->self); + } else { + /* Check further up the topology */ + struct pci_dev *parent_dev = pdev->bus->self; + if (parent_dev->bus && + parent_dev->bus->parent && + parent_dev->bus->parent->self && + parent_dev->bus->parent->self->vendor == + PCI_VENDOR_ID_ATI && + (parent_dev->bus->parent->self->device & + 0xff00) == 0x5a00) { + mtip_disable_link_opts(dd, + parent_dev->bus->parent->self); + } + } + } +} + /* * Called for each supported PCI device detected. * @@ -4630,6 +4415,8 @@ static int mtip_pci_probe(struct pci_dev *pdev, goto msi_initialize_err; } + mtip_fix_ero_nosnoop(dd, pdev); + /* Initialize the block layer. */ rv = mtip_block_initialize(dd); if (rv < 0) { @@ -4710,8 +4497,6 @@ static void mtip_pci_remove(struct pci_dev *pdev) dev_warn(&dd->pdev->dev, "Completion workers still active!\n"); } - /* Cleanup the outstanding commands */ - mtip_command_cleanup(dd); /* Clean up the block layer. */ mtip_block_remove(dd); @@ -4737,8 +4522,6 @@ static void mtip_pci_remove(struct pci_dev *pdev) pcim_iounmap_regions(pdev, 1 << MTIP_ABAR); pci_set_drvdata(pdev, NULL); - pci_dev_put(pdev); - } /* @@ -4935,13 +4718,13 @@ static int __init mtip_init(void) */ static void __exit mtip_exit(void) { - debugfs_remove_recursive(dfs_parent); - /* Release the allocated major block device number. */ unregister_blkdev(mtip_major, MTIP_DRV_NAME); /* Unregister the PCI driver. 
*/ pci_unregister_driver(&mtip_pci_driver); + + debugfs_remove_recursive(dfs_parent); } MODULE_AUTHOR("Micron Technology, Inc"); diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h index ffb955e7ccb9..4b9b554234bc 100644 --- a/drivers/block/mtip32xx/mtip32xx.h +++ b/drivers/block/mtip32xx/mtip32xx.h @@ -40,9 +40,11 @@ #define MTIP_MAX_RETRIES 2 /* Various timeout values in ms */ -#define MTIP_NCQ_COMMAND_TIMEOUT_MS 5000 -#define MTIP_IOCTL_COMMAND_TIMEOUT_MS 5000 -#define MTIP_INTERNAL_COMMAND_TIMEOUT_MS 5000 +#define MTIP_NCQ_CMD_TIMEOUT_MS 15000 +#define MTIP_IOCTL_CMD_TIMEOUT_MS 5000 +#define MTIP_INT_CMD_TIMEOUT_MS 5000 +#define MTIP_QUIESCE_IO_TIMEOUT_MS (MTIP_NCQ_CMD_TIMEOUT_MS * \ + (MTIP_MAX_RETRIES + 1)) /* check for timeouts every 500ms */ #define MTIP_TIMEOUT_CHECK_PERIOD 500 @@ -331,12 +333,8 @@ struct mtip_cmd { */ void (*comp_func)(struct mtip_port *port, int tag, - void *data, + struct mtip_cmd *cmd, int status); - /* Additional callback function that may be called by comp_func() */ - void (*async_callback)(void *data, int status); - - void *async_data; /* Addl. data passed to async_callback() */ int scatter_ents; /* Number of scatter list entries used */ @@ -347,10 +345,6 @@ struct mtip_cmd { int retries; /* The number of retries left for this command. */ int direction; /* Data transfer direction */ - - unsigned long comp_time; /* command completion time, in jiffies */ - - atomic_t active; /* declares if this command sent to the drive. */ }; /* Structure used to describe a port. */ @@ -436,12 +430,6 @@ struct mtip_port { * or error handling is active */ unsigned long cmds_to_issue[SLOTBITS_IN_LONGS]; - /* - * Array of command slots. Structure includes pointers to the - * command header and command table, and completion function and data - * pointers. - */ - struct mtip_cmd commands[MTIP_MAX_COMMAND_SLOTS]; /* Used by mtip_service_thread to wait for an event */ wait_queue_head_t svc_wait; /* @@ -452,13 +440,7 @@ struct mtip_port { /* * Timer used to complete commands that have been active for too long. */ - struct timer_list cmd_timer; unsigned long ic_pause_timer; - /* - * Semaphore used to block threads if there are no - * command slots available. - */ - struct semaphore cmd_slot; /* Semaphore to control queue depth of unaligned IOs */ struct semaphore cmd_slot_unal; @@ -485,6 +467,8 @@ struct driver_data { struct request_queue *queue; /* Our request queue. */ + struct blk_mq_tag_set tags; /* blk_mq tags */ + struct mtip_port *port; /* Pointer to the port data structure. 
*/ unsigned product_type; /* magic value declaring the product type */ diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 091b9ea14feb..77087a29b127 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -32,6 +32,7 @@ struct nullb { unsigned int index; struct request_queue *q; struct gendisk *disk; + struct blk_mq_tag_set tag_set; struct hrtimer timer; unsigned int queue_depth; spinlock_t lock; @@ -202,8 +203,8 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer) entry = llist_reverse_order(entry); do { cmd = container_of(entry, struct nullb_cmd, ll_list); - end_cmd(cmd); entry = entry->next; + end_cmd(cmd); } while (entry); } @@ -226,7 +227,7 @@ static void null_cmd_end_timer(struct nullb_cmd *cmd) static void null_softirq_done_fn(struct request *rq) { - end_cmd(rq->special); + end_cmd(blk_mq_rq_to_pdu(rq)); } static inline void null_handle_cmd(struct nullb_cmd *cmd) @@ -311,7 +312,7 @@ static void null_request_fn(struct request_queue *q) static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq) { - struct nullb_cmd *cmd = rq->special; + struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq); cmd->rq = rq; cmd->nq = hctx->driver_data; @@ -320,46 +321,6 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq) return BLK_MQ_RQ_QUEUE_OK; } -static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index) -{ - int b_size = DIV_ROUND_UP(reg->nr_hw_queues, nr_online_nodes); - int tip = (reg->nr_hw_queues % nr_online_nodes); - int node = 0, i, n; - - /* - * Split submit queues evenly wrt to the number of nodes. If uneven, - * fill the first buckets with one extra, until the rest is filled with - * no extra. - */ - for (i = 0, n = 1; i < hctx_index; i++, n++) { - if (n % b_size == 0) { - n = 0; - node++; - - tip--; - if (!tip) - b_size = reg->nr_hw_queues / nr_online_nodes; - } - } - - /* - * A node might not be online, therefore map the relative node id to the - * real node id. 
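The two-line reorder in null_cmd_timer_expired() above is a use-after-free fix: end_cmd() can free the structure that embeds the llist node, so the next pointer has to be loaded before the node is handed off. The fixed traversal, isolated:

static void drain_completions(struct llist_node *entry)
{
        while (entry) {
                struct nullb_cmd *cmd = container_of(entry,
                                        struct nullb_cmd, ll_list);
                entry = entry->next;    /* read before cmd may be freed */
                end_cmd(cmd);
        }
}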
- */ - for_each_online_node(n) { - if (!node) - break; - node--; - } - - return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, n); -} - -static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index) -{ - kfree(hctx); -} - static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq) { BUG_ON(!nullb); @@ -389,19 +350,14 @@ static struct blk_mq_ops null_mq_ops = { .complete = null_softirq_done_fn, }; -static struct blk_mq_reg null_mq_reg = { - .ops = &null_mq_ops, - .queue_depth = 64, - .cmd_size = sizeof(struct nullb_cmd), - .flags = BLK_MQ_F_SHOULD_MERGE, -}; - static void null_del_dev(struct nullb *nullb) { list_del_init(&nullb->list); del_gendisk(nullb->disk); blk_cleanup_queue(nullb->q); + if (queue_mode == NULL_Q_MQ) + blk_mq_free_tag_set(&nullb->tag_set); put_disk(nullb->disk); kfree(nullb); } @@ -506,7 +462,7 @@ static int null_add_dev(void) nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node); if (!nullb) - return -ENOMEM; + goto out; spin_lock_init(&nullb->lock); @@ -514,49 +470,44 @@ static int null_add_dev(void) submit_queues = nr_online_nodes; if (setup_queues(nullb)) - goto err; + goto out_free_nullb; if (queue_mode == NULL_Q_MQ) { - null_mq_reg.numa_node = home_node; - null_mq_reg.queue_depth = hw_queue_depth; - null_mq_reg.nr_hw_queues = submit_queues; - - if (use_per_node_hctx) { - null_mq_reg.ops->alloc_hctx = null_alloc_hctx; - null_mq_reg.ops->free_hctx = null_free_hctx; - } else { - null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue; - null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue; - } - - nullb->q = blk_mq_init_queue(&null_mq_reg, nullb); + nullb->tag_set.ops = &null_mq_ops; + nullb->tag_set.nr_hw_queues = submit_queues; + nullb->tag_set.queue_depth = hw_queue_depth; + nullb->tag_set.numa_node = home_node; + nullb->tag_set.cmd_size = sizeof(struct nullb_cmd); + nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; + nullb->tag_set.driver_data = nullb; + + if (blk_mq_alloc_tag_set(&nullb->tag_set)) + goto out_cleanup_queues; + + nullb->q = blk_mq_init_queue(&nullb->tag_set); + if (!nullb->q) + goto out_cleanup_tags; } else if (queue_mode == NULL_Q_BIO) { nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node); + if (!nullb->q) + goto out_cleanup_queues; blk_queue_make_request(nullb->q, null_queue_bio); init_driver_queues(nullb); } else { nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node); + if (!nullb->q) + goto out_cleanup_queues; blk_queue_prep_rq(nullb->q, null_rq_prep_fn); - if (nullb->q) - blk_queue_softirq_done(nullb->q, null_softirq_done_fn); + blk_queue_softirq_done(nullb->q, null_softirq_done_fn); init_driver_queues(nullb); } - if (!nullb->q) - goto queue_fail; - nullb->q->queuedata = nullb; queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); disk = nullb->disk = alloc_disk_node(1, home_node); - if (!disk) { -queue_fail: - blk_cleanup_queue(nullb->q); - cleanup_queues(nullb); -err: - kfree(nullb); - return -ENOMEM; - } + if (!disk) + goto out_cleanup_blk_queue; mutex_lock(&lock); list_add_tail(&nullb->list, &nullb_list); @@ -579,6 +530,18 @@ err: sprintf(disk->disk_name, "nullb%d", nullb->index); add_disk(disk); return 0; + +out_cleanup_blk_queue: + blk_cleanup_queue(nullb->q); +out_cleanup_tags: + if (queue_mode == NULL_Q_MQ) + blk_mq_free_tag_set(&nullb->tag_set); +out_cleanup_queues: + cleanup_queues(nullb); +out_free_nullb: + kfree(nullb); +out: + return -ENOMEM; } static int __init null_init(void) diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index 
7c64fa756cce..a842c71dcc21 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -2775,6 +2775,16 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) return result; } +static void nvme_reset_notify(struct pci_dev *pdev, bool prepare) +{ + struct nvme_dev *dev = pci_get_drvdata(pdev); + + if (prepare) + nvme_dev_shutdown(dev); + else + nvme_dev_resume(dev); +} + static void nvme_shutdown(struct pci_dev *pdev) { struct nvme_dev *dev = pci_get_drvdata(pdev); @@ -2839,6 +2849,7 @@ static const struct pci_error_handlers nvme_err_handler = { .link_reset = nvme_link_reset, .slot_reset = nvme_slot_reset, .resume = nvme_error_resume, + .reset_notify = nvme_reset_notify, }; /* Move to pci_ids.h later */ diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c index e76bdc074dbe..719cb1bc1640 100644 --- a/drivers/block/paride/pcd.c +++ b/drivers/block/paride/pcd.c @@ -747,7 +747,7 @@ static void do_pcd_request(struct request_queue * q) pcd_current = cd; pcd_sector = blk_rq_pos(pcd_req); pcd_count = blk_rq_cur_sectors(pcd_req); - pcd_buf = pcd_req->buffer; + pcd_buf = bio_data(pcd_req->bio); pcd_busy = 1; ps_set_intr(do_pcd_read, NULL, 0, nice); return; diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c index 19ad8f0c83ef..fea7e76a00de 100644 --- a/drivers/block/paride/pd.c +++ b/drivers/block/paride/pd.c @@ -454,7 +454,7 @@ static enum action do_pd_io_start(void) if (pd_block + pd_count > get_capacity(pd_req->rq_disk)) return Fail; pd_run = blk_rq_sectors(pd_req); - pd_buf = pd_req->buffer; + pd_buf = bio_data(pd_req->bio); pd_retries = 0; if (pd_cmd == READ) return do_pd_read_start(); @@ -485,7 +485,7 @@ static int pd_next_buf(void) spin_lock_irqsave(&pd_lock, saved_flags); __blk_end_request_cur(pd_req, 0); pd_count = blk_rq_cur_sectors(pd_req); - pd_buf = pd_req->buffer; + pd_buf = bio_data(pd_req->bio); spin_unlock_irqrestore(&pd_lock, saved_flags); return 0; } diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c index f5c86d523ba0..9a15fd3c9349 100644 --- a/drivers/block/paride/pf.c +++ b/drivers/block/paride/pf.c @@ -795,7 +795,7 @@ repeat: } pf_cmd = rq_data_dir(pf_req); - pf_buf = pf_req->buffer; + pf_buf = bio_data(pf_req->bio); pf_retries = 0; pf_busy = 1; @@ -827,7 +827,7 @@ static int pf_next_buf(void) if (!pf_req) return 1; pf_count = blk_rq_cur_sectors(pf_req); - pf_buf = pf_req->buffer; + pf_buf = bio_data(pf_req->bio); } return 0; } diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c index a69dd93d1bd5..608532d3f8c9 100644 --- a/drivers/block/skd_main.c +++ b/drivers/block/skd_main.c @@ -563,7 +563,6 @@ skd_prep_discard_cdb(struct skd_scsi_request *scsi_req, req = skreq->req; blk_add_request_payload(req, page, len); - req->buffer = buf; } static void skd_request_fn_not_online(struct request_queue *q); @@ -744,6 +743,7 @@ static void skd_request_fn(struct request_queue *q) break; } skreq->discard_page = 1; + req->completion_data = page; skd_prep_discard_cdb(scsi_req, skreq, page, lba, count); } else if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) { @@ -858,8 +858,7 @@ static void skd_end_request(struct skd_device *skdev, (skreq->discard_page == 1)) { pr_debug("%s:%s:%d, free the page!", skdev->name, __func__, __LINE__); - free_page((unsigned long)req->buffer); - req->buffer = NULL; + __free_page(req->completion_data); } if (unlikely(error)) { @@ -3945,15 +3944,14 @@ static int skd_acquire_msix(struct skd_device *skdev) for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) entries[i].entry = i; - rc = 
pci_enable_msix_range(pdev, entries, - SKD_MIN_MSIX_COUNT, SKD_MAX_MSIX_COUNT); - if (rc < 0) { + rc = pci_enable_msix_exact(pdev, entries, SKD_MAX_MSIX_COUNT); + if (rc) { pr_err("(%s): failed to enable MSI-X %d\n", skd_name(skdev), rc); goto msix_out; } - skdev->msix_count = rc; + skdev->msix_count = SKD_MAX_MSIX_COUNT; skdev->msix_entries = kzalloc(sizeof(struct skd_msix_entry) * skdev->msix_count, GFP_KERNEL); if (!skdev->msix_entries) { diff --git a/drivers/block/swim.c b/drivers/block/swim.c index b02d53a399f3..6b44bbe528b7 100644 --- a/drivers/block/swim.c +++ b/drivers/block/swim.c @@ -549,7 +549,7 @@ static void redo_fd_request(struct request_queue *q) case READ: err = floppy_read_sectors(fs, blk_rq_pos(req), blk_rq_cur_sectors(req), - req->buffer); + bio_data(req->bio)); break; } done: diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index c74f7b56e7c4..523ee8fd4c15 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c @@ -342,7 +342,7 @@ static void start_request(struct floppy_state *fs) swim3_dbg("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n", req->rq_disk->disk_name, req->cmd, (long)blk_rq_pos(req), blk_rq_sectors(req), - req->buffer); + bio_data(req->bio)); swim3_dbg(" errors=%d current_nr_sectors=%u\n", req->errors, blk_rq_cur_sectors(req)); #endif @@ -479,11 +479,11 @@ static inline void setup_transfer(struct floppy_state *fs) /* Set up 3 dma commands: write preamble, data, postamble */ init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble)); ++cp; - init_dma(cp, OUTPUT_MORE, req->buffer, 512); + init_dma(cp, OUTPUT_MORE, bio_data(req->bio), 512); ++cp; init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble)); } else { - init_dma(cp, INPUT_LAST, req->buffer, n * 512); + init_dma(cp, INPUT_LAST, bio_data(req->bio), n * 512); } ++cp; out_le16(&cp->command, DBDMA_STOP); diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 6d8a87f252de..f63d358f3d93 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -30,6 +30,9 @@ struct virtio_blk /* The disk structure for the kernel. */ struct gendisk *disk; + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + /* Process context for config space updates */ struct work_struct config_work; @@ -112,7 +115,7 @@ static int __virtblk_add_req(struct virtqueue *vq, static inline void virtblk_request_done(struct request *req) { - struct virtblk_req *vbr = req->special; + struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); int error = virtblk_result(vbr); if (req->cmd_type == REQ_TYPE_BLOCK_PC) { @@ -144,21 +147,22 @@ static void virtblk_done(struct virtqueue *vq) if (unlikely(virtqueue_is_broken(vq))) break; } while (!virtqueue_enable_cb(vq)); - spin_unlock_irqrestore(&vblk->vq_lock, flags); /* In case queue is stopped waiting for more buffers. 
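
The req->special -> blk_mq_rq_to_pdu() conversions throughout this series lean on blk-mq allocating the driver's per-command payload inline, immediately behind struct request and sized by tag_set.cmd_size, so the accessor is plain pointer arithmetic. A toy sketch of that layout assumption — struct request_like and the helpers are illustrative, not kernel API:

#include <stdlib.h>

/* Toy model: one allocation holds the request header followed by the
 * driver's per-command payload (cmd_size bytes). */
struct request_like {
        int tag;
        /* ... core fields ... */
};

static void *rq_to_pdu(struct request_like *rq)
{
        return rq + 1;          /* payload begins right after the request */
}

static struct request_like *alloc_rq_with_pdu(size_t cmd_size)
{
        return malloc(sizeof(struct request_like) + cmd_size);
}
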
*/ if (req_done) - blk_mq_start_stopped_hw_queues(vblk->disk->queue); + blk_mq_start_stopped_hw_queues(vblk->disk->queue, true); + spin_unlock_irqrestore(&vblk->vq_lock, flags); } static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req) { struct virtio_blk *vblk = hctx->queue->queuedata; - struct virtblk_req *vbr = req->special; + struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); unsigned long flags; unsigned int num; const bool last = (req->cmd_flags & REQ_END) != 0; int err; + bool notify = false; BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems); @@ -202,8 +206,8 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req) err = __virtblk_add_req(vblk->vq, vbr, vbr->sg, num); if (err) { virtqueue_kick(vblk->vq); - spin_unlock_irqrestore(&vblk->vq_lock, flags); blk_mq_stop_hw_queue(hctx); + spin_unlock_irqrestore(&vblk->vq_lock, flags); /* Out of mem doesn't actually happen, since we fall back * to direct descriptors */ if (err == -ENOMEM || err == -ENOSPC) @@ -211,10 +215,12 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req) return BLK_MQ_RQ_QUEUE_ERROR; } - if (last) - virtqueue_kick(vblk->vq); - + if (last && virtqueue_kick_prepare(vblk->vq)) + notify = true; spin_unlock_irqrestore(&vblk->vq_lock, flags); + + if (notify) + virtqueue_notify(vblk->vq); return BLK_MQ_RQ_QUEUE_OK; } @@ -480,33 +486,27 @@ static const struct device_attribute dev_attr_cache_type_rw = __ATTR(cache_type, S_IRUGO|S_IWUSR, virtblk_cache_type_show, virtblk_cache_type_store); -static struct blk_mq_ops virtio_mq_ops = { - .queue_rq = virtio_queue_rq, - .map_queue = blk_mq_map_queue, - .alloc_hctx = blk_mq_alloc_single_hw_queue, - .free_hctx = blk_mq_free_single_hw_queue, - .complete = virtblk_request_done, -}; - -static struct blk_mq_reg virtio_mq_reg = { - .ops = &virtio_mq_ops, - .nr_hw_queues = 1, - .queue_depth = 0, /* Set in virtblk_probe */ - .numa_node = NUMA_NO_NODE, - .flags = BLK_MQ_F_SHOULD_MERGE, -}; -module_param_named(queue_depth, virtio_mq_reg.queue_depth, uint, 0444); - -static int virtblk_init_vbr(void *data, struct blk_mq_hw_ctx *hctx, - struct request *rq, unsigned int nr) +static int virtblk_init_request(void *data, struct request *rq, + unsigned int hctx_idx, unsigned int request_idx, + unsigned int numa_node) { struct virtio_blk *vblk = data; - struct virtblk_req *vbr = rq->special; + struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq); sg_init_table(vbr->sg, vblk->sg_elems); return 0; } +static struct blk_mq_ops virtio_mq_ops = { + .queue_rq = virtio_queue_rq, + .map_queue = blk_mq_map_queue, + .complete = virtblk_request_done, + .init_request = virtblk_init_request, +}; + +static unsigned int virtblk_queue_depth; +module_param_named(queue_depth, virtblk_queue_depth, uint, 0444); + static int virtblk_probe(struct virtio_device *vdev) { struct virtio_blk *vblk; @@ -561,24 +561,34 @@ static int virtblk_probe(struct virtio_device *vdev) } /* Default queue sizing is to fill the ring. */ - if (!virtio_mq_reg.queue_depth) { - virtio_mq_reg.queue_depth = vblk->vq->num_free; + if (!virtblk_queue_depth) { + virtblk_queue_depth = vblk->vq->num_free; /* ... 
but without indirect descs, we use 2 descs per req */ if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC)) - virtio_mq_reg.queue_depth /= 2; + virtblk_queue_depth /= 2; } - virtio_mq_reg.cmd_size = + + memset(&vblk->tag_set, 0, sizeof(vblk->tag_set)); + vblk->tag_set.ops = &virtio_mq_ops; + vblk->tag_set.nr_hw_queues = 1; + vblk->tag_set.queue_depth = virtblk_queue_depth; + vblk->tag_set.numa_node = NUMA_NO_NODE; + vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; + vblk->tag_set.cmd_size = sizeof(struct virtblk_req) + sizeof(struct scatterlist) * sg_elems; + vblk->tag_set.driver_data = vblk; + + err = blk_mq_alloc_tag_set(&vblk->tag_set); + if (err) + goto out_put_disk; - q = vblk->disk->queue = blk_mq_init_queue(&virtio_mq_reg, vblk); + q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set); if (!q) { err = -ENOMEM; - goto out_put_disk; + goto out_free_tags; } - blk_mq_init_commands(q, virtblk_init_vbr, vblk); - q->queuedata = vblk; virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN); @@ -679,6 +689,8 @@ static int virtblk_probe(struct virtio_device *vdev) out_del_disk: del_gendisk(vblk->disk); blk_cleanup_queue(vblk->disk->queue); +out_free_tags: + blk_mq_free_tag_set(&vblk->tag_set); out_put_disk: put_disk(vblk->disk); out_free_vq: @@ -705,6 +717,8 @@ static void virtblk_remove(struct virtio_device *vdev) del_gendisk(vblk->disk); blk_cleanup_queue(vblk->disk->queue); + blk_mq_free_tag_set(&vblk->tag_set); + /* Stop all the virtqueues. */ vdev->config->reset(vdev); @@ -749,7 +763,7 @@ static int virtblk_restore(struct virtio_device *vdev) vblk->config_enable = true; ret = init_vq(vdev->priv); if (!ret) - blk_mq_start_stopped_hw_queues(vblk->disk->queue); + blk_mq_start_stopped_hw_queues(vblk->disk->queue, true); return ret; } diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index be052773ad03..f65b807e3236 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h @@ -314,7 +314,7 @@ struct xen_blkif { unsigned long long st_rd_sect; unsigned long long st_wr_sect; - wait_queue_head_t waiting_to_free; + struct work_struct free_work; /* Thread shutdown wait queue. */ wait_queue_head_t shutdown_wq; }; @@ -361,7 +361,7 @@ struct pending_req { #define xen_blkif_put(_b) \ do { \ if (atomic_dec_and_test(&(_b)->refcnt)) \ - wake_up(&(_b)->waiting_to_free);\ + schedule_work(&(_b)->free_work);\ } while (0) struct phys_req { diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 9a547e6b6ebf..3a8b810b4980 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -35,12 +35,26 @@ static void connect(struct backend_info *); static int connect_ring(struct backend_info *); static void backend_changed(struct xenbus_watch *, const char **, unsigned int); +static void xen_blkif_free(struct xen_blkif *blkif); +static void xen_vbd_free(struct xen_vbd *vbd); struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be) { return be->dev; } +/* + * The last request could free the device from softirq context and + * xen_blkif_free() can sleep. 
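
The comment above motivates the classic deferred-release pattern this patch adopts: the final reference may be dropped in softirq context, so the put path schedules a work item and the sleeping teardown runs later from a workqueue, in process context. A generic kernel-style sketch — obj, obj_teardown() and friends are hypothetical names, not the driver's:

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct obj {
        atomic_t refcnt;
        struct work_struct free_work;   /* INIT_WORK(&o->free_work,
                                           obj_deferred_free) at alloc time */
};

static void obj_teardown(struct obj *o); /* hypothetical: may sleep */

static void obj_deferred_free(struct work_struct *work)
{
        struct obj *o = container_of(work, struct obj, free_work);

        obj_teardown(o);        /* safe: workqueue runs in process context */
        kfree(o);
}

static void obj_put(struct obj *o)
{
        if (atomic_dec_and_test(&o->refcnt))
                schedule_work(&o->free_work);   /* never free here: the last
                                                   put may be in softirq */
}
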
+ */ +static void xen_blkif_deferred_free(struct work_struct *work) +{ + struct xen_blkif *blkif; + + blkif = container_of(work, struct xen_blkif, free_work); + xen_blkif_free(blkif); +} + static int blkback_name(struct xen_blkif *blkif, char *buf) { char *devpath, *devname; @@ -121,7 +135,6 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid) init_completion(&blkif->drain_complete); atomic_set(&blkif->drain, 0); blkif->st_print = jiffies; - init_waitqueue_head(&blkif->waiting_to_free); blkif->persistent_gnts.rb_node = NULL; spin_lock_init(&blkif->free_pages_lock); INIT_LIST_HEAD(&blkif->free_pages); @@ -132,6 +145,7 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid) INIT_WORK(&blkif->persistent_purge_work, xen_blkbk_unmap_purged_grants); INIT_LIST_HEAD(&blkif->pending_free); + INIT_WORK(&blkif->free_work, xen_blkif_deferred_free); for (i = 0; i < XEN_BLKIF_REQS; i++) { req = kzalloc(sizeof(*req), GFP_KERNEL); @@ -231,7 +245,7 @@ static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page, return 0; } -static void xen_blkif_disconnect(struct xen_blkif *blkif) +static int xen_blkif_disconnect(struct xen_blkif *blkif) { if (blkif->xenblkd) { kthread_stop(blkif->xenblkd); @@ -239,9 +253,12 @@ static void xen_blkif_disconnect(struct xen_blkif *blkif) blkif->xenblkd = NULL; } - atomic_dec(&blkif->refcnt); - wait_event(blkif->waiting_to_free, atomic_read(&blkif->refcnt) == 0); - atomic_inc(&blkif->refcnt); + /* The above kthread_stop() guarantees that at this point we + * don't have any discard_io or other_io requests. So, checking + * for inflight IO is enough. + */ + if (atomic_read(&blkif->inflight) > 0) + return -EBUSY; if (blkif->irq) { unbind_from_irqhandler(blkif->irq, blkif); @@ -252,6 +269,8 @@ static void xen_blkif_disconnect(struct xen_blkif *blkif) xenbus_unmap_ring_vfree(blkif->be->dev, blkif->blk_ring); blkif->blk_rings.common.sring = NULL; } + + return 0; } static void xen_blkif_free(struct xen_blkif *blkif) @@ -259,8 +278,8 @@ static void xen_blkif_free(struct xen_blkif *blkif) struct pending_req *req, *n; int i = 0, j; - if (!atomic_dec_and_test(&blkif->refcnt)) - BUG(); + xen_blkif_disconnect(blkif); + xen_vbd_free(&blkif->vbd); /* Remove all persistent grants and the cache of ballooned pages. */ xen_blkbk_free_caches(blkif); @@ -449,16 +468,15 @@ static int xen_blkbk_remove(struct xenbus_device *dev) be->backend_watch.node = NULL; } + dev_set_drvdata(&dev->dev, NULL); + if (be->blkif) { xen_blkif_disconnect(be->blkif); - xen_vbd_free(&be->blkif->vbd); - xen_blkif_free(be->blkif); - be->blkif = NULL; + xen_blkif_put(be->blkif); } kfree(be->mode); kfree(be); - dev_set_drvdata(&dev->dev, NULL); return 0; } @@ -481,10 +499,15 @@ static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info struct xenbus_device *dev = be->dev; struct xen_blkif *blkif = be->blkif; int err; - int state = 0; + int state = 0, discard_enable; struct block_device *bdev = be->blkif->vbd.bdev; struct request_queue *q = bdev_get_queue(bdev); + err = xenbus_scanf(XBT_NIL, dev->nodename, "discard-enable", "%d", + &discard_enable); + if (err == 1 && !discard_enable) + return; + if (blk_queue_discard(q)) { err = xenbus_printf(xbt, dev->nodename, "discard-granularity", "%u", @@ -700,7 +723,11 @@ static void frontend_changed(struct xenbus_device *dev, * Enforce precondition before potential leak point. * xen_blkif_disconnect() is idempotent. 
*/ - xen_blkif_disconnect(be->blkif); + err = xen_blkif_disconnect(be->blkif); + if (err) { + xenbus_dev_fatal(dev, err, "pending I/O"); + break; + } err = connect_ring(be); if (err) diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index efe1b4761735..5deb235bd18f 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -612,10 +612,10 @@ static void do_blkif_request(struct request_queue *rq) } pr_debug("do_blk_req %p: cmd %p, sec %lx, " - "(%u/%u) buffer:%p [%s]\n", + "(%u/%u) [%s]\n", req, req->cmd, (unsigned long)blk_rq_pos(req), blk_rq_cur_sectors(req), blk_rq_sectors(req), - req->buffer, rq_data_dir(req) ? "write" : "read"); + rq_data_dir(req) ? "write" : "read"); if (blkif_queue_request(req)) { blk_requeue_request(rq, req); @@ -1635,36 +1635,24 @@ blkfront_closing(struct blkfront_info *info) static void blkfront_setup_discard(struct blkfront_info *info) { int err; - char *type; unsigned int discard_granularity; unsigned int discard_alignment; unsigned int discard_secure; - type = xenbus_read(XBT_NIL, info->xbdev->otherend, "type", NULL); - if (IS_ERR(type)) - return; - - info->feature_secdiscard = 0; - if (strncmp(type, "phy", 3) == 0) { - err = xenbus_gather(XBT_NIL, info->xbdev->otherend, - "discard-granularity", "%u", &discard_granularity, - "discard-alignment", "%u", &discard_alignment, - NULL); - if (!err) { - info->feature_discard = 1; - info->discard_granularity = discard_granularity; - info->discard_alignment = discard_alignment; - } - err = xenbus_gather(XBT_NIL, info->xbdev->otherend, - "discard-secure", "%d", &discard_secure, - NULL); - if (!err) - info->feature_secdiscard = discard_secure; - - } else if (strncmp(type, "file", 4) == 0) - info->feature_discard = 1; - - kfree(type); + info->feature_discard = 1; + err = xenbus_gather(XBT_NIL, info->xbdev->otherend, + "discard-granularity", "%u", &discard_granularity, + "discard-alignment", "%u", &discard_alignment, + NULL); + if (!err) { + info->discard_granularity = discard_granularity; + info->discard_alignment = discard_alignment; + } + err = xenbus_gather(XBT_NIL, info->xbdev->otherend, + "discard-secure", "%d", &discard_secure, + NULL); + if (!err) + info->feature_secdiscard = !!discard_secure; } static int blkfront_setup_indirect(struct blkfront_info *info) diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c index 1393b8871a28..ab3ea62e5dfc 100644 --- a/drivers/block/xsysace.c +++ b/drivers/block/xsysace.c @@ -661,7 +661,7 @@ static void ace_fsm_dostate(struct ace_device *ace) rq_data_dir(req)); ace->req = req; - ace->data_ptr = req->buffer; + ace->data_ptr = bio_data(req->bio); ace->data_count = blk_rq_cur_sectors(req) * ACE_BUF_PER_SECTOR; ace_out32(ace, ACE_MPULBA, blk_rq_pos(req) & 0x0FFFFFFF); @@ -733,7 +733,7 @@ static void ace_fsm_dostate(struct ace_device *ace) * blk_rq_sectors(ace->req), * blk_rq_cur_sectors(ace->req)); */ - ace->data_ptr = ace->req->buffer; + ace->data_ptr = bio_data(ace->req->bio); ace->data_count = blk_rq_cur_sectors(ace->req) * 16; ace_fsm_yieldirq(ace); break; diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c index 27de5046708a..968f9e52effa 100644 --- a/drivers/block/z2ram.c +++ b/drivers/block/z2ram.c @@ -87,13 +87,15 @@ static void do_z2_request(struct request_queue *q) while (len) { unsigned long addr = start & Z2RAM_CHUNKMASK; unsigned long size = Z2RAM_CHUNKSIZE - addr; + void *buffer = bio_data(req->bio); + if (len < size) size = len; addr += z2ram_map[ start >> Z2RAM_CHUNKSHIFT ]; if (rq_data_dir(req) == READ) - 
memcpy(req->buffer, (char *)addr, size); + memcpy(buffer, (char *)addr, size); else - memcpy((char *)addr, req->buffer, size); + memcpy((char *)addr, buffer, size); start += size; len -= size; } diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c index 293e2e0a0a87..00b73448b22e 100644 --- a/drivers/bus/mvebu-mbus.c +++ b/drivers/bus/mvebu-mbus.c @@ -56,6 +56,7 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/debugfs.h> +#include <linux/log2.h> /* * DDR target is the same on all platforms. @@ -222,12 +223,6 @@ static int mvebu_mbus_window_conflicts(struct mvebu_mbus_state *mbus, */ if ((u64)base < wend && end > wbase) return 0; - - /* - * Check if target/attribute conflicts - */ - if (target == wtarget && attr == wattr) - return 0; } return 1; @@ -266,6 +261,17 @@ static int mvebu_mbus_setup_window(struct mvebu_mbus_state *mbus, mbus->soc->win_cfg_offset(win); u32 ctrl, remap_addr; + if (!is_power_of_2(size)) { + WARN(true, "Invalid MBus window size: 0x%zx\n", size); + return -EINVAL; + } + + if ((base & (phys_addr_t)(size - 1)) != 0) { + WARN(true, "Invalid MBus base/size: %pa len 0x%zx\n", &base, + size); + return -EINVAL; + } + ctrl = ((size - 1) & WIN_CTRL_SIZE_MASK) | (attr << WIN_CTRL_ATTR_SHIFT) | (target << WIN_CTRL_TGT_SHIFT) | @@ -413,6 +419,10 @@ static int mvebu_devs_debug_show(struct seq_file *seq, void *v) win, (unsigned long long)wbase, (unsigned long long)(wbase + wsize), wtarget, wattr); + if (!is_power_of_2(wsize) || + ((wbase & (u64)(wsize - 1)) != 0)) + seq_puts(seq, " (Invalid base/size!!)"); + if (win < mbus->soc->num_remappable_wins) { seq_printf(seq, " (remap %016llx)\n", (unsigned long long)wremap); diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 8a3aff724d98..49ac5662585b 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -312,36 +312,24 @@ static const char *mrw_format_status[] = { static const char *mrw_address_space[] = { "DMA", "GAA" }; -#if (ERRLOGMASK!=CD_NOTHING) -#define cdinfo(type, fmt, args...) \ +#if (ERRLOGMASK != CD_NOTHING) +#define cd_dbg(type, fmt, ...) \ do { \ if ((ERRLOGMASK & type) || debug == 1) \ - pr_info(fmt, ##args); \ + pr_debug(fmt, ##__VA_ARGS__); \ } while (0) #else -#define cdinfo(type, fmt, args...) \ +#define cd_dbg(type, fmt, ...) \ do { \ if (0 && (ERRLOGMASK & type) || debug == 1) \ - pr_info(fmt, ##args); \ + pr_debug(fmt, ##__VA_ARGS__); \ } while (0) #endif -/* These are used to simplify getting data in from and back to user land */ -#define IOCTL_IN(arg, type, in) \ - if (copy_from_user(&(in), (type __user *) (arg), sizeof (in))) \ - return -EFAULT; - -#define IOCTL_OUT(arg, type, out) \ - if (copy_to_user((type __user *) (arg), &(out), sizeof (out))) \ - return -EFAULT; - /* The (cdo->capability & ~cdi->mask & CDC_XXX) construct was used in a lot of places. This macro makes the code more clear. */ #define CDROM_CAN(type) (cdi->ops->capability & ~cdi->mask & (type)) -/* used in the audio ioctls */ -#define CHECKAUDIO if ((ret=check_for_audio_disc(cdi, cdo))) return ret - /* * Another popular OS uses 7 seconds as the hard timeout for default * commands, so it is a good choice for us as well. @@ -349,21 +337,6 @@ do { \ #define CDROM_DEF_TIMEOUT (7 * HZ) /* Not-exported routines. 
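
The new mvebu-mbus sanity checks above encode the hardware rule that a decoding window's size must be a power of two and its base aligned to that size; both reduce to the usual mask test, which is only meaningful for power-of-two sizes. A standalone sketch of the check:

#include <stdbool.h>
#include <stdint.h>

static bool is_pow2(uint64_t x)
{
        return x && (x & (x - 1)) == 0;
}

/* A window is programmable only if size is a power of two and base is
 * aligned to size -- the same pair of tests the driver now performs. */
static bool window_valid(uint64_t base, uint64_t size)
{
        return is_pow2(size) && (base & (size - 1)) == 0;
}

/* window_valid(0x100000, 0x100000) -> true
 * window_valid(0x180000, 0x100000) -> false (base not size-aligned)
 * window_valid(0x100000, 0x180000) -> false (size not a power of two) */
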
*/ -static int open_for_data(struct cdrom_device_info * cdi); -static int check_for_audio_disc(struct cdrom_device_info * cdi, - struct cdrom_device_ops * cdo); -static void sanitize_format(union cdrom_addr *addr, - u_char * curr, u_char requested); -static int mmc_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, - unsigned long arg); - -int cdrom_get_last_written(struct cdrom_device_info *, long *); -static int cdrom_get_next_writable(struct cdrom_device_info *, long *); -static void cdrom_count_tracks(struct cdrom_device_info *, tracktype*); - -static int cdrom_mrw_exit(struct cdrom_device_info *cdi); - -static int cdrom_get_disc_info(struct cdrom_device_info *cdi, disc_information *di); static void cdrom_sysctl_register(void); @@ -382,113 +355,65 @@ static int cdrom_dummy_generic_packet(struct cdrom_device_info *cdi, return -EIO; } -/* This macro makes sure we don't have to check on cdrom_device_ops - * existence in the run-time routines below. Change_capability is a - * hack to have the capability flags defined const, while we can still - * change it here without gcc complaining at every line. - */ -#define ENSURE(call, bits) if (cdo->call == NULL) *change_capability &= ~(bits) - -int register_cdrom(struct cdrom_device_info *cdi) -{ - static char banner_printed; - struct cdrom_device_ops *cdo = cdi->ops; - int *change_capability = (int *)&cdo->capability; /* hack */ - - cdinfo(CD_OPEN, "entering register_cdrom\n"); - - if (cdo->open == NULL || cdo->release == NULL) - return -EINVAL; - if (!banner_printed) { - pr_info("Uniform CD-ROM driver " REVISION "\n"); - banner_printed = 1; - cdrom_sysctl_register(); - } - - ENSURE(drive_status, CDC_DRIVE_STATUS ); - if (cdo->check_events == NULL && cdo->media_changed == NULL) - *change_capability = ~(CDC_MEDIA_CHANGED | CDC_SELECT_DISC); - ENSURE(tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY); - ENSURE(lock_door, CDC_LOCK); - ENSURE(select_speed, CDC_SELECT_SPEED); - ENSURE(get_last_session, CDC_MULTI_SESSION); - ENSURE(get_mcn, CDC_MCN); - ENSURE(reset, CDC_RESET); - ENSURE(generic_packet, CDC_GENERIC_PACKET); - cdi->mc_flags = 0; - cdo->n_minors = 0; - cdi->options = CDO_USE_FFLAGS; - - if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY)) - cdi->options |= (int) CDO_AUTO_CLOSE; - if (autoeject==1 && CDROM_CAN(CDC_OPEN_TRAY)) - cdi->options |= (int) CDO_AUTO_EJECT; - if (lockdoor==1) - cdi->options |= (int) CDO_LOCK; - if (check_media_type==1) - cdi->options |= (int) CDO_CHECK_TYPE; - - if (CDROM_CAN(CDC_MRW_W)) - cdi->exit = cdrom_mrw_exit; - - if (cdi->disk) - cdi->cdda_method = CDDA_BPC_FULL; - else - cdi->cdda_method = CDDA_OLD; - - if (!cdo->generic_packet) - cdo->generic_packet = cdrom_dummy_generic_packet; - - cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name); - mutex_lock(&cdrom_mutex); - list_add(&cdi->list, &cdrom_list); - mutex_unlock(&cdrom_mutex); - return 0; -} -#undef ENSURE - -void unregister_cdrom(struct cdrom_device_info *cdi) +static int cdrom_flush_cache(struct cdrom_device_info *cdi) { - cdinfo(CD_OPEN, "entering unregister_cdrom\n"); + struct packet_command cgc; - mutex_lock(&cdrom_mutex); - list_del(&cdi->list); - mutex_unlock(&cdrom_mutex); + init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); + cgc.cmd[0] = GPCMD_FLUSH_CACHE; - if (cdi->exit) - cdi->exit(cdi); + cgc.timeout = 5 * 60 * HZ; - cdi->ops->n_minors--; - cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name); + return cdi->ops->generic_packet(cdi, &cgc); } -int cdrom_get_media_event(struct cdrom_device_info *cdi, - struct media_event_desc 
*med) +/* requires CD R/RW */ +static int cdrom_get_disc_info(struct cdrom_device_info *cdi, + disc_information *di) { + struct cdrom_device_ops *cdo = cdi->ops; struct packet_command cgc; - unsigned char buffer[8]; - struct event_header *eh = (struct event_header *) buffer; + int ret, buflen; - init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ); - cgc.cmd[0] = GPCMD_GET_EVENT_STATUS_NOTIFICATION; - cgc.cmd[1] = 1; /* IMMED */ - cgc.cmd[4] = 1 << 4; /* media event */ - cgc.cmd[8] = sizeof(buffer); + /* set up command and get the disc info */ + init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ); + cgc.cmd[0] = GPCMD_READ_DISC_INFO; + cgc.cmd[8] = cgc.buflen = 2; cgc.quiet = 1; - if (cdi->ops->generic_packet(cdi, &cgc)) - return 1; + ret = cdo->generic_packet(cdi, &cgc); + if (ret) + return ret; - if (be16_to_cpu(eh->data_len) < sizeof(*med)) - return 1; + /* not all drives have the same disc_info length, so requeue + * packet with the length the drive tells us it can supply + */ + buflen = be16_to_cpu(di->disc_information_length) + + sizeof(di->disc_information_length); - if (eh->nea || eh->notification_class != 0x4) - return 1; + if (buflen > sizeof(disc_information)) + buflen = sizeof(disc_information); - memcpy(med, &buffer[sizeof(*eh)], sizeof(*med)); - return 0; + cgc.cmd[8] = cgc.buflen = buflen; + ret = cdo->generic_packet(cdi, &cgc); + if (ret) + return ret; + + /* return actual fill size */ + return buflen; } +/* This macro makes sure we don't have to check on cdrom_device_ops + * existence in the run-time routines below. Change_capability is a + * hack to have the capability flags defined const, while we can still + * change it here without gcc complaining at every line. + */ +#define ENSURE(call, bits) \ +do { \ + if (cdo->call == NULL) \ + *change_capability &= ~(bits); \ +} while (0) + /* * the first prototypes used 0x2c as the page code for the mrw mode page, * subsequently this was changed to 0x03. 
probe the one used by this drive @@ -605,18 +530,6 @@ static int cdrom_mrw_bgformat_susp(struct cdrom_device_info *cdi, int immed) return cdi->ops->generic_packet(cdi, &cgc); } -static int cdrom_flush_cache(struct cdrom_device_info *cdi) -{ - struct packet_command cgc; - - init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); - cgc.cmd[0] = GPCMD_FLUSH_CACHE; - - cgc.timeout = 5 * 60 * HZ; - - return cdi->ops->generic_packet(cdi, &cgc); -} - static int cdrom_mrw_exit(struct cdrom_device_info *cdi) { disc_information di; @@ -650,17 +563,19 @@ static int cdrom_mrw_set_lba_space(struct cdrom_device_info *cdi, int space) cgc.buffer = buffer; cgc.buflen = sizeof(buffer); - if ((ret = cdrom_mode_sense(cdi, &cgc, cdi->mrw_mode_page, 0))) + ret = cdrom_mode_sense(cdi, &cgc, cdi->mrw_mode_page, 0); + if (ret) return ret; - mph = (struct mode_page_header *) buffer; + mph = (struct mode_page_header *)buffer; offset = be16_to_cpu(mph->desc_length); size = be16_to_cpu(mph->mode_data_length) + 2; buffer[offset + 3] = space; cgc.buflen = size; - if ((ret = cdrom_mode_select(cdi, &cgc))) + ret = cdrom_mode_select(cdi, &cgc); + if (ret) return ret; pr_info("%s: mrw address space %s selected\n", @@ -668,6 +583,106 @@ static int cdrom_mrw_set_lba_space(struct cdrom_device_info *cdi, int space) return 0; } +int register_cdrom(struct cdrom_device_info *cdi) +{ + static char banner_printed; + struct cdrom_device_ops *cdo = cdi->ops; + int *change_capability = (int *)&cdo->capability; /* hack */ + + cd_dbg(CD_OPEN, "entering register_cdrom\n"); + + if (cdo->open == NULL || cdo->release == NULL) + return -EINVAL; + if (!banner_printed) { + pr_info("Uniform CD-ROM driver " REVISION "\n"); + banner_printed = 1; + cdrom_sysctl_register(); + } + + ENSURE(drive_status, CDC_DRIVE_STATUS); + if (cdo->check_events == NULL && cdo->media_changed == NULL) + *change_capability = ~(CDC_MEDIA_CHANGED | CDC_SELECT_DISC); + ENSURE(tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY); + ENSURE(lock_door, CDC_LOCK); + ENSURE(select_speed, CDC_SELECT_SPEED); + ENSURE(get_last_session, CDC_MULTI_SESSION); + ENSURE(get_mcn, CDC_MCN); + ENSURE(reset, CDC_RESET); + ENSURE(generic_packet, CDC_GENERIC_PACKET); + cdi->mc_flags = 0; + cdo->n_minors = 0; + cdi->options = CDO_USE_FFLAGS; + + if (autoclose == 1 && CDROM_CAN(CDC_CLOSE_TRAY)) + cdi->options |= (int) CDO_AUTO_CLOSE; + if (autoeject == 1 && CDROM_CAN(CDC_OPEN_TRAY)) + cdi->options |= (int) CDO_AUTO_EJECT; + if (lockdoor == 1) + cdi->options |= (int) CDO_LOCK; + if (check_media_type == 1) + cdi->options |= (int) CDO_CHECK_TYPE; + + if (CDROM_CAN(CDC_MRW_W)) + cdi->exit = cdrom_mrw_exit; + + if (cdi->disk) + cdi->cdda_method = CDDA_BPC_FULL; + else + cdi->cdda_method = CDDA_OLD; + + if (!cdo->generic_packet) + cdo->generic_packet = cdrom_dummy_generic_packet; + + cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name); + mutex_lock(&cdrom_mutex); + list_add(&cdi->list, &cdrom_list); + mutex_unlock(&cdrom_mutex); + return 0; +} +#undef ENSURE + +void unregister_cdrom(struct cdrom_device_info *cdi) +{ + cd_dbg(CD_OPEN, "entering unregister_cdrom\n"); + + mutex_lock(&cdrom_mutex); + list_del(&cdi->list); + mutex_unlock(&cdrom_mutex); + + if (cdi->exit) + cdi->exit(cdi); + + cdi->ops->n_minors--; + cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name); +} + +int cdrom_get_media_event(struct cdrom_device_info *cdi, + struct media_event_desc *med) +{ + struct packet_command cgc; + unsigned char buffer[8]; + struct event_header *eh = (struct event_header *)buffer; + + 
init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ); + cgc.cmd[0] = GPCMD_GET_EVENT_STATUS_NOTIFICATION; + cgc.cmd[1] = 1; /* IMMED */ + cgc.cmd[4] = 1 << 4; /* media event */ + cgc.cmd[8] = sizeof(buffer); + cgc.quiet = 1; + + if (cdi->ops->generic_packet(cdi, &cgc)) + return 1; + + if (be16_to_cpu(eh->data_len) < sizeof(*med)) + return 1; + + if (eh->nea || eh->notification_class != 0x4) + return 1; + + memcpy(med, &buffer[sizeof(*eh)], sizeof(*med)); + return 0; +} + static int cdrom_get_random_writable(struct cdrom_device_info *cdi, struct rwrt_feature_desc *rfd) { @@ -839,7 +854,7 @@ static int cdrom_ram_open_write(struct cdrom_device_info *cdi) else if (CDF_RWRT == be16_to_cpu(rfd.feature_code)) ret = !rfd.curr; - cdinfo(CD_OPEN, "can open for random write\n"); + cd_dbg(CD_OPEN, "can open for random write\n"); return ret; } @@ -928,12 +943,12 @@ static void cdrom_dvd_rw_close_write(struct cdrom_device_info *cdi) struct packet_command cgc; if (cdi->mmc3_profile != 0x1a) { - cdinfo(CD_CLOSE, "%s: No DVD+RW\n", cdi->name); + cd_dbg(CD_CLOSE, "%s: No DVD+RW\n", cdi->name); return; } if (!cdi->media_written) { - cdinfo(CD_CLOSE, "%s: DVD+RW media clean\n", cdi->name); + cd_dbg(CD_CLOSE, "%s: DVD+RW media clean\n", cdi->name); return; } @@ -969,82 +984,74 @@ static int cdrom_close_write(struct cdrom_device_info *cdi) #endif } -/* We use the open-option O_NONBLOCK to indicate that the - * purpose of opening is only for subsequent ioctl() calls; no device - * integrity checks are performed. - * - * We hope that all cd-player programs will adopt this convention. It - * is in their own interest: device control becomes a lot easier - * this way. - */ -int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t mode) +/* badly broken, I know. Is due for a fixup anytime. */ +static void cdrom_count_tracks(struct cdrom_device_info *cdi, tracktype *tracks) { - int ret; - - cdinfo(CD_OPEN, "entering cdrom_open\n"); - - /* open is event synchronization point, check events first */ - check_disk_change(bdev); - - /* if this was a O_NONBLOCK open and we should honor the flags, - * do a quick open without drive/disc integrity checks. 
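
The O_NONBLOCK convention in the comment above is visible from user space: a non-blocking open is treated as "for ioctl() control only" and skips the medium checks. A minimal illustrative user-space use (the device path is an assumption):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/cdrom.h>

int main(void)
{
        /* O_NONBLOCK: "ioctl-only" open; no disc-integrity checks */
        int fd = open("/dev/cdrom", O_RDONLY | O_NONBLOCK);
        int status;

        if (fd < 0)
                return 1;

        status = ioctl(fd, CDROM_DRIVE_STATUS, CDSL_CURRENT);
        printf("drive status %d (CDS_DISC_OK is %d)\n", status, CDS_DISC_OK);
        close(fd);
        return 0;
}
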
*/ - cdi->use_count++; - if ((mode & FMODE_NDELAY) && (cdi->options & CDO_USE_FFLAGS)) { - ret = cdi->ops->open(cdi, 1); - } else { - ret = open_for_data(cdi); - if (ret) - goto err; - cdrom_mmc3_profile(cdi); - if (mode & FMODE_WRITE) { - ret = -EROFS; - if (cdrom_open_write(cdi)) - goto err_release; - if (!CDROM_CAN(CDC_RAM)) - goto err_release; - ret = 0; - cdi->media_written = 0; - } + struct cdrom_tochdr header; + struct cdrom_tocentry entry; + int ret, i; + tracks->data = 0; + tracks->audio = 0; + tracks->cdi = 0; + tracks->xa = 0; + tracks->error = 0; + cd_dbg(CD_COUNT_TRACKS, "entering cdrom_count_tracks\n"); + /* Grab the TOC header so we can see how many tracks there are */ + ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCHDR, &header); + if (ret) { + if (ret == -ENOMEDIUM) + tracks->error = CDS_NO_DISC; + else + tracks->error = CDS_NO_INFO; + return; } - - if (ret) - goto err; - - cdinfo(CD_OPEN, "Use count for \"/dev/%s\" now %d\n", - cdi->name, cdi->use_count); - return 0; -err_release: - if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) { - cdi->ops->lock_door(cdi, 0); - cdinfo(CD_OPEN, "door unlocked.\n"); + /* check what type of tracks are on this disc */ + entry.cdte_format = CDROM_MSF; + for (i = header.cdth_trk0; i <= header.cdth_trk1; i++) { + entry.cdte_track = i; + if (cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &entry)) { + tracks->error = CDS_NO_INFO; + return; + } + if (entry.cdte_ctrl & CDROM_DATA_TRACK) { + if (entry.cdte_format == 0x10) + tracks->cdi++; + else if (entry.cdte_format == 0x20) + tracks->xa++; + else + tracks->data++; + } else { + tracks->audio++; + } + cd_dbg(CD_COUNT_TRACKS, "track %d: format=%d, ctrl=%d\n", + i, entry.cdte_format, entry.cdte_ctrl); } - cdi->ops->release(cdi); -err: - cdi->use_count--; - return ret; + cd_dbg(CD_COUNT_TRACKS, "disc has %d tracks: %d=audio %d=data %d=Cd-I %d=XA\n", + header.cdth_trk1, tracks->audio, tracks->data, + tracks->cdi, tracks->xa); } static -int open_for_data(struct cdrom_device_info * cdi) +int open_for_data(struct cdrom_device_info *cdi) { int ret; struct cdrom_device_ops *cdo = cdi->ops; tracktype tracks; - cdinfo(CD_OPEN, "entering open_for_data\n"); + cd_dbg(CD_OPEN, "entering open_for_data\n"); /* Check if the driver can report drive status. If it can, we can do clever things. If it can't, well, we at least tried! */ if (cdo->drive_status != NULL) { ret = cdo->drive_status(cdi, CDSL_CURRENT); - cdinfo(CD_OPEN, "drive_status=%d\n", ret); + cd_dbg(CD_OPEN, "drive_status=%d\n", ret); if (ret == CDS_TRAY_OPEN) { - cdinfo(CD_OPEN, "the tray is open...\n"); + cd_dbg(CD_OPEN, "the tray is open...\n"); /* can/may i close it? */ if (CDROM_CAN(CDC_CLOSE_TRAY) && cdi->options & CDO_AUTO_CLOSE) { - cdinfo(CD_OPEN, "trying to close the tray.\n"); + cd_dbg(CD_OPEN, "trying to close the tray\n"); ret=cdo->tray_move(cdi,0); if (ret) { - cdinfo(CD_OPEN, "bummer. tried to close the tray but failed.\n"); + cd_dbg(CD_OPEN, "bummer. tried to close the tray but failed.\n"); /* Ignore the error from the low level driver. We don't care why it couldn't close the tray. We only care @@ -1054,19 +1061,19 @@ int open_for_data(struct cdrom_device_info * cdi) goto clean_up_and_return; } } else { - cdinfo(CD_OPEN, "bummer. this drive can't close the tray.\n"); + cd_dbg(CD_OPEN, "bummer. this drive can't close the tray.\n"); ret=-ENOMEDIUM; goto clean_up_and_return; } /* Ok, the door should be closed now.. 
Check again */ ret = cdo->drive_status(cdi, CDSL_CURRENT); if ((ret == CDS_NO_DISC) || (ret==CDS_TRAY_OPEN)) { - cdinfo(CD_OPEN, "bummer. the tray is still not closed.\n"); - cdinfo(CD_OPEN, "tray might not contain a medium.\n"); + cd_dbg(CD_OPEN, "bummer. the tray is still not closed.\n"); + cd_dbg(CD_OPEN, "tray might not contain a medium\n"); ret=-ENOMEDIUM; goto clean_up_and_return; } - cdinfo(CD_OPEN, "the tray is now closed.\n"); + cd_dbg(CD_OPEN, "the tray is now closed\n"); } /* the door should be closed now, check for the disc */ ret = cdo->drive_status(cdi, CDSL_CURRENT); @@ -1077,7 +1084,7 @@ int open_for_data(struct cdrom_device_info * cdi) } cdrom_count_tracks(cdi, &tracks); if (tracks.error == CDS_NO_DISC) { - cdinfo(CD_OPEN, "bummer. no disc.\n"); + cd_dbg(CD_OPEN, "bummer. no disc.\n"); ret=-ENOMEDIUM; goto clean_up_and_return; } @@ -1087,34 +1094,34 @@ int open_for_data(struct cdrom_device_info * cdi) if (cdi->options & CDO_CHECK_TYPE) { /* give people a warning shot, now that CDO_CHECK_TYPE is the default case! */ - cdinfo(CD_OPEN, "bummer. wrong media type.\n"); - cdinfo(CD_WARNING, "pid %d must open device O_NONBLOCK!\n", - (unsigned int)task_pid_nr(current)); + cd_dbg(CD_OPEN, "bummer. wrong media type.\n"); + cd_dbg(CD_WARNING, "pid %d must open device O_NONBLOCK!\n", + (unsigned int)task_pid_nr(current)); ret=-EMEDIUMTYPE; goto clean_up_and_return; } else { - cdinfo(CD_OPEN, "wrong media type, but CDO_CHECK_TYPE not set.\n"); + cd_dbg(CD_OPEN, "wrong media type, but CDO_CHECK_TYPE not set\n"); } } - cdinfo(CD_OPEN, "all seems well, opening the device.\n"); + cd_dbg(CD_OPEN, "all seems well, opening the device\n"); /* all seems well, we can open the device */ ret = cdo->open(cdi, 0); /* open for data */ - cdinfo(CD_OPEN, "opening the device gave me %d.\n", ret); + cd_dbg(CD_OPEN, "opening the device gave me %d\n", ret); /* After all this careful checking, we shouldn't have problems opening the device, but we don't want the device locked if this somehow fails... */ if (ret) { - cdinfo(CD_OPEN, "open device failed.\n"); + cd_dbg(CD_OPEN, "open device failed\n"); goto clean_up_and_return; } if (CDROM_CAN(CDC_LOCK) && (cdi->options & CDO_LOCK)) { cdo->lock_door(cdi, 1); - cdinfo(CD_OPEN, "door locked.\n"); + cd_dbg(CD_OPEN, "door locked\n"); } - cdinfo(CD_OPEN, "device opened successfully.\n"); + cd_dbg(CD_OPEN, "device opened successfully\n"); return ret; /* Something failed. Try to unlock the drive, because some drivers @@ -1123,14 +1130,70 @@ int open_for_data(struct cdrom_device_info * cdi) This ensures that the drive gets unlocked after a mount fails. This is a goto to avoid bloating the driver with redundant code. */ clean_up_and_return: - cdinfo(CD_OPEN, "open failed.\n"); + cd_dbg(CD_OPEN, "open failed\n"); if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) { cdo->lock_door(cdi, 0); - cdinfo(CD_OPEN, "door unlocked.\n"); + cd_dbg(CD_OPEN, "door unlocked\n"); } return ret; } +/* We use the open-option O_NONBLOCK to indicate that the + * purpose of opening is only for subsequent ioctl() calls; no device + * integrity checks are performed. + * + * We hope that all cd-player programs will adopt this convention. It + * is in their own interest: device control becomes a lot easier + * this way.
+ */ +int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, + fmode_t mode) +{ + int ret; + + cd_dbg(CD_OPEN, "entering cdrom_open\n"); + + /* open is event synchronization point, check events first */ + check_disk_change(bdev); + + /* if this was a O_NONBLOCK open and we should honor the flags, + * do a quick open without drive/disc integrity checks. */ + cdi->use_count++; + if ((mode & FMODE_NDELAY) && (cdi->options & CDO_USE_FFLAGS)) { + ret = cdi->ops->open(cdi, 1); + } else { + ret = open_for_data(cdi); + if (ret) + goto err; + cdrom_mmc3_profile(cdi); + if (mode & FMODE_WRITE) { + ret = -EROFS; + if (cdrom_open_write(cdi)) + goto err_release; + if (!CDROM_CAN(CDC_RAM)) + goto err_release; + ret = 0; + cdi->media_written = 0; + } + } + + if (ret) + goto err; + + cd_dbg(CD_OPEN, "Use count for \"/dev/%s\" now %d\n", + cdi->name, cdi->use_count); + return 0; +err_release: + if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) { + cdi->ops->lock_door(cdi, 0); + cd_dbg(CD_OPEN, "door unlocked\n"); + } + cdi->ops->release(cdi); +err: + cdi->use_count--; + return ret; +} + /* This code is similar to that in open_for_data. The routine is called whenever an audio play operation is requested. */ @@ -1139,21 +1202,21 @@ static int check_for_audio_disc(struct cdrom_device_info * cdi, { int ret; tracktype tracks; - cdinfo(CD_OPEN, "entering check_for_audio_disc\n"); + cd_dbg(CD_OPEN, "entering check_for_audio_disc\n"); if (!(cdi->options & CDO_CHECK_TYPE)) return 0; if (cdo->drive_status != NULL) { ret = cdo->drive_status(cdi, CDSL_CURRENT); - cdinfo(CD_OPEN, "drive_status=%d\n", ret); + cd_dbg(CD_OPEN, "drive_status=%d\n", ret); if (ret == CDS_TRAY_OPEN) { - cdinfo(CD_OPEN, "the tray is open...\n"); + cd_dbg(CD_OPEN, "the tray is open...\n"); /* can/may i close it? */ if (CDROM_CAN(CDC_CLOSE_TRAY) && cdi->options & CDO_AUTO_CLOSE) { - cdinfo(CD_OPEN, "trying to close the tray.\n"); + cd_dbg(CD_OPEN, "trying to close the tray\n"); ret=cdo->tray_move(cdi,0); if (ret) { - cdinfo(CD_OPEN, "bummer. tried to close tray but failed.\n"); + cd_dbg(CD_OPEN, "bummer. tried to close tray but failed.\n"); /* Ignore the error from the low level driver. We don't care why it couldn't close the tray. We only care @@ -1162,20 +1225,20 @@ static int check_for_audio_disc(struct cdrom_device_info * cdi, return -ENOMEDIUM; } } else { - cdinfo(CD_OPEN, "bummer. this driver can't close the tray.\n"); + cd_dbg(CD_OPEN, "bummer. this driver can't close the tray.\n"); return -ENOMEDIUM; } /* Ok, the door should be closed now.. Check again */ ret = cdo->drive_status(cdi, CDSL_CURRENT); if ((ret == CDS_NO_DISC) || (ret==CDS_TRAY_OPEN)) { - cdinfo(CD_OPEN, "bummer. the tray is still not closed.\n"); + cd_dbg(CD_OPEN, "bummer. the tray is still not closed.\n"); return -ENOMEDIUM; } if (ret!=CDS_DISC_OK) { - cdinfo(CD_OPEN, "bummer. disc isn't ready.\n"); + cd_dbg(CD_OPEN, "bummer. 
disc isn't ready.\n"); return -EIO; } - cdinfo(CD_OPEN, "the tray is now closed.\n"); + cd_dbg(CD_OPEN, "the tray is now closed\n"); } } cdrom_count_tracks(cdi, &tracks); @@ -1193,17 +1256,18 @@ void cdrom_release(struct cdrom_device_info *cdi, fmode_t mode) struct cdrom_device_ops *cdo = cdi->ops; int opened_for_data; - cdinfo(CD_CLOSE, "entering cdrom_release\n"); + cd_dbg(CD_CLOSE, "entering cdrom_release\n"); if (cdi->use_count > 0) cdi->use_count--; if (cdi->use_count == 0) { - cdinfo(CD_CLOSE, "Use count for \"/dev/%s\" now zero\n", cdi->name); + cd_dbg(CD_CLOSE, "Use count for \"/dev/%s\" now zero\n", + cdi->name); cdrom_dvd_rw_close_write(cdi); if ((cdo->capability & CDC_LOCK) && !cdi->keeplocked) { - cdinfo(CD_CLOSE, "Unlocking door!\n"); + cd_dbg(CD_CLOSE, "Unlocking door!\n"); cdo->lock_door(cdi, 0); } } @@ -1262,7 +1326,7 @@ static int cdrom_slot_status(struct cdrom_device_info *cdi, int slot) struct cdrom_changer_info *info; int ret; - cdinfo(CD_CHANGER, "entering cdrom_slot_status()\n"); + cd_dbg(CD_CHANGER, "entering cdrom_slot_status()\n"); if (cdi->sanyo_slot) return CDS_NO_INFO; @@ -1292,7 +1356,7 @@ int cdrom_number_of_slots(struct cdrom_device_info *cdi) int nslots = 1; struct cdrom_changer_info *info; - cdinfo(CD_CHANGER, "entering cdrom_number_of_slots()\n"); + cd_dbg(CD_CHANGER, "entering cdrom_number_of_slots()\n"); /* cdrom_read_mech_status requires a valid value for capacity: */ cdi->capacity = 0; @@ -1313,7 +1377,7 @@ static int cdrom_load_unload(struct cdrom_device_info *cdi, int slot) { struct packet_command cgc; - cdinfo(CD_CHANGER, "entering cdrom_load_unload()\n"); + cd_dbg(CD_CHANGER, "entering cdrom_load_unload()\n"); if (cdi->sanyo_slot && slot < 0) return 0; @@ -1342,7 +1406,7 @@ static int cdrom_select_disc(struct cdrom_device_info *cdi, int slot) int curslot; int ret; - cdinfo(CD_CHANGER, "entering cdrom_select_disc()\n"); + cd_dbg(CD_CHANGER, "entering cdrom_select_disc()\n"); if (!CDROM_CAN(CDC_SELECT_DISC)) return -EDRIVE_CANT_DO_THIS; @@ -1476,51 +1540,6 @@ int cdrom_media_changed(struct cdrom_device_info *cdi) return media_changed(cdi, 0); } -/* badly broken, I know. Is due for a fixup anytime. 
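
For orientation, the track counting relocated above is the classic TOC walk: read the header for the first and last track numbers, then classify each entry by its data bit. An illustrative user-space equivalent using the same ioctls (device path assumed):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/cdrom.h>

int main(void)
{
        struct cdrom_tochdr hdr;
        int fd, i, audio = 0, data = 0;

        fd = open("/dev/cdrom", O_RDONLY | O_NONBLOCK);
        if (fd < 0 || ioctl(fd, CDROMREADTOCHDR, &hdr) < 0)
                return 1;

        for (i = hdr.cdth_trk0; i <= hdr.cdth_trk1; i++) {
                struct cdrom_tocentry ent;

                ent.cdte_track = i;
                ent.cdte_format = CDROM_MSF;
                if (ioctl(fd, CDROMREADTOCENTRY, &ent) < 0)
                        return 1;
                if (ent.cdte_ctrl & CDROM_DATA_TRACK)
                        data++;
                else
                        audio++;
        }
        printf("%d audio, %d data tracks\n", audio, data);
        close(fd);
        return 0;
}
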
*/ -static void cdrom_count_tracks(struct cdrom_device_info *cdi, tracktype* tracks) -{ - struct cdrom_tochdr header; - struct cdrom_tocentry entry; - int ret, i; - tracks->data=0; - tracks->audio=0; - tracks->cdi=0; - tracks->xa=0; - tracks->error=0; - cdinfo(CD_COUNT_TRACKS, "entering cdrom_count_tracks\n"); - /* Grab the TOC header so we can see how many tracks there are */ - if ((ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCHDR, &header))) { - if (ret == -ENOMEDIUM) - tracks->error = CDS_NO_DISC; - else - tracks->error = CDS_NO_INFO; - return; - } - /* check what type of tracks are on this disc */ - entry.cdte_format = CDROM_MSF; - for (i = header.cdth_trk0; i <= header.cdth_trk1; i++) { - entry.cdte_track = i; - if (cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &entry)) { - tracks->error=CDS_NO_INFO; - return; - } - if (entry.cdte_ctrl & CDROM_DATA_TRACK) { - if (entry.cdte_format == 0x10) - tracks->cdi++; - else if (entry.cdte_format == 0x20) - tracks->xa++; - else - tracks->data++; - } else - tracks->audio++; - cdinfo(CD_COUNT_TRACKS, "track %d: format=%d, ctrl=%d\n", - i, entry.cdte_format, entry.cdte_ctrl); - } - cdinfo(CD_COUNT_TRACKS, "disc has %d tracks: %d=audio %d=data %d=Cd-I %d=XA\n", - header.cdth_trk1, tracks->audio, tracks->data, - tracks->cdi, tracks->xa); -} - /* Requests to the low-level drivers will /always/ be done in the following format convention: @@ -1632,7 +1651,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai) switch (ai->type) { /* LU data send */ case DVD_LU_SEND_AGID: - cdinfo(CD_DVD, "entering DVD_LU_SEND_AGID\n"); + cd_dbg(CD_DVD, "entering DVD_LU_SEND_AGID\n"); cgc.quiet = 1; setup_report_key(&cgc, ai->lsa.agid, 0); @@ -1644,7 +1663,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai) break; case DVD_LU_SEND_KEY1: - cdinfo(CD_DVD, "entering DVD_LU_SEND_KEY1\n"); + cd_dbg(CD_DVD, "entering DVD_LU_SEND_KEY1\n"); setup_report_key(&cgc, ai->lsk.agid, 2); if ((ret = cdo->generic_packet(cdi, &cgc))) @@ -1655,7 +1674,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai) break; case DVD_LU_SEND_CHALLENGE: - cdinfo(CD_DVD, "entering DVD_LU_SEND_CHALLENGE\n"); + cd_dbg(CD_DVD, "entering DVD_LU_SEND_CHALLENGE\n"); setup_report_key(&cgc, ai->lsc.agid, 1); if ((ret = cdo->generic_packet(cdi, &cgc))) @@ -1667,7 +1686,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai) /* Post-auth key */ case DVD_LU_SEND_TITLE_KEY: - cdinfo(CD_DVD, "entering DVD_LU_SEND_TITLE_KEY\n"); + cd_dbg(CD_DVD, "entering DVD_LU_SEND_TITLE_KEY\n"); cgc.quiet = 1; setup_report_key(&cgc, ai->lstk.agid, 4); cgc.cmd[5] = ai->lstk.lba; @@ -1686,7 +1705,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai) break; case DVD_LU_SEND_ASF: - cdinfo(CD_DVD, "entering DVD_LU_SEND_ASF\n"); + cd_dbg(CD_DVD, "entering DVD_LU_SEND_ASF\n"); setup_report_key(&cgc, ai->lsasf.agid, 5); if ((ret = cdo->generic_packet(cdi, &cgc))) @@ -1697,7 +1716,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai) /* LU data receive (LU changes state) */ case DVD_HOST_SEND_CHALLENGE: - cdinfo(CD_DVD, "entering DVD_HOST_SEND_CHALLENGE\n"); + cd_dbg(CD_DVD, "entering DVD_HOST_SEND_CHALLENGE\n"); setup_send_key(&cgc, ai->hsc.agid, 1); buf[1] = 0xe; copy_chal(&buf[4], ai->hsc.chal); @@ -1709,7 +1728,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai) break; case DVD_HOST_SEND_KEY2: - cdinfo(CD_DVD, "entering DVD_HOST_SEND_KEY2\n"); + cd_dbg(CD_DVD, "entering 
DVD_HOST_SEND_KEY2\n"); setup_send_key(&cgc, ai->hsk.agid, 3); buf[1] = 0xa; copy_key(&buf[4], ai->hsk.key); @@ -1724,7 +1743,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai) /* Misc */ case DVD_INVALIDATE_AGID: cgc.quiet = 1; - cdinfo(CD_DVD, "entering DVD_INVALIDATE_AGID\n"); + cd_dbg(CD_DVD, "entering DVD_INVALIDATE_AGID\n"); setup_report_key(&cgc, ai->lsa.agid, 0x3f); if ((ret = cdo->generic_packet(cdi, &cgc))) return ret; @@ -1732,7 +1751,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai) /* Get region settings */ case DVD_LU_SEND_RPC_STATE: - cdinfo(CD_DVD, "entering DVD_LU_SEND_RPC_STATE\n"); + cd_dbg(CD_DVD, "entering DVD_LU_SEND_RPC_STATE\n"); setup_report_key(&cgc, 0, 8); memset(&rpc_state, 0, sizeof(rpc_state_t)); cgc.buffer = (char *) &rpc_state; @@ -1749,7 +1768,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai) /* Set region settings */ case DVD_HOST_SEND_RPC_STATE: - cdinfo(CD_DVD, "entering DVD_HOST_SEND_RPC_STATE\n"); + cd_dbg(CD_DVD, "entering DVD_HOST_SEND_RPC_STATE\n"); setup_send_key(&cgc, 0, 6); buf[1] = 6; buf[4] = ai->hrpcs.pdrc; @@ -1759,7 +1778,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai) break; default: - cdinfo(CD_WARNING, "Invalid DVD key ioctl (%d)\n", ai->type); + cd_dbg(CD_WARNING, "Invalid DVD key ioctl (%d)\n", ai->type); return -ENOTTY; } @@ -1891,7 +1910,8 @@ static int dvd_read_bca(struct cdrom_device_info *cdi, dvd_struct *s, s->bca.len = buf[0] << 8 | buf[1]; if (s->bca.len < 12 || s->bca.len > 188) { - cdinfo(CD_WARNING, "Received invalid BCA length (%d)\n", s->bca.len); + cd_dbg(CD_WARNING, "Received invalid BCA length (%d)\n", + s->bca.len); ret = -EIO; goto out; } @@ -1927,14 +1947,13 @@ static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s, s->manufact.len = buf[0] << 8 | buf[1]; if (s->manufact.len < 0) { - cdinfo(CD_WARNING, "Received invalid manufacture info length" - " (%d)\n", s->manufact.len); + cd_dbg(CD_WARNING, "Received invalid manufacture info length (%d)\n", + s->manufact.len); ret = -EIO; } else { if (s->manufact.len > 2048) { - cdinfo(CD_WARNING, "Received invalid manufacture info " - "length (%d): truncating to 2048\n", - s->manufact.len); + cd_dbg(CD_WARNING, "Received invalid manufacture info length (%d): truncating to 2048\n", + s->manufact.len); s->manufact.len = 2048; } memcpy(s->manufact.value, &buf[4], s->manufact.len); @@ -1965,8 +1984,8 @@ static int dvd_read_struct(struct cdrom_device_info *cdi, dvd_struct *s, return dvd_read_manufact(cdi, s, cgc); default: - cdinfo(CD_WARNING, ": Invalid DVD structure read requested (%d)\n", - s->type); + cd_dbg(CD_WARNING, ": Invalid DVD structure read requested (%d)\n", + s->type); return -EINVAL; } } @@ -2255,7 +2274,7 @@ static int cdrom_ioctl_multisession(struct cdrom_device_info *cdi, u8 requested_format; int ret; - cdinfo(CD_DO_IOCTL, "entering CDROMMULTISESSION\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROMMULTISESSION\n"); if (!(cdi->ops->capability & CDC_MULTI_SESSION)) return -ENOSYS; @@ -2277,13 +2296,13 @@ static int cdrom_ioctl_multisession(struct cdrom_device_info *cdi, if (copy_to_user(argp, &ms_info, sizeof(ms_info))) return -EFAULT; - cdinfo(CD_DO_IOCTL, "CDROMMULTISESSION successful\n"); + cd_dbg(CD_DO_IOCTL, "CDROMMULTISESSION successful\n"); return 0; } static int cdrom_ioctl_eject(struct cdrom_device_info *cdi) { - cdinfo(CD_DO_IOCTL, "entering CDROMEJECT\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROMEJECT\n"); if (!CDROM_CAN(CDC_OPEN_TRAY)) 
return -ENOSYS; @@ -2300,7 +2319,7 @@ static int cdrom_ioctl_eject(struct cdrom_device_info *cdi) static int cdrom_ioctl_closetray(struct cdrom_device_info *cdi) { - cdinfo(CD_DO_IOCTL, "entering CDROMCLOSETRAY\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROMCLOSETRAY\n"); if (!CDROM_CAN(CDC_CLOSE_TRAY)) return -ENOSYS; @@ -2310,7 +2329,7 @@ static int cdrom_ioctl_closetray(struct cdrom_device_info *cdi) static int cdrom_ioctl_eject_sw(struct cdrom_device_info *cdi, unsigned long arg) { - cdinfo(CD_DO_IOCTL, "entering CDROMEJECT_SW\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROMEJECT_SW\n"); if (!CDROM_CAN(CDC_OPEN_TRAY)) return -ENOSYS; @@ -2329,7 +2348,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi, struct cdrom_changer_info *info; int ret; - cdinfo(CD_DO_IOCTL, "entering CDROM_MEDIA_CHANGED\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROM_MEDIA_CHANGED\n"); if (!CDROM_CAN(CDC_MEDIA_CHANGED)) return -ENOSYS; @@ -2355,7 +2374,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi, static int cdrom_ioctl_set_options(struct cdrom_device_info *cdi, unsigned long arg) { - cdinfo(CD_DO_IOCTL, "entering CDROM_SET_OPTIONS\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROM_SET_OPTIONS\n"); /* * Options need to be in sync with capability. @@ -2383,7 +2402,7 @@ static int cdrom_ioctl_set_options(struct cdrom_device_info *cdi, static int cdrom_ioctl_clear_options(struct cdrom_device_info *cdi, unsigned long arg) { - cdinfo(CD_DO_IOCTL, "entering CDROM_CLEAR_OPTIONS\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROM_CLEAR_OPTIONS\n"); cdi->options &= ~(int) arg; return cdi->options; @@ -2392,7 +2411,7 @@ static int cdrom_ioctl_clear_options(struct cdrom_device_info *cdi, static int cdrom_ioctl_select_speed(struct cdrom_device_info *cdi, unsigned long arg) { - cdinfo(CD_DO_IOCTL, "entering CDROM_SELECT_SPEED\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROM_SELECT_SPEED\n"); if (!CDROM_CAN(CDC_SELECT_SPEED)) return -ENOSYS; @@ -2402,7 +2421,7 @@ static int cdrom_ioctl_select_speed(struct cdrom_device_info *cdi, static int cdrom_ioctl_select_disc(struct cdrom_device_info *cdi, unsigned long arg) { - cdinfo(CD_DO_IOCTL, "entering CDROM_SELECT_DISC\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROM_SELECT_DISC\n"); if (!CDROM_CAN(CDC_SELECT_DISC)) return -ENOSYS; @@ -2420,14 +2439,14 @@ static int cdrom_ioctl_select_disc(struct cdrom_device_info *cdi, if (cdi->ops->select_disc) return cdi->ops->select_disc(cdi, arg); - cdinfo(CD_CHANGER, "Using generic cdrom_select_disc()\n"); + cd_dbg(CD_CHANGER, "Using generic cdrom_select_disc()\n"); return cdrom_select_disc(cdi, arg); } static int cdrom_ioctl_reset(struct cdrom_device_info *cdi, struct block_device *bdev) { - cdinfo(CD_DO_IOCTL, "entering CDROM_RESET\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROM_RESET\n"); if (!capable(CAP_SYS_ADMIN)) return -EACCES; @@ -2440,7 +2459,7 @@ static int cdrom_ioctl_reset(struct cdrom_device_info *cdi, static int cdrom_ioctl_lock_door(struct cdrom_device_info *cdi, unsigned long arg) { - cdinfo(CD_DO_IOCTL, "%socking door.\n", arg ? "L" : "Unl"); + cd_dbg(CD_DO_IOCTL, "%socking door\n", arg ? "L" : "Unl"); if (!CDROM_CAN(CDC_LOCK)) return -EDRIVE_CANT_DO_THIS; @@ -2459,7 +2478,7 @@ static int cdrom_ioctl_lock_door(struct cdrom_device_info *cdi, static int cdrom_ioctl_debug(struct cdrom_device_info *cdi, unsigned long arg) { - cdinfo(CD_DO_IOCTL, "%sabling debug.\n", arg ? "En" : "Dis"); + cd_dbg(CD_DO_IOCTL, "%sabling debug\n", arg ? 
"En" : "Dis"); if (!capable(CAP_SYS_ADMIN)) return -EACCES; @@ -2469,7 +2488,7 @@ static int cdrom_ioctl_debug(struct cdrom_device_info *cdi, static int cdrom_ioctl_get_capability(struct cdrom_device_info *cdi) { - cdinfo(CD_DO_IOCTL, "entering CDROM_GET_CAPABILITY\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROM_GET_CAPABILITY\n"); return (cdi->ops->capability & ~cdi->mask); } @@ -2485,7 +2504,7 @@ static int cdrom_ioctl_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn mcn; int ret; - cdinfo(CD_DO_IOCTL, "entering CDROM_GET_MCN\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROM_GET_MCN\n"); if (!(cdi->ops->capability & CDC_MCN)) return -ENOSYS; @@ -2495,14 +2514,14 @@ static int cdrom_ioctl_get_mcn(struct cdrom_device_info *cdi, if (copy_to_user(argp, &mcn, sizeof(mcn))) return -EFAULT; - cdinfo(CD_DO_IOCTL, "CDROM_GET_MCN successful\n"); + cd_dbg(CD_DO_IOCTL, "CDROM_GET_MCN successful\n"); return 0; } static int cdrom_ioctl_drive_status(struct cdrom_device_info *cdi, unsigned long arg) { - cdinfo(CD_DO_IOCTL, "entering CDROM_DRIVE_STATUS\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROM_DRIVE_STATUS\n"); if (!(cdi->ops->capability & CDC_DRIVE_STATUS)) return -ENOSYS; @@ -2535,7 +2554,7 @@ static int cdrom_ioctl_disc_status(struct cdrom_device_info *cdi) { tracktype tracks; - cdinfo(CD_DO_IOCTL, "entering CDROM_DISC_STATUS\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROM_DISC_STATUS\n"); cdrom_count_tracks(cdi, &tracks); if (tracks.error) @@ -2557,13 +2576,13 @@ static int cdrom_ioctl_disc_status(struct cdrom_device_info *cdi) return CDS_DATA_1; /* Policy mode off */ - cdinfo(CD_WARNING,"This disc doesn't have any tracks I recognize!\n"); + cd_dbg(CD_WARNING, "This disc doesn't have any tracks I recognize!\n"); return CDS_NO_INFO; } static int cdrom_ioctl_changer_nslots(struct cdrom_device_info *cdi) { - cdinfo(CD_DO_IOCTL, "entering CDROM_CHANGER_NSLOTS\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROM_CHANGER_NSLOTS\n"); return cdi->capacity; } @@ -2574,7 +2593,7 @@ static int cdrom_ioctl_get_subchnl(struct cdrom_device_info *cdi, u8 requested, back; int ret; - /* cdinfo(CD_DO_IOCTL,"entering CDROMSUBCHNL\n");*/ + /* cd_dbg(CD_DO_IOCTL,"entering CDROMSUBCHNL\n");*/ if (copy_from_user(&q, argp, sizeof(q))) return -EFAULT; @@ -2594,7 +2613,7 @@ static int cdrom_ioctl_get_subchnl(struct cdrom_device_info *cdi, if (copy_to_user(argp, &q, sizeof(q))) return -EFAULT; - /* cdinfo(CD_DO_IOCTL, "CDROMSUBCHNL successful\n"); */ + /* cd_dbg(CD_DO_IOCTL, "CDROMSUBCHNL successful\n"); */ return 0; } @@ -2604,7 +2623,7 @@ static int cdrom_ioctl_read_tochdr(struct cdrom_device_info *cdi, struct cdrom_tochdr header; int ret; - /* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCHDR\n"); */ + /* cd_dbg(CD_DO_IOCTL, "entering CDROMREADTOCHDR\n"); */ if (copy_from_user(&header, argp, sizeof(header))) return -EFAULT; @@ -2615,7 +2634,7 @@ static int cdrom_ioctl_read_tochdr(struct cdrom_device_info *cdi, if (copy_to_user(argp, &header, sizeof(header))) return -EFAULT; - /* cdinfo(CD_DO_IOCTL, "CDROMREADTOCHDR successful\n"); */ + /* cd_dbg(CD_DO_IOCTL, "CDROMREADTOCHDR successful\n"); */ return 0; } @@ -2626,7 +2645,7 @@ static int cdrom_ioctl_read_tocentry(struct cdrom_device_info *cdi, u8 requested_format; int ret; - /* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCENTRY\n"); */ + /* cd_dbg(CD_DO_IOCTL, "entering CDROMREADTOCENTRY\n"); */ if (copy_from_user(&entry, argp, sizeof(entry))) return -EFAULT; @@ -2643,7 +2662,7 @@ static int cdrom_ioctl_read_tocentry(struct cdrom_device_info *cdi, if (copy_to_user(argp, &entry, sizeof(entry))) 
return -EFAULT; - /* cdinfo(CD_DO_IOCTL, "CDROMREADTOCENTRY successful\n"); */ + /* cd_dbg(CD_DO_IOCTL, "CDROMREADTOCENTRY successful\n"); */ return 0; } @@ -2652,7 +2671,7 @@ static int cdrom_ioctl_play_msf(struct cdrom_device_info *cdi, { struct cdrom_msf msf; - cdinfo(CD_DO_IOCTL, "entering CDROMPLAYMSF\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROMPLAYMSF\n"); if (!CDROM_CAN(CDC_PLAY_AUDIO)) return -ENOSYS; @@ -2667,7 +2686,7 @@ static int cdrom_ioctl_play_trkind(struct cdrom_device_info *cdi, struct cdrom_ti ti; int ret; - cdinfo(CD_DO_IOCTL, "entering CDROMPLAYTRKIND\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROMPLAYTRKIND\n"); if (!CDROM_CAN(CDC_PLAY_AUDIO)) return -ENOSYS; @@ -2684,7 +2703,7 @@ static int cdrom_ioctl_volctrl(struct cdrom_device_info *cdi, { struct cdrom_volctrl volume; - cdinfo(CD_DO_IOCTL, "entering CDROMVOLCTRL\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROMVOLCTRL\n"); if (!CDROM_CAN(CDC_PLAY_AUDIO)) return -ENOSYS; @@ -2699,7 +2718,7 @@ static int cdrom_ioctl_volread(struct cdrom_device_info *cdi, struct cdrom_volctrl volume; int ret; - cdinfo(CD_DO_IOCTL, "entering CDROMVOLREAD\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROMVOLREAD\n"); if (!CDROM_CAN(CDC_PLAY_AUDIO)) return -ENOSYS; @@ -2718,7 +2737,7 @@ static int cdrom_ioctl_audioctl(struct cdrom_device_info *cdi, { int ret; - cdinfo(CD_DO_IOCTL, "doing audio ioctl (start/stop/pause/resume)\n"); + cd_dbg(CD_DO_IOCTL, "doing audio ioctl (start/stop/pause/resume)\n"); if (!CDROM_CAN(CDC_PLAY_AUDIO)) return -ENOSYS; @@ -2729,103 +2748,6 @@ static int cdrom_ioctl_audioctl(struct cdrom_device_info *cdi, } /* - * Just about every imaginable ioctl is supported in the Uniform layer - * these days. - * ATAPI / SCSI specific code now mainly resides in mmc_ioctl(). - */ -int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev, - fmode_t mode, unsigned int cmd, unsigned long arg) -{ - void __user *argp = (void __user *)arg; - int ret; - - /* - * Try the generic SCSI command ioctl's first. - */ - ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp); - if (ret != -ENOTTY) - return ret; - - switch (cmd) { - case CDROMMULTISESSION: - return cdrom_ioctl_multisession(cdi, argp); - case CDROMEJECT: - return cdrom_ioctl_eject(cdi); - case CDROMCLOSETRAY: - return cdrom_ioctl_closetray(cdi); - case CDROMEJECT_SW: - return cdrom_ioctl_eject_sw(cdi, arg); - case CDROM_MEDIA_CHANGED: - return cdrom_ioctl_media_changed(cdi, arg); - case CDROM_SET_OPTIONS: - return cdrom_ioctl_set_options(cdi, arg); - case CDROM_CLEAR_OPTIONS: - return cdrom_ioctl_clear_options(cdi, arg); - case CDROM_SELECT_SPEED: - return cdrom_ioctl_select_speed(cdi, arg); - case CDROM_SELECT_DISC: - return cdrom_ioctl_select_disc(cdi, arg); - case CDROMRESET: - return cdrom_ioctl_reset(cdi, bdev); - case CDROM_LOCKDOOR: - return cdrom_ioctl_lock_door(cdi, arg); - case CDROM_DEBUG: - return cdrom_ioctl_debug(cdi, arg); - case CDROM_GET_CAPABILITY: - return cdrom_ioctl_get_capability(cdi); - case CDROM_GET_MCN: - return cdrom_ioctl_get_mcn(cdi, argp); - case CDROM_DRIVE_STATUS: - return cdrom_ioctl_drive_status(cdi, arg); - case CDROM_DISC_STATUS: - return cdrom_ioctl_disc_status(cdi); - case CDROM_CHANGER_NSLOTS: - return cdrom_ioctl_changer_nslots(cdi); - } - - /* - * Use the ioctls that are implemented through the generic_packet() - * interface. this may look at bit funny, but if -ENOTTY is - * returned that particular ioctl is not implemented and we - * let it go through the device specific ones. 
- */ - if (CDROM_CAN(CDC_GENERIC_PACKET)) { - ret = mmc_ioctl(cdi, cmd, arg); - if (ret != -ENOTTY) - return ret; - } - - /* - * Note: most of the cdinfo() calls are commented out here, - * because they fill up the sys log when CD players poll - * the drive. - */ - switch (cmd) { - case CDROMSUBCHNL: - return cdrom_ioctl_get_subchnl(cdi, argp); - case CDROMREADTOCHDR: - return cdrom_ioctl_read_tochdr(cdi, argp); - case CDROMREADTOCENTRY: - return cdrom_ioctl_read_tocentry(cdi, argp); - case CDROMPLAYMSF: - return cdrom_ioctl_play_msf(cdi, argp); - case CDROMPLAYTRKIND: - return cdrom_ioctl_play_trkind(cdi, argp); - case CDROMVOLCTRL: - return cdrom_ioctl_volctrl(cdi, argp); - case CDROMVOLREAD: - return cdrom_ioctl_volread(cdi, argp); - case CDROMSTART: - case CDROMSTOP: - case CDROMPAUSE: - case CDROMRESUME: - return cdrom_ioctl_audioctl(cdi, cmd); - } - - return -ENOSYS; -} - -/* * Required when we need to use READ_10 to issue other than 2048 block * reads */ @@ -2854,10 +2776,158 @@ static int cdrom_switch_blocksize(struct cdrom_device_info *cdi, int size) return cdo->generic_packet(cdi, &cgc); } +static int cdrom_get_track_info(struct cdrom_device_info *cdi, + __u16 track, __u8 type, track_information *ti) +{ + struct cdrom_device_ops *cdo = cdi->ops; + struct packet_command cgc; + int ret, buflen; + + init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ); + cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO; + cgc.cmd[1] = type & 3; + cgc.cmd[4] = (track & 0xff00) >> 8; + cgc.cmd[5] = track & 0xff; + cgc.cmd[8] = 8; + cgc.quiet = 1; + + ret = cdo->generic_packet(cdi, &cgc); + if (ret) + return ret; + + buflen = be16_to_cpu(ti->track_information_length) + + sizeof(ti->track_information_length); + + if (buflen > sizeof(track_information)) + buflen = sizeof(track_information); + + cgc.cmd[8] = cgc.buflen = buflen; + ret = cdo->generic_packet(cdi, &cgc); + if (ret) + return ret; + + /* return actual fill size */ + return buflen; +} + +/* return the last written block on the CD-R media. this is for the udf + file system. */ +int cdrom_get_last_written(struct cdrom_device_info *cdi, long *last_written) +{ + struct cdrom_tocentry toc; + disc_information di; + track_information ti; + __u32 last_track; + int ret = -1, ti_size; + + if (!CDROM_CAN(CDC_GENERIC_PACKET)) + goto use_toc; + + ret = cdrom_get_disc_info(cdi, &di); + if (ret < (int)(offsetof(typeof(di), last_track_lsb) + + sizeof(di.last_track_lsb))) + goto use_toc; + + /* if unit didn't return msb, it's zeroed by cdrom_get_disc_info */ + last_track = (di.last_track_msb << 8) | di.last_track_lsb; + ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti); + if (ti_size < (int)offsetof(typeof(ti), track_start)) + goto use_toc; + + /* if this track is blank, try the previous. */ + if (ti.blank) { + if (last_track == 1) + goto use_toc; + last_track--; + ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti); + } + + if (ti_size < (int)(offsetof(typeof(ti), track_size) + + sizeof(ti.track_size))) + goto use_toc; + + /* if last recorded field is valid, return it. 
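+    (lra_v is the "last recorded address valid" bit of the drive's
+    READ TRACK/RZONE INFORMATION reply; when it is clear, the address
+    is estimated from track_start + track_size below instead)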
*/ + if (ti.lra_v && ti_size >= (int)(offsetof(typeof(ti), last_rec_address) + + sizeof(ti.last_rec_address))) { + *last_written = be32_to_cpu(ti.last_rec_address); + } else { + /* make it up instead */ + *last_written = be32_to_cpu(ti.track_start) + + be32_to_cpu(ti.track_size); + if (ti.free_blocks) + *last_written -= (be32_to_cpu(ti.free_blocks) + 7); + } + return 0; + + /* this is where we end up if the drive either can't do a + GPCMD_READ_DISC_INFO or GPCMD_READ_TRACK_RZONE_INFO or if + it doesn't give enough information or fails. then we return + the toc contents. */ +use_toc: + toc.cdte_format = CDROM_MSF; + toc.cdte_track = CDROM_LEADOUT; + if ((ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &toc))) + return ret; + sanitize_format(&toc.cdte_addr, &toc.cdte_format, CDROM_LBA); + *last_written = toc.cdte_addr.lba; + return 0; +} + +/* return the next writable block. also for udf file system. */ +static int cdrom_get_next_writable(struct cdrom_device_info *cdi, + long *next_writable) +{ + disc_information di; + track_information ti; + __u16 last_track; + int ret, ti_size; + + if (!CDROM_CAN(CDC_GENERIC_PACKET)) + goto use_last_written; + + ret = cdrom_get_disc_info(cdi, &di); + if (ret < 0 || ret < offsetof(typeof(di), last_track_lsb) + + sizeof(di.last_track_lsb)) + goto use_last_written; + + /* if unit didn't return msb, it's zeroed by cdrom_get_disc_info */ + last_track = (di.last_track_msb << 8) | di.last_track_lsb; + ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti); + if (ti_size < 0 || ti_size < offsetof(typeof(ti), track_start)) + goto use_last_written; + + /* if this track is blank, try the previous. */ + if (ti.blank) { + if (last_track == 1) + goto use_last_written; + last_track--; + ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti); + if (ti_size < 0) + goto use_last_written; + } + + /* if next recordable address field is valid, use it. */ + if (ti.nwa_v && ti_size >= offsetof(typeof(ti), next_writable) + + sizeof(ti.next_writable)) { + *next_writable = be32_to_cpu(ti.next_writable); + return 0; + } + +use_last_written: + ret = cdrom_get_last_written(cdi, next_writable); + if (ret) { + *next_writable = 0; + return ret; + } else { + *next_writable += 7; + return 0; + } +} + static noinline int mmc_ioctl_cdrom_read_data(struct cdrom_device_info *cdi, - void __user *arg, - struct packet_command *cgc, - int cmd) + void __user *arg, + struct packet_command *cgc, + int cmd) { struct request_sense sense; struct cdrom_msf msf; @@ -2876,7 +2946,8 @@ static noinline int mmc_ioctl_cdrom_read_data(struct cdrom_device_info *cdi, blocksize = CD_FRAMESIZE_RAW0; break; } - IOCTL_IN(arg, struct cdrom_msf, msf); + if (copy_from_user(&msf, (struct cdrom_msf __user *)arg, sizeof(msf))) + return -EFAULT; lba = msf_to_lba(msf.cdmsf_min0, msf.cdmsf_sec0, msf.cdmsf_frame0); /* FIXME: we need upper bound checking, too!! 
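	   only lba < 0 is rejected just below; a proper upper bound
	   would be the leadout address from the disc TOC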
*/ if (lba < 0) @@ -2891,8 +2962,8 @@ static noinline int mmc_ioctl_cdrom_read_data(struct cdrom_device_info *cdi, cgc->data_direction = CGC_DATA_READ; ret = cdrom_read_block(cdi, cgc, lba, 1, format, blocksize); if (ret && sense.sense_key == 0x05 && - sense.asc == 0x20 && - sense.ascq == 0x00) { + sense.asc == 0x20 && + sense.ascq == 0x00) { /* * SCSI-II devices are not required to support * READ_CD, so let's try switching block size @@ -2913,12 +2984,14 @@ out: } static noinline int mmc_ioctl_cdrom_read_audio(struct cdrom_device_info *cdi, - void __user *arg) + void __user *arg) { struct cdrom_read_audio ra; int lba; - IOCTL_IN(arg, struct cdrom_read_audio, ra); + if (copy_from_user(&ra, (struct cdrom_read_audio __user *)arg, + sizeof(ra))) + return -EFAULT; if (ra.addr_format == CDROM_MSF) lba = msf_to_lba(ra.addr.msf.minute, @@ -2937,12 +3010,13 @@ static noinline int mmc_ioctl_cdrom_read_audio(struct cdrom_device_info *cdi, } static noinline int mmc_ioctl_cdrom_subchannel(struct cdrom_device_info *cdi, - void __user *arg) + void __user *arg) { int ret; struct cdrom_subchnl q; u_char requested, back; - IOCTL_IN(arg, struct cdrom_subchnl, q); + if (copy_from_user(&q, (struct cdrom_subchnl __user *)arg, sizeof(q))) + return -EFAULT; requested = q.cdsc_format; if (!((requested == CDROM_MSF) || (requested == CDROM_LBA))) @@ -2954,19 +3028,21 @@ static noinline int mmc_ioctl_cdrom_subchannel(struct cdrom_device_info *cdi, back = q.cdsc_format; /* local copy */ sanitize_format(&q.cdsc_absaddr, &back, requested); sanitize_format(&q.cdsc_reladdr, &q.cdsc_format, requested); - IOCTL_OUT(arg, struct cdrom_subchnl, q); - /* cdinfo(CD_DO_IOCTL, "CDROMSUBCHNL successful\n"); */ + if (copy_to_user((struct cdrom_subchnl __user *)arg, &q, sizeof(q))) + return -EFAULT; + /* cd_dbg(CD_DO_IOCTL, "CDROMSUBCHNL successful\n"); */ return 0; } static noinline int mmc_ioctl_cdrom_play_msf(struct cdrom_device_info *cdi, - void __user *arg, - struct packet_command *cgc) + void __user *arg, + struct packet_command *cgc) { struct cdrom_device_ops *cdo = cdi->ops; struct cdrom_msf msf; - cdinfo(CD_DO_IOCTL, "entering CDROMPLAYMSF\n"); - IOCTL_IN(arg, struct cdrom_msf, msf); + cd_dbg(CD_DO_IOCTL, "entering CDROMPLAYMSF\n"); + if (copy_from_user(&msf, (struct cdrom_msf __user *)arg, sizeof(msf))) + return -EFAULT; cgc->cmd[0] = GPCMD_PLAY_AUDIO_MSF; cgc->cmd[3] = msf.cdmsf_min0; cgc->cmd[4] = msf.cdmsf_sec0; @@ -2979,13 +3055,14 @@ static noinline int mmc_ioctl_cdrom_play_msf(struct cdrom_device_info *cdi, } static noinline int mmc_ioctl_cdrom_play_blk(struct cdrom_device_info *cdi, - void __user *arg, - struct packet_command *cgc) + void __user *arg, + struct packet_command *cgc) { struct cdrom_device_ops *cdo = cdi->ops; struct cdrom_blk blk; - cdinfo(CD_DO_IOCTL, "entering CDROMPLAYBLK\n"); - IOCTL_IN(arg, struct cdrom_blk, blk); + cd_dbg(CD_DO_IOCTL, "entering CDROMPLAYBLK\n"); + if (copy_from_user(&blk, (struct cdrom_blk __user *)arg, sizeof(blk))) + return -EFAULT; cgc->cmd[0] = GPCMD_PLAY_AUDIO_10; cgc->cmd[2] = (blk.from >> 24) & 0xff; cgc->cmd[3] = (blk.from >> 16) & 0xff; @@ -2998,9 +3075,9 @@ static noinline int mmc_ioctl_cdrom_play_blk(struct cdrom_device_info *cdi, } static noinline int mmc_ioctl_cdrom_volume(struct cdrom_device_info *cdi, - void __user *arg, - struct packet_command *cgc, - unsigned int cmd) + void __user *arg, + struct packet_command *cgc, + unsigned int cmd) { struct cdrom_volctrl volctrl; unsigned char buffer[32]; @@ -3008,9 +3085,11 @@ static noinline int 
mmc_ioctl_cdrom_volume(struct cdrom_device_info *cdi, unsigned short offset; int ret; - cdinfo(CD_DO_IOCTL, "entering CDROMVOLUME\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROMVOLUME\n"); - IOCTL_IN(arg, struct cdrom_volctrl, volctrl); + if (copy_from_user(&volctrl, (struct cdrom_volctrl __user *)arg, + sizeof(volctrl))) + return -EFAULT; cgc->buffer = buffer; cgc->buflen = 24; @@ -3030,14 +3109,14 @@ static noinline int mmc_ioctl_cdrom_volume(struct cdrom_device_info *cdi, if (offset + 16 > cgc->buflen) { cgc->buflen = offset + 16; ret = cdrom_mode_sense(cdi, cgc, - GPMODE_AUDIO_CTL_PAGE, 0); + GPMODE_AUDIO_CTL_PAGE, 0); if (ret) return ret; } /* sanity check */ if ((buffer[offset] & 0x3f) != GPMODE_AUDIO_CTL_PAGE || - buffer[offset + 1] < 14) + buffer[offset + 1] < 14) return -EINVAL; /* now we have the current volume settings. if it was only @@ -3047,7 +3126,9 @@ static noinline int mmc_ioctl_cdrom_volume(struct cdrom_device_info *cdi, volctrl.channel1 = buffer[offset+11]; volctrl.channel2 = buffer[offset+13]; volctrl.channel3 = buffer[offset+15]; - IOCTL_OUT(arg, struct cdrom_volctrl, volctrl); + if (copy_to_user((struct cdrom_volctrl __user *)arg, &volctrl, + sizeof(volctrl))) + return -EFAULT; return 0; } @@ -3069,11 +3150,11 @@ static noinline int mmc_ioctl_cdrom_volume(struct cdrom_device_info *cdi, } static noinline int mmc_ioctl_cdrom_start_stop(struct cdrom_device_info *cdi, - struct packet_command *cgc, - int cmd) + struct packet_command *cgc, + int cmd) { struct cdrom_device_ops *cdo = cdi->ops; - cdinfo(CD_DO_IOCTL, "entering CDROMSTART/CDROMSTOP\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROMSTART/CDROMSTOP\n"); cgc->cmd[0] = GPCMD_START_STOP_UNIT; cgc->cmd[1] = 1; cgc->cmd[4] = (cmd == CDROMSTART) ? 1 : 0; @@ -3082,11 +3163,11 @@ static noinline int mmc_ioctl_cdrom_start_stop(struct cdrom_device_info *cdi, } static noinline int mmc_ioctl_cdrom_pause_resume(struct cdrom_device_info *cdi, - struct packet_command *cgc, - int cmd) + struct packet_command *cgc, + int cmd) { struct cdrom_device_ops *cdo = cdi->ops; - cdinfo(CD_DO_IOCTL, "entering CDROMPAUSE/CDROMRESUME\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROMPAUSE/CDROMRESUME\n"); cgc->cmd[0] = GPCMD_PAUSE_RESUME; cgc->cmd[8] = (cmd == CDROMRESUME) ? 
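	/* per MMC, byte 8 bit 0 of PAUSE/RESUME is the Resume flag */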
1 : 0; cgc->data_direction = CGC_DATA_NONE; @@ -3094,8 +3175,8 @@ static noinline int mmc_ioctl_cdrom_pause_resume(struct cdrom_device_info *cdi, } static noinline int mmc_ioctl_dvd_read_struct(struct cdrom_device_info *cdi, - void __user *arg, - struct packet_command *cgc) + void __user *arg, + struct packet_command *cgc) { int ret; dvd_struct *s; @@ -3108,7 +3189,7 @@ static noinline int mmc_ioctl_dvd_read_struct(struct cdrom_device_info *cdi, if (!s) return -ENOMEM; - cdinfo(CD_DO_IOCTL, "entering DVD_READ_STRUCT\n"); + cd_dbg(CD_DO_IOCTL, "entering DVD_READ_STRUCT\n"); if (copy_from_user(s, arg, size)) { kfree(s); return -EFAULT; @@ -3126,44 +3207,48 @@ out: } static noinline int mmc_ioctl_dvd_auth(struct cdrom_device_info *cdi, - void __user *arg) + void __user *arg) { int ret; dvd_authinfo ai; if (!CDROM_CAN(CDC_DVD)) return -ENOSYS; - cdinfo(CD_DO_IOCTL, "entering DVD_AUTH\n"); - IOCTL_IN(arg, dvd_authinfo, ai); + cd_dbg(CD_DO_IOCTL, "entering DVD_AUTH\n"); + if (copy_from_user(&ai, (dvd_authinfo __user *)arg, sizeof(ai))) + return -EFAULT; ret = dvd_do_auth(cdi, &ai); if (ret) return ret; - IOCTL_OUT(arg, dvd_authinfo, ai); + if (copy_to_user((dvd_authinfo __user *)arg, &ai, sizeof(ai))) + return -EFAULT; return 0; } static noinline int mmc_ioctl_cdrom_next_writable(struct cdrom_device_info *cdi, - void __user *arg) + void __user *arg) { int ret; long next = 0; - cdinfo(CD_DO_IOCTL, "entering CDROM_NEXT_WRITABLE\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROM_NEXT_WRITABLE\n"); ret = cdrom_get_next_writable(cdi, &next); if (ret) return ret; - IOCTL_OUT(arg, long, next); + if (copy_to_user((long __user *)arg, &next, sizeof(next))) + return -EFAULT; return 0; } static noinline int mmc_ioctl_cdrom_last_written(struct cdrom_device_info *cdi, - void __user *arg) + void __user *arg) { int ret; long last = 0; - cdinfo(CD_DO_IOCTL, "entering CDROM_LAST_WRITTEN\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROM_LAST_WRITTEN\n"); ret = cdrom_get_last_written(cdi, &last); if (ret) return ret; - IOCTL_OUT(arg, long, last); + if (copy_to_user((long __user *)arg, &last, sizeof(last))) + return -EFAULT; return 0; } @@ -3212,181 +3297,101 @@ static int mmc_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, return -ENOTTY; } -static int cdrom_get_track_info(struct cdrom_device_info *cdi, __u16 track, __u8 type, - track_information *ti) -{ - struct cdrom_device_ops *cdo = cdi->ops; - struct packet_command cgc; - int ret, buflen; - - init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ); - cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO; - cgc.cmd[1] = type & 3; - cgc.cmd[4] = (track & 0xff00) >> 8; - cgc.cmd[5] = track & 0xff; - cgc.cmd[8] = 8; - cgc.quiet = 1; - - if ((ret = cdo->generic_packet(cdi, &cgc))) - return ret; - - buflen = be16_to_cpu(ti->track_information_length) + - sizeof(ti->track_information_length); - - if (buflen > sizeof(track_information)) - buflen = sizeof(track_information); - - cgc.cmd[8] = cgc.buflen = buflen; - if ((ret = cdo->generic_packet(cdi, &cgc))) - return ret; - - /* return actual fill size */ - return buflen; -} - -/* requires CD R/RW */ -static int cdrom_get_disc_info(struct cdrom_device_info *cdi, disc_information *di) +/* + * Just about every imaginable ioctl is supported in the Uniform layer + * these days. + * ATAPI / SCSI specific code now mainly resides in mmc_ioctl(). 
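+ *
+ * Dispatch order: the generic SCSI command ioctls are tried first,
+ * then the capability-checked ioctls, then mmc_ioctl() for drives
+ * with CDC_GENERIC_PACKET, and finally the audio/TOC ioctls.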
+ */ +int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev, + fmode_t mode, unsigned int cmd, unsigned long arg) { - struct cdrom_device_ops *cdo = cdi->ops; - struct packet_command cgc; - int ret, buflen; - - /* set up command and get the disc info */ - init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ); - cgc.cmd[0] = GPCMD_READ_DISC_INFO; - cgc.cmd[8] = cgc.buflen = 2; - cgc.quiet = 1; - - if ((ret = cdo->generic_packet(cdi, &cgc))) - return ret; + void __user *argp = (void __user *)arg; + int ret; - /* not all drives have the same disc_info length, so requeue - * packet with the length the drive tells us it can supply + /* + * Try the generic SCSI command ioctl's first. */ - buflen = be16_to_cpu(di->disc_information_length) + - sizeof(di->disc_information_length); - - if (buflen > sizeof(disc_information)) - buflen = sizeof(disc_information); - - cgc.cmd[8] = cgc.buflen = buflen; - if ((ret = cdo->generic_packet(cdi, &cgc))) + ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp); + if (ret != -ENOTTY) return ret; - /* return actual fill size */ - return buflen; -} - -/* return the last written block on the CD-R media. this is for the udf - file system. */ -int cdrom_get_last_written(struct cdrom_device_info *cdi, long *last_written) -{ - struct cdrom_tocentry toc; - disc_information di; - track_information ti; - __u32 last_track; - int ret = -1, ti_size; - - if (!CDROM_CAN(CDC_GENERIC_PACKET)) - goto use_toc; - - ret = cdrom_get_disc_info(cdi, &di); - if (ret < (int)(offsetof(typeof(di), last_track_lsb) - + sizeof(di.last_track_lsb))) - goto use_toc; - - /* if unit didn't return msb, it's zeroed by cdrom_get_disc_info */ - last_track = (di.last_track_msb << 8) | di.last_track_lsb; - ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti); - if (ti_size < (int)offsetof(typeof(ti), track_start)) - goto use_toc; - - /* if this track is blank, try the previous. */ - if (ti.blank) { - if (last_track==1) - goto use_toc; - last_track--; - ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti); - } - - if (ti_size < (int)(offsetof(typeof(ti), track_size) - + sizeof(ti.track_size))) - goto use_toc; - - /* if last recorded field is valid, return it. 
*/ - if (ti.lra_v && ti_size >= (int)(offsetof(typeof(ti), last_rec_address) - + sizeof(ti.last_rec_address))) { - *last_written = be32_to_cpu(ti.last_rec_address); - } else { - /* make it up instead */ - *last_written = be32_to_cpu(ti.track_start) + - be32_to_cpu(ti.track_size); - if (ti.free_blocks) - *last_written -= (be32_to_cpu(ti.free_blocks) + 7); + switch (cmd) { + case CDROMMULTISESSION: + return cdrom_ioctl_multisession(cdi, argp); + case CDROMEJECT: + return cdrom_ioctl_eject(cdi); + case CDROMCLOSETRAY: + return cdrom_ioctl_closetray(cdi); + case CDROMEJECT_SW: + return cdrom_ioctl_eject_sw(cdi, arg); + case CDROM_MEDIA_CHANGED: + return cdrom_ioctl_media_changed(cdi, arg); + case CDROM_SET_OPTIONS: + return cdrom_ioctl_set_options(cdi, arg); + case CDROM_CLEAR_OPTIONS: + return cdrom_ioctl_clear_options(cdi, arg); + case CDROM_SELECT_SPEED: + return cdrom_ioctl_select_speed(cdi, arg); + case CDROM_SELECT_DISC: + return cdrom_ioctl_select_disc(cdi, arg); + case CDROMRESET: + return cdrom_ioctl_reset(cdi, bdev); + case CDROM_LOCKDOOR: + return cdrom_ioctl_lock_door(cdi, arg); + case CDROM_DEBUG: + return cdrom_ioctl_debug(cdi, arg); + case CDROM_GET_CAPABILITY: + return cdrom_ioctl_get_capability(cdi); + case CDROM_GET_MCN: + return cdrom_ioctl_get_mcn(cdi, argp); + case CDROM_DRIVE_STATUS: + return cdrom_ioctl_drive_status(cdi, arg); + case CDROM_DISC_STATUS: + return cdrom_ioctl_disc_status(cdi); + case CDROM_CHANGER_NSLOTS: + return cdrom_ioctl_changer_nslots(cdi); } - return 0; - /* this is where we end up if the drive either can't do a - GPCMD_READ_DISC_INFO or GPCMD_READ_TRACK_RZONE_INFO or if - it doesn't give enough information or fails. then we return - the toc contents. */ -use_toc: - toc.cdte_format = CDROM_MSF; - toc.cdte_track = CDROM_LEADOUT; - if ((ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &toc))) - return ret; - sanitize_format(&toc.cdte_addr, &toc.cdte_format, CDROM_LBA); - *last_written = toc.cdte_addr.lba; - return 0; -} - -/* return the next writable block. also for udf file system. */ -static int cdrom_get_next_writable(struct cdrom_device_info *cdi, long *next_writable) -{ - disc_information di; - track_information ti; - __u16 last_track; - int ret, ti_size; - - if (!CDROM_CAN(CDC_GENERIC_PACKET)) - goto use_last_written; - - ret = cdrom_get_disc_info(cdi, &di); - if (ret < 0 || ret < offsetof(typeof(di), last_track_lsb) - + sizeof(di.last_track_lsb)) - goto use_last_written; - - /* if unit didn't return msb, it's zeroed by cdrom_get_disc_info */ - last_track = (di.last_track_msb << 8) | di.last_track_lsb; - ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti); - if (ti_size < 0 || ti_size < offsetof(typeof(ti), track_start)) - goto use_last_written; - - /* if this track is blank, try the previous. */ - if (ti.blank) { - if (last_track == 1) - goto use_last_written; - last_track--; - ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti); - if (ti_size < 0) - goto use_last_written; + /* + * Use the ioctls that are implemented through the generic_packet() + * interface. this may look at bit funny, but if -ENOTTY is + * returned that particular ioctl is not implemented and we + * let it go through the device specific ones. + */ + if (CDROM_CAN(CDC_GENERIC_PACKET)) { + ret = mmc_ioctl(cdi, cmd, arg); + if (ret != -ENOTTY) + return ret; } - /* if next recordable address field is valid, use it. 
*/ - if (ti.nwa_v && ti_size >= offsetof(typeof(ti), next_writable) - + sizeof(ti.next_writable)) { - *next_writable = be32_to_cpu(ti.next_writable); - return 0; + /* + * Note: most of the cd_dbg() calls are commented out here, + * because they fill up the sys log when CD players poll + * the drive. + */ + switch (cmd) { + case CDROMSUBCHNL: + return cdrom_ioctl_get_subchnl(cdi, argp); + case CDROMREADTOCHDR: + return cdrom_ioctl_read_tochdr(cdi, argp); + case CDROMREADTOCENTRY: + return cdrom_ioctl_read_tocentry(cdi, argp); + case CDROMPLAYMSF: + return cdrom_ioctl_play_msf(cdi, argp); + case CDROMPLAYTRKIND: + return cdrom_ioctl_play_trkind(cdi, argp); + case CDROMVOLCTRL: + return cdrom_ioctl_volctrl(cdi, argp); + case CDROMVOLREAD: + return cdrom_ioctl_volread(cdi, argp); + case CDROMSTART: + case CDROMSTOP: + case CDROMPAUSE: + case CDROMRESUME: + return cdrom_ioctl_audioctl(cdi, cmd); } -use_last_written: - if ((ret = cdrom_get_last_written(cdi, next_writable))) { - *next_writable = 0; - return ret; - } else { - *next_writable += 7; - return 0; - } + return -ENOSYS; } EXPORT_SYMBOL(cdrom_get_last_written); diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c index 51e75ad96422..584bc3126403 100644 --- a/drivers/cdrom/gdrom.c +++ b/drivers/cdrom/gdrom.c @@ -602,7 +602,7 @@ static void gdrom_readdisk_dma(struct work_struct *work) spin_unlock(&gdrom_lock); block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET; block_cnt = blk_rq_sectors(req)/GD_TO_BLK; - __raw_writel(virt_to_phys(req->buffer), GDROM_DMA_STARTADDR_REG); + __raw_writel(virt_to_phys(bio_data(req->bio)), GDROM_DMA_STARTADDR_REG); __raw_writel(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG); __raw_writel(1, GDROM_DMA_DIRECTION_REG); __raw_writel(1, GDROM_DMA_ENABLE_REG); diff --git a/drivers/char/random.c b/drivers/char/random.c index 6b75713d953a..06cea7ff3a7c 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -902,6 +902,7 @@ void add_disk_randomness(struct gendisk *disk) add_timer_randomness(disk->random, 0x100 + disk_devt(disk)); trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool)); } +EXPORT_SYMBOL_GPL(add_disk_randomness); #endif /********************************************************************* @@ -995,8 +996,11 @@ retry: ibytes = min_t(size_t, ibytes, have_bytes - reserved); if (ibytes < min) ibytes = 0; - entropy_count = max_t(int, 0, - entropy_count - (ibytes << (ENTROPY_SHIFT + 3))); + if (have_bytes >= ibytes + reserved) + entropy_count -= ibytes << (ENTROPY_SHIFT + 3); + else + entropy_count = reserved << (ENTROPY_SHIFT + 3); + if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) goto retry; diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c index b3ea223585bd..61dcc8011ec7 100644 --- a/drivers/char/tpm/tpm_ppi.c +++ b/drivers/char/tpm/tpm_ppi.c @@ -328,13 +328,11 @@ int tpm_add_ppi(struct kobject *parent) /* Cache TPM ACPI handle and version string */ acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, ppi_callback, NULL, NULL, &tpm_ppi_handle); - if (tpm_ppi_handle == NULL) - return -ENODEV; - - return sysfs_create_group(parent, &ppi_attr_grp); + return tpm_ppi_handle ? 
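		/* PPI is optional: a missing ACPI handle is not an error */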
sysfs_create_group(parent, &ppi_attr_grp) : 0; } void tpm_remove_ppi(struct kobject *parent) { - sysfs_remove_group(parent, &ppi_attr_grp); + if (tpm_ppi_handle) + sysfs_remove_group(parent, &ppi_attr_grp); } diff --git a/drivers/clk/bcm/clk-kona-setup.c b/drivers/clk/bcm/clk-kona-setup.c index c7607feb18dd..54a06526f64f 100644 --- a/drivers/clk/bcm/clk-kona-setup.c +++ b/drivers/clk/bcm/clk-kona-setup.c @@ -27,7 +27,7 @@ LIST_HEAD(ccu_list); /* The list of set up CCUs */ static bool clk_requires_trigger(struct kona_clk *bcm_clk) { - struct peri_clk_data *peri = bcm_clk->peri; + struct peri_clk_data *peri = bcm_clk->u.peri; struct bcm_clk_sel *sel; struct bcm_clk_div *div; @@ -63,7 +63,7 @@ static bool peri_clk_data_offsets_valid(struct kona_clk *bcm_clk) u32 limit; BUG_ON(bcm_clk->type != bcm_clk_peri); - peri = bcm_clk->peri; + peri = bcm_clk->u.peri; name = bcm_clk->name; range = bcm_clk->ccu->range; @@ -81,19 +81,19 @@ static bool peri_clk_data_offsets_valid(struct kona_clk *bcm_clk) div = &peri->div; if (divider_exists(div)) { - if (div->offset > limit) { + if (div->u.s.offset > limit) { pr_err("%s: bad divider offset for %s (%u > %u)\n", - __func__, name, div->offset, limit); + __func__, name, div->u.s.offset, limit); return false; } } div = &peri->pre_div; if (divider_exists(div)) { - if (div->offset > limit) { + if (div->u.s.offset > limit) { pr_err("%s: bad pre-divider offset for %s " "(%u > %u)\n", - __func__, name, div->offset, limit); + __func__, name, div->u.s.offset, limit); return false; } } @@ -249,21 +249,22 @@ static bool div_valid(struct bcm_clk_div *div, const char *field_name, { if (divider_is_fixed(div)) { /* Any fixed divider value but 0 is OK */ - if (div->fixed == 0) { + if (div->u.fixed == 0) { pr_err("%s: bad %s fixed value 0 for %s\n", __func__, field_name, clock_name); return false; } return true; } - if (!bitfield_valid(div->shift, div->width, field_name, clock_name)) + if (!bitfield_valid(div->u.s.shift, div->u.s.width, + field_name, clock_name)) return false; if (divider_has_fraction(div)) - if (div->frac_width > div->width) { + if (div->u.s.frac_width > div->u.s.width) { pr_warn("%s: bad %s fraction width for %s (%u > %u)\n", __func__, field_name, clock_name, - div->frac_width, div->width); + div->u.s.frac_width, div->u.s.width); return false; } @@ -278,7 +279,7 @@ static bool div_valid(struct bcm_clk_div *div, const char *field_name, */ static bool kona_dividers_valid(struct kona_clk *bcm_clk) { - struct peri_clk_data *peri = bcm_clk->peri; + struct peri_clk_data *peri = bcm_clk->u.peri; struct bcm_clk_div *div; struct bcm_clk_div *pre_div; u32 limit; @@ -295,7 +296,7 @@ static bool kona_dividers_valid(struct kona_clk *bcm_clk) limit = BITS_PER_BYTE * sizeof(u32); - return div->frac_width + pre_div->frac_width <= limit; + return div->u.s.frac_width + pre_div->u.s.frac_width <= limit; } @@ -328,7 +329,7 @@ peri_clk_data_valid(struct kona_clk *bcm_clk) if (!peri_clk_data_offsets_valid(bcm_clk)) return false; - peri = bcm_clk->peri; + peri = bcm_clk->u.peri; name = bcm_clk->name; gate = &peri->gate; if (gate_exists(gate) && !gate_valid(gate, "gate", name)) @@ -588,12 +589,12 @@ static void bcm_clk_teardown(struct kona_clk *bcm_clk) { switch (bcm_clk->type) { case bcm_clk_peri: - peri_clk_teardown(bcm_clk->data, &bcm_clk->init_data); + peri_clk_teardown(bcm_clk->u.data, &bcm_clk->init_data); break; default: break; } - bcm_clk->data = NULL; + bcm_clk->u.data = NULL; bcm_clk->type = bcm_clk_none; } @@ -644,7 +645,7 @@ struct clk *kona_clk_setup(struct ccu_data 
*ccu, const char *name, break; } bcm_clk->type = type; - bcm_clk->data = data; + bcm_clk->u.data = data; /* Make sure everything makes sense before we set it up */ if (!kona_clk_valid(bcm_clk)) { diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c index e3d339e08309..db11a87449f2 100644 --- a/drivers/clk/bcm/clk-kona.c +++ b/drivers/clk/bcm/clk-kona.c @@ -61,7 +61,7 @@ u64 do_div_round_closest(u64 dividend, unsigned long divisor) /* Convert a divider into the scaled divisor value it represents. */ static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div) { - return (u64)reg_div + ((u64)1 << div->frac_width); + return (u64)reg_div + ((u64)1 << div->u.s.frac_width); } /* @@ -77,7 +77,7 @@ u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths) BUG_ON(billionths >= BILLION); combined = (u64)div_value * BILLION + billionths; - combined <<= div->frac_width; + combined <<= div->u.s.frac_width; return do_div_round_closest(combined, BILLION); } @@ -87,7 +87,7 @@ static inline u64 scaled_div_min(struct bcm_clk_div *div) { if (divider_is_fixed(div)) - return (u64)div->fixed; + return (u64)div->u.fixed; return scaled_div_value(div, 0); } @@ -98,9 +98,9 @@ u64 scaled_div_max(struct bcm_clk_div *div) u32 reg_div; if (divider_is_fixed(div)) - return (u64)div->fixed; + return (u64)div->u.fixed; - reg_div = ((u32)1 << div->width) - 1; + reg_div = ((u32)1 << div->u.s.width) - 1; return scaled_div_value(div, reg_div); } @@ -115,7 +115,7 @@ divider(struct bcm_clk_div *div, u64 scaled_div) BUG_ON(scaled_div < scaled_div_min(div)); BUG_ON(scaled_div > scaled_div_max(div)); - return (u32)(scaled_div - ((u64)1 << div->frac_width)); + return (u32)(scaled_div - ((u64)1 << div->u.s.frac_width)); } /* Return a rate scaled for use when dividing by a scaled divisor. */ @@ -125,7 +125,7 @@ scale_rate(struct bcm_clk_div *div, u32 rate) if (divider_is_fixed(div)) return (u64)rate; - return (u64)rate << div->frac_width; + return (u64)rate << div->u.s.frac_width; } /* CCU access */ @@ -398,14 +398,14 @@ static u64 divider_read_scaled(struct ccu_data *ccu, struct bcm_clk_div *div) u32 reg_div; if (divider_is_fixed(div)) - return (u64)div->fixed; + return (u64)div->u.fixed; flags = ccu_lock(ccu); - reg_val = __ccu_read(ccu, div->offset); + reg_val = __ccu_read(ccu, div->u.s.offset); ccu_unlock(ccu, flags); /* Extract the full divider field from the register value */ - reg_div = bitfield_extract(reg_val, div->shift, div->width); + reg_div = bitfield_extract(reg_val, div->u.s.shift, div->u.s.width); /* Return the scaled divisor value it represents */ return scaled_div_value(div, reg_div); @@ -433,16 +433,17 @@ static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate, * state was defined in the device tree, we just find out * what its current value is rather than updating it. 
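	 * (a divider that has never been programmed is marked with
	 * BAD_SCALED_DIV_VALUE by the DIVIDER()/FRAC_DIVIDER() macros)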
*/ - if (div->scaled_div == BAD_SCALED_DIV_VALUE) { - reg_val = __ccu_read(ccu, div->offset); - reg_div = bitfield_extract(reg_val, div->shift, div->width); - div->scaled_div = scaled_div_value(div, reg_div); + if (div->u.s.scaled_div == BAD_SCALED_DIV_VALUE) { + reg_val = __ccu_read(ccu, div->u.s.offset); + reg_div = bitfield_extract(reg_val, div->u.s.shift, + div->u.s.width); + div->u.s.scaled_div = scaled_div_value(div, reg_div); return 0; } /* Convert the scaled divisor to the value we need to record */ - reg_div = divider(div, div->scaled_div); + reg_div = divider(div, div->u.s.scaled_div); /* Clock needs to be enabled before changing the rate */ enabled = __is_clk_gate_enabled(ccu, gate); @@ -452,9 +453,10 @@ static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate, } /* Replace the divider value and record the result */ - reg_val = __ccu_read(ccu, div->offset); - reg_val = bitfield_replace(reg_val, div->shift, div->width, reg_div); - __ccu_write(ccu, div->offset, reg_val); + reg_val = __ccu_read(ccu, div->u.s.offset); + reg_val = bitfield_replace(reg_val, div->u.s.shift, div->u.s.width, + reg_div); + __ccu_write(ccu, div->u.s.offset, reg_val); /* If the trigger fails we still want to disable the gate */ if (!__clk_trigger(ccu, trig)) @@ -490,11 +492,11 @@ static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate, BUG_ON(divider_is_fixed(div)); - previous = div->scaled_div; + previous = div->u.s.scaled_div; if (previous == scaled_div) return 0; /* No change */ - div->scaled_div = scaled_div; + div->u.s.scaled_div = scaled_div; flags = ccu_lock(ccu); __ccu_write_enable(ccu); @@ -505,7 +507,7 @@ static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate, ccu_unlock(ccu, flags); if (ret) - div->scaled_div = previous; /* Revert the change */ + div->u.s.scaled_div = previous; /* Revert the change */ return ret; @@ -802,7 +804,7 @@ static int selector_write(struct ccu_data *ccu, struct bcm_clk_gate *gate, static int kona_peri_clk_enable(struct clk_hw *hw) { struct kona_clk *bcm_clk = to_kona_clk(hw); - struct bcm_clk_gate *gate = &bcm_clk->peri->gate; + struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate; return clk_gate(bcm_clk->ccu, bcm_clk->name, gate, true); } @@ -810,7 +812,7 @@ static int kona_peri_clk_enable(struct clk_hw *hw) static void kona_peri_clk_disable(struct clk_hw *hw) { struct kona_clk *bcm_clk = to_kona_clk(hw); - struct bcm_clk_gate *gate = &bcm_clk->peri->gate; + struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate; (void)clk_gate(bcm_clk->ccu, bcm_clk->name, gate, false); } @@ -818,7 +820,7 @@ static void kona_peri_clk_disable(struct clk_hw *hw) static int kona_peri_clk_is_enabled(struct clk_hw *hw) { struct kona_clk *bcm_clk = to_kona_clk(hw); - struct bcm_clk_gate *gate = &bcm_clk->peri->gate; + struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate; return is_clk_gate_enabled(bcm_clk->ccu, gate) ? 
1 : 0; } @@ -827,7 +829,7 @@ static unsigned long kona_peri_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct kona_clk *bcm_clk = to_kona_clk(hw); - struct peri_clk_data *data = bcm_clk->peri; + struct peri_clk_data *data = bcm_clk->u.peri; return clk_recalc_rate(bcm_clk->ccu, &data->div, &data->pre_div, parent_rate); @@ -837,20 +839,20 @@ static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate) { struct kona_clk *bcm_clk = to_kona_clk(hw); - struct bcm_clk_div *div = &bcm_clk->peri->div; + struct bcm_clk_div *div = &bcm_clk->u.peri->div; if (!divider_exists(div)) return __clk_get_rate(hw->clk); /* Quietly avoid a zero rate */ - return round_rate(bcm_clk->ccu, div, &bcm_clk->peri->pre_div, + return round_rate(bcm_clk->ccu, div, &bcm_clk->u.peri->pre_div, rate ? rate : 1, *parent_rate, NULL); } static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index) { struct kona_clk *bcm_clk = to_kona_clk(hw); - struct peri_clk_data *data = bcm_clk->peri; + struct peri_clk_data *data = bcm_clk->u.peri; struct bcm_clk_sel *sel = &data->sel; struct bcm_clk_trig *trig; int ret; @@ -884,7 +886,7 @@ static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index) static u8 kona_peri_clk_get_parent(struct clk_hw *hw) { struct kona_clk *bcm_clk = to_kona_clk(hw); - struct peri_clk_data *data = bcm_clk->peri; + struct peri_clk_data *data = bcm_clk->u.peri; u8 index; index = selector_read_index(bcm_clk->ccu, &data->sel); @@ -897,7 +899,7 @@ static int kona_peri_clk_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct kona_clk *bcm_clk = to_kona_clk(hw); - struct peri_clk_data *data = bcm_clk->peri; + struct peri_clk_data *data = bcm_clk->u.peri; struct bcm_clk_div *div = &data->div; u64 scaled_div = 0; int ret; @@ -958,7 +960,7 @@ struct clk_ops kona_peri_clk_ops = { static bool __peri_clk_init(struct kona_clk *bcm_clk) { struct ccu_data *ccu = bcm_clk->ccu; - struct peri_clk_data *peri = bcm_clk->peri; + struct peri_clk_data *peri = bcm_clk->u.peri; const char *name = bcm_clk->name; struct bcm_clk_trig *trig; diff --git a/drivers/clk/bcm/clk-kona.h b/drivers/clk/bcm/clk-kona.h index 5e139adc3dc5..dee690951bb6 100644 --- a/drivers/clk/bcm/clk-kona.h +++ b/drivers/clk/bcm/clk-kona.h @@ -57,7 +57,7 @@ #define divider_exists(div) FLAG_TEST(div, DIV, EXISTS) #define divider_is_fixed(div) FLAG_TEST(div, DIV, FIXED) #define divider_has_fraction(div) (!divider_is_fixed(div) && \ - (div)->frac_width > 0) + (div)->u.s.frac_width > 0) #define selector_exists(sel) ((sel)->width != 0) #define trigger_exists(trig) FLAG_TEST(trig, TRIG, EXISTS) @@ -244,9 +244,9 @@ struct bcm_clk_div { u32 frac_width; /* field fraction width */ u64 scaled_div; /* scaled divider value */ - }; + } s; u32 fixed; /* non-zero fixed divider value */ - }; + } u; u32 flags; /* BCM_CLK_DIV_FLAGS_* below */ }; @@ -263,28 +263,28 @@ struct bcm_clk_div { /* A fixed (non-zero) divider */ #define FIXED_DIVIDER(_value) \ { \ - .fixed = (_value), \ + .u.fixed = (_value), \ .flags = FLAG(DIV, EXISTS)|FLAG(DIV, FIXED), \ } /* A divider with an integral divisor */ #define DIVIDER(_offset, _shift, _width) \ { \ - .offset = (_offset), \ - .shift = (_shift), \ - .width = (_width), \ - .scaled_div = BAD_SCALED_DIV_VALUE, \ + .u.s.offset = (_offset), \ + .u.s.shift = (_shift), \ + .u.s.width = (_width), \ + .u.s.scaled_div = BAD_SCALED_DIV_VALUE, \ .flags = FLAG(DIV, EXISTS), \ } /* A divider whose divisor has an integer and fractional part */ #define 
FRAC_DIVIDER(_offset, _shift, _width, _frac_width) \ { \ - .offset = (_offset), \ - .shift = (_shift), \ - .width = (_width), \ - .frac_width = (_frac_width), \ - .scaled_div = BAD_SCALED_DIV_VALUE, \ + .u.s.offset = (_offset), \ + .u.s.shift = (_shift), \ + .u.s.width = (_width), \ + .u.s.frac_width = (_frac_width), \ + .u.s.scaled_div = BAD_SCALED_DIV_VALUE, \ .flags = FLAG(DIV, EXISTS), \ } @@ -380,7 +380,7 @@ struct kona_clk { union { void *data; struct peri_clk_data *peri; - }; + } u; }; #define to_kona_clk(_hw) \ container_of(_hw, struct kona_clk, hw) diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c index ec22112e569f..3fbee4540228 100644 --- a/drivers/clk/clk-divider.c +++ b/drivers/clk/clk-divider.c @@ -144,6 +144,37 @@ static bool _is_valid_div(struct clk_divider *divider, unsigned int div) return true; } +static int _round_up_table(const struct clk_div_table *table, int div) +{ + const struct clk_div_table *clkt; + int up = INT_MAX; + + for (clkt = table; clkt->div; clkt++) { + if (clkt->div == div) + return clkt->div; + else if (clkt->div < div) + continue; + + if ((clkt->div - div) < (up - div)) + up = clkt->div; + } + + return up; +} + +static int _div_round_up(struct clk_divider *divider, + unsigned long parent_rate, unsigned long rate) +{ + int div = DIV_ROUND_UP(parent_rate, rate); + + if (divider->flags & CLK_DIVIDER_POWER_OF_TWO) + div = __roundup_pow_of_two(div); + if (divider->table) + div = _round_up_table(divider->table, div); + + return div; +} + static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, unsigned long *best_parent_rate) { @@ -159,7 +190,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) { parent_rate = *best_parent_rate; - bestdiv = DIV_ROUND_UP(parent_rate, rate); + bestdiv = _div_round_up(divider, parent_rate, rate); bestdiv = bestdiv == 0 ? 1 : bestdiv; bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv; return bestdiv; @@ -219,6 +250,10 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, u32 val; div = DIV_ROUND_UP(parent_rate, rate); + + if (!_is_valid_div(divider, div)) + return -EINVAL; + value = _get_val(divider, div); if (value > div_mask(divider)) diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index dff0373f53c1..7cf2c093cc54 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -1984,9 +1984,28 @@ struct clk *__clk_register(struct device *dev, struct clk_hw *hw) } EXPORT_SYMBOL_GPL(__clk_register); -static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk) +/** + * clk_register - allocate a new clock, register it and return an opaque cookie + * @dev: device that is registering this clock + * @hw: link to hardware-specific clock data + * + * clk_register is the primary interface for populating the clock tree with new + * clock nodes. It returns a pointer to the newly allocated struct clk which + * cannot be dereferenced by driver code but may be used in conjuction with the + * rest of the clock API. In the event of an error clk_register will return an + * error code; drivers must test for an error code after calling clk_register. 
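+ * Callers typically test the result with IS_ERR() and retrieve the
+ * errno with PTR_ERR().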
+ */ +struct clk *clk_register(struct device *dev, struct clk_hw *hw) { int i, ret; + struct clk *clk; + + clk = kzalloc(sizeof(*clk), GFP_KERNEL); + if (!clk) { + pr_err("%s: could not allocate clk\n", __func__); + ret = -ENOMEM; + goto fail_out; + } clk->name = kstrdup(hw->init->name, GFP_KERNEL); if (!clk->name) { @@ -2026,7 +2045,7 @@ static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk) ret = __clk_init(dev, clk); if (!ret) - return 0; + return clk; fail_parent_names_copy: while (--i >= 0) @@ -2035,36 +2054,6 @@ fail_parent_names_copy: fail_parent_names: kfree(clk->name); fail_name: - return ret; -} - -/** - * clk_register - allocate a new clock, register it and return an opaque cookie - * @dev: device that is registering this clock - * @hw: link to hardware-specific clock data - * - * clk_register is the primary interface for populating the clock tree with new - * clock nodes. It returns a pointer to the newly allocated struct clk which - * cannot be dereferenced by driver code but may be used in conjuction with the - * rest of the clock API. In the event of an error clk_register will return an - * error code; drivers must test for an error code after calling clk_register. - */ -struct clk *clk_register(struct device *dev, struct clk_hw *hw) -{ - int ret; - struct clk *clk; - - clk = kzalloc(sizeof(*clk), GFP_KERNEL); - if (!clk) { - pr_err("%s: could not allocate clk\n", __func__); - ret = -ENOMEM; - goto fail_out; - } - - ret = _clk_register(dev, hw, clk); - if (!ret) - return clk; - kfree(clk); fail_out: return ERR_PTR(ret); @@ -2151,9 +2140,10 @@ void clk_unregister(struct clk *clk) if (!hlist_empty(&clk->children)) { struct clk *child; + struct hlist_node *t; /* Reparent all children to the orphan list. */ - hlist_for_each_entry(child, &clk->children, child_node) + hlist_for_each_entry_safe(child, t, &clk->children, child_node) clk_set_parent(child, NULL); } @@ -2173,7 +2163,7 @@ EXPORT_SYMBOL_GPL(clk_unregister); static void devm_clk_release(struct device *dev, void *res) { - clk_unregister(res); + clk_unregister(*(struct clk **)res); } /** @@ -2188,18 +2178,18 @@ static void devm_clk_release(struct device *dev, void *res) struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw) { struct clk *clk; - int ret; + struct clk **clkp; - clk = devres_alloc(devm_clk_release, sizeof(*clk), GFP_KERNEL); - if (!clk) + clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL); + if (!clkp) return ERR_PTR(-ENOMEM); - ret = _clk_register(dev, hw, clk); - if (!ret) { - devres_add(dev, clk); + clk = clk_register(dev, hw); + if (!IS_ERR(clk)) { + *clkp = clk; + devres_add(dev, clkp); } else { - devres_free(clk); - clk = ERR_PTR(ret); + devres_free(clkp); } return clk; diff --git a/drivers/clk/shmobile/clk-mstp.c b/drivers/clk/shmobile/clk-mstp.c index 2e5810c88d11..1f6324e29a80 100644 --- a/drivers/clk/shmobile/clk-mstp.c +++ b/drivers/clk/shmobile/clk-mstp.c @@ -156,6 +156,7 @@ cpg_mstp_clock_register(const char *name, const char *parent_name, static void __init cpg_mstp_clocks_init(struct device_node *np) { struct mstp_clock_group *group; + const char *idxname; struct clk **clks; unsigned int i; @@ -184,6 +185,11 @@ static void __init cpg_mstp_clocks_init(struct device_node *np) for (i = 0; i < MSTP_MAX_CLOCKS; ++i) clks[i] = ERR_PTR(-ENOENT); + if (of_find_property(np, "clock-indices", &i)) + idxname = "clock-indices"; + else + idxname = "renesas,clock-indices"; + for (i = 0; i < MSTP_MAX_CLOCKS; ++i) { const char *parent_name; const char *name; 
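The idxname fallback above keeps existing device trees working while
letting new ones use the generic property name. As a rough sketch only
(node name, unit address and index values are invented for
illustration, and the clocks/parent references are omitted for
brevity), a group using the generic property could look like:

	mstp_clks: mstp-clocks@e6150130 {
		compatible = "renesas,cpg-mstp-clocks";
		#clock-cells = <1>;
		clock-indices = <0 1 5>;
		clock-output-names = "clk_a", "clk_b", "clk_c";
	};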
@@ -197,8 +203,7 @@ static void __init cpg_mstp_clocks_init(struct device_node *np) continue; parent_name = of_clk_get_parent_name(np, i); - ret = of_property_read_u32_index(np, "renesas,clock-indices", i, - &clkidx); + ret = of_property_read_u32_index(np, idxname, i, &clkidx); if (parent_name == NULL || ret < 0) break; diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c index 88dafb5e9627..de6da957a09d 100644 --- a/drivers/clk/socfpga/clk-pll.c +++ b/drivers/clk/socfpga/clk-pll.c @@ -20,6 +20,7 @@ #include <linux/clk-provider.h> #include <linux/io.h> #include <linux/of.h> +#include <linux/of_address.h> #include "clk.h" @@ -43,6 +44,8 @@ #define to_socfpga_clk(p) container_of(p, struct socfpga_pll, hw.hw) +void __iomem *clk_mgr_base_addr; + static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk, unsigned long parent_rate) { @@ -87,6 +90,7 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node, const char *clk_name = node->name; const char *parent_name[SOCFPGA_MAX_PARENTS]; struct clk_init_data init; + struct device_node *clkmgr_np; int rc; int i = 0; @@ -96,6 +100,9 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node, if (WARN_ON(!pll_clk)) return NULL; + clkmgr_np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr"); + clk_mgr_base_addr = of_iomap(clkmgr_np, 0); + BUG_ON(!clk_mgr_base_addr); pll_clk->hw.reg = clk_mgr_base_addr + reg; of_property_read_string(node, "clock-output-names", &clk_name); diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c index 35a960a993f9..43db947e5f0e 100644 --- a/drivers/clk/socfpga/clk.c +++ b/drivers/clk/socfpga/clk.c @@ -17,28 +17,11 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. 
*/ -#include <linux/clk.h> -#include <linux/clkdev.h> -#include <linux/clk-provider.h> -#include <linux/io.h> #include <linux/of.h> -#include <linux/of_address.h> #include "clk.h" -void __iomem *clk_mgr_base_addr; - -static const struct of_device_id socfpga_child_clocks[] __initconst = { - { .compatible = "altr,socfpga-pll-clock", socfpga_pll_init, }, - { .compatible = "altr,socfpga-perip-clk", socfpga_periph_init, }, - { .compatible = "altr,socfpga-gate-clk", socfpga_gate_init, }, - {}, -}; - -static void __init socfpga_clkmgr_init(struct device_node *node) -{ - clk_mgr_base_addr = of_iomap(node, 0); - of_clk_init(socfpga_child_clocks); -} -CLK_OF_DECLARE(socfpga_mgr, "altr,clk-mgr", socfpga_clkmgr_init); +CLK_OF_DECLARE(socfpga_pll_clk, "altr,socfpga-pll-clock", socfpga_pll_init); +CLK_OF_DECLARE(socfpga_perip_clk, "altr,socfpga-perip-clk", socfpga_periph_init); +CLK_OF_DECLARE(socfpga_gate_clk, "altr,socfpga-gate-clk", socfpga_gate_init); diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c index bca0a0badbfa..a886702f7c8b 100644 --- a/drivers/clk/st/clkgen-pll.c +++ b/drivers/clk/st/clkgen-pll.c @@ -521,8 +521,10 @@ static struct clk * __init clkgen_odf_register(const char *parent_name, gate->lock = odf_lock; div = kzalloc(sizeof(*div), GFP_KERNEL); - if (!div) + if (!div) { + kfree(gate); return ERR_PTR(-ENOMEM); + } div->flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO; div->reg = reg + pll_data->odf[odf].offset; diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c index 0d20241e0770..6aad8abc69a2 100644 --- a/drivers/clk/tegra/clk-pll.c +++ b/drivers/clk/tegra/clk-pll.c @@ -58,9 +58,9 @@ #define PLLDU_LFCON_SET_DIVN 600 #define PLLE_BASE_DIVCML_SHIFT 24 -#define PLLE_BASE_DIVCML_WIDTH 4 +#define PLLE_BASE_DIVCML_MASK 0xf #define PLLE_BASE_DIVP_SHIFT 16 -#define PLLE_BASE_DIVP_WIDTH 7 +#define PLLE_BASE_DIVP_WIDTH 6 #define PLLE_BASE_DIVN_SHIFT 8 #define PLLE_BASE_DIVN_WIDTH 8 #define PLLE_BASE_DIVM_SHIFT 0 @@ -183,6 +183,14 @@ #define divp_mask(p) (p->params->flags & TEGRA_PLLU ? 
PLLU_POST_DIVP_MASK :\ mask(p->params->div_nmp->divp_width)) +#define divm_shift(p) (p)->params->div_nmp->divm_shift +#define divn_shift(p) (p)->params->div_nmp->divn_shift +#define divp_shift(p) (p)->params->div_nmp->divp_shift + +#define divm_mask_shifted(p) (divm_mask(p) << divm_shift(p)) +#define divn_mask_shifted(p) (divn_mask(p) << divn_shift(p)) +#define divp_mask_shifted(p) (divp_mask(p) << divp_shift(p)) + #define divm_max(p) (divm_mask(p)) #define divn_max(p) (divn_mask(p)) #define divp_max(p) (1 << (divp_mask(p))) @@ -476,13 +484,12 @@ static void _update_pll_mnp(struct tegra_clk_pll *pll, } else { val = pll_readl_base(pll); - val &= ~((divm_mask(pll) << div_nmp->divm_shift) | - (divn_mask(pll) << div_nmp->divn_shift) | - (divp_mask(pll) << div_nmp->divp_shift)); + val &= ~(divm_mask_shifted(pll) | divn_mask_shifted(pll) | + divp_mask_shifted(pll)); - val |= ((cfg->m << div_nmp->divm_shift) | - (cfg->n << div_nmp->divn_shift) | - (cfg->p << div_nmp->divp_shift)); + val |= (cfg->m << divm_shift(pll)) | + (cfg->n << divn_shift(pll)) | + (cfg->p << divp_shift(pll)); pll_writel_base(val, pll); } @@ -730,11 +737,12 @@ static int clk_plle_enable(struct clk_hw *hw) if (pll->params->flags & TEGRA_PLLE_CONFIGURE) { /* configure dividers */ val = pll_readl_base(pll); - val &= ~(divm_mask(pll) | divn_mask(pll) | divp_mask(pll)); - val &= ~(PLLE_BASE_DIVCML_WIDTH << PLLE_BASE_DIVCML_SHIFT); - val |= sel.m << pll->params->div_nmp->divm_shift; - val |= sel.n << pll->params->div_nmp->divn_shift; - val |= sel.p << pll->params->div_nmp->divp_shift; + val &= ~(divp_mask_shifted(pll) | divn_mask_shifted(pll) | + divm_mask_shifted(pll)); + val &= ~(PLLE_BASE_DIVCML_MASK << PLLE_BASE_DIVCML_SHIFT); + val |= sel.m << divm_shift(pll); + val |= sel.n << divn_shift(pll); + val |= sel.p << divp_shift(pll); val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT; pll_writel_base(val, pll); } @@ -745,10 +753,11 @@ static int clk_plle_enable(struct clk_hw *hw) pll_writel_misc(val, pll); val = readl(pll->clk_base + PLLE_SS_CTRL); + val &= ~PLLE_SS_COEFFICIENTS_MASK; val |= PLLE_SS_DISABLE; writel(val, pll->clk_base + PLLE_SS_CTRL); - val |= pll_readl_base(pll); + val = pll_readl_base(pll); val |= (PLL_BASE_BYPASS | PLL_BASE_ENABLE); pll_writel_base(val, pll); @@ -1292,10 +1301,11 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw) pll_writel(val, PLLE_SS_CTRL, pll); val = pll_readl_base(pll); - val &= ~(divm_mask(pll) | divn_mask(pll) | divp_mask(pll)); - val &= ~(PLLE_BASE_DIVCML_WIDTH << PLLE_BASE_DIVCML_SHIFT); - val |= sel.m << pll->params->div_nmp->divm_shift; - val |= sel.n << pll->params->div_nmp->divn_shift; + val &= ~(divp_mask_shifted(pll) | divn_mask_shifted(pll) | + divm_mask_shifted(pll)); + val &= ~(PLLE_BASE_DIVCML_MASK << PLLE_BASE_DIVCML_SHIFT); + val |= sel.m << divm_shift(pll); + val |= sel.n << divn_shift(pll); val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT; pll_writel_base(val, pll); udelay(1); @@ -1410,6 +1420,15 @@ struct clk *tegra_clk_register_pll(const char *name, const char *parent_name, return clk; } +static struct div_nmp pll_e_nmp = { + .divn_shift = PLLE_BASE_DIVN_SHIFT, + .divn_width = PLLE_BASE_DIVN_WIDTH, + .divm_shift = PLLE_BASE_DIVM_SHIFT, + .divm_width = PLLE_BASE_DIVM_WIDTH, + .divp_shift = PLLE_BASE_DIVP_SHIFT, + .divp_width = PLLE_BASE_DIVP_WIDTH, +}; + struct clk *tegra_clk_register_plle(const char *name, const char *parent_name, void __iomem *clk_base, void __iomem *pmc, unsigned long flags, struct tegra_clk_pll_params *pll_params, @@ -1420,6 +1439,10 @@ struct clk 
*tegra_clk_register_plle(const char *name, const char *parent_name, pll_params->flags |= TEGRA_PLL_LOCK_MISC | TEGRA_PLL_BYPASS; pll_params->flags |= TEGRA_PLL_HAS_LOCK_ENABLE; + + if (!pll_params->div_nmp) + pll_params->div_nmp = &pll_e_nmp; + pll = _tegra_init_pll(clk_base, pmc, pll_params, lock); if (IS_ERR(pll)) return ERR_CAST(pll); @@ -1557,9 +1580,8 @@ struct clk *tegra_clk_register_pllre(const char *name, const char *parent_name, int m; m = _pll_fixed_mdiv(pll_params, parent_rate); - val = m << PLL_BASE_DIVM_SHIFT; - val |= (pll_params->vco_min / parent_rate) - << PLL_BASE_DIVN_SHIFT; + val = m << divm_shift(pll); + val |= (pll_params->vco_min / parent_rate) << divn_shift(pll); pll_writel_base(val, pll); } @@ -1718,7 +1740,7 @@ struct clk *tegra_clk_register_plle_tegra114(const char *name, "pll_re_vco"); } else { val_aux &= ~(PLLE_AUX_PLLRE_SEL | PLLE_AUX_PLLP_SEL); - pll_writel(val, pll_params->aux_reg, pll); + pll_writel(val_aux, pll_params->aux_reg, pll); } clk = _tegra_clk_register_pll(pll, name, parent_name, flags, diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c index 00fdd1170284..a8d7ea14f183 100644 --- a/drivers/clocksource/tcb_clksrc.c +++ b/drivers/clocksource/tcb_clksrc.c @@ -100,7 +100,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d) || tcd->clkevt.mode == CLOCK_EVT_MODE_ONESHOT) { __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR)); __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR)); - clk_disable_unprepare(tcd->clk); + clk_disable(tcd->clk); } switch (m) { @@ -109,7 +109,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d) * of oneshot, we get lower overhead and improved accuracy. */ case CLOCK_EVT_MODE_PERIODIC: - clk_prepare_enable(tcd->clk); + clk_enable(tcd->clk); /* slow clock, count up to RC, then irq and restart */ __raw_writel(timer_clock @@ -126,7 +126,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d) break; case CLOCK_EVT_MODE_ONESHOT: - clk_prepare_enable(tcd->clk); + clk_enable(tcd->clk); /* slow clock, count up to RC, then irq and stop */ __raw_writel(timer_clock | ATMEL_TC_CPCSTOP @@ -194,7 +194,7 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx) ret = clk_prepare_enable(t2_clk); if (ret) return ret; - clk_disable_unprepare(t2_clk); + clk_disable(t2_clk); clkevt.regs = tc->regs; clkevt.clk = t2_clk; diff --git a/drivers/clocksource/timer-marco.c b/drivers/clocksource/timer-marco.c index 571d10974139..dbd30398222a 100644 --- a/drivers/clocksource/timer-marco.c +++ b/drivers/clocksource/timer-marco.c @@ -199,7 +199,7 @@ static int sirfsoc_local_timer_setup(struct clock_event_device *ce) action->dev_id = ce; BUG_ON(setup_irq(ce->irq, action)); - irq_set_affinity(action->irq, cpumask_of(cpu)); + irq_force_affinity(action->irq, cpumask_of(cpu)); clockevents_register_device(ce); return 0; diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c index 1bf6bbac3e03..09b9129c7bd3 100644 --- a/drivers/cpufreq/cpufreq-cpu0.c +++ b/drivers/cpufreq/cpufreq-cpu0.c @@ -130,7 +130,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) return -ENOENT; } - cpu_reg = devm_regulator_get_optional(cpu_dev, "cpu0"); + cpu_reg = regulator_get_optional(cpu_dev, "cpu0"); if (IS_ERR(cpu_reg)) { /* * If cpu0 regulator supply node is present, but regulator is @@ -145,23 +145,23 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) PTR_ERR(cpu_reg)); } - cpu_clk = devm_clk_get(cpu_dev, 
NULL); + cpu_clk = clk_get(cpu_dev, NULL); if (IS_ERR(cpu_clk)) { ret = PTR_ERR(cpu_clk); pr_err("failed to get cpu0 clock: %d\n", ret); - goto out_put_node; + goto out_put_reg; } ret = of_init_opp_table(cpu_dev); if (ret) { pr_err("failed to init OPP table: %d\n", ret); - goto out_put_node; + goto out_put_clk; } ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); if (ret) { pr_err("failed to init cpufreq table: %d\n", ret); - goto out_put_node; + goto out_put_clk; } of_property_read_u32(np, "voltage-tolerance", &voltage_tolerance); @@ -216,6 +216,12 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) out_free_table: dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); +out_put_clk: + if (!IS_ERR(cpu_clk)) + clk_put(cpu_clk); +out_put_reg: + if (!IS_ERR(cpu_reg)) + regulator_put(cpu_reg); out_put_node: of_node_put(np); return ret; diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index ba43991ba98a..e1c6433b16e0 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -366,6 +366,11 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, break; case CPUFREQ_GOV_LIMITS: + mutex_lock(&dbs_data->mutex); + if (!cpu_cdbs->cur_policy) { + mutex_unlock(&dbs_data->mutex); + break; + } mutex_lock(&cpu_cdbs->timer_mutex); if (policy->max < cpu_cdbs->cur_policy->cur) __cpufreq_driver_target(cpu_cdbs->cur_policy, @@ -375,6 +380,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, policy->min, CPUFREQ_RELATION_L); dbs_check_cpu(dbs_data, cpu); mutex_unlock(&cpu_cdbs->timer_mutex); + mutex_unlock(&dbs_data->mutex); break; } return 0; diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 099967302bf2..eab8ccfe6beb 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -37,6 +37,7 @@ #define BYT_RATIOS 0x66a #define BYT_VIDS 0x66b #define BYT_TURBO_RATIOS 0x66c +#define BYT_TURBO_VIDS 0x66d #define FRAC_BITS 6 @@ -70,8 +71,9 @@ struct pstate_data { }; struct vid_data { - int32_t min; - int32_t max; + int min; + int max; + int turbo; int32_t ratio; }; @@ -359,14 +361,14 @@ static int byt_get_min_pstate(void) { u64 value; rdmsrl(BYT_RATIOS, value); - return (value >> 8) & 0xFF; + return (value >> 8) & 0x3F; } static int byt_get_max_pstate(void) { u64 value; rdmsrl(BYT_RATIOS, value); - return (value >> 16) & 0xFF; + return (value >> 16) & 0x3F; } static int byt_get_turbo_pstate(void) @@ -393,6 +395,9 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate) vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max); vid = fp_toint(vid_fp); + if (pstate > cpudata->pstate.max_pstate) + vid = cpudata->vid.turbo; + val |= vid; wrmsrl(MSR_IA32_PERF_CTL, val); @@ -402,13 +407,17 @@ static void byt_get_vid(struct cpudata *cpudata) { u64 value; + rdmsrl(BYT_VIDS, value); - cpudata->vid.min = int_tofp((value >> 8) & 0x7f); - cpudata->vid.max = int_tofp((value >> 16) & 0x7f); + cpudata->vid.min = int_tofp((value >> 8) & 0x3f); + cpudata->vid.max = int_tofp((value >> 16) & 0x3f); cpudata->vid.ratio = div_fp( cpudata->vid.max - cpudata->vid.min, int_tofp(cpudata->pstate.max_pstate - cpudata->pstate.min_pstate)); + + rdmsrl(BYT_TURBO_VIDS, value); + cpudata->vid.turbo = value & 0x7f; } @@ -545,12 +554,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) if (pstate_funcs.get_vid) pstate_funcs.get_vid(cpu); - - /* - * goto max pstate so we don't slow up boot if we are built-in if we are - * a module we will take care of it during 
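The cpufreq-cpu0 hunks above drop the devm_* variants precisely so the probe error path can release the clock and regulator itself, in reverse order of acquisition. A condensed sketch of that goto-ladder idiom; foo_probe() and foo_register() are placeholders, and unlike the real driver this sketch treats a missing regulator as fatal:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

static int foo_register(struct platform_device *pdev);	/* hypothetical */

static int foo_probe(struct platform_device *pdev)
{
	struct regulator *reg;
	struct clk *clk;
	int ret;

	reg = regulator_get_optional(&pdev->dev, "cpu0");
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		goto out_put_reg;		/* undo only what succeeded */
	}

	ret = foo_register(pdev);		/* hypothetical last step */
	if (ret)
		goto out_put_clk;

	return 0;

out_put_clk:
	clk_put(clk);
out_put_reg:
	regulator_put(reg);
	return ret;
}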
normal operation - */ - intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); + intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); } static inline void intel_pstate_calc_busy(struct cpudata *cpu, @@ -695,11 +699,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum) cpu = all_cpu_data[cpunum]; intel_pstate_get_cpu_pstates(cpu); - if (!cpu->pstate.current_pstate) { - all_cpu_data[cpunum] = NULL; - kfree(cpu); - return -ENODATA; - } cpu->cpu = cpunum; @@ -710,7 +709,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum) cpu->timer.expires = jiffies + HZ/100; intel_pstate_busy_pid_reset(cpu); intel_pstate_sample(cpu); - intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); add_timer_on(&cpu->timer, cpunum); diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c index f0bc31f5db27..d4add8621944 100644 --- a/drivers/cpufreq/loongson2_cpufreq.c +++ b/drivers/cpufreq/loongson2_cpufreq.c @@ -62,7 +62,7 @@ static int loongson2_cpufreq_target(struct cpufreq_policy *policy, set_cpus_allowed_ptr(current, &cpus_allowed); /* setting the cpu frequency */ - clk_set_rate(policy->clk, freq); + clk_set_rate(policy->clk, freq * 1000); return 0; } @@ -92,7 +92,7 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy) i++) loongson2_clockmod_table[i].frequency = (rate * i) / 8; - ret = clk_set_rate(cpuclk, rate); + ret = clk_set_rate(cpuclk, rate * 1000); if (ret) { clk_put(cpuclk); return ret; diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c index 9f25f5296029..0eabd81e1a90 100644 --- a/drivers/crypto/caam/error.c +++ b/drivers/crypto/caam/error.c @@ -16,9 +16,13 @@ char *tmp; \ \ tmp = kmalloc(sizeof(format) + max_alloc, GFP_ATOMIC); \ - sprintf(tmp, format, param); \ - strcat(str, tmp); \ - kfree(tmp); \ + if (likely(tmp)) { \ + sprintf(tmp, format, param); \ + strcat(str, tmp); \ + kfree(tmp); \ + } else { \ + strcat(str, "kmalloc failure in SPRINTFCAT"); \ + } \ } static void report_jump_idx(u32 status, char *outstr) diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index a886713937fd..d5d30ed863ce 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -1009,6 +1009,7 @@ static void dmaengine_unmap(struct kref *kref) dma_unmap_page(dev, unmap->addr[i], unmap->len, DMA_BIDIRECTIONAL); } + cnt = unmap->map_cnt; mempool_free(unmap, __get_unmap_pool(cnt)->pool); } @@ -1074,6 +1075,7 @@ dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags) memset(unmap, 0, sizeof(*unmap)); kref_init(&unmap->kref); unmap->dev = dev; + unmap->map_cnt = nr; return unmap; } diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index cfdbb92aae1d..7a740769c2fa 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -1548,11 +1548,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) /* Disable BLOCK interrupts as well */ channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); - err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt, - IRQF_SHARED, "dw_dmac", dw); - if (err) - return err; - /* Create a pool of consistent memory blocks for hardware descriptors */ dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev, sizeof(struct dw_desc), 4, 0); @@ -1563,6 +1558,11 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); + err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED, + "dw_dmac", dw); + if (err) + return err; + INIT_LIST_HEAD(&dw->dma.channels); for (i = 
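The loongson2 change above is a unit fix: cpufreq frequency tables are kept in kHz while clk_set_rate() takes Hz, so the rate must be scaled by 1000 at the boundary between the two subsystems. Stated as a tiny helper (illustrative only):

#include <linux/clk.h>

static inline int cpufreq_set_clk_khz(struct clk *clk, unsigned int khz)
{
	/* cpufreq speaks kHz, the common clock framework speaks Hz */
	return clk_set_rate(clk, (unsigned long)khz * 1000);
}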
0; i < nr_channels; i++) { struct dw_dma_chan *dwc = &dw->chan[i]; @@ -1667,6 +1667,7 @@ int dw_dma_remove(struct dw_dma_chip *chip) dw_dma_off(dw); dma_async_device_unregister(&dw->dma); + free_irq(chip->irq, dw); tasklet_kill(&dw->tasklet); list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 766b68ed505c..394cbc5c93e3 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -191,12 +191,10 @@ static void mv_set_mode(struct mv_xor_chan *chan, static void mv_chan_activate(struct mv_xor_chan *chan) { - u32 activation; - dev_dbg(mv_chan_to_devp(chan), " activate chan.\n"); - activation = readl_relaxed(XOR_ACTIVATION(chan)); - activation |= 0x1; - writel_relaxed(activation, XOR_ACTIVATION(chan)); + + /* writel ensures all descriptors are flushed before activation */ + writel(BIT(0), XOR_ACTIVATION(chan)); } static char mv_chan_is_busy(struct mv_xor_chan *chan) diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c index ab26d46bbe15..5ebdfbc1051e 100644 --- a/drivers/dma/sa11x0-dma.c +++ b/drivers/dma/sa11x0-dma.c @@ -113,11 +113,9 @@ struct sa11x0_dma_phy { struct sa11x0_dma_desc *txd_load; unsigned sg_done; struct sa11x0_dma_desc *txd_done; -#ifdef CONFIG_PM_SLEEP u32 dbs[2]; u32 dbt[2]; u32 dcsr; -#endif }; struct sa11x0_dma_dev { @@ -984,7 +982,6 @@ static int sa11x0_dma_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM_SLEEP static int sa11x0_dma_suspend(struct device *dev) { struct sa11x0_dma_dev *d = dev_get_drvdata(dev); @@ -1054,7 +1051,6 @@ static int sa11x0_dma_resume(struct device *dev) return 0; } -#endif static const struct dev_pm_ops sa11x0_dma_pm_ops = { .suspend_noirq = sa11x0_dma_suspend, diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 33edd6766344..2c694b5297cc 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c @@ -1018,7 +1018,7 @@ static void edac_ce_error(struct mem_ctl_info *mci, } edac_inc_ce_error(mci, enable_per_layer_report, pos, error_count); - if (mci->scrub_mode & SCRUB_SW_SRC) { + if (mci->scrub_mode == SCRUB_SW_SRC) { /* * Some memory controllers (called MCs below) can remap * memory so that it is still available at a different diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c index 8d0450b9b9af..64b68320249f 100644 --- a/drivers/edac/i82875p_edac.c +++ b/drivers/edac/i82875p_edac.c @@ -275,7 +275,6 @@ static int i82875p_setup_overfl_dev(struct pci_dev *pdev, { struct pci_dev *dev; void __iomem *window; - int err; *ovrfl_pdev = NULL; *ovrfl_window = NULL; @@ -293,13 +292,8 @@ static int i82875p_setup_overfl_dev(struct pci_dev *pdev, if (dev == NULL) return 1; - err = pci_bus_add_device(dev); - if (err) { - i82875p_printk(KERN_ERR, - "%s(): pci_bus_add_device() Failed\n", - __func__); - } pci_bus_assign_resources(dev->bus); + pci_bus_add_device(dev); } *ovrfl_pdev = dev; diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c index 51b9caa0b024..5f43620d580a 100644 --- a/drivers/edac/mce_amd.c +++ b/drivers/edac/mce_amd.c @@ -6,7 +6,6 @@ static struct amd_decoder_ops *fam_ops; static u8 xec_mask = 0xf; -static u8 nb_err_cpumask = 0xf; static bool report_gart_errors; static void (*nb_bus_decoder)(int node_id, struct mce *m); @@ -852,7 +851,6 @@ static int __init mce_amd_init(void) break; case 0x14: - nb_err_cpumask = 0x3; fam_ops->mc0_mce = cat_mc0_mce; fam_ops->mc1_mce = cat_mc1_mce; fam_ops->mc2_mce = k8_mc2_mce; diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h index 
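mv_chan_activate() above moves from writel_relaxed() to plain writel() because writel() carries a write barrier: stores that filled in DMA descriptors in coherent memory are guaranteed visible to the device before the MMIO doorbell lands. A sketch of the same ordering concern, with a hypothetical descriptor layout:

#include <linux/bitops.h>
#include <linux/io.h>

struct foo_desc {			/* illustrative descriptor */
	u32 ctrl;
};
#define FOO_DESC_READY	BIT(31)		/* illustrative flag */

static void foo_ring_doorbell(struct foo_desc *desc, void __iomem *doorbell)
{
	desc->ctrl |= FOO_DESC_READY;	/* plain store to memory */
	/* writel() orders the store above before this MMIO write;
	 * writel_relaxed() would give no such guarantee */
	writel(BIT(0), doorbell);
}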
c98764aeeec6..f477308b6e9c 100644 --- a/drivers/firewire/core.h +++ b/drivers/firewire/core.h @@ -237,8 +237,8 @@ static inline bool is_next_generation(int new_generation, int old_generation) #define LOCAL_BUS 0xffc0 -/* arbitrarily chosen maximum range for physical DMA: 128 TB */ -#define FW_MAX_PHYSICAL_RANGE (128ULL << 40) +/* OHCI-1394's default upper bound for physical DMA: 4 GB */ +#define FW_MAX_PHYSICAL_RANGE (1ULL << 32) void fw_core_handle_request(struct fw_card *card, struct fw_packet *request); void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet); diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 8db663219560..586f2f7f6993 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -3716,7 +3716,7 @@ static int pci_probe(struct pci_dev *dev, version >> 16, version & 0xff, ohci->card.index, ohci->n_ir, ohci->n_it, ohci->quirks, reg_read(ohci, OHCI1394_PhyUpperBound) ? - ", >4 GB phys DMA" : ""); + ", physUB" : ""); return 0; diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c index 3ee852c9925b..071c2c969eec 100644 --- a/drivers/firmware/iscsi_ibft.c +++ b/drivers/firmware/iscsi_ibft.c @@ -756,6 +756,7 @@ static const struct { */ { ACPI_SIG_IBFT }, { "iBFT" }, + { "BIFT" }, /* Broadcom iSCSI Offload */ }; static void __init acpi_find_ibft_region(void) diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index a86c49a605c6..4a1b5113e527 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -56,6 +56,7 @@ config GPIO_ACPI depends on ACPI config GPIOLIB_IRQCHIP + select IRQ_DOMAIN bool config DEBUG_GPIO @@ -243,6 +244,15 @@ config GPIO_OCTEON Say yes here to support the on-chip GPIO lines on the OCTEON family of SOCs. +config GPIO_OMAP + bool "TI OMAP GPIO support" if COMPILE_TEST && !ARCH_OMAP2PLUS + default y if ARCH_OMAP + depends on ARM + select GENERIC_IRQ_CHIP + select GPIOLIB_IRQCHIP + help + Say yes here to enable GPIO support for TI OMAP SoCs. + config GPIO_PL061 bool "PrimeCell PL061 GPIO support" depends on ARM_AMBA @@ -259,7 +269,7 @@ config GPIO_PXA config GPIO_RCAR tristate "Renesas R-Car GPIO" - depends on ARM + depends on ARM && (ARCH_SHMOBILE || COMPILE_TEST) help Say yes here to support GPIO on Renesas R-Car SoCs. @@ -510,6 +520,7 @@ config GPIO_PCA953X config GPIO_PCA953X_IRQ bool "Interrupt controller support for PCA953x" depends on GPIO_PCA953X=y + select GPIOLIB_IRQCHIP help Say yes here to enable the pca953x to be used as an interrupt controller. It requires the driver to be built in the kernel. @@ -579,6 +590,7 @@ config GPIO_STP_XWAY config GPIO_TC3589X bool "TC3589X GPIOs" depends on MFD_TC3589X + select GPIOLIB_IRQCHIP help This enables support for the GPIOs found on the TC3589X I/O Expander. @@ -699,13 +711,13 @@ config GPIO_AMD8111 config GPIO_INTEL_MID bool "Intel Mid GPIO support" depends on PCI && X86 - select IRQ_DOMAIN + select GPIOLIB_IRQCHIP help Say Y here to support Intel Mid GPIO. config GPIO_PCH tristate "Intel EG20T PCH/LAPIS Semiconductor IOH(ML7223/ML7831) GPIO" - depends on PCI && X86 + depends on PCI && (X86_32 || COMPILE_TEST) select GENERIC_IRQ_CHIP help This driver is for PCH(Platform controller Hub) GPIO of Intel Topcliff @@ -739,7 +751,7 @@ config GPIO_SODAVILLE config GPIO_TIMBERDALE bool "Support for timberdale GPIO IP" - depends on MFD_TIMBERDALE && HAS_IOMEM + depends on MFD_TIMBERDALE ---help--- Add support for the GPIO IP in the timberdale FPGA. 
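The iscsi_ibft hunk above simply adds Broadcom's "BIFT" spelling to the list of accepted table signatures; detection remains a memcmp() walk over that list. A hedged sketch of the matching step (header layout simplified; list contents per the hunk plus the standard ACPI_SIG_IBFT, which is "IBFT"):

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

static const char * const ibft_signs[] = { "IBFT", "iBFT", "BIFT" };

static bool is_ibft_signature(const char sig[4])
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(ibft_signs); i++)
		if (!memcmp(sig, ibft_signs[i], 4))
			return true;
	return false;
}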
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index 6309aff1d806..d10f6a9d875a 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -58,7 +58,7 @@ obj-$(CONFIG_GPIO_MVEBU) += gpio-mvebu.o obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o obj-$(CONFIG_GPIO_MXS) += gpio-mxs.o obj-$(CONFIG_GPIO_OCTEON) += gpio-octeon.o -obj-$(CONFIG_ARCH_OMAP) += gpio-omap.o +obj-$(CONFIG_GPIO_OMAP) += gpio-omap.o obj-$(CONFIG_GPIO_PCA953X) += gpio-pca953x.o obj-$(CONFIG_GPIO_PCF857X) += gpio-pcf857x.o obj-$(CONFIG_GPIO_PCH) += gpio-pch.o diff --git a/drivers/gpio/devres.c b/drivers/gpio/devres.c index 307464fd015f..65978cf85f79 100644 --- a/drivers/gpio/devres.c +++ b/drivers/gpio/devres.c @@ -52,6 +52,22 @@ struct gpio_desc *__must_check devm_gpiod_get(struct device *dev, EXPORT_SYMBOL(devm_gpiod_get); /** + * devm_gpiod_get_optional - Resource-managed gpiod_get_optional() + * @dev: GPIO consumer + * @con_id: function within the GPIO consumer + * + * Managed gpiod_get_optional(). GPIO descriptors returned from this function + * are automatically disposed on driver detach. See gpiod_get_optional() for + * detailed information about behavior and return values. + */ +struct gpio_desc *__must_check devm_gpiod_get_optional(struct device *dev, + const char *con_id) +{ + return devm_gpiod_get_index_optional(dev, con_id, 0); +} +EXPORT_SYMBOL(devm_gpiod_get_optional); + +/** * devm_gpiod_get_index - Resource-managed gpiod_get_index() * @dev: GPIO consumer * @con_id: function within the GPIO consumer @@ -87,6 +103,33 @@ struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev, EXPORT_SYMBOL(devm_gpiod_get_index); /** + * devm_gpiod_get_index_optional - Resource-managed gpiod_get_index_optional() + * @dev: GPIO consumer + * @con_id: function within the GPIO consumer + * @index: index of the GPIO to obtain in the consumer + * + * Managed gpiod_get_index_optional(). GPIO descriptors returned from this + * function are automatically disposed on driver detach. See + * gpiod_get_index_optional() for detailed information about behavior and + * return values. 
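The point of the *_optional() devres helpers documented in the devres.c hunks nearby is that an absent GPIO comes back as NULL rather than as an error, so consumers can treat the line as genuinely optional. Typical consumer usage, with a hypothetical "reset" con_id:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int foo_setup_reset(struct device *dev)
{
	struct gpio_desc *reset;

	reset = devm_gpiod_get_optional(dev, "reset");
	if (IS_ERR(reset))
		return PTR_ERR(reset);	/* real failure, e.g. -EPROBE_DEFER */

	if (reset) {			/* NULL just means "not wired up" */
		gpiod_direction_output(reset, 1);
		gpiod_set_value(reset, 0);
	}
	return 0;
}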
+ */ +struct gpio_desc *__must_check devm_gpiod_get_index_optional(struct device *dev, + const char *con_id, + unsigned int index) +{ + struct gpio_desc *desc; + + desc = devm_gpiod_get_index(dev, con_id, index); + if (IS_ERR(desc)) { + if (PTR_ERR(desc) == -ENOENT) + return NULL; + } + + return desc; +} +EXPORT_SYMBOL(devm_gpiod_get_index_optional); + +/** * devm_gpiod_put - Resource-managed gpiod_put() * @desc: GPIO descriptor to dispose of * diff --git a/drivers/gpio/gpio-adp5520.c b/drivers/gpio/gpio-adp5520.c index 613265944e2e..f1ade8fa3218 100644 --- a/drivers/gpio/gpio-adp5520.c +++ b/drivers/gpio/gpio-adp5520.c @@ -106,10 +106,8 @@ static int adp5520_gpio_probe(struct platform_device *pdev) } dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); - if (dev == NULL) { - dev_err(&pdev->dev, "failed to alloc memory\n"); + if (dev == NULL) return -ENOMEM; - } dev->master = pdev->dev.parent; diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c index d974020b78bb..ef19bc33f2bd 100644 --- a/drivers/gpio/gpio-adp5588.c +++ b/drivers/gpio/gpio-adp5588.c @@ -379,10 +379,8 @@ static int adp5588_gpio_probe(struct i2c_client *client, } dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (dev == NULL) { - dev_err(&client->dev, "failed to alloc memory\n"); + if (dev == NULL) return -ENOMEM; - } dev->client = client; diff --git a/drivers/gpio/gpio-bt8xx.c b/drivers/gpio/gpio-bt8xx.c index ecb3ca2d1d10..6557147d9331 100644 --- a/drivers/gpio/gpio-bt8xx.c +++ b/drivers/gpio/gpio-bt8xx.c @@ -178,7 +178,7 @@ static int bt8xxgpio_probe(struct pci_dev *dev, struct bt8xxgpio *bg; int err; - bg = kzalloc(sizeof(*bg), GFP_KERNEL); + bg = devm_kzalloc(&dev->dev, sizeof(struct bt8xxgpio), GFP_KERNEL); if (!bg) return -ENOMEM; @@ -188,9 +188,9 @@ static int bt8xxgpio_probe(struct pci_dev *dev, err = pci_enable_device(dev); if (err) { printk(KERN_ERR "bt8xxgpio: Can't enable device.\n"); - goto err_freebg; + return err; } - if (!request_mem_region(pci_resource_start(dev, 0), + if (!devm_request_mem_region(&dev->dev, pci_resource_start(dev, 0), pci_resource_len(dev, 0), "bt8xxgpio")) { printk(KERN_WARNING "bt8xxgpio: Can't request iomem (0x%llx).\n", @@ -201,11 +201,11 @@ static int bt8xxgpio_probe(struct pci_dev *dev, pci_set_master(dev); pci_set_drvdata(dev, bg); - bg->mmio = ioremap(pci_resource_start(dev, 0), 0x1000); + bg->mmio = devm_ioremap(&dev->dev, pci_resource_start(dev, 0), 0x1000); if (!bg->mmio) { printk(KERN_ERR "bt8xxgpio: ioremap() failed\n"); err = -EIO; - goto err_release_mem; + goto err_disable; } /* Disable interrupts */ @@ -220,18 +220,13 @@ static int bt8xxgpio_probe(struct pci_dev *dev, err = gpiochip_add(&bg->gpio); if (err) { printk(KERN_ERR "bt8xxgpio: Failed to register GPIOs\n"); - goto err_release_mem; + goto err_disable; } return 0; -err_release_mem: - release_mem_region(pci_resource_start(dev, 0), - pci_resource_len(dev, 0)); err_disable: pci_disable_device(dev); -err_freebg: - kfree(bg); return err; } @@ -250,8 +245,6 @@ static void bt8xxgpio_remove(struct pci_dev *pdev) release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); pci_disable_device(pdev); - - kfree(bg); } #ifdef CONFIG_PM diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c index 339f9dac591b..9f0682534e2f 100644 --- a/drivers/gpio/gpio-davinci.c +++ b/drivers/gpio/gpio-davinci.c @@ -230,10 +230,8 @@ static int davinci_gpio_probe(struct platform_device *pdev) chips = devm_kzalloc(dev, ngpio * sizeof(struct davinci_gpio_controller), GFP_KERNEL); - if (!chips) { - 
dev_err(dev, "Memory allocation failed\n"); + if (!chips) return -ENOMEM; - } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c index ed5711f77e2d..cd3b81435274 100644 --- a/drivers/gpio/gpio-dwapb.c +++ b/drivers/gpio/gpio-dwapb.c @@ -198,6 +198,8 @@ static int dwapb_irq_set_type(struct irq_data *d, u32 type) break; } + irq_setup_alt_chip(d, type); + writel(level, gpio->regs + GPIO_INTTYPE_LEVEL); writel(polarity, gpio->regs + GPIO_INT_POLARITY); spin_unlock_irqrestore(&bgc->lock, flags); @@ -213,7 +215,7 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio, struct irq_chip_generic *irq_gc; unsigned int hwirq, ngpio = gc->ngpio; struct irq_chip_type *ct; - int err, irq; + int err, irq, i; irq = irq_of_parse_and_map(node, 0); if (!irq) { @@ -227,7 +229,7 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio, if (!gpio->domain) return; - err = irq_alloc_domain_generic_chips(gpio->domain, ngpio, 1, + err = irq_alloc_domain_generic_chips(gpio->domain, ngpio, 2, "gpio-dwapb", handle_level_irq, IRQ_NOREQUEST, 0, IRQ_GC_INIT_NESTED_LOCK); @@ -248,20 +250,24 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio, irq_gc->reg_base = gpio->regs; irq_gc->private = gpio; - ct = irq_gc->chip_types; - ct->chip.irq_ack = irq_gc_ack_set_bit; - ct->chip.irq_mask = irq_gc_mask_set_bit; - ct->chip.irq_unmask = irq_gc_mask_clr_bit; - ct->chip.irq_set_type = dwapb_irq_set_type; - ct->chip.irq_enable = dwapb_irq_enable; - ct->chip.irq_disable = dwapb_irq_disable; - ct->chip.irq_request_resources = dwapb_irq_reqres; - ct->chip.irq_release_resources = dwapb_irq_relres; - ct->regs.ack = GPIO_PORTA_EOI; - ct->regs.mask = GPIO_INTMASK; - - irq_setup_generic_chip(irq_gc, IRQ_MSK(port->bgc.gc.ngpio), - IRQ_GC_INIT_NESTED_LOCK, IRQ_NOREQUEST, 0); + for (i = 0; i < 2; i++) { + ct = &irq_gc->chip_types[i]; + ct->chip.irq_ack = irq_gc_ack_set_bit; + ct->chip.irq_mask = irq_gc_mask_set_bit; + ct->chip.irq_unmask = irq_gc_mask_clr_bit; + ct->chip.irq_set_type = dwapb_irq_set_type; + ct->chip.irq_enable = dwapb_irq_enable; + ct->chip.irq_disable = dwapb_irq_disable; + ct->chip.irq_request_resources = dwapb_irq_reqres; + ct->chip.irq_release_resources = dwapb_irq_relres; + ct->regs.ack = GPIO_PORTA_EOI; + ct->regs.mask = GPIO_INTMASK; + ct->type = IRQ_TYPE_LEVEL_MASK; + } + + irq_gc->chip_types[0].type = IRQ_TYPE_LEVEL_MASK; + irq_gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH; + irq_gc->chip_types[1].handler = handle_edge_irq; irq_set_chained_handler(irq, dwapb_irq_handler); irq_set_handler_data(irq, gpio); diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c index 8765bd6f48e1..cde36054c387 100644 --- a/drivers/gpio/gpio-em.c +++ b/drivers/gpio/gpio-em.c @@ -212,7 +212,7 @@ static void __em_gio_set(struct gpio_chip *chip, unsigned int reg, { /* upper 16 bits contains mask and lower 16 actual value */ em_gio_write(gpio_to_priv(chip), reg, - (1 << (shift + 16)) | (value << shift)); + (BIT(shift + 16)) | (value << shift)); } static void em_gio_set(struct gpio_chip *chip, unsigned offset, int value) @@ -284,7 +284,6 @@ static int em_gio_probe(struct platform_device *pdev) p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL); if (!p) { - dev_err(&pdev->dev, "failed to allocate driver data\n"); ret = -ENOMEM; goto err0; } diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c index 80829f3c6543..dcc2bb4074ef 100644 --- a/drivers/gpio/gpio-ep93xx.c +++ b/drivers/gpio/gpio-ep93xx.c @@ -344,37 +344,24 @@ static 
int ep93xx_gpio_probe(struct platform_device *pdev) { struct ep93xx_gpio *ep93xx_gpio; struct resource *res; - void __iomem *mmio; int i; - int ret; + struct device *dev = &pdev->dev; - ep93xx_gpio = kzalloc(sizeof(*ep93xx_gpio), GFP_KERNEL); + ep93xx_gpio = devm_kzalloc(dev, sizeof(struct ep93xx_gpio), GFP_KERNEL); if (!ep93xx_gpio) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - ret = -ENXIO; - goto exit_free; - } - - if (!request_mem_region(res->start, resource_size(res), pdev->name)) { - ret = -EBUSY; - goto exit_free; - } - - mmio = ioremap(res->start, resource_size(res)); - if (!mmio) { - ret = -ENXIO; - goto exit_release; - } - ep93xx_gpio->mmio_base = mmio; + ep93xx_gpio->mmio_base = devm_ioremap_resource(dev, res); + if (IS_ERR(ep93xx_gpio->mmio_base)) + return PTR_ERR(ep93xx_gpio->mmio_base); for (i = 0; i < ARRAY_SIZE(ep93xx_gpio_banks); i++) { struct bgpio_chip *bgc = &ep93xx_gpio->bgc[i]; struct ep93xx_gpio_bank *bank = &ep93xx_gpio_banks[i]; - if (ep93xx_gpio_add_bank(bgc, &pdev->dev, mmio, bank)) + if (ep93xx_gpio_add_bank(bgc, &pdev->dev, + ep93xx_gpio->mmio_base, bank)) dev_warn(&pdev->dev, "Unable to add gpio bank %s\n", bank->label); } @@ -382,13 +369,6 @@ static int ep93xx_gpio_probe(struct platform_device *pdev) ep93xx_gpio_init_irq(); return 0; - -exit_release: - release_mem_region(res->start, resource_size(res)); -exit_free: - kfree(ep93xx_gpio); - dev_info(&pdev->dev, "%s failed with errno %d\n", __func__, ret); - return ret; } static struct platform_driver ep93xx_gpio_driver = { diff --git a/drivers/gpio/gpio-ge.c b/drivers/gpio/gpio-ge.c index 7b95a4a8318c..1237a73c3c91 100644 --- a/drivers/gpio/gpio-ge.c +++ b/drivers/gpio/gpio-ge.c @@ -18,15 +18,9 @@ */ #include <linux/kernel.h> -#include <linux/compiler.h> -#include <linux/init.h> #include <linux/io.h> -#include <linux/of.h> #include <linux/of_device.h> -#include <linux/of_platform.h> #include <linux/of_gpio.h> -#include <linux/gpio.h> -#include <linux/slab.h> #include <linux/module.h> #define GEF_GPIO_DIRECT 0x00 @@ -39,28 +33,26 @@ #define GEF_GPIO_OVERRUN 0x1C #define GEF_GPIO_MODE 0x20 -static void _gef_gpio_set(void __iomem *reg, unsigned int offset, int value) +static void gef_gpio_set(struct gpio_chip *chip, unsigned offset, int value) { + struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip); unsigned int data; - data = ioread32be(reg); - /* value: 0=low; 1=high */ - if (value & 0x1) - data = data | (0x1 << offset); + data = ioread32be(mmchip->regs + GEF_GPIO_OUT); + if (value) + data = data | BIT(offset); else - data = data & ~(0x1 << offset); - - iowrite32be(data, reg); + data = data & ~BIT(offset); + iowrite32be(data, mmchip->regs + GEF_GPIO_OUT); } - static int gef_gpio_dir_in(struct gpio_chip *chip, unsigned offset) { unsigned int data; struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip); data = ioread32be(mmchip->regs + GEF_GPIO_DIRECT); - data = data | (0x1 << offset); + data = data | BIT(offset); iowrite32be(data, mmchip->regs + GEF_GPIO_DIRECT); return 0; @@ -71,11 +63,11 @@ static int gef_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int value) unsigned int data; struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip); - /* Set direction before switching to input */ - _gef_gpio_set(mmchip->regs + GEF_GPIO_OUT, offset, value); + /* Set value before switching to output */ + gef_gpio_set(mmchip->regs + GEF_GPIO_OUT, offset, value); data = ioread32be(mmchip->regs + GEF_GPIO_DIRECT); - data = data & ~(0x1 << offset); + data = data & 
~BIT(offset); iowrite32be(data, mmchip->regs + GEF_GPIO_DIRECT); return 0; @@ -83,116 +75,56 @@ static int gef_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int value) static int gef_gpio_get(struct gpio_chip *chip, unsigned offset) { - unsigned int data; - int state = 0; struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip); - data = ioread32be(mmchip->regs + GEF_GPIO_IN); - state = (int)((data >> offset) & 0x1); - - return state; + return !!(ioread32be(mmchip->regs + GEF_GPIO_IN) & BIT(offset)); } -static void gef_gpio_set(struct gpio_chip *chip, unsigned offset, int value) -{ - struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip); - - _gef_gpio_set(mmchip->regs + GEF_GPIO_OUT, offset, value); -} +static const struct of_device_id gef_gpio_ids[] = { + { + .compatible = "gef,sbc610-gpio", + .data = (void *)19, + }, { + .compatible = "gef,sbc310-gpio", + .data = (void *)6, + }, { + .compatible = "ge,imp3a-gpio", + .data = (void *)16, + }, + { } +}; +MODULE_DEVICE_TABLE(of, gef_gpio_ids); -static int __init gef_gpio_init(void) +static int __init gef_gpio_probe(struct platform_device *pdev) { - struct device_node *np; - int retval; - struct of_mm_gpio_chip *gef_gpio_chip; - - for_each_compatible_node(np, NULL, "gef,sbc610-gpio") { - - pr_debug("%s: Initialising GEF GPIO\n", np->full_name); - - /* Allocate chip structure */ - gef_gpio_chip = kzalloc(sizeof(*gef_gpio_chip), GFP_KERNEL); - if (!gef_gpio_chip) { - pr_err("%s: Unable to allocate structure\n", - np->full_name); - continue; - } - - /* Setup pointers to chip functions */ - gef_gpio_chip->gc.of_gpio_n_cells = 2; - gef_gpio_chip->gc.ngpio = 19; - gef_gpio_chip->gc.direction_input = gef_gpio_dir_in; - gef_gpio_chip->gc.direction_output = gef_gpio_dir_out; - gef_gpio_chip->gc.get = gef_gpio_get; - gef_gpio_chip->gc.set = gef_gpio_set; - - /* This function adds a memory mapped GPIO chip */ - retval = of_mm_gpiochip_add(np, gef_gpio_chip); - if (retval) { - kfree(gef_gpio_chip); - pr_err("%s: Unable to add GPIO\n", np->full_name); - } - } - - for_each_compatible_node(np, NULL, "gef,sbc310-gpio") { - - pr_debug("%s: Initialising GEF GPIO\n", np->full_name); - - /* Allocate chip structure */ - gef_gpio_chip = kzalloc(sizeof(*gef_gpio_chip), GFP_KERNEL); - if (!gef_gpio_chip) { - pr_err("%s: Unable to allocate structure\n", - np->full_name); - continue; - } - - /* Setup pointers to chip functions */ - gef_gpio_chip->gc.of_gpio_n_cells = 2; - gef_gpio_chip->gc.ngpio = 6; - gef_gpio_chip->gc.direction_input = gef_gpio_dir_in; - gef_gpio_chip->gc.direction_output = gef_gpio_dir_out; - gef_gpio_chip->gc.get = gef_gpio_get; - gef_gpio_chip->gc.set = gef_gpio_set; - - /* This function adds a memory mapped GPIO chip */ - retval = of_mm_gpiochip_add(np, gef_gpio_chip); - if (retval) { - kfree(gef_gpio_chip); - pr_err("%s: Unable to add GPIO\n", np->full_name); - } - } - - for_each_compatible_node(np, NULL, "ge,imp3a-gpio") { - - pr_debug("%s: Initialising GE GPIO\n", np->full_name); - - /* Allocate chip structure */ - gef_gpio_chip = kzalloc(sizeof(*gef_gpio_chip), GFP_KERNEL); - if (!gef_gpio_chip) { - pr_err("%s: Unable to allocate structure\n", - np->full_name); - continue; - } - - /* Setup pointers to chip functions */ - gef_gpio_chip->gc.of_gpio_n_cells = 2; - gef_gpio_chip->gc.ngpio = 16; - gef_gpio_chip->gc.direction_input = gef_gpio_dir_in; - gef_gpio_chip->gc.direction_output = gef_gpio_dir_out; - gef_gpio_chip->gc.get = gef_gpio_get; - gef_gpio_chip->gc.set = gef_gpio_set; - - /* This function adds a memory mapped GPIO chip 
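The gpio-ge rework above also converts open-coded (1 << offset) arithmetic to the BIT() macro from <linux/bitops.h>; the behavior is unchanged, the intent is just more explicit. For instance, the read-back in gef_gpio_get() boils down to:

#include <linux/bitops.h>
#include <linux/types.h>

static bool line_is_high(u32 datain, unsigned int offset)
{
	/* BIT(n) expands to (1UL << (n)); !! normalizes to 0 or 1 */
	return !!(datain & BIT(offset));
}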
*/ - retval = of_mm_gpiochip_add(np, gef_gpio_chip); - if (retval) { - kfree(gef_gpio_chip); - pr_err("%s: Unable to add GPIO\n", np->full_name); - } - } + const struct of_device_id *of_id = + of_match_device(gef_gpio_ids, &pdev->dev); + struct of_mm_gpio_chip *mmchip; + + mmchip = devm_kzalloc(&pdev->dev, sizeof(*mmchip), GFP_KERNEL); + if (!mmchip) + return -ENOMEM; + + /* Setup pointers to chip functions */ + mmchip->gc.ngpio = (u16)(uintptr_t)of_id->data; + mmchip->gc.of_gpio_n_cells = 2; + mmchip->gc.direction_input = gef_gpio_dir_in; + mmchip->gc.direction_output = gef_gpio_dir_out; + mmchip->gc.get = gef_gpio_get; + mmchip->gc.set = gef_gpio_set; + + /* This function adds a memory mapped GPIO chip */ + return of_mm_gpiochip_add(pdev->dev.of_node, mmchip); +}; - return 0; +static struct platform_driver gef_gpio_driver = { + .driver = { + .name = "gef-gpio", + .owner = THIS_MODULE, + .of_match_table = gef_gpio_ids, + }, }; -arch_initcall(gef_gpio_init); +module_platform_driver_probe(gef_gpio_driver, gef_gpio_probe); MODULE_DESCRIPTION("GE I/O FPGA GPIO driver"); MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com"); diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c index b5dff9e742f8..fea8c82bb8fc 100644 --- a/drivers/gpio/gpio-generic.c +++ b/drivers/gpio/gpio-generic.c @@ -388,6 +388,14 @@ static int bgpio_setup_direction(struct bgpio_chip *bgc, return 0; } +static int bgpio_request(struct gpio_chip *chip, unsigned gpio_pin) +{ + if (gpio_pin < chip->ngpio) + return 0; + + return -EINVAL; +} + int bgpio_remove(struct bgpio_chip *bgc) { return gpiochip_remove(&bgc->gc); @@ -413,6 +421,7 @@ int bgpio_init(struct bgpio_chip *bgc, struct device *dev, bgc->gc.label = dev_name(dev); bgc->gc.base = -1; bgc->gc.ngpio = bgc->bits; + bgc->gc.request = bgpio_request; ret = bgpio_setup_io(bgc, dat, set, clr); if (ret) diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c index 84d2478ec294..3c3f515b7916 100644 --- a/drivers/gpio/gpio-grgpio.c +++ b/drivers/gpio/gpio-grgpio.c @@ -481,7 +481,7 @@ out: return ret; } -static struct of_device_id grgpio_match[] = { +static const struct of_device_id grgpio_match[] = { {.name = "GAISLER_GPIO"}, {.name = "01_01a"}, {}, diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c index e73c6755a5eb..70304220a479 100644 --- a/drivers/gpio/gpio-ich.c +++ b/drivers/gpio/gpio-ich.c @@ -305,6 +305,8 @@ static struct ichx_desc ich6_desc = { .ngpio = 50, .have_blink = true, + .regs = ichx_regs, + .reglen = ichx_reglen, }; /* Intel 3100 */ @@ -324,6 +326,8 @@ static struct ichx_desc i3100_desc = { .uses_gpe0 = true, .ngpio = 50, + .regs = ichx_regs, + .reglen = ichx_reglen, }; /* ICH7 and ICH8-based */ diff --git a/drivers/gpio/gpio-janz-ttl.c b/drivers/gpio/gpio-janz-ttl.c index 2ecd3a09c743..42852eaaf020 100644 --- a/drivers/gpio/gpio-janz-ttl.c +++ b/drivers/gpio/gpio-janz-ttl.c @@ -152,34 +152,21 @@ static int ttl_probe(struct platform_device *pdev) pdata = dev_get_platdata(&pdev->dev); if (!pdata) { dev_err(dev, "no platform data\n"); - ret = -ENXIO; - goto out_return; + return -ENXIO; } - mod = kzalloc(sizeof(*mod), GFP_KERNEL); - if (!mod) { - dev_err(dev, "unable to allocate private data\n"); - ret = -ENOMEM; - goto out_return; - } + mod = devm_kzalloc(dev, sizeof(*mod), GFP_KERNEL); + if (!mod) + return -ENOMEM; platform_set_drvdata(pdev, mod); spin_lock_init(&mod->lock); /* get access to the MODULbus registers for this module */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(dev, 
"MODULbus registers not found\n"); - ret = -ENODEV; - goto out_free_mod; - } - - mod->regs = ioremap(res->start, resource_size(res)); - if (!mod->regs) { - dev_err(dev, "MODULbus registers not ioremap\n"); - ret = -ENOMEM; - goto out_free_mod; - } + mod->regs = devm_ioremap_resource(dev, res); + if (IS_ERR(mod->regs)) + return PTR_ERR(mod->regs); ttl_setup_device(mod); @@ -198,17 +185,10 @@ static int ttl_probe(struct platform_device *pdev) ret = gpiochip_add(gpio); if (ret) { dev_err(dev, "unable to add GPIO chip\n"); - goto out_iounmap_regs; + return ret; } return 0; - -out_iounmap_regs: - iounmap(mod->regs); -out_free_mod: - kfree(mod); -out_return: - return ret; } static int ttl_remove(struct platform_device *pdev) @@ -223,8 +203,6 @@ static int ttl_remove(struct platform_device *pdev) return ret; } - iounmap(mod->regs); - kfree(mod); return 0; } diff --git a/drivers/gpio/gpio-kempld.c b/drivers/gpio/gpio-kempld.c index c6d88173f5a2..1e5e51987d31 100644 --- a/drivers/gpio/gpio-kempld.c +++ b/drivers/gpio/gpio-kempld.c @@ -24,7 +24,7 @@ #include <linux/mfd/kempld.h> #define KEMPLD_GPIO_MAX_NUM 16 -#define KEMPLD_GPIO_MASK(x) (1 << ((x) % 8)) +#define KEMPLD_GPIO_MASK(x) (BIT((x) % 8)) #define KEMPLD_GPIO_DIR_NUM(x) (0x40 + (x) / 8) #define KEMPLD_GPIO_LVL_NUM(x) (0x42 + (x) / 8) #define KEMPLD_GPIO_EVT_LVL_EDGE 0x46 @@ -216,4 +216,4 @@ module_platform_driver(kempld_gpio_driver); MODULE_DESCRIPTION("KEM PLD GPIO Driver"); MODULE_AUTHOR("Michael Brunner <michael.brunner@kontron.com>"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:gpio-kempld"); +MODULE_ALIAS("platform:kempld-gpio"); diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c index 9a82a9074a2c..2bea89b72508 100644 --- a/drivers/gpio/gpio-lynxpoint.c +++ b/drivers/gpio/gpio-lynxpoint.c @@ -375,10 +375,8 @@ static int lp_gpio_probe(struct platform_device *pdev) int ret = -ENODEV; lg = devm_kzalloc(dev, sizeof(struct lp_gpio), GFP_KERNEL); - if (!lg) { - dev_err(dev, "can't allocate lp_gpio chip data\n"); + if (!lg) return -ENOMEM; - } lg->pdev = pdev; platform_set_drvdata(pdev, lg); diff --git a/drivers/gpio/gpio-max730x.c b/drivers/gpio/gpio-max730x.c index 8672755f95c9..0814584fcdc1 100644 --- a/drivers/gpio/gpio-max730x.c +++ b/drivers/gpio/gpio-max730x.c @@ -237,10 +237,9 @@ int __max730x_remove(struct device *dev) ts->write(dev, 0x04, 0x00); ret = gpiochip_remove(&ts->chip); - if (!ret) { + if (!ret) mutex_destroy(&ts->lock); - kfree(ts); - } else + else dev_err(dev, "Failed to remove GPIO controller: %d\n", ret); return ret; diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c index 99a68310e7c0..fe7c0e211f9a 100644 --- a/drivers/gpio/gpio-mcp23s08.c +++ b/drivers/gpio/gpio-mcp23s08.c @@ -714,7 +714,7 @@ fail: #ifdef CONFIG_OF #ifdef CONFIG_SPI_MASTER -static struct of_device_id mcp23s08_spi_of_match[] = { +static const struct of_device_id mcp23s08_spi_of_match[] = { { .compatible = "microchip,mcp23s08", .data = (void *) MCP_TYPE_S08, @@ -738,7 +738,7 @@ MODULE_DEVICE_TABLE(of, mcp23s08_spi_of_match); #endif #if IS_ENABLED(CONFIG_I2C) -static struct of_device_id mcp23s08_i2c_of_match[] = { +static const struct of_device_id mcp23s08_i2c_of_match[] = { { .compatible = "microchip,mcp23008", .data = (void *) MCP_TYPE_008, @@ -867,7 +867,7 @@ static int mcp23s08_probe(struct spi_device *spi) { struct mcp23s08_platform_data *pdata; unsigned addr; - unsigned chips = 0; + int chips = 0; struct mcp23s08_driver_data *data; int status, type; unsigned base = -1, @@ -895,8 +895,13 @@ static int 
mcp23s08_probe(struct spi_device *spi) return -ENODEV; } - for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) + for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) { pullups[addr] = 0; + if (spi_present_mask & (1 << addr)) + chips++; + } + if (!chips) + return -ENODEV; } else { type = spi_get_device_id(spi)->driver_data; pdata = dev_get_platdata(&spi->dev); @@ -919,12 +924,12 @@ static int mcp23s08_probe(struct spi_device *spi) pullups[addr] = pdata->chip[addr].pullups; } - if (!chips) - return -ENODEV; - base = pdata->base; } + if (!chips) + return -ENODEV; + data = kzalloc(sizeof(*data) + chips * sizeof(struct mcp23s08), GFP_KERNEL); if (!data) @@ -935,6 +940,10 @@ static int mcp23s08_probe(struct spi_device *spi) if (!(spi_present_mask & (1 << addr))) continue; chips--; + if (chips < 0) { + dev_err(&spi->dev, "FATAL: invalid negative chip id\n"); + goto fail; + } data->mcp[addr] = &data->chip[chips]; status = mcp23s08_probe_one(data->mcp[addr], &spi->dev, spi, 0x40 | (addr << 1), type, base, diff --git a/drivers/gpio/gpio-moxart.c b/drivers/gpio/gpio-moxart.c index ccd45704e5fd..4661e181be04 100644 --- a/drivers/gpio/gpio-moxart.c +++ b/drivers/gpio/gpio-moxart.c @@ -113,10 +113,8 @@ static int moxart_gpio_probe(struct platform_device *pdev) int ret; mgc = devm_kzalloc(dev, sizeof(*mgc), GFP_KERNEL); - if (!mgc) { - dev_err(dev, "can't allocate GPIO chip container\n"); + if (!mgc) return -ENOMEM; - } mgc->gpio = moxart_template_chip; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index d42509422394..418e38650363 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c @@ -535,7 +535,7 @@ static void mvebu_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip) #define mvebu_gpio_dbg_show NULL #endif -static struct of_device_id mvebu_gpio_of_match[] = { +static const struct of_device_id mvebu_gpio_of_match[] = { { .compatible = "marvell,orion-gpio", .data = (void *) MVEBU_GPIO_SOC_VARIANT_ORION, @@ -574,10 +574,8 @@ static int mvebu_gpio_probe(struct platform_device *pdev) soc_variant = MVEBU_GPIO_SOC_VARIANT_ORION; mvchip = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_gpio_chip), GFP_KERNEL); - if (!mvchip) { - dev_err(&pdev->dev, "Cannot allocate memory\n"); + if (!mvchip) return -ENOMEM; - } if (of_property_read_u32(pdev->dev.of_node, "ngpios", &ngpios)) { dev_err(&pdev->dev, "Missing ngpios OF property\n"); @@ -738,9 +736,4 @@ static struct platform_driver mvebu_gpio_driver = { }, .probe = mvebu_gpio_probe, }; - -static int __init mvebu_gpio_init(void) -{ - return platform_driver_register(&mvebu_gpio_driver); -} -postcore_initcall(mvebu_gpio_init); +module_platform_driver(mvebu_gpio_driver); diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index 19b886c21b1d..00f29aa1fb9d 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c @@ -24,9 +24,9 @@ #include <linux/pm.h> #include <linux/of.h> #include <linux/of_device.h> -#include <linux/irqdomain.h> #include <linux/irqchip/chained_irq.h> #include <linux/gpio.h> +#include <linux/bitops.h> #include <linux/platform_data/gpio-omap.h> #define OFF_MODE 1 @@ -52,7 +52,6 @@ struct gpio_bank { struct list_head node; void __iomem *base; u16 irq; - struct irq_domain *domain; u32 non_wakeup_gpios; u32 enabled_non_wakeup_gpios; struct gpio_regs context; @@ -84,22 +83,21 @@ struct gpio_bank { }; #define GPIO_INDEX(bank, gpio) (gpio % bank->width) -#define GPIO_BIT(bank, gpio) (1 << GPIO_INDEX(bank, gpio)) +#define 
GPIO_BIT(bank, gpio) (BIT(GPIO_INDEX(bank, gpio))) #define GPIO_MOD_CTRL_BIT BIT(0) #define BANK_USED(bank) (bank->mod_usage || bank->irq_usage) -#define LINE_USED(line, offset) (line & (1 << offset)) +#define LINE_USED(line, offset) (line & (BIT(offset))) static int irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq) { return bank->chip.base + gpio_irq; } -static int omap_gpio_to_irq(struct gpio_chip *chip, unsigned offset) +static inline struct gpio_bank *_irq_data_get_bank(struct irq_data *d) { - struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip); - - return irq_find_mapping(bank->domain, offset); + struct gpio_chip *chip = irq_data_get_irq_chip_data(d); + return container_of(chip, struct gpio_bank, chip); } static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input) @@ -110,9 +108,9 @@ static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input) reg += bank->regs->direction; l = readl_relaxed(reg); if (is_input) - l |= 1 << gpio; + l |= BIT(gpio); else - l &= ~(1 << gpio); + l &= ~(BIT(gpio)); writel_relaxed(l, reg); bank->context.oe = l; } @@ -155,14 +153,14 @@ static int _get_gpio_datain(struct gpio_bank *bank, int offset) { void __iomem *reg = bank->base + bank->regs->datain; - return (readl_relaxed(reg) & (1 << offset)) != 0; + return (readl_relaxed(reg) & (BIT(offset))) != 0; } static int _get_gpio_dataout(struct gpio_bank *bank, int offset) { void __iomem *reg = bank->base + bank->regs->dataout; - return (readl_relaxed(reg) & (1 << offset)) != 0; + return (readl_relaxed(reg) & (BIT(offset))) != 0; } static inline void _gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set) @@ -180,7 +178,7 @@ static inline void _gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set) static inline void _gpio_dbck_enable(struct gpio_bank *bank) { if (bank->dbck_enable_mask && !bank->dbck_enabled) { - clk_enable(bank->dbck); + clk_prepare_enable(bank->dbck); bank->dbck_enabled = true; writel_relaxed(bank->dbck_enable_mask, @@ -198,7 +196,7 @@ static inline void _gpio_dbck_disable(struct gpio_bank *bank) */ writel_relaxed(0, bank->base + bank->regs->debounce_en); - clk_disable(bank->dbck); + clk_disable_unprepare(bank->dbck); bank->dbck_enabled = false; } } @@ -231,7 +229,7 @@ static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio, l = GPIO_BIT(bank, gpio); - clk_enable(bank->dbck); + clk_prepare_enable(bank->dbck); reg = bank->base + bank->regs->debounce; writel_relaxed(debounce, reg); @@ -245,7 +243,7 @@ static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio, bank->dbck_enable_mask = val; writel_relaxed(val, reg); - clk_disable(bank->dbck); + clk_disable_unprepare(bank->dbck); /* * Enable debounce clock per module. 
* This call is mandatory because in omap_gpio_request() when @@ -290,7 +288,7 @@ static void _clear_gpio_debounce(struct gpio_bank *bank, unsigned gpio) bank->context.debounce = 0; writel_relaxed(bank->context.debounce, bank->base + bank->regs->debounce); - clk_disable(bank->dbck); + clk_disable_unprepare(bank->dbck); bank->dbck_enabled = false; } } @@ -299,7 +297,7 @@ static inline void set_gpio_trigger(struct gpio_bank *bank, int gpio, unsigned trigger) { void __iomem *base = bank->base; - u32 gpio_bit = 1 << gpio; + u32 gpio_bit = BIT(gpio); _gpio_rmw(base, bank->regs->leveldetect0, gpio_bit, trigger & IRQ_TYPE_LEVEL_LOW); @@ -368,9 +366,9 @@ static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) l = readl_relaxed(reg); if ((l >> gpio) & 1) - l &= ~(1 << gpio); + l &= ~(BIT(gpio)); else - l |= 1 << gpio; + l |= BIT(gpio); writel_relaxed(l, reg); } @@ -392,11 +390,11 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, l = readl_relaxed(reg); if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) - bank->toggle_mask |= 1 << gpio; + bank->toggle_mask |= BIT(gpio); if (trigger & IRQ_TYPE_EDGE_RISING) - l |= 1 << gpio; + l |= BIT(gpio); else if (trigger & IRQ_TYPE_EDGE_FALLING) - l &= ~(1 << gpio); + l &= ~(BIT(gpio)); else return -EINVAL; @@ -413,10 +411,10 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, if (trigger & IRQ_TYPE_EDGE_RISING) l |= 2 << (gpio << 1); if (trigger & IRQ_TYPE_EDGE_FALLING) - l |= 1 << (gpio << 1); + l |= BIT(gpio << 1); /* Enable wake-up during idle for dynamic tick */ - _gpio_rmw(base, bank->regs->wkup_en, 1 << gpio, trigger); + _gpio_rmw(base, bank->regs->wkup_en, BIT(gpio), trigger); bank->context.wake_en = readl_relaxed(bank->base + bank->regs->wkup_en); writel_relaxed(l, reg); @@ -430,7 +428,7 @@ static void _enable_gpio_module(struct gpio_bank *bank, unsigned offset) void __iomem *reg = bank->base + bank->regs->pinctrl; /* Claim the pin for MPU */ - writel_relaxed(readl_relaxed(reg) | (1 << offset), reg); + writel_relaxed(readl_relaxed(reg) | (BIT(offset)), reg); } if (bank->regs->ctrl && !BANK_USED(bank)) { @@ -453,7 +451,7 @@ static void _disable_gpio_module(struct gpio_bank *bank, unsigned offset) !LINE_USED(bank->mod_usage, offset) && !LINE_USED(bank->irq_usage, offset)) { /* Disable wake-up during idle for dynamic tick */ - _gpio_rmw(base, bank->regs->wkup_en, 1 << offset, 0); + _gpio_rmw(base, bank->regs->wkup_en, BIT(offset), 0); bank->context.wake_en = readl_relaxed(bank->base + bank->regs->wkup_en); } @@ -479,7 +477,7 @@ static int gpio_is_input(struct gpio_bank *bank, int mask) static int gpio_irq_type(struct irq_data *d, unsigned type) { - struct gpio_bank *bank = irq_data_get_irq_chip_data(d); + struct gpio_bank *bank = _irq_data_get_bank(d); unsigned gpio = 0; int retval; unsigned long flags; @@ -509,20 +507,12 @@ static int gpio_irq_type(struct irq_data *d, unsigned type) if (!LINE_USED(bank->mod_usage, offset)) { _enable_gpio_module(bank, offset); _set_gpio_direction(bank, offset, 1); - } else if (!gpio_is_input(bank, 1 << offset)) { + } else if (!gpio_is_input(bank, BIT(offset))) { spin_unlock_irqrestore(&bank->lock, flags); return -EINVAL; } - retval = gpio_lock_as_irq(&bank->chip, offset); - if (retval) { - dev_err(bank->dev, "unable to lock offset %d for IRQ\n", - offset); - spin_unlock_irqrestore(&bank->lock, flags); - return retval; - } - - bank->irq_usage |= 1 << GPIO_INDEX(bank, gpio); + bank->irq_usage |= BIT(GPIO_INDEX(bank, gpio)); spin_unlock_irqrestore(&bank->lock, 
flags); if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) @@ -559,7 +549,7 @@ static u32 _get_gpio_irqbank_mask(struct gpio_bank *bank) { void __iomem *reg = bank->base; u32 l; - u32 mask = (1 << bank->width) - 1; + u32 mask = (BIT(bank->width)) - 1; reg += bank->regs->irqenable; l = readl_relaxed(reg); @@ -664,7 +654,7 @@ static void _reset_gpio(struct gpio_bank *bank, int gpio) /* Use disable_irq_wake() and enable_irq_wake() functions from drivers */ static int gpio_wake_enable(struct irq_data *d, unsigned int enable) { - struct gpio_bank *bank = irq_data_get_irq_chip_data(d); + struct gpio_bank *bank = _irq_data_get_bank(d); unsigned int gpio = irq_to_gpio(bank, d->hwirq); return _set_gpio_wakeup(bank, gpio, enable); @@ -691,7 +681,7 @@ static int omap_gpio_request(struct gpio_chip *chip, unsigned offset) _set_gpio_triggering(bank, offset, IRQ_TYPE_NONE); _enable_gpio_module(bank, offset); } - bank->mod_usage |= 1 << offset; + bank->mod_usage |= BIT(offset); spin_unlock_irqrestore(&bank->lock, flags); return 0; @@ -703,7 +693,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset) unsigned long flags; spin_lock_irqsave(&bank->lock, flags); - bank->mod_usage &= ~(1 << offset); + bank->mod_usage &= ~(BIT(offset)); _disable_gpio_module(bank, offset); _reset_gpio(bank, bank->chip.base + offset); spin_unlock_irqrestore(&bank->lock, flags); @@ -732,11 +722,12 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc) unsigned int bit; struct gpio_bank *bank; int unmasked = 0; - struct irq_chip *chip = irq_desc_get_chip(desc); + struct irq_chip *irqchip = irq_desc_get_chip(desc); + struct gpio_chip *chip = irq_get_handler_data(irq); - chained_irq_enter(chip, desc); + chained_irq_enter(irqchip, desc); - bank = irq_get_handler_data(irq); + bank = container_of(chip, struct gpio_bank, chip); isr_reg = bank->base + bank->regs->irqstatus; pm_runtime_get_sync(bank->dev); @@ -764,7 +755,7 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc) configured, we could unmask GPIO bank interrupt immediately */ if (!level_mask && !unmasked) { unmasked = 1; - chained_irq_exit(chip, desc); + chained_irq_exit(irqchip, desc); } if (!isr) @@ -772,7 +763,7 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc) while (isr) { bit = __ffs(isr); - isr &= ~(1 << bit); + isr &= ~(BIT(bit)); /* * Some chips can't respond to both rising and falling @@ -781,10 +772,11 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc) * to respond to the IRQ for the opposite direction. * This will be indicated in the bank toggle_mask. 
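Throughout the gpio-omap conversion above, the driver no longer stores the bank as irq chip data: gpiolib registers the gpio_chip there, and the driver recovers its private state with container_of(), as _irq_data_get_bank() does. The general shape of that helper, with foo_bank standing in for the driver state:

#include <linux/gpio/driver.h>
#include <linux/irq.h>
#include <linux/kernel.h>

struct foo_bank {			/* stand-in for the driver state */
	void __iomem *base;
	struct gpio_chip chip;
};

static struct foo_bank *foo_bank_from_irq(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_get_irq_chip_data(d);

	return container_of(chip, struct foo_bank, chip);
}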
*/ - if (bank->toggle_mask & (1 << bit)) + if (bank->toggle_mask & (BIT(bit))) _toggle_gpio_edge_triggering(bank, bit); - generic_handle_irq(irq_find_mapping(bank->domain, bit)); + generic_handle_irq(irq_find_mapping(bank->chip.irqdomain, + bit)); } } /* if bank has any level sensitive GPIO pin interrupt @@ -793,20 +785,20 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc) interrupt */ exit: if (!unmasked) - chained_irq_exit(chip, desc); + chained_irq_exit(irqchip, desc); pm_runtime_put(bank->dev); } static void gpio_irq_shutdown(struct irq_data *d) { - struct gpio_bank *bank = irq_data_get_irq_chip_data(d); + struct gpio_bank *bank = _irq_data_get_bank(d); unsigned int gpio = irq_to_gpio(bank, d->hwirq); unsigned long flags; unsigned offset = GPIO_INDEX(bank, gpio); spin_lock_irqsave(&bank->lock, flags); gpio_unlock_as_irq(&bank->chip, offset); - bank->irq_usage &= ~(1 << offset); + bank->irq_usage &= ~(BIT(offset)); _disable_gpio_module(bank, offset); _reset_gpio(bank, gpio); spin_unlock_irqrestore(&bank->lock, flags); @@ -821,7 +813,7 @@ static void gpio_irq_shutdown(struct irq_data *d) static void gpio_ack_irq(struct irq_data *d) { - struct gpio_bank *bank = irq_data_get_irq_chip_data(d); + struct gpio_bank *bank = _irq_data_get_bank(d); unsigned int gpio = irq_to_gpio(bank, d->hwirq); _clear_gpio_irqstatus(bank, gpio); @@ -829,7 +821,7 @@ static void gpio_ack_irq(struct irq_data *d) static void gpio_mask_irq(struct irq_data *d) { - struct gpio_bank *bank = irq_data_get_irq_chip_data(d); + struct gpio_bank *bank = _irq_data_get_bank(d); unsigned int gpio = irq_to_gpio(bank, d->hwirq); unsigned long flags; @@ -841,7 +833,7 @@ static void gpio_mask_irq(struct irq_data *d) static void gpio_unmask_irq(struct irq_data *d) { - struct gpio_bank *bank = irq_data_get_irq_chip_data(d); + struct gpio_bank *bank = _irq_data_get_bank(d); unsigned int gpio = irq_to_gpio(bank, d->hwirq); unsigned int irq_mask = GPIO_BIT(bank, gpio); u32 trigger = irqd_get_trigger_type(d); @@ -936,6 +928,21 @@ static inline void mpuio_init(struct gpio_bank *bank) /*---------------------------------------------------------------------*/ +static int gpio_get_direction(struct gpio_chip *chip, unsigned offset) +{ + struct gpio_bank *bank; + unsigned long flags; + void __iomem *reg; + int dir; + + bank = container_of(chip, struct gpio_bank, chip); + reg = bank->base + bank->regs->direction; + spin_lock_irqsave(&bank->lock, flags); + dir = !!(readl_relaxed(reg) & BIT(offset)); + spin_unlock_irqrestore(&bank->lock, flags); + return dir; +} + static int gpio_input(struct gpio_chip *chip, unsigned offset) { struct gpio_bank *bank; @@ -954,7 +961,7 @@ static int gpio_get(struct gpio_chip *chip, unsigned offset) u32 mask; bank = container_of(chip, struct gpio_bank, chip); - mask = (1 << offset); + mask = (BIT(offset)); if (gpio_is_input(bank, mask)) return _get_gpio_datain(bank, offset); @@ -1081,10 +1088,12 @@ omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start, IRQ_NOREQUEST | IRQ_NOPROBE, 0); } -static void omap_gpio_chip_init(struct gpio_bank *bank) +static int omap_gpio_chip_init(struct gpio_bank *bank) { int j; static int gpio; + int irq_base = 0; + int ret; /* * REVISIT eventually switch from OMAP-specific gpio structs @@ -1092,12 +1101,12 @@ static void omap_gpio_chip_init(struct gpio_bank *bank) */ bank->chip.request = omap_gpio_request; bank->chip.free = omap_gpio_free; + bank->chip.get_direction = gpio_get_direction; bank->chip.direction_input = gpio_input; bank->chip.get = 
gpio_get; bank->chip.direction_output = gpio_output; bank->chip.set_debounce = gpio_debounce; bank->chip.set = gpio_set; - bank->chip.to_irq = omap_gpio_to_irq; if (bank->is_mpuio) { bank->chip.label = "mpuio"; if (bank->regs->wkup_en) @@ -1110,22 +1119,48 @@ static void omap_gpio_chip_init(struct gpio_bank *bank) } bank->chip.ngpio = bank->width; - gpiochip_add(&bank->chip); + ret = gpiochip_add(&bank->chip); + if (ret) { + dev_err(bank->dev, "Could not register gpio chip %d\n", ret); + return ret; + } + +#ifdef CONFIG_ARCH_OMAP1 + /* + * REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop + * irq_alloc_descs() since a base IRQ offset will no longer be needed. + */ + irq_base = irq_alloc_descs(-1, 0, bank->width, 0); + if (irq_base < 0) { + dev_err(bank->dev, "Couldn't allocate IRQ numbers\n"); + return -ENODEV; + } +#endif + + ret = gpiochip_irqchip_add(&bank->chip, &gpio_irq_chip, + irq_base, gpio_irq_handler, + IRQ_TYPE_NONE); + + if (ret) { + dev_err(bank->dev, "Couldn't add irqchip to gpiochip %d\n", ret); + ret = gpiochip_remove(&bank->chip); + return -ENODEV; + } + + gpiochip_set_chained_irqchip(&bank->chip, &gpio_irq_chip, + bank->irq, gpio_irq_handler); for (j = 0; j < bank->width; j++) { - int irq = irq_create_mapping(bank->domain, j); + int irq = irq_find_mapping(bank->chip.irqdomain, j); irq_set_lockdep_class(irq, &gpio_lock_class); - irq_set_chip_data(irq, bank); if (bank->is_mpuio) { omap_mpuio_alloc_gc(bank, irq, bank->width); - } else { - irq_set_chip_and_handler(irq, &gpio_irq_chip, - handle_simple_irq); - set_irq_flags(irq, IRQF_VALID); + irq_set_chip_and_handler(irq, NULL, NULL); + set_irq_flags(irq, 0); } } - irq_set_chained_handler(bank->irq, gpio_irq_handler); - irq_set_handler_data(bank->irq, bank); + + return 0; } static const struct of_device_id omap_gpio_match[]; @@ -1138,9 +1173,7 @@ static int omap_gpio_probe(struct platform_device *pdev) const struct omap_gpio_platform_data *pdata; struct resource *res; struct gpio_bank *bank; -#ifdef CONFIG_ARCH_OMAP1 - int irq_base; -#endif + int ret; match = of_match_device(of_match_ptr(omap_gpio_match), dev); @@ -1162,6 +1195,7 @@ static int omap_gpio_probe(struct platform_device *pdev) bank->irq = res->start; bank->dev = dev; + bank->chip.dev = dev; bank->dbck_flag = pdata->dbck_flag; bank->stride = pdata->bank_stride; bank->width = pdata->bank_width; @@ -1182,29 +1216,6 @@ static int omap_gpio_probe(struct platform_device *pdev) pdata->get_context_loss_count; } -#ifdef CONFIG_ARCH_OMAP1 - /* - * REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop - * irq_alloc_descs() and irq_domain_add_legacy() and just use a - * linear IRQ domain mapping for all OMAP platforms. 
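With the irq_domain_add_*() calls removed below, omap_gpio_chip_init() instead lets gpiolib own the domain: gpiochip_irqchip_add() creates it and gpiochip_set_chained_irqchip() wires the bank's upstream interrupt to the chained handler. A condensed sketch of that registration order, with foo_* placeholders and error handling trimmed:

#include <linux/gpio/driver.h>
#include <linux/irq.h>

static struct irq_chip foo_irq_chip;	/* .name, .irq_mask, ... elided */
static void foo_chained_handler(unsigned int irq, struct irq_desc *desc);

static int foo_setup_irq(struct gpio_chip *gc, unsigned int bank_irq)
{
	int ret;

	ret = gpiochip_add(gc);
	if (ret)
		return ret;

	/* gpiolib allocates and owns the irq_domain for this chip */
	ret = gpiochip_irqchip_add(gc, &foo_irq_chip, 0,
				   handle_simple_irq, IRQ_TYPE_NONE);
	if (ret)
		return ret;

	/* route the bank's upstream interrupt into the chained handler */
	gpiochip_set_chained_irqchip(gc, &foo_irq_chip, bank_irq,
				     foo_chained_handler);
	return 0;
}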
- */ - irq_base = irq_alloc_descs(-1, 0, bank->width, 0); - if (irq_base < 0) { - dev_err(dev, "Couldn't allocate IRQ numbers\n"); - return -ENODEV; - } - - bank->domain = irq_domain_add_legacy(node, bank->width, irq_base, - 0, &irq_domain_simple_ops, NULL); -#else - bank->domain = irq_domain_add_linear(node, bank->width, - &irq_domain_simple_ops, NULL); -#endif - if (!bank->domain) { - dev_err(dev, "Couldn't register an IRQ domain\n"); - return -ENODEV; - } - if (bank->regs->set_dataout && bank->regs->clr_dataout) bank->set_dataout = _set_gpio_dataout_reg; else @@ -1216,7 +1227,7 @@ static int omap_gpio_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); bank->base = devm_ioremap_resource(dev, res); if (IS_ERR(bank->base)) { - irq_domain_remove(bank->domain); + irq_domain_remove(bank->chip.irqdomain); return PTR_ERR(bank->base); } @@ -1230,7 +1241,11 @@ static int omap_gpio_probe(struct platform_device *pdev) mpuio_init(bank); omap_gpio_mod_init(bank); - omap_gpio_chip_init(bank); + + ret = omap_gpio_chip_init(bank); + if (ret) + return ret; + omap_gpio_show_rev(bank); pm_runtime_put(bank->dev); diff --git a/drivers/gpio/gpio-palmas.c b/drivers/gpio/gpio-palmas.c index da9d33252e56..86bdbe362068 100644 --- a/drivers/gpio/gpio-palmas.c +++ b/drivers/gpio/gpio-palmas.c @@ -148,7 +148,7 @@ static const struct palmas_device_data tps80036_dev_data = { .ngpio = 16, }; -static struct of_device_id of_palmas_gpio_match[] = { +static const struct of_device_id of_palmas_gpio_match[] = { { .compatible = "ti,palmas-gpio", .data = &palmas_dev_data,}, { .compatible = "ti,tps65913-gpio", .data = &palmas_dev_data,}, { .compatible = "ti,tps65914-gpio", .data = &palmas_dev_data,}, @@ -173,10 +173,8 @@ static int palmas_gpio_probe(struct platform_device *pdev) palmas_gpio = devm_kzalloc(&pdev->dev, sizeof(*palmas_gpio), GFP_KERNEL); - if (!palmas_gpio) { - dev_err(&pdev->dev, "Could not allocate palmas_gpio\n"); + if (!palmas_gpio) return -ENOMEM; - } palmas_gpio->palmas = palmas; palmas_gpio->gpio_chip.owner = THIS_MODULE; diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index d550d8e58705..e721a37c3473 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -15,8 +15,6 @@ #include <linux/init.h> #include <linux/gpio.h> #include <linux/interrupt.h> -#include <linux/irq.h> -#include <linux/irqdomain.h> #include <linux/i2c.h> #include <linux/platform_data/pca953x.h> #include <linux/slab.h> @@ -91,7 +89,6 @@ struct pca953x_chip { u8 irq_stat[MAX_BANK]; u8 irq_trig_raise[MAX_BANK]; u8 irq_trig_fall[MAX_BANK]; - struct irq_domain *domain; #endif struct i2c_client *client; @@ -100,6 +97,11 @@ struct pca953x_chip { int chip_type; }; +static inline struct pca953x_chip *to_pca(struct gpio_chip *gc) +{ + return container_of(gc, struct pca953x_chip, gpio_chip); +} + static int pca953x_read_single(struct pca953x_chip *chip, int reg, u32 *val, int off) { @@ -202,12 +204,10 @@ static int pca953x_read_regs(struct pca953x_chip *chip, int reg, u8 *val) static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off) { - struct pca953x_chip *chip; + struct pca953x_chip *chip = to_pca(gc); u8 reg_val; int ret, offset = 0; - chip = container_of(gc, struct pca953x_chip, gpio_chip); - mutex_lock(&chip->i2c_lock); reg_val = chip->reg_direction[off / BANK_SZ] | (1u << (off % BANK_SZ)); @@ -233,12 +233,10 @@ exit: static int pca953x_gpio_direction_output(struct gpio_chip *gc, unsigned off, int val) { - struct pca953x_chip *chip; + struct 
pca953x_chip *chip = to_pca(gc); u8 reg_val; int ret, offset = 0; - chip = container_of(gc, struct pca953x_chip, gpio_chip); - mutex_lock(&chip->i2c_lock); /* set output level */ if (val) @@ -285,12 +283,10 @@ exit: static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off) { - struct pca953x_chip *chip; + struct pca953x_chip *chip = to_pca(gc); u32 reg_val; int ret, offset = 0; - chip = container_of(gc, struct pca953x_chip, gpio_chip); - mutex_lock(&chip->i2c_lock); switch (chip->chip_type) { case PCA953X_TYPE: @@ -315,12 +311,10 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off) static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val) { - struct pca953x_chip *chip; + struct pca953x_chip *chip = to_pca(gc); u8 reg_val; int ret, offset = 0; - chip = container_of(gc, struct pca953x_chip, gpio_chip); - mutex_lock(&chip->i2c_lock); if (val) reg_val = chip->reg_output[off / BANK_SZ] @@ -367,38 +361,34 @@ static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios) } #ifdef CONFIG_GPIO_PCA953X_IRQ -static int pca953x_gpio_to_irq(struct gpio_chip *gc, unsigned off) -{ - struct pca953x_chip *chip; - - chip = container_of(gc, struct pca953x_chip, gpio_chip); - return irq_create_mapping(chip->domain, off); -} - static void pca953x_irq_mask(struct irq_data *d) { - struct pca953x_chip *chip = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct pca953x_chip *chip = to_pca(gc); chip->irq_mask[d->hwirq / BANK_SZ] &= ~(1 << (d->hwirq % BANK_SZ)); } static void pca953x_irq_unmask(struct irq_data *d) { - struct pca953x_chip *chip = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct pca953x_chip *chip = to_pca(gc); chip->irq_mask[d->hwirq / BANK_SZ] |= 1 << (d->hwirq % BANK_SZ); } static void pca953x_irq_bus_lock(struct irq_data *d) { - struct pca953x_chip *chip = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct pca953x_chip *chip = to_pca(gc); mutex_lock(&chip->irq_lock); } static void pca953x_irq_bus_sync_unlock(struct irq_data *d) { - struct pca953x_chip *chip = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct pca953x_chip *chip = to_pca(gc); u8 new_irqs; int level, i; @@ -420,7 +410,8 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d) static int pca953x_irq_set_type(struct irq_data *d, unsigned int type) { - struct pca953x_chip *chip = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct pca953x_chip *chip = to_pca(gc); int bank_nb = d->hwirq / BANK_SZ; u8 mask = 1 << (d->hwirq % BANK_SZ); @@ -503,44 +494,25 @@ static irqreturn_t pca953x_irq_handler(int irq, void *devid) struct pca953x_chip *chip = devid; u8 pending[MAX_BANK]; u8 level; + unsigned nhandled = 0; int i; if (!pca953x_irq_pending(chip, pending)) - return IRQ_HANDLED; + return IRQ_NONE; for (i = 0; i < NBANK(chip); i++) { while (pending[i]) { level = __ffs(pending[i]); - handle_nested_irq(irq_find_mapping(chip->domain, + handle_nested_irq(irq_find_mapping(chip->gpio_chip.irqdomain, level + (BANK_SZ * i))); pending[i] &= ~(1 << level); + nhandled++; } } - return IRQ_HANDLED; + return (nhandled > 0) ? 
IRQ_HANDLED : IRQ_NONE; } -static int pca953x_gpio_irq_map(struct irq_domain *d, unsigned int irq, - irq_hw_number_t hwirq) -{ - irq_clear_status_flags(irq, IRQ_NOREQUEST); - irq_set_chip_data(irq, d->host_data); - irq_set_chip(irq, &pca953x_irq_chip); - irq_set_nested_thread(irq, true); -#ifdef CONFIG_ARM - set_irq_flags(irq, IRQF_VALID); -#else - irq_set_noprobe(irq); -#endif - - return 0; -} - -static const struct irq_domain_ops pca953x_irq_simple_ops = { - .map = pca953x_gpio_irq_map, - .xlate = irq_domain_xlate_twocell, -}; - static int pca953x_irq_setup(struct pca953x_chip *chip, const struct i2c_device_id *id, int irq_base) @@ -572,19 +544,12 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, chip->irq_stat[i] &= chip->reg_direction[i]; mutex_init(&chip->irq_lock); - chip->domain = irq_domain_add_simple(client->dev.of_node, - chip->gpio_chip.ngpio, - irq_base, - &pca953x_irq_simple_ops, - chip); - if (!chip->domain) - return -ENODEV; - ret = devm_request_threaded_irq(&client->dev, client->irq, NULL, pca953x_irq_handler, - IRQF_TRIGGER_LOW | IRQF_ONESHOT, + IRQF_TRIGGER_LOW | IRQF_ONESHOT | + IRQF_SHARED, dev_name(&client->dev), chip); if (ret) { dev_err(&client->dev, "failed to request irq %d\n", @@ -592,7 +557,16 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, return ret; } - chip->gpio_chip.to_irq = pca953x_gpio_to_irq; + ret = gpiochip_irqchip_add(&chip->gpio_chip, + &pca953x_irq_chip, + irq_base, + handle_simple_irq, + IRQ_TYPE_NONE); + if (ret) { + dev_err(&client->dev, + "could not connect irqchip to gpiochip\n"); + return ret; + } } return 0; @@ -756,11 +730,11 @@ static int pca953x_probe(struct i2c_client *client, if (ret) return ret; - ret = pca953x_irq_setup(chip, id, irq_base); + ret = gpiochip_add(&chip->gpio_chip); if (ret) return ret; - ret = gpiochip_add(&chip->gpio_chip); + ret = pca953x_irq_setup(chip, id, irq_base); if (ret) return ret; diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c index 82735822bc9d..27b46751ea7e 100644 --- a/drivers/gpio/gpio-pcf857x.c +++ b/drivers/gpio/gpio-pcf857x.c @@ -262,7 +262,7 @@ static int pcf857x_irq_domain_init(struct pcf857x *gpio, /* enable real irq */ status = devm_request_threaded_irq(&client->dev, client->irq, NULL, pcf857x_irq, IRQF_ONESHOT | - IRQF_TRIGGER_FALLING, + IRQF_TRIGGER_FALLING | IRQF_SHARED, dev_name(&client->dev), gpio); if (status) @@ -319,7 +319,7 @@ static int pcf857x_probe(struct i2c_client *client, status = pcf857x_irq_domain_init(gpio, client); if (status < 0) { dev_err(&client->dev, "irq_domain init failed\n"); - goto fail; + goto fail_irq_domain; } } @@ -414,12 +414,13 @@ static int pcf857x_probe(struct i2c_client *client, return 0; fail: - dev_dbg(&client->dev, "probe error %d for '%s'\n", - status, client->name); - if (client->irq) pcf857x_irq_domain_cleanup(gpio); +fail_irq_domain: + dev_dbg(&client->dev, "probe error %d for '%s'\n", + status, client->name); + return status; } diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c index 83a156397474..d6eac9b17db9 100644 --- a/drivers/gpio/gpio-pch.c +++ b/drivers/gpio/gpio-pch.c @@ -20,6 +20,7 @@ #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/irq.h> +#include <linux/slab.h> #define PCH_EDGE_FALLING 0 #define PCH_EDGE_RISING BIT(0) diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c index b0f475243cef..84b49cfb81a8 100644 --- a/drivers/gpio/gpio-pl061.c +++ b/drivers/gpio/gpio-pl061.c @@ -17,7 +17,6 @@ #include <linux/irq.h> #include <linux/irqchip/chained_irq.h> 
#include <linux/bitops.h> -#include <linux/workqueue.h> #include <linux/gpio.h> #include <linux/device.h> #include <linux/amba/bus.h> @@ -88,7 +87,7 @@ static int pl061_direction_input(struct gpio_chip *gc, unsigned offset) spin_lock_irqsave(&chip->lock, flags); gpiodir = readb(chip->base + GPIODIR); - gpiodir &= ~(1 << offset); + gpiodir &= ~(BIT(offset)); writeb(gpiodir, chip->base + GPIODIR); spin_unlock_irqrestore(&chip->lock, flags); @@ -106,16 +105,16 @@ static int pl061_direction_output(struct gpio_chip *gc, unsigned offset, return -EINVAL; spin_lock_irqsave(&chip->lock, flags); - writeb(!!value << offset, chip->base + (1 << (offset + 2))); + writeb(!!value << offset, chip->base + (BIT(offset + 2))); gpiodir = readb(chip->base + GPIODIR); - gpiodir |= 1 << offset; + gpiodir |= BIT(offset); writeb(gpiodir, chip->base + GPIODIR); /* * gpio value is set again, because pl061 doesn't allow to set value of * a gpio pin before configuring it in OUT mode. */ - writeb(!!value << offset, chip->base + (1 << (offset + 2))); + writeb(!!value << offset, chip->base + (BIT(offset + 2))); spin_unlock_irqrestore(&chip->lock, flags); return 0; @@ -125,14 +124,14 @@ static int pl061_get_value(struct gpio_chip *gc, unsigned offset) { struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc); - return !!readb(chip->base + (1 << (offset + 2))); + return !!readb(chip->base + (BIT(offset + 2))); } static void pl061_set_value(struct gpio_chip *gc, unsigned offset, int value) { struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc); - writeb(!!value << offset, chip->base + (1 << (offset + 2))); + writeb(!!value << offset, chip->base + (BIT(offset + 2))); } static int pl061_irq_type(struct irq_data *d, unsigned trigger) @@ -207,7 +206,7 @@ static void pl061_irq_mask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc); - u8 mask = 1 << (irqd_to_hwirq(d) % PL061_GPIO_NR); + u8 mask = BIT(irqd_to_hwirq(d) % PL061_GPIO_NR); u8 gpioie; spin_lock(&chip->lock); @@ -220,7 +219,7 @@ static void pl061_irq_unmask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc); - u8 mask = 1 << (irqd_to_hwirq(d) % PL061_GPIO_NR); + u8 mask = BIT(irqd_to_hwirq(d) % PL061_GPIO_NR); u8 gpioie; spin_lock(&chip->lock); @@ -302,9 +301,9 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id) for (i = 0; i < PL061_GPIO_NR; i++) { if (pdata) { - if (pdata->directions & (1 << i)) + if (pdata->directions & (BIT(i))) pl061_direction_output(&chip->gc, i, - pdata->values & (1 << i)); + pdata->values & (BIT(i))); else pl061_direction_input(&chip->gc, i); } @@ -331,7 +330,7 @@ static int pl061_suspend(struct device *dev) chip->csave_regs.gpio_ie = readb(chip->base + GPIOIE); for (offset = 0; offset < PL061_GPIO_NR; offset++) { - if (chip->csave_regs.gpio_dir & (1 << offset)) + if (chip->csave_regs.gpio_dir & (BIT(offset))) chip->csave_regs.gpio_data |= pl061_get_value(&chip->gc, offset) << offset; } @@ -345,10 +344,10 @@ static int pl061_resume(struct device *dev) int offset; for (offset = 0; offset < PL061_GPIO_NR; offset++) { - if (chip->csave_regs.gpio_dir & (1 << offset)) + if (chip->csave_regs.gpio_dir & (BIT(offset))) pl061_direction_output(&chip->gc, offset, chip->csave_regs.gpio_data & - (1 << offset)); + (BIT(offset))); else pl061_direction_input(&chip->gc, offset); } diff --git a/drivers/gpio/gpio-rc5t583.c 
b/drivers/gpio/gpio-rc5t583.c index 9b423173ab50..562b0c4d9cc8 100644 --- a/drivers/gpio/gpio-rc5t583.c +++ b/drivers/gpio/gpio-rc5t583.c @@ -119,10 +119,8 @@ static int rc5t583_gpio_probe(struct platform_device *pdev) rc5t583_gpio = devm_kzalloc(&pdev->dev, sizeof(*rc5t583_gpio), GFP_KERNEL); - if (!rc5t583_gpio) { - dev_warn(&pdev->dev, "Mem allocation for rc5t583_gpio failed"); + if (!rc5t583_gpio) return -ENOMEM; - } rc5t583_gpio->gpio_chip.label = "gpio-rc5t583", rc5t583_gpio->gpio_chip.owner = THIS_MODULE, diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c index 03c91482432c..0c9f803fc1ac 100644 --- a/drivers/gpio/gpio-rcar.c +++ b/drivers/gpio/gpio-rcar.c @@ -26,6 +26,7 @@ #include <linux/pinctrl/consumer.h> #include <linux/platform_data/gpio-rcar.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/spinlock.h> #include <linux/slab.h> @@ -362,7 +363,6 @@ static int gpio_rcar_probe(struct platform_device *pdev) p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL); if (!p) { - dev_err(dev, "failed to allocate driver data\n"); ret = -ENOMEM; goto err0; } @@ -377,6 +377,9 @@ static int gpio_rcar_probe(struct platform_device *pdev) platform_set_drvdata(pdev, p); + pm_runtime_enable(dev); + pm_runtime_get_sync(dev); + io = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); @@ -460,6 +463,8 @@ static int gpio_rcar_probe(struct platform_device *pdev) err1: irq_domain_remove(p->irq_domain); err0: + pm_runtime_put(dev); + pm_runtime_disable(dev); return ret; } @@ -473,6 +478,8 @@ static int gpio_rcar_remove(struct platform_device *pdev) return ret; irq_domain_remove(p->irq_domain); + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); return 0; } diff --git a/drivers/gpio/gpio-rdc321x.c b/drivers/gpio/gpio-rdc321x.c index 88577c3272a5..9fa7e53331c9 100644 --- a/drivers/gpio/gpio-rdc321x.c +++ b/drivers/gpio/gpio-rdc321x.c @@ -141,17 +141,15 @@ static int rdc321x_gpio_probe(struct platform_device *pdev) return -ENODEV; } - rdc321x_gpio_dev = kzalloc(sizeof(struct rdc321x_gpio), GFP_KERNEL); - if (!rdc321x_gpio_dev) { - dev_err(&pdev->dev, "failed to allocate private data\n"); + rdc321x_gpio_dev = devm_kzalloc(&pdev->dev, sizeof(struct rdc321x_gpio), + GFP_KERNEL); + if (!rdc321x_gpio_dev) return -ENOMEM; - } r = platform_get_resource_byname(pdev, IORESOURCE_IO, "gpio-reg1"); if (!r) { dev_err(&pdev->dev, "failed to get gpio-reg1 resource\n"); - err = -ENODEV; - goto out_free; + return -ENODEV; } spin_lock_init(&rdc321x_gpio_dev->lock); @@ -162,8 +160,7 @@ static int rdc321x_gpio_probe(struct platform_device *pdev) r = platform_get_resource_byname(pdev, IORESOURCE_IO, "gpio-reg2"); if (!r) { dev_err(&pdev->dev, "failed to get gpio-reg2 resource\n"); - err = -ENODEV; - goto out_free; + return -ENODEV; } rdc321x_gpio_dev->reg2_ctrl_base = r->start; @@ -187,21 +184,17 @@ static int rdc321x_gpio_probe(struct platform_device *pdev) rdc321x_gpio_dev->reg1_data_base, &rdc321x_gpio_dev->data_reg[0]); if (err) - goto out_free; + return err; err = pci_read_config_dword(rdc321x_gpio_dev->sb_pdev, rdc321x_gpio_dev->reg2_data_base, &rdc321x_gpio_dev->data_reg[1]); if (err) - goto out_free; + return err; dev_info(&pdev->dev, "registering %d GPIOs\n", rdc321x_gpio_dev->chip.ngpio); return gpiochip_add(&rdc321x_gpio_dev->chip); - -out_free: - kfree(rdc321x_gpio_dev); - return err; } static int rdc321x_gpio_remove(struct platform_device *pdev) @@ -213,8 +206,6 @@ static int rdc321x_gpio_remove(struct 
platform_device *pdev) if (ret) dev_err(&pdev->dev, "failed to unregister chip\n"); - kfree(rdc321x_gpio_dev); - return ret; } diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c index 5af65719b95d..a9b1cd16c848 100644 --- a/drivers/gpio/gpio-sch.c +++ b/drivers/gpio/gpio-sch.c @@ -97,8 +97,6 @@ static int sch_gpio_core_direction_out(struct gpio_chip *gc, u8 curr_dirs; unsigned short offset, bit; - sch_gpio_core_set(gc, gpio_num, val); - spin_lock(&gpio_lock); offset = CGIO + gpio_num / 8; @@ -109,6 +107,17 @@ static int sch_gpio_core_direction_out(struct gpio_chip *gc, outb(curr_dirs & ~(1 << bit), gpio_ba + offset); spin_unlock(&gpio_lock); + + /* + * According to the datasheet, writing to the level register has no + * effect when the GPIO is programmed as input. + * Actually the level register is read-only when configured as input. + * Thus presetting the output level before switching to output is _NOT_ possible. + * Hence we set the level after configuring the GPIO as output. + * But we cannot prevent a short low pulse if direction is set to high + * and an external pull-up is connected. + */ + sch_gpio_core_set(gc, gpio_num, val); return 0; } @@ -178,8 +187,6 @@ static int sch_gpio_resume_direction_out(struct gpio_chip *gc, u8 curr_dirs; unsigned short offset, bit; - sch_gpio_resume_set(gc, gpio_num, val); - offset = RGIO + gpio_num / 8; bit = gpio_num % 8; @@ -190,6 +197,17 @@ static int sch_gpio_resume_direction_out(struct gpio_chip *gc, outb(curr_dirs & ~(1 << bit), gpio_ba + offset); spin_unlock(&gpio_lock); + + /* + * According to the datasheet, writing to the level register has no + * effect when the GPIO is programmed as input. + * Actually the level register is read-only when configured as input. + * Thus presetting the output level before switching to output is _NOT_ possible. + * Hence we set the level after configuring the GPIO as output. + * But we cannot prevent a short low pulse if direction is set to high + * and an external pull-up is connected. + */ + sch_gpio_resume_set(gc, gpio_num, val); return 0; } diff --git a/drivers/gpio/gpio-sch311x.c b/drivers/gpio/gpio-sch311x.c index 0357387b3645..f942b80ee403 100644 --- a/drivers/gpio/gpio-sch311x.c +++ b/drivers/gpio/gpio-sch311x.c @@ -327,14 +327,22 @@ static int __init sch311x_detect(int sio_config_port, unsigned short *addr) if (err) return err; - /* Check device ID. We currently know about: * SCH3112 (0x7c), SCH3114 (0x7d), and SCH3116 (0x7f). */ + /* Check device ID. */ reg = sch311x_sio_inb(sio_config_port, 0x20); - if (!(reg == 0x7c || reg == 0x7d || reg == 0x7f)) { + switch (reg) { + case 0x7c: /* SCH3112 */ + dev_id = 2; + break; + case 0x7d: /* SCH3114 */ + dev_id = 4; + break; + case 0x7f: /* SCH3116 */ + dev_id = 6; + break; + default: err = -ENODEV; goto exit; } - dev_id = reg == 0x7c ? 2 : reg == 0x7d ?
4 : 6; /* Select logical device A (runtime registers) */ sch311x_sio_outb(sio_config_port, 0x07, 0x0a); diff --git a/drivers/gpio/gpio-spear-spics.c b/drivers/gpio/gpio-spear-spics.c index 30bcc539425d..353263c85d26 100644 --- a/drivers/gpio/gpio-spear-spics.c +++ b/drivers/gpio/gpio-spear-spics.c @@ -129,10 +129,8 @@ static int spics_gpio_probe(struct platform_device *pdev) int ret; spics = devm_kzalloc(&pdev->dev, sizeof(*spics), GFP_KERNEL); - if (!spics) { - dev_err(&pdev->dev, "memory allocation fail\n"); + if (!spics) return -ENOMEM; - } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); spics->base = devm_ioremap_resource(&pdev->dev, res); diff --git a/drivers/gpio/gpio-sx150x.c b/drivers/gpio/gpio-sx150x.c index 13d73fb2b5e1..b51ca9f5c140 100644 --- a/drivers/gpio/gpio-sx150x.c +++ b/drivers/gpio/gpio-sx150x.c @@ -22,7 +22,6 @@ #include <linux/module.h> #include <linux/mutex.h> #include <linux/slab.h> -#include <linux/workqueue.h> #include <linux/i2c/sx150x.h> #define NO_UPDATE_PENDING -1 diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c index 1019320984d7..51f7cbd9ff71 100644 --- a/drivers/gpio/gpio-tc3589x.c +++ b/drivers/gpio/gpio-tc3589x.c @@ -12,8 +12,6 @@ #include <linux/slab.h> #include <linux/gpio.h> #include <linux/of.h> -#include <linux/irq.h> -#include <linux/irqdomain.h> #include <linux/interrupt.h> #include <linux/mfd/tc3589x.h> @@ -31,10 +29,6 @@ struct tc3589x_gpio { struct tc3589x *tc3589x; struct device *dev; struct mutex irq_lock; - struct irq_domain *domain; - - int irq_base; - /* Caches of interrupt control registers for bus_lock */ u8 regs[CACHE_NR_REGS][CACHE_NR_BANKS]; u8 oldregs[CACHE_NR_REGS][CACHE_NR_BANKS]; @@ -95,30 +89,6 @@ static int tc3589x_gpio_direction_input(struct gpio_chip *chip, return tc3589x_set_bits(tc3589x, reg, 1 << pos, 0); } -/** - * tc3589x_gpio_irq_get_irq(): Map a hardware IRQ on a chip to a Linux IRQ - * - * @tc3589x_gpio: tc3589x_gpio_irq controller to operate on. - * @irq: index of the hardware interrupt requested in the chip IRQs - * - * Useful for drivers to request their own IRQs. 
- */ -static int tc3589x_gpio_irq_get_irq(struct tc3589x_gpio *tc3589x_gpio, - int hwirq) -{ - if (!tc3589x_gpio) - return -EINVAL; - - return irq_create_mapping(tc3589x_gpio->domain, hwirq); -} - -static int tc3589x_gpio_to_irq(struct gpio_chip *chip, unsigned offset) -{ - struct tc3589x_gpio *tc3589x_gpio = to_tc3589x_gpio(chip); - - return tc3589x_gpio_irq_get_irq(tc3589x_gpio, offset); -} - static struct gpio_chip template_chip = { .label = "tc3589x", .owner = THIS_MODULE, @@ -126,13 +96,13 @@ static struct gpio_chip template_chip = { .get = tc3589x_gpio_get, .direction_output = tc3589x_gpio_direction_output, .set = tc3589x_gpio_set, - .to_irq = tc3589x_gpio_to_irq, .can_sleep = true, }; static int tc3589x_gpio_irq_set_type(struct irq_data *d, unsigned int type) { - struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip); int offset = d->hwirq; int regoffset = offset / 8; int mask = 1 << (offset % 8); @@ -159,14 +129,16 @@ static int tc3589x_gpio_irq_set_type(struct irq_data *d, unsigned int type) static void tc3589x_gpio_irq_lock(struct irq_data *d) { - struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip); mutex_lock(&tc3589x_gpio->irq_lock); } static void tc3589x_gpio_irq_sync_unlock(struct irq_data *d) { - struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip); struct tc3589x *tc3589x = tc3589x_gpio->tc3589x; static const u8 regmap[] = { [REG_IBE] = TC3589x_GPIOIBE0, @@ -194,7 +166,8 @@ static void tc3589x_gpio_irq_sync_unlock(struct irq_data *d) static void tc3589x_gpio_irq_mask(struct irq_data *d) { - struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip); int offset = d->hwirq; int regoffset = offset / 8; int mask = 1 << (offset % 8); @@ -204,7 +177,8 @@ static void tc3589x_gpio_irq_mask(struct irq_data *d) static void tc3589x_gpio_irq_unmask(struct irq_data *d) { - struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip); int offset = d->hwirq; int regoffset = offset / 8; int mask = 1 << (offset % 8); @@ -242,7 +216,8 @@ static irqreturn_t tc3589x_gpio_irq(int irq, void *dev) while (stat) { int bit = __ffs(stat); int line = i * 8 + bit; - int irq = tc3589x_gpio_irq_get_irq(tc3589x_gpio, line); + int irq = irq_find_mapping(tc3589x_gpio->chip.irqdomain, + line); handle_nested_irq(irq); stat &= ~(1 << bit); @@ -254,61 +229,6 @@ static irqreturn_t tc3589x_gpio_irq(int irq, void *dev) return IRQ_HANDLED; } -static int tc3589x_gpio_irq_map(struct irq_domain *d, unsigned int irq, - irq_hw_number_t hwirq) -{ - struct tc3589x *tc3589x_gpio = d->host_data; - - irq_set_chip_data(irq, tc3589x_gpio); - irq_set_chip_and_handler(irq, &tc3589x_gpio_irq_chip, - handle_simple_irq); - irq_set_nested_thread(irq, 1); -#ifdef CONFIG_ARM - set_irq_flags(irq, IRQF_VALID); -#else - irq_set_noprobe(irq); -#endif - - return 0; -} - -static void 
tc3589x_gpio_irq_unmap(struct irq_domain *d, unsigned int irq) -{ -#ifdef CONFIG_ARM - set_irq_flags(irq, 0); -#endif - irq_set_chip_and_handler(irq, NULL, NULL); - irq_set_chip_data(irq, NULL); -} - -static struct irq_domain_ops tc3589x_irq_ops = { - .map = tc3589x_gpio_irq_map, - .unmap = tc3589x_gpio_irq_unmap, - .xlate = irq_domain_xlate_twocell, -}; - -static int tc3589x_gpio_irq_init(struct tc3589x_gpio *tc3589x_gpio, - struct device_node *np) -{ - int base = tc3589x_gpio->irq_base; - - /* - * If this results in a linear domain, irq_create_mapping() will - * take care of allocating IRQ descriptors at runtime. When a base - * is provided, the IRQ descriptors will be allocated when the - * domain is instantiated. - */ - tc3589x_gpio->domain = irq_domain_add_simple(np, - tc3589x_gpio->chip.ngpio, base, &tc3589x_irq_ops, - tc3589x_gpio); - if (!tc3589x_gpio->domain) { - dev_err(tc3589x_gpio->dev, "Failed to create irqdomain\n"); - return -ENOSYS; - } - - return 0; -} - static int tc3589x_gpio_probe(struct platform_device *pdev) { struct tc3589x *tc3589x = dev_get_drvdata(pdev->dev.parent); @@ -329,7 +249,8 @@ static int tc3589x_gpio_probe(struct platform_device *pdev) if (irq < 0) return irq; - tc3589x_gpio = kzalloc(sizeof(struct tc3589x_gpio), GFP_KERNEL); + tc3589x_gpio = devm_kzalloc(&pdev->dev, sizeof(struct tc3589x_gpio), + GFP_KERNEL); if (!tc3589x_gpio) return -ENOMEM; @@ -347,30 +268,36 @@ static int tc3589x_gpio_probe(struct platform_device *pdev) tc3589x_gpio->chip.of_node = np; #endif - tc3589x_gpio->irq_base = tc3589x->irq_base ? - tc3589x->irq_base + TC3589x_INT_GPIO(0) : 0; - /* Bring the GPIO module out of reset */ ret = tc3589x_set_bits(tc3589x, TC3589x_RSTCTRL, TC3589x_RSTCTRL_GPIRST, 0); if (ret < 0) - goto out_free; - - ret = tc3589x_gpio_irq_init(tc3589x_gpio, np); - if (ret) - goto out_free; + return ret; - ret = request_threaded_irq(irq, NULL, tc3589x_gpio_irq, IRQF_ONESHOT, - "tc3589x-gpio", tc3589x_gpio); + ret = devm_request_threaded_irq(&pdev->dev, + irq, NULL, tc3589x_gpio_irq, + IRQF_ONESHOT, "tc3589x-gpio", + tc3589x_gpio); if (ret) { dev_err(&pdev->dev, "unable to get irq: %d\n", ret); - goto out_free; + return ret; } ret = gpiochip_add(&tc3589x_gpio->chip); if (ret) { dev_err(&pdev->dev, "unable to add gpiochip: %d\n", ret); - goto out_freeirq; + return ret; + } + + ret = gpiochip_irqchip_add(&tc3589x_gpio->chip, + &tc3589x_gpio_irq_chip, + 0, + handle_simple_irq, + IRQ_TYPE_NONE); + if (ret) { + dev_err(&pdev->dev, + "could not connect irqchip to gpiochip\n"); + return ret; } if (pdata && pdata->setup) @@ -379,12 +306,6 @@ static int tc3589x_gpio_probe(struct platform_device *pdev) platform_set_drvdata(pdev, tc3589x_gpio); return 0; - -out_freeirq: - free_irq(irq, tc3589x_gpio); -out_free: - kfree(tc3589x_gpio); - return ret; } static int tc3589x_gpio_remove(struct platform_device *pdev) @@ -392,7 +313,6 @@ static int tc3589x_gpio_remove(struct platform_device *pdev) struct tc3589x_gpio *tc3589x_gpio = platform_get_drvdata(pdev); struct tc3589x *tc3589x = tc3589x_gpio->tc3589x; struct tc3589x_gpio_platform_data *pdata = tc3589x->pdata->gpio; - int irq = platform_get_irq(pdev, 0); int ret; if (pdata && pdata->remove) @@ -405,10 +325,6 @@ static int tc3589x_gpio_remove(struct platform_device *pdev) return ret; } - free_irq(irq, tc3589x_gpio); - - kfree(tc3589x_gpio); - return 0; } diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c index 2b49f878b56c..4e8fb8261a87 100644 --- a/drivers/gpio/gpio-tegra.c +++ b/drivers/gpio/gpio-tegra.c @@ 
-408,7 +408,7 @@ static struct tegra_gpio_soc_config tegra30_gpio_config = { .upper_offset = 0x80, }; -static struct of_device_id tegra_gpio_of_match[] = { +static const struct of_device_id tegra_gpio_of_match[] = { { .compatible = "nvidia,tegra30-gpio", .data = &tegra30_gpio_config }, { .compatible = "nvidia,tegra20-gpio", .data = &tegra20_gpio_config }, { }, @@ -458,10 +458,8 @@ static int tegra_gpio_probe(struct platform_device *pdev) tegra_gpio_banks = devm_kzalloc(&pdev->dev, tegra_gpio_bank_count * sizeof(*tegra_gpio_banks), GFP_KERNEL); - if (!tegra_gpio_banks) { - dev_err(&pdev->dev, "Couldn't allocate bank structure\n"); + if (!tegra_gpio_banks) return -ENODEV; - } irq_domain = irq_domain_add_linear(pdev->dev.of_node, tegra_gpio_chip.ngpio, diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c index f9a8fbde108e..efc7c129016d 100644 --- a/drivers/gpio/gpio-timberdale.c +++ b/drivers/gpio/gpio-timberdale.c @@ -224,6 +224,7 @@ static struct irq_chip timbgpio_irqchip = { static int timbgpio_probe(struct platform_device *pdev) { int err, i; + struct device *dev = &pdev->dev; struct gpio_chip *gc; struct timbgpio *tgpio; struct resource *iomem; @@ -231,35 +232,35 @@ static int timbgpio_probe(struct platform_device *pdev) int irq = platform_get_irq(pdev, 0); if (!pdata || pdata->nr_pins > 32) { - err = -EINVAL; - goto err_mem; + dev_err(dev, "Invalid platform data\n"); + return -EINVAL; } iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!iomem) { - err = -EINVAL; - goto err_mem; + dev_err(dev, "Unable to get resource\n"); + return -EINVAL; } - tgpio = kzalloc(sizeof(*tgpio), GFP_KERNEL); + tgpio = devm_kzalloc(dev, sizeof(struct timbgpio), GFP_KERNEL); if (!tgpio) { - err = -EINVAL; - goto err_mem; + dev_err(dev, "Memory alloc failed\n"); + return -EINVAL; } tgpio->irq_base = pdata->irq_base; spin_lock_init(&tgpio->lock); - if (!request_mem_region(iomem->start, resource_size(iomem), - DRIVER_NAME)) { - err = -EBUSY; - goto err_request; + if (!devm_request_mem_region(dev, iomem->start, resource_size(iomem), + DRIVER_NAME)) { + dev_err(dev, "Region already claimed\n"); + return -EBUSY; } - tgpio->membase = ioremap(iomem->start, resource_size(iomem)); + tgpio->membase = devm_ioremap(dev, iomem->start, resource_size(iomem)); if (!tgpio->membase) { - err = -ENOMEM; - goto err_ioremap; + dev_err(dev, "Cannot ioremap\n"); + return -ENOMEM; } gc = &tgpio->gpio; @@ -279,7 +280,7 @@ static int timbgpio_probe(struct platform_device *pdev) err = gpiochip_add(gc); if (err) - goto err_chipadd; + return err; platform_set_drvdata(pdev, tgpio); @@ -302,17 +303,6 @@ static int timbgpio_probe(struct platform_device *pdev) irq_set_chained_handler(irq, timbgpio_irq); return 0; - -err_chipadd: - iounmap(tgpio->membase); -err_ioremap: - release_mem_region(iomem->start, resource_size(iomem)); -err_request: - kfree(tgpio); -err_mem: - printk(KERN_ERR DRIVER_NAME": Failed to register GPIOs: %d\n", err); - - return err; } static int timbgpio_remove(struct platform_device *pdev) @@ -320,7 +310,6 @@ static int timbgpio_remove(struct platform_device *pdev) int err; struct timbgpio_platform_data *pdata = dev_get_platdata(&pdev->dev); struct timbgpio *tgpio = platform_get_drvdata(pdev); - struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); int irq = platform_get_irq(pdev, 0); if (irq >= 0 && tgpio->irq_base > 0) { @@ -338,10 +327,6 @@ static int timbgpio_remove(struct platform_device *pdev) if (err) printk(KERN_ERR DRIVER_NAME": failed to remove gpio_chip\n"); - 
iounmap(tgpio->membase); - release_mem_region(iomem->start, resource_size(iomem)); - kfree(tgpio); - return 0; } diff --git a/drivers/gpio/gpio-tps6586x.c b/drivers/gpio/gpio-tps6586x.c index 8994dfa13491..a69fbea41253 100644 --- a/drivers/gpio/gpio-tps6586x.c +++ b/drivers/gpio/gpio-tps6586x.c @@ -97,10 +97,8 @@ static int tps6586x_gpio_probe(struct platform_device *pdev) pdata = dev_get_platdata(pdev->dev.parent); tps6586x_gpio = devm_kzalloc(&pdev->dev, sizeof(*tps6586x_gpio), GFP_KERNEL); - if (!tps6586x_gpio) { - dev_err(&pdev->dev, "Could not allocate tps6586x_gpio\n"); + if (!tps6586x_gpio) return -ENOMEM; - } tps6586x_gpio->parent = pdev->dev.parent; diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c index b6e818e68007..e2f8cda235ea 100644 --- a/drivers/gpio/gpio-tps65910.c +++ b/drivers/gpio/gpio-tps65910.c @@ -123,10 +123,8 @@ static int tps65910_gpio_probe(struct platform_device *pdev) tps65910_gpio = devm_kzalloc(&pdev->dev, sizeof(*tps65910_gpio), GFP_KERNEL); - if (!tps65910_gpio) { - dev_err(&pdev->dev, "Could not allocate tps65910_gpio\n"); + if (!tps65910_gpio) return -ENOMEM; - } tps65910_gpio->tps65910 = tps65910; diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c index 792a05ad4649..12481867daf1 100644 --- a/drivers/gpio/gpio-xilinx.c +++ b/drivers/gpio/gpio-xilinx.c @@ -289,7 +289,7 @@ static int xgpio_of_probe(struct device_node *np) return 0; } -static struct of_device_id xgpio_of_match[] = { +static const struct of_device_id xgpio_of_match[] = { { .compatible = "xlnx,xps-gpio-1.00.a", }, { /* end of list */ }, }; diff --git a/drivers/gpio/gpio-zevio.c b/drivers/gpio/gpio-zevio.c index 9bf5034b6cdb..54e54e4cc6c4 100644 --- a/drivers/gpio/gpio-zevio.c +++ b/drivers/gpio/gpio-zevio.c @@ -81,9 +81,15 @@ static inline void zevio_gpio_port_set(struct zevio_gpio *c, unsigned pin, static int zevio_gpio_get(struct gpio_chip *chip, unsigned pin) { struct zevio_gpio *controller = to_zevio_gpio(chip); + u32 val, dir; - /* Only reading allowed, so no spinlock needed */ - u32 val = zevio_gpio_port_get(controller, pin, ZEVIO_GPIO_INPUT); + spin_lock(&controller->lock); + dir = zevio_gpio_port_get(controller, pin, ZEVIO_GPIO_DIRECTION); + if (dir & BIT(ZEVIO_GPIO_BIT(pin))) + val = zevio_gpio_port_get(controller, pin, ZEVIO_GPIO_INPUT); + else + val = zevio_gpio_port_get(controller, pin, ZEVIO_GPIO_OUTPUT); + spin_unlock(&controller->lock); return (val >> ZEVIO_GPIO_BIT(pin)) & 0x1; } @@ -172,10 +178,8 @@ static int zevio_gpio_probe(struct platform_device *pdev) int status, i; controller = devm_kzalloc(&pdev->dev, sizeof(*controller), GFP_KERNEL); - if (!controller) { - dev_err(&pdev->dev, "not enough free memory\n"); + if (!controller) return -ENOMEM; - } /* Copy our reference */ controller->chip.gc = zevio_gpio_chip; @@ -198,7 +202,7 @@ static int zevio_gpio_probe(struct platform_device *pdev) return 0; } -static struct of_device_id zevio_gpio_of_match[] = { +static const struct of_device_id zevio_gpio_of_match[] = { { .compatible = "lsi,zevio-gpio", }, { }, }; @@ -209,7 +213,7 @@ static struct platform_driver zevio_gpio_driver = { .driver = { .name = "gpio-zevio", .owner = THIS_MODULE, - .of_match_table = of_match_ptr(zevio_gpio_of_match), + .of_match_table = zevio_gpio_of_match, }, .probe = zevio_gpio_probe, }; diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 401add28933f..4a987917c186 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -449,9 +449,10 @@ acpi_gpio_adr_space_handler(u32 
function, acpi_physical_address address, mutex_unlock(&achip->conn_lock); if (function == ACPI_WRITE) - gpiod_set_raw_value(desc, !!((1 << i) & *value)); + gpiod_set_raw_value_cansleep(desc, + !!((1 << i) & *value)); else - *value |= (u64)gpiod_get_raw_value(desc) << i; + *value |= (u64)gpiod_get_raw_value_cansleep(desc) << i; } out: diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index 2024d45e5503..af7e25c9a9ae 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -48,7 +48,7 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data) if (ret < 0) return false; - gg_data->out_gpio = gpio_to_desc(ret + gc->base); + gg_data->out_gpio = gpiochip_get_desc(gc, ret); return true; } @@ -96,6 +96,20 @@ struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np, } EXPORT_SYMBOL(of_get_named_gpiod_flags); +int of_get_named_gpio_flags(struct device_node *np, const char *list_name, + int index, enum of_gpio_flags *flags) +{ + struct gpio_desc *desc; + + desc = of_get_named_gpiod_flags(np, list_name, index, flags); + + if (IS_ERR(desc)) + return PTR_ERR(desc); + else + return desc_to_gpio(desc); +} +EXPORT_SYMBOL(of_get_named_gpio_flags); + /** * of_gpio_simple_xlate - translate gpio_spec to the GPIO number and flags * @gc: pointer to the gpio_chip structure diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index f48817d97480..d9c9cb4665db 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -1363,6 +1363,11 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip, int parent_irq, irq_flow_handler_t parent_handler) { + if (gpiochip->can_sleep) { + chip_err(gpiochip, "you cannot have chained interrupts on a chip that may sleep\n"); + return; + } + irq_set_chained_handler(parent_irq, parent_handler); /* * The parent irqchip is already using the chip_data for this @@ -1372,6 +1377,12 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip, } EXPORT_SYMBOL_GPL(gpiochip_set_chained_irqchip); +/* + * This lock class tells lockdep that GPIO irqs are in a different + * category than their parents, so it won't report false recursion. + */ +static struct lock_class_key gpiochip_irq_lock_class; + /** * gpiochip_irq_map() - maps an IRQ into a GPIO irqchip * @d: the irqdomain used by this irqchip @@ -1388,22 +1399,35 @@ static int gpiochip_irq_map(struct irq_domain *d, unsigned int irq, struct gpio_chip *chip = d->host_data; irq_set_chip_data(irq, chip); + irq_set_lockdep_class(irq, &gpiochip_irq_lock_class); irq_set_chip_and_handler(irq, chip->irqchip, chip->irq_handler); + /* Chips that can sleep need nested thread handlers */ + if (chip->can_sleep) + irq_set_nested_thread(irq, 1); #ifdef CONFIG_ARM set_irq_flags(irq, IRQF_VALID); #else irq_set_noprobe(irq); #endif - irq_set_irq_type(irq, chip->irq_default_type); + /* + * No set-up of the hardware will happen if IRQ_TYPE_NONE + * is passed as default type. 
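+	 * Chips that do want a default programmed at mapping time pass a
+	 * real trigger as the last argument of gpiochip_irqchip_add()
+	 * instead. A sketch, with a made-up irq_chip name, mirroring the
+	 * calls added elsewhere in this series:
+	 *
+	 *	gpiochip_irqchip_add(&chip->gc, &my_irq_chip, 0,
+	 *			     handle_edge_irq, IRQ_TYPE_EDGE_FALLING);
+	 *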
+ */ + if (chip->irq_default_type != IRQ_TYPE_NONE) + irq_set_irq_type(irq, chip->irq_default_type); return 0; } static void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq) { + struct gpio_chip *chip = d->host_data; + #ifdef CONFIG_ARM set_irq_flags(irq, 0); #endif + if (chip->can_sleep) + irq_set_nested_thread(irq, 0); irq_set_chip_and_handler(irq, NULL, NULL); irq_set_chip_data(irq, NULL); } @@ -1471,7 +1495,8 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) * @first_irq: if not dynamically assigned, the base (first) IRQ to * allocate gpiochip irqs from * @handler: the irq handler to use (often a predefined irq core function) - * @type: the default type for IRQs on this irqchip + * @type: the default type for IRQs on this irqchip, pass IRQ_TYPE_NONE + * to have the core avoid setting up any default type in the hardware. * * This function closely associates a certain irqchip with a certain * gpiochip, providing an irq domain to translate the local IRQs to @@ -2571,22 +2596,27 @@ void gpiod_add_lookup_table(struct gpiod_lookup_table *table) mutex_unlock(&gpio_lookup_lock); } -#ifdef CONFIG_OF static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id, unsigned int idx, enum gpio_lookup_flags *flags) { + static const char *suffixes[] = { "gpios", "gpio" }; char prop_name[32]; /* 32 is max size of property name */ enum of_gpio_flags of_flags; struct gpio_desc *desc; + unsigned int i; - if (con_id) - snprintf(prop_name, 32, "%s-gpios", con_id); - else - snprintf(prop_name, 32, "gpios"); + for (i = 0; i < ARRAY_SIZE(suffixes); i++) { + if (con_id) + snprintf(prop_name, 32, "%s-%s", con_id, suffixes[i]); + else + snprintf(prop_name, 32, "%s", suffixes[i]); - desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx, - &of_flags); + desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx, + &of_flags); + if (!IS_ERR(desc)) + break; + } if (IS_ERR(desc)) return desc; @@ -2596,14 +2626,6 @@ static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id, return desc; } -#else -static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id, - unsigned int idx, - enum gpio_lookup_flags *flags) -{ - return ERR_PTR(-ENODEV); -} -#endif static struct gpio_desc *acpi_find_gpio(struct device *dev, const char *con_id, unsigned int idx, @@ -2701,7 +2723,7 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id, } /** - * gpio_get - obtain a GPIO for a given GPIO function + * gpiod_get - obtain a GPIO for a given GPIO function * @dev: GPIO consumer, can be NULL for system-global GPIOs * @con_id: function within the GPIO consumer * @@ -2716,6 +2738,22 @@ struct gpio_desc *__must_check gpiod_get(struct device *dev, const char *con_id) EXPORT_SYMBOL_GPL(gpiod_get); /** + * gpiod_get_optional - obtain an optional GPIO for a given GPIO function + * @dev: GPIO consumer, can be NULL for system-global GPIOs + * @con_id: function within the GPIO consumer + * + * This is equivalent to gpiod_get(), except that when no GPIO was assigned to + * the requested function it will return NULL. This is convenient for drivers + * that need to handle optional GPIOs. 
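+ *
+ * A minimal sketch of the intended use; the "reset" function name and
+ * the output direction are illustrative assumptions, not part of the
+ * API:
+ *
+ *	struct gpio_desc *reset = gpiod_get_optional(dev, "reset");
+ *
+ *	if (IS_ERR(reset))
+ *		return PTR_ERR(reset);
+ *	if (reset)	/* NULL simply means no reset line is wired up */
+ *		gpiod_direction_output(reset, 1);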
+ */ +struct gpio_desc *__must_check gpiod_get_optional(struct device *dev, + const char *con_id) +{ + return gpiod_get_index_optional(dev, con_id, 0); +} +EXPORT_SYMBOL_GPL(gpiod_get_optional); + +/** * gpiod_get_index - obtain a GPIO from a multi-index GPIO function * @dev: GPIO consumer, can be NULL for system-global GPIOs * @con_id: function within the GPIO consumer @@ -2778,6 +2816,33 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev, EXPORT_SYMBOL_GPL(gpiod_get_index); /** + * gpiod_get_index_optional - obtain an optional GPIO from a multi-index GPIO + * function + * @dev: GPIO consumer, can be NULL for system-global GPIOs + * @con_id: function within the GPIO consumer + * @index: index of the GPIO to obtain in the consumer + * + * This is equivalent to gpiod_get_index(), except that when no GPIO with the + * specified index was assigned to the requested function it will return NULL. + * This is convenient for drivers that need to handle optional GPIOs. + */ +struct gpio_desc *__must_check gpiod_get_index_optional(struct device *dev, + const char *con_id, + unsigned int index) +{ + struct gpio_desc *desc; + + desc = gpiod_get_index(dev, con_id, index); + if (IS_ERR(desc)) { + if (PTR_ERR(desc) == -ENOENT) + return NULL; + } + + return desc; +} +EXPORT_SYMBOL_GPL(gpiod_get_index_optional); + +/** * gpiod_put - dispose of a GPIO descriptor * @desc: GPIO descriptor to dispose of * diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h index cf092941a9fd..1a4103dd38df 100644 --- a/drivers/gpio/gpiolib.h +++ b/drivers/gpio/gpiolib.h @@ -15,6 +15,8 @@ #include <linux/err.h> #include <linux/device.h> +enum of_gpio_flags; + /** * struct acpi_gpio_info - ACPI GPIO specific information * @gpioint: if %true this GPIO is of type GpioInt otherwise type is GpioIo @@ -46,4 +48,7 @@ acpi_get_gpiod_by_index(struct device *dev, int index, int gpiochip_request_own_desc(struct gpio_desc *desc, const char *label); void gpiochip_free_own_desc(struct gpio_desc *desc); +struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np, + const char *list_name, int index, enum of_gpio_flags *flags); + #endif /* GPIOLIB_H */ diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 96177eec0a0e..eedb023af27d 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1833,7 +1833,6 @@ int i915_driver_unload(struct drm_device *dev) flush_workqueue(dev_priv->wq); mutex_lock(&dev->struct_mutex); - i915_gem_free_all_phys_object(dev); i915_gem_cleanup_ringbuffer(dev); i915_gem_context_fini(dev); WARN_ON(dev_priv->mm.aliasing_ppgtt); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 108e1ec2fa4b..388c028e223c 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -242,18 +242,6 @@ struct intel_ddi_plls { #define WATCH_LISTS 0 #define WATCH_GTT 0 -#define I915_GEM_PHYS_CURSOR_0 1 -#define I915_GEM_PHYS_CURSOR_1 2 -#define I915_GEM_PHYS_OVERLAY_REGS 3 -#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS) - -struct drm_i915_gem_phys_object { - int id; - struct page **page_list; - drm_dma_handle_t *handle; - struct drm_i915_gem_object *cur_obj; -}; - struct opregion_header; struct opregion_acpi; struct opregion_swsci; @@ -1187,9 +1175,6 @@ struct i915_gem_mm { /** Bit 6 swizzling required for Y tiling */ uint32_t bit_6_swizzle_y; - /* storage for physical objects */ - struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; - /* accounting, useful for userland 
debugging */ spinlock_t object_stat_lock; size_t object_memory; @@ -1769,7 +1754,7 @@ struct drm_i915_gem_object { struct drm_file *pin_filp; /** for phy allocated objects */ - struct drm_i915_gem_phys_object *phys_obj; + drm_dma_handle_t *phys_handle; }; #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) @@ -2204,10 +2189,12 @@ void i915_gem_vma_destroy(struct i915_vma *vma); #define PIN_MAPPABLE 0x1 #define PIN_NONBLOCK 0x2 #define PIN_GLOBAL 0x4 +#define PIN_OFFSET_BIAS 0x8 +#define PIN_OFFSET_MASK (~4095) int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, struct i915_address_space *vm, uint32_t alignment, - unsigned flags); + uint64_t flags); int __must_check i915_vma_unbind(struct i915_vma *vma); int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv); @@ -2334,13 +2321,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, u32 alignment, struct intel_ring_buffer *pipelined); void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj); -int i915_gem_attach_phys_object(struct drm_device *dev, - struct drm_i915_gem_object *obj, - int id, +int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align); -void i915_gem_detach_phys_object(struct drm_device *dev, - struct drm_i915_gem_object *obj); -void i915_gem_free_all_phys_object(struct drm_device *dev); int i915_gem_open(struct drm_device *dev, struct drm_file *file); void i915_gem_release(struct drm_device *dev, struct drm_file *file); @@ -2465,6 +2447,8 @@ int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment, unsigned cache_level, + unsigned long start, + unsigned long end, unsigned flags); int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); int i915_gem_evict_everything(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 2871ce75f438..3326770c9ed2 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -43,10 +43,6 @@ static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *o static __must_check int i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, bool readonly); -static int i915_gem_phys_pwrite(struct drm_device *dev, - struct drm_i915_gem_object *obj, - struct drm_i915_gem_pwrite *args, - struct drm_file *file); static void i915_gem_write_fence(struct drm_device *dev, int reg, struct drm_i915_gem_object *obj); @@ -209,6 +205,128 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, return 0; } +static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj) +{ + drm_dma_handle_t *phys = obj->phys_handle; + + if (!phys) + return; + + if (obj->madv == I915_MADV_WILLNEED) { + struct address_space *mapping = file_inode(obj->base.filp)->i_mapping; + char *vaddr = phys->vaddr; + int i; + + for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { + struct page *page = shmem_read_mapping_page(mapping, i); + if (!IS_ERR(page)) { + char *dst = kmap_atomic(page); + memcpy(dst, vaddr, PAGE_SIZE); + drm_clflush_virt_range(dst, PAGE_SIZE); + kunmap_atomic(dst); + + set_page_dirty(page); + mark_page_accessed(page); + page_cache_release(page); + } + vaddr += PAGE_SIZE; + } + i915_gem_chipset_flush(obj->base.dev); + } + +#ifdef CONFIG_X86 + set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE); +#endif + drm_pci_free(obj->base.dev, phys); + obj->phys_handle = NULL; +} + 
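+/*
+ * Attach a physically contiguous backing store to @obj. The buffer is
+ * allocated with drm_pci_alloc() and mapped write-combined (on x86); the
+ * check against (align - 1) below presumes callers pass a power-of-two
+ * alignment. Only shmem-backed objects still marked I915_MADV_WILLNEED
+ * can be attached; their pages are copied into the new buffer here and
+ * copied back out by i915_gem_object_detach_phys() above.
+ */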
+int +i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, + int align) +{ + drm_dma_handle_t *phys; + struct address_space *mapping; + char *vaddr; + int i; + + if (obj->phys_handle) { + if ((unsigned long)obj->phys_handle->vaddr & (align -1)) + return -EBUSY; + + return 0; + } + + if (obj->madv != I915_MADV_WILLNEED) + return -EFAULT; + + if (obj->base.filp == NULL) + return -EINVAL; + + /* create a new object */ + phys = drm_pci_alloc(obj->base.dev, obj->base.size, align); + if (!phys) + return -ENOMEM; + + vaddr = phys->vaddr; +#ifdef CONFIG_X86 + set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE); +#endif + mapping = file_inode(obj->base.filp)->i_mapping; + for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { + struct page *page; + char *src; + + page = shmem_read_mapping_page(mapping, i); + if (IS_ERR(page)) { +#ifdef CONFIG_X86 + set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE); +#endif + drm_pci_free(obj->base.dev, phys); + return PTR_ERR(page); + } + + src = kmap_atomic(page); + memcpy(vaddr, src, PAGE_SIZE); + kunmap_atomic(src); + + mark_page_accessed(page); + page_cache_release(page); + + vaddr += PAGE_SIZE; + } + + obj->phys_handle = phys; + return 0; +} + +static int +i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, + struct drm_i915_gem_pwrite *args, + struct drm_file *file_priv) +{ + struct drm_device *dev = obj->base.dev; + void *vaddr = obj->phys_handle->vaddr + args->offset; + char __user *user_data = to_user_ptr(args->data_ptr); + + if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { + unsigned long unwritten; + + /* The physical object once assigned is fixed for the lifetime + * of the obj, so we can safely drop the lock and continue + * to access vaddr. + */ + mutex_unlock(&dev->struct_mutex); + unwritten = copy_from_user(vaddr, user_data, args->size); + mutex_lock(&dev->struct_mutex); + if (unwritten) + return -EFAULT; + } + + i915_gem_chipset_flush(dev); + return 0; +} + void *i915_gem_object_alloc(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -921,8 +1039,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, * pread/pwrite currently are reading and writing from the CPU * perspective, requiring manual detiling by the client. */ - if (obj->phys_obj) { - ret = i915_gem_phys_pwrite(dev, obj, args, file); + if (obj->phys_handle) { + ret = i915_gem_phys_pwrite(obj, args, file); goto out; } @@ -3208,12 +3326,14 @@ static struct i915_vma * i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, struct i915_address_space *vm, unsigned alignment, - unsigned flags) + uint64_t flags) { struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 size, fence_size, fence_alignment, unfenced_alignment; - size_t gtt_max = + unsigned long start = + flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0; + unsigned long end = flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total; struct i915_vma *vma; int ret; @@ -3242,11 +3362,11 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, /* If the object is bigger than the entire aperture, reject it early * before evicting everything in a vain attempt to find space. */ - if (obj->base.size > gtt_max) { - DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n", + if (obj->base.size > end) { + DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n", obj->base.size, flags & PIN_MAPPABLE ? 
"mappable" : "total", - gtt_max); + end); return ERR_PTR(-E2BIG); } @@ -3263,12 +3383,15 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, search_free: ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node, size, alignment, - obj->cache_level, 0, gtt_max, + obj->cache_level, + start, end, DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT); if (ret) { ret = i915_gem_evict_something(dev, vm, size, alignment, - obj->cache_level, flags); + obj->cache_level, + start, end, + flags); if (ret == 0) goto search_free; @@ -3828,11 +3951,30 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) return ret; } +static bool +i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags) +{ + struct drm_i915_gem_object *obj = vma->obj; + + if (alignment && + vma->node.start & (alignment - 1)) + return true; + + if (flags & PIN_MAPPABLE && !obj->map_and_fenceable) + return true; + + if (flags & PIN_OFFSET_BIAS && + vma->node.start < (flags & PIN_OFFSET_MASK)) + return true; + + return false; +} + int i915_gem_object_pin(struct drm_i915_gem_object *obj, struct i915_address_space *vm, uint32_t alignment, - unsigned flags) + uint64_t flags) { struct i915_vma *vma; int ret; @@ -3845,15 +3987,13 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj, if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) return -EBUSY; - if ((alignment && - vma->node.start & (alignment - 1)) || - (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) { + if (i915_vma_misplaced(vma, alignment, flags)) { WARN(vma->pin_count, "bo is already pinned with incorrect alignment:" " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d," " obj->map_and_fenceable=%d\n", i915_gem_obj_offset(obj, vm), alignment, - flags & PIN_MAPPABLE, + !!(flags & PIN_MAPPABLE), obj->map_and_fenceable); ret = i915_vma_unbind(vma); if (ret) @@ -4163,9 +4303,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) trace_i915_gem_object_destroy(obj); - if (obj->phys_obj) - i915_gem_detach_phys_object(dev, obj); - list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) { int ret; @@ -4183,6 +4320,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) } } + i915_gem_object_detach_phys(obj); + /* Stolen objects don't hold a ref, but do hold pin count. Fix that up * before progressing. */ if (obj->stolen) @@ -4646,190 +4785,6 @@ i915_gem_load(struct drm_device *dev) register_shrinker(&dev_priv->mm.inactive_shrinker); } -/* - * Create a physically contiguous memory object for this object - * e.g. 
for cursor + overlay regs - */ -static int i915_gem_init_phys_object(struct drm_device *dev, - int id, int size, int align) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_phys_object *phys_obj; - int ret; - - if (dev_priv->mm.phys_objs[id - 1] || !size) - return 0; - - phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL); - if (!phys_obj) - return -ENOMEM; - - phys_obj->id = id; - - phys_obj->handle = drm_pci_alloc(dev, size, align); - if (!phys_obj->handle) { - ret = -ENOMEM; - goto kfree_obj; - } -#ifdef CONFIG_X86 - set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE); -#endif - - dev_priv->mm.phys_objs[id - 1] = phys_obj; - - return 0; -kfree_obj: - kfree(phys_obj); - return ret; -} - -static void i915_gem_free_phys_object(struct drm_device *dev, int id) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_phys_object *phys_obj; - - if (!dev_priv->mm.phys_objs[id - 1]) - return; - - phys_obj = dev_priv->mm.phys_objs[id - 1]; - if (phys_obj->cur_obj) { - i915_gem_detach_phys_object(dev, phys_obj->cur_obj); - } - -#ifdef CONFIG_X86 - set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE); -#endif - drm_pci_free(dev, phys_obj->handle); - kfree(phys_obj); - dev_priv->mm.phys_objs[id - 1] = NULL; -} - -void i915_gem_free_all_phys_object(struct drm_device *dev) -{ - int i; - - for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++) - i915_gem_free_phys_object(dev, i); -} - -void i915_gem_detach_phys_object(struct drm_device *dev, - struct drm_i915_gem_object *obj) -{ - struct address_space *mapping = file_inode(obj->base.filp)->i_mapping; - char *vaddr; - int i; - int page_count; - - if (!obj->phys_obj) - return; - vaddr = obj->phys_obj->handle->vaddr; - - page_count = obj->base.size / PAGE_SIZE; - for (i = 0; i < page_count; i++) { - struct page *page = shmem_read_mapping_page(mapping, i); - if (!IS_ERR(page)) { - char *dst = kmap_atomic(page); - memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE); - kunmap_atomic(dst); - - drm_clflush_pages(&page, 1); - - set_page_dirty(page); - mark_page_accessed(page); - page_cache_release(page); - } - } - i915_gem_chipset_flush(dev); - - obj->phys_obj->cur_obj = NULL; - obj->phys_obj = NULL; -} - -int -i915_gem_attach_phys_object(struct drm_device *dev, - struct drm_i915_gem_object *obj, - int id, - int align) -{ - struct address_space *mapping = file_inode(obj->base.filp)->i_mapping; - struct drm_i915_private *dev_priv = dev->dev_private; - int ret = 0; - int page_count; - int i; - - if (id > I915_MAX_PHYS_OBJECT) - return -EINVAL; - - if (obj->phys_obj) { - if (obj->phys_obj->id == id) - return 0; - i915_gem_detach_phys_object(dev, obj); - } - - /* create a new object */ - if (!dev_priv->mm.phys_objs[id - 1]) { - ret = i915_gem_init_phys_object(dev, id, - obj->base.size, align); - if (ret) { - DRM_ERROR("failed to init phys object %d size: %zu\n", - id, obj->base.size); - return ret; - } - } - - /* bind to the object */ - obj->phys_obj = dev_priv->mm.phys_objs[id - 1]; - obj->phys_obj->cur_obj = obj; - - page_count = obj->base.size / PAGE_SIZE; - - for (i = 0; i < page_count; i++) { - struct page *page; - char *dst, *src; - - page = shmem_read_mapping_page(mapping, i); - if (IS_ERR(page)) - return PTR_ERR(page); - - src = kmap_atomic(page); - dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE); - memcpy(dst, src, PAGE_SIZE); - kunmap_atomic(src); - - mark_page_accessed(page); - page_cache_release(page); - } - - return 0; 
-} - -static int -i915_gem_phys_pwrite(struct drm_device *dev, - struct drm_i915_gem_object *obj, - struct drm_i915_gem_pwrite *args, - struct drm_file *file_priv) -{ - void *vaddr = obj->phys_obj->handle->vaddr + args->offset; - char __user *user_data = to_user_ptr(args->data_ptr); - - if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { - unsigned long unwritten; - - /* The physical object once assigned is fixed for the lifetime - * of the obj, so we can safely drop the lock and continue - * to access vaddr. - */ - mutex_unlock(&dev->struct_mutex); - unwritten = copy_from_user(vaddr, user_data, args->size); - mutex_lock(&dev->struct_mutex); - if (unwritten) - return -EFAULT; - } - - i915_gem_chipset_flush(dev); - return 0; -} - void i915_gem_release(struct drm_device *dev, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 75fca63dc8c1..bbf4b12d842e 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -68,9 +68,9 @@ mark_free(struct i915_vma *vma, struct list_head *unwind) int i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm, int min_size, unsigned alignment, unsigned cache_level, + unsigned long start, unsigned long end, unsigned flags) { - struct drm_i915_private *dev_priv = dev->dev_private; struct list_head eviction_list, unwind_list; struct i915_vma *vma; int ret = 0; @@ -102,11 +102,10 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm, */ INIT_LIST_HEAD(&unwind_list); - if (flags & PIN_MAPPABLE) { - BUG_ON(!i915_is_ggtt(vm)); + if (start != 0 || end != vm->total) { drm_mm_init_scan_with_range(&vm->mm, min_size, - alignment, cache_level, 0, - dev_priv->gtt.mappable_end); + alignment, cache_level, + start, end); } else drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level); diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 2c9d9cbaf653..20fef6c50267 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -35,6 +35,9 @@ #define __EXEC_OBJECT_HAS_PIN (1<<31) #define __EXEC_OBJECT_HAS_FENCE (1<<30) +#define __EXEC_OBJECT_NEEDS_BIAS (1<<28) + +#define BATCH_OFFSET_BIAS (256*1024) struct eb_vmas { struct list_head vmas; @@ -545,7 +548,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; bool need_fence; - unsigned flags; + uint64_t flags; int ret; flags = 0; @@ -559,6 +562,8 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, if (entry->flags & EXEC_OBJECT_NEEDS_GTT) flags |= PIN_GLOBAL; + if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS) + flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS; ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags); if (ret) @@ -592,6 +597,36 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, return 0; } +static bool +eb_vma_misplaced(struct i915_vma *vma, bool has_fenced_gpu_access) +{ + struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; + struct drm_i915_gem_object *obj = vma->obj; + bool need_fence, need_mappable; + + need_fence = + has_fenced_gpu_access && + entry->flags & EXEC_OBJECT_NEEDS_FENCE && + obj->tiling_mode != I915_TILING_NONE; + need_mappable = need_fence || need_reloc_mappable(vma); + + WARN_ON((need_mappable || need_fence) && + 
!i915_is_ggtt(vma->vm)); + + if (entry->alignment && + vma->node.start & (entry->alignment - 1)) + return true; + + if (need_mappable && !obj->map_and_fenceable) + return true; + + if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS && + vma->node.start < BATCH_OFFSET_BIAS) + return true; + + return false; +} + static int i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, struct list_head *vmas, @@ -653,26 +688,10 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, /* Unbind any ill-fitting objects or pin. */ list_for_each_entry(vma, vmas, exec_list) { - struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; - bool need_fence, need_mappable; - - obj = vma->obj; - if (!drm_mm_node_allocated(&vma->node)) continue; - need_fence = - has_fenced_gpu_access && - entry->flags & EXEC_OBJECT_NEEDS_FENCE && - obj->tiling_mode != I915_TILING_NONE; - need_mappable = need_fence || need_reloc_mappable(vma); - - WARN_ON((need_mappable || need_fence) && - !i915_is_ggtt(vma->vm)); - - if ((entry->alignment && - vma->node.start & (entry->alignment - 1)) || - (need_mappable && !obj->map_and_fenceable)) + if (eb_vma_misplaced(vma, has_fenced_gpu_access)) ret = i915_vma_unbind(vma); else ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs); @@ -773,9 +792,9 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, * relocations were valid. */ for (j = 0; j < exec[i].relocation_count; j++) { - if (copy_to_user(&user_relocs[j].presumed_offset, - &invalid_offset, - sizeof(invalid_offset))) { + if (__copy_to_user(&user_relocs[j].presumed_offset, + &invalid_offset, + sizeof(invalid_offset))) { ret = -EFAULT; mutex_lock(&dev->struct_mutex); goto err; @@ -999,6 +1018,25 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev, return 0; } +static struct drm_i915_gem_object * +eb_get_batch(struct eb_vmas *eb) +{ + struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list); + + /* + * SNA is doing fancy tricks with compressing batch buffers, which leads + * to negative relocation deltas. Usually that works out ok since the + * relocate address is still positive, except when the batch is placed + * very low in the GTT. Ensure this doesn't happen. + * + * Note that actual hangs have only been observed on gen7, but for + * paranoia do it everywhere. + */ + vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS; + + return vma->obj; +} + static int i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_file *file, @@ -1153,7 +1191,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto err; /* take note of the batch buffer before we might reorder the lists */ - batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj; + batch_obj = eb_get_batch(eb); /* Move the objects en-masse into the GTT, evicting if necessary. */ need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; @@ -1355,18 +1393,21 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list); if (!ret) { + struct drm_i915_gem_exec_object __user *user_exec_list = + to_user_ptr(args->buffers_ptr); + /* Copy the new buffer offsets back to the user's exec list. */ - for (i = 0; i < args->buffer_count; i++) - exec_list[i].offset = exec2_list[i].offset; - /* ... 
and back out to userspace */ - ret = copy_to_user(to_user_ptr(args->buffers_ptr), - exec_list, - sizeof(*exec_list) * args->buffer_count); - if (ret) { - ret = -EFAULT; - DRM_DEBUG("failed to copy %d exec entries " - "back to user (%d)\n", - args->buffer_count, ret); + for (i = 0; i < args->buffer_count; i++) { + ret = __copy_to_user(&user_exec_list[i].offset, + &exec2_list[i].offset, + sizeof(user_exec_list[i].offset)); + if (ret) { + ret = -EFAULT; + DRM_DEBUG("failed to copy %d exec entries " + "back to user (%d)\n", + args->buffer_count, ret); + break; + } } } @@ -1412,14 +1453,21 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); if (!ret) { /* Copy the new buffer offsets back to the user's exec list. */ - ret = copy_to_user(to_user_ptr(args->buffers_ptr), - exec2_list, - sizeof(*exec2_list) * args->buffer_count); - if (ret) { - ret = -EFAULT; - DRM_DEBUG("failed to copy %d exec entries " - "back to user (%d)\n", - args->buffer_count, ret); + struct drm_i915_gem_exec_object2 *user_exec_list = + to_user_ptr(args->buffers_ptr); + int i; + + for (i = 0; i < args->buffer_count; i++) { + ret = __copy_to_user(&user_exec_list[i].offset, + &exec2_list[i].offset, + sizeof(user_exec_list[i].offset)); + if (ret) { + ret = -EFAULT; + DRM_DEBUG("failed to copy %d exec entries " + "back to user\n", + args->buffer_count); + break; + } } } diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 154b0f8bb88d..5deb22864c52 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1089,7 +1089,9 @@ alloc: if (ret == -ENOSPC && !retried) { ret = i915_gem_evict_something(dev, &dev_priv->gtt.base, GEN6_PD_SIZE, GEN6_PD_ALIGN, - I915_CACHE_NONE, 0); + I915_CACHE_NONE, + 0, dev_priv->gtt.base.total, + 0); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index fa486c5fbb02..aff4a113cda3 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -560,47 +560,71 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) dev_priv->vbt.edp_pps = *edp_pps; - dev_priv->vbt.edp_rate = edp_link_params->rate ? 
DP_LINK_BW_2_7 : - DP_LINK_BW_1_62; + switch (edp_link_params->rate) { + case EDP_RATE_1_62: + dev_priv->vbt.edp_rate = DP_LINK_BW_1_62; + break; + case EDP_RATE_2_7: + dev_priv->vbt.edp_rate = DP_LINK_BW_2_7; + break; + default: + DRM_DEBUG_KMS("VBT has unknown eDP link rate value %u\n", + edp_link_params->rate); + break; + } + switch (edp_link_params->lanes) { - case 0: + case EDP_LANE_1: dev_priv->vbt.edp_lanes = 1; break; - case 1: + case EDP_LANE_2: dev_priv->vbt.edp_lanes = 2; break; - case 3: - default: + case EDP_LANE_4: dev_priv->vbt.edp_lanes = 4; break; + default: + DRM_DEBUG_KMS("VBT has unknown eDP lane count value %u\n", + edp_link_params->lanes); + break; } + switch (edp_link_params->preemphasis) { - case 0: + case EDP_PREEMPHASIS_NONE: dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_0; break; - case 1: + case EDP_PREEMPHASIS_3_5dB: dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5; break; - case 2: + case EDP_PREEMPHASIS_6dB: dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_6; break; - case 3: + case EDP_PREEMPHASIS_9_5dB: dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5; break; + default: + DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n", + edp_link_params->preemphasis); + break; } + switch (edp_link_params->vswing) { - case 0: + case EDP_VSWING_0_4V: dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_400; break; - case 1: + case EDP_VSWING_0_6V: dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_600; break; - case 2: + case EDP_VSWING_0_8V: dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_800; break; - case 3: + case EDP_VSWING_1_2V: dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_1200; break; + default: + DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n", + edp_link_params->vswing); + break; } } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 48aa516a1ac0..5b60e25baa32 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -7825,14 +7825,12 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, addr = i915_gem_obj_ggtt_offset(obj); } else { int align = IS_I830(dev) ? 16 * 1024 : 256; - ret = i915_gem_attach_phys_object(dev, obj, - (intel_crtc->pipe == 0) ? 
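/*
 * Why the parse_edp() switches above matter: the old code collapsed the
 * raw VBT fields with ternaries and catch-all defaults, e.g.
 *
 *	rate = edp_link_params->rate ? DP_LINK_BW_2_7 : DP_LINK_BW_1_62;
 *
 * so any rate value a future VBT might define (a hypothetical third
 * entry, say) would silently be read as 2.7 GHz.  With the explicit
 * switch an unknown value leaves the previous setting untouched and
 * drops a DRM_DEBUG_KMS breadcrumb instead of guessing.
 */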
I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1, - align); + ret = i915_gem_object_attach_phys(obj, align); if (ret) { DRM_DEBUG_KMS("failed to attach phys object\n"); goto fail_locked; } - addr = obj->phys_obj->handle->busaddr; + addr = obj->phys_handle->busaddr; } if (IS_GEN2(dev)) @@ -7840,10 +7838,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, finish: if (intel_crtc->cursor_bo) { - if (INTEL_INFO(dev)->cursor_needs_physical) { - if (intel_crtc->cursor_bo != obj) - i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); - } else + if (!INTEL_INFO(dev)->cursor_needs_physical) i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo); drm_gem_object_unreference(&intel_crtc->cursor_bo->base); } diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 5ca68aa9f237..2a00cb828d20 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -121,6 +121,22 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp) return max_link_bw; } +static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + u8 source_max, sink_max; + + source_max = 4; + if (HAS_DDI(dev) && intel_dig_port->port == PORT_A && + (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0) + source_max = 2; + + sink_max = drm_dp_max_lane_count(intel_dp->dpcd); + + return min(source_max, sink_max); +} + /* * The units on the numbers in the next two are... bizarre. Examples will * make it clearer; this one parallels an example in the eDP spec. @@ -171,7 +187,7 @@ intel_dp_mode_valid(struct drm_connector *connector, } max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp)); - max_lanes = drm_dp_max_lane_count(intel_dp->dpcd); + max_lanes = intel_dp_max_lane_count(intel_dp); max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); mode_rate = intel_dp_link_required(target_clock, 18); @@ -751,8 +767,10 @@ intel_dp_compute_config(struct intel_encoder *encoder, struct intel_crtc *intel_crtc = encoder->new_crtc; struct intel_connector *intel_connector = intel_dp->attached_connector; int lane_count, clock; - int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); + int min_lane_count = 1; + int max_lane_count = intel_dp_max_lane_count(intel_dp); /* Conveniently, the link BW constants become indices with a shift...*/ + int min_clock = 0; int max_clock = intel_dp_max_link_bw(intel_dp) >> 3; int bpp, mode_rate; static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 }; @@ -785,19 +803,38 @@ intel_dp_compute_config(struct intel_encoder *encoder, /* Walk through all bpp values. Luckily they're all nicely spaced with 2 * bpc in between. */ bpp = pipe_config->pipe_bpp; - if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp && - dev_priv->vbt.edp_bpp < bpp) { - DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n", - dev_priv->vbt.edp_bpp); - bpp = dev_priv->vbt.edp_bpp; + if (is_edp(intel_dp)) { + if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) { + DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n", + dev_priv->vbt.edp_bpp); + bpp = dev_priv->vbt.edp_bpp; + } + + if (IS_BROADWELL(dev)) { + /* Yes, it's an ugly hack. 
*/ + min_lane_count = max_lane_count; + DRM_DEBUG_KMS("forcing lane count to max (%u) on BDW\n", + min_lane_count); + } else if (dev_priv->vbt.edp_lanes) { + min_lane_count = min(dev_priv->vbt.edp_lanes, + max_lane_count); + DRM_DEBUG_KMS("using min %u lanes per VBT\n", + min_lane_count); + } + + if (dev_priv->vbt.edp_rate) { + min_clock = min(dev_priv->vbt.edp_rate >> 3, max_clock); + DRM_DEBUG_KMS("using min %02x link bw per VBT\n", + bws[min_clock]); + } } for (; bpp >= 6*3; bpp -= 2*3) { mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, bpp); - for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { - for (clock = 0; clock <= max_clock; clock++) { + for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) { + for (clock = min_clock; clock <= max_clock; clock++) { link_clock = drm_dp_bw_code_to_link_rate(bws[clock]); link_avail = intel_dp_max_data_rate(link_clock, lane_count); diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index fce4a0d93c0b..f73ba5e6b7a8 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -387,6 +387,15 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, height); } + /* No preferred mode marked by the EDID? Are there any modes? */ + if (!modes[i] && !list_empty(&connector->modes)) { + DRM_DEBUG_KMS("using first mode listed on connector %s\n", + drm_get_connector_name(connector)); + modes[i] = list_first_entry(&connector->modes, + struct drm_display_mode, + head); + } + /* last resort: use current mode */ if (!modes[i]) { /* diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index d8adc9104dca..129db0c7d835 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -193,7 +193,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay) struct overlay_registers __iomem *regs; if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) - regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr; + regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr; else regs = io_mapping_map_wc(dev_priv->gtt.mappable, i915_gem_obj_ggtt_offset(overlay->reg_bo)); @@ -1340,14 +1340,12 @@ void intel_setup_overlay(struct drm_device *dev) overlay->reg_bo = reg_bo; if (OVERLAY_NEEDS_PHYSICAL(dev)) { - ret = i915_gem_attach_phys_object(dev, reg_bo, - I915_GEM_PHYS_OVERLAY_REGS, - PAGE_SIZE); + ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE); if (ret) { DRM_ERROR("failed to attach phys overlay regs\n"); goto out_free_bo; } - overlay->flip_addr = reg_bo->phys_obj->handle->busaddr; + overlay->flip_addr = reg_bo->phys_handle->busaddr; } else { ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE); if (ret) { @@ -1428,7 +1426,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay) /* Cast to make sparse happy, but it's wc memory anyway, so * equivalent to the wc io mapping on X86. 
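	 * (The buffer behind phys_handle->vaddr is ordinary RAM obtained
	 * via drm_pci_alloc(), set up write-combining like the phys
	 * objects it replaces, so the __iomem cast only changes the C
	 * type that sparse sees, not the access semantics.)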
*/ regs = (struct overlay_registers __iomem *) - overlay->reg_bo->phys_obj->handle->vaddr; + overlay->reg_bo->phys_handle->vaddr; else regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, i915_gem_obj_ggtt_offset(overlay->reg_bo)); @@ -1462,7 +1460,7 @@ intel_overlay_capture_error_state(struct drm_device *dev) error->dovsta = I915_READ(DOVSTA); error->isr = I915_READ(ISR); if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) - error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr; + error->base = (__force long)overlay->reg_bo->phys_handle->vaddr; else error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo); diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 0eead16aeda7..cb8cfb7e0974 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -492,6 +492,7 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level, enum pipe pipe = intel_get_pipe_from_connector(connector); u32 freq; unsigned long flags; + u64 n; if (!panel->backlight.present || pipe == INVALID_PIPE) return; @@ -502,10 +503,9 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level, /* scale to hardware max, but be careful to not overflow */ freq = panel->backlight.max; - if (freq < max) - level = level * freq / max; - else - level = freq / max * level; + n = (u64)level * freq; + do_div(n, max); + level = n; panel->backlight.level = level; if (panel->backlight.device) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 19e94c3edc19..d93dcf683e8c 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2095,6 +2095,43 @@ static void intel_print_wm_latency(struct drm_device *dev, } } +static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv, + uint16_t wm[5], uint16_t min) +{ + int level, max_level = ilk_wm_max_level(dev_priv->dev); + + if (wm[0] >= min) + return false; + + wm[0] = max(wm[0], min); + for (level = 1; level <= max_level; level++) + wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5)); + + return true; +} + +static void snb_wm_latency_quirk(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + bool changed; + + /* + * The BIOS provided WM memory latency values are often + * inadequate for high resolution displays. Adjust them. 
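+	 *
+	 * Unit sketch (WM0 latencies are stored in 0.1us steps, WM1+ in
+	 * 0.5us steps -- hence the "*5" intel_print_wm_latency() applies
+	 * to the higher levels before printing):
+	 *
+	 *	wm[0]   >= 12                  ->  at least 1.2us
+	 *	wm[1..] >= DIV_ROUND_UP(12, 5) ->  3, i.e. at least 1.5us
+	 *
+	 * so ilk_increase_wm_latency() imposes roughly the same floor at
+	 * every level despite the mixed units.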
+ */ + changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) | + ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) | + ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12); + + if (!changed) + return; + + DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n"); + intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency); + intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency); + intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency); +} + static void ilk_setup_wm_latency(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -2112,6 +2149,9 @@ static void ilk_setup_wm_latency(struct drm_device *dev) intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency); intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency); intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency); + + if (IS_GEN6(dev)) + snb_wm_latency_quirk(dev); } static void ilk_compute_wm_parameters(struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index d27155adf5db..46be00d66df3 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -2424,8 +2424,8 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector, if (ret < 0) goto err1; - ret = sysfs_create_link(&encoder->ddc.dev.kobj, - &drm_connector->kdev->kobj, + ret = sysfs_create_link(&drm_connector->kdev->kobj, + &encoder->ddc.dev.kobj, encoder->ddc.dev.kobj.name); if (ret < 0) goto err2; diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index f729dc71d5be..d0c75779d3f6 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -185,6 +185,8 @@ static void vlv_force_wake_reset(struct drm_i915_private *dev_priv) { __raw_i915_write32(dev_priv, FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff)); + __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV, + _MASKED_BIT_DISABLE(0xffff)); /* something from same cacheline, but !FORCEWAKE_VLV */ __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV); } diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c index 7762665ad8fd..876de9ac3793 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c @@ -1009,7 +1009,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, } if (outp == 8) - return false; + return conf; data = exec_lookup(priv, head, outp, ctrl, dcb, &ver, &hdr, &cnt, &len, &info1); if (data == 0x0000) diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c index 43fec17ea540..bbf117be572f 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c @@ -40,6 +40,7 @@ pwm_info(struct nouveau_therm *therm, int line) case 0x00: return 2; case 0x19: return 1; case 0x1c: return 0; + case 0x1e: return 2; default: break; } diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 68528619834a..8149e7cf4303 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1642,6 +1642,7 @@ struct radeon_vce { unsigned fb_version; atomic_t handles[RADEON_MAX_VCE_HANDLES]; struct drm_file *filp[RADEON_MAX_VCE_HANDLES]; + unsigned img_size[RADEON_MAX_VCE_HANDLES]; struct delayed_work idle_work; }; @@ -1655,7 +1656,7 @@ int radeon_vce_get_destroy_msg(struct radeon_device 
*rdev, int ring, uint32_t handle, struct radeon_fence **fence); void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp); void radeon_vce_note_usage(struct radeon_device *rdev); -int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi); +int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, unsigned size); int radeon_vce_cs_parse(struct radeon_cs_parser *p); bool radeon_vce_semaphore_emit(struct radeon_device *rdev, struct radeon_ring *ring, @@ -2640,7 +2641,8 @@ void r100_pll_errata_after_index(struct radeon_device *rdev); #define ASIC_IS_DCE8(rdev) ((rdev->family >= CHIP_BONAIRE)) #define ASIC_IS_DCE81(rdev) ((rdev->family == CHIP_KAVERI)) #define ASIC_IS_DCE82(rdev) ((rdev->family == CHIP_BONAIRE)) -#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI)) +#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI) || \ + (rdev->family == CHIP_MULLINS)) #define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \ (rdev->ddev->pdev->device == 0x6850) || \ diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index b3633d9a5317..9ab30976287d 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -196,6 +196,20 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev) } } + if (!found) { + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) { + dhandle = ACPI_HANDLE(&pdev->dev); + if (!dhandle) + continue; + + status = acpi_get_handle(dhandle, "ATRM", &atrm_handle); + if (!ACPI_FAILURE(status)) { + found = true; + break; + } + } + } + if (!found) return false; diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 2b6e0ebcc13a..41ecf8a60611 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -152,6 +152,12 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) uint32_t domain = r->write_domain ? 
r->write_domain : r->read_domains; + if (domain & RADEON_GEM_DOMAIN_CPU) { + DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid " + "for command submission\n"); + return -EINVAL; + } + p->relocs[i].domain = domain; if (domain == RADEON_GEM_DOMAIN_VRAM) domain |= RADEON_GEM_DOMAIN_GTT; @@ -342,10 +348,17 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) return -EINVAL; /* we only support VM on some SI+ rings */ - if ((p->rdev->asic->ring[p->ring]->cs_parse == NULL) && - ((p->cs_flags & RADEON_CS_USE_VM) == 0)) { - DRM_ERROR("Ring %d requires VM!\n", p->ring); - return -EINVAL; + if ((p->cs_flags & RADEON_CS_USE_VM) == 0) { + if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) { + DRM_ERROR("Ring %d requires VM!\n", p->ring); + return -EINVAL; + } + } else { + if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) { + DRM_ERROR("VM not supported on ring %d!\n", + p->ring); + return -EINVAL; + } } } diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 0e770bbf7e29..14671406212f 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1533,11 +1533,6 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) radeon_restore_bios_scratch_regs(rdev); - if (fbcon) { - radeon_fbdev_set_suspend(rdev, 0); - console_unlock(); - } - /* init dig PHYs, disp eng pll */ if (rdev->is_atom_bios) { radeon_atom_encoder_init(rdev); @@ -1562,6 +1557,12 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) } drm_kms_helper_poll_enable(dev); + + if (fbcon) { + radeon_fbdev_set_suspend(rdev, 0); + console_unlock(); + } + return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 408b6ac53f0b..356b733caafe 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -862,7 +862,7 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div, unsigned *fb_div, unsigned *ref_div) { /* limit reference * post divider to a maximum */ - ref_div_max = min(128 / post_div, ref_div_max); + ref_div_max = max(min(100 / post_div, ref_div_max), 1u); /* get matching reference and feedback divider */ *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max); @@ -999,7 +999,7 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll, /* avoid high jitter with small fractional dividers */ if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) { - fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 60); + fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 50); if (fb_div < fb_div_min) { unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div); fb_div *= tmp; diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 0cc47f12d995..eaaedba04675 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -577,28 +577,29 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) return r; } - r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); - if (r) { - radeon_vm_fini(rdev, &fpriv->vm); - kfree(fpriv); - return r; - } + if (rdev->accel_working) { + r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); + if (r) { + radeon_vm_fini(rdev, &fpriv->vm); + kfree(fpriv); + return r; + } - /* map the ib pool buffer read only into - * virtual address space */ - bo_va = radeon_vm_bo_add(rdev, &fpriv->vm, - rdev->ring_tmp_bo.bo); - r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET, - 
RADEON_VM_PAGE_READABLE | - RADEON_VM_PAGE_SNOOPED); + /* map the ib pool buffer read only into + * virtual address space */ + bo_va = radeon_vm_bo_add(rdev, &fpriv->vm, + rdev->ring_tmp_bo.bo); + r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET, + RADEON_VM_PAGE_READABLE | + RADEON_VM_PAGE_SNOOPED); - radeon_bo_unreserve(rdev->ring_tmp_bo.bo); - if (r) { - radeon_vm_fini(rdev, &fpriv->vm); - kfree(fpriv); - return r; + radeon_bo_unreserve(rdev->ring_tmp_bo.bo); + if (r) { + radeon_vm_fini(rdev, &fpriv->vm); + kfree(fpriv); + return r; + } } - file_priv->driver_priv = fpriv; } @@ -626,13 +627,15 @@ void radeon_driver_postclose_kms(struct drm_device *dev, struct radeon_bo_va *bo_va; int r; - r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); - if (!r) { - bo_va = radeon_vm_bo_find(&fpriv->vm, - rdev->ring_tmp_bo.bo); - if (bo_va) - radeon_vm_bo_rmv(rdev, bo_va); - radeon_bo_unreserve(rdev->ring_tmp_bo.bo); + if (rdev->accel_working) { + r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); + if (!r) { + bo_va = radeon_vm_bo_find(&fpriv->vm, + rdev->ring_tmp_bo.bo); + if (bo_va) + radeon_vm_bo_rmv(rdev, bo_va); + radeon_bo_unreserve(rdev->ring_tmp_bo.bo); + } } radeon_vm_fini(rdev, &fpriv->vm); diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 19bec0dbfa38..4faa4d6f9bb4 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -458,7 +458,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev, * into account. We don't want to disallow buffer moves * completely. */ - if (current_domain != RADEON_GEM_DOMAIN_CPU && + if ((lobj->alt_domain & current_domain) != 0 && (domain & current_domain) == 0 && /* will be moved */ bytes_moved > bytes_moved_threshold) { /* don't move it */ @@ -699,22 +699,30 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) rbo = container_of(bo, struct radeon_bo, tbo); radeon_bo_check_tiling(rbo, 0, 0); rdev = rbo->rdev; - if (bo->mem.mem_type == TTM_PL_VRAM) { - size = bo->mem.num_pages << PAGE_SHIFT; - offset = bo->mem.start << PAGE_SHIFT; - if ((offset + size) > rdev->mc.visible_vram_size) { - /* hurrah the memory is not visible ! */ - radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); - rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; - r = ttm_bo_validate(bo, &rbo->placement, false, false); - if (unlikely(r != 0)) - return r; - offset = bo->mem.start << PAGE_SHIFT; - /* this should not happen */ - if ((offset + size) > rdev->mc.visible_vram_size) - return -EINVAL; - } + if (bo->mem.mem_type != TTM_PL_VRAM) + return 0; + + size = bo->mem.num_pages << PAGE_SHIFT; + offset = bo->mem.start << PAGE_SHIFT; + if ((offset + size) <= rdev->mc.visible_vram_size) + return 0; + + /* hurrah the memory is not visible ! 
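+	 * Recovery sketch: clamp the allowed placement to the CPU-visible
+	 * window (lpfn below) and revalidate; if visible VRAM is too
+	 * contended for that (-ENOMEM), fall back to GTT, which is always
+	 * CPU accessible, rather than failing the fault outright.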
*/ + radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); + rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; + r = ttm_bo_validate(bo, &rbo->placement, false, false); + if (unlikely(r == -ENOMEM)) { + radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); + return ttm_bo_validate(bo, &rbo->placement, false, false); + } else if (unlikely(r != 0)) { + return r; } + + offset = bo->mem.start << PAGE_SHIFT; + /* this should never happen */ + if ((offset + size) > rdev->mc.visible_vram_size) + return -EINVAL; + return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index f30b8426eee2..53d6e1bb48dc 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -361,6 +361,11 @@ static ssize_t radeon_set_pm_profile(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct radeon_device *rdev = ddev->dev_private; + /* Can't set profile when the card is off */ + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return -EINVAL; + mutex_lock(&rdev->pm.mutex); if (rdev->pm.pm_method == PM_METHOD_PROFILE) { if (strncmp("default", buf, strlen("default")) == 0) @@ -409,6 +414,13 @@ static ssize_t radeon_set_pm_method(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct radeon_device *rdev = ddev->dev_private; + /* Can't set method when the card is off */ + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) { + count = -EINVAL; + goto fail; + } + /* we don't support the legacy modes with dpm */ if (rdev->pm.pm_method == PM_METHOD_DPM) { count = -EINVAL; @@ -446,6 +458,10 @@ static ssize_t radeon_get_dpm_state(struct device *dev, struct radeon_device *rdev = ddev->dev_private; enum radeon_pm_state_type pm = rdev->pm.dpm.user_state; + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return snprintf(buf, PAGE_SIZE, "off\n"); + return snprintf(buf, PAGE_SIZE, "%s\n", (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance"); @@ -459,6 +475,11 @@ static ssize_t radeon_set_dpm_state(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct radeon_device *rdev = ddev->dev_private; + /* Can't set dpm state when the card is off */ + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return -EINVAL; + mutex_lock(&rdev->pm.mutex); if (strncmp("battery", buf, strlen("battery")) == 0) rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY; @@ -485,6 +506,10 @@ static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev, struct radeon_device *rdev = ddev->dev_private; enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return snprintf(buf, PAGE_SIZE, "off\n"); + return snprintf(buf, PAGE_SIZE, "%s\n", (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" : (level == RADEON_DPM_FORCED_LEVEL_LOW) ? 
"low" : "high"); @@ -500,6 +525,11 @@ static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev, enum radeon_dpm_forced_level level; int ret = 0; + /* Can't force performance level when the card is off */ + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return -EINVAL; + mutex_lock(&rdev->pm.mutex); if (strncmp("low", buf, strlen("low")) == 0) { level = RADEON_DPM_FORCED_LEVEL_LOW; @@ -538,8 +568,14 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev, char *buf) { struct radeon_device *rdev = dev_get_drvdata(dev); + struct drm_device *ddev = rdev->ddev; int temp; + /* Can't get temperature when the card is off */ + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return -EINVAL; + if (rdev->asic->pm.get_temperature) temp = radeon_get_temperature(rdev); else @@ -1614,8 +1650,12 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; struct radeon_device *rdev = dev->dev_private; + struct drm_device *ddev = rdev->ddev; - if (rdev->pm.dpm_enabled) { + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) { + seq_printf(m, "PX asic powered off\n"); + } else if (rdev->pm.dpm_enabled) { mutex_lock(&rdev->pm.mutex); if (rdev->asic->dpm.debugfs_print_current_performance_level) radeon_dpm_debugfs_print_current_performance_level(rdev, m); diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c index f73324c81491..3971d968af6c 100644 --- a/drivers/gpu/drm/radeon/radeon_vce.c +++ b/drivers/gpu/drm/radeon/radeon_vce.c @@ -443,13 +443,16 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring, * @p: parser context * @lo: address of lower dword * @hi: address of higher dword + * @size: size of checker for relocation buffer * * Patch relocation inside command stream with real buffer address */ -int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi) +int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, + unsigned size) { struct radeon_cs_chunk *relocs_chunk; - uint64_t offset; + struct radeon_cs_reloc *reloc; + uint64_t start, end, offset; unsigned idx; relocs_chunk = &p->chunks[p->chunk_relocs_idx]; @@ -462,15 +465,60 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi) return -EINVAL; } - offset += p->relocs_ptr[(idx / 4)]->gpu_offset; + reloc = p->relocs_ptr[(idx / 4)]; + start = reloc->gpu_offset; + end = start + radeon_bo_size(reloc->robj); + start += offset; - p->ib.ptr[lo] = offset & 0xFFFFFFFF; - p->ib.ptr[hi] = offset >> 32; + p->ib.ptr[lo] = start & 0xFFFFFFFF; + p->ib.ptr[hi] = start >> 32; + + if (end <= start) { + DRM_ERROR("invalid reloc offset %llX!\n", offset); + return -EINVAL; + } + if ((end - start) < size) { + DRM_ERROR("buffer to small (%d / %d)!\n", + (unsigned)(end - start), size); + return -EINVAL; + } return 0; } /** + * radeon_vce_validate_handle - validate stream handle + * + * @p: parser context + * @handle: handle to validate + * + * Validates the handle and return the found session index or -EINVAL + * we we don't have another free session index. 
+ */ +int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle) +{ + unsigned i; + + /* validate the handle */ + for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { + if (atomic_read(&p->rdev->vce.handles[i]) == handle) + return i; + } + + /* handle not found try to alloc a new one */ + for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { + if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) { + p->rdev->vce.filp[i] = p->filp; + p->rdev->vce.img_size[i] = 0; + return i; + } + } + + DRM_ERROR("No more free VCE handles!\n"); + return -EINVAL; +} + +/** * radeon_vce_cs_parse - parse and validate the command stream * * @p: parser context @@ -478,8 +526,10 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi) */ int radeon_vce_cs_parse(struct radeon_cs_parser *p) { - uint32_t handle = 0; - bool destroy = false; + int session_idx = -1; + bool destroyed = false; + uint32_t tmp, handle = 0; + uint32_t *size = &tmp; int i, r; while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) { @@ -491,13 +541,29 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) return -EINVAL; } + if (destroyed) { + DRM_ERROR("No other command allowed after destroy!\n"); + return -EINVAL; + } + switch (cmd) { case 0x00000001: // session handle = radeon_get_ib_value(p, p->idx + 2); + session_idx = radeon_vce_validate_handle(p, handle); + if (session_idx < 0) + return session_idx; + size = &p->rdev->vce.img_size[session_idx]; break; case 0x00000002: // task info + break; + case 0x01000001: // create + *size = radeon_get_ib_value(p, p->idx + 8) * + radeon_get_ib_value(p, p->idx + 10) * + 8 * 3 / 2; + break; + case 0x04000001: // config extension case 0x04000002: // pic control case 0x04000005: // rate control @@ -506,23 +572,39 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) break; case 0x03000001: // encode - r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9); + r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9, + *size); if (r) return r; - r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11); + r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11, + *size / 3); if (r) return r; break; case 0x02000001: // destroy - destroy = true; + destroyed = true; break; case 0x05000001: // context buffer + r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, + *size * 2); + if (r) + return r; + break; + case 0x05000004: // video bitstream buffer + tmp = radeon_get_ib_value(p, p->idx + 4); + r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, + tmp); + if (r) + return r; + break; + case 0x05000005: // feedback buffer - r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2); + r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, + 4096); if (r) return r; break; @@ -532,33 +614,21 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) return -EINVAL; } + if (session_idx == -1) { + DRM_ERROR("no session command at start of IB\n"); + return -EINVAL; + } + p->idx += len / 4; } - if (destroy) { + if (destroyed) { /* IB contains a destroy msg, free the handle */ for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0); - - return 0; - } - - /* create or encode, validate the handle */ - for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { - if (atomic_read(&p->rdev->vce.handles[i]) == handle) - return 0; } - /* handle not found try to alloc a new one */ - for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { - if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) { - p->rdev->vce.filp[i] = p->filp; - return 0; - } - } - - DRM_ERROR("No more free VCE handles!\n"); - return -EINVAL; + 
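/*
 * Handle bookkeeping here is lock-free: radeon_vce_validate_handle()
 * claims a slot with atomic_cmpxchg(slot, 0, handle) and the destroy
 * path above releases it with atomic_cmpxchg(slot, handle, 0).
 * Stripped to its core (illustrative names, nonzero handles assumed):
 *
 *	static atomic_t slots[MAX_SLOTS];
 *
 *	static int slot_claim(u32 handle)
 *	{
 *		int i;
 *
 *		for (i = 0; i < MAX_SLOTS; i++)
 *			if (atomic_cmpxchg(&slots[i], 0, handle) == 0)
 *				return i;	// slot i is now ours
 *		return -EBUSY;
 *	}
 *
 *	static void slot_release(u32 handle)
 *	{
 *		int i;
 *
 *		for (i = 0; i < MAX_SLOTS; i++)
 *			atomic_cmpxchg(&slots[i], handle, 0);
 *	}
 */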
return 0; } /** diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index 2aae6ce49d32..1f426696de36 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -130,10 +130,10 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev, struct list_head *head) { struct radeon_cs_reloc *list; - unsigned i, idx, size; + unsigned i, idx; - size = (radeon_vm_num_pdes(rdev) + 1) * sizeof(struct radeon_cs_reloc); - list = kmalloc(size, GFP_KERNEL); + list = kmalloc_array(vm->max_pde_used + 1, + sizeof(struct radeon_cs_reloc), GFP_KERNEL); if (!list) return NULL; @@ -595,7 +595,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev, ndw = 64; /* assume the worst case */ - ndw += vm->max_pde_used * 12; + ndw += vm->max_pde_used * 16; /* update too big for an IB */ if (ndw > 0xfffff) diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index 683532f84931..7321283602ce 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h @@ -107,8 +107,8 @@ #define SPLL_CHG_STATUS (1 << 1) #define SPLL_CNTL_MODE 0x618 #define SPLL_SW_DIR_CONTROL (1 << 0) -# define SPLL_REFCLK_SEL(x) ((x) << 8) -# define SPLL_REFCLK_SEL_MASK 0xFF00 +# define SPLL_REFCLK_SEL(x) ((x) << 26) +# define SPLL_REFCLK_SEL_MASK (3 << 26) #define CG_SPLL_SPREAD_SPECTRUM 0x620 #define SSEN (1 << 0) diff --git a/drivers/hsi/Kconfig b/drivers/hsi/Kconfig index d94e38dd80c7..2c76de438eb1 100644 --- a/drivers/hsi/Kconfig +++ b/drivers/hsi/Kconfig @@ -14,6 +14,7 @@ config HSI_BOARDINFO bool default y +source "drivers/hsi/controllers/Kconfig" source "drivers/hsi/clients/Kconfig" endif # HSI diff --git a/drivers/hsi/Makefile b/drivers/hsi/Makefile index 9d5d33f90de2..360371e134f1 100644 --- a/drivers/hsi/Makefile +++ b/drivers/hsi/Makefile @@ -3,4 +3,5 @@ # obj-$(CONFIG_HSI_BOARDINFO) += hsi_boardinfo.o obj-$(CONFIG_HSI) += hsi.o +obj-y += controllers/ obj-y += clients/ diff --git a/drivers/hsi/clients/Kconfig b/drivers/hsi/clients/Kconfig index 3bacd275f479..71b9f9ab86e4 100644 --- a/drivers/hsi/clients/Kconfig +++ b/drivers/hsi/clients/Kconfig @@ -4,6 +4,23 @@ comment "HSI clients" +config NOKIA_MODEM + tristate "Nokia Modem" + depends on HSI && SSI_PROTOCOL + help + Say Y here if you want to add support for the modem on Nokia + N900 (Nokia RX-51) hardware. + + If unsure, say N. + +config SSI_PROTOCOL + tristate "SSI protocol" + depends on HSI && PHONET && (OMAP_SSI=y || OMAP_SSI=m) + help + If you say Y here, you will enable the SSI protocol aka McSAAB. + + If unsure, say N. 
+ config HSI_CHAR tristate "HSI/SSI character driver" depends on HSI diff --git a/drivers/hsi/clients/Makefile b/drivers/hsi/clients/Makefile index 327c0e27c8b0..4d5bc0e0b27b 100644 --- a/drivers/hsi/clients/Makefile +++ b/drivers/hsi/clients/Makefile @@ -2,4 +2,6 @@ # Makefile for HSI clients # -obj-$(CONFIG_HSI_CHAR) += hsi_char.o +obj-$(CONFIG_NOKIA_MODEM) += nokia-modem.o +obj-$(CONFIG_SSI_PROTOCOL) += ssi_protocol.o +obj-$(CONFIG_HSI_CHAR) += hsi_char.o diff --git a/drivers/hsi/clients/hsi_char.c b/drivers/hsi/clients/hsi_char.c index e61e5f991aa5..57f70c28fa38 100644 --- a/drivers/hsi/clients/hsi_char.c +++ b/drivers/hsi/clients/hsi_char.c @@ -367,7 +367,7 @@ static int hsc_rx_set(struct hsi_client *cl, struct hsc_rx_config *rxc) return -EINVAL; tmp = cl->rx_cfg; cl->rx_cfg.mode = rxc->mode; - cl->rx_cfg.channels = rxc->channels; + cl->rx_cfg.num_hw_channels = rxc->channels; cl->rx_cfg.flow = rxc->flow; ret = hsi_setup(cl); if (ret < 0) { @@ -383,7 +383,7 @@ static int hsc_rx_set(struct hsi_client *cl, struct hsc_rx_config *rxc) static inline void hsc_rx_get(struct hsi_client *cl, struct hsc_rx_config *rxc) { rxc->mode = cl->rx_cfg.mode; - rxc->channels = cl->rx_cfg.channels; + rxc->channels = cl->rx_cfg.num_hw_channels; rxc->flow = cl->rx_cfg.flow; } @@ -402,7 +402,7 @@ static int hsc_tx_set(struct hsi_client *cl, struct hsc_tx_config *txc) return -EINVAL; tmp = cl->tx_cfg; cl->tx_cfg.mode = txc->mode; - cl->tx_cfg.channels = txc->channels; + cl->tx_cfg.num_hw_channels = txc->channels; cl->tx_cfg.speed = txc->speed; cl->tx_cfg.arb_mode = txc->arb_mode; ret = hsi_setup(cl); @@ -417,7 +417,7 @@ static int hsc_tx_set(struct hsi_client *cl, struct hsc_tx_config *txc) static inline void hsc_tx_get(struct hsi_client *cl, struct hsc_tx_config *txc) { txc->mode = cl->tx_cfg.mode; - txc->channels = cl->tx_cfg.channels; + txc->channels = cl->tx_cfg.num_hw_channels; txc->speed = cl->tx_cfg.speed; txc->arb_mode = cl->tx_cfg.arb_mode; } @@ -435,7 +435,7 @@ static ssize_t hsc_read(struct file *file, char __user *buf, size_t len, return -EINVAL; if (len > max_data_size) len = max_data_size; - if (channel->ch >= channel->cl->rx_cfg.channels) + if (channel->ch >= channel->cl->rx_cfg.num_hw_channels) return -ECHRNG; if (test_and_set_bit(HSC_CH_READ, &channel->flags)) return -EBUSY; @@ -492,7 +492,7 @@ static ssize_t hsc_write(struct file *file, const char __user *buf, size_t len, return -EINVAL; if (len > max_data_size) len = max_data_size; - if (channel->ch >= channel->cl->tx_cfg.channels) + if (channel->ch >= channel->cl->tx_cfg.num_hw_channels) return -ECHRNG; if (test_and_set_bit(HSC_CH_WRITE, &channel->flags)) return -EBUSY; @@ -705,7 +705,7 @@ static int hsc_probe(struct device *dev) if (!hsc_major) { ret = alloc_chrdev_region(&hsc_dev, hsc_baseminor, HSC_DEVS, devname); - if (ret > 0) + if (ret == 0) hsc_major = MAJOR(hsc_dev); } else { hsc_dev = MKDEV(hsc_major, hsc_baseminor); diff --git a/drivers/hsi/clients/nokia-modem.c b/drivers/hsi/clients/nokia-modem.c new file mode 100644 index 000000000000..363b780dacea --- /dev/null +++ b/drivers/hsi/clients/nokia-modem.c @@ -0,0 +1,285 @@ +/* + * nokia-modem.c + * + * HSI client driver for Nokia N900 modem. + * + * Copyright (C) 2014 Sebastian Reichel <sre@kernel.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#include <linux/gpio/consumer.h> +#include <linux/hsi/hsi.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/of_gpio.h> +#include <linux/hsi/ssi_protocol.h> + +static unsigned int pm; +module_param(pm, int, 0400); +MODULE_PARM_DESC(pm, + "Enable power management (0=disabled, 1=userland based [default])"); + +struct nokia_modem_gpio { + struct gpio_desc *gpio; + const char *name; +}; + +struct nokia_modem_device { + struct tasklet_struct nokia_modem_rst_ind_tasklet; + int nokia_modem_rst_ind_irq; + struct device *device; + struct nokia_modem_gpio *gpios; + int gpio_amount; + struct hsi_client *ssi_protocol; +}; + +static void do_nokia_modem_rst_ind_tasklet(unsigned long data) +{ + struct nokia_modem_device *modem = (struct nokia_modem_device *)data; + + if (!modem) + return; + + dev_info(modem->device, "CMT rst line change detected\n"); + + if (modem->ssi_protocol) + ssip_reset_event(modem->ssi_protocol); +} + +static irqreturn_t nokia_modem_rst_ind_isr(int irq, void *data) +{ + struct nokia_modem_device *modem = (struct nokia_modem_device *)data; + + tasklet_schedule(&modem->nokia_modem_rst_ind_tasklet); + + return IRQ_HANDLED; +} + +static void nokia_modem_gpio_unexport(struct device *dev) +{ + struct nokia_modem_device *modem = dev_get_drvdata(dev); + int i; + + for (i = 0; i < modem->gpio_amount; i++) { + sysfs_remove_link(&dev->kobj, modem->gpios[i].name); + gpiod_unexport(modem->gpios[i].gpio); + } +} + +static int nokia_modem_gpio_probe(struct device *dev) +{ + struct device_node *np = dev->of_node; + struct nokia_modem_device *modem = dev_get_drvdata(dev); + int gpio_count, gpio_name_count, i, err; + + gpio_count = of_gpio_count(np); + + if (gpio_count < 0) { + dev_err(dev, "missing gpios: %d\n", gpio_count); + return gpio_count; + } + + gpio_name_count = of_property_count_strings(np, "gpio-names"); + + if (gpio_count != gpio_name_count) { + dev_err(dev, "number of gpios does not equal number of gpio names\n"); + return -EINVAL; + } + + modem->gpios = devm_kzalloc(dev, gpio_count * + sizeof(struct nokia_modem_gpio), GFP_KERNEL); + if (!modem->gpios) { + dev_err(dev, "Could not allocate memory for gpios\n"); + return -ENOMEM; + } + + modem->gpio_amount = gpio_count; + + for (i = 0; i < gpio_count; i++) { + modem->gpios[i].gpio = devm_gpiod_get_index(dev, NULL, i); + if (IS_ERR(modem->gpios[i].gpio)) { + dev_err(dev, "Could not get gpio %d\n", i); + return PTR_ERR(modem->gpios[i].gpio); + } + + err = of_property_read_string_index(np, "gpio-names", i, + &(modem->gpios[i].name)); + if (err) { + dev_err(dev, "Could not get gpio name %d\n", i); + return err; + } + + err = gpiod_direction_output(modem->gpios[i].gpio, 0); + if (err) + return err; + + err = gpiod_export(modem->gpios[i].gpio, 0); + if (err) + return err; + + err = gpiod_export_link(dev, modem->gpios[i].name, + modem->gpios[i].gpio); + if (err) + return err; + } + + return 0; +} + +static int nokia_modem_probe(struct device *dev) +{ + struct device_node *np; + struct 
nokia_modem_device *modem;
+	struct hsi_client *cl = to_hsi_client(dev);
+	struct hsi_port *port = hsi_get_port(cl);
+	int irq, pflags, err;
+	struct hsi_board_info ssip;
+
+	np = dev->of_node;
+	if (!np) {
+		dev_err(dev, "device tree node not found\n");
+		return -ENXIO;
+	}
+
+	modem = devm_kzalloc(dev, sizeof(*modem), GFP_KERNEL);
+	if (!modem) {
+		dev_err(dev, "Could not allocate memory for nokia_modem_device\n");
+		return -ENOMEM;
+	}
+	dev_set_drvdata(dev, modem);
+
+	irq = irq_of_parse_and_map(np, 0);
+	if (irq < 0) {
+		dev_err(dev, "Invalid rst_ind interrupt (%d)\n", irq);
+		return irq;
+	}
+	modem->nokia_modem_rst_ind_irq = irq;
+	pflags = irq_get_trigger_type(irq);
+
+	tasklet_init(&modem->nokia_modem_rst_ind_tasklet,
+			do_nokia_modem_rst_ind_tasklet, (unsigned long)modem);
+	err = devm_request_irq(dev, irq, nokia_modem_rst_ind_isr,
+				IRQF_DISABLED | pflags, "modem_rst_ind", modem);
+	if (err < 0) {
+		dev_err(dev, "Request rst_ind irq(%d) failed (flags %d)\n",
+			irq, pflags);
+		return err;
+	}
+	enable_irq_wake(irq);
+
+	if (pm) {
+		err = nokia_modem_gpio_probe(dev);
+		if (err < 0) {
+			dev_err(dev, "Could not probe GPIOs\n");
+			goto error1;
+		}
+	}
+
+	ssip.name = "ssi-protocol";
+	ssip.tx_cfg = cl->tx_cfg;
+	ssip.rx_cfg = cl->rx_cfg;
+	ssip.platform_data = NULL;
+	ssip.archdata = NULL;
+
+	modem->ssi_protocol = hsi_new_client(port, &ssip);
+	if (!modem->ssi_protocol) {
+		dev_err(dev, "Could not register ssi-protocol device\n");
+		goto error2;
+	}
+
+	err = device_attach(&modem->ssi_protocol->device);
+	if (err == 0) {
+		dev_err(dev, "Missing ssi-protocol driver\n");
+		err = -EPROBE_DEFER;
+		goto error3;
+	} else if (err < 0) {
+		dev_err(dev, "Could not load ssi-protocol driver (%d)\n", err);
+		goto error3;
+	}
+
+	/* TODO: register cmt-speech hsi client */
+
+	dev_info(dev, "Registered Nokia HSI modem\n");
+
+	return 0;
+
+error3:
+	hsi_remove_client(&modem->ssi_protocol->device, NULL);
+error2:
+	nokia_modem_gpio_unexport(dev);
+error1:
+	disable_irq_wake(modem->nokia_modem_rst_ind_irq);
+	tasklet_kill(&modem->nokia_modem_rst_ind_tasklet);
+
+	return err;
+}
+
+static int nokia_modem_remove(struct device *dev)
+{
+	struct nokia_modem_device *modem = dev_get_drvdata(dev);
+
+	if (!modem)
+		return 0;
+
+	if (modem->ssi_protocol) {
+		hsi_remove_client(&modem->ssi_protocol->device, NULL);
+		modem->ssi_protocol = NULL;
+	}
+
+	nokia_modem_gpio_unexport(dev);
+	dev_set_drvdata(dev, NULL);
+	disable_irq_wake(modem->nokia_modem_rst_ind_irq);
+	tasklet_kill(&modem->nokia_modem_rst_ind_tasklet);
+
+	return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id nokia_modem_of_match[] = {
+	{ .compatible = "nokia,n900-modem", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, nokia_modem_of_match);
+#endif
+
+static struct hsi_client_driver nokia_modem_driver = {
+	.driver = {
+		.name = "nokia-modem",
+		.owner = THIS_MODULE,
+		.probe = nokia_modem_probe,
+		.remove = nokia_modem_remove,
+		.of_match_table = of_match_ptr(nokia_modem_of_match),
+	},
+};
+
+static int __init nokia_modem_init(void)
+{
+	return hsi_register_client_driver(&nokia_modem_driver);
+}
+module_init(nokia_modem_init);
+
+static void __exit nokia_modem_exit(void)
+{
+	hsi_unregister_client_driver(&nokia_modem_driver);
+}
+module_exit(nokia_modem_exit);
+
+MODULE_ALIAS("hsi:nokia-modem");
+MODULE_AUTHOR("Sebastian Reichel <sre@kernel.org>");
+MODULE_DESCRIPTION("HSI driver module for Nokia N900 Modem");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c
new file mode 100644
index 000000000000..ce4be3738d46
--- /dev/null
+++ b/drivers/hsi/clients/ssi_protocol.c
@@ -0,0 +1,1191 @@
+/*
+ * ssi_protocol.c
+ *
+ * Implementation of the SSI McSAAB improved protocol.
+ *
+ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2013 Sebastian Reichel <sre@kernel.org>
+ *
+ * Contact: Carlos Chinea <carlos.chinea@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/atomic.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/if_ether.h>
+#include <linux/if_arp.h>
+#include <linux/if_phonet.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/notifier.h>
+#include <linux/scatterlist.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/hsi/hsi.h>
+#include <linux/hsi/ssi_protocol.h>
+
+void ssi_waketest(struct hsi_client *cl, unsigned int enable);
+
+#define SSIP_TXQUEUE_LEN	100
+#define SSIP_MAX_MTU		65535
+#define SSIP_DEFAULT_MTU	4000
+#define PN_MEDIA_SOS		21
+#define SSIP_MIN_PN_HDR		6	/* FIXME: Revisit */
+#define SSIP_WDTOUT		2000	/* FIXME: has to be 500 msecs */
+#define SSIP_KATOUT		15	/* 15 msecs */
+#define SSIP_MAX_CMDS		5 /* Number of pre-allocated command buffers */
+#define SSIP_BYTES_TO_FRAMES(x) ((((x) - 1) >> 2) + 1)
+#define SSIP_CMT_LOADER_SYNC	0x11223344
+/*
+ * SSI protocol command definitions
+ */
+#define SSIP_COMMAND(data)	((data) >> 28)
+#define SSIP_PAYLOAD(data)	((data) & 0xfffffff)
+/* Commands */
+#define SSIP_SW_BREAK		0
+#define SSIP_BOOTINFO_REQ	1
+#define SSIP_BOOTINFO_RESP	2
+#define SSIP_WAKETEST_RESULT	3
+#define SSIP_START_TRANS	4
+#define SSIP_READY		5
+/* Payloads */
+#define SSIP_DATA_VERSION(data)	((data) & 0xff)
+#define SSIP_LOCAL_VERID	1
+#define SSIP_WAKETEST_OK	0
+#define SSIP_WAKETEST_FAILED	1
+#define SSIP_PDU_LENGTH(data)	(((data) >> 8) & 0xffff)
+#define SSIP_MSG_ID(data)	((data) & 0xff)
+/* Generic Command */
+#define SSIP_CMD(cmd, payload) (((cmd) << 28) | ((payload) & 0xfffffff))
+/* Commands for the control channel */
+#define SSIP_BOOTINFO_REQ_CMD(ver) \
+		SSIP_CMD(SSIP_BOOTINFO_REQ, SSIP_DATA_VERSION(ver))
+#define SSIP_BOOTINFO_RESP_CMD(ver) \
+		SSIP_CMD(SSIP_BOOTINFO_RESP, SSIP_DATA_VERSION(ver))
+#define SSIP_START_TRANS_CMD(pdulen, id) \
+		SSIP_CMD(SSIP_START_TRANS, (((pdulen) << 8) | SSIP_MSG_ID(id)))
+#define SSIP_READY_CMD		SSIP_CMD(SSIP_READY, 0)
+#define SSIP_SWBREAK_CMD	SSIP_CMD(SSIP_SW_BREAK, 0)
+
+/* Main state machine states */
+enum {
+	INIT,
+	HANDSHAKE,
+	ACTIVE,
+};
+
+/* Send state machine states */
+enum {
+	SEND_IDLE,
+	WAIT4READY,
+	SEND_READY,
+	SENDING,
+	SENDING_SWBREAK,
+};
+
+/* Receive state machine states */
+enum {
+	RECV_IDLE,
+	RECV_READY,
+	RECEIVING,
+};
+
+/**
+ * struct ssi_protocol - SSI protocol (McSAAB) data
+ * @main_state: Main state machine
+ * @send_state: TX state machine
+ * @recv_state: RX state machine
+ * @waketest: Flag to follow wake line test
+ * @rxid: RX data id
+ * @txid: TX data id
+ * @txqueue_len: TX queue length
+ * @tx_wd: TX watchdog
+ * @rx_wd: RX watchdog
+ * @keep_alive: Workaround for SSI HW bug
+ * @lock: To serialize access to this struct
+ * @netdev: Phonet network device
+ * @txqueue: TX data queue
+ * @cmdqueue: Queue of free commands
+ * @cl: HSI client own reference
+ * @link: Link for ssip_list
+ * @tx_usecnt: Refcount to keep track of the slaves that use the wake line
+ * @channel_id_cmd: HSI channel id for command stream
+ * @channel_id_data: HSI channel id for data stream
+ */
+struct ssi_protocol {
+	unsigned int		main_state;
+	unsigned int		send_state;
+	unsigned int		recv_state;
+	unsigned int		waketest:1;
+	u8			rxid;
+	u8			txid;
+	unsigned int		txqueue_len;
+	struct timer_list	tx_wd;
+	struct timer_list	rx_wd;
+	struct timer_list	keep_alive; /* wake-up workaround */
+	spinlock_t		lock;
+	struct net_device	*netdev;
+	struct list_head	txqueue;
+	struct list_head	cmdqueue;
+	struct hsi_client	*cl;
+	struct list_head	link;
+	atomic_t		tx_usecnt;
+	int			channel_id_cmd;
+	int			channel_id_data;
+};
+
+/* List of ssi protocol instances */
+static LIST_HEAD(ssip_list);
+
+static void ssip_rxcmd_complete(struct hsi_msg *msg);
+
+static inline void ssip_set_cmd(struct hsi_msg *msg, u32 cmd)
+{
+	u32 *data;
+
+	data = sg_virt(msg->sgt.sgl);
+	*data = cmd;
+}
+
+static inline u32 ssip_get_cmd(struct hsi_msg *msg)
+{
+	u32 *data;
+
+	data = sg_virt(msg->sgt.sgl);
+
+	return *data;
+}
+
+static void ssip_skb_to_msg(struct sk_buff *skb, struct hsi_msg *msg)
+{
+	skb_frag_t *frag;
+	struct scatterlist *sg;
+	int i;
+
+	BUG_ON(msg->sgt.nents != (unsigned int)(skb_shinfo(skb)->nr_frags + 1));
+
+	sg = msg->sgt.sgl;
+	sg_set_buf(sg, skb->data, skb_headlen(skb));
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		sg = sg_next(sg);
+		BUG_ON(!sg);
+		frag = &skb_shinfo(skb)->frags[i];
+		sg_set_page(sg, frag->page.p, frag->size, frag->page_offset);
+	}
+}
+
+static void ssip_free_data(struct hsi_msg *msg)
+{
+	struct sk_buff *skb;
+
+	skb = msg->context;
+	pr_debug("free data: msg %p context %p skb %p\n", msg, msg->context,
+		 skb);
+	msg->destructor = NULL;
+	dev_kfree_skb(skb);
+	hsi_free_msg(msg);
+}
+
+static struct hsi_msg *ssip_alloc_data(struct ssi_protocol *ssi,
+					struct sk_buff *skb, gfp_t flags)
+{
+	struct hsi_msg *msg;
+
+	msg = hsi_alloc_msg(skb_shinfo(skb)->nr_frags + 1, flags);
+	if (!msg)
+		return NULL;
+	ssip_skb_to_msg(skb, msg);
+	msg->destructor = ssip_free_data;
+	msg->channel = ssi->channel_id_data;
+	msg->context = skb;
+
+	return msg;
+}
+
+static inline void ssip_release_cmd(struct hsi_msg *msg)
+{
+	struct ssi_protocol *ssi = hsi_client_drvdata(msg->cl);
+
+	dev_dbg(&msg->cl->device, "Release cmd 0x%08x\n", ssip_get_cmd(msg));
+	spin_lock_bh(&ssi->lock);
+	list_add_tail(&msg->link, &ssi->cmdqueue);
+	spin_unlock_bh(&ssi->lock);
+}
+
+static struct hsi_msg *ssip_claim_cmd(struct ssi_protocol *ssi)
+{
+	struct hsi_msg *msg;
+
+	BUG_ON(list_empty(&ssi->cmdqueue));
+
+	spin_lock_bh(&ssi->lock);
+	msg = list_first_entry(&ssi->cmdqueue, struct hsi_msg, link);
+	list_del(&msg->link);
+	spin_unlock_bh(&ssi->lock);
+	msg->destructor = ssip_release_cmd;
+
+	return msg;
+}
+
+static void ssip_free_cmds(struct ssi_protocol *ssi)
+{
+	struct hsi_msg *msg, *tmp;
+
+	list_for_each_entry_safe(msg, tmp, &ssi->cmdqueue, link) {
+		list_del(&msg->link);
+
msg->destructor = NULL; + kfree(sg_virt(msg->sgt.sgl)); + hsi_free_msg(msg); + } +} + +static int ssip_alloc_cmds(struct ssi_protocol *ssi) +{ + struct hsi_msg *msg; + u32 *buf; + unsigned int i; + + for (i = 0; i < SSIP_MAX_CMDS; i++) { + msg = hsi_alloc_msg(1, GFP_KERNEL); + if (!msg) + goto out; + buf = kmalloc(sizeof(*buf), GFP_KERNEL); + if (!buf) { + hsi_free_msg(msg); + goto out; + } + sg_init_one(msg->sgt.sgl, buf, sizeof(*buf)); + msg->channel = ssi->channel_id_cmd; + list_add_tail(&msg->link, &ssi->cmdqueue); + } + + return 0; +out: + ssip_free_cmds(ssi); + + return -ENOMEM; +} + +static void ssip_set_rxstate(struct ssi_protocol *ssi, unsigned int state) +{ + ssi->recv_state = state; + switch (state) { + case RECV_IDLE: + del_timer(&ssi->rx_wd); + if (ssi->send_state == SEND_IDLE) + del_timer(&ssi->keep_alive); + break; + case RECV_READY: + /* CMT speech workaround */ + if (atomic_read(&ssi->tx_usecnt)) + break; + /* Otherwise fall through */ + case RECEIVING: + mod_timer(&ssi->keep_alive, jiffies + + msecs_to_jiffies(SSIP_KATOUT)); + mod_timer(&ssi->rx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT)); + break; + default: + break; + } +} + +static void ssip_set_txstate(struct ssi_protocol *ssi, unsigned int state) +{ + ssi->send_state = state; + switch (state) { + case SEND_IDLE: + case SEND_READY: + del_timer(&ssi->tx_wd); + if (ssi->recv_state == RECV_IDLE) + del_timer(&ssi->keep_alive); + break; + case WAIT4READY: + case SENDING: + case SENDING_SWBREAK: + mod_timer(&ssi->keep_alive, + jiffies + msecs_to_jiffies(SSIP_KATOUT)); + mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT)); + break; + default: + break; + } +} + +struct hsi_client *ssip_slave_get_master(struct hsi_client *slave) +{ + struct hsi_client *master = ERR_PTR(-ENODEV); + struct ssi_protocol *ssi; + + list_for_each_entry(ssi, &ssip_list, link) + if (slave->device.parent == ssi->cl->device.parent) { + master = ssi->cl; + break; + } + + return master; +} +EXPORT_SYMBOL_GPL(ssip_slave_get_master); + +int ssip_slave_start_tx(struct hsi_client *master) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(master); + + dev_dbg(&master->device, "start TX %d\n", atomic_read(&ssi->tx_usecnt)); + spin_lock_bh(&ssi->lock); + if (ssi->send_state == SEND_IDLE) { + ssip_set_txstate(ssi, WAIT4READY); + hsi_start_tx(master); + } + spin_unlock_bh(&ssi->lock); + atomic_inc(&ssi->tx_usecnt); + + return 0; +} +EXPORT_SYMBOL_GPL(ssip_slave_start_tx); + +int ssip_slave_stop_tx(struct hsi_client *master) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(master); + + WARN_ON_ONCE(atomic_read(&ssi->tx_usecnt) == 0); + + if (atomic_dec_and_test(&ssi->tx_usecnt)) { + spin_lock_bh(&ssi->lock); + if ((ssi->send_state == SEND_READY) || + (ssi->send_state == WAIT4READY)) { + ssip_set_txstate(ssi, SEND_IDLE); + hsi_stop_tx(master); + } + spin_unlock_bh(&ssi->lock); + } + dev_dbg(&master->device, "stop TX %d\n", atomic_read(&ssi->tx_usecnt)); + + return 0; +} +EXPORT_SYMBOL_GPL(ssip_slave_stop_tx); + +int ssip_slave_running(struct hsi_client *master) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(master); + return netif_running(ssi->netdev); +} +EXPORT_SYMBOL_GPL(ssip_slave_running); + +static void ssip_reset(struct hsi_client *cl) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct list_head *head, *tmp; + struct hsi_msg *msg; + + if (netif_running(ssi->netdev)) + netif_carrier_off(ssi->netdev); + hsi_flush(cl); + spin_lock_bh(&ssi->lock); + if (ssi->send_state != SEND_IDLE) + hsi_stop_tx(cl); + if (ssi->waketest) + 
ssi_waketest(cl, 0); + del_timer(&ssi->rx_wd); + del_timer(&ssi->tx_wd); + del_timer(&ssi->keep_alive); + ssi->main_state = 0; + ssi->send_state = 0; + ssi->recv_state = 0; + ssi->waketest = 0; + ssi->rxid = 0; + ssi->txid = 0; + list_for_each_safe(head, tmp, &ssi->txqueue) { + msg = list_entry(head, struct hsi_msg, link); + dev_dbg(&cl->device, "Pending TX data\n"); + list_del(head); + ssip_free_data(msg); + } + ssi->txqueue_len = 0; + spin_unlock_bh(&ssi->lock); +} + +static void ssip_dump_state(struct hsi_client *cl) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct hsi_msg *msg; + + spin_lock_bh(&ssi->lock); + dev_err(&cl->device, "Main state: %d\n", ssi->main_state); + dev_err(&cl->device, "Recv state: %d\n", ssi->recv_state); + dev_err(&cl->device, "Send state: %d\n", ssi->send_state); + dev_err(&cl->device, "CMT %s\n", (ssi->main_state == ACTIVE) ? + "Online" : "Offline"); + dev_err(&cl->device, "Wake test %d\n", ssi->waketest); + dev_err(&cl->device, "Data RX id: %d\n", ssi->rxid); + dev_err(&cl->device, "Data TX id: %d\n", ssi->txid); + + list_for_each_entry(msg, &ssi->txqueue, link) + dev_err(&cl->device, "pending TX data (%p)\n", msg); + spin_unlock_bh(&ssi->lock); +} + +static void ssip_error(struct hsi_client *cl) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct hsi_msg *msg; + + ssip_dump_state(cl); + ssip_reset(cl); + msg = ssip_claim_cmd(ssi); + msg->complete = ssip_rxcmd_complete; + hsi_async_read(cl, msg); +} + +static void ssip_keep_alive(unsigned long data) +{ + struct hsi_client *cl = (struct hsi_client *)data; + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + + dev_dbg(&cl->device, "Keep alive kick in: m(%d) r(%d) s(%d)\n", + ssi->main_state, ssi->recv_state, ssi->send_state); + + spin_lock(&ssi->lock); + if (ssi->recv_state == RECV_IDLE) + switch (ssi->send_state) { + case SEND_READY: + if (atomic_read(&ssi->tx_usecnt) == 0) + break; + /* + * Fall through. Workaround for cmt-speech: + * in that case we rely on audio timers. + */ + case SEND_IDLE: + spin_unlock(&ssi->lock); + return; + } + mod_timer(&ssi->keep_alive, jiffies + msecs_to_jiffies(SSIP_KATOUT)); + spin_unlock(&ssi->lock); +} + +static void ssip_wd(unsigned long data) +{ + struct hsi_client *cl = (struct hsi_client *)data; + + dev_err(&cl->device, "Watchdog triggered\n"); + ssip_error(cl); +} + +static void ssip_send_bootinfo_req_cmd(struct hsi_client *cl) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct hsi_msg *msg; + + dev_dbg(&cl->device, "Issuing BOOT INFO REQ command\n"); + msg = ssip_claim_cmd(ssi); + ssip_set_cmd(msg, SSIP_BOOTINFO_REQ_CMD(SSIP_LOCAL_VERID)); + msg->complete = ssip_release_cmd; + hsi_async_write(cl, msg); + dev_dbg(&cl->device, "Issuing RX command\n"); + msg = ssip_claim_cmd(ssi); + msg->complete = ssip_rxcmd_complete; + hsi_async_read(cl, msg); +} + +static void ssip_start_rx(struct hsi_client *cl) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct hsi_msg *msg; + + dev_dbg(&cl->device, "RX start M(%d) R(%d)\n", ssi->main_state, + ssi->recv_state); + spin_lock(&ssi->lock); + /* + * We can have two UP events in a row due to a short low + * to high transition. Therefore we need to ignore the second UP event.
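+ * (The ssi->recv_state == RECV_READY test below is what ignores the + * repeated UP event.)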
+ */ + if ((ssi->main_state != ACTIVE) || (ssi->recv_state == RECV_READY)) { + if (ssi->main_state == INIT) { + ssi->main_state = HANDSHAKE; + spin_unlock(&ssi->lock); + ssip_send_bootinfo_req_cmd(cl); + } else { + spin_unlock(&ssi->lock); + } + return; + } + ssip_set_rxstate(ssi, RECV_READY); + spin_unlock(&ssi->lock); + + msg = ssip_claim_cmd(ssi); + ssip_set_cmd(msg, SSIP_READY_CMD); + msg->complete = ssip_release_cmd; + dev_dbg(&cl->device, "Send READY\n"); + hsi_async_write(cl, msg); +} + +static void ssip_stop_rx(struct hsi_client *cl) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + + dev_dbg(&cl->device, "RX stop M(%d)\n", ssi->main_state); + spin_lock(&ssi->lock); + if (likely(ssi->main_state == ACTIVE)) + ssip_set_rxstate(ssi, RECV_IDLE); + spin_unlock(&ssi->lock); +} + +static void ssip_free_strans(struct hsi_msg *msg) +{ + ssip_free_data(msg->context); + ssip_release_cmd(msg); +} + +static void ssip_strans_complete(struct hsi_msg *msg) +{ + struct hsi_client *cl = msg->cl; + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct hsi_msg *data; + + data = msg->context; + ssip_release_cmd(msg); + spin_lock(&ssi->lock); + ssip_set_txstate(ssi, SENDING); + spin_unlock(&ssi->lock); + hsi_async_write(cl, data); +} + +static int ssip_xmit(struct hsi_client *cl) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct hsi_msg *msg, *dmsg; + struct sk_buff *skb; + + spin_lock_bh(&ssi->lock); + if (list_empty(&ssi->txqueue)) { + spin_unlock_bh(&ssi->lock); + return 0; + } + dmsg = list_first_entry(&ssi->txqueue, struct hsi_msg, link); + list_del(&dmsg->link); + ssi->txqueue_len--; + spin_unlock_bh(&ssi->lock); + + msg = ssip_claim_cmd(ssi); + skb = dmsg->context; + msg->context = dmsg; + msg->complete = ssip_strans_complete; + msg->destructor = ssip_free_strans; + + spin_lock_bh(&ssi->lock); + ssip_set_cmd(msg, SSIP_START_TRANS_CMD(SSIP_BYTES_TO_FRAMES(skb->len), + ssi->txid)); + ssi->txid++; + ssip_set_txstate(ssi, SENDING); + spin_unlock_bh(&ssi->lock); + + dev_dbg(&cl->device, "Send STRANS (%d frames)\n", + SSIP_BYTES_TO_FRAMES(skb->len)); + + return hsi_async_write(cl, msg); +} + +/* In soft IRQ context */ +static void ssip_pn_rx(struct sk_buff *skb) +{ + struct net_device *dev = skb->dev; + + if (unlikely(!netif_running(dev))) { + dev_dbg(&dev->dev, "Drop RX packet\n"); + dev->stats.rx_dropped++; + dev_kfree_skb(skb); + return; + } + if (unlikely(!pskb_may_pull(skb, SSIP_MIN_PN_HDR))) { + dev_dbg(&dev->dev, "Error drop RX packet\n"); + dev->stats.rx_errors++; + dev->stats.rx_length_errors++; + dev_kfree_skb(skb); + return; + } + dev->stats.rx_packets++; + dev->stats.rx_bytes += skb->len; + + /* length field is exchanged in network byte order */ + ((u16 *)skb->data)[2] = ntohs(((u16 *)skb->data)[2]); + dev_dbg(&dev->dev, "RX length fixed (%04x -> %u)\n", + ((u16 *)skb->data)[2], ntohs(((u16 *)skb->data)[2])); + + skb->protocol = htons(ETH_P_PHONET); + skb_reset_mac_header(skb); + __skb_pull(skb, 1); + netif_rx(skb); +} + +static void ssip_rx_data_complete(struct hsi_msg *msg) +{ + struct hsi_client *cl = msg->cl; + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct sk_buff *skb; + + if (msg->status == HSI_STATUS_ERROR) { + dev_err(&cl->device, "RX data error\n"); + ssip_free_data(msg); + ssip_error(cl); + return; + } + del_timer(&ssi->rx_wd); /* FIXME: Revisit */ + skb = msg->context; + ssip_pn_rx(skb); + hsi_free_msg(msg); +} + +static void ssip_rx_bootinforeq(struct hsi_client *cl, u32 cmd) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + 
struct hsi_msg *msg; + + /* Workaround: Ignore CMT Loader message leftover */ + if (cmd == SSIP_CMT_LOADER_SYNC) + return; + + switch (ssi->main_state) { + case ACTIVE: + dev_err(&cl->device, "Boot info req on active state\n"); + ssip_error(cl); + /* Fall through */ + case INIT: + spin_lock(&ssi->lock); + ssi->main_state = HANDSHAKE; + if (!ssi->waketest) { + ssi->waketest = 1; + ssi_waketest(cl, 1); /* FIXME: To be removed */ + } + /* Start boot handshake watchdog */ + mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT)); + spin_unlock(&ssi->lock); + dev_dbg(&cl->device, "Send BOOTINFO_RESP\n"); + if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID) + dev_warn(&cl->device, "boot info req verid mismatch\n"); + msg = ssip_claim_cmd(ssi); + ssip_set_cmd(msg, SSIP_BOOTINFO_RESP_CMD(SSIP_LOCAL_VERID)); + msg->complete = ssip_release_cmd; + hsi_async_write(cl, msg); + break; + case HANDSHAKE: + /* Ignore */ + break; + default: + dev_dbg(&cl->device, "Wrong state M(%d)\n", ssi->main_state); + break; + } +} + +static void ssip_rx_bootinforesp(struct hsi_client *cl, u32 cmd) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + + if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID) + dev_warn(&cl->device, "boot info resp verid mismatch\n"); + + spin_lock(&ssi->lock); + if (ssi->main_state != ACTIVE) + /* Use tx_wd as a boot watchdog in non ACTIVE state */ + mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT)); + else + dev_dbg(&cl->device, "boot info resp ignored M(%d)\n", + ssi->main_state); + spin_unlock(&ssi->lock); +} + +static void ssip_rx_waketest(struct hsi_client *cl, u32 cmd) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + unsigned int wkres = SSIP_PAYLOAD(cmd); + + spin_lock(&ssi->lock); + if (ssi->main_state != HANDSHAKE) { + dev_dbg(&cl->device, "wake lines test ignored M(%d)\n", + ssi->main_state); + spin_unlock(&ssi->lock); + return; + } + if (ssi->waketest) { + ssi->waketest = 0; + ssi_waketest(cl, 0); /* FIXME: To be removed */ + } + ssi->main_state = ACTIVE; + del_timer(&ssi->tx_wd); /* Stop boot handshake timer */ + spin_unlock(&ssi->lock); + + dev_notice(&cl->device, "WAKELINES TEST %s\n", + wkres & SSIP_WAKETEST_FAILED ? "FAILED" : "OK"); + if (wkres & SSIP_WAKETEST_FAILED) { + ssip_error(cl); + return; + } + dev_dbg(&cl->device, "CMT is ONLINE\n"); + netif_wake_queue(ssi->netdev); + netif_carrier_on(ssi->netdev); +} + +static void ssip_rx_ready(struct hsi_client *cl) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + + spin_lock(&ssi->lock); + if (unlikely(ssi->main_state != ACTIVE)) { + dev_dbg(&cl->device, "READY on wrong state: S(%d) M(%d)\n", + ssi->send_state, ssi->main_state); + spin_unlock(&ssi->lock); + return; + } + if (ssi->send_state != WAIT4READY) { + dev_dbg(&cl->device, "Ignore spurious READY command\n"); + spin_unlock(&ssi->lock); + return; + } + ssip_set_txstate(ssi, SEND_READY); + spin_unlock(&ssi->lock); + ssip_xmit(cl); +} + +static void ssip_rx_strans(struct hsi_client *cl, u32 cmd) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct sk_buff *skb; + struct hsi_msg *msg; + int len = SSIP_PDU_LENGTH(cmd); + + dev_dbg(&cl->device, "RX strans: %d frames\n", len); + spin_lock(&ssi->lock); + if (unlikely(ssi->main_state != ACTIVE)) { + dev_err(&cl->device, "START TRANS wrong state: S(%d) M(%d)\n", + ssi->send_state, ssi->main_state); + spin_unlock(&ssi->lock); + return; + } + ssip_set_rxstate(ssi, RECEIVING); + if (unlikely(SSIP_MSG_ID(cmd) != ssi->rxid)) { + dev_err(&cl->device, "START TRANS id %d expected %d\n", + SSIP_MSG_ID(cmd), ssi->rxid); + spin_unlock(&ssi->lock); + goto out1; + } + ssi->rxid++; + spin_unlock(&ssi->lock); + skb = netdev_alloc_skb(ssi->netdev, len * 4); + if (unlikely(!skb)) { + dev_err(&cl->device, "No memory for rx skb\n"); + goto out1; + } + skb->dev = ssi->netdev; + skb_put(skb, len * 4); + msg = ssip_alloc_data(ssi, skb, GFP_ATOMIC); + if (unlikely(!msg)) { + dev_err(&cl->device, "No memory for RX data msg\n"); + goto out2; + } + msg->complete = ssip_rx_data_complete; + hsi_async_read(cl, msg); + + return; +out2: + dev_kfree_skb(skb); +out1: + ssip_error(cl); +} + +static void ssip_rxcmd_complete(struct hsi_msg *msg) +{ + struct hsi_client *cl = msg->cl; + u32 cmd = ssip_get_cmd(msg); + unsigned int cmdid = SSIP_COMMAND(cmd); + + if (msg->status == HSI_STATUS_ERROR) { + dev_err(&cl->device, "RX error detected\n"); + ssip_release_cmd(msg); + ssip_error(cl); + return; + } + hsi_async_read(cl, msg); + dev_dbg(&cl->device, "RX cmd: 0x%08x\n", cmd); + switch (cmdid) { + case SSIP_SW_BREAK: + /* Ignored */ + break; + case SSIP_BOOTINFO_REQ: + ssip_rx_bootinforeq(cl, cmd); + break; + case SSIP_BOOTINFO_RESP: + ssip_rx_bootinforesp(cl, cmd); + break; + case SSIP_WAKETEST_RESULT: + ssip_rx_waketest(cl, cmd); + break; + case SSIP_START_TRANS: + ssip_rx_strans(cl, cmd); + break; + case SSIP_READY: + ssip_rx_ready(cl); + break; + default: + dev_warn(&cl->device, "command 0x%08x not supported\n", cmd); + break; + } +} + +static void ssip_swbreak_complete(struct hsi_msg *msg) +{ + struct hsi_client *cl = msg->cl; + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + + ssip_release_cmd(msg); + spin_lock(&ssi->lock); + if (list_empty(&ssi->txqueue)) { + if (atomic_read(&ssi->tx_usecnt)) { + ssip_set_txstate(ssi, SEND_READY); + } else { + ssip_set_txstate(ssi, SEND_IDLE); + hsi_stop_tx(cl); + } + spin_unlock(&ssi->lock); + } else { + spin_unlock(&ssi->lock); + ssip_xmit(cl); + } + netif_wake_queue(ssi->netdev); +} + +static void ssip_tx_data_complete(struct hsi_msg *msg) +{ + struct hsi_client *cl = msg->cl; + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct hsi_msg *cmsg; + + if (msg->status == HSI_STATUS_ERROR) { + dev_err(&cl->device, "TX data
error\n"); + ssip_error(cl); + goto out; + } + spin_lock(&ssi->lock); + if (list_empty(&ssi->txqueue)) { + ssip_set_txstate(ssi, SENDING_SWBREAK); + spin_unlock(&ssi->lock); + cmsg = ssip_claim_cmd(ssi); + ssip_set_cmd(cmsg, SSIP_SWBREAK_CMD); + cmsg->complete = ssip_swbreak_complete; + dev_dbg(&cl->device, "Send SWBREAK\n"); + hsi_async_write(cl, cmsg); + } else { + spin_unlock(&ssi->lock); + ssip_xmit(cl); + } +out: + ssip_free_data(msg); +} + +void ssip_port_event(struct hsi_client *cl, unsigned long event) +{ + switch (event) { + case HSI_EVENT_START_RX: + ssip_start_rx(cl); + break; + case HSI_EVENT_STOP_RX: + ssip_stop_rx(cl); + break; + default: + return; + } +} + +static int ssip_pn_open(struct net_device *dev) +{ + struct hsi_client *cl = to_hsi_client(dev->dev.parent); + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + int err; + + err = hsi_claim_port(cl, 1); + if (err < 0) { + dev_err(&cl->device, "SSI port already claimed\n"); + return err; + } + err = hsi_register_port_event(cl, ssip_port_event); + if (err < 0) { + dev_err(&cl->device, "Register HSI port event failed (%d)\n", + err); + return err; + } + dev_dbg(&cl->device, "Configuring SSI port\n"); + hsi_setup(cl); + spin_lock_bh(&ssi->lock); + if (!ssi->waketest) { + ssi->waketest = 1; + ssi_waketest(cl, 1); /* FIXME: To be removed */ + } + ssi->main_state = INIT; + spin_unlock_bh(&ssi->lock); + + return 0; +} + +static int ssip_pn_stop(struct net_device *dev) +{ + struct hsi_client *cl = to_hsi_client(dev->dev.parent); + + ssip_reset(cl); + hsi_unregister_port_event(cl); + hsi_release_port(cl); + + return 0; +} + +static int ssip_pn_set_mtu(struct net_device *dev, int new_mtu) +{ + if (new_mtu > SSIP_MAX_MTU || new_mtu < PHONET_MIN_MTU) + return -EINVAL; + dev->mtu = new_mtu; + + return 0; +} + +static int ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct hsi_client *cl = to_hsi_client(dev->dev.parent); + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct hsi_msg *msg; + + if ((skb->protocol != htons(ETH_P_PHONET)) || + (skb->len < SSIP_MIN_PN_HDR)) + goto drop; + /* Pad to 32-bits - FIXME: Revisit */ + if ((skb->len & 3) && skb_pad(skb, 4 - (skb->len & 3))) + goto drop; + + /* + * Modem sends Phonet messages over SSI with its own endianness... + * Assume that modem has the same endianness as we do.
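+ * For illustration: the 16 bit Phonet length field sits at bytes 4-5 of + * skb->data, i.e. ((u16 *)skb->data)[2], and is exchanged in network byte + * order, hence the single htons() fixup below.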
+ */ + if (skb_cow_head(skb, 0)) + goto drop; + + /* length field is exchanged in network byte order */ + ((u16 *)skb->data)[2] = htons(((u16 *)skb->data)[2]); + + msg = ssip_alloc_data(ssi, skb, GFP_ATOMIC); + if (!msg) { + dev_dbg(&cl->device, "Dropping tx data: No memory\n"); + goto drop; + } + msg->complete = ssip_tx_data_complete; + + spin_lock_bh(&ssi->lock); + if (unlikely(ssi->main_state != ACTIVE)) { + spin_unlock_bh(&ssi->lock); + dev_dbg(&cl->device, "Dropping tx data: CMT is OFFLINE\n"); + goto drop2; + } + list_add_tail(&msg->link, &ssi->txqueue); + ssi->txqueue_len++; + if (dev->tx_queue_len < ssi->txqueue_len) { + dev_info(&cl->device, "TX queue full %d\n", ssi->txqueue_len); + netif_stop_queue(dev); + } + if (ssi->send_state == SEND_IDLE) { + ssip_set_txstate(ssi, WAIT4READY); + spin_unlock_bh(&ssi->lock); + dev_dbg(&cl->device, "Start TX qlen %d\n", ssi->txqueue_len); + hsi_start_tx(cl); + } else if (ssi->send_state == SEND_READY) { + /* Needed for cmt-speech workaround */ + dev_dbg(&cl->device, "Start TX on SEND READY qlen %d\n", + ssi->txqueue_len); + spin_unlock_bh(&ssi->lock); + ssip_xmit(cl); + } else { + spin_unlock_bh(&ssi->lock); + } + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + + return 0; +drop2: + hsi_free_msg(msg); +drop: + dev->stats.tx_dropped++; + dev_kfree_skb(skb); + + return 0; +} + +/* CMT reset event handler */ +void ssip_reset_event(struct hsi_client *master) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(master); + dev_err(&ssi->cl->device, "CMT reset detected!\n"); + ssip_error(ssi->cl); +} +EXPORT_SYMBOL_GPL(ssip_reset_event); + +static const struct net_device_ops ssip_pn_ops = { + .ndo_open = ssip_pn_open, + .ndo_stop = ssip_pn_stop, + .ndo_start_xmit = ssip_pn_xmit, + .ndo_change_mtu = ssip_pn_set_mtu, +}; + +static void ssip_pn_setup(struct net_device *dev) +{ + dev->features = 0; + dev->netdev_ops = &ssip_pn_ops; + dev->type = ARPHRD_PHONET; + dev->flags = IFF_POINTOPOINT | IFF_NOARP; + dev->mtu = SSIP_DEFAULT_MTU; + dev->hard_header_len = 1; + dev->dev_addr[0] = PN_MEDIA_SOS; + dev->addr_len = 1; + dev->tx_queue_len = SSIP_TXQUEUE_LEN; + + dev->destructor = free_netdev; + dev->header_ops = &phonet_header_ops; +} + +static int ssi_protocol_probe(struct device *dev) +{ + static const char ifname[] = "phonet%d"; + struct hsi_client *cl = to_hsi_client(dev); + struct ssi_protocol *ssi; + int err; + + ssi = kzalloc(sizeof(*ssi), GFP_KERNEL); + if (!ssi) { + dev_err(dev, "No memory for ssi protocol\n"); + return -ENOMEM; + } + + spin_lock_init(&ssi->lock); + init_timer_deferrable(&ssi->rx_wd); + init_timer_deferrable(&ssi->tx_wd); + init_timer(&ssi->keep_alive); + ssi->rx_wd.data = (unsigned long)cl; + ssi->rx_wd.function = ssip_wd; + ssi->tx_wd.data = (unsigned long)cl; + ssi->tx_wd.function = ssip_wd; + ssi->keep_alive.data = (unsigned long)cl; + ssi->keep_alive.function = ssip_keep_alive; + INIT_LIST_HEAD(&ssi->txqueue); + INIT_LIST_HEAD(&ssi->cmdqueue); + atomic_set(&ssi->tx_usecnt, 0); + hsi_client_set_drvdata(cl, ssi); + ssi->cl = cl; + + ssi->channel_id_cmd = hsi_get_channel_id_by_name(cl, "mcsaab-control"); + if (ssi->channel_id_cmd < 0) { + err = ssi->channel_id_cmd; + dev_err(dev, "Could not get cmd channel (%d)\n", err); + goto out; + } + + ssi->channel_id_data = hsi_get_channel_id_by_name(cl, "mcsaab-data"); + if (ssi->channel_id_data < 0) { + err = ssi->channel_id_data; + dev_err(dev, "Could not get data channel (%d)\n", err); + goto out; + } + + err = ssip_alloc_cmds(ssi); + if (err < 0) { + dev_err(dev, "No memory 
for commands\n"); + goto out; + } + + ssi->netdev = alloc_netdev(0, ifname, ssip_pn_setup); + if (!ssi->netdev) { + dev_err(dev, "No memory for netdev\n"); + err = -ENOMEM; + goto out1; + } + + SET_NETDEV_DEV(ssi->netdev, dev); + netif_carrier_off(ssi->netdev); + err = register_netdev(ssi->netdev); + if (err < 0) { + dev_err(dev, "Register netdev failed (%d)\n", err); + goto out2; + } + + list_add(&ssi->link, &ssip_list); + + dev_dbg(dev, "channel configuration: cmd=%d, data=%d\n", + ssi->channel_id_cmd, ssi->channel_id_data); + + return 0; +out2: + free_netdev(ssi->netdev); +out1: + ssip_free_cmds(ssi); +out: + kfree(ssi); + + return err; +} + +static int ssi_protocol_remove(struct device *dev) +{ + struct hsi_client *cl = to_hsi_client(dev); + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + + list_del(&ssi->link); + unregister_netdev(ssi->netdev); + ssip_free_cmds(ssi); + hsi_client_set_drvdata(cl, NULL); + kfree(ssi); + + return 0; +} + +static struct hsi_client_driver ssip_driver = { + .driver = { + .name = "ssi-protocol", + .owner = THIS_MODULE, + .probe = ssi_protocol_probe, + .remove = ssi_protocol_remove, + }, +}; + +static int __init ssip_init(void) +{ + pr_info("SSI protocol aka McSAAB added\n"); + + return hsi_register_client_driver(&ssip_driver); +} +module_init(ssip_init); + +static void __exit ssip_exit(void) +{ + hsi_unregister_client_driver(&ssip_driver); + pr_info("SSI protocol driver removed\n"); +} +module_exit(ssip_exit); + +MODULE_ALIAS("hsi:ssi-protocol"); +MODULE_AUTHOR("Carlos Chinea <carlos.chinea@nokia.com>"); +MODULE_AUTHOR("Remi Denis-Courmont <remi.denis-courmont@nokia.com>"); +MODULE_DESCRIPTION("SSI protocol improved aka McSAAB"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hsi/controllers/Kconfig b/drivers/hsi/controllers/Kconfig new file mode 100644 index 000000000000..6aba27808172 --- /dev/null +++ b/drivers/hsi/controllers/Kconfig @@ -0,0 +1,19 @@ +# +# HSI controllers configuration +# +comment "HSI controllers" + +config OMAP_SSI + tristate "OMAP SSI hardware driver" + depends on HSI && OF && (ARCH_OMAP3 || (ARM && COMPILE_TEST)) + ---help--- + SSI is a legacy version of HSI. It is usually used to connect + an application engine with a cellular modem. + If you say Y here, you will enable the OMAP SSI hardware driver. + + If unsure, say N. + +config OMAP_SSI_PORT + tristate + default m if OMAP_SSI=m + default y if OMAP_SSI=y diff --git a/drivers/hsi/controllers/Makefile b/drivers/hsi/controllers/Makefile new file mode 100644 index 000000000000..d2665cf9c545 --- /dev/null +++ b/drivers/hsi/controllers/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for HSI controllers drivers +# + +obj-$(CONFIG_OMAP_SSI) += omap_ssi.o +obj-$(CONFIG_OMAP_SSI_PORT) += omap_ssi_port.o diff --git a/drivers/hsi/controllers/omap_ssi.c b/drivers/hsi/controllers/omap_ssi.c new file mode 100644 index 000000000000..0fc7a7fd0140 --- /dev/null +++ b/drivers/hsi/controllers/omap_ssi.c @@ -0,0 +1,625 @@ +/* OMAP SSI driver. + * + * Copyright (C) 2010 Nokia Corporation. All rights reserved. + * Copyright (C) 2014 Sebastian Reichel <sre@kernel.org> + * + * Contact: Carlos Chinea <carlos.chinea@nokia.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#include <linux/compiler.h> +#include <linux/err.h> +#include <linux/ioport.h> +#include <linux/io.h> +#include <linux/gpio.h> +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/dma-mapping.h> +#include <linux/dmaengine.h> +#include <linux/delay.h> +#include <linux/seq_file.h> +#include <linux/scatterlist.h> +#include <linux/interrupt.h> +#include <linux/spinlock.h> +#include <linux/debugfs.h> +#include <linux/pm_runtime.h> +#include <linux/of_platform.h> +#include <linux/hsi/hsi.h> +#include <linux/idr.h> + +#include "omap_ssi_regs.h" +#include "omap_ssi.h" + +/* For automatically allocated device IDs */ +static DEFINE_IDA(platform_omap_ssi_ida); + +#ifdef CONFIG_DEBUG_FS +static int ssi_debug_show(struct seq_file *m, void *p __maybe_unused) +{ + struct hsi_controller *ssi = m->private; + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + void __iomem *sys = omap_ssi->sys; + + pm_runtime_get_sync(ssi->device.parent); + seq_printf(m, "REVISION\t: 0x%08x\n", readl(sys + SSI_REVISION_REG)); + seq_printf(m, "SYSCONFIG\t: 0x%08x\n", readl(sys + SSI_SYSCONFIG_REG)); + seq_printf(m, "SYSSTATUS\t: 0x%08x\n", readl(sys + SSI_SYSSTATUS_REG)); + pm_runtime_put_sync(ssi->device.parent); + + return 0; +} + +static int ssi_debug_gdd_show(struct seq_file *m, void *p __maybe_unused) +{ + struct hsi_controller *ssi = m->private; + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + void __iomem *gdd = omap_ssi->gdd; + void __iomem *sys = omap_ssi->sys; + int lch; + + pm_runtime_get_sync(ssi->device.parent); + + seq_printf(m, "GDD_MPU_STATUS\t: 0x%08x\n", + readl(sys + SSI_GDD_MPU_IRQ_STATUS_REG)); + seq_printf(m, "GDD_MPU_ENABLE\t: 0x%08x\n\n", + readl(sys + SSI_GDD_MPU_IRQ_ENABLE_REG)); + seq_printf(m, "HW_ID\t\t: 0x%08x\n", + readl(gdd + SSI_GDD_HW_ID_REG)); + seq_printf(m, "PPORT_ID\t: 0x%08x\n", + readl(gdd + SSI_GDD_PPORT_ID_REG)); + seq_printf(m, "MPORT_ID\t: 0x%08x\n", + readl(gdd + SSI_GDD_MPORT_ID_REG)); + seq_printf(m, "TEST\t\t: 0x%08x\n", + readl(gdd + SSI_GDD_TEST_REG)); + seq_printf(m, "GCR\t\t: 0x%08x\n", + readl(gdd + SSI_GDD_GCR_REG)); + + for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++) { + seq_printf(m, "\nGDD LCH %d\n=========\n", lch); + seq_printf(m, "CSDP\t\t: 0x%04x\n", + readw(gdd + SSI_GDD_CSDP_REG(lch))); + seq_printf(m, "CCR\t\t: 0x%04x\n", + readw(gdd + SSI_GDD_CCR_REG(lch))); + seq_printf(m, "CICR\t\t: 0x%04x\n", + readw(gdd + SSI_GDD_CICR_REG(lch))); + seq_printf(m, "CSR\t\t: 0x%04x\n", + readw(gdd + SSI_GDD_CSR_REG(lch))); + seq_printf(m, "CSSA\t\t: 0x%08x\n", + readl(gdd + SSI_GDD_CSSA_REG(lch))); + seq_printf(m, "CDSA\t\t: 0x%08x\n", + readl(gdd + SSI_GDD_CDSA_REG(lch))); + seq_printf(m, "CEN\t\t: 0x%04x\n", + readw(gdd + SSI_GDD_CEN_REG(lch))); + seq_printf(m, "CSAC\t\t: 0x%04x\n", + readw(gdd + SSI_GDD_CSAC_REG(lch))); + seq_printf(m, "CDAC\t\t: 0x%04x\n", + readw(gdd + SSI_GDD_CDAC_REG(lch))); + seq_printf(m, "CLNK_CTRL\t: 0x%04x\n", + readw(gdd + SSI_GDD_CLNK_CTRL_REG(lch))); + } + + pm_runtime_put_sync(ssi->device.parent); + + return 0; +} + +static int ssi_regs_open(struct inode *inode, struct file *file) +{ + return single_open(file, ssi_debug_show, inode->i_private); +} + +static int 
ssi_gdd_regs_open(struct inode *inode, struct file *file) +{ + return single_open(file, ssi_debug_gdd_show, inode->i_private); +} + +static const struct file_operations ssi_regs_fops = { + .open = ssi_regs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct file_operations ssi_gdd_regs_fops = { + .open = ssi_gdd_regs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init ssi_debug_add_ctrl(struct hsi_controller *ssi) +{ + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + struct dentry *dir; + + /* SSI controller */ + omap_ssi->dir = debugfs_create_dir(dev_name(&ssi->device), NULL); + if (IS_ERR(omap_ssi->dir)) + return PTR_ERR(omap_ssi->dir); + + debugfs_create_file("regs", S_IRUGO, omap_ssi->dir, ssi, + &ssi_regs_fops); + /* SSI GDD (DMA) */ + dir = debugfs_create_dir("gdd", omap_ssi->dir); + if (IS_ERR(dir)) + goto rback; + debugfs_create_file("regs", S_IRUGO, dir, ssi, &ssi_gdd_regs_fops); + + return 0; +rback: + debugfs_remove_recursive(omap_ssi->dir); + + return PTR_ERR(dir); +} + +static void ssi_debug_remove_ctrl(struct hsi_controller *ssi) +{ + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + debugfs_remove_recursive(omap_ssi->dir); +} +#endif /* CONFIG_DEBUG_FS */ + +/* + * FIXME: Horrible HACK needed until we remove the useless wakeline test + * in the CMT. To be removed !!!! + */ +void ssi_waketest(struct hsi_client *cl, unsigned int enable) +{ + struct hsi_port *port = hsi_get_port(cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + omap_port->wktest = !!enable; + if (omap_port->wktest) { + pm_runtime_get_sync(ssi->device.parent); + writel_relaxed(SSI_WAKE(0), + omap_ssi->sys + SSI_SET_WAKE_REG(port->num)); + } else { + writel_relaxed(SSI_WAKE(0), + omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num)); + pm_runtime_put_sync(ssi->device.parent); + } +} +EXPORT_SYMBOL_GPL(ssi_waketest); + +static void ssi_gdd_complete(struct hsi_controller *ssi, unsigned int lch) +{ + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + struct hsi_msg *msg = omap_ssi->gdd_trn[lch].msg; + struct hsi_port *port = to_hsi_port(msg->cl->device.parent); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + unsigned int dir; + u32 csr; + u32 val; + + spin_lock(&omap_ssi->lock); + + val = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); + val &= ~SSI_GDD_LCH(lch); + writel_relaxed(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); + + if (msg->ttype == HSI_MSG_READ) { + dir = DMA_FROM_DEVICE; + val = SSI_DATAAVAILABLE(msg->channel); + pm_runtime_put_sync(ssi->device.parent); + } else { + dir = DMA_TO_DEVICE; + val = SSI_DATAACCEPT(msg->channel); + /* Keep clocks reference for write pio event */ + } + dma_unmap_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents, dir); + csr = readw(omap_ssi->gdd + SSI_GDD_CSR_REG(lch)); + omap_ssi->gdd_trn[lch].msg = NULL; /* release GDD lch */ + dev_dbg(&port->device, "DMA completed ch %d ttype %d\n", + msg->channel, msg->ttype); + spin_unlock(&omap_ssi->lock); + if (csr & SSI_CSR_TOUR) { /* Timeout error */ + msg->status = HSI_STATUS_ERROR; + msg->actual_len = 0; + spin_lock(&omap_port->lock); + list_del(&msg->link); /* Dequeue msg */ + spin_unlock(&omap_port->lock); + msg->complete(msg); + return; + } + spin_lock(&omap_port->lock); + val |= 
readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + writel_relaxed(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + spin_unlock(&omap_port->lock); + + msg->status = HSI_STATUS_COMPLETED; + msg->actual_len = sg_dma_len(msg->sgt.sgl); +} + +static void ssi_gdd_tasklet(unsigned long dev) +{ + struct hsi_controller *ssi = (struct hsi_controller *)dev; + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + void __iomem *sys = omap_ssi->sys; + unsigned int lch; + u32 status_reg; + + pm_runtime_get_sync(ssi->device.parent); + + status_reg = readl(sys + SSI_GDD_MPU_IRQ_STATUS_REG); + for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++) { + if (status_reg & SSI_GDD_LCH(lch)) + ssi_gdd_complete(ssi, lch); + } + writel_relaxed(status_reg, sys + SSI_GDD_MPU_IRQ_STATUS_REG); + status_reg = readl(sys + SSI_GDD_MPU_IRQ_STATUS_REG); + + pm_runtime_put_sync(ssi->device.parent); + + if (status_reg) + tasklet_hi_schedule(&omap_ssi->gdd_tasklet); + else + enable_irq(omap_ssi->gdd_irq); + +} + +static irqreturn_t ssi_gdd_isr(int irq, void *ssi) +{ + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + tasklet_hi_schedule(&omap_ssi->gdd_tasklet); + disable_irq_nosync(irq); + + return IRQ_HANDLED; +} + +static unsigned long ssi_get_clk_rate(struct hsi_controller *ssi) +{ + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + unsigned long rate = clk_get_rate(omap_ssi->fck); + return rate; +} + +static int __init ssi_get_iomem(struct platform_device *pd, + const char *name, void __iomem **pbase, dma_addr_t *phy) +{ + struct resource *mem; + struct resource *ioarea; + void __iomem *base; + struct hsi_controller *ssi = platform_get_drvdata(pd); + + mem = platform_get_resource_byname(pd, IORESOURCE_MEM, name); + if (!mem) { + dev_err(&pd->dev, "IO memory region missing (%s)\n", name); + return -ENXIO; + } + ioarea = devm_request_mem_region(&ssi->device, mem->start, + resource_size(mem), dev_name(&pd->dev)); + if (!ioarea) { + dev_err(&pd->dev, "%s IO memory region request failed\n", + mem->name); + return -ENXIO; + } + base = devm_ioremap(&ssi->device, mem->start, resource_size(mem)); + if (!base) { + dev_err(&pd->dev, "%s IO remap failed\n", mem->name); + return -ENXIO; + } + *pbase = base; + + if (phy) + *phy = mem->start; + + return 0; +} + +static int __init ssi_add_controller(struct hsi_controller *ssi, + struct platform_device *pd) +{ + struct omap_ssi_controller *omap_ssi; + int err; + + omap_ssi = devm_kzalloc(&ssi->device, sizeof(*omap_ssi), GFP_KERNEL); + if (!omap_ssi) { + dev_err(&pd->dev, "not enough memory for omap ssi\n"); + return -ENOMEM; + } + + ssi->id = ida_simple_get(&platform_omap_ssi_ida, 0, 0, GFP_KERNEL); + if (ssi->id < 0) { + err = ssi->id; + goto out_err; + } + + ssi->owner = THIS_MODULE; + ssi->device.parent = &pd->dev; + dev_set_name(&ssi->device, "ssi%d", ssi->id); + hsi_controller_set_drvdata(ssi, omap_ssi); + omap_ssi->dev = &ssi->device; + err = ssi_get_iomem(pd, "sys", &omap_ssi->sys, NULL); + if (err < 0) + goto out_err; + err = ssi_get_iomem(pd, "gdd", &omap_ssi->gdd, NULL); + if (err < 0) + goto out_err; + omap_ssi->gdd_irq = platform_get_irq_byname(pd, "gdd_mpu"); + if (omap_ssi->gdd_irq < 0) { + dev_err(&pd->dev, "GDD IRQ resource missing\n"); + err = omap_ssi->gdd_irq; + goto out_err; + } + tasklet_init(&omap_ssi->gdd_tasklet, ssi_gdd_tasklet, + (unsigned long)ssi); + err = devm_request_irq(&ssi->device, omap_ssi->gdd_irq, ssi_gdd_isr, + 0, "gdd_mpu", ssi); + if (err < 0) { + dev_err(&ssi->device, "Request GDD IRQ %d 
failed (%d)", + omap_ssi->gdd_irq, err); + goto out_err; + } + + omap_ssi->port = devm_kzalloc(&ssi->device, + sizeof(struct omap_ssi_port *) * ssi->num_ports, GFP_KERNEL); + if (!omap_ssi->port) { + err = -ENOMEM; + goto out_err; + } + + omap_ssi->fck = devm_clk_get(&ssi->device, "ssi_ssr_fck"); + if (IS_ERR(omap_ssi->fck)) { + dev_err(&pd->dev, "Could not acquire clock \"ssi_ssr_fck\": %li\n", + PTR_ERR(omap_ssi->fck)); + err = -ENODEV; + goto out_err; + } + + /* TODO: find register, which can be used to detect context loss */ + omap_ssi->get_loss = NULL; + + omap_ssi->max_speed = UINT_MAX; + spin_lock_init(&omap_ssi->lock); + err = hsi_register_controller(ssi); + + if (err < 0) + goto out_err; + + return 0; + +out_err: + ida_simple_remove(&platform_omap_ssi_ida, ssi->id); + return err; +} + +static int __init ssi_hw_init(struct hsi_controller *ssi) +{ + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + unsigned int i; + u32 val; + int err; + + err = pm_runtime_get_sync(ssi->device.parent); + if (err < 0) { + dev_err(&ssi->device, "runtime PM failed %d\n", err); + return err; + } + /* Resetting SSI controller */ + writel_relaxed(SSI_SOFTRESET, omap_ssi->sys + SSI_SYSCONFIG_REG); + val = readl(omap_ssi->sys + SSI_SYSSTATUS_REG); + for (i = 0; ((i < 20) && !(val & SSI_RESETDONE)); i++) { + msleep(20); + val = readl(omap_ssi->sys + SSI_SYSSTATUS_REG); + } + if (!(val & SSI_RESETDONE)) { + dev_err(&ssi->device, "SSI HW reset failed\n"); + pm_runtime_put_sync(ssi->device.parent); + return -EIO; + } + /* Resetting GDD */ + writel_relaxed(SSI_SWRESET, omap_ssi->gdd + SSI_GDD_GRST_REG); + /* Get FCK rate in kHz */ + omap_ssi->fck_rate = DIV_ROUND_CLOSEST(ssi_get_clk_rate(ssi), 1000); + dev_dbg(&ssi->device, "SSI fck rate %lu kHz\n", omap_ssi->fck_rate); + /* Set default PM settings */ + val = SSI_AUTOIDLE | SSI_SIDLEMODE_SMART | SSI_MIDLEMODE_SMART; + writel_relaxed(val, omap_ssi->sys + SSI_SYSCONFIG_REG); + omap_ssi->sysconfig = val; + writel_relaxed(SSI_CLK_AUTOGATING_ON, omap_ssi->sys + SSI_GDD_GCR_REG); + omap_ssi->gdd_gcr = SSI_CLK_AUTOGATING_ON; + pm_runtime_put_sync(ssi->device.parent); + + return 0; +} + +static void ssi_remove_controller(struct hsi_controller *ssi) +{ + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + int id = ssi->id; + tasklet_kill(&omap_ssi->gdd_tasklet); + hsi_unregister_controller(ssi); + ida_simple_remove(&platform_omap_ssi_ida, id); +} + +static inline int ssi_of_get_available_ports_count(const struct device_node *np) +{ + struct device_node *child; + int num = 0; + + for_each_available_child_of_node(np, child) + if (of_device_is_compatible(child, "ti,omap3-ssi-port")) + num++; + + return num; +} + +static int ssi_remove_ports(struct device *dev, void *c) +{ + struct platform_device *pdev = to_platform_device(dev); + + of_device_unregister(pdev); + + return 0; +} + +static int __init ssi_probe(struct platform_device *pd) +{ + struct platform_device *childpdev; + struct device_node *np = pd->dev.of_node; + struct device_node *child; + struct hsi_controller *ssi; + int err; + int num_ports; + + if (!np) { + dev_err(&pd->dev, "missing device tree data\n"); + return -EINVAL; + } + + num_ports = ssi_of_get_available_ports_count(np); + + ssi = hsi_alloc_controller(num_ports, GFP_KERNEL); + if (!ssi) { + dev_err(&pd->dev, "No memory for controller\n"); + return -ENOMEM; + } + + platform_set_drvdata(pd, ssi); + + err = ssi_add_controller(ssi, pd); + if (err < 0) + goto out1; + + pm_runtime_irq_safe(&pd->dev); +
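/* irq safe runtime PM: e.g. ssi_waketest() gets/puts from atomic context */ +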
pm_runtime_enable(&pd->dev); + + err = ssi_hw_init(ssi); + if (err < 0) + goto out2; +#ifdef CONFIG_DEBUG_FS + err = ssi_debug_add_ctrl(ssi); + if (err < 0) + goto out2; +#endif + + for_each_available_child_of_node(np, child) { + if (!of_device_is_compatible(child, "ti,omap3-ssi-port")) + continue; + + childpdev = of_platform_device_create(child, NULL, &pd->dev); + if (!childpdev) { + err = -ENODEV; + dev_err(&pd->dev, "failed to create ssi controller port\n"); + goto out3; + } + } + + dev_info(&pd->dev, "ssi controller %d initialized (%d ports)!\n", + ssi->id, num_ports); + return err; +out3: + device_for_each_child(&pd->dev, NULL, ssi_remove_ports); +out2: + ssi_remove_controller(ssi); +out1: + platform_set_drvdata(pd, NULL); + pm_runtime_disable(&pd->dev); + + return err; +} + +static int __exit ssi_remove(struct platform_device *pd) +{ + struct hsi_controller *ssi = platform_get_drvdata(pd); + +#ifdef CONFIG_DEBUG_FS + ssi_debug_remove_ctrl(ssi); +#endif + ssi_remove_controller(ssi); + platform_set_drvdata(pd, NULL); + + pm_runtime_disable(&pd->dev); + + /* cleanup of of_platform_populate() call */ + device_for_each_child(&pd->dev, NULL, ssi_remove_ports); + + return 0; +} + +#ifdef CONFIG_PM_RUNTIME +static int omap_ssi_runtime_suspend(struct device *dev) +{ + struct hsi_controller *ssi = dev_get_drvdata(dev); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + dev_dbg(dev, "runtime suspend!\n"); + + if (omap_ssi->get_loss) + omap_ssi->loss_count = + omap_ssi->get_loss(ssi->device.parent); + + return 0; +} + +static int omap_ssi_runtime_resume(struct device *dev) +{ + struct hsi_controller *ssi = dev_get_drvdata(dev); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + dev_dbg(dev, "runtime resume!\n"); + + if ((omap_ssi->get_loss) && (omap_ssi->loss_count == + omap_ssi->get_loss(ssi->device.parent))) + return 0; + + writel_relaxed(omap_ssi->gdd_gcr, omap_ssi->gdd + SSI_GDD_GCR_REG); + + return 0; +} + +static const struct dev_pm_ops omap_ssi_pm_ops = { + SET_RUNTIME_PM_OPS(omap_ssi_runtime_suspend, omap_ssi_runtime_resume, + NULL) +}; + +#define DEV_PM_OPS (&omap_ssi_pm_ops) +#else +#define DEV_PM_OPS NULL +#endif + +#ifdef CONFIG_OF +static const struct of_device_id omap_ssi_of_match[] = { + { .compatible = "ti,omap3-ssi", }, + {}, +}; +MODULE_DEVICE_TABLE(of, omap_ssi_of_match); +#else +#define omap_ssi_of_match NULL +#endif + +static struct platform_driver ssi_pdriver = { + .remove = __exit_p(ssi_remove), + .driver = { + .name = "omap_ssi", + .owner = THIS_MODULE, + .pm = DEV_PM_OPS, + .of_match_table = omap_ssi_of_match, + }, +}; + +module_platform_driver_probe(ssi_pdriver, ssi_probe); + +MODULE_ALIAS("platform:omap_ssi"); +MODULE_AUTHOR("Carlos Chinea <carlos.chinea@nokia.com>"); +MODULE_AUTHOR("Sebastian Reichel <sre@kernel.org>"); +MODULE_DESCRIPTION("Synchronous Serial Interface Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/hsi/controllers/omap_ssi.h b/drivers/hsi/controllers/omap_ssi.h new file mode 100644 index 000000000000..9d056417d88c --- /dev/null +++ b/drivers/hsi/controllers/omap_ssi.h @@ -0,0 +1,166 @@ +/* OMAP SSI internal interface. + * + * Copyright (C) 2010 Nokia Corporation. All rights reserved. + * Copyright (C) 2013 Sebastian Reichel + * + * Contact: Carlos Chinea <carlos.chinea@nokia.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef __LINUX_HSI_OMAP_SSI_H__ +#define __LINUX_HSI_OMAP_SSI_H__ + +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/hsi/hsi.h> +#include <linux/gpio.h> +#include <linux/interrupt.h> +#include <linux/io.h> + +#define SSI_MAX_CHANNELS 8 +#define SSI_MAX_GDD_LCH 8 +#define SSI_BYTES_TO_FRAMES(x) ((((x) - 1) >> 2) + 1) + +/** + * struct omap_ssm_ctx - OMAP synchronous serial module (TX/RX) context + * @mode: Bit transmission mode + * @channels: Number of channels + * @frame_size: Frame size in bits + * @timeout: RX frame timeout + * @divisor: TX divider + * @arb_mode: Arbitration mode for TX frame (Round robin, priority) + */ +struct omap_ssm_ctx { + u32 mode; + u32 channels; + u32 frame_size; + union { + u32 timeout; /* Rx Only */ + struct { + u32 arb_mode; + u32 divisor; + }; /* Tx only */ + }; +}; + +/** + * struct omap_ssi_port - OMAP SSI port data + * @dev: device associated to the port (HSI port) + * @pdev: platform device associated to the port + * @sst_dma: SSI transmitter physical base address + * @ssr_dma: SSI receiver physical base address + * @sst_base: SSI transmitter base address + * @ssr_base: SSI receiver base address + * @wk_lock: spin lock to serialize access to the wake lines + * @lock: Spin lock to serialize access to the SSI port + * @channels: Current number of channels configured (1,2,4 or 8) + * @txqueue: TX message queues + * @rxqueue: RX message queues + * @brkqueue: Queue of incoming HWBREAK requests (FRAME mode) + * @irq: IRQ number + * @wake_irq: IRQ number for incoming wake line (-1 if none) + * @wake_gpio: GPIO number for incoming wake line (-1 if none) + * @pio_tasklet: Bottom half for PIO transfers and events + * @wake_tasklet: Bottom half for incoming wake events + * @wktest: Flag to follow the wake line test (FIXME: HACK to be removed) + * @wkin_cken: Keep track of clock references due to the incoming wake line + * @wk_refcount: Reference count for output wake line + * @sys_mpu_enable: Context for the interrupt enable register for irq 0 + * @sst: Context for the synchronous serial transmitter + * @ssr: Context for the synchronous serial receiver + * @loss_count: To follow if we need to restore context or not + * @port_id: Port number + * @dir: Debugfs port root directory + */ +struct omap_ssi_port { + struct device *dev; + struct device *pdev; + dma_addr_t sst_dma; + dma_addr_t ssr_dma; + void __iomem *sst_base; + void __iomem *ssr_base; + spinlock_t wk_lock; + spinlock_t lock; + unsigned int channels; + struct list_head txqueue[SSI_MAX_CHANNELS]; + struct list_head rxqueue[SSI_MAX_CHANNELS]; + struct list_head brkqueue; + unsigned int irq; + int wake_irq; + int wake_gpio; + struct tasklet_struct pio_tasklet; + struct tasklet_struct wake_tasklet; + bool wktest:1; /* FIXME: HACK to be removed */ + bool wkin_cken:1; /* Workaround */ + unsigned int wk_refcount; + /* OMAP SSI port context */ + u32 sys_mpu_enable; /* We use only one irq */ + struct omap_ssm_ctx sst; + struct omap_ssm_ctx ssr; + u32 loss_count; + u32 port_id; +#ifdef CONFIG_DEBUG_FS + struct dentry *dir; +#endif +}; + +/** + * struct gdd_trn - GDD transaction data + * @msg: Pointer to the HSI message being served + * @sg: Pointer to the current sg entry being served + */ +struct gdd_trn { +
struct hsi_msg *msg; + struct scatterlist *sg; +}; + +/** + * struct omap_ssi_controller - OMAP SSI controller data + * @dev: device associated to the controller (HSI controller) + * @sys: SSI I/O base address + * @gdd: GDD I/O base address + * @fck: SSI functional clock + * @gdd_irq: IRQ line for GDD + * @gdd_tasklet: bottom half for DMA transfers + * @gdd_trn: Array of GDD transaction data for ongoing GDD transfers + * @lock: lock to serialize access to GDD + * @fck_rate: SSI functional clock rate (in kHz) + * @loss_count: To follow if we need to restore context or not + * @max_speed: Maximum TX speed (Kb/s) set by the clients. + * @sysconfig: SSI controller saved context + * @gdd_gcr: SSI GDD saved context + * @get_loss: Pointer to omap_pm_get_dev_context_loss_count, if any + * @port: Array of pointers of the ports of the controller + * @dir: Debugfs SSI root directory + */ +struct omap_ssi_controller { + struct device *dev; + void __iomem *sys; + void __iomem *gdd; + struct clk *fck; + unsigned int gdd_irq; + struct tasklet_struct gdd_tasklet; + struct gdd_trn gdd_trn[SSI_MAX_GDD_LCH]; + spinlock_t lock; + unsigned long fck_rate; + u32 loss_count; + u32 max_speed; + /* OMAP SSI Controller context */ + u32 sysconfig; + u32 gdd_gcr; + int (*get_loss)(struct device *dev); + struct omap_ssi_port **port; +#ifdef CONFIG_DEBUG_FS + struct dentry *dir; +#endif +}; + +#endif /* __LINUX_HSI_OMAP_SSI_H__ */ diff --git a/drivers/hsi/controllers/omap_ssi_port.c b/drivers/hsi/controllers/omap_ssi_port.c new file mode 100644 index 000000000000..b8693f0b27fe --- /dev/null +++ b/drivers/hsi/controllers/omap_ssi_port.c @@ -0,0 +1,1399 @@ +/* OMAP SSI port driver. + * + * Copyright (C) 2010 Nokia Corporation. All rights reserved. + * Copyright (C) 2014 Sebastian Reichel <sre@kernel.org> + * + * Contact: Carlos Chinea <carlos.chinea@nokia.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#include <linux/platform_device.h> +#include <linux/dma-mapping.h> +#include <linux/pm_runtime.h> + +#include <linux/of_gpio.h> +#include <linux/debugfs.h> + +#include "omap_ssi_regs.h" +#include "omap_ssi.h" + +static inline int hsi_dummy_msg(struct hsi_msg *msg __maybe_unused) +{ + return 0; +} + +static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused) +{ + return 0; +} + +static inline unsigned int ssi_wakein(struct hsi_port *port) +{ + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + return gpio_get_value(omap_port->wake_gpio); +} + +#ifdef CONFIG_DEBUG_FS +static void ssi_debug_remove_port(struct hsi_port *port) +{ + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + + debugfs_remove_recursive(omap_port->dir); +} + +static int ssi_debug_port_show(struct seq_file *m, void *p __maybe_unused) +{ + struct hsi_port *port = m->private; + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + void __iomem *base = omap_ssi->sys; + unsigned int ch; + + pm_runtime_get_sync(omap_port->pdev); + if (omap_port->wake_irq > 0) + seq_printf(m, "CAWAKE\t\t: %d\n", ssi_wakein(port)); + seq_printf(m, "WAKE\t\t: 0x%08x\n", + readl(base + SSI_WAKE_REG(port->num))); + seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", 0, + readl(base + SSI_MPU_ENABLE_REG(port->num, 0))); + seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", 0, + readl(base + SSI_MPU_STATUS_REG(port->num, 0))); + /* SST */ + base = omap_port->sst_base; + seq_puts(m, "\nSST\n===\n"); + seq_printf(m, "ID SST\t\t: 0x%08x\n", + readl(base + SSI_SST_ID_REG)); + seq_printf(m, "MODE\t\t: 0x%08x\n", + readl(base + SSI_SST_MODE_REG)); + seq_printf(m, "FRAMESIZE\t: 0x%08x\n", + readl(base + SSI_SST_FRAMESIZE_REG)); + seq_printf(m, "DIVISOR\t\t: 0x%08x\n", + readl(base + SSI_SST_DIVISOR_REG)); + seq_printf(m, "CHANNELS\t: 0x%08x\n", + readl(base + SSI_SST_CHANNELS_REG)); + seq_printf(m, "ARBMODE\t\t: 0x%08x\n", + readl(base + SSI_SST_ARBMODE_REG)); + seq_printf(m, "TXSTATE\t\t: 0x%08x\n", + readl(base + SSI_SST_TXSTATE_REG)); + seq_printf(m, "BUFSTATE\t: 0x%08x\n", + readl(base + SSI_SST_BUFSTATE_REG)); + seq_printf(m, "BREAK\t\t: 0x%08x\n", + readl(base + SSI_SST_BREAK_REG)); + for (ch = 0; ch < omap_port->channels; ch++) { + seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch, + readl(base + SSI_SST_BUFFER_CH_REG(ch))); + } + /* SSR */ + base = omap_port->ssr_base; + seq_puts(m, "\nSSR\n===\n"); + seq_printf(m, "ID SSR\t\t: 0x%08x\n", + readl(base + SSI_SSR_ID_REG)); + seq_printf(m, "MODE\t\t: 0x%08x\n", + readl(base + SSI_SSR_MODE_REG)); + seq_printf(m, "FRAMESIZE\t: 0x%08x\n", + readl(base + SSI_SSR_FRAMESIZE_REG)); + seq_printf(m, "CHANNELS\t: 0x%08x\n", + readl(base + SSI_SSR_CHANNELS_REG)); + seq_printf(m, "TIMEOUT\t\t: 0x%08x\n", + readl(base + SSI_SSR_TIMEOUT_REG)); + seq_printf(m, "RXSTATE\t\t: 0x%08x\n", + readl(base + SSI_SSR_RXSTATE_REG)); + seq_printf(m, "BUFSTATE\t: 0x%08x\n", + readl(base + SSI_SSR_BUFSTATE_REG)); + seq_printf(m, "BREAK\t\t: 0x%08x\n", + readl(base + SSI_SSR_BREAK_REG)); + seq_printf(m, "ERROR\t\t: 0x%08x\n", + readl(base + SSI_SSR_ERROR_REG)); + seq_printf(m, "ERRORACK\t: 0x%08x\n", + readl(base + SSI_SSR_ERRORACK_REG)); + for (ch = 0; ch 
< omap_port->channels; ch++) { + seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch, + readl(base + SSI_SSR_BUFFER_CH_REG(ch))); + } + pm_runtime_put_sync(omap_port->pdev); + + return 0; +} + +static int ssi_port_regs_open(struct inode *inode, struct file *file) +{ + return single_open(file, ssi_debug_port_show, inode->i_private); +} + +static const struct file_operations ssi_port_regs_fops = { + .open = ssi_port_regs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int ssi_div_get(void *data, u64 *val) +{ + struct hsi_port *port = data; + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + + pm_runtime_get_sync(omap_port->pdev); + *val = readl(omap_port->sst_base + SSI_SST_DIVISOR_REG); + pm_runtime_put_sync(omap_port->pdev); + + return 0; +} + +static int ssi_div_set(void *data, u64 val) +{ + struct hsi_port *port = data; + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + + if (val > 127) + return -EINVAL; + + pm_runtime_get_sync(omap_port->pdev); + writel(val, omap_port->sst_base + SSI_SST_DIVISOR_REG); + omap_port->sst.divisor = val; + pm_runtime_put_sync(omap_port->pdev); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(ssi_sst_div_fops, ssi_div_get, ssi_div_set, "%llu\n"); + +static int __init ssi_debug_add_port(struct omap_ssi_port *omap_port, + struct dentry *dir) +{ + struct hsi_port *port = to_hsi_port(omap_port->dev); + + dir = debugfs_create_dir(dev_name(omap_port->dev), dir); + if (IS_ERR(dir)) + return PTR_ERR(dir); + omap_port->dir = dir; + debugfs_create_file("regs", S_IRUGO, dir, port, &ssi_port_regs_fops); + dir = debugfs_create_dir("sst", dir); + if (IS_ERR(dir)) + return PTR_ERR(dir); + debugfs_create_file("divisor", S_IRUGO | S_IWUSR, dir, port, + &ssi_sst_div_fops); + + return 0; +} +#endif + +static int ssi_claim_lch(struct hsi_msg *msg) +{ + + struct hsi_port *port = hsi_get_port(msg->cl); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + int lch; + + for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++) + if (!omap_ssi->gdd_trn[lch].msg) { + omap_ssi->gdd_trn[lch].msg = msg; + omap_ssi->gdd_trn[lch].sg = msg->sgt.sgl; + return lch; + } + + return -EBUSY; +} + +static int ssi_start_dma(struct hsi_msg *msg, int lch) +{ + struct hsi_port *port = hsi_get_port(msg->cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + void __iomem *gdd = omap_ssi->gdd; + int err; + u16 csdp; + u16 ccr; + u32 s_addr; + u32 d_addr; + u32 tmp; + + if (msg->ttype == HSI_MSG_READ) { + err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents, + DMA_FROM_DEVICE); + if (err < 0) { + dev_dbg(&ssi->device, "DMA map SG failed !\n"); + return err; + } + csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT | + SSI_SRC_SINGLE_ACCESS0 | SSI_SRC_PERIPHERAL_PORT | + SSI_DATA_TYPE_S32; + ccr = msg->channel + 0x10 + (port->num * 8); /* Sync */ + ccr |= SSI_DST_AMODE_POSTINC | SSI_SRC_AMODE_CONST | + SSI_CCR_ENABLE; + s_addr = omap_port->ssr_dma + + SSI_SSR_BUFFER_CH_REG(msg->channel); + d_addr = sg_dma_address(msg->sgt.sgl); + } else { + err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents, + DMA_TO_DEVICE); + if (err < 0) { + dev_dbg(&ssi->device, "DMA map SG failed !\n"); + return err; + } + csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT | + SSI_DST_SINGLE_ACCESS0 | SSI_DST_PERIPHERAL_PORT | + 
SSI_DATA_TYPE_S32; + ccr = (msg->channel + 1 + (port->num * 8)) & 0xf; /* Sync */ + ccr |= SSI_SRC_AMODE_POSTINC | SSI_DST_AMODE_CONST | + SSI_CCR_ENABLE; + s_addr = sg_dma_address(msg->sgt.sgl); + d_addr = omap_port->sst_dma + + SSI_SST_BUFFER_CH_REG(msg->channel); + } + dev_dbg(&ssi->device, "lch %d csdp %08x ccr %04x s_addr %08x d_addr %08x\n", + lch, csdp, ccr, s_addr, d_addr); + + /* Hold clocks during the transfer */ + pm_runtime_get_sync(omap_port->pdev); + + writew_relaxed(csdp, gdd + SSI_GDD_CSDP_REG(lch)); + writew_relaxed(SSI_BLOCK_IE | SSI_TOUT_IE, gdd + SSI_GDD_CICR_REG(lch)); + writel_relaxed(d_addr, gdd + SSI_GDD_CDSA_REG(lch)); + writel_relaxed(s_addr, gdd + SSI_GDD_CSSA_REG(lch)); + writew_relaxed(SSI_BYTES_TO_FRAMES(msg->sgt.sgl->length), + gdd + SSI_GDD_CEN_REG(lch)); + + spin_lock_bh(&omap_ssi->lock); + tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); + tmp |= SSI_GDD_LCH(lch); + writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); + spin_unlock_bh(&omap_ssi->lock); + writew(ccr, gdd + SSI_GDD_CCR_REG(lch)); + msg->status = HSI_STATUS_PROCEEDING; + + return 0; +} + +static int ssi_start_pio(struct hsi_msg *msg) +{ + struct hsi_port *port = hsi_get_port(msg->cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + u32 val; + + pm_runtime_get_sync(omap_port->pdev); + if (msg->ttype == HSI_MSG_WRITE) { + val = SSI_DATAACCEPT(msg->channel); + /* Hold clocks for pio writes */ + pm_runtime_get_sync(omap_port->pdev); + } else { + val = SSI_DATAAVAILABLE(msg->channel) | SSI_ERROROCCURED; + } + dev_dbg(&port->device, "Single %s transfer\n", + msg->ttype ? "write" : "read"); + val |= readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + pm_runtime_put_sync(omap_port->pdev); + msg->actual_len = 0; + msg->status = HSI_STATUS_PROCEEDING; + + return 0; +} + +static int ssi_start_transfer(struct list_head *queue) +{ + struct hsi_msg *msg; + int lch = -1; + + if (list_empty(queue)) + return 0; + msg = list_first_entry(queue, struct hsi_msg, link); + if (msg->status != HSI_STATUS_QUEUED) + return 0; + if ((msg->sgt.nents) && (msg->sgt.sgl->length > sizeof(u32))) + lch = ssi_claim_lch(msg); + if (lch >= 0) + return ssi_start_dma(msg, lch); + else + return ssi_start_pio(msg); +} + +static int ssi_async_break(struct hsi_msg *msg) +{ + struct hsi_port *port = hsi_get_port(msg->cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + int err = 0; + u32 tmp; + + pm_runtime_get_sync(omap_port->pdev); + if (msg->ttype == HSI_MSG_WRITE) { + if (omap_port->sst.mode != SSI_MODE_FRAME) { + err = -EINVAL; + goto out; + } + writel(1, omap_port->sst_base + SSI_SST_BREAK_REG); + msg->status = HSI_STATUS_COMPLETED; + msg->complete(msg); + } else { + if (omap_port->ssr.mode != SSI_MODE_FRAME) { + err = -EINVAL; + goto out; + } + spin_lock_bh(&omap_port->lock); + tmp = readl(omap_ssi->sys + + SSI_MPU_ENABLE_REG(port->num, 0)); + writel(tmp | SSI_BREAKDETECTED, + omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + msg->status = HSI_STATUS_PROCEEDING; + list_add_tail(&msg->link, &omap_port->brkqueue); + spin_unlock_bh(&omap_port->lock); + } +out: + pm_runtime_put_sync(omap_port->pdev); + + return err; +} + +static int ssi_async(struct hsi_msg *msg) +{ + struct hsi_port *port = hsi_get_port(msg->cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct list_head *queue; + int err = 0; + + BUG_ON(!msg); + + if (msg->sgt.nents > 1) + return -ENOSYS; /* TODO: Add sg support */ + + if (msg->break_frame) + return ssi_async_break(msg); + + if (msg->ttype) { + BUG_ON(msg->channel >= omap_port->sst.channels); + queue = &omap_port->txqueue[msg->channel]; + } else { + BUG_ON(msg->channel >= omap_port->ssr.channels); + queue = &omap_port->rxqueue[msg->channel]; + } + msg->status = HSI_STATUS_QUEUED; + spin_lock_bh(&omap_port->lock); + list_add_tail(&msg->link, queue); + err = ssi_start_transfer(queue); + if (err < 0) { + list_del(&msg->link); + msg->status = HSI_STATUS_ERROR; + } + spin_unlock_bh(&omap_port->lock); + dev_dbg(&port->device, "msg status %d ttype %d ch %d\n", + msg->status, msg->ttype, msg->channel); + + return err; +} + +static u32 ssi_calculate_div(struct hsi_controller *ssi) +{ + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + u32 tx_fckrate = (u32) omap_ssi->fck_rate; + + /* / 2 : SSI TX clock is always half of the SSI functional clock */ + tx_fckrate >>= 1; + /* Round down when tx_fckrate % omap_ssi->max_speed == 0 */ + tx_fckrate--; + dev_dbg(&ssi->device, "TX div %d for fck_rate %lu kHz speed %d Kb/s\n", + tx_fckrate / omap_ssi->max_speed, omap_ssi->fck_rate, + omap_ssi->max_speed); + + return tx_fckrate / omap_ssi->max_speed; +} +
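+/* + * A sketch of the divisor math above: decrementing tx_fckrate first makes + * the integer division compute ceil(tx_fckrate / max_speed) - 1. E.g. for + * a 96000 kHz functional clock, tx_fckrate = 48000; with max_speed = + * 20000 Kb/s, (48000 - 1) / 20000 = 2 = ceil(48000 / 20000) - 1. + */ +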
ssi_async(struct hsi_msg *msg) +{ + struct hsi_port *port = hsi_get_port(msg->cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct list_head *queue; + int err = 0; + + BUG_ON(!msg); + + if (msg->sgt.nents > 1) + return -ENOSYS; /* TODO: Add sg support */ + + if (msg->break_frame) + return ssi_async_break(msg); + + if (msg->ttype) { + BUG_ON(msg->channel >= omap_port->sst.channels); + queue = &omap_port->txqueue[msg->channel]; + } else { + BUG_ON(msg->channel >= omap_port->ssr.channels); + queue = &omap_port->rxqueue[msg->channel]; + } + msg->status = HSI_STATUS_QUEUED; + spin_lock_bh(&omap_port->lock); + list_add_tail(&msg->link, queue); + err = ssi_start_transfer(queue); + if (err < 0) { + list_del(&msg->link); + msg->status = HSI_STATUS_ERROR; + } + spin_unlock_bh(&omap_port->lock); + dev_dbg(&port->device, "msg status %d ttype %d ch %d\n", + msg->status, msg->ttype, msg->channel); + + return err; +} + +static u32 ssi_calculate_div(struct hsi_controller *ssi) +{ + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + u32 tx_fckrate = (u32) omap_ssi->fck_rate; + + /* / 2 : SSI TX clock is always half of the SSI functional clock */ + tx_fckrate >>= 1; + /* Round down when tx_fckrate % omap_ssi->max_speed == 0 */ + tx_fckrate--; + dev_dbg(&ssi->device, "TX div %d for fck_rate %lu Khz speed %d Kb/s\n", + tx_fckrate / omap_ssi->max_speed, omap_ssi->fck_rate, + omap_ssi->max_speed); + + return tx_fckrate / omap_ssi->max_speed; +} + +static void ssi_flush_queue(struct list_head *queue, struct hsi_client *cl) +{ + struct list_head *node, *tmp; + struct hsi_msg *msg; + + list_for_each_safe(node, tmp, queue) { + msg = list_entry(node, struct hsi_msg, link); + if ((cl) && (cl != msg->cl)) + continue; + list_del(node); + pr_debug("flush queue: ch %d, msg %p len %d type %d ctxt %p\n", + msg->channel, msg, msg->sgt.sgl->length, + msg->ttype, msg->context); + if (msg->destructor) + msg->destructor(msg); + else + hsi_free_msg(msg); + } +} + +static int ssi_setup(struct hsi_client *cl) +{ + struct hsi_port *port = to_hsi_port(cl->device.parent); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + void __iomem *sst = omap_port->sst_base; + void __iomem *ssr = omap_port->ssr_base; + u32 div; + u32 val; + int err = 0; + + pm_runtime_get_sync(omap_port->pdev); + spin_lock_bh(&omap_port->lock); + if (cl->tx_cfg.speed) + omap_ssi->max_speed = cl->tx_cfg.speed; + div = ssi_calculate_div(ssi); + if (div > SSI_MAX_DIVISOR) { + dev_err(&cl->device, "Invalid TX speed %d Mb/s (div %d)\n", + cl->tx_cfg.speed, div); + err = -EINVAL; + goto out; + } + /* Set TX/RX module to sleep to stop TX/RX during cfg update */ + writel_relaxed(SSI_MODE_SLEEP, sst + SSI_SST_MODE_REG); + writel_relaxed(SSI_MODE_SLEEP, ssr + SSI_SSR_MODE_REG); + /* Flush posted write */ + val = readl(ssr + SSI_SSR_MODE_REG); + /* TX */ + writel_relaxed(31, sst + SSI_SST_FRAMESIZE_REG); + writel_relaxed(div, sst + SSI_SST_DIVISOR_REG); + writel_relaxed(cl->tx_cfg.num_hw_channels, sst + SSI_SST_CHANNELS_REG); + writel_relaxed(cl->tx_cfg.arb_mode, sst + SSI_SST_ARBMODE_REG); + writel_relaxed(cl->tx_cfg.mode, sst + SSI_SST_MODE_REG); + /* RX */ + writel_relaxed(31, ssr + SSI_SSR_FRAMESIZE_REG); + writel_relaxed(cl->rx_cfg.num_hw_channels, ssr + SSI_SSR_CHANNELS_REG); + writel_relaxed(0, ssr + SSI_SSR_TIMEOUT_REG); + /* Cleanup the break queue if we leave FRAME mode */ + 
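ssi_flush_queue() above removes entries while it walks the list, which is why it iterates with list_for_each_safe() instead of a plain cursor; the break-queue cleanup guarded just below leans on the same helper. A minimal userspace analogue of that delete-while-iterating pattern, with made-up types rather than the driver's own:

	#include <stdio.h>
	#include <stdlib.h>

	struct msg {
		int owner;			/* stands in for msg->cl */
		struct msg *next;
	};

	/*
	 * Drop every node owned by 'owner'. cur->next is read before cur
	 * is freed, the same reason list_for_each_safe() carries a second
	 * cursor.
	 */
	static void flush_queue(struct msg **link, int owner)
	{
		while (*link) {
			struct msg *cur = *link;

			if (cur->owner == owner) {
				*link = cur->next;	/* unlink, then free */
				free(cur);
			} else {
				link = &cur->next;
			}
		}
	}

	int main(void)
	{
		struct msg *head = NULL;
		struct msg *m;
		int i;

		for (i = 0; i < 6; i++) {
			m = malloc(sizeof(*m));
			m->owner = i % 2;
			m->next = head;
			head = m;
		}
		flush_queue(&head, 1);	/* flush one client, keep the rest */
		for (m = head; m; m = m->next)
			printf("kept owner %d\n", m->owner);
		return 0;
	}
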
if ((omap_port->ssr.mode == SSI_MODE_FRAME) && + (cl->rx_cfg.mode != SSI_MODE_FRAME)) + ssi_flush_queue(&omap_port->brkqueue, cl); + writel_relaxed(cl->rx_cfg.mode, ssr + SSI_SSR_MODE_REG); + omap_port->channels = max(cl->rx_cfg.num_hw_channels, + cl->tx_cfg.num_hw_channels); + /* Shadow registering for OFF mode */ + /* SST */ + omap_port->sst.divisor = div; + omap_port->sst.frame_size = 31; + omap_port->sst.channels = cl->tx_cfg.num_hw_channels; + omap_port->sst.arb_mode = cl->tx_cfg.arb_mode; + omap_port->sst.mode = cl->tx_cfg.mode; + /* SSR */ + omap_port->ssr.frame_size = 31; + omap_port->ssr.timeout = 0; + omap_port->ssr.channels = cl->rx_cfg.num_hw_channels; + omap_port->ssr.mode = cl->rx_cfg.mode; +out: + spin_unlock_bh(&omap_port->lock); + pm_runtime_put_sync(omap_port->pdev); + + return err; +} + +static int ssi_flush(struct hsi_client *cl) +{ + struct hsi_port *port = hsi_get_port(cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + struct hsi_msg *msg; + void __iomem *sst = omap_port->sst_base; + void __iomem *ssr = omap_port->ssr_base; + unsigned int i; + u32 err; + + pm_runtime_get_sync(omap_port->pdev); + spin_lock_bh(&omap_port->lock); + /* Stop all DMA transfers */ + for (i = 0; i < SSI_MAX_GDD_LCH; i++) { + msg = omap_ssi->gdd_trn[i].msg; + if (!msg || (port != hsi_get_port(msg->cl))) + continue; + writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i)); + if (msg->ttype == HSI_MSG_READ) + pm_runtime_put_sync(omap_port->pdev); + omap_ssi->gdd_trn[i].msg = NULL; + } + /* Flush all SST buffers */ + writel_relaxed(0, sst + SSI_SST_BUFSTATE_REG); + writel_relaxed(0, sst + SSI_SST_TXSTATE_REG); + /* Flush all SSR buffers */ + writel_relaxed(0, ssr + SSI_SSR_RXSTATE_REG); + writel_relaxed(0, ssr + SSI_SSR_BUFSTATE_REG); + /* Flush all errors */ + err = readl(ssr + SSI_SSR_ERROR_REG); + writel_relaxed(err, ssr + SSI_SSR_ERRORACK_REG); + /* Flush break */ + writel_relaxed(0, ssr + SSI_SSR_BREAK_REG); + /* Clear interrupts */ + writel_relaxed(0, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + writel_relaxed(0xffffff00, + omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0)); + writel_relaxed(0, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); + writel(0xff, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG); + /* Dequeue all pending requests */ + for (i = 0; i < omap_port->channels; i++) { + /* Release write clocks */ + if (!list_empty(&omap_port->txqueue[i])) + pm_runtime_put_sync(omap_port->pdev); + ssi_flush_queue(&omap_port->txqueue[i], NULL); + ssi_flush_queue(&omap_port->rxqueue[i], NULL); + } + ssi_flush_queue(&omap_port->brkqueue, NULL); + spin_unlock_bh(&omap_port->lock); + pm_runtime_put_sync(omap_port->pdev); + + return 0; +} + +static int ssi_start_tx(struct hsi_client *cl) +{ + struct hsi_port *port = hsi_get_port(cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + dev_dbg(&port->device, "Wake out high %d\n", omap_port->wk_refcount); + + spin_lock_bh(&omap_port->wk_lock); + if (omap_port->wk_refcount++) { + spin_unlock_bh(&omap_port->wk_lock); + return 0; + } + pm_runtime_get_sync(omap_port->pdev); /* Grab clocks */ + writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num)); + spin_unlock_bh(&omap_port->wk_lock); + + return 0; +} + +static int ssi_stop_tx(struct 
hsi_client *cl) +{ + struct hsi_port *port = hsi_get_port(cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + dev_dbg(&port->device, "Wake out low %d\n", omap_port->wk_refcount); + + spin_lock_bh(&omap_port->wk_lock); + BUG_ON(!omap_port->wk_refcount); + if (--omap_port->wk_refcount) { + spin_unlock_bh(&omap_port->wk_lock); + return 0; + } + writel(SSI_WAKE(0), omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num)); + pm_runtime_put_sync(omap_port->pdev); /* Release clocks */ + spin_unlock_bh(&omap_port->wk_lock); + + return 0; +} + +static void ssi_transfer(struct omap_ssi_port *omap_port, + struct list_head *queue) +{ + struct hsi_msg *msg; + int err = -1; + + spin_lock_bh(&omap_port->lock); + while (err < 0) { + err = ssi_start_transfer(queue); + if (err < 0) { + msg = list_first_entry(queue, struct hsi_msg, link); + msg->status = HSI_STATUS_ERROR; + msg->actual_len = 0; + list_del(&msg->link); + spin_unlock_bh(&omap_port->lock); + msg->complete(msg); + spin_lock_bh(&omap_port->lock); + } + } + spin_unlock_bh(&omap_port->lock); +} + +static void ssi_cleanup_queues(struct hsi_client *cl) +{ + struct hsi_port *port = hsi_get_port(cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + struct hsi_msg *msg; + unsigned int i; + u32 rxbufstate = 0; + u32 txbufstate = 0; + u32 status = SSI_ERROROCCURED; + u32 tmp; + + ssi_flush_queue(&omap_port->brkqueue, cl); + if (list_empty(&omap_port->brkqueue)) + status |= SSI_BREAKDETECTED; + + for (i = 0; i < omap_port->channels; i++) { + if (list_empty(&omap_port->txqueue[i])) + continue; + msg = list_first_entry(&omap_port->txqueue[i], struct hsi_msg, + link); + if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) { + txbufstate |= (1 << i); + status |= SSI_DATAACCEPT(i); + /* Release the clocks writes, also GDD ones */ + pm_runtime_put_sync(omap_port->pdev); + } + ssi_flush_queue(&omap_port->txqueue[i], cl); + } + for (i = 0; i < omap_port->channels; i++) { + if (list_empty(&omap_port->rxqueue[i])) + continue; + msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg, + link); + if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) { + rxbufstate |= (1 << i); + status |= SSI_DATAAVAILABLE(i); + } + ssi_flush_queue(&omap_port->rxqueue[i], cl); + /* Check if we keep the error detection interrupt armed */ + if (!list_empty(&omap_port->rxqueue[i])) + status &= ~SSI_ERROROCCURED; + } + /* Cleanup write buffers */ + tmp = readl(omap_port->sst_base + SSI_SST_BUFSTATE_REG); + tmp &= ~txbufstate; + writel_relaxed(tmp, omap_port->sst_base + SSI_SST_BUFSTATE_REG); + /* Cleanup read buffers */ + tmp = readl(omap_port->ssr_base + SSI_SSR_BUFSTATE_REG); + tmp &= ~rxbufstate; + writel_relaxed(tmp, omap_port->ssr_base + SSI_SSR_BUFSTATE_REG); + /* Disarm and ack pending interrupts */ + tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + tmp &= ~status; + writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + writel_relaxed(status, omap_ssi->sys + + SSI_MPU_STATUS_REG(port->num, 0)); +} + +static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl) +{ + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + struct hsi_port *port = hsi_get_port(cl); + struct omap_ssi_port *omap_port 
= hsi_port_drvdata(port); + struct hsi_msg *msg; + unsigned int i; + u32 val = 0; + u32 tmp; + + for (i = 0; i < SSI_MAX_GDD_LCH; i++) { + msg = omap_ssi->gdd_trn[i].msg; + if ((!msg) || (msg->cl != cl)) + continue; + writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i)); + val |= (1 << i); + /* + * Clock references for write will be handled in + * ssi_cleanup_queues + */ + if (msg->ttype == HSI_MSG_READ) + pm_runtime_put_sync(omap_port->pdev); + omap_ssi->gdd_trn[i].msg = NULL; + } + tmp = readl_relaxed(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); + tmp &= ~val; + writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); + writel(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG); +} + +static int ssi_set_port_mode(struct omap_ssi_port *omap_port, u32 mode) +{ + writel(mode, omap_port->sst_base + SSI_SST_MODE_REG); + writel(mode, omap_port->ssr_base + SSI_SSR_MODE_REG); + /* OCP barrier */ + mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG); + + return 0; +} + +static int ssi_release(struct hsi_client *cl) +{ + struct hsi_port *port = hsi_get_port(cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + + spin_lock_bh(&omap_port->lock); + pm_runtime_get_sync(omap_port->pdev); + /* Stop all the pending DMA requests for that client */ + ssi_cleanup_gdd(ssi, cl); + /* Now cleanup all the queues */ + ssi_cleanup_queues(cl); + pm_runtime_put_sync(omap_port->pdev); + /* If it is the last client of the port, do extra checks and cleanup */ + if (port->claimed <= 1) { + /* + * Drop the clock reference for the incoming wake line + * if it is still kept high by the other side. + */ + if (omap_port->wkin_cken) { + pm_runtime_put_sync(omap_port->pdev); + omap_port->wkin_cken = 0; + } + pm_runtime_get_sync(omap_port->pdev); + /* Stop any SSI TX/RX without a client */ + ssi_set_port_mode(omap_port, SSI_MODE_SLEEP); + omap_port->sst.mode = SSI_MODE_SLEEP; + omap_port->ssr.mode = SSI_MODE_SLEEP; + pm_runtime_put_sync(omap_port->pdev); + WARN_ON(omap_port->wk_refcount != 0); + } + spin_unlock_bh(&omap_port->lock); + + return 0; +} + + + +static void ssi_error(struct hsi_port *port) +{ + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + struct hsi_msg *msg; + unsigned int i; + u32 err; + u32 val; + u32 tmp; + + /* ACK error */ + err = readl(omap_port->ssr_base + SSI_SSR_ERROR_REG); + dev_err(&port->device, "SSI error: 0x%02x\n", err); + if (!err) { + dev_dbg(&port->device, "spurious SSI error ignored!\n"); + return; + } + spin_lock(&omap_ssi->lock); + /* Cancel all GDD read transfers */ + for (i = 0, val = 0; i < SSI_MAX_GDD_LCH; i++) { + msg = omap_ssi->gdd_trn[i].msg; + if ((msg) && (msg->ttype == HSI_MSG_READ)) { + writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i)); + val |= (1 << i); + omap_ssi->gdd_trn[i].msg = NULL; + } + } + tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); + tmp &= ~val; + writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); + spin_unlock(&omap_ssi->lock); + /* Cancel all PIO read transfers */ + spin_lock(&omap_port->lock); + tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + tmp &= 0xfeff00ff; /* Disable error & all dataavailable interrupts */ + writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + /* ACK error */ + writel_relaxed(err, omap_port->ssr_base + SSI_SSR_ERRORACK_REG); + 
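The acknowledge writes on either side of this point follow the write-one-to-clear convention common to OMAP interrupt status registers: the raw bits read from SSI_SSR_ERROR_REG are written back to SSI_SSR_ERRORACK_REG, and SSI_ERROROCCURED is written to the MPU status register just below, retiring only those bits and leaving other pending events untouched. A small simulation of write-one-to-clear semantics on an invented register, not the SSI block itself:

	#include <assert.h>
	#include <stdint.h>

	static uint32_t sim_status;	/* invented W1C status register */

	static void hw_raise(uint32_t bits)
	{
		sim_status |= bits;	/* hardware latches new events */
	}

	static void w1c_write(uint32_t val)
	{
		sim_status &= ~val;	/* 1-bits clear, 0-bits untouched */
	}

	int main(void)
	{
		uint32_t pending;

		hw_raise(0x5);			/* two events pending */
		pending = sim_status;		/* read the status... */
		w1c_write(pending & 0x1);	/* ...ack only bit 0 */
		assert(sim_status == 0x4);	/* bit 2 still pending */
		return 0;
	}
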
writel_relaxed(SSI_ERROROCCURED, + omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0)); + /* Signal the error to all current pending read requests */ + for (i = 0; i < omap_port->channels; i++) { + if (list_empty(&omap_port->rxqueue[i])) + continue; + msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg, + link); + list_del(&msg->link); + msg->status = HSI_STATUS_ERROR; + spin_unlock(&omap_port->lock); + msg->complete(msg); + /* Now restart queued reads if any */ + ssi_transfer(omap_port, &omap_port->rxqueue[i]); + spin_lock(&omap_port->lock); + } + spin_unlock(&omap_port->lock); +} + +static void ssi_break_complete(struct hsi_port *port) +{ + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + struct hsi_msg *msg; + struct hsi_msg *tmp; + u32 val; + + dev_dbg(&port->device, "HWBREAK received\n"); + + spin_lock(&omap_port->lock); + val = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + val &= ~SSI_BREAKDETECTED; + writel_relaxed(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + writel_relaxed(0, omap_port->ssr_base + SSI_SSR_BREAK_REG); + writel(SSI_BREAKDETECTED, + omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0)); + spin_unlock(&omap_port->lock); + + list_for_each_entry_safe(msg, tmp, &omap_port->brkqueue, link) { + msg->status = HSI_STATUS_COMPLETED; + spin_lock(&omap_port->lock); + list_del(&msg->link); + spin_unlock(&omap_port->lock); + msg->complete(msg); + } + +} + +static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue) +{ + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_msg *msg; + u32 *buf; + u32 reg; + u32 val; + + spin_lock(&omap_port->lock); + msg = list_first_entry(queue, struct hsi_msg, link); + if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) { + msg->actual_len = 0; + msg->status = HSI_STATUS_PENDING; + } + if (msg->ttype == HSI_MSG_WRITE) + val = SSI_DATAACCEPT(msg->channel); + else + val = SSI_DATAAVAILABLE(msg->channel); + if (msg->status == HSI_STATUS_PROCEEDING) { + buf = sg_virt(msg->sgt.sgl) + msg->actual_len; + if (msg->ttype == HSI_MSG_WRITE) + writel(*buf, omap_port->sst_base + + SSI_SST_BUFFER_CH_REG(msg->channel)); + else + *buf = readl(omap_port->ssr_base + + SSI_SSR_BUFFER_CH_REG(msg->channel)); + dev_dbg(&port->device, "ch %d ttype %d 0x%08x\n", msg->channel, + msg->ttype, *buf); + msg->actual_len += sizeof(*buf); + if (msg->actual_len >= msg->sgt.sgl->length) + msg->status = HSI_STATUS_COMPLETED; + /* + * Wait for the last written frame to be really sent before + * we call the complete callback + */ + if ((msg->status == HSI_STATUS_PROCEEDING) || + ((msg->status == HSI_STATUS_COMPLETED) && + (msg->ttype == HSI_MSG_WRITE))) { + writel(val, omap_ssi->sys + + SSI_MPU_STATUS_REG(port->num, 0)); + spin_unlock(&omap_port->lock); + + return; + } + + } + /* Transfer completed at this point */ + reg = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + if (msg->ttype == HSI_MSG_WRITE) { + /* Release clocks for write transfer */ + pm_runtime_put_sync(omap_port->pdev); + } + reg &= ~val; + writel_relaxed(reg, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + writel_relaxed(val, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0)); + list_del(&msg->link); + spin_unlock(&omap_port->lock); + msg->complete(msg); + 
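Note the locking shape here, also used in ssi_error() above: the message is unlinked while omap_port->lock is held, the lock is dropped, and only then does the client's complete() callback run, so a callback that immediately queues another transfer can re-enter ssi_async() without deadlocking on the port lock. A bare-bones pthread sketch of that unlink-unlock-callback ordering, with invented names:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;
	static int queue[8], queue_len;

	static void complete_cb(int msg)
	{
		/* may re-enter the queueing path: must not hold port_lock */
		printf("completed msg %d\n", msg);
	}

	static void complete_head(void)
	{
		int msg;

		pthread_mutex_lock(&port_lock);
		if (!queue_len) {
			pthread_mutex_unlock(&port_lock);
			return;
		}
		msg = queue[--queue_len];	/* unlink under the lock */
		pthread_mutex_unlock(&port_lock);
		complete_cb(msg);		/* callback runs unlocked */
	}

	int main(void)
	{
		queue[queue_len++] = 42;
		complete_head();
		return 0;
	}
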
ssi_transfer(omap_port, queue); +} + +static void ssi_pio_tasklet(unsigned long ssi_port) +{ + struct hsi_port *port = (struct hsi_port *)ssi_port; + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + void __iomem *sys = omap_ssi->sys; + unsigned int ch; + u32 status_reg; + + pm_runtime_get_sync(omap_port->pdev); + status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0)); + status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0)); + + for (ch = 0; ch < omap_port->channels; ch++) { + if (status_reg & SSI_DATAACCEPT(ch)) + ssi_pio_complete(port, &omap_port->txqueue[ch]); + if (status_reg & SSI_DATAAVAILABLE(ch)) + ssi_pio_complete(port, &omap_port->rxqueue[ch]); + } + if (status_reg & SSI_BREAKDETECTED) + ssi_break_complete(port); + if (status_reg & SSI_ERROROCCURED) + ssi_error(port); + + status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0)); + status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0)); + pm_runtime_put_sync(omap_port->pdev); + + if (status_reg) + tasklet_hi_schedule(&omap_port->pio_tasklet); + else + enable_irq(omap_port->irq); +} + +static irqreturn_t ssi_pio_isr(int irq, void *port) +{ + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + + tasklet_hi_schedule(&omap_port->pio_tasklet); + disable_irq_nosync(irq); + + return IRQ_HANDLED; +} + +static void ssi_wake_tasklet(unsigned long ssi_port) +{ + struct hsi_port *port = (struct hsi_port *)ssi_port; + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + if (ssi_wakein(port)) { + /** + * We can have a quick High-Low-High transition in the line. + * In such a case if we have long interrupt latencies, + * we can miss the low event or get twice a high event. + * This workaround will avoid breaking the clock reference + * count when such a situation occurs. + */ + spin_lock(&omap_port->lock); + if (!omap_port->wkin_cken) { + omap_port->wkin_cken = 1; + pm_runtime_get_sync(omap_port->pdev); + } + spin_unlock(&omap_port->lock); + dev_dbg(&ssi->device, "Wake in high\n"); + if (omap_port->wktest) { /* FIXME: HACK ! To be removed */ + writel(SSI_WAKE(0), + omap_ssi->sys + SSI_SET_WAKE_REG(port->num)); + } + hsi_event(port, HSI_EVENT_START_RX); + } else { + dev_dbg(&ssi->device, "Wake in low\n"); + if (omap_port->wktest) { /* FIXME: HACK ! 
To be removed */ + writel(SSI_WAKE(0), + omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num)); + } + hsi_event(port, HSI_EVENT_STOP_RX); + spin_lock(&omap_port->lock); + if (omap_port->wkin_cken) { + pm_runtime_put_sync(omap_port->pdev); + omap_port->wkin_cken = 0; + } + spin_unlock(&omap_port->lock); + } +} + +static irqreturn_t ssi_wake_isr(int irq __maybe_unused, void *ssi_port) +{ + struct omap_ssi_port *omap_port = hsi_port_drvdata(ssi_port); + + tasklet_hi_schedule(&omap_port->wake_tasklet); + + return IRQ_HANDLED; +} + +static int __init ssi_port_irq(struct hsi_port *port, + struct platform_device *pd) +{ + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + int err; + + omap_port->irq = platform_get_irq(pd, 0); + if (omap_port->irq < 0) { + dev_err(&port->device, "Port IRQ resource missing\n"); + return omap_port->irq; + } + tasklet_init(&omap_port->pio_tasklet, ssi_pio_tasklet, + (unsigned long)port); + err = devm_request_irq(&port->device, omap_port->irq, ssi_pio_isr, + 0, "mpu_irq0", port); + if (err < 0) + dev_err(&port->device, "Request IRQ %d failed (%d)\n", + omap_port->irq, err); + return err; +} + +static int __init ssi_wake_irq(struct hsi_port *port, + struct platform_device *pd) +{ + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + int cawake_irq; + int err; + + if (omap_port->wake_gpio == -1) { + omap_port->wake_irq = -1; + return 0; + } + + cawake_irq = gpio_to_irq(omap_port->wake_gpio); + + omap_port->wake_irq = cawake_irq; + tasklet_init(&omap_port->wake_tasklet, ssi_wake_tasklet, + (unsigned long)port); + err = devm_request_irq(&port->device, cawake_irq, ssi_wake_isr, + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, + "cawake", port); + if (err < 0) + dev_err(&port->device, "Request Wake in IRQ %d failed %d\n", + cawake_irq, err); + err = enable_irq_wake(cawake_irq); + if (err < 0) + dev_err(&port->device, "Enable wake on the wakeline in irq %d failed %d\n", + cawake_irq, err); + + return err; +} + +static void __init ssi_queues_init(struct omap_ssi_port *omap_port) +{ + unsigned int ch; + + for (ch = 0; ch < SSI_MAX_CHANNELS; ch++) { + INIT_LIST_HEAD(&omap_port->txqueue[ch]); + INIT_LIST_HEAD(&omap_port->rxqueue[ch]); + } + INIT_LIST_HEAD(&omap_port->brkqueue); +} + +static int __init ssi_port_get_iomem(struct platform_device *pd, + const char *name, void __iomem **pbase, dma_addr_t *phy) +{ + struct hsi_port *port = platform_get_drvdata(pd); + struct resource *mem; + struct resource *ioarea; + void __iomem *base; + + mem = platform_get_resource_byname(pd, IORESOURCE_MEM, name); + if (!mem) { + dev_err(&pd->dev, "IO memory region missing (%s)\n", name); + return -ENXIO; + } + ioarea = devm_request_mem_region(&port->device, mem->start, + resource_size(mem), dev_name(&pd->dev)); + if (!ioarea) { + dev_err(&pd->dev, "%s IO memory region request failed\n", + mem->name); + return -ENXIO; + } + base = devm_ioremap(&port->device, mem->start, resource_size(mem)); + if (!base) { + dev_err(&pd->dev, "%s IO remap failed\n", mem->name); + return -ENXIO; + } + *pbase = base; + + if (phy) + *phy = mem->start; + + return 0; +} + +static int __init ssi_port_probe(struct platform_device *pd) +{ + struct device_node *np = pd->dev.of_node; + struct hsi_port *port; + struct omap_ssi_port *omap_port; + struct hsi_controller *ssi = dev_get_drvdata(pd->dev.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + u32 cawake_gpio = 0; + u32 port_id; + int err; + + dev_dbg(&pd->dev, "init ssi port...\n"); + + err = ref_module(THIS_MODULE, ssi->owner); + if 
(err) { + dev_err(&pd->dev, "could not increment parent module refcount (err=%d)\n", + err); + return -ENODEV; + } + + if (!ssi->port || !omap_ssi->port) { + dev_err(&pd->dev, "ssi controller not initialized!\n"); + err = -ENODEV; + goto error; + } + + /* get id of first uninitialized port in controller */ + for (port_id = 0; port_id < ssi->num_ports && omap_ssi->port[port_id]; + port_id++) + ; + + if (port_id >= ssi->num_ports) { + dev_err(&pd->dev, "port id out of range!\n"); + err = -ENODEV; + goto error; + } + + port = ssi->port[port_id]; + + if (!np) { + dev_err(&pd->dev, "missing device tree data\n"); + err = -EINVAL; + goto error; + } + + cawake_gpio = of_get_named_gpio(np, "ti,ssi-cawake-gpio", 0); + if (cawake_gpio < 0) { + dev_err(&pd->dev, "DT data is missing cawake gpio (err=%d)\n", + cawake_gpio); + err = -ENODEV; + goto error; + } + + err = devm_gpio_request_one(&port->device, cawake_gpio, GPIOF_DIR_IN, + "cawake"); + if (err) { + dev_err(&pd->dev, "could not request cawake gpio (err=%d)!\n", + err); + err = -ENXIO; + goto error; + } + + omap_port = devm_kzalloc(&port->device, sizeof(*omap_port), GFP_KERNEL); + if (!omap_port) { + err = -ENOMEM; + goto error; + } + omap_port->wake_gpio = cawake_gpio; + omap_port->pdev = &pd->dev; + omap_port->port_id = port_id; + + /* initialize HSI port */ + port->async = ssi_async; + port->setup = ssi_setup; + port->flush = ssi_flush; + port->start_tx = ssi_start_tx; + port->stop_tx = ssi_stop_tx; + port->release = ssi_release; + hsi_port_set_drvdata(port, omap_port); + omap_ssi->port[port_id] = omap_port; + + platform_set_drvdata(pd, port); + + err = ssi_port_get_iomem(pd, "tx", &omap_port->sst_base, + &omap_port->sst_dma); + if (err < 0) + goto error; + err = ssi_port_get_iomem(pd, "rx", &omap_port->ssr_base, + &omap_port->ssr_dma); + if (err < 0) + goto error; + + err = ssi_port_irq(port, pd); + if (err < 0) + goto error; + err = ssi_wake_irq(port, pd); + if (err < 0) + goto error; + + ssi_queues_init(omap_port); + spin_lock_init(&omap_port->lock); + spin_lock_init(&omap_port->wk_lock); + omap_port->dev = &port->device; + + pm_runtime_irq_safe(omap_port->pdev); + pm_runtime_enable(omap_port->pdev); + +#ifdef CONFIG_DEBUG_FS + err = ssi_debug_add_port(omap_port, omap_ssi->dir); + if (err < 0) { + pm_runtime_disable(omap_port->pdev); + goto error; + } +#endif + + hsi_add_clients_from_dt(port, np); + + dev_info(&pd->dev, "ssi port %u successfully initialized (cawake=%d)\n", + port_id, cawake_gpio); + + return 0; + +error: + return err; +} + +static int __exit ssi_port_remove(struct platform_device *pd) +{ + struct hsi_port *port = platform_get_drvdata(pd); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + +#ifdef CONFIG_DEBUG_FS + ssi_debug_remove_port(port); +#endif + + hsi_port_unregister_clients(port); + + tasklet_kill(&omap_port->wake_tasklet); + tasklet_kill(&omap_port->pio_tasklet); + + port->async = hsi_dummy_msg; + port->setup = hsi_dummy_cl; + port->flush = hsi_dummy_cl; + port->start_tx = hsi_dummy_cl; + port->stop_tx = hsi_dummy_cl; + port->release = hsi_dummy_cl; + + omap_ssi->port[omap_port->port_id] = NULL; + platform_set_drvdata(pd, NULL); + pm_runtime_disable(&pd->dev); + + return 0; +} + +#ifdef CONFIG_PM_RUNTIME +static int ssi_save_port_ctx(struct omap_ssi_port *omap_port) +{ + struct hsi_port *port = to_hsi_port(omap_port->dev); + struct hsi_controller *ssi = 
to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + omap_port->sys_mpu_enable = readl(omap_ssi->sys + + SSI_MPU_ENABLE_REG(port->num, 0)); + + return 0; +} + +static int ssi_restore_port_ctx(struct omap_ssi_port *omap_port) +{ + struct hsi_port *port = to_hsi_port(omap_port->dev); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + void __iomem *base; + + writel_relaxed(omap_port->sys_mpu_enable, + omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + + /* SST context */ + base = omap_port->sst_base; + writel_relaxed(omap_port->sst.frame_size, base + SSI_SST_FRAMESIZE_REG); + writel_relaxed(omap_port->sst.channels, base + SSI_SST_CHANNELS_REG); + writel_relaxed(omap_port->sst.arb_mode, base + SSI_SST_ARBMODE_REG); + + /* SSR context */ + base = omap_port->ssr_base; + writel_relaxed(omap_port->ssr.frame_size, base + SSI_SSR_FRAMESIZE_REG); + writel_relaxed(omap_port->ssr.channels, base + SSI_SSR_CHANNELS_REG); + writel_relaxed(omap_port->ssr.timeout, base + SSI_SSR_TIMEOUT_REG); + + return 0; +} + +static int ssi_restore_port_mode(struct omap_ssi_port *omap_port) +{ + u32 mode; + + writel_relaxed(omap_port->sst.mode, + omap_port->sst_base + SSI_SST_MODE_REG); + writel_relaxed(omap_port->ssr.mode, + omap_port->ssr_base + SSI_SSR_MODE_REG); + /* OCP barrier */ + mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG); + + return 0; +} + +static int ssi_restore_divisor(struct omap_ssi_port *omap_port) +{ + writel_relaxed(omap_port->sst.divisor, + omap_port->sst_base + SSI_SST_DIVISOR_REG); + + return 0; +} + +static int omap_ssi_port_runtime_suspend(struct device *dev) +{ + struct hsi_port *port = dev_get_drvdata(dev); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + dev_dbg(dev, "port runtime suspend!\n"); + + ssi_set_port_mode(omap_port, SSI_MODE_SLEEP); + if (omap_ssi->get_loss) + omap_port->loss_count = + omap_ssi->get_loss(ssi->device.parent); + ssi_save_port_ctx(omap_port); + + return 0; +} + +static int omap_ssi_port_runtime_resume(struct device *dev) +{ + struct hsi_port *port = dev_get_drvdata(dev); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + dev_dbg(dev, "port runtime resume!\n"); + + if ((omap_ssi->get_loss) && (omap_port->loss_count == + omap_ssi->get_loss(ssi->device.parent))) + goto mode; /* We always need to restore the mode & TX divisor */ + + ssi_restore_port_ctx(omap_port); + +mode: + ssi_restore_divisor(omap_port); + ssi_restore_port_mode(omap_port); + + return 0; +} + +static const struct dev_pm_ops omap_ssi_port_pm_ops = { + SET_RUNTIME_PM_OPS(omap_ssi_port_runtime_suspend, + omap_ssi_port_runtime_resume, NULL) +}; + +#define DEV_PM_OPS (&omap_ssi_port_pm_ops) +#else +#define DEV_PM_OPS NULL +#endif + + +#ifdef CONFIG_OF +static const struct of_device_id omap_ssi_port_of_match[] = { + { .compatible = "ti,omap3-ssi-port", }, + {}, +}; +MODULE_DEVICE_TABLE(of, omap_ssi_port_of_match); +#else +#define omap_ssi_port_of_match NULL +#endif + +static struct platform_driver ssi_port_pdriver = { + .remove = __exit_p(ssi_port_remove), + .driver = { + .name = "omap_ssi_port", + .owner = THIS_MODULE, + .of_match_table = 
omap_ssi_port_of_match, + .pm = DEV_PM_OPS, + }, +}; + +module_platform_driver_probe(ssi_port_pdriver, ssi_port_probe); + +MODULE_ALIAS("platform:omap_ssi_port"); +MODULE_AUTHOR("Carlos Chinea <carlos.chinea@nokia.com>"); +MODULE_AUTHOR("Sebastian Reichel <sre@kernel.org>"); +MODULE_DESCRIPTION("Synchronous Serial Interface Port Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/hsi/controllers/omap_ssi_regs.h b/drivers/hsi/controllers/omap_ssi_regs.h new file mode 100644 index 000000000000..08f98dd1d01f --- /dev/null +++ b/drivers/hsi/controllers/omap_ssi_regs.h @@ -0,0 +1,171 @@ +/* Hardware definitions for SSI. + * + * Copyright (C) 2010 Nokia Corporation. All rights reserved. + * + * Contact: Carlos Chinea <carlos.chinea@nokia.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef __OMAP_SSI_REGS_H__ +#define __OMAP_SSI_REGS_H__ + +/* + * SSI SYS registers + */ +#define SSI_REVISION_REG 0 +# define SSI_REV_MAJOR 0xf0 +# define SSI_REV_MINOR 0xf +#define SSI_SYSCONFIG_REG 0x10 +# define SSI_AUTOIDLE (1 << 0) +# define SSI_SOFTRESET (1 << 1) +# define SSI_SIDLEMODE_FORCE 0 +# define SSI_SIDLEMODE_NO (1 << 3) +# define SSI_SIDLEMODE_SMART (1 << 4) +# define SSI_SIDLEMODE_MASK 0x18 +# define SSI_MIDLEMODE_FORCE 0 +# define SSI_MIDLEMODE_NO (1 << 12) +# define SSI_MIDLEMODE_SMART (1 << 13) +# define SSI_MIDLEMODE_MASK 0x3000 +#define SSI_SYSSTATUS_REG 0x14 +# define SSI_RESETDONE 1 +#define SSI_MPU_STATUS_REG(port, irq) (0x808 + ((port) * 0x10) + ((irq) * 2)) +#define SSI_MPU_ENABLE_REG(port, irq) (0x80c + ((port) * 0x10) + ((irq) * 8)) +# define SSI_DATAACCEPT(channel) (1 << (channel)) +# define SSI_DATAAVAILABLE(channel) (1 << ((channel) + 8)) +# define SSI_DATAOVERRUN(channel) (1 << ((channel) + 16)) +# define SSI_ERROROCCURED (1 << 24) +# define SSI_BREAKDETECTED (1 << 25) +#define SSI_GDD_MPU_IRQ_STATUS_REG 0x0800 +#define SSI_GDD_MPU_IRQ_ENABLE_REG 0x0804 +# define SSI_GDD_LCH(channel) (1 << (channel)) +#define SSI_WAKE_REG(port) (0xc00 + ((port) * 0x10)) +#define SSI_CLEAR_WAKE_REG(port) (0xc04 + ((port) * 0x10)) +#define SSI_SET_WAKE_REG(port) (0xc08 + ((port) * 0x10)) +# define SSI_WAKE(channel) (1 << (channel)) +# define SSI_WAKE_MASK 0xff + +/* + * SSI SST registers + */ +#define SSI_SST_ID_REG 0 +#define SSI_SST_MODE_REG 4 +# define SSI_MODE_VAL_MASK 3 +# define SSI_MODE_SLEEP 0 +# define SSI_MODE_STREAM 1 +# define SSI_MODE_FRAME 2 +# define SSI_MODE_MULTIPOINTS 3 +#define SSI_SST_FRAMESIZE_REG 8 +# define SSI_FRAMESIZE_DEFAULT 31 +#define SSI_SST_TXSTATE_REG 0xc +# define SSI_TXSTATE_IDLE 0 +#define SSI_SST_BUFSTATE_REG 0x10 +# define SSI_FULL(channel) (1 << (channel)) +#define SSI_SST_DIVISOR_REG 0x18 +# define SSI_MAX_DIVISOR 127 +#define SSI_SST_BREAK_REG 0x20 +#define SSI_SST_CHANNELS_REG 0x24 +# define SSI_CHANNELS_DEFAULT 4 +#define SSI_SST_ARBMODE_REG 0x28 +# define SSI_ARBMODE_ROUNDROBIN 0 +# define SSI_ARBMODE_PRIORITY 1 +#define 
SSI_SST_BUFFER_CH_REG(channel) (0x80 + ((channel) * 4)) +#define SSI_SST_SWAPBUF_CH_REG(channel) (0xc0 + ((channel) * 4)) + +/* + * SSI SSR registers + */ +#define SSI_SSR_ID_REG 0 +#define SSI_SSR_MODE_REG 4 +#define SSI_SSR_FRAMESIZE_REG 8 +#define SSI_SSR_RXSTATE_REG 0xc +#define SSI_SSR_BUFSTATE_REG 0x10 +# define SSI_NOTEMPTY(channel) (1 << (channel)) +#define SSI_SSR_BREAK_REG 0x1c +#define SSI_SSR_ERROR_REG 0x20 +#define SSI_SSR_ERRORACK_REG 0x24 +#define SSI_SSR_OVERRUN_REG 0x2c +#define SSI_SSR_OVERRUNACK_REG 0x30 +#define SSI_SSR_TIMEOUT_REG 0x34 +# define SSI_TIMEOUT_DEFAULT 0 +#define SSI_SSR_CHANNELS_REG 0x28 +#define SSI_SSR_BUFFER_CH_REG(channel) (0x80 + ((channel) * 4)) +#define SSI_SSR_SWAPBUF_CH_REG(channel) (0xc0 + ((channel) * 4)) + +/* + * SSI GDD registers + */ +#define SSI_GDD_HW_ID_REG 0 +#define SSI_GDD_PPORT_ID_REG 0x10 +#define SSI_GDD_MPORT_ID_REG 0x14 +#define SSI_GDD_PPORT_SR_REG 0x20 +#define SSI_GDD_MPORT_SR_REG 0x24 +# define SSI_ACTIVE_LCH_NUM_MASK 0xff +#define SSI_GDD_TEST_REG 0x40 +# define SSI_TEST 1 +#define SSI_GDD_GCR_REG 0x100 +# define SSI_CLK_AUTOGATING_ON (1 << 3) +# define SSI_FREE (1 << 2) +# define SSI_SWITCH_OFF (1 << 0) +#define SSI_GDD_GRST_REG 0x200 +# define SSI_SWRESET 1 +#define SSI_GDD_CSDP_REG(channel) (0x800 + ((channel) * 0x40)) +# define SSI_DST_BURST_EN_MASK 0xc000 +# define SSI_DST_SINGLE_ACCESS0 0 +# define SSI_DST_SINGLE_ACCESS (1 << 14) +# define SSI_DST_BURST_4x32_BIT (2 << 14) +# define SSI_DST_BURST_8x32_BIT (3 << 14) +# define SSI_DST_MASK 0x1e00 +# define SSI_DST_MEMORY_PORT (8 << 9) +# define SSI_DST_PERIPHERAL_PORT (9 << 9) +# define SSI_SRC_BURST_EN_MASK 0x180 +# define SSI_SRC_SINGLE_ACCESS0 0 +# define SSI_SRC_SINGLE_ACCESS (1 << 7) +# define SSI_SRC_BURST_4x32_BIT (2 << 7) +# define SSI_SRC_BURST_8x32_BIT (3 << 7) +# define SSI_SRC_MASK 0x3c +# define SSI_SRC_MEMORY_PORT (8 << 2) +# define SSI_SRC_PERIPHERAL_PORT (9 << 2) +# define SSI_DATA_TYPE_MASK 3 +# define SSI_DATA_TYPE_S32 2 +#define SSI_GDD_CCR_REG(channel) (0x802 + ((channel) * 0x40)) +# define SSI_DST_AMODE_MASK (3 << 14) +# define SSI_DST_AMODE_CONST 0 +# define SSI_DST_AMODE_POSTINC (1 << 12) +# define SSI_SRC_AMODE_MASK (3 << 12) +# define SSI_SRC_AMODE_CONST 0 +# define SSI_SRC_AMODE_POSTINC (1 << 12) +# define SSI_CCR_ENABLE (1 << 7) +# define SSI_CCR_SYNC_MASK 0x1f +#define SSI_GDD_CICR_REG(channel) (0x804 + ((channel) * 0x40)) +# define SSI_BLOCK_IE (1 << 5) +# define SSI_HALF_IE (1 << 2) +# define SSI_TOUT_IE (1 << 0) +#define SSI_GDD_CSR_REG(channel) (0x806 + ((channel) * 0x40)) +# define SSI_CSR_SYNC (1 << 6) +# define SSI_CSR_BLOCK (1 << 5) +# define SSI_CSR_HALF (1 << 2) +# define SSI_CSR_TOUR (1 << 0) +#define SSI_GDD_CSSA_REG(channel) (0x808 + ((channel) * 0x40)) +#define SSI_GDD_CDSA_REG(channel) (0x80c + ((channel) * 0x40)) +#define SSI_GDD_CEN_REG(channel) (0x810 + ((channel) * 0x40)) +#define SSI_GDD_CSAC_REG(channel) (0x818 + ((channel) * 0x40)) +#define SSI_GDD_CDAC_REG(channel) (0x81a + ((channel) * 0x40)) +#define SSI_GDD_CLNK_CTRL_REG(channel) (0x828 + ((channel) * 0x40)) +# define SSI_ENABLE_LNK (1 << 15) +# define SSI_STOP_LNK (1 << 14) +# define SSI_NEXT_CH_ID_MASK 0xf + +#endif /* __OMAP_SSI_REGS_H__ */ diff --git a/drivers/hsi/hsi.c b/drivers/hsi/hsi.c index 749f7b5c8179..fe9371271ce2 100644 --- a/drivers/hsi/hsi.c +++ b/drivers/hsi/hsi.c @@ -26,6 +26,8 @@ #include <linux/slab.h> #include <linux/string.h> #include <linux/notifier.h> +#include <linux/of.h> +#include <linux/of_device.h> #include "hsi_core.h" static ssize_t 
modalias_show(struct device *dev, @@ -50,7 +52,13 @@ static int hsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env) static int hsi_bus_match(struct device *dev, struct device_driver *driver) { - return strcmp(dev_name(dev), driver->name) == 0; + if (of_driver_match_device(dev, driver)) + return true; + + if (strcmp(dev_name(dev), driver->name) == 0) + return true; + + return false; } static struct bus_type hsi_bus_type = { @@ -62,18 +70,37 @@ static struct bus_type hsi_bus_type = { static void hsi_client_release(struct device *dev) { - kfree(to_hsi_client(dev)); + struct hsi_client *cl = to_hsi_client(dev); + + kfree(cl->tx_cfg.channels); + kfree(cl->rx_cfg.channels); + kfree(cl); } -static void hsi_new_client(struct hsi_port *port, struct hsi_board_info *info) +struct hsi_client *hsi_new_client(struct hsi_port *port, + struct hsi_board_info *info) { struct hsi_client *cl; + size_t size; cl = kzalloc(sizeof(*cl), GFP_KERNEL); if (!cl) - return; + return NULL; + cl->tx_cfg = info->tx_cfg; + if (cl->tx_cfg.channels) { + size = cl->tx_cfg.num_channels * sizeof(*cl->tx_cfg.channels); + cl->tx_cfg.channels = kzalloc(size , GFP_KERNEL); + memcpy(cl->tx_cfg.channels, info->tx_cfg.channels, size); + } + cl->rx_cfg = info->rx_cfg; + if (cl->rx_cfg.channels) { + size = cl->rx_cfg.num_channels * sizeof(*cl->rx_cfg.channels); + cl->rx_cfg.channels = kzalloc(size , GFP_KERNEL); + memcpy(cl->rx_cfg.channels, info->rx_cfg.channels, size); + } + cl->device.bus = &hsi_bus_type; cl->device.parent = &port->device; cl->device.release = hsi_client_release; @@ -85,7 +112,10 @@ static void hsi_new_client(struct hsi_port *port, struct hsi_board_info *info) pr_err("hsi: failed to register client: %s\n", info->name); put_device(&cl->device); } + + return cl; } +EXPORT_SYMBOL_GPL(hsi_new_client); static void hsi_scan_board_info(struct hsi_controller *hsi) { @@ -101,12 +131,209 @@ static void hsi_scan_board_info(struct hsi_controller *hsi) } } -static int hsi_remove_client(struct device *dev, void *data __maybe_unused) +#ifdef CONFIG_OF +static struct hsi_board_info hsi_char_dev_info = { + .name = "hsi_char", +}; + +static int hsi_of_property_parse_mode(struct device_node *client, char *name, + unsigned int *result) +{ + const char *mode; + int err; + + err = of_property_read_string(client, name, &mode); + if (err < 0) + return err; + + if (strcmp(mode, "stream") == 0) + *result = HSI_MODE_STREAM; + else if (strcmp(mode, "frame") == 0) + *result = HSI_MODE_FRAME; + else + return -EINVAL; + + return 0; +} + +static int hsi_of_property_parse_flow(struct device_node *client, char *name, + unsigned int *result) +{ + const char *flow; + int err; + + err = of_property_read_string(client, name, &flow); + if (err < 0) + return err; + + if (strcmp(flow, "synchronized") == 0) + *result = HSI_FLOW_SYNC; + else if (strcmp(flow, "pipeline") == 0) + *result = HSI_FLOW_PIPE; + else + return -EINVAL; + + return 0; +} + +static int hsi_of_property_parse_arb_mode(struct device_node *client, + char *name, unsigned int *result) +{ + const char *arb_mode; + int err; + + err = of_property_read_string(client, name, &arb_mode); + if (err < 0) + return err; + + if (strcmp(arb_mode, "round-robin") == 0) + *result = HSI_ARB_RR; + else if (strcmp(arb_mode, "priority") == 0) + *result = HSI_ARB_PRIO; + else + return -EINVAL; + + return 0; +} + +static void hsi_add_client_from_dt(struct hsi_port *port, + struct device_node *client) +{ + struct hsi_client *cl; + struct hsi_channel channel; + struct property *prop; + char name[32]; + 
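The parsing loop further below pairs each "hsi-channel-ids" entry with an optional "hsi-channel-names" string and sizes the port from the largest id it sees (num_hw_channels = max id + 1), so a sparse id list still provisions the full hardware range. A tiny sketch of that sizing rule with made-up ids:

	#include <stdio.h>

	/* invented stand-in for a client's "hsi-channel-ids" property */
	static const unsigned int ids[] = { 0, 1, 2, 5 };

	int main(void)
	{
		unsigned int max_chan = 0;
		unsigned int i;

		for (i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
			if (ids[i] > max_chan)
				max_chan = ids[i];
		/* {0, 1, 2, 5} -> 6 channels, despite holes at 3 and 4 */
		printf("num_hw_channels = %u\n", max_chan + 1);
		return 0;
	}
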
int length, cells, err, i, max_chan, mode; + + cl = kzalloc(sizeof(*cl), GFP_KERNEL); + if (!cl) + return; + + err = of_modalias_node(client, name, sizeof(name)); + if (err) + goto err; + + dev_set_name(&cl->device, "%s", name); + + err = hsi_of_property_parse_mode(client, "hsi-mode", &mode); + if (err) { + err = hsi_of_property_parse_mode(client, "hsi-rx-mode", + &cl->rx_cfg.mode); + if (err) + goto err; + + err = hsi_of_property_parse_mode(client, "hsi-tx-mode", + &cl->tx_cfg.mode); + if (err) + goto err; + } else { + cl->rx_cfg.mode = mode; + cl->tx_cfg.mode = mode; + } + + err = of_property_read_u32(client, "hsi-speed-kbps", + &cl->tx_cfg.speed); + if (err) + goto err; + cl->rx_cfg.speed = cl->tx_cfg.speed; + + err = hsi_of_property_parse_flow(client, "hsi-flow", + &cl->rx_cfg.flow); + if (err) + goto err; + + err = hsi_of_property_parse_arb_mode(client, "hsi-arb-mode", + &cl->rx_cfg.arb_mode); + if (err) + goto err; + + prop = of_find_property(client, "hsi-channel-ids", &length); + if (!prop) { + err = -EINVAL; + goto err; + } + + cells = length / sizeof(u32); + + cl->rx_cfg.num_channels = cells; + cl->tx_cfg.num_channels = cells; + + cl->rx_cfg.channels = kzalloc(cells * sizeof(channel), GFP_KERNEL); + if (!cl->rx_cfg.channels) { + err = -ENOMEM; + goto err; + } + + cl->tx_cfg.channels = kzalloc(cells * sizeof(channel), GFP_KERNEL); + if (!cl->tx_cfg.channels) { + err = -ENOMEM; + goto err2; + } + + max_chan = 0; + for (i = 0; i < cells; i++) { + err = of_property_read_u32_index(client, "hsi-channel-ids", i, + &channel.id); + if (err) + goto err3; + + err = of_property_read_string_index(client, "hsi-channel-names", + i, &channel.name); + if (err) + channel.name = NULL; + + if (channel.id > max_chan) + max_chan = channel.id; + + cl->rx_cfg.channels[i] = channel; + cl->tx_cfg.channels[i] = channel; + } + + cl->rx_cfg.num_hw_channels = max_chan + 1; + cl->tx_cfg.num_hw_channels = max_chan + 1; + + cl->device.bus = &hsi_bus_type; + cl->device.parent = &port->device; + cl->device.release = hsi_client_release; + cl->device.of_node = client; + + if (device_register(&cl->device) < 0) { + pr_err("hsi: failed to register client: %s\n", name); + put_device(&cl->device); + goto err3; + } + + return; + +err3: + kfree(cl->tx_cfg.channels); +err2: + kfree(cl->rx_cfg.channels); +err: + kfree(cl); + pr_err("hsi client: missing or incorrect of property: err=%d\n", err); +} + +void hsi_add_clients_from_dt(struct hsi_port *port, struct device_node *clients) +{ + struct device_node *child; + + /* register hsi-char device */ + hsi_new_client(port, &hsi_char_dev_info); + + for_each_available_child_of_node(clients, child) + hsi_add_client_from_dt(port, child); +} +EXPORT_SYMBOL_GPL(hsi_add_clients_from_dt); +#endif + +int hsi_remove_client(struct device *dev, void *data __maybe_unused) { device_unregister(dev); return 0; } +EXPORT_SYMBOL_GPL(hsi_remove_client); static int hsi_remove_port(struct device *dev, void *data __maybe_unused) { @@ -130,6 +357,16 @@ static void hsi_port_release(struct device *dev) } /** + * hsi_unregister_port - Unregister an HSI port + * @port: The HSI port to unregister + */ +void hsi_port_unregister_clients(struct hsi_port *port) +{ + device_for_each_child(&port->device, NULL, hsi_remove_client); +} +EXPORT_SYMBOL_GPL(hsi_port_unregister_clients); + +/** * hsi_unregister_controller - Unregister an HSI controller * @hsi: The HSI controller to register */ @@ -472,7 +709,7 @@ int hsi_unregister_port_event(struct hsi_client *cl) EXPORT_SYMBOL_GPL(hsi_unregister_port_event); /** - * 
hsi_event -Notifies clients about port events + * hsi_event - Notifies clients about port events * @port: Port where the event occurred * @event: The event type * @@ -492,6 +729,32 @@ int hsi_event(struct hsi_port *port, unsigned long event) } EXPORT_SYMBOL_GPL(hsi_event); +/** + * hsi_get_channel_id_by_name - acquire channel id by channel name + * @cl: HSI client, which uses the channel + * @name: name the channel is known under + * + * Clients can call this function to get the hsi channel ids similar to + * requesting IRQs or GPIOs by name. This function assumes the same + * channel configuration is used for RX and TX. + * + * Returns -errno on error or channel id on success. + */ +int hsi_get_channel_id_by_name(struct hsi_client *cl, char *name) +{ + int i; + + if (!cl->rx_cfg.channels) + return -ENOENT; + + for (i = 0; i < cl->rx_cfg.num_channels; i++) + if (!strcmp(cl->rx_cfg.channels[i].name, name)) + return cl->rx_cfg.channels[i].id; + + return -ENXIO; +} +EXPORT_SYMBOL_GPL(hsi_get_channel_id_by_name); + static int __init hsi_init(void) { return bus_register(&hsi_bus_type); diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index bc196f49ec53..4af0da96c2e2 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -1053,7 +1053,7 @@ config SENSORS_PC87427 config SENSORS_NTC_THERMISTOR tristate "NTC thermistor support" - depends on (!OF && !IIO) || (OF && IIO) + depends on !OF || IIO=n || IIO help This driver supports NTC thermistors sensor reading and its interpretation. The driver can also monitor the temperature and diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c index 90ec1173b8a1..01723f04fe45 100644 --- a/drivers/hwmon/emc1403.c +++ b/drivers/hwmon/emc1403.c @@ -163,7 +163,7 @@ static ssize_t store_hyst(struct device *dev, if (retval < 0) goto fail; - hyst = val - retval * 1000; + hyst = retval * 1000 - val; hyst = DIV_ROUND_CLOSEST(hyst, 1000); if (hyst < 0 || hyst > 255) { retval = -ERANGE; @@ -330,7 +330,7 @@ static int emc1403_detect(struct i2c_client *client, } id = i2c_smbus_read_byte_data(client, THERMAL_REVISION_REG); - if (id != 0x01) + if (id < 0x01 || id > 0x04) return -ENODEV; return 0; @@ -355,9 +355,9 @@ static int emc1403_probe(struct i2c_client *client, if (id->driver_data) data->groups[1] = &emc1404_group; - hwmon_dev = hwmon_device_register_with_groups(&client->dev, - client->name, data, - data->groups); + hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev, + client->name, data, + data->groups); if (IS_ERR(hwmon_dev)) return PTR_ERR(hwmon_dev); diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c index 8a17f01e8672..e76feb86a1d4 100644 --- a/drivers/hwmon/ntc_thermistor.c +++ b/drivers/hwmon/ntc_thermistor.c @@ -44,6 +44,7 @@ struct ntc_compensation { unsigned int ohm; }; +/* Order matters, ntc_match references the entries by index */ static const struct platform_device_id ntc_thermistor_id[] = { { "ncp15wb473", TYPE_NCPXXWB473 }, { "ncp18wb473", TYPE_NCPXXWB473 }, @@ -141,7 +142,7 @@ struct ntc_data { char name[PLATFORM_NAME_SIZE]; }; -#ifdef CONFIG_OF +#if defined(CONFIG_OF) && IS_ENABLED(CONFIG_IIO) static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata) { struct iio_channel *channel = pdata->chan; @@ -163,15 +164,15 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata) static const struct of_device_id ntc_match[] = { { .compatible = "ntc,ncp15wb473", - .data = &ntc_thermistor_id[TYPE_NCPXXWB473] }, + .data = &ntc_thermistor_id[0] }, { .compatible = 
"ntc,ncp18wb473", - .data = &ntc_thermistor_id[TYPE_NCPXXWB473] }, + .data = &ntc_thermistor_id[1] }, { .compatible = "ntc,ncp21wb473", - .data = &ntc_thermistor_id[TYPE_NCPXXWB473] }, + .data = &ntc_thermistor_id[2] }, { .compatible = "ntc,ncp03wb473", - .data = &ntc_thermistor_id[TYPE_NCPXXWB473] }, + .data = &ntc_thermistor_id[3] }, { .compatible = "ntc,ncp15wl333", - .data = &ntc_thermistor_id[TYPE_NCPXXWL333] }, + .data = &ntc_thermistor_id[4] }, { }, }; MODULE_DEVICE_TABLE(of, ntc_match); @@ -223,6 +224,8 @@ ntc_thermistor_parse_dt(struct platform_device *pdev) return NULL; } +#define ntc_match NULL + static void ntc_iio_channel_release(struct ntc_thermistor_platform_data *pdata) { } #endif diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c index 22e92c3d3d07..3c20e4bd6dd1 100644 --- a/drivers/i2c/busses/i2c-designware-core.c +++ b/drivers/i2c/busses/i2c-designware-core.c @@ -422,6 +422,9 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) */ dw_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, DW_IC_TAR); + /* enforce disabled interrupts (due to HW issues) */ + i2c_dw_disable_int(dev); + /* Enable the adapter */ __i2c_dw_enable(dev, true); diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c index 28cbe1b2a2ec..32c85e9ecdae 100644 --- a/drivers/i2c/busses/i2c-nomadik.c +++ b/drivers/i2c/busses/i2c-nomadik.c @@ -999,7 +999,7 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id) dev->virtbase = devm_ioremap(&adev->dev, adev->res.start, resource_size(&adev->res)); - if (IS_ERR(dev->virtbase)) { + if (!dev->virtbase) { ret = -ENOMEM; goto err_no_mem; } diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c index 1b4cf14f1106..2a5efb5b487c 100644 --- a/drivers/i2c/busses/i2c-qup.c +++ b/drivers/i2c/busses/i2c-qup.c @@ -479,7 +479,7 @@ static int qup_i2c_xfer(struct i2c_adapter *adap, int ret, idx; ret = pm_runtime_get_sync(qup->dev); - if (ret) + if (ret < 0) goto out; writel(1, qup->base + QUP_SW_RESET); diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index d4fa8eba6e9d..06d47aafbb79 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c @@ -561,6 +561,12 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap, ret = -EINVAL; for (i = 0; i < num; i++) { + /* This HW can't send STOP after address phase */ + if (msgs[i].len == 0) { + ret = -EOPNOTSUPP; + break; + } + /*-------------- spin lock -----------------*/ spin_lock_irqsave(&priv->lock, flags); @@ -625,7 +631,8 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap, static u32 rcar_i2c_func(struct i2c_adapter *adap) { - return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; + /* This HW can't do SMBUS_QUICK and NOSTART */ + return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK); } static const struct i2c_algorithm rcar_i2c_algo = { diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index ae4491062e41..bb3a9964f7e0 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c @@ -1276,10 +1276,10 @@ static int s3c24xx_i2c_resume(struct device *dev) struct platform_device *pdev = to_platform_device(dev); struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev); - i2c->suspended = 0; clk_prepare_enable(i2c->clk); s3c24xx_i2c_init(i2c); clk_disable_unprepare(i2c->clk); + i2c->suspended = 0; return 0; } diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index 16f69be820c7..ee880382e3bc 100644 --- 
a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c @@ -188,10 +188,9 @@ static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq, ledtrig_ide_activity(); - pr_debug("%s: %sing: block=%llu, sectors=%u, buffer=0x%08lx\n", + pr_debug("%s: %sing: block=%llu, sectors=%u\n", drive->name, rq_data_dir(rq) == READ ? "read" : "writ", - (unsigned long long)block, blk_rq_sectors(rq), - (unsigned long)rq->buffer); + (unsigned long long)block, blk_rq_sectors(rq)); if (hwif->rw_disk) hwif->rw_disk(drive, rq); diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 1b6dbe156a37..199c7896f081 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -48,6 +48,7 @@ #include <linux/mlx4/driver.h> #include <linux/mlx4/cmd.h> +#include <linux/mlx4/qp.h> #include "mlx4_ib.h" #include "user.h" @@ -1614,6 +1615,53 @@ static int mlx4_ib_inet6_event(struct notifier_block *this, unsigned long event, } #endif +#define MLX4_IB_INVALID_MAC ((u64)-1) +static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev, + struct net_device *dev, + int port) +{ + u64 new_smac = 0; + u64 release_mac = MLX4_IB_INVALID_MAC; + struct mlx4_ib_qp *qp; + + read_lock(&dev_base_lock); + new_smac = mlx4_mac_to_u64(dev->dev_addr); + read_unlock(&dev_base_lock); + + mutex_lock(&ibdev->qp1_proxy_lock[port - 1]); + qp = ibdev->qp1_proxy[port - 1]; + if (qp) { + int new_smac_index; + u64 old_smac = qp->pri.smac; + struct mlx4_update_qp_params update_params; + + if (new_smac == old_smac) + goto unlock; + + new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac); + + if (new_smac_index < 0) + goto unlock; + + update_params.smac_index = new_smac_index; + if (mlx4_update_qp(ibdev->dev, &qp->mqp, MLX4_UPDATE_QP_SMAC, + &update_params)) { + release_mac = new_smac; + goto unlock; + } + + qp->pri.smac = new_smac; + qp->pri.smac_index = new_smac_index; + + release_mac = old_smac; + } + +unlock: + mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]); + if (release_mac != MLX4_IB_INVALID_MAC) + mlx4_unregister_mac(ibdev->dev, port, release_mac); +} + static void mlx4_ib_get_dev_addr(struct net_device *dev, struct mlx4_ib_dev *ibdev, u8 port) { @@ -1689,9 +1737,13 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev) return 0; } -static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev) +static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev, + struct net_device *dev, + unsigned long event) + { struct mlx4_ib_iboe *iboe; + int update_qps_port = -1; int port; iboe = &ibdev->iboe; @@ -1719,6 +1771,11 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev) } curr_master = iboe->masters[port - 1]; + if (dev == iboe->netdevs[port - 1] && + (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER || + event == NETDEV_UP || event == NETDEV_CHANGE)) + update_qps_port = port; + if (curr_netdev) { port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ? 
IB_PORT_ACTIVE : IB_PORT_DOWN; @@ -1752,6 +1809,9 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev) } spin_unlock(&iboe->lock); + + if (update_qps_port > 0) + mlx4_ib_update_qps(ibdev, dev, update_qps_port); } static int mlx4_ib_netdev_event(struct notifier_block *this, @@ -1764,7 +1824,7 @@ static int mlx4_ib_netdev_event(struct notifier_block *this, return NOTIFY_DONE; ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb); - mlx4_ib_scan_netdevs(ibdev); + mlx4_ib_scan_netdevs(ibdev, dev, event); return NOTIFY_DONE; } @@ -2043,6 +2103,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) goto err_map; for (i = 0; i < ibdev->num_ports; ++i) { + mutex_init(&ibdev->qp1_proxy_lock[i]); if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) == IB_LINK_LAYER_ETHERNET) { err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]); @@ -2126,7 +2187,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) for (i = 1 ; i <= ibdev->num_ports ; ++i) reset_gid_table(ibdev, i); rtnl_lock(); - mlx4_ib_scan_netdevs(ibdev); + mlx4_ib_scan_netdevs(ibdev, NULL, 0); rtnl_unlock(); mlx4_ib_init_gid_table(ibdev); } diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index f589522fddfd..66b0b7dbd9f4 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -522,6 +522,9 @@ struct mlx4_ib_dev { int steer_qpn_count; int steer_qpn_base; int steering_support; + struct mlx4_ib_qp *qp1_proxy[MLX4_MAX_PORTS]; + /* lock when destroying qp1_proxy and getting netdev events */ + struct mutex qp1_proxy_lock[MLX4_MAX_PORTS]; }; struct ib_event_work { diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 41308af4163c..dc57482ae7af 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -1132,6 +1132,12 @@ int mlx4_ib_destroy_qp(struct ib_qp *qp) if (is_qp0(dev, mqp)) mlx4_CLOSE_PORT(dev->dev, mqp->port); + if (dev->qp1_proxy[mqp->port - 1] == mqp) { + mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]); + dev->qp1_proxy[mqp->port - 1] = NULL; + mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]); + } + pd = get_pd(mqp); destroy_qp_common(dev, mqp, !!pd->ibpd.uobject); @@ -1646,6 +1652,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context); if (err) return -EINVAL; + if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI) + dev->qp1_proxy[qp->port - 1] = qp; } } } diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index c98fdb185931..a1710465faaf 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -28,6 +28,7 @@ #include <target/target_core_base.h> #include <target/target_core_fabric.h> #include <target/iscsi/iscsi_transport.h> +#include <linux/semaphore.h> #include "isert_proto.h" #include "ib_isert.h" @@ -561,7 +562,15 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) struct isert_device *device; struct ib_device *ib_dev = cma_id->device; int ret = 0; - u8 pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi; + u8 pi_support; + + spin_lock_bh(&np->np_thread_lock); + if (!np->enabled) { + spin_unlock_bh(&np->np_thread_lock); + pr_debug("iscsi_np is not enabled, reject connect request\n"); + return rdma_reject(cma_id, NULL, 0); + } + spin_unlock_bh(&np->np_thread_lock); pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n", cma_id, cma_id->context); @@ -652,6 +661,7 @@ isert_connect_request(struct 
rdma_cm_id *cma_id, struct rdma_cm_event *event) goto out_mr; } + pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi; if (pi_support && !device->pi_capable) { pr_err("Protection information requested but not supported\n"); ret = -EINVAL; @@ -663,11 +673,11 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) goto out_conn_dev; mutex_lock(&isert_np->np_accept_mutex); - list_add_tail(&isert_np->np_accept_list, &isert_conn->conn_accept_node); + list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list); mutex_unlock(&isert_np->np_accept_mutex); - pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np); - wake_up(&isert_np->np_accept_wq); + pr_debug("isert_connect_request() up np_sem np: %p\n", np); + up(&isert_np->np_sem); return 0; out_conn_dev: @@ -2999,7 +3009,7 @@ isert_setup_np(struct iscsi_np *np, pr_err("Unable to allocate struct isert_np\n"); return -ENOMEM; } - init_waitqueue_head(&isert_np->np_accept_wq); + sema_init(&isert_np->np_sem, 0); mutex_init(&isert_np->np_accept_mutex); INIT_LIST_HEAD(&isert_np->np_accept_list); init_completion(&isert_np->np_login_comp); @@ -3048,18 +3058,6 @@ out: } static int -isert_check_accept_queue(struct isert_np *isert_np) -{ - int empty; - - mutex_lock(&isert_np->np_accept_mutex); - empty = list_empty(&isert_np->np_accept_list); - mutex_unlock(&isert_np->np_accept_mutex); - - return empty; -} - -static int isert_rdma_accept(struct isert_conn *isert_conn) { struct rdma_cm_id *cm_id = isert_conn->conn_cm_id; @@ -3151,16 +3149,14 @@ isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn) int max_accept = 0, ret; accept_wait: - ret = wait_event_interruptible(isert_np->np_accept_wq, - !isert_check_accept_queue(isert_np) || - np->np_thread_state == ISCSI_NP_THREAD_RESET); + ret = down_interruptible(&isert_np->np_sem); if (max_accept > 5) return -ENODEV; spin_lock_bh(&np->np_thread_lock); if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { spin_unlock_bh(&np->np_thread_lock); - pr_err("ISCSI_NP_THREAD_RESET for isert_accept_np\n"); + pr_debug("ISCSI_NP_THREAD_RESET for isert_accept_np\n"); return -ENODEV; } spin_unlock_bh(&np->np_thread_lock); diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index 4c072ae34c01..da6612e68000 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h @@ -182,7 +182,7 @@ struct isert_device { }; struct isert_np { - wait_queue_head_t np_accept_wq; + struct semaphore np_sem; struct rdma_cm_id *np_cm_id; struct mutex np_accept_mutex; struct list_head np_accept_list; diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index 76842d7dc2e3..ffc7ad3a2c88 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -71,7 +71,7 @@ config KEYBOARD_ATKBD default y select SERIO select SERIO_LIBPS2 - select SERIO_I8042 if X86 + select SERIO_I8042 if ARCH_MIGHT_HAVE_PC_SERIO select SERIO_GSCPS2 if GSC help Say Y here if you want to use a standard AT or PS/2 keyboard. 
Usually diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c index d8241ba0afa0..a15063bea700 100644 --- a/drivers/input/keyboard/pxa27x_keypad.c +++ b/drivers/input/keyboard/pxa27x_keypad.c @@ -111,6 +111,8 @@ struct pxa27x_keypad { unsigned short keycodes[MAX_KEYPAD_KEYS]; int rotary_rel_code[2]; + unsigned int row_shift; + /* state row bits of each column scan */ uint32_t matrix_key_state[MAX_MATRIX_KEY_COLS]; uint32_t direct_key_state; @@ -467,7 +469,8 @@ scan: if ((bits_changed & (1 << row)) == 0) continue; - code = MATRIX_SCAN_CODE(row, col, MATRIX_ROW_SHIFT); + code = MATRIX_SCAN_CODE(row, col, keypad->row_shift); + input_event(input_dev, EV_MSC, MSC_SCAN, code); input_report_key(input_dev, keypad->keycodes[code], new_state[col] & (1 << row)); @@ -802,6 +805,8 @@ static int pxa27x_keypad_probe(struct platform_device *pdev) goto failed_put_clk; } + keypad->row_shift = get_count_order(pdata->matrix_key_cols); + if ((pdata->enable_rotary0 && keypad->rotary_rel_code[0] != -1) || (pdata->enable_rotary1 && keypad->rotary_rel_code[1] != -1)) { input_dev->evbit[0] |= BIT_MASK(EV_REL); diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig index effa9c5f2c5c..6b8441f7bc32 100644 --- a/drivers/input/mouse/Kconfig +++ b/drivers/input/mouse/Kconfig @@ -17,7 +17,7 @@ config MOUSE_PS2 default y select SERIO select SERIO_LIBPS2 - select SERIO_I8042 if X86 + select SERIO_I8042 if ARCH_MIGHT_HAVE_PC_SERIO select SERIO_GSCPS2 if GSC help Say Y here if you have a PS/2 mouse connected to your system. This diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index d68d33fb5ac2..c5ec703c727e 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -117,6 +117,31 @@ void synaptics_reset(struct psmouse *psmouse) } #ifdef CONFIG_MOUSE_PS2_SYNAPTICS +struct min_max_quirk { + const char * const *pnp_ids; + int x_min, x_max, y_min, y_max; +}; + +static const struct min_max_quirk min_max_pnpid_table[] = { + { + (const char * const []){"LEN0033", NULL}, + 1024, 5052, 2258, 4832 + }, + { + (const char * const []){"LEN0035", "LEN0042", NULL}, + 1232, 5710, 1156, 4696 + }, + { + (const char * const []){"LEN0034", "LEN0036", "LEN2004", NULL}, + 1024, 5112, 2024, 4832 + }, + { + (const char * const []){"LEN2001", NULL}, + 1024, 5022, 2508, 4832 + }, + { } +}; + /* This list has been kindly provided by Synaptics. 
*/ static const char * const topbuttonpad_pnp_ids[] = { "LEN0017", @@ -129,7 +154,7 @@ static const char * const topbuttonpad_pnp_ids[] = { "LEN002D", "LEN002E", "LEN0033", /* Helix */ - "LEN0034", /* T431s, T540, X1 Carbon 2nd */ + "LEN0034", /* T431s, L440, L540, T540, W540, X1 Carbon 2nd */ "LEN0035", /* X240 */ "LEN0036", /* T440 */ "LEN0037", @@ -142,7 +167,7 @@ static const char * const topbuttonpad_pnp_ids[] = { "LEN0048", "LEN0049", "LEN2000", - "LEN2001", + "LEN2001", /* Edge E431 */ "LEN2002", "LEN2003", "LEN2004", /* L440 */ @@ -156,6 +181,18 @@ static const char * const topbuttonpad_pnp_ids[] = { NULL }; +static bool matches_pnp_id(struct psmouse *psmouse, const char * const ids[]) +{ + int i; + + if (!strncmp(psmouse->ps2dev.serio->firmware_id, "PNP:", 4)) + for (i = 0; ids[i]; i++) + if (strstr(psmouse->ps2dev.serio->firmware_id, ids[i])) + return true; + + return false; +} + /***************************************************************************** * Synaptics communications functions ****************************************************************************/ @@ -304,20 +341,20 @@ static int synaptics_identify(struct psmouse *psmouse) * Resolution is left zero if touchpad does not support the query */ -static const int *quirk_min_max; - static int synaptics_resolution(struct psmouse *psmouse) { struct synaptics_data *priv = psmouse->private; unsigned char resp[3]; + int i; - if (quirk_min_max) { - priv->x_min = quirk_min_max[0]; - priv->x_max = quirk_min_max[1]; - priv->y_min = quirk_min_max[2]; - priv->y_max = quirk_min_max[3]; - return 0; - } + for (i = 0; min_max_pnpid_table[i].pnp_ids; i++) + if (matches_pnp_id(psmouse, min_max_pnpid_table[i].pnp_ids)) { + priv->x_min = min_max_pnpid_table[i].x_min; + priv->x_max = min_max_pnpid_table[i].x_max; + priv->y_min = min_max_pnpid_table[i].y_min; + priv->y_max = min_max_pnpid_table[i].y_max; + return 0; + } if (SYN_ID_MAJOR(priv->identity) < 4) return 0; @@ -1365,17 +1402,8 @@ static void set_input_params(struct psmouse *psmouse, if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) { __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit); - /* See if this buttonpad has a top button area */ - if (!strncmp(psmouse->ps2dev.serio->firmware_id, "PNP:", 4)) { - for (i = 0; topbuttonpad_pnp_ids[i]; i++) { - if (strstr(psmouse->ps2dev.serio->firmware_id, - topbuttonpad_pnp_ids[i])) { - __set_bit(INPUT_PROP_TOPBUTTONPAD, - dev->propbit); - break; - } - } - } + if (matches_pnp_id(psmouse, topbuttonpad_pnp_ids)) + __set_bit(INPUT_PROP_TOPBUTTONPAD, dev->propbit); /* Clickpads report only left button */ __clear_bit(BTN_RIGHT, dev->keybit); __clear_bit(BTN_MIDDLE, dev->keybit); @@ -1547,104 +1575,10 @@ static const struct dmi_system_id olpc_dmi_table[] __initconst = { { } }; -static const struct dmi_system_id min_max_dmi_table[] __initconst = { -#if defined(CONFIG_DMI) - { - /* Lenovo ThinkPad Helix */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix"), - }, - .driver_data = (int []){1024, 5052, 2258, 4832}, - }, - { - /* Lenovo ThinkPad X240 */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X240"), - }, - .driver_data = (int []){1232, 5710, 1156, 4696}, - }, - { - /* Lenovo ThinkPad Edge E431 */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Edge E431"), - }, - .driver_data = (int []){1024, 5022, 2508, 4832}, - }, - { - /* Lenovo ThinkPad T431s */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - 
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T431"), - }, - .driver_data = (int []){1024, 5112, 2024, 4832}, - }, - { - /* Lenovo ThinkPad T440s */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T440"), - }, - .driver_data = (int []){1024, 5112, 2024, 4832}, - }, - { - /* Lenovo ThinkPad L440 */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L440"), - }, - .driver_data = (int []){1024, 5112, 2024, 4832}, - }, - { - /* Lenovo ThinkPad T540p */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T540"), - }, - .driver_data = (int []){1024, 5056, 2058, 4832}, - }, - { - /* Lenovo ThinkPad L540 */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L540"), - }, - .driver_data = (int []){1024, 5112, 2024, 4832}, - }, - { - /* Lenovo Yoga S1 */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, - "ThinkPad S1 Yoga"), - }, - .driver_data = (int []){1232, 5710, 1156, 4696}, - }, - { - /* Lenovo ThinkPad X1 Carbon Haswell (3rd generation) */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, - "ThinkPad X1 Carbon 2nd"), - }, - .driver_data = (int []){1024, 5112, 2024, 4832}, - }, -#endif - { } -}; - void __init synaptics_module_init(void) { - const struct dmi_system_id *min_max_dmi; - impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table); broken_olpc_ec = dmi_check_system(olpc_dmi_table); - - min_max_dmi = dmi_first_match(min_max_dmi_table); - if (min_max_dmi) - quirk_min_max = min_max_dmi->driver_data; } static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode) diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c index 762b08432de0..8b748d99b934 100644 --- a/drivers/input/serio/ambakmi.c +++ b/drivers/input/serio/ambakmi.c @@ -79,7 +79,8 @@ static int amba_kmi_open(struct serio *io) writeb(divisor, KMICLKDIV); writeb(KMICR_EN, KMICR); - ret = request_irq(kmi->irq, amba_kmi_int, 0, "kmi-pl050", kmi); + ret = request_irq(kmi->irq, amba_kmi_int, IRQF_SHARED, "kmi-pl050", + kmi); if (ret) { printk(KERN_ERR "kmi: failed to claim IRQ%d\n", kmi->irq); writeb(0, KMICR); diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index 68edc9db2c64..b845e9370871 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig @@ -640,7 +640,7 @@ config TOUCHSCREEN_WM9713 config TOUCHSCREEN_WM97XX_ATMEL tristate "WM97xx Atmel accelerated touch" - depends on TOUCHSCREEN_WM97XX && (AVR32 || ARCH_AT91) + depends on TOUCHSCREEN_WM97XX && AVR32 help Say Y here for support for streaming mode with WM97xx touchscreens on Atmel AT91 or AVR32 systems with an AC97C module. 
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index c949520bd196..57068e8035b5 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -3999,7 +3999,7 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic) iommu_flush_dte(iommu, devid); if (devid != alias) { irq_lookup_table[alias] = table; - set_dte_irq_entry(devid, table); + set_dte_irq_entry(alias, table); iommu_flush_dte(iommu, alias); } diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index b76c58dbe30c..0e08545d7298 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -788,7 +788,7 @@ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m) * per device. But we can enable the exclusion range per * device. This is done here */ - set_dev_entry_bit(m->devid, DEV_ENTRY_EX); + set_dev_entry_bit(devid, DEV_ENTRY_EX); iommu->exclusion_start = m->range_start; iommu->exclusion_length = m->range_length; } diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index 5208828792e6..203b2e6a91cf 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c @@ -504,8 +504,10 @@ static void do_fault(struct work_struct *work) write = !!(fault->flags & PPR_FAULT_WRITE); + down_read(&fault->state->mm->mmap_sem); npages = get_user_pages(fault->state->task, fault->state->mm, fault->address, 1, write, 0, &page, NULL); + up_read(&fault->state->mm->mmap_sem); if (npages == 1) { put_page(page); diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 074018979cdf..2ca0744b0a45 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -1011,13 +1011,13 @@ static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain, } static struct iommu_ops exynos_iommu_ops = { - .domain_init = &exynos_iommu_domain_init, - .domain_destroy = &exynos_iommu_domain_destroy, - .attach_dev = &exynos_iommu_attach_device, - .detach_dev = &exynos_iommu_detach_device, - .map = &exynos_iommu_map, - .unmap = &exynos_iommu_unmap, - .iova_to_phys = &exynos_iommu_iova_to_phys, + .domain_init = exynos_iommu_domain_init, + .domain_destroy = exynos_iommu_domain_destroy, + .attach_dev = exynos_iommu_attach_device, + .detach_dev = exynos_iommu_detach_device, + .map = exynos_iommu_map, + .unmap = exynos_iommu_unmap, + .iova_to_phys = exynos_iommu_iova_to_phys, .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE, }; diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 9380be7b1895..5f054c44b485 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -2178,6 +2178,8 @@ static int cache_create(struct cache_args *ca, struct cache **result) ti->num_discard_bios = 1; ti->discards_supported = true; ti->discard_zeroes_data_unsupported = true; + /* Discard bios must be split on a block boundary */ + ti->split_discard_bios = true; cache->features = ca->features; ti->per_bio_data_size = get_per_bio_data_size(cache); diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 784695d22fde..53b213226c01 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -19,7 +19,6 @@ #include <linux/crypto.h> #include <linux/workqueue.h> #include <linux/backing-dev.h> -#include <linux/percpu.h> #include <linux/atomic.h> #include <linux/scatterlist.h> #include <asm/page.h> @@ -43,6 +42,7 @@ struct convert_context { struct bvec_iter iter_out; sector_t cc_sector; atomic_t cc_pending; + struct ablkcipher_request *req; }; /* @@ -111,15 
+111,7 @@ struct iv_tcw_private { enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID }; /* - * Duplicated per-CPU state for cipher. - */ -struct crypt_cpu { - struct ablkcipher_request *req; -}; - -/* - * The fields in here must be read only after initialization, - * changing state should be in crypt_cpu. + * The fields in here must be read only after initialization. */ struct crypt_config { struct dm_dev *dev; @@ -150,12 +142,6 @@ struct crypt_config { sector_t iv_offset; unsigned int iv_size; - /* - * Duplicated per cpu state. Access through - * per_cpu_ptr() only. - */ - struct crypt_cpu __percpu *cpu; - /* ESSIV: struct crypto_cipher *essiv_tfm */ void *iv_private; struct crypto_ablkcipher **tfms; @@ -192,11 +178,6 @@ static void clone_init(struct dm_crypt_io *, struct bio *); static void kcryptd_queue_crypt(struct dm_crypt_io *io); static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq); -static struct crypt_cpu *this_crypt_config(struct crypt_config *cc) -{ - return this_cpu_ptr(cc->cpu); -} - /* * Use this to access cipher attributes that are the same for each CPU. */ @@ -903,16 +884,15 @@ static void kcryptd_async_done(struct crypto_async_request *async_req, static void crypt_alloc_req(struct crypt_config *cc, struct convert_context *ctx) { - struct crypt_cpu *this_cc = this_crypt_config(cc); unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1); - if (!this_cc->req) - this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO); + if (!ctx->req) + ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO); - ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]); - ablkcipher_request_set_callback(this_cc->req, + ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]); + ablkcipher_request_set_callback(ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, - kcryptd_async_done, dmreq_of_req(cc, this_cc->req)); + kcryptd_async_done, dmreq_of_req(cc, ctx->req)); } /* @@ -921,7 +901,6 @@ static void crypt_alloc_req(struct crypt_config *cc, static int crypt_convert(struct crypt_config *cc, struct convert_context *ctx) { - struct crypt_cpu *this_cc = this_crypt_config(cc); int r; atomic_set(&ctx->cc_pending, 1); @@ -932,7 +911,7 @@ static int crypt_convert(struct crypt_config *cc, atomic_inc(&ctx->cc_pending); - r = crypt_convert_block(cc, ctx, this_cc->req); + r = crypt_convert_block(cc, ctx, ctx->req); switch (r) { /* async */ @@ -941,7 +920,7 @@ static int crypt_convert(struct crypt_config *cc, reinit_completion(&ctx->restart); /* fall through*/ case -EINPROGRESS: - this_cc->req = NULL; + ctx->req = NULL; ctx->cc_sector++; continue; @@ -1040,6 +1019,7 @@ static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc, io->sector = sector; io->error = 0; io->base_io = NULL; + io->ctx.req = NULL; atomic_set(&io->io_pending, 0); return io; @@ -1065,6 +1045,8 @@ static void crypt_dec_pending(struct dm_crypt_io *io) if (!atomic_dec_and_test(&io->io_pending)) return; + if (io->ctx.req) + mempool_free(io->ctx.req, cc->req_pool); mempool_free(io, cc->io_pool); if (likely(!base_io)) @@ -1492,8 +1474,6 @@ static int crypt_wipe_key(struct crypt_config *cc) static void crypt_dtr(struct dm_target *ti) { struct crypt_config *cc = ti->private; - struct crypt_cpu *cpu_cc; - int cpu; ti->private = NULL; @@ -1505,13 +1485,6 @@ static void crypt_dtr(struct dm_target *ti) if (cc->crypt_queue) destroy_workqueue(cc->crypt_queue); - if (cc->cpu) - for_each_possible_cpu(cpu) { - cpu_cc = per_cpu_ptr(cc->cpu, cpu); - if (cpu_cc->req) - mempool_free(cpu_cc->req, cc->req_pool); 
- } - crypt_free_tfms(cc); if (cc->bs) @@ -1530,9 +1503,6 @@ static void crypt_dtr(struct dm_target *ti) if (cc->dev) dm_put_device(ti, cc->dev); - if (cc->cpu) - free_percpu(cc->cpu); - kzfree(cc->cipher); kzfree(cc->cipher_string); @@ -1588,13 +1558,6 @@ static int crypt_ctr_cipher(struct dm_target *ti, if (tmp) DMWARN("Ignoring unexpected additional cipher options"); - cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)), - __alignof__(struct crypt_cpu)); - if (!cc->cpu) { - ti->error = "Cannot allocate per cpu state"; - goto bad_mem; - } - /* * For compatibility with the original dm-crypt mapping format, if * only the cipher name is supplied, use cbc-plain. diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index aa009e865871..ebfa411d1a7d 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -445,11 +445,11 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path, else m->saved_queue_if_no_path = queue_if_no_path; m->queue_if_no_path = queue_if_no_path; - if (!m->queue_if_no_path) - dm_table_run_md_queue_async(m->ti->table); - spin_unlock_irqrestore(&m->lock, flags); + if (!queue_if_no_path) + dm_table_run_md_queue_async(m->ti->table); + return 0; } @@ -954,7 +954,7 @@ out: */ static int reinstate_path(struct pgpath *pgpath) { - int r = 0; + int r = 0, run_queue = 0; unsigned long flags; struct multipath *m = pgpath->pg->m; @@ -978,7 +978,7 @@ static int reinstate_path(struct pgpath *pgpath) if (!m->nr_valid_paths++) { m->current_pgpath = NULL; - dm_table_run_md_queue_async(m->ti->table); + run_queue = 1; } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) { if (queue_work(kmpath_handlerd, &pgpath->activate_path.work)) m->pg_init_in_progress++; @@ -991,6 +991,8 @@ static int reinstate_path(struct pgpath *pgpath) out: spin_unlock_irqrestore(&m->lock, flags); + if (run_queue) + dm_table_run_md_queue_async(m->ti->table); return r; } diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 13abade76ad9..242ac2ea5f29 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -27,6 +27,9 @@ #define MAPPING_POOL_SIZE 1024 #define PRISON_CELLS 1024 #define COMMIT_PERIOD HZ +#define NO_SPACE_TIMEOUT_SECS 60 + +static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS; DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle, "A percentage of time allocated for copy on write"); @@ -175,6 +178,7 @@ struct pool { struct workqueue_struct *wq; struct work_struct worker; struct delayed_work waker; + struct delayed_work no_space_timeout; unsigned long last_commit_jiffies; unsigned ref_count; @@ -935,7 +939,7 @@ static int commit(struct pool *pool) { int r; - if (get_pool_mode(pool) != PM_WRITE) + if (get_pool_mode(pool) >= PM_READ_ONLY) return -EINVAL; r = dm_pool_commit_metadata(pool->pmd); @@ -1590,6 +1594,20 @@ static void do_waker(struct work_struct *ws) queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD); } +/* + * We're holding onto IO to allow userland time to react. After the + * timeout either the pool will have been resized (and thus back in + * PM_WRITE mode), or we degrade to PM_READ_ONLY and start erroring IO. 
+ */ +static void do_no_space_timeout(struct work_struct *ws) +{ + struct pool *pool = container_of(to_delayed_work(ws), struct pool, + no_space_timeout); + + if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) + set_pool_mode(pool, PM_READ_ONLY); +} + /*----------------------------------------------------------------*/ struct noflush_work { @@ -1654,6 +1672,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) struct pool_c *pt = pool->ti->private; bool needs_check = dm_pool_metadata_needs_check(pool->pmd); enum pool_mode old_mode = get_pool_mode(pool); + unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ; /* * Never allow the pool to transition to PM_WRITE mode if user @@ -1715,6 +1734,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) pool->process_discard = process_discard; pool->process_prepared_mapping = process_prepared_mapping; pool->process_prepared_discard = process_prepared_discard_passdown; + + if (!pool->pf.error_if_no_space && no_space_timeout) + queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout); break; case PM_WRITE: @@ -2100,6 +2122,7 @@ static struct pool *pool_create(struct mapped_device *pool_md, INIT_WORK(&pool->worker, do_worker); INIT_DELAYED_WORK(&pool->waker, do_waker); + INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout); spin_lock_init(&pool->lock); bio_list_init(&pool->deferred_flush_bios); INIT_LIST_HEAD(&pool->prepared_mappings); @@ -2662,6 +2685,7 @@ static void pool_postsuspend(struct dm_target *ti) struct pool *pool = pt->pool; cancel_delayed_work(&pool->waker); + cancel_delayed_work(&pool->no_space_timeout); flush_workqueue(pool->wq); (void) commit(pool); } @@ -3487,6 +3511,9 @@ static void dm_thin_exit(void) module_init(dm_thin_init); module_exit(dm_thin_exit); +module_param_named(no_space_timeout, no_space_timeout_secs, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds"); + MODULE_DESCRIPTION(DM_NAME " thin provisioning target"); MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); MODULE_LICENSE("GPL"); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 455e64916498..6a71bc7c9133 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1544,7 +1544,6 @@ static int setup_clone(struct request *clone, struct request *rq, clone->cmd = rq->cmd; clone->cmd_len = rq->cmd_len; clone->sense = rq->sense; - clone->buffer = rq->buffer; clone->end_io = end_clone_request; clone->end_io_data = tio; diff --git a/drivers/md/md.c b/drivers/md/md.c index 8fda38d23e38..237b7e0ddc7a 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -8516,7 +8516,8 @@ static int md_notify_reboot(struct notifier_block *this, if (mddev_trylock(mddev)) { if (mddev->pers) __md_stop_writes(mddev); - mddev->safemode = 2; + if (mddev->persistent) + mddev->safemode = 2; mddev_unlock(mddev); } need_delay = 1; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 33fc408e5eac..cb882aae9e20 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1172,6 +1172,13 @@ static void __make_request(struct mddev *mddev, struct bio *bio) int max_sectors; int sectors; + /* + * Register the new request and wait if the reconstruction + * thread has put up a bar for new requests. + * Continue immediately if no resync is active currently. 
+ */ + wait_barrier(conf); + sectors = bio_sectors(bio); while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && bio->bi_iter.bi_sector < conf->reshape_progress && @@ -1552,12 +1559,6 @@ static void make_request(struct mddev *mddev, struct bio *bio) md_write_start(mddev, bio); - /* - * Register the new request and wait if the reconstruction - * thread has put up a bar for new requests. - * Continue immediately if no resync is active currently. - */ - wait_barrier(conf); do { diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c index e8a1ce204036..cdd7c1b7259b 100644 --- a/drivers/media/i2c/ov7670.c +++ b/drivers/media/i2c/ov7670.c @@ -1109,7 +1109,7 @@ static int ov7670_enum_framesizes(struct v4l2_subdev *sd, * windows that fall outside that. */ for (i = 0; i < n_win_sizes; i++) { - struct ov7670_win_size *win = &info->devtype->win_sizes[index]; + struct ov7670_win_size *win = &info->devtype->win_sizes[i]; if (info->min_width && win->width < info->min_width) continue; if (info->min_height && win->height < info->min_height) diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c index a4459301b5f8..ee0f57e01b56 100644 --- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c +++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c @@ -1616,7 +1616,7 @@ static int s5c73m3_get_platform_data(struct s5c73m3 *state) if (ret < 0) return -EINVAL; - node_ep = v4l2_of_get_next_endpoint(node, NULL); + node_ep = of_graph_get_next_endpoint(node, NULL); if (!node_ep) { dev_warn(dev, "no endpoint defined for node: %s\n", node->full_name); diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c index d5a7a135f75d..703560fa5e73 100644 --- a/drivers/media/media-device.c +++ b/drivers/media/media-device.c @@ -93,6 +93,7 @@ static long media_device_enum_entities(struct media_device *mdev, struct media_entity *ent; struct media_entity_desc u_ent; + memset(&u_ent, 0, sizeof(u_ent)); if (copy_from_user(&u_ent.id, &uent->id, sizeof(u_ent.id))) return -EFAULT; diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c index b4f12d00be05..656708252962 100644 --- a/drivers/media/platform/davinci/vpbe_display.c +++ b/drivers/media/platform/davinci/vpbe_display.c @@ -372,18 +372,32 @@ static int vpbe_stop_streaming(struct vb2_queue *vq) { struct vpbe_fh *fh = vb2_get_drv_priv(vq); struct vpbe_layer *layer = fh->layer; + struct vpbe_display *disp = fh->disp_dev; + unsigned long flags; if (!vb2_is_streaming(vq)) return 0; /* release all active buffers */ + spin_lock_irqsave(&disp->dma_queue_lock, flags); + if (layer->cur_frm == layer->next_frm) { + vb2_buffer_done(&layer->cur_frm->vb, VB2_BUF_STATE_ERROR); + } else { + if (layer->cur_frm != NULL) + vb2_buffer_done(&layer->cur_frm->vb, + VB2_BUF_STATE_ERROR); + if (layer->next_frm != NULL) + vb2_buffer_done(&layer->next_frm->vb, + VB2_BUF_STATE_ERROR); + } + while (!list_empty(&layer->dma_queue)) { layer->next_frm = list_entry(layer->dma_queue.next, struct vpbe_disp_buffer, list); list_del(&layer->next_frm->list); vb2_buffer_done(&layer->next_frm->vb, VB2_BUF_STATE_ERROR); } - + spin_unlock_irqrestore(&disp->dma_queue_lock, flags); return 0; } diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c index d762246eabf5..0379cb9f9a9c 100644 --- a/drivers/media/platform/davinci/vpfe_capture.c +++ b/drivers/media/platform/davinci/vpfe_capture.c @@ -734,6 +734,8 @@ static int vpfe_release(struct file *file) } 
vpfe_dev->io_usrs = 0; vpfe_dev->numbuffers = config_params.numbuffers; + videobuf_stop(&vpfe_dev->buffer_queue); + videobuf_mmap_free(&vpfe_dev->buffer_queue); } /* Decrement device usrs counter */ diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c index 756da78bac23..8dea0b84a3ad 100644 --- a/drivers/media/platform/davinci/vpif_capture.c +++ b/drivers/media/platform/davinci/vpif_capture.c @@ -358,8 +358,31 @@ static int vpif_stop_streaming(struct vb2_queue *vq) common = &ch->common[VPIF_VIDEO_INDEX]; + /* Disable channel as per its device type and channel id */ + if (VPIF_CHANNEL0_VIDEO == ch->channel_id) { + enable_channel0(0); + channel0_intr_enable(0); + } + if ((VPIF_CHANNEL1_VIDEO == ch->channel_id) || + (2 == common->started)) { + enable_channel1(0); + channel1_intr_enable(0); + } + common->started = 0; + /* release all active buffers */ spin_lock_irqsave(&common->irqlock, flags); + if (common->cur_frm == common->next_frm) { + vb2_buffer_done(&common->cur_frm->vb, VB2_BUF_STATE_ERROR); + } else { + if (common->cur_frm != NULL) + vb2_buffer_done(&common->cur_frm->vb, + VB2_BUF_STATE_ERROR); + if (common->next_frm != NULL) + vb2_buffer_done(&common->next_frm->vb, + VB2_BUF_STATE_ERROR); + } + while (!list_empty(&common->dma_queue)) { common->next_frm = list_entry(common->dma_queue.next, struct vpif_cap_buffer, list); @@ -933,17 +956,6 @@ static int vpif_release(struct file *filep) if (fh->io_allowed[VPIF_VIDEO_INDEX]) { /* Reset io_usrs member of channel object */ common->io_usrs = 0; - /* Disable channel as per its device type and channel id */ - if (VPIF_CHANNEL0_VIDEO == ch->channel_id) { - enable_channel0(0); - channel0_intr_enable(0); - } - if ((VPIF_CHANNEL1_VIDEO == ch->channel_id) || - (2 == common->started)) { - enable_channel1(0); - channel1_intr_enable(0); - } - common->started = 0; /* Free buffers allocated */ vb2_queue_release(&common->buffer_queue); vb2_dma_contig_cleanup_ctx(common->alloc_ctx); diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c index 0ac841e35aa4..aed41edd0501 100644 --- a/drivers/media/platform/davinci/vpif_display.c +++ b/drivers/media/platform/davinci/vpif_display.c @@ -320,8 +320,31 @@ static int vpif_stop_streaming(struct vb2_queue *vq) common = &ch->common[VPIF_VIDEO_INDEX]; + /* Disable channel */ + if (VPIF_CHANNEL2_VIDEO == ch->channel_id) { + enable_channel2(0); + channel2_intr_enable(0); + } + if ((VPIF_CHANNEL3_VIDEO == ch->channel_id) || + (2 == common->started)) { + enable_channel3(0); + channel3_intr_enable(0); + } + common->started = 0; + /* release all active buffers */ spin_lock_irqsave(&common->irqlock, flags); + if (common->cur_frm == common->next_frm) { + vb2_buffer_done(&common->cur_frm->vb, VB2_BUF_STATE_ERROR); + } else { + if (common->cur_frm != NULL) + vb2_buffer_done(&common->cur_frm->vb, + VB2_BUF_STATE_ERROR); + if (common->next_frm != NULL) + vb2_buffer_done(&common->next_frm->vb, + VB2_BUF_STATE_ERROR); + } + while (!list_empty(&common->dma_queue)) { common->next_frm = list_entry(common->dma_queue.next, struct vpif_disp_buffer, list); @@ -773,18 +796,6 @@ static int vpif_release(struct file *filep) if (fh->io_allowed[VPIF_VIDEO_INDEX]) { /* Reset io_usrs member of channel object */ common->io_usrs = 0; - /* Disable channel */ - if (VPIF_CHANNEL2_VIDEO == ch->channel_id) { - enable_channel2(0); - channel2_intr_enable(0); - } - if ((VPIF_CHANNEL3_VIDEO == ch->channel_id) || - (2 == common->started)) { - 
enable_channel3(0); - channel3_intr_enable(0); - } - common->started = 0; - /* Free buffers allocated */ vb2_queue_release(&common->buffer_queue); vb2_dma_contig_cleanup_ctx(common->alloc_ctx); diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c index da2fc86cc524..25dbf5b05a96 100644 --- a/drivers/media/platform/exynos4-is/fimc-core.c +++ b/drivers/media/platform/exynos4-is/fimc-core.c @@ -122,7 +122,7 @@ static struct fimc_fmt fimc_formats[] = { }, { .name = "YUV 4:2:2 planar, Y/Cb/Cr", .fourcc = V4L2_PIX_FMT_YUV422P, - .depth = { 12 }, + .depth = { 16 }, .color = FIMC_FMT_YCBYCR422, .memplanes = 1, .colplanes = 3, diff --git a/drivers/media/tuners/fc2580.c b/drivers/media/tuners/fc2580.c index 3aecaf465094..f0c9c42867de 100644 --- a/drivers/media/tuners/fc2580.c +++ b/drivers/media/tuners/fc2580.c @@ -195,7 +195,7 @@ static int fc2580_set_params(struct dvb_frontend *fe) f_ref = 2UL * priv->cfg->clock / r_val; n_val = div_u64_rem(f_vco, f_ref, &k_val); - k_val_reg = 1UL * k_val * (1 << 20) / f_ref; + k_val_reg = div_u64(1ULL * k_val * (1 << 20), f_ref); ret = fc2580_wr_reg(priv, 0x18, r18_val | ((k_val_reg >> 16) & 0xff)); if (ret < 0) @@ -348,8 +348,8 @@ static int fc2580_set_params(struct dvb_frontend *fe) if (ret < 0) goto err; - ret = fc2580_wr_reg(priv, 0x37, 1UL * priv->cfg->clock * \ - fc2580_if_filter_lut[i].mul / 1000000000); + ret = fc2580_wr_reg(priv, 0x37, div_u64(1ULL * priv->cfg->clock * + fc2580_if_filter_lut[i].mul, 1000000000)); if (ret < 0) goto err; diff --git a/drivers/media/tuners/fc2580_priv.h b/drivers/media/tuners/fc2580_priv.h index be38a9e637e0..646c99452136 100644 --- a/drivers/media/tuners/fc2580_priv.h +++ b/drivers/media/tuners/fc2580_priv.h @@ -22,6 +22,7 @@ #define FC2580_PRIV_H #include "fc2580.h" +#include <linux/math64.h> struct fc2580_reg_val { u8 reg; diff --git a/drivers/media/usb/dvb-usb-v2/Makefile b/drivers/media/usb/dvb-usb-v2/Makefile index 7407b8338ccf..bc38f03394cd 100644 --- a/drivers/media/usb/dvb-usb-v2/Makefile +++ b/drivers/media/usb/dvb-usb-v2/Makefile @@ -41,4 +41,3 @@ ccflags-y += -I$(srctree)/drivers/media/dvb-core ccflags-y += -I$(srctree)/drivers/media/dvb-frontends ccflags-y += -I$(srctree)/drivers/media/tuners ccflags-y += -I$(srctree)/drivers/media/common -ccflags-y += -I$(srctree)/drivers/staging/media/rtl2832u_sdr diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c index 61d196e8b3ab..dcbd392e6efc 100644 --- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c +++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c @@ -24,7 +24,6 @@ #include "rtl2830.h" #include "rtl2832.h" -#include "rtl2832_sdr.h" #include "qt1010.h" #include "mt2060.h" @@ -36,6 +35,45 @@ #include "tua9001.h" #include "r820t.h" +/* + * The RTL2832_SDR module is in staging. This logic is added in order to avoid + * any hard dependency on the drivers/staging/ directory, as we want to compile + * the mainline driver even when the whole staging directory is missing. + */ +#include <media/v4l2-subdev.h> + +#if IS_ENABLED(CONFIG_DVB_RTL2832_SDR) +struct dvb_frontend *rtl2832_sdr_attach(struct dvb_frontend *fe, + struct i2c_adapter *i2c, const struct rtl2832_config *cfg, + struct v4l2_subdev *sd); +#else +static inline struct dvb_frontend *rtl2832_sdr_attach(struct dvb_frontend *fe, + struct i2c_adapter *i2c, const struct rtl2832_config *cfg, + struct v4l2_subdev *sd) +{ + return NULL; +} +#endif + +#ifdef CONFIG_MEDIA_ATTACH +#define dvb_attach_sdr(FUNCTION, ARGS...) 
({ \ + void *__r = NULL; \ + typeof(&FUNCTION) __a = symbol_request(FUNCTION); \ + if (__a) { \ + __r = (void *) __a(ARGS); \ + if (__r == NULL) \ + symbol_put(FUNCTION); \ + } \ + __r; \ +}) + +#else +#define dvb_attach_sdr(FUNCTION, ARGS...) ({ \ + FUNCTION(ARGS); \ +}) + +#endif + static int rtl28xxu_disable_rc; module_param_named(disable_rc, rtl28xxu_disable_rc, int, 0644); MODULE_PARM_DESC(disable_rc, "disable RTL2832U remote controller"); @@ -908,7 +946,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap) adap->fe[0]->ops.tuner_ops.get_rf_strength; /* attach SDR */ - dvb_attach(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap, + dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap, &rtl28xxu_rtl2832_fc0012_config, NULL); break; case TUNER_RTL2832_FC0013: @@ -920,7 +958,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap) adap->fe[0]->ops.tuner_ops.get_rf_strength; /* attach SDR */ - dvb_attach(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap, + dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap, &rtl28xxu_rtl2832_fc0013_config, NULL); break; case TUNER_RTL2832_E4000: { @@ -951,7 +989,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap) i2c_set_adapdata(i2c_adap_internal, d); /* attach SDR */ - dvb_attach(rtl2832_sdr_attach, adap->fe[0], + dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], i2c_adap_internal, &rtl28xxu_rtl2832_e4000_config, sd); } @@ -982,7 +1020,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap) adap->fe[0]->ops.tuner_ops.get_rf_strength; /* attach SDR */ - dvb_attach(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap, + dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap, &rtl28xxu_rtl2832_r820t_config, NULL); break; case TUNER_RTL2832_R828D: diff --git a/drivers/media/usb/gspca/sonixb.c b/drivers/media/usb/gspca/sonixb.c index 7277dbd2afcd..ecbcb39feb71 100644 --- a/drivers/media/usb/gspca/sonixb.c +++ b/drivers/media/usb/gspca/sonixb.c @@ -1430,10 +1430,8 @@ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x0c45, 0x600d), SB(PAS106, 101)}, {USB_DEVICE(0x0c45, 0x6011), SB(OV6650, 101)}, {USB_DEVICE(0x0c45, 0x6019), SB(OV7630, 101)}, -#if !IS_ENABLED(CONFIG_USB_SN9C102) {USB_DEVICE(0x0c45, 0x6024), SB(TAS5130CXX, 102)}, {USB_DEVICE(0x0c45, 0x6025), SB(TAS5130CXX, 102)}, -#endif {USB_DEVICE(0x0c45, 0x6027), SB(OV7630, 101)}, /* Genius Eye 310 */ {USB_DEVICE(0x0c45, 0x6028), SB(PAS202, 102)}, {USB_DEVICE(0x0c45, 0x6029), SB(PAS106, 102)}, diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index 04b2daf567be..7e2411c36419 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -178,6 +178,9 @@ struct v4l2_create_buffers32 { static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) { + if (get_user(kp->type, &up->type)) + return -EFAULT; + switch (kp->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: case V4L2_BUF_TYPE_VIDEO_OUTPUT: @@ -204,17 +207,16 @@ static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __us static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) { - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)) || - get_user(kp->type, &up->type)) - return -EFAULT; + if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32))) + return -EFAULT; return __get_v4l2_format32(kp, up); } static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 
__user *up) { if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) || - copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format.fmt))) - return -EFAULT; + copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format))) + return -EFAULT; return __get_v4l2_format32(&kp->format, &up->format); } diff --git a/drivers/memory/mvebu-devbus.c b/drivers/memory/mvebu-devbus.c index 110c03627051..b59a17fb7c3e 100644 --- a/drivers/memory/mvebu-devbus.c +++ b/drivers/memory/mvebu-devbus.c @@ -108,8 +108,19 @@ static int devbus_set_timing_params(struct devbus *devbus, node->full_name); return err; } - /* Convert bit width to byte width */ - r.bus_width /= 8; + + /* + * The bus width is encoded into the register as 0 for 8 bits, + * and 1 for 16 bits, so we do the necessary conversion here. + */ + if (r.bus_width == 8) + r.bus_width = 0; + else if (r.bus_width == 16) + r.bus_width = 1; + else { + dev_err(devbus->dev, "invalid bus width %d\n", r.bus_width); + return -EINVAL; + } err = get_timing_param_ps(devbus, node, "devbus,badr-skew-ps", &r.badr_skew); diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c index d049d271699c..c00adfaa6279 100644 --- a/drivers/misc/genwqe/card_utils.c +++ b/drivers/misc/genwqe/card_utils.c @@ -718,7 +718,7 @@ int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count) int rc; struct pci_dev *pci_dev = cd->pci_dev; - rc = pci_enable_msi_block(pci_dev, count); + rc = pci_enable_msi_exact(pci_dev, count); if (rc == 0) cd->flags |= GENWQE_FLAG_MSI_ENABLED; return rc; diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 771c60ab4a32..a084edd37af5 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -13,6 +13,7 @@ #include <linux/init.h> #include <linux/ioport.h> #include <linux/device.h> +#include <linux/io.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/slab.h> @@ -23,6 +24,7 @@ #include <linux/mmc/pm.h> #include <linux/mmc/host.h> #include <linux/mmc/card.h> +#include <linux/mmc/slot-gpio.h> #include <linux/amba/bus.h> #include <linux/clk.h> #include <linux/scatterlist.h> @@ -364,7 +366,6 @@ static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data) #ifdef CONFIG_DMA_ENGINE static void mmci_dma_setup(struct mmci_host *host) { - struct mmci_platform_data *plat = host->plat; const char *rxname, *txname; dma_cap_mask_t mask; @@ -378,25 +379,6 @@ static void mmci_dma_setup(struct mmci_host *host) dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); - if (plat && plat->dma_filter) { - if (!host->dma_rx_channel && plat->dma_rx_param) { - host->dma_rx_channel = dma_request_channel(mask, - plat->dma_filter, - plat->dma_rx_param); - /* E.g if no DMA hardware is present */ - if (!host->dma_rx_channel) - dev_err(mmc_dev(host->mmc), "no RX DMA channel\n"); - } - - if (!host->dma_tx_channel && plat->dma_tx_param) { - host->dma_tx_channel = dma_request_channel(mask, - plat->dma_filter, - plat->dma_tx_param); - if (!host->dma_tx_channel) - dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n"); - } - } - /* * If only an RX channel is specified, the driver will * attempt to use it bidirectionally, however if it is @@ -444,11 +426,9 @@ static void mmci_dma_setup(struct mmci_host *host) */ static inline void mmci_dma_release(struct mmci_host *host) { - struct mmci_platform_data *plat = host->plat; - if (host->dma_rx_channel) dma_release_channel(host->dma_rx_channel); - if (host->dma_tx_channel && plat->dma_tx_param) + if (host->dma_tx_channel) 
dma_release_channel(host->dma_tx_channel); host->dma_rx_channel = host->dma_tx_channel = NULL; } @@ -1285,7 +1265,7 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) * indicating signal direction for the signals in * the SD/MMC bus and feedback-clock usage. */ - pwr |= host->plat->sigdir; + pwr |= host->pwr_reg_add; if (ios->bus_width == MMC_BUS_WIDTH_4) pwr &= ~MCI_ST_DATA74DIREN; @@ -1326,35 +1306,18 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) pm_runtime_put_autosuspend(mmc_dev(mmc)); } -static int mmci_get_ro(struct mmc_host *mmc) -{ - struct mmci_host *host = mmc_priv(mmc); - - if (host->gpio_wp == -ENOSYS) - return -ENOSYS; - - return gpio_get_value_cansleep(host->gpio_wp); -} - static int mmci_get_cd(struct mmc_host *mmc) { struct mmci_host *host = mmc_priv(mmc); struct mmci_platform_data *plat = host->plat; - unsigned int status; + unsigned int status = mmc_gpio_get_cd(mmc); - if (host->gpio_cd == -ENOSYS) { + if (status == -ENOSYS) { if (!plat->status) return 1; /* Assume always present */ status = plat->status(mmc_dev(host->mmc)); - } else - status = !!gpio_get_value_cansleep(host->gpio_cd) - ^ plat->cd_invert; - - /* - * Use positive logic throughout - status is zero for no card, - * non-zero for card inserted. - */ + } return status; } @@ -1391,70 +1354,44 @@ static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios) return ret; } -static irqreturn_t mmci_cd_irq(int irq, void *dev_id) -{ - struct mmci_host *host = dev_id; - - mmc_detect_change(host->mmc, msecs_to_jiffies(500)); - - return IRQ_HANDLED; -} - static struct mmc_host_ops mmci_ops = { .request = mmci_request, .pre_req = mmci_pre_request, .post_req = mmci_post_request, .set_ios = mmci_set_ios, - .get_ro = mmci_get_ro, + .get_ro = mmc_gpio_get_ro, .get_cd = mmci_get_cd, .start_signal_voltage_switch = mmci_sig_volt_switch, }; -#ifdef CONFIG_OF -static void mmci_dt_populate_generic_pdata(struct device_node *np, - struct mmci_platform_data *pdata) +static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc) { - int bus_width = 0; - - pdata->gpio_wp = of_get_named_gpio(np, "wp-gpios", 0); - pdata->gpio_cd = of_get_named_gpio(np, "cd-gpios", 0); + struct mmci_host *host = mmc_priv(mmc); + int ret = mmc_of_parse(mmc); - if (of_get_property(np, "cd-inverted", NULL)) - pdata->cd_invert = true; - else - pdata->cd_invert = false; + if (ret) + return ret; - of_property_read_u32(np, "max-frequency", &pdata->f_max); - if (!pdata->f_max) - pr_warn("%s has no 'max-frequency' property\n", np->full_name); + if (of_get_property(np, "st,sig-dir-dat0", NULL)) + host->pwr_reg_add |= MCI_ST_DATA0DIREN; + if (of_get_property(np, "st,sig-dir-dat2", NULL)) + host->pwr_reg_add |= MCI_ST_DATA2DIREN; + if (of_get_property(np, "st,sig-dir-dat31", NULL)) + host->pwr_reg_add |= MCI_ST_DATA31DIREN; + if (of_get_property(np, "st,sig-dir-dat74", NULL)) + host->pwr_reg_add |= MCI_ST_DATA74DIREN; + if (of_get_property(np, "st,sig-dir-cmd", NULL)) + host->pwr_reg_add |= MCI_ST_CMDDIREN; + if (of_get_property(np, "st,sig-pin-fbclk", NULL)) + host->pwr_reg_add |= MCI_ST_FBCLKEN; if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL)) - pdata->capabilities |= MMC_CAP_MMC_HIGHSPEED; + mmc->caps |= MMC_CAP_MMC_HIGHSPEED; if (of_get_property(np, "mmc-cap-sd-highspeed", NULL)) - pdata->capabilities |= MMC_CAP_SD_HIGHSPEED; + mmc->caps |= MMC_CAP_SD_HIGHSPEED; - of_property_read_u32(np, "bus-width", &bus_width); - switch (bus_width) { - case 0 : - /* No bus-width supplied. 
*/ - break; - case 4 : - pdata->capabilities |= MMC_CAP_4_BIT_DATA; - break; - case 8 : - pdata->capabilities |= MMC_CAP_8_BIT_DATA; - break; - default : - pr_warn("%s: Unsupported bus width\n", np->full_name); - } -} -#else -static void mmci_dt_populate_generic_pdata(struct device_node *np, - struct mmci_platform_data *pdata) -{ - return; + return 0; } -#endif static int mmci_probe(struct amba_device *dev, const struct amba_id *id) @@ -1478,26 +1415,17 @@ static int mmci_probe(struct amba_device *dev, return -ENOMEM; } - if (np) - mmci_dt_populate_generic_pdata(np, plat); + mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev); + if (!mmc) + return -ENOMEM; - ret = amba_request_regions(dev, DRIVER_NAME); + ret = mmci_of_parse(np, mmc); if (ret) - goto out; - - mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev); - if (!mmc) { - ret = -ENOMEM; - goto rel_regions; - } + goto host_free; host = mmc_priv(mmc); host->mmc = mmc; - host->gpio_wp = -ENOSYS; - host->gpio_cd = -ENOSYS; - host->gpio_cd_irq = -1; - host->hw_designer = amba_manf(dev); host->hw_revision = amba_rev(dev); dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer); @@ -1529,10 +1457,11 @@ static int mmci_probe(struct amba_device *dev, dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n", host->mclk); } + host->phybase = dev->res.start; - host->base = ioremap(dev->res.start, resource_size(&dev->res)); - if (!host->base) { - ret = -ENOMEM; + host->base = devm_ioremap_resource(&dev->dev, &dev->res); + if (IS_ERR(host->base)) { + ret = PTR_ERR(host->base); goto clk_disable; } @@ -1546,15 +1475,13 @@ static int mmci_probe(struct amba_device *dev, else mmc->f_min = DIV_ROUND_UP(host->mclk, 512); /* - * If the platform data supplies a maximum operating - * frequency, this takes precedence. Else, we fall back - * to using the module parameter, which has a (low) - * default value in case it is not specified. Either - * value must not exceed the clock rate into the block, - * of course. + * If no maximum operating frequency is supplied, fall back to use + * the module parameter, which has a (low) default value in case it + * is not specified. Either value must not exceed the clock rate into + * the block, of course. */ - if (plat->f_max) - mmc->f_max = min(host->mclk, plat->f_max); + if (mmc->f_max) + mmc->f_max = min(host->mclk, mmc->f_max); else mmc->f_max = min(host->mclk, fmax); dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max); @@ -1566,8 +1493,15 @@ static int mmci_probe(struct amba_device *dev, else if (plat->ocr_mask) dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n"); - mmc->caps = plat->capabilities; - mmc->caps2 = plat->capabilities2; + /* DT takes precedence over platform data. */ + if (!np) { + if (!plat->cd_invert) + mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH; + mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; + } + + /* We support these capabilities. */ + mmc->caps |= MMC_CAP_CMD23; if (variant->busy_detect) { mmci_ops.card_busy = mmci_card_busy; @@ -1579,7 +1513,7 @@ static int mmci_probe(struct amba_device *dev, mmc->ops = &mmci_ops; /* We support these PM capabilities. 
*/ - mmc->pm_caps = MMC_PM_KEEP_POWER; + mmc->pm_caps |= MMC_PM_KEEP_POWER; /* * We can do SGIO @@ -1616,62 +1550,30 @@ static int mmci_probe(struct amba_device *dev, writel(0, host->base + MMCIMASK1); writel(0xfff, host->base + MMCICLEAR); - if (plat->gpio_cd == -EPROBE_DEFER) { - ret = -EPROBE_DEFER; - goto err_gpio_cd; - } - if (gpio_is_valid(plat->gpio_cd)) { - ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)"); - if (ret == 0) - ret = gpio_direction_input(plat->gpio_cd); - if (ret == 0) - host->gpio_cd = plat->gpio_cd; - else if (ret != -ENOSYS) - goto err_gpio_cd; - - /* - * A gpio pin that will detect cards when inserted and removed - * will most likely want to trigger on the edges if it is - * 0 when ejected and 1 when inserted (or mutatis mutandis - * for the inverted case) so we request triggers on both - * edges. - */ - ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd), - mmci_cd_irq, - IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, - DRIVER_NAME " (cd)", host); - if (ret >= 0) - host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd); - } - if (plat->gpio_wp == -EPROBE_DEFER) { - ret = -EPROBE_DEFER; - goto err_gpio_wp; + /* If DT, cd/wp gpios must be supplied through it. */ + if (!np && gpio_is_valid(plat->gpio_cd)) { + ret = mmc_gpio_request_cd(mmc, plat->gpio_cd, 0); + if (ret) + goto clk_disable; } - if (gpio_is_valid(plat->gpio_wp)) { - ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)"); - if (ret == 0) - ret = gpio_direction_input(plat->gpio_wp); - if (ret == 0) - host->gpio_wp = plat->gpio_wp; - else if (ret != -ENOSYS) - goto err_gpio_wp; + if (!np && gpio_is_valid(plat->gpio_wp)) { + ret = mmc_gpio_request_ro(mmc, plat->gpio_wp); + if (ret) + goto clk_disable; } - if ((host->plat->status || host->gpio_cd != -ENOSYS) - && host->gpio_cd_irq < 0) - mmc->caps |= MMC_CAP_NEEDS_POLL; - - ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host); + ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED, + DRIVER_NAME " (cmd)", host); if (ret) - goto unmap; + goto clk_disable; if (!dev->irq[1]) host->singleirq = true; else { - ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, - DRIVER_NAME " (pio)", host); + ret = devm_request_irq(&dev->dev, dev->irq[1], mmci_pio_irq, + IRQF_SHARED, DRIVER_NAME " (pio)", host); if (ret) - goto irq0_free; + goto clk_disable; } writel(MCI_IRQENABLE, host->base + MMCIMASK0); @@ -1693,25 +1595,10 @@ static int mmci_probe(struct amba_device *dev, return 0; - irq0_free: - free_irq(dev->irq[0], host); - unmap: - if (host->gpio_wp != -ENOSYS) - gpio_free(host->gpio_wp); - err_gpio_wp: - if (host->gpio_cd_irq >= 0) - free_irq(host->gpio_cd_irq, host); - if (host->gpio_cd != -ENOSYS) - gpio_free(host->gpio_cd); - err_gpio_cd: - iounmap(host->base); clk_disable: clk_disable_unprepare(host->clk); host_free: mmc_free_host(mmc); - rel_regions: - amba_release_regions(dev); - out: return ret; } @@ -1737,92 +1624,46 @@ static int mmci_remove(struct amba_device *dev) writel(0, host->base + MMCIDATACTRL); mmci_dma_release(host); - free_irq(dev->irq[0], host); - if (!host->singleirq) - free_irq(dev->irq[1], host); - - if (host->gpio_wp != -ENOSYS) - gpio_free(host->gpio_wp); - if (host->gpio_cd_irq >= 0) - free_irq(host->gpio_cd_irq, host); - if (host->gpio_cd != -ENOSYS) - gpio_free(host->gpio_cd); - - iounmap(host->base); clk_disable_unprepare(host->clk); - mmc_free_host(mmc); - - amba_release_regions(dev); - } - - return 0; -} - -#ifdef CONFIG_SUSPEND -static int mmci_suspend(struct device *dev) -{ - struct 
amba_device *adev = to_amba_device(dev); - struct mmc_host *mmc = amba_get_drvdata(adev); - - if (mmc) { - struct mmci_host *host = mmc_priv(mmc); - pm_runtime_get_sync(dev); - writel(0, host->base + MMCIMASK0); } return 0; } -static int mmci_resume(struct device *dev) -{ - struct amba_device *adev = to_amba_device(dev); - struct mmc_host *mmc = amba_get_drvdata(adev); - - if (mmc) { - struct mmci_host *host = mmc_priv(mmc); - writel(MCI_IRQENABLE, host->base + MMCIMASK0); - pm_runtime_put(dev); - } - - return 0; -} -#endif - -#ifdef CONFIG_PM_RUNTIME +#ifdef CONFIG_PM static void mmci_save(struct mmci_host *host) { unsigned long flags; - if (host->variant->pwrreg_nopower) { - spin_lock_irqsave(&host->lock, flags); + spin_lock_irqsave(&host->lock, flags); - writel(0, host->base + MMCIMASK0); + writel(0, host->base + MMCIMASK0); + if (host->variant->pwrreg_nopower) { writel(0, host->base + MMCIDATACTRL); writel(0, host->base + MMCIPOWER); writel(0, host->base + MMCICLOCK); - mmci_reg_delay(host); - - spin_unlock_irqrestore(&host->lock, flags); } + mmci_reg_delay(host); + spin_unlock_irqrestore(&host->lock, flags); } static void mmci_restore(struct mmci_host *host) { unsigned long flags; - if (host->variant->pwrreg_nopower) { - spin_lock_irqsave(&host->lock, flags); + spin_lock_irqsave(&host->lock, flags); + if (host->variant->pwrreg_nopower) { writel(host->clk_reg, host->base + MMCICLOCK); writel(host->datactrl_reg, host->base + MMCIDATACTRL); writel(host->pwr_reg, host->base + MMCIPOWER); - writel(MCI_IRQENABLE, host->base + MMCIMASK0); - mmci_reg_delay(host); - - spin_unlock_irqrestore(&host->lock, flags); } + writel(MCI_IRQENABLE, host->base + MMCIMASK0); + mmci_reg_delay(host); + + spin_unlock_irqrestore(&host->lock, flags); } static int mmci_runtime_suspend(struct device *dev) @@ -1857,8 +1698,9 @@ static int mmci_runtime_resume(struct device *dev) #endif static const struct dev_pm_ops mmci_dev_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(mmci_suspend, mmci_resume) - SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_PM_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL) }; static struct amba_id mmci_ids[] = { diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h index 58b1b8896bf2..347d942d740b 100644 --- a/drivers/mmc/host/mmci.h +++ b/drivers/mmc/host/mmci.h @@ -13,6 +13,16 @@ #define MCI_PWR_ON 0x03 #define MCI_OD (1 << 6) #define MCI_ROD (1 << 7) +/* + * The ST Micro version does not have ROD and reuses the voltage registers for + * direction settings. 
+ */ +#define MCI_ST_DATA2DIREN (1 << 2) +#define MCI_ST_CMDDIREN (1 << 3) +#define MCI_ST_DATA0DIREN (1 << 4) +#define MCI_ST_DATA31DIREN (1 << 5) +#define MCI_ST_FBCLKEN (1 << 7) +#define MCI_ST_DATA74DIREN (1 << 8) #define MMCICLOCK 0x004 #define MCI_CLK_ENABLE (1 << 8) @@ -176,9 +186,6 @@ struct mmci_host { struct mmc_data *data; struct mmc_host *mmc; struct clk *clk; - int gpio_cd; - int gpio_wp; - int gpio_cd_irq; bool singleirq; spinlock_t lock; @@ -186,6 +193,7 @@ struct mmci_host { unsigned int mclk; unsigned int cclk; u32 pwr_reg; + u32 pwr_reg_add; u32 clk_reg; u32 datactrl_reg; u32 busy_status; diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index 0b2ccb68c0d0..4dbfaee9aa95 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c @@ -82,8 +82,7 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr, block = blk_rq_pos(req) << 9 >> tr->blkshift; nsect = blk_rq_cur_bytes(req) >> tr->blkshift; - - buf = req->buffer; + buf = bio_data(req->bio); if (req->cmd_type != REQ_TYPE_FS) return -EIO; diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c index 8d659e6a1b4c..20a667c95da4 100644 --- a/drivers/mtd/ubi/block.c +++ b/drivers/mtd/ubi/block.c @@ -253,7 +253,7 @@ static int do_ubiblock_request(struct ubiblock *dev, struct request *req) * flash access anyway. */ mutex_lock(&dev->dev_mutex); - ret = ubiblock_read(dev, req->buffer, sec, len); + ret = ubiblock_read(dev, bio_data(req->bio), sec, len); mutex_unlock(&dev->dev_mutex); return ret; diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 9f69e818b000..93580a47cc54 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -82,7 +82,8 @@ static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb) } /* Forward declaration */ -static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]); +static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[], + bool strict_match); static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp); static void rlb_src_unlink(struct bonding *bond, u32 index); static void rlb_src_link(struct bonding *bond, u32 ip_src_hash, @@ -459,7 +460,7 @@ static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[]) bond->alb_info.rlb_promisc_timeout_counter = 0; - alb_send_learning_packets(bond->curr_active_slave, addr); + alb_send_learning_packets(bond->curr_active_slave, addr, true); } /* slave being removed should not be active at this point @@ -995,7 +996,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id) /*********************** tlb/rlb shared functions *********************/ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[], - u16 vid) + __be16 vlan_proto, u16 vid) { struct learning_pkt pkt; struct sk_buff *skb; @@ -1021,7 +1022,7 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[], skb->dev = slave->dev; if (vid) { - skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vid); + skb = vlan_put_tag(skb, vlan_proto, vid); if (!skb) { pr_err("%s: Error: failed to insert VLAN tag\n", slave->bond->dev->name); @@ -1032,22 +1033,32 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[], dev_queue_xmit(skb); } - -static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]) +static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[], + bool strict_match) { struct bonding *bond = bond_get_bond_by_slave(slave); struct net_device *upper; struct list_head *iter; /* send untagged */ 
- alb_send_lp_vid(slave, mac_addr, 0); + alb_send_lp_vid(slave, mac_addr, 0, 0); /* loop through vlans and send one packet for each */ rcu_read_lock(); netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) { - if (upper->priv_flags & IFF_802_1Q_VLAN) - alb_send_lp_vid(slave, mac_addr, - vlan_dev_vlan_id(upper)); + if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) { + if (strict_match && + ether_addr_equal_64bits(mac_addr, + upper->dev_addr)) { + alb_send_lp_vid(slave, mac_addr, + vlan_dev_vlan_proto(upper), + vlan_dev_vlan_id(upper)); + } else if (!strict_match) { + alb_send_lp_vid(slave, upper->dev_addr, + vlan_dev_vlan_proto(upper), + vlan_dev_vlan_id(upper)); + } + } } rcu_read_unlock(); } @@ -1107,7 +1118,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1, /* fasten the change in the switch */ if (SLAVE_IS_OK(slave1)) { - alb_send_learning_packets(slave1, slave1->dev->dev_addr); + alb_send_learning_packets(slave1, slave1->dev->dev_addr, false); if (bond->alb_info.rlb_enabled) { /* inform the clients that the mac address * has changed @@ -1119,7 +1130,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1, } if (SLAVE_IS_OK(slave2)) { - alb_send_learning_packets(slave2, slave2->dev->dev_addr); + alb_send_learning_packets(slave2, slave2->dev->dev_addr, false); if (bond->alb_info.rlb_enabled) { /* inform the clients that the mac address * has changed @@ -1490,6 +1501,8 @@ void bond_alb_monitor(struct work_struct *work) /* send learning packets */ if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) { + bool strict_match; + /* change of curr_active_slave involves swapping of mac addresses. * in order to avoid this swapping from happening while * sending the learning packets, the curr_slave_lock must be held for */ read_lock(&bond->curr_slave_lock); - bond_for_each_slave_rcu(bond, slave, iter) - alb_send_learning_packets(slave, slave->dev->dev_addr); + bond_for_each_slave_rcu(bond, slave, iter) { + /* If updating current_active, use all currently + * in-use mac addresses (!strict_match). Otherwise, only + * use the mac of the slave device.
+ */ + strict_match = (slave != bond->curr_active_slave); + alb_send_learning_packets(slave, slave->dev->dev_addr, + strict_match); + } read_unlock(&bond->curr_slave_lock); @@ -1721,7 +1741,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave } else { /* set the new_slave to the bond mac address */ alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr); - alb_send_learning_packets(new_slave, bond->dev->dev_addr); + alb_send_learning_packets(new_slave, bond->dev->dev_addr, + false); } write_lock_bh(&bond->curr_slave_lock); @@ -1764,7 +1785,8 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr) alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr); read_lock(&bond->lock); - alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr); + alb_send_learning_packets(bond->curr_active_slave, + bond_dev->dev_addr, false); if (bond->alb_info.rlb_enabled) { /* inform clients mac address has changed */ rlb_req_update_slave_clients(bond, bond->curr_active_slave); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 69aff72c8957..d3a67896d435 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2126,10 +2126,10 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip) */ static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_ip, __be32 src_ip, - struct bond_vlan_tag *inner, - struct bond_vlan_tag *outer) + struct bond_vlan_tag *tags) { struct sk_buff *skb; + int i; pr_debug("arp %d on slave %s: dst %pI4 src %pI4\n", arp_op, slave_dev->name, &dest_ip, &src_ip); @@ -2141,21 +2141,26 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, net_err_ratelimited("ARP packet allocation failed\n"); return; } - if (outer->vlan_id) { - if (inner->vlan_id) { - pr_debug("inner tag: proto %X vid %X\n", - ntohs(inner->vlan_proto), inner->vlan_id); - skb = __vlan_put_tag(skb, inner->vlan_proto, - inner->vlan_id); - if (!skb) { - net_err_ratelimited("failed to insert inner VLAN tag\n"); - return; - } - } - pr_debug("outer reg: proto %X vid %X\n", - ntohs(outer->vlan_proto), outer->vlan_id); - skb = vlan_put_tag(skb, outer->vlan_proto, outer->vlan_id); + /* Go through all the tags backwards and add them to the packet */ + for (i = BOND_MAX_VLAN_ENCAP - 1; i > 0; i--) { + if (!tags[i].vlan_id) + continue; + + pr_debug("inner tag: proto %X vid %X\n", + ntohs(tags[i].vlan_proto), tags[i].vlan_id); + skb = __vlan_put_tag(skb, tags[i].vlan_proto, + tags[i].vlan_id); + if (!skb) { + net_err_ratelimited("failed to insert inner VLAN tag\n"); + return; + } + } + /* Set the outer tag */ + if (tags[0].vlan_id) { + pr_debug("outer tag: proto %X vid %X\n", + ntohs(tags[0].vlan_proto), tags[0].vlan_id); + skb = vlan_put_tag(skb, tags[0].vlan_proto, tags[0].vlan_id); if (!skb) { net_err_ratelimited("failed to insert outer VLAN tag\n"); return; @@ -2164,22 +2169,52 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, arp_xmit(skb); } +/* Validate the device path between the @start_dev and the @end_dev. + * The path is valid if the @end_dev is reachable through device + * stacking. + * When the path is validated, collect any vlan information in the + * path. 
+ */ +static bool bond_verify_device_path(struct net_device *start_dev, + struct net_device *end_dev, + struct bond_vlan_tag *tags) +{ + struct net_device *upper; + struct list_head *iter; + int idx; + + if (start_dev == end_dev) + return true; + + netdev_for_each_upper_dev_rcu(start_dev, upper, iter) { + if (bond_verify_device_path(upper, end_dev, tags)) { + if (is_vlan_dev(upper)) { + idx = vlan_get_encap_level(upper); + if (idx >= BOND_MAX_VLAN_ENCAP) + return false; + + tags[idx].vlan_proto = + vlan_dev_vlan_proto(upper); + tags[idx].vlan_id = vlan_dev_vlan_id(upper); + } + return true; + } + } + + return false; +} static void bond_arp_send_all(struct bonding *bond, struct slave *slave) { - struct net_device *upper, *vlan_upper; - struct list_head *iter, *vlan_iter; struct rtable *rt; - struct bond_vlan_tag inner, outer; + struct bond_vlan_tag tags[BOND_MAX_VLAN_ENCAP]; __be32 *targets = bond->params.arp_targets, addr; int i; + bool ret; for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) { pr_debug("basa: target %pI4\n", &targets[i]); - inner.vlan_proto = 0; - inner.vlan_id = 0; - outer.vlan_proto = 0; - outer.vlan_id = 0; + memset(tags, 0, sizeof(tags)); /* Find out through which dev should the packet go */ rt = ip_route_output(dev_net(bond->dev), targets[i], 0, @@ -2192,7 +2227,8 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave) net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n", bond->dev->name, &targets[i]); - bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 0, &inner, &outer); + bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], + 0, tags); continue; } @@ -2201,52 +2237,12 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave) goto found; rcu_read_lock(); - /* first we search only for vlan devices. for every vlan - * found we verify its upper dev list, searching for the - * rt->dst.dev. If found we save the tag of the vlan and - * proceed to send the packet. - */ - netdev_for_each_all_upper_dev_rcu(bond->dev, vlan_upper, - vlan_iter) { - if (!is_vlan_dev(vlan_upper)) - continue; - - if (vlan_upper == rt->dst.dev) { - outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper); - outer.vlan_id = vlan_dev_vlan_id(vlan_upper); - rcu_read_unlock(); - goto found; - } - netdev_for_each_all_upper_dev_rcu(vlan_upper, upper, - iter) { - if (upper == rt->dst.dev) { - /* If the upper dev is a vlan dev too, - * set the vlan tag to inner tag. 
- */ - if (is_vlan_dev(upper)) { - inner.vlan_proto = vlan_dev_vlan_proto(upper); - inner.vlan_id = vlan_dev_vlan_id(upper); - } - outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper); - outer.vlan_id = vlan_dev_vlan_id(vlan_upper); - rcu_read_unlock(); - goto found; - } - } - } - - /* if the device we're looking for is not on top of any of - * our upper vlans, then just search for any dev that - * matches, and in case it's a vlan - save the id - */ - netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) { - if (upper == rt->dst.dev) { - rcu_read_unlock(); - goto found; - } - } + ret = bond_verify_device_path(bond->dev, rt->dst.dev, tags); rcu_read_unlock(); + if (ret) + goto found; + /* Not our device - skip */ pr_debug("%s: no path to arp_ip_target %pI4 via rt.dev %s\n", bond->dev->name, &targets[i], @@ -2259,7 +2255,7 @@ found: addr = bond_confirm_addr(rt->dst.dev, targets[i], 0); ip_rt_put(rt); bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], - addr, &inner, &outer); + addr, tags); } } diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 724e30fa20b9..832070298446 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -125,6 +125,7 @@ static const struct bond_opt_value bond_fail_over_mac_tbl[] = { static const struct bond_opt_value bond_intmax_tbl[] = { { "off", 0, BOND_VALFLAG_DEFAULT}, { "maxval", INT_MAX, BOND_VALFLAG_MAX}, + { NULL, -1, 0} }; static const struct bond_opt_value bond_lacp_rate_tbl[] = { diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index b8bdd0acc8f3..00bea320e3b5 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h @@ -36,6 +36,7 @@ #define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n" +#define BOND_MAX_VLAN_ENCAP 2 #define BOND_MAX_ARP_TARGETS 16 #define BOND_DEFAULT_MIIMON 100 diff --git a/drivers/net/can/c_can/Kconfig b/drivers/net/can/c_can/Kconfig index 8ab7103d4f44..61ffc12d8fd8 100644 --- a/drivers/net/can/c_can/Kconfig +++ b/drivers/net/can/c_can/Kconfig @@ -14,13 +14,6 @@ config CAN_C_CAN_PLATFORM SPEAr1310 and SPEAr320 evaluation boards & TI (www.ti.com) boards like am335x, dm814x, dm813x and dm811x. -config CAN_C_CAN_STRICT_FRAME_ORDERING - bool "Force a strict RX CAN frame order (may cause frame loss)" - ---help--- - The RX split buffer prevents packet reordering but can cause packet - loss. Only enable this option when you accept to lose CAN frames - in favour of getting the received CAN frames in the correct order. 
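(Stepping back to the bonding hunks above: bond_verify_device_path() fills a fixed bond_vlan_tag array, recording each VLAN device at index vlan_get_encap_level(), and bond_arp_send() then pushes tags from the highest index down so that tags[0] ends up as the outer header on the wire. A minimal standalone sketch of that ordering follows; the types and values are toy stand-ins, not the kernel's.)

/* Toy model of the tags[] layout used by the bonding changes above. */
#include <stdio.h>

#define MAX_VLAN_ENCAP 2	/* mirrors BOND_MAX_VLAN_ENCAP */

struct toy_tag {
	unsigned short proto;	/* e.g. 0x8100 for 802.1Q */
	unsigned short vid;	/* 0 means "slot unused" */
};

/* Push inner tags first (highest index down to 1), outer tag last,
 * exactly the order the rewritten bond_arp_send() uses above.
 */
static void push_tags(const struct toy_tag *tags)
{
	int i;

	for (i = MAX_VLAN_ENCAP - 1; i > 0; i--)
		if (tags[i].vid)
			printf("inner tag: proto %x vid %u\n",
			       (unsigned)tags[i].proto, (unsigned)tags[i].vid);
	if (tags[0].vid)
		printf("outer tag: proto %x vid %u\n",
		       (unsigned)tags[0].proto, (unsigned)tags[0].vid);
}

int main(void)
{
	/* bond -> vlan 10 (encap level 0) -> vlan 20 (encap level 1) */
	struct toy_tag tags[MAX_VLAN_ENCAP] = {
		{ 0x8100, 10 },	/* level 0: outermost header */
		{ 0x8100, 20 },	/* level 1: inner header */
	};

	push_tags(tags);
	return 0;
}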
- config CAN_C_CAN_PCI tristate "Generic PCI Bus based C_CAN/D_CAN driver" depends on PCI diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index a2ca820b5373..95e04e2002da 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c @@ -732,26 +732,12 @@ static u32 c_can_adjust_pending(u32 pend) static inline void c_can_rx_object_get(struct net_device *dev, struct c_can_priv *priv, u32 obj) { -#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING - if (obj < C_CAN_MSG_RX_LOW_LAST) - c_can_object_get(dev, IF_RX, obj, IF_COMM_RCV_LOW); - else -#endif c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high); } static inline void c_can_rx_finalize(struct net_device *dev, struct c_can_priv *priv, u32 obj) { -#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING - if (obj < C_CAN_MSG_RX_LOW_LAST) - priv->rxmasked |= BIT(obj - 1); - else if (obj == C_CAN_MSG_RX_LOW_LAST) { - priv->rxmasked = 0; - /* activate all lower message objects */ - c_can_activate_all_lower_rx_msg_obj(dev, IF_RX); - } -#endif if (priv->type != BOSCH_D_CAN) c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT); } @@ -799,9 +785,6 @@ static inline u32 c_can_get_pending(struct c_can_priv *priv) { u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG); -#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING - pend &= ~priv->rxmasked; -#endif return pend; } @@ -814,25 +797,6 @@ static inline u32 c_can_get_pending(struct c_can_priv *priv) * has arrived. To work-around this issue, we keep two groups of message * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT. * - * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = y - * - * To ensure in-order frame reception we use the following - * approach while re-activating a message object to receive further - * frames: - * - if the current message object number is lower than - * C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing - * the INTPND bit. - * - if the current message object number is equal to - * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower - * receive message objects. - * - if the current message object number is greater than - * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of - * only this message object. - * - * This can cause packet loss! - * - * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = n - * * We clear the newdat bit right away. * * This can result in packet reordering when the readout is slow. 
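(On the c_can change just above: with the strict-ordering option removed, the driver always clears NEWDAT as soon as an object is read and services pending objects lowest-numbered first; the retained comment warns this can reorder frames when readout is slow. A toy standalone sketch of that readout loop, with the register modelled as a plain variable rather than the driver's real accessors:)

#include <stdint.h>
#include <stdio.h>

/* Pretend NEWDAT register: bit n-1 set means message object n has data */
static uint32_t newdat = 0x0000000a;	/* objects 2 and 4 pending */

static void read_object(int obj)
{
	printf("read message object %d\n", obj);
	newdat &= ~(1u << (obj - 1));	/* clear NEWDAT right away */
}

int main(void)
{
	uint32_t pend = newdat;

	/* Service pending objects lowest-numbered first.  Because NEWDAT
	 * is cleared immediately, a newer frame can land in a low object
	 * while a higher (older) one is still unserviced - the possible
	 * reordering the removed Kconfig text described.
	 */
	while (pend) {
		int obj = __builtin_ffs((int)pend);	/* 1-based lowest set bit */

		read_object(obj);
		pend &= ~(1u << (obj - 1));
	}
	return 0;
}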
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c index c540e3d12e3d..564933ae218c 100644 --- a/drivers/net/can/sja1000/peak_pci.c +++ b/drivers/net/can/sja1000/peak_pci.c @@ -551,7 +551,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct sja1000_priv *priv; struct peak_pci_chan *chan; - struct net_device *dev; + struct net_device *dev, *prev_dev; void __iomem *cfg_base, *reg_base; u16 sub_sys_id, icr; int i, err, channels; @@ -688,11 +688,13 @@ failure_remove_channels: writew(0x0, cfg_base + PITA_ICR + 2); chan = NULL; - for (dev = pci_get_drvdata(pdev); dev; dev = chan->prev_dev) { - unregister_sja1000dev(dev); - free_sja1000dev(dev); + for (dev = pci_get_drvdata(pdev); dev; dev = prev_dev) { priv = netdev_priv(dev); chan = priv->priv; + prev_dev = chan->prev_dev; + + unregister_sja1000dev(dev); + free_sja1000dev(dev); } /* free any PCIeC resources too */ @@ -726,10 +728,12 @@ static void peak_pci_remove(struct pci_dev *pdev) /* Loop over all registered devices */ while (1) { + struct net_device *prev_dev = chan->prev_dev; + dev_info(&pdev->dev, "removing device %s\n", dev->name); unregister_sja1000dev(dev); free_sja1000dev(dev); - dev = chan->prev_dev; + dev = prev_dev; if (!dev) { /* do that only for first channel */ diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 39b26fe28d10..d7401017a3f1 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -35,6 +35,18 @@ source "drivers/net/ethernet/calxeda/Kconfig" source "drivers/net/ethernet/chelsio/Kconfig" source "drivers/net/ethernet/cirrus/Kconfig" source "drivers/net/ethernet/cisco/Kconfig" + +config CX_ECAT + tristate "Beckhoff CX5020 EtherCAT master support" + depends on PCI + ---help--- + Driver for the EtherCAT master module located on the CCAT FPGA + that can be found on the Beckhoff CX5020 and possibly other + Beckhoff CX series industrial PCs. + + To compile this driver as a module, choose M here. The module + will be called ec_bhf.
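(The peak_pci hunks above are a use-after-free fix: the old unwind loops evaluated chan->prev_dev after free_sja1000dev(dev) had already released the memory that chan points into. The shape of the fix is the classic "save the back-pointer before freeing" pattern; a minimal sketch with hypothetical stand-in types:)

#include <stdlib.h>

struct node {
	struct node *prev;	/* stand-in for chan->prev_dev */
};

static void release(struct node *n)
{
	free(n);	/* stand-in for unregister/free_sja1000dev() */
}

static void teardown(struct node *last)
{
	struct node *n = last;

	while (n) {
		/* Buggy pattern: reading n->prev after release(n) touches
		 * freed memory.  Save the back-pointer first, then free.
		 */
		struct node *prev = n->prev;

		release(n);
		n = prev;
	}
}

int main(void)
{
	struct node *a = calloc(1, sizeof(*a));
	struct node *b = calloc(1, sizeof(*b));

	if (a && b)
		b->prev = a;	/* chain: b -> a */
	teardown(b);
	return 0;
}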
+ source "drivers/net/ethernet/davicom/Kconfig" config DNET diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 545d0b3b9cb4..35190e36c456 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -21,6 +21,7 @@ obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/ obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/ obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/ obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/ +obj-$(CONFIG_CX_ECAT) += ec_bhf.o obj-$(CONFIG_DM9000) += davicom/ obj-$(CONFIG_DNET) += dnet.o obj-$(CONFIG_NET_VENDOR_DEC) += dec/ diff --git a/drivers/net/ethernet/altera/Makefile b/drivers/net/ethernet/altera/Makefile index d4a187e45369..3eff2fd3997e 100644 --- a/drivers/net/ethernet/altera/Makefile +++ b/drivers/net/ethernet/altera/Makefile @@ -5,3 +5,4 @@ obj-$(CONFIG_ALTERA_TSE) += altera_tse.o altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \ altera_msgdma.o altera_sgdma.o altera_utils.o +ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c index 4d1f2fdd5c32..0fb986ba3290 100644 --- a/drivers/net/ethernet/altera/altera_msgdma.c +++ b/drivers/net/ethernet/altera/altera_msgdma.c @@ -37,18 +37,16 @@ void msgdma_start_rxdma(struct altera_tse_private *priv) void msgdma_reset(struct altera_tse_private *priv) { int counter; - struct msgdma_csr *txcsr = - (struct msgdma_csr *)priv->tx_dma_csr; - struct msgdma_csr *rxcsr = - (struct msgdma_csr *)priv->rx_dma_csr; /* Reset Rx mSGDMA */ - iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status); - iowrite32(MSGDMA_CSR_CTL_RESET, &rxcsr->control); + csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr, + msgdma_csroffs(status)); + csrwr32(MSGDMA_CSR_CTL_RESET, priv->rx_dma_csr, + msgdma_csroffs(control)); counter = 0; while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { - if (tse_bit_is_clear(&rxcsr->status, + if (tse_bit_is_clear(priv->rx_dma_csr, msgdma_csroffs(status), MSGDMA_CSR_STAT_RESETTING)) break; udelay(1); @@ -59,15 +57,18 @@ void msgdma_reset(struct altera_tse_private *priv) "TSE Rx mSGDMA resetting bit never cleared!\n"); /* clear all status bits */ - iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status); + csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr, msgdma_csroffs(status)); /* Reset Tx mSGDMA */ - iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status); - iowrite32(MSGDMA_CSR_CTL_RESET, &txcsr->control); + csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr, + msgdma_csroffs(status)); + + csrwr32(MSGDMA_CSR_CTL_RESET, priv->tx_dma_csr, + msgdma_csroffs(control)); counter = 0; while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { - if (tse_bit_is_clear(&txcsr->status, + if (tse_bit_is_clear(priv->tx_dma_csr, msgdma_csroffs(status), MSGDMA_CSR_STAT_RESETTING)) break; udelay(1); @@ -78,58 +79,58 @@ void msgdma_reset(struct altera_tse_private *priv) "TSE Tx mSGDMA resetting bit never cleared!\n"); /* clear all status bits */ - iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status); + csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr, msgdma_csroffs(status)); } void msgdma_disable_rxirq(struct altera_tse_private *priv) { - struct msgdma_csr *csr = priv->rx_dma_csr; - tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); + tse_clear_bit(priv->rx_dma_csr, msgdma_csroffs(control), + MSGDMA_CSR_CTL_GLOBAL_INTR); } void msgdma_enable_rxirq(struct altera_tse_private *priv) { - struct msgdma_csr *csr = priv->rx_dma_csr; - tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); + tse_set_bit(priv->rx_dma_csr, msgdma_csroffs(control), + 
MSGDMA_CSR_CTL_GLOBAL_INTR); } void msgdma_disable_txirq(struct altera_tse_private *priv) { - struct msgdma_csr *csr = priv->tx_dma_csr; - tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); + tse_clear_bit(priv->tx_dma_csr, msgdma_csroffs(control), + MSGDMA_CSR_CTL_GLOBAL_INTR); } void msgdma_enable_txirq(struct altera_tse_private *priv) { - struct msgdma_csr *csr = priv->tx_dma_csr; - tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); + tse_set_bit(priv->tx_dma_csr, msgdma_csroffs(control), + MSGDMA_CSR_CTL_GLOBAL_INTR); } void msgdma_clear_rxirq(struct altera_tse_private *priv) { - struct msgdma_csr *csr = priv->rx_dma_csr; - iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status); + csrwr32(MSGDMA_CSR_STAT_IRQ, priv->rx_dma_csr, msgdma_csroffs(status)); } void msgdma_clear_txirq(struct altera_tse_private *priv) { - struct msgdma_csr *csr = priv->tx_dma_csr; - iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status); + csrwr32(MSGDMA_CSR_STAT_IRQ, priv->tx_dma_csr, msgdma_csroffs(status)); } /* return 0 to indicate transmit is pending */ int msgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) { - struct msgdma_extended_desc *desc = priv->tx_dma_desc; - - iowrite32(lower_32_bits(buffer->dma_addr), &desc->read_addr_lo); - iowrite32(upper_32_bits(buffer->dma_addr), &desc->read_addr_hi); - iowrite32(0, &desc->write_addr_lo); - iowrite32(0, &desc->write_addr_hi); - iowrite32(buffer->len, &desc->len); - iowrite32(0, &desc->burst_seq_num); - iowrite32(MSGDMA_DESC_TX_STRIDE, &desc->stride); - iowrite32(MSGDMA_DESC_CTL_TX_SINGLE, &desc->control); + csrwr32(lower_32_bits(buffer->dma_addr), priv->tx_dma_desc, + msgdma_descroffs(read_addr_lo)); + csrwr32(upper_32_bits(buffer->dma_addr), priv->tx_dma_desc, + msgdma_descroffs(read_addr_hi)); + csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_lo)); + csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_hi)); + csrwr32(buffer->len, priv->tx_dma_desc, msgdma_descroffs(len)); + csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(burst_seq_num)); + csrwr32(MSGDMA_DESC_TX_STRIDE, priv->tx_dma_desc, + msgdma_descroffs(stride)); + csrwr32(MSGDMA_DESC_CTL_TX_SINGLE, priv->tx_dma_desc, + msgdma_descroffs(control)); return 0; } @@ -138,17 +139,16 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv) u32 ready = 0; u32 inuse; u32 status; - struct msgdma_csr *txcsr = - (struct msgdma_csr *)priv->tx_dma_csr; /* Get number of sent descriptors */ - inuse = ioread32(&txcsr->rw_fill_level) & 0xffff; + inuse = csrrd32(priv->tx_dma_csr, msgdma_csroffs(rw_fill_level)) + & 0xffff; if (inuse) { /* Tx FIFO is not empty */ ready = priv->tx_prod - priv->tx_cons - inuse - 1; } else { /* Check for buffered last packet */ - status = ioread32(&txcsr->status); + status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status)); if (status & MSGDMA_CSR_STAT_BUSY) ready = priv->tx_prod - priv->tx_cons - 1; else @@ -162,7 +162,6 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv) void msgdma_add_rx_desc(struct altera_tse_private *priv, struct tse_buffer *rxbuffer) { - struct msgdma_extended_desc *desc = priv->rx_dma_desc; u32 len = priv->rx_dma_buf_sz; dma_addr_t dma_addr = rxbuffer->dma_addr; u32 control = (MSGDMA_DESC_CTL_END_ON_EOP @@ -172,14 +171,16 @@ void msgdma_add_rx_desc(struct altera_tse_private *priv, | MSGDMA_DESC_CTL_TR_ERR_IRQ | MSGDMA_DESC_CTL_GO); - iowrite32(0, &desc->read_addr_lo); - iowrite32(0, &desc->read_addr_hi); - iowrite32(lower_32_bits(dma_addr), &desc->write_addr_lo); - iowrite32(upper_32_bits(dma_addr), 
&desc->write_addr_hi); - iowrite32(len, &desc->len); - iowrite32(0, &desc->burst_seq_num); - iowrite32(0x00010001, &desc->stride); - iowrite32(control, &desc->control); + csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_lo)); + csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_hi)); + csrwr32(lower_32_bits(dma_addr), priv->rx_dma_desc, + msgdma_descroffs(write_addr_lo)); + csrwr32(upper_32_bits(dma_addr), priv->rx_dma_desc, + msgdma_descroffs(write_addr_hi)); + csrwr32(len, priv->rx_dma_desc, msgdma_descroffs(len)); + csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(burst_seq_num)); + csrwr32(0x00010001, priv->rx_dma_desc, msgdma_descroffs(stride)); + csrwr32(control, priv->rx_dma_desc, msgdma_descroffs(control)); } /* status is returned on upper 16 bits, @@ -190,14 +191,13 @@ u32 msgdma_rx_status(struct altera_tse_private *priv) u32 rxstatus = 0; u32 pktlength; u32 pktstatus; - struct msgdma_csr *rxcsr = - (struct msgdma_csr *)priv->rx_dma_csr; - struct msgdma_response *rxresp = - (struct msgdma_response *)priv->rx_dma_resp; - - if (ioread32(&rxcsr->resp_fill_level) & 0xffff) { - pktlength = ioread32(&rxresp->bytes_transferred); - pktstatus = ioread32(&rxresp->status); + + if (csrrd32(priv->rx_dma_csr, msgdma_csroffs(resp_fill_level)) + & 0xffff) { + pktlength = csrrd32(priv->rx_dma_resp, + msgdma_respoffs(bytes_transferred)); + pktstatus = csrrd32(priv->rx_dma_resp, + msgdma_respoffs(status)); rxstatus = pktstatus; rxstatus = rxstatus << 16; rxstatus |= (pktlength & 0xffff); diff --git a/drivers/net/ethernet/altera/altera_msgdmahw.h b/drivers/net/ethernet/altera/altera_msgdmahw.h index d7b59ba4019c..e335626e1b6b 100644 --- a/drivers/net/ethernet/altera/altera_msgdmahw.h +++ b/drivers/net/ethernet/altera/altera_msgdmahw.h @@ -17,15 +17,6 @@ #ifndef __ALTERA_MSGDMAHW_H__ #define __ALTERA_MSGDMAHW_H__ -/* mSGDMA standard descriptor format - */ -struct msgdma_desc { - u32 read_addr; /* data buffer source address */ - u32 write_addr; /* data buffer destination address */ - u32 len; /* the number of bytes to transfer per descriptor */ - u32 control; /* characteristics of the transfer */ -}; - /* mSGDMA extended descriptor format */ struct msgdma_extended_desc { @@ -159,6 +150,10 @@ struct msgdma_response { u32 status; }; +#define msgdma_respoffs(a) (offsetof(struct msgdma_response, a)) +#define msgdma_csroffs(a) (offsetof(struct msgdma_csr, a)) +#define msgdma_descroffs(a) (offsetof(struct msgdma_extended_desc, a)) + /* mSGDMA response register bit definitions */ #define MSGDMA_RESP_EARLY_TERM BIT(8) diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c index 9ce8630692b6..99cc56f451cf 100644 --- a/drivers/net/ethernet/altera/altera_sgdma.c +++ b/drivers/net/ethernet/altera/altera_sgdma.c @@ -20,8 +20,8 @@ #include "altera_sgdmahw.h" #include "altera_sgdma.h" -static void sgdma_setup_descrip(struct sgdma_descrip *desc, - struct sgdma_descrip *ndesc, +static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc, + struct sgdma_descrip __iomem *ndesc, dma_addr_t ndesc_phys, dma_addr_t raddr, dma_addr_t waddr, @@ -31,17 +31,17 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc, int wfixed); static int sgdma_async_write(struct altera_tse_private *priv, - struct sgdma_descrip *desc); + struct sgdma_descrip __iomem *desc); static int sgdma_async_read(struct altera_tse_private *priv); static dma_addr_t sgdma_txphysaddr(struct altera_tse_private *priv, - struct sgdma_descrip *desc); + struct sgdma_descrip __iomem *desc); 
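(The pattern running through all of these Altera TSE hunks is the same: instead of dereferencing fields of a struct that overlays __iomem space, the driver now computes offsetof()-based offsets and goes through the csrrd32()/csrwr32() helpers the patch adds, which keeps the address-space annotations visible to sparse. A condensed userspace illustration of the idea, with toy names in place of the driver's:)

#include <stddef.h>
#include <stdint.h>

struct toy_csr {
	uint32_t status;
	uint32_t control;
};

#define toy_csroffs(a)	(offsetof(struct toy_csr, a))

/* Stand-ins for readl()/writel() on a real __iomem mapping */
static uint32_t toy_rd32(volatile void *base, size_t offs)
{
	return *(volatile uint32_t *)((uintptr_t)base + offs);
}

static void toy_wr32(uint32_t val, volatile void *base, size_t offs)
{
	*(volatile uint32_t *)((uintptr_t)base + offs) = val;
}

int main(void)
{
	static uint32_t fake_regs[2];	/* pretend register bank */
	volatile void *base = fake_regs;

	/* old style: iowrite32(v, &csr->control);
	 * new style: csrwr32(v, base, csroffs(control));
	 */
	toy_wr32(0x1, base, toy_csroffs(control));
	return (int)toy_rd32(base, toy_csroffs(status));
}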
static dma_addr_t sgdma_rxphysaddr(struct altera_tse_private *priv, - struct sgdma_descrip *desc); + struct sgdma_descrip __iomem *desc); static int sgdma_txbusy(struct altera_tse_private *priv); @@ -79,7 +79,8 @@ int sgdma_initialize(struct altera_tse_private *priv) priv->rxdescphys = (dma_addr_t) 0; priv->txdescphys = (dma_addr_t) 0; - priv->rxdescphys = dma_map_single(priv->device, priv->rx_dma_desc, + priv->rxdescphys = dma_map_single(priv->device, + (void __force *)priv->rx_dma_desc, priv->rxdescmem, DMA_BIDIRECTIONAL); if (dma_mapping_error(priv->device, priv->rxdescphys)) { @@ -88,7 +89,8 @@ int sgdma_initialize(struct altera_tse_private *priv) return -EINVAL; } - priv->txdescphys = dma_map_single(priv->device, priv->tx_dma_desc, + priv->txdescphys = dma_map_single(priv->device, + (void __force *)priv->tx_dma_desc, priv->txdescmem, DMA_TO_DEVICE); if (dma_mapping_error(priv->device, priv->txdescphys)) { @@ -98,8 +100,8 @@ int sgdma_initialize(struct altera_tse_private *priv) } /* Initialize descriptor memory to all 0's, sync memory to cache */ - memset(priv->tx_dma_desc, 0, priv->txdescmem); - memset(priv->rx_dma_desc, 0, priv->rxdescmem); + memset_io(priv->tx_dma_desc, 0, priv->txdescmem); + memset_io(priv->rx_dma_desc, 0, priv->rxdescmem); dma_sync_single_for_device(priv->device, priv->txdescphys, priv->txdescmem, DMA_TO_DEVICE); @@ -126,22 +128,15 @@ void sgdma_uninitialize(struct altera_tse_private *priv) */ void sgdma_reset(struct altera_tse_private *priv) { - u32 *ptxdescripmem = (u32 *)priv->tx_dma_desc; - u32 txdescriplen = priv->txdescmem; - u32 *prxdescripmem = (u32 *)priv->rx_dma_desc; - u32 rxdescriplen = priv->rxdescmem; - struct sgdma_csr *ptxsgdma = (struct sgdma_csr *)priv->tx_dma_csr; - struct sgdma_csr *prxsgdma = (struct sgdma_csr *)priv->rx_dma_csr; - /* Initialize descriptor memory to 0 */ - memset(ptxdescripmem, 0, txdescriplen); - memset(prxdescripmem, 0, rxdescriplen); + memset_io(priv->tx_dma_desc, 0, priv->txdescmem); + memset_io(priv->rx_dma_desc, 0, priv->rxdescmem); - iowrite32(SGDMA_CTRLREG_RESET, &ptxsgdma->control); - iowrite32(0, &ptxsgdma->control); + csrwr32(SGDMA_CTRLREG_RESET, priv->tx_dma_csr, sgdma_csroffs(control)); + csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control)); - iowrite32(SGDMA_CTRLREG_RESET, &prxsgdma->control); - iowrite32(0, &prxsgdma->control); + csrwr32(SGDMA_CTRLREG_RESET, priv->rx_dma_csr, sgdma_csroffs(control)); + csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control)); } /* For SGDMA, interrupts remain enabled after initially enabling, @@ -167,14 +162,14 @@ void sgdma_disable_txirq(struct altera_tse_private *priv) void sgdma_clear_rxirq(struct altera_tse_private *priv) { - struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr; - tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT); + tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control), + SGDMA_CTRLREG_CLRINT); } void sgdma_clear_txirq(struct altera_tse_private *priv) { - struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr; - tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT); + tse_set_bit(priv->tx_dma_csr, sgdma_csroffs(control), + SGDMA_CTRLREG_CLRINT); } /* transmits buffer through SGDMA. 
Returns number of buffers @@ -184,12 +179,11 @@ void sgdma_clear_txirq(struct altera_tse_private *priv) */ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) { - int pktstx = 0; - struct sgdma_descrip *descbase = - (struct sgdma_descrip *)priv->tx_dma_desc; + struct sgdma_descrip __iomem *descbase = + (struct sgdma_descrip __iomem *)priv->tx_dma_desc; - struct sgdma_descrip *cdesc = &descbase[0]; - struct sgdma_descrip *ndesc = &descbase[1]; + struct sgdma_descrip __iomem *cdesc = &descbase[0]; + struct sgdma_descrip __iomem *ndesc = &descbase[1]; /* wait 'til the tx sgdma is ready for the next transmit request */ if (sgdma_txbusy(priv)) @@ -205,7 +199,7 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) 0, /* read fixed */ SGDMA_CONTROL_WR_FIXED); /* Generate SOP */ - pktstx = sgdma_async_write(priv, cdesc); + sgdma_async_write(priv, cdesc); /* enqueue the request to the pending transmit queue */ queue_tx(priv, buffer); @@ -219,10 +213,10 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) u32 sgdma_tx_completions(struct altera_tse_private *priv) { u32 ready = 0; - struct sgdma_descrip *desc = (struct sgdma_descrip *)priv->tx_dma_desc; if (!sgdma_txbusy(priv) && - ((desc->control & SGDMA_CONTROL_HW_OWNED) == 0) && + ((csrrd8(priv->tx_dma_desc, sgdma_descroffs(control)) + & SGDMA_CONTROL_HW_OWNED) == 0) && (dequeue_tx(priv))) { ready = 1; } @@ -246,32 +240,31 @@ void sgdma_add_rx_desc(struct altera_tse_private *priv, */ u32 sgdma_rx_status(struct altera_tse_private *priv) { - struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr; - struct sgdma_descrip *base = (struct sgdma_descrip *)priv->rx_dma_desc; - struct sgdma_descrip *desc = NULL; - int pktsrx; - unsigned int rxstatus = 0; - unsigned int pktlength = 0; - unsigned int pktstatus = 0; + struct sgdma_descrip __iomem *base = + (struct sgdma_descrip __iomem *)priv->rx_dma_desc; + struct sgdma_descrip __iomem *desc = NULL; struct tse_buffer *rxbuffer = NULL; + unsigned int rxstatus = 0; - u32 sts = ioread32(&csr->status); + u32 sts = csrrd32(priv->rx_dma_csr, sgdma_csroffs(status)); desc = &base[0]; if (sts & SGDMA_STSREG_EOP) { + unsigned int pktlength = 0; + unsigned int pktstatus = 0; dma_sync_single_for_cpu(priv->device, priv->rxdescphys, priv->sgdmadesclen, DMA_FROM_DEVICE); - pktlength = desc->bytes_xferred; - pktstatus = desc->status & 0x3f; - rxstatus = pktstatus; + pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred)); + pktstatus = csrrd8(desc, sgdma_descroffs(status)); + rxstatus = pktstatus & ~SGDMA_STATUS_EOP; rxstatus = rxstatus << 16; rxstatus |= (pktlength & 0xffff); if (rxstatus) { - desc->status = 0; + csrwr8(0, desc, sgdma_descroffs(status)); rxbuffer = dequeue_rx(priv); if (rxbuffer == NULL) @@ -279,12 +272,12 @@ u32 sgdma_rx_status(struct altera_tse_private *priv) "sgdma rx and rx queue empty!\n"); /* Clear control */ - iowrite32(0, &csr->control); + csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control)); /* clear status */ - iowrite32(0xf, &csr->status); + csrwr32(0xf, priv->rx_dma_csr, sgdma_csroffs(status)); /* kick the rx sgdma after reaping this descriptor */ - pktsrx = sgdma_async_read(priv); + sgdma_async_read(priv); } else { /* If the SGDMA indicated an end of packet on recv, @@ -298,10 +291,11 @@ u32 sgdma_rx_status(struct altera_tse_private *priv) */ netdev_err(priv->dev, "SGDMA RX Error Info: %x, %x, %x\n", - sts, desc->status, rxstatus); + sts, csrrd8(desc, sgdma_descroffs(status)), + rxstatus); } } else if 
(sts == 0) { - pktsrx = sgdma_async_read(priv); + sgdma_async_read(priv); } return rxstatus; @@ -309,8 +303,8 @@ u32 sgdma_rx_status(struct altera_tse_private *priv) /* Private functions */ -static void sgdma_setup_descrip(struct sgdma_descrip *desc, - struct sgdma_descrip *ndesc, +static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc, + struct sgdma_descrip __iomem *ndesc, dma_addr_t ndesc_phys, dma_addr_t raddr, dma_addr_t waddr, @@ -320,27 +314,30 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc, int wfixed) { /* Clear the next descriptor as not owned by hardware */ - u32 ctrl = ndesc->control; + + u32 ctrl = csrrd8(ndesc, sgdma_descroffs(control)); ctrl &= ~SGDMA_CONTROL_HW_OWNED; - ndesc->control = ctrl; + csrwr8(ctrl, ndesc, sgdma_descroffs(control)); - ctrl = 0; ctrl = SGDMA_CONTROL_HW_OWNED; ctrl |= generate_eop; ctrl |= rfixed; ctrl |= wfixed; /* Channel is implicitly zero, initialized to 0 by default */ - - desc->raddr = raddr; - desc->waddr = waddr; - desc->next = lower_32_bits(ndesc_phys); - desc->control = ctrl; - desc->status = 0; - desc->rburst = 0; - desc->wburst = 0; - desc->bytes = length; - desc->bytes_xferred = 0; + csrwr32(lower_32_bits(raddr), desc, sgdma_descroffs(raddr)); + csrwr32(lower_32_bits(waddr), desc, sgdma_descroffs(waddr)); + + csrwr32(0, desc, sgdma_descroffs(pad1)); + csrwr32(0, desc, sgdma_descroffs(pad2)); + csrwr32(lower_32_bits(ndesc_phys), desc, sgdma_descroffs(next)); + + csrwr8(ctrl, desc, sgdma_descroffs(control)); + csrwr8(0, desc, sgdma_descroffs(status)); + csrwr8(0, desc, sgdma_descroffs(wburst)); + csrwr8(0, desc, sgdma_descroffs(rburst)); + csrwr16(length, desc, sgdma_descroffs(bytes)); + csrwr16(0, desc, sgdma_descroffs(bytes_xferred)); } /* If hardware is busy, don't restart async read. 
@@ -351,12 +348,11 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc, */ static int sgdma_async_read(struct altera_tse_private *priv) { - struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr; - struct sgdma_descrip *descbase = - (struct sgdma_descrip *)priv->rx_dma_desc; + struct sgdma_descrip __iomem *descbase = + (struct sgdma_descrip __iomem *)priv->rx_dma_desc; - struct sgdma_descrip *cdesc = &descbase[0]; - struct sgdma_descrip *ndesc = &descbase[1]; + struct sgdma_descrip __iomem *cdesc = &descbase[0]; + struct sgdma_descrip __iomem *ndesc = &descbase[1]; struct tse_buffer *rxbuffer = NULL; @@ -382,11 +378,13 @@ static int sgdma_async_read(struct altera_tse_private *priv) priv->sgdmadesclen, DMA_TO_DEVICE); - iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)), - &csr->next_descrip); + csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)), + priv->rx_dma_csr, + sgdma_csroffs(next_descrip)); - iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START), - &csr->control); + csrwr32((priv->rxctrlreg | SGDMA_CTRLREG_START), + priv->rx_dma_csr, + sgdma_csroffs(control)); return 1; } @@ -395,32 +393,32 @@ } static int sgdma_async_write(struct altera_tse_private *priv, - struct sgdma_descrip *desc) + struct sgdma_descrip __iomem *desc) { - struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr; - if (sgdma_txbusy(priv)) return 0; /* clear control and status */ - iowrite32(0, &csr->control); - iowrite32(0x1f, &csr->status); + csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control)); + csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status)); dma_sync_single_for_device(priv->device, priv->txdescphys, priv->sgdmadesclen, DMA_TO_DEVICE); - iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)), - &csr->next_descrip); + csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)), + priv->tx_dma_csr, + sgdma_csroffs(next_descrip)); - iowrite32((priv->txctrlreg | SGDMA_CTRLREG_START), - &csr->control); + csrwr32((priv->txctrlreg | SGDMA_CTRLREG_START), + priv->tx_dma_csr, + sgdma_csroffs(control)); return 1; } static dma_addr_t sgdma_txphysaddr(struct altera_tse_private *priv, - struct sgdma_descrip *desc) + struct sgdma_descrip __iomem *desc) { dma_addr_t paddr = priv->txdescmem_busaddr; uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc; @@ -429,7 +427,7 @@ sgdma_txphysaddr(struct altera_tse_private *priv, static dma_addr_t sgdma_rxphysaddr(struct altera_tse_private *priv, - struct sgdma_descrip *desc) + struct sgdma_descrip __iomem *desc) { dma_addr_t paddr = priv->rxdescmem_busaddr; uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc; @@ -518,8 +516,8 @@ queue_rx_peekhead(struct altera_tse_private *priv) */ static int sgdma_rxbusy(struct altera_tse_private *priv) { - struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr; - return ioread32(&csr->status) & SGDMA_STSREG_BUSY; + return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status)) + & SGDMA_STSREG_BUSY; } /* waits for the tx sgdma to finish its current operation, returns 0 @@ -528,13 +526,14 @@ static int sgdma_rxbusy(struct altera_tse_private *priv) static int sgdma_txbusy(struct altera_tse_private *priv) { int delay = 0; - struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr; /* if DMA is busy, wait for current transaction to finish */ - while ((ioread32(&csr->status) & SGDMA_STSREG_BUSY) && (delay++ < 100)) + while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status)) + & SGDMA_STSREG_BUSY) && (delay++ < 100)) udelay(1); - if
(ioread32(&csr->status) & SGDMA_STSREG_BUSY) { + if (csrrd32(priv->tx_dma_csr, sgdma_csroffs(status)) + & SGDMA_STSREG_BUSY) { netdev_err(priv->dev, "timeout waiting for tx dma\n"); return 1; } diff --git a/drivers/net/ethernet/altera/altera_sgdmahw.h b/drivers/net/ethernet/altera/altera_sgdmahw.h index ba3334f35383..85bc33b218d9 100644 --- a/drivers/net/ethernet/altera/altera_sgdmahw.h +++ b/drivers/net/ethernet/altera/altera_sgdmahw.h @@ -19,16 +19,16 @@ /* SGDMA descriptor structure */ struct sgdma_descrip { - unsigned int raddr; /* address of data to be read */ - unsigned int pad1; - unsigned int waddr; - unsigned int pad2; - unsigned int next; - unsigned int pad3; - unsigned short bytes; - unsigned char rburst; - unsigned char wburst; - unsigned short bytes_xferred; /* 16 bits, bytes xferred */ + u32 raddr; /* address of data to be read */ + u32 pad1; + u32 waddr; + u32 pad2; + u32 next; + u32 pad3; + u16 bytes; + u8 rburst; + u8 wburst; + u16 bytes_xferred; /* 16 bits, bytes xferred */ /* bit 0: error * bit 1: length error @@ -39,7 +39,7 @@ struct sgdma_descrip { * bit 6: reserved * bit 7: status eop for recv case */ - unsigned char status; + u8 status; /* bit 0: eop * bit 1: read_fixed @@ -47,7 +47,7 @@ struct sgdma_descrip { * bits 3,4,5,6: Channel (always 0) * bit 7: hardware owned */ - unsigned char control; + u8 control; } __packed; @@ -101,6 +101,8 @@ struct sgdma_csr { u32 pad3[3]; }; +#define sgdma_csroffs(a) (offsetof(struct sgdma_csr, a)) +#define sgdma_descroffs(a) (offsetof(struct sgdma_descrip, a)) #define SGDMA_STSREG_ERR BIT(0) /* Error */ #define SGDMA_STSREG_EOP BIT(1) /* EOP */ diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h index 465c4aabebbd..2adb24d4523c 100644 --- a/drivers/net/ethernet/altera/altera_tse.h +++ b/drivers/net/ethernet/altera/altera_tse.h @@ -357,6 +357,8 @@ struct altera_tse_mac { u32 reserved5[42]; }; +#define tse_csroffs(a) (offsetof(struct altera_tse_mac, a)) + /* Transmit and Receive Command Registers Bit Definitions */ #define ALTERA_TSE_TX_CMD_STAT_OMIT_CRC BIT(17) @@ -487,4 +489,49 @@ struct altera_tse_private { */ void altera_tse_set_ethtool_ops(struct net_device *); +static inline +u32 csrrd32(void __iomem *mac, size_t offs) +{ + void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); + return readl(paddr); +} + +static inline +u16 csrrd16(void __iomem *mac, size_t offs) +{ + void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); + return readw(paddr); +} + +static inline +u8 csrrd8(void __iomem *mac, size_t offs) +{ + void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); + return readb(paddr); +} + +static inline +void csrwr32(u32 val, void __iomem *mac, size_t offs) +{ + void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); + + writel(val, paddr); +} + +static inline +void csrwr16(u16 val, void __iomem *mac, size_t offs) +{ + void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); + + writew(val, paddr); +} + +static inline +void csrwr8(u8 val, void __iomem *mac, size_t offs) +{ + void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); + + writeb(val, paddr); +} + #endif /* __ALTERA_TSE_H__ */ diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c index 76133caffa78..54c25eff7952 100644 --- a/drivers/net/ethernet/altera/altera_tse_ethtool.c +++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c @@ -96,54 +96,89 @@ static void tse_fill_stats(struct net_device *dev, struct ethtool_stats 
*dummy, u64 *buf) { struct altera_tse_private *priv = netdev_priv(dev); - struct altera_tse_mac *mac = priv->mac_dev; u64 ext; - buf[0] = ioread32(&mac->frames_transmitted_ok); - buf[1] = ioread32(&mac->frames_received_ok); - buf[2] = ioread32(&mac->frames_check_sequence_errors); - buf[3] = ioread32(&mac->alignment_errors); + buf[0] = csrrd32(priv->mac_dev, + tse_csroffs(frames_transmitted_ok)); + buf[1] = csrrd32(priv->mac_dev, + tse_csroffs(frames_received_ok)); + buf[2] = csrrd32(priv->mac_dev, + tse_csroffs(frames_check_sequence_errors)); + buf[3] = csrrd32(priv->mac_dev, + tse_csroffs(alignment_errors)); /* Extended aOctetsTransmittedOK counter */ - ext = (u64) ioread32(&mac->msb_octets_transmitted_ok) << 32; - ext |= ioread32(&mac->octets_transmitted_ok); + ext = (u64) csrrd32(priv->mac_dev, + tse_csroffs(msb_octets_transmitted_ok)) << 32; + + ext |= csrrd32(priv->mac_dev, + tse_csroffs(octets_transmitted_ok)); buf[4] = ext; /* Extended aOctetsReceivedOK counter */ - ext = (u64) ioread32(&mac->msb_octets_received_ok) << 32; - ext |= ioread32(&mac->octets_received_ok); + ext = (u64) csrrd32(priv->mac_dev, + tse_csroffs(msb_octets_received_ok)) << 32; + + ext |= csrrd32(priv->mac_dev, + tse_csroffs(octets_received_ok)); buf[5] = ext; - buf[6] = ioread32(&mac->tx_pause_mac_ctrl_frames); - buf[7] = ioread32(&mac->rx_pause_mac_ctrl_frames); - buf[8] = ioread32(&mac->if_in_errors); - buf[9] = ioread32(&mac->if_out_errors); - buf[10] = ioread32(&mac->if_in_ucast_pkts); - buf[11] = ioread32(&mac->if_in_multicast_pkts); - buf[12] = ioread32(&mac->if_in_broadcast_pkts); - buf[13] = ioread32(&mac->if_out_discards); - buf[14] = ioread32(&mac->if_out_ucast_pkts); - buf[15] = ioread32(&mac->if_out_multicast_pkts); - buf[16] = ioread32(&mac->if_out_broadcast_pkts); - buf[17] = ioread32(&mac->ether_stats_drop_events); + buf[6] = csrrd32(priv->mac_dev, + tse_csroffs(tx_pause_mac_ctrl_frames)); + buf[7] = csrrd32(priv->mac_dev, + tse_csroffs(rx_pause_mac_ctrl_frames)); + buf[8] = csrrd32(priv->mac_dev, + tse_csroffs(if_in_errors)); + buf[9] = csrrd32(priv->mac_dev, + tse_csroffs(if_out_errors)); + buf[10] = csrrd32(priv->mac_dev, + tse_csroffs(if_in_ucast_pkts)); + buf[11] = csrrd32(priv->mac_dev, + tse_csroffs(if_in_multicast_pkts)); + buf[12] = csrrd32(priv->mac_dev, + tse_csroffs(if_in_broadcast_pkts)); + buf[13] = csrrd32(priv->mac_dev, + tse_csroffs(if_out_discards)); + buf[14] = csrrd32(priv->mac_dev, + tse_csroffs(if_out_ucast_pkts)); + buf[15] = csrrd32(priv->mac_dev, + tse_csroffs(if_out_multicast_pkts)); + buf[16] = csrrd32(priv->mac_dev, + tse_csroffs(if_out_broadcast_pkts)); + buf[17] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_drop_events)); /* Extended etherStatsOctets counter */ - ext = (u64) ioread32(&mac->msb_ether_stats_octets) << 32; - ext |= ioread32(&mac->ether_stats_octets); + ext = (u64) csrrd32(priv->mac_dev, + tse_csroffs(msb_ether_stats_octets)) << 32; + ext |= csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_octets)); buf[18] = ext; - buf[19] = ioread32(&mac->ether_stats_pkts); - buf[20] = ioread32(&mac->ether_stats_undersize_pkts); - buf[21] = ioread32(&mac->ether_stats_oversize_pkts); - buf[22] = ioread32(&mac->ether_stats_pkts_64_octets); - buf[23] = ioread32(&mac->ether_stats_pkts_65to127_octets); - buf[24] = ioread32(&mac->ether_stats_pkts_128to255_octets); - buf[25] = ioread32(&mac->ether_stats_pkts_256to511_octets); - buf[26] = ioread32(&mac->ether_stats_pkts_512to1023_octets); - buf[27] = ioread32(&mac->ether_stats_pkts_1024to1518_octets); - buf[28] = 
ioread32(&mac->ether_stats_pkts_1519tox_octets); - buf[29] = ioread32(&mac->ether_stats_jabbers); - buf[30] = ioread32(&mac->ether_stats_fragments); + buf[19] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts)); + buf[20] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_undersize_pkts)); + buf[21] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_oversize_pkts)); + buf[22] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts_64_octets)); + buf[23] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts_65to127_octets)); + buf[24] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts_128to255_octets)); + buf[25] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts_256to511_octets)); + buf[26] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts_512to1023_octets)); + buf[27] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts_1024to1518_octets)); + buf[28] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts_1519tox_octets)); + buf[29] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_jabbers)); + buf[30] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_fragments)); } static int tse_sset_count(struct net_device *dev, int sset) @@ -178,7 +213,6 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs, { int i; struct altera_tse_private *priv = netdev_priv(dev); - u32 *tse_mac_regs = (u32 *)priv->mac_dev; u32 *buf = regbuf; /* Set version to a known value, so ethtool knows @@ -196,7 +230,7 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs, regs->version = 1; for (i = 0; i < TSE_NUM_REGS; i++) - buf[i] = ioread32(&tse_mac_regs[i]); + buf[i] = csrrd32(priv->mac_dev, i * 4); } static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index e44a4aeb9701..7330681574d2 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -100,29 +100,30 @@ static inline u32 tse_tx_avail(struct altera_tse_private *priv) */ static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum) { - struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv; - unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0; - u32 data; + struct net_device *ndev = bus->priv; + struct altera_tse_private *priv = netdev_priv(ndev); /* set MDIO address */ - iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr); + csrwr32((mii_id & 0x1f), priv->mac_dev, + tse_csroffs(mdio_phy0_addr)); /* get the data */ - data = ioread32(&mdio_regs[regnum]) & 0xffff; - return data; + return csrrd32(priv->mac_dev, + tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff; } static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value) { - struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv; - unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0; + struct net_device *ndev = bus->priv; + struct altera_tse_private *priv = netdev_priv(ndev); /* set MDIO address */ - iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr); + csrwr32((mii_id & 0x1f), priv->mac_dev, + tse_csroffs(mdio_phy0_addr)); /* write the data */ - iowrite32((u32) value, &mdio_regs[regnum]); + csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4); return 0; } @@ -168,7 +169,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id) for (i = 0; i < PHY_MAX_ADDR; i++) mdio->irq[i] = PHY_POLL; - mdio->priv = priv->mac_dev; + mdio->priv = dev; mdio->parent = 
priv->device; ret = of_mdiobus_register(mdio, mdio_node); @@ -563,7 +564,6 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev) unsigned int nopaged_len = skb_headlen(skb); enum netdev_tx ret = NETDEV_TX_OK; dma_addr_t dma_addr; - int txcomplete = 0; spin_lock_bh(&priv->tx_lock); @@ -599,7 +599,7 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev) dma_sync_single_for_device(priv->device, buffer->dma_addr, buffer->len, DMA_TO_DEVICE); - txcomplete = priv->dmaops->tx_buffer(priv, buffer); + priv->dmaops->tx_buffer(priv, buffer); skb_tx_timestamp(skb); @@ -698,7 +698,6 @@ static struct phy_device *connect_local_phy(struct net_device *dev) struct altera_tse_private *priv = netdev_priv(dev); struct phy_device *phydev = NULL; char phy_id_fmt[MII_BUS_ID_SIZE + 3]; - int ret; if (priv->phy_addr != POLL_PHY) { snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, @@ -712,6 +711,7 @@ static struct phy_device *connect_local_phy(struct net_device *dev) netdev_err(dev, "Could not attach to PHY\n"); } else { + int ret; phydev = phy_find_first(priv->mdio); if (phydev == NULL) { netdev_err(dev, "No PHY found\n"); @@ -791,7 +791,6 @@ static int init_phy(struct net_device *dev) static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr) { - struct altera_tse_mac *mac = priv->mac_dev; u32 msb; u32 lsb; @@ -799,8 +798,8 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr) lsb = ((addr[5] << 8) | addr[4]) & 0xffff; /* Set primary MAC address */ - iowrite32(msb, &mac->mac_addr_0); - iowrite32(lsb, &mac->mac_addr_1); + csrwr32(msb, priv->mac_dev, tse_csroffs(mac_addr_0)); + csrwr32(lsb, priv->mac_dev, tse_csroffs(mac_addr_1)); } /* MAC software reset. @@ -811,26 +810,26 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr) */ static int reset_mac(struct altera_tse_private *priv) { - void __iomem *cmd_cfg_reg = &priv->mac_dev->command_config; int counter; u32 dat; - dat = ioread32(cmd_cfg_reg); + dat = csrrd32(priv->mac_dev, tse_csroffs(command_config)); dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA); dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET; - iowrite32(dat, cmd_cfg_reg); + csrwr32(dat, priv->mac_dev, tse_csroffs(command_config)); counter = 0; while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { - if (tse_bit_is_clear(cmd_cfg_reg, MAC_CMDCFG_SW_RESET)) + if (tse_bit_is_clear(priv->mac_dev, tse_csroffs(command_config), + MAC_CMDCFG_SW_RESET)) break; udelay(1); } if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { - dat = ioread32(cmd_cfg_reg); + dat = csrrd32(priv->mac_dev, tse_csroffs(command_config)); dat &= ~MAC_CMDCFG_SW_RESET; - iowrite32(dat, cmd_cfg_reg); + csrwr32(dat, priv->mac_dev, tse_csroffs(command_config)); return -1; } return 0; @@ -840,41 +839,57 @@ static int reset_mac(struct altera_tse_private *priv) */ static int init_mac(struct altera_tse_private *priv) { - struct altera_tse_mac *mac = priv->mac_dev; unsigned int cmd = 0; u32 frm_length; /* Setup Rx FIFO */ - iowrite32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY, - &mac->rx_section_empty); - iowrite32(ALTERA_TSE_RX_SECTION_FULL, &mac->rx_section_full); - iowrite32(ALTERA_TSE_RX_ALMOST_EMPTY, &mac->rx_almost_empty); - iowrite32(ALTERA_TSE_RX_ALMOST_FULL, &mac->rx_almost_full); + csrwr32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY, + priv->mac_dev, tse_csroffs(rx_section_empty)); + + csrwr32(ALTERA_TSE_RX_SECTION_FULL, priv->mac_dev, + tse_csroffs(rx_section_full)); + + csrwr32(ALTERA_TSE_RX_ALMOST_EMPTY, 
priv->mac_dev, + tse_csroffs(rx_almost_empty)); + + csrwr32(ALTERA_TSE_RX_ALMOST_FULL, priv->mac_dev, + tse_csroffs(rx_almost_full)); /* Setup Tx FIFO */ - iowrite32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY, - &mac->tx_section_empty); - iowrite32(ALTERA_TSE_TX_SECTION_FULL, &mac->tx_section_full); - iowrite32(ALTERA_TSE_TX_ALMOST_EMPTY, &mac->tx_almost_empty); - iowrite32(ALTERA_TSE_TX_ALMOST_FULL, &mac->tx_almost_full); + csrwr32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY, + priv->mac_dev, tse_csroffs(tx_section_empty)); + + csrwr32(ALTERA_TSE_TX_SECTION_FULL, priv->mac_dev, + tse_csroffs(tx_section_full)); + + csrwr32(ALTERA_TSE_TX_ALMOST_EMPTY, priv->mac_dev, + tse_csroffs(tx_almost_empty)); + + csrwr32(ALTERA_TSE_TX_ALMOST_FULL, priv->mac_dev, + tse_csroffs(tx_almost_full)); /* MAC Address Configuration */ tse_update_mac_addr(priv, priv->dev->dev_addr); /* MAC Function Configuration */ frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN; - iowrite32(frm_length, &mac->frm_length); - iowrite32(ALTERA_TSE_TX_IPG_LENGTH, &mac->tx_ipg_length); + csrwr32(frm_length, priv->mac_dev, tse_csroffs(frm_length)); + + csrwr32(ALTERA_TSE_TX_IPG_LENGTH, priv->mac_dev, + tse_csroffs(tx_ipg_length)); /* Disable RX/TX shift 16 for alignment of all received frames on 16-bit * start address */ - tse_set_bit(&mac->rx_cmd_stat, ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16); - tse_clear_bit(&mac->tx_cmd_stat, ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 | - ALTERA_TSE_TX_CMD_STAT_OMIT_CRC); + tse_set_bit(priv->mac_dev, tse_csroffs(rx_cmd_stat), + ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16); + + tse_clear_bit(priv->mac_dev, tse_csroffs(tx_cmd_stat), + ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 | + ALTERA_TSE_TX_CMD_STAT_OMIT_CRC); /* Set the MAC options */ - cmd = ioread32(&mac->command_config); + cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config)); cmd &= ~MAC_CMDCFG_PAD_EN; /* No padding Removal on Receive */ cmd &= ~MAC_CMDCFG_CRC_FWD; /* CRC Removal */ cmd |= MAC_CMDCFG_RX_ERR_DISC; /* Automatically discard frames @@ -889,9 +904,10 @@ static int init_mac(struct altera_tse_private *priv) cmd &= ~MAC_CMDCFG_ETH_SPEED; cmd &= ~MAC_CMDCFG_ENA_10; - iowrite32(cmd, &mac->command_config); + csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config)); - iowrite32(ALTERA_TSE_PAUSE_QUANTA, &mac->pause_quanta); + csrwr32(ALTERA_TSE_PAUSE_QUANTA, priv->mac_dev, + tse_csroffs(pause_quanta)); if (netif_msg_hw(priv)) dev_dbg(priv->device, @@ -904,15 +920,14 @@ static int init_mac(struct altera_tse_private *priv) */ static void tse_set_mac(struct altera_tse_private *priv, bool enable) { - struct altera_tse_mac *mac = priv->mac_dev; - u32 value = ioread32(&mac->command_config); + u32 value = csrrd32(priv->mac_dev, tse_csroffs(command_config)); if (enable) value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA; else value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA); - iowrite32(value, &mac->command_config); + csrwr32(value, priv->mac_dev, tse_csroffs(command_config)); } /* Change the MTU @@ -942,13 +957,12 @@ static int tse_change_mtu(struct net_device *dev, int new_mtu) static void altera_tse_set_mcfilter(struct net_device *dev) { struct altera_tse_private *priv = netdev_priv(dev); - struct altera_tse_mac *mac = priv->mac_dev; int i; struct netdev_hw_addr *ha; /* clear the hash filter */ for (i = 0; i < 64; i++) - iowrite32(0, &(mac->hash_table[i])); + csrwr32(0, priv->mac_dev, tse_csroffs(hash_table) + i * 4); netdev_for_each_mc_addr(ha, dev) { unsigned int hash = 0; @@ -964,7 +978,7 @@ static void altera_tse_set_mcfilter(struct net_device *dev) 
hash = (hash << 1) | xor_bit; } - iowrite32(1, &(mac->hash_table[hash])); + csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + hash * 4); } } @@ -972,12 +986,11 @@ static void altera_tse_set_mcfilter(struct net_device *dev) static void altera_tse_set_mcfilterall(struct net_device *dev) { struct altera_tse_private *priv = netdev_priv(dev); - struct altera_tse_mac *mac = priv->mac_dev; int i; /* set the hash filter */ for (i = 0; i < 64; i++) - iowrite32(1, &(mac->hash_table[i])); + csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4); } /* Set or clear the multicast filter for this adaptor @@ -985,12 +998,12 @@ static void altera_tse_set_mcfilterall(struct net_device *dev) static void tse_set_rx_mode_hashfilter(struct net_device *dev) { struct altera_tse_private *priv = netdev_priv(dev); - struct altera_tse_mac *mac = priv->mac_dev; spin_lock(&priv->mac_cfg_lock); if (dev->flags & IFF_PROMISC) - tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN); + tse_set_bit(priv->mac_dev, tse_csroffs(command_config), + MAC_CMDCFG_PROMIS_EN); if (dev->flags & IFF_ALLMULTI) altera_tse_set_mcfilterall(dev); @@ -1005,15 +1018,16 @@ static void tse_set_rx_mode_hashfilter(struct net_device *dev) static void tse_set_rx_mode(struct net_device *dev) { struct altera_tse_private *priv = netdev_priv(dev); - struct altera_tse_mac *mac = priv->mac_dev; spin_lock(&priv->mac_cfg_lock); if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev) || !netdev_uc_empty(dev)) - tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN); + tse_set_bit(priv->mac_dev, tse_csroffs(command_config), + MAC_CMDCFG_PROMIS_EN); else - tse_clear_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN); + tse_clear_bit(priv->mac_dev, tse_csroffs(command_config), + MAC_CMDCFG_PROMIS_EN); spin_unlock(&priv->mac_cfg_lock); } @@ -1362,6 +1376,11 @@ static int altera_tse_probe(struct platform_device *pdev) of_property_read_bool(pdev->dev.of_node, "altr,has-hash-multicast-filter"); + /* Set hash filter to not set for now until the + * multicast filter receive issue is debugged + */ + priv->hash_filter = 0; + /* get supplemental address settings for this instance */ priv->added_unicast = of_property_read_bool(pdev->dev.of_node, @@ -1493,7 +1512,7 @@ static int altera_tse_remove(struct platform_device *pdev) return 0; } -struct altera_dmaops altera_dtype_sgdma = { +static const struct altera_dmaops altera_dtype_sgdma = { .altera_dtype = ALTERA_DTYPE_SGDMA, .dmamask = 32, .reset_dma = sgdma_reset, @@ -1512,7 +1531,7 @@ struct altera_dmaops altera_dtype_sgdma = { .start_rxdma = sgdma_start_rxdma, }; -struct altera_dmaops altera_dtype_msgdma = { +static const struct altera_dmaops altera_dtype_msgdma = { .altera_dtype = ALTERA_DTYPE_MSGDMA, .dmamask = 64, .reset_dma = msgdma_reset, diff --git a/drivers/net/ethernet/altera/altera_utils.c b/drivers/net/ethernet/altera/altera_utils.c index 70fa13f486b2..d7eeb1713ad2 100644 --- a/drivers/net/ethernet/altera/altera_utils.c +++ b/drivers/net/ethernet/altera/altera_utils.c @@ -17,28 +17,28 @@ #include "altera_tse.h" #include "altera_utils.h" -void tse_set_bit(void __iomem *ioaddr, u32 bit_mask) +void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask) { - u32 value = ioread32(ioaddr); + u32 value = csrrd32(ioaddr, offs); value |= bit_mask; - iowrite32(value, ioaddr); + csrwr32(value, ioaddr, offs); } -void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask) +void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask) { - u32 value = ioread32(ioaddr); + u32 value = 
csrrd32(ioaddr, offs); value &= ~bit_mask; - iowrite32(value, ioaddr); + csrwr32(value, ioaddr, offs); } -int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask) +int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask) { - u32 value = ioread32(ioaddr); + u32 value = csrrd32(ioaddr, offs); return (value & bit_mask) ? 1 : 0; } -int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask) +int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask) { - u32 value = ioread32(ioaddr); + u32 value = csrrd32(ioaddr, offs); return (value & bit_mask) ? 0 : 1; } diff --git a/drivers/net/ethernet/altera/altera_utils.h b/drivers/net/ethernet/altera/altera_utils.h index ce1db36d3583..baf100ccf587 100644 --- a/drivers/net/ethernet/altera/altera_utils.h +++ b/drivers/net/ethernet/altera/altera_utils.h @@ -19,9 +19,9 @@ #ifndef __ALTERA_UTILS_H__ #define __ALTERA_UTILS_H__ -void tse_set_bit(void __iomem *ioaddr, u32 bit_mask); -void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask); -int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask); -int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask); +void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask); +void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask); +int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask); +int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask); #endif /* __ALTERA_UTILS_H__*/ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index b260913db236..3b0d43154e67 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -10051,8 +10051,8 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, #define BCM_5710_UNDI_FW_MF_MAJOR (0x07) #define BCM_5710_UNDI_FW_MF_MINOR (0x08) #define BCM_5710_UNDI_FW_MF_VERS (0x05) -#define BNX2X_PREV_UNDI_MF_PORT(p) (0x1a150c + ((p) << 4)) -#define BNX2X_PREV_UNDI_MF_FUNC(f) (0x1a184c + ((f) << 4)) +#define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4)) +#define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4)) static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp) { u8 major, minor, version; @@ -10352,6 +10352,7 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp) /* Reset should be performed after BRB is emptied */ if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { u32 timer_count = 1000; + bool need_write = true; /* Close the MAC Rx to prevent BRB from filling up */ bnx2x_prev_unload_close_mac(bp, &mac_vals); @@ -10398,7 +10399,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp) * cleaning methods - might be redundant but harmless. 
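+ * Note: the need_write flag added above appears to exist because this block is re-entered on each pass of the BRB-draining + * retry loop (see timer_count earlier in this function), so the UNDI MF clean-up below is written to the chip only once.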
*/ if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) { - bnx2x_prev_unload_undi_mf(bp); + if (need_write) { + bnx2x_prev_unload_undi_mf(bp); + need_write = false; + } } else if (prev_undi) { /* If UNDI resides in memory, * manually increment it diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 81cc2d9831c2..b8078d50261b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -2695,7 +2695,7 @@ out: bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); } - return 0; + return rc; } int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 0c067e8564dd..784c7155b98a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -747,7 +747,7 @@ int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set) out: bnx2x_vfpf_finalize(bp, &req->first_tlv); - return 0; + return rc; } /* request pf to config rss table for vf queues*/ diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c new file mode 100644 index 000000000000..4884205e56ee --- /dev/null +++ b/drivers/net/ethernet/ec_bhf.c @@ -0,0 +1,706 @@ + /* + * drivers/net/ethernet/beckhoff/ec_bhf.c + * + * Copyright (C) 2014 Darek Marcinkiewicz <reksio@newterm.pl> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +/* This is a driver for the EtherCAT master module present on the CCAT FPGA. + * These can be found on Beckhoff CX50xx industrial PCs. 
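+ * The device is not serviced via interrupts here: the RX and TX + * descriptor rings are polled from an hrtimer that fires every + * TIMER_INTERVAL_NSEC (20 us by default, settable at module load + * time through the polling_frequency parameter below).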
+ */ + +#if 0 +#define DEBUG +#endif +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/pci.h> +#include <linux/init.h> + +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/ip.h> +#include <linux/skbuff.h> +#include <linux/hrtimer.h> +#include <linux/interrupt.h> +#include <linux/stat.h> + +#define TIMER_INTERVAL_NSEC 20000 + +#define INFO_BLOCK_SIZE 0x10 +#define INFO_BLOCK_TYPE 0x0 +#define INFO_BLOCK_REV 0x2 +#define INFO_BLOCK_BLK_CNT 0x4 +#define INFO_BLOCK_TX_CHAN 0x4 +#define INFO_BLOCK_RX_CHAN 0x5 +#define INFO_BLOCK_OFFSET 0x8 + +#define EC_MII_OFFSET 0x4 +#define EC_FIFO_OFFSET 0x8 +#define EC_MAC_OFFSET 0xc + +#define MAC_FRAME_ERR_CNT 0x0 +#define MAC_RX_ERR_CNT 0x1 +#define MAC_CRC_ERR_CNT 0x2 +#define MAC_LNK_LST_ERR_CNT 0x3 +#define MAC_TX_FRAME_CNT 0x10 +#define MAC_RX_FRAME_CNT 0x14 +#define MAC_TX_FIFO_LVL 0x20 +#define MAC_DROPPED_FRMS 0x28 +#define MAC_CONNECTED_CCAT_FLAG 0x78 + +#define MII_MAC_ADDR 0x8 +#define MII_MAC_FILT_FLAG 0xe +#define MII_LINK_STATUS 0xf + +#define FIFO_TX_REG 0x0 +#define FIFO_TX_RESET 0x8 +#define FIFO_RX_REG 0x10 +#define FIFO_RX_ADDR_VALID (1u << 31) +#define FIFO_RX_RESET 0x18 + +#define DMA_CHAN_OFFSET 0x1000 +#define DMA_CHAN_SIZE 0x8 + +#define DMA_WINDOW_SIZE_MASK 0xfffffffc + +static struct pci_device_id ids[] = { + { PCI_DEVICE(0x15ec, 0x5000), }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, ids); + +struct rx_header { +#define RXHDR_NEXT_ADDR_MASK 0xffffffu +#define RXHDR_NEXT_VALID (1u << 31) + __le32 next; +#define RXHDR_NEXT_RECV_FLAG 0x1 + __le32 recv; +#define RXHDR_LEN_MASK 0xfffu + __le16 len; + __le16 port; + __le32 reserved; + u8 timestamp[8]; +} __packed; + +#define PKT_PAYLOAD_SIZE 0x7e8 +struct rx_desc { + struct rx_header header; + u8 data[PKT_PAYLOAD_SIZE]; +} __packed; + +struct tx_header { + __le16 len; +#define TX_HDR_PORT_0 0x1 +#define TX_HDR_PORT_1 0x2 + u8 port; + u8 ts_enable; +#define TX_HDR_SENT 0x1 + __le32 sent; + u8 timestamp[8]; +} __packed; + +struct tx_desc { + struct tx_header header; + u8 data[PKT_PAYLOAD_SIZE]; +} __packed; + +#define FIFO_SIZE 64 + +static long polling_frequency = TIMER_INTERVAL_NSEC; + +struct bhf_dma { + u8 *buf; + size_t len; + dma_addr_t buf_phys; + + u8 *alloc; + size_t alloc_len; + dma_addr_t alloc_phys; +}; + +struct ec_bhf_priv { + struct net_device *net_dev; + + struct pci_dev *dev; + + void * __iomem io; + void * __iomem dma_io; + + struct hrtimer hrtimer; + + int tx_dma_chan; + int rx_dma_chan; + void * __iomem ec_io; + void * __iomem fifo_io; + void * __iomem mii_io; + void * __iomem mac_io; + + struct bhf_dma rx_buf; + struct rx_desc *rx_descs; + int rx_dnext; + int rx_dcount; + + struct bhf_dma tx_buf; + struct tx_desc *tx_descs; + int tx_dcount; + int tx_dnext; + + u64 stat_rx_bytes; + u64 stat_tx_bytes; +}; + +#define PRIV_TO_DEV(priv) (&(priv)->dev->dev) + +#define ETHERCAT_MASTER_ID 0x14 + +static void ec_bhf_print_status(struct ec_bhf_priv *priv) +{ + struct device *dev = PRIV_TO_DEV(priv); + + dev_dbg(dev, "Frame error counter: %d\n", + ioread8(priv->mac_io + MAC_FRAME_ERR_CNT)); + dev_dbg(dev, "RX error counter: %d\n", + ioread8(priv->mac_io + MAC_RX_ERR_CNT)); + dev_dbg(dev, "CRC error counter: %d\n", + ioread8(priv->mac_io + MAC_CRC_ERR_CNT)); + dev_dbg(dev, "TX frame counter: %d\n", + ioread32(priv->mac_io + MAC_TX_FRAME_CNT)); + dev_dbg(dev, "RX frame counter: %d\n", + ioread32(priv->mac_io + MAC_RX_FRAME_CNT)); + dev_dbg(dev, "TX fifo level: %d\n", + ioread8(priv->mac_io + 
MAC_TX_FIFO_LVL)); + dev_dbg(dev, "Dropped frames: %d\n", + ioread8(priv->mac_io + MAC_DROPPED_FRMS)); + dev_dbg(dev, "Connected with CCAT slot: %d\n", + ioread8(priv->mac_io + MAC_CONNECTED_CCAT_FLAG)); + dev_dbg(dev, "Link status: %d\n", + ioread8(priv->mii_io + MII_LINK_STATUS)); +} + +static void ec_bhf_reset(struct ec_bhf_priv *priv) +{ + iowrite8(0, priv->mac_io + MAC_FRAME_ERR_CNT); + iowrite8(0, priv->mac_io + MAC_RX_ERR_CNT); + iowrite8(0, priv->mac_io + MAC_CRC_ERR_CNT); + iowrite8(0, priv->mac_io + MAC_LNK_LST_ERR_CNT); + iowrite32(0, priv->mac_io + MAC_TX_FRAME_CNT); + iowrite32(0, priv->mac_io + MAC_RX_FRAME_CNT); + iowrite8(0, priv->mac_io + MAC_DROPPED_FRMS); + + iowrite8(0, priv->fifo_io + FIFO_TX_RESET); + iowrite8(0, priv->fifo_io + FIFO_RX_RESET); + + iowrite8(0, priv->mac_io + MAC_TX_FIFO_LVL); +} + +static void ec_bhf_send_packet(struct ec_bhf_priv *priv, struct tx_desc *desc) +{ + u32 len = le16_to_cpu(desc->header.len) + sizeof(desc->header); + u32 addr = (u8 *)desc - priv->tx_buf.buf; + + iowrite32((ALIGN(len, 8) << 24) | addr, priv->fifo_io + FIFO_TX_REG); + + dev_dbg(PRIV_TO_DEV(priv), "Done sending packet\n"); +} + +static int ec_bhf_desc_sent(struct tx_desc *desc) +{ + return le32_to_cpu(desc->header.sent) & TX_HDR_SENT; +} + +static void ec_bhf_process_tx(struct ec_bhf_priv *priv) +{ + if (unlikely(netif_queue_stopped(priv->net_dev))) { + /* Make sure that we perceive changes to tx_dnext. */ + smp_rmb(); + + if (ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) + netif_wake_queue(priv->net_dev); + } +} + +static int ec_bhf_pkt_received(struct rx_desc *desc) +{ + return le32_to_cpu(desc->header.recv) & RXHDR_NEXT_RECV_FLAG; +} + +static void ec_bhf_add_rx_desc(struct ec_bhf_priv *priv, struct rx_desc *desc) +{ + iowrite32(FIFO_RX_ADDR_VALID | ((u8 *)(desc) - priv->rx_buf.buf), + priv->fifo_io + FIFO_RX_REG); +} + +static void ec_bhf_process_rx(struct ec_bhf_priv *priv) +{ + struct rx_desc *desc = &priv->rx_descs[priv->rx_dnext]; + struct device *dev = PRIV_TO_DEV(priv); + + while (ec_bhf_pkt_received(desc)) { + int pkt_size = (le16_to_cpu(desc->header.len) & + RXHDR_LEN_MASK) - sizeof(struct rx_header) - 4; + u8 *data = desc->data; + struct sk_buff *skb; + + skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size); + dev_dbg(dev, "Received packet, size: %d\n", pkt_size); + + if (skb) { + memcpy(skb_put(skb, pkt_size), data, pkt_size); + skb->protocol = eth_type_trans(skb, priv->net_dev); + dev_dbg(dev, "Protocol type: %x\n", skb->protocol); + + priv->stat_rx_bytes += pkt_size; + + netif_rx(skb); + } else { + dev_err_ratelimited(dev, + "Couldn't allocate an sk_buff for a packet of size %u\n", + pkt_size); + } + + desc->header.recv = 0; + + ec_bhf_add_rx_desc(priv, desc); + + priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount; + desc = &priv->rx_descs[priv->rx_dnext]; + } +} + +static enum hrtimer_restart ec_bhf_timer_fun(struct hrtimer *timer) +{ + struct ec_bhf_priv *priv = container_of(timer, struct ec_bhf_priv, + hrtimer); + ec_bhf_process_rx(priv); + ec_bhf_process_tx(priv); + + if (!netif_running(priv->net_dev)) + return HRTIMER_NORESTART; + + hrtimer_forward_now(timer, ktime_set(0, polling_frequency)); + return HRTIMER_RESTART; +} + +static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv) +{ + struct device *dev = PRIV_TO_DEV(priv); + unsigned block_count, i; + void * __iomem ec_info; + + dev_dbg(dev, "Info block:\n"); + dev_dbg(dev, "Type of function: %x\n", (unsigned)ioread16(priv->io)); + dev_dbg(dev, "Revision of function: %x\n", + 
(unsigned)ioread16(priv->io + INFO_BLOCK_REV)); + + block_count = ioread8(priv->io + INFO_BLOCK_BLK_CNT); + dev_dbg(dev, "Number of function blocks: %x\n", block_count); + + for (i = 0; i < block_count; i++) { + u16 type = ioread16(priv->io + i * INFO_BLOCK_SIZE + + INFO_BLOCK_TYPE); + if (type == ETHERCAT_MASTER_ID) + break; + } + if (i == block_count) { + dev_err(dev, "EtherCAT master with DMA block not found\n"); + return -ENODEV; + } + dev_dbg(dev, "EtherCAT master with DMA block found at pos: %d\n", i); + + ec_info = priv->io + i * INFO_BLOCK_SIZE; + dev_dbg(dev, "EtherCAT master revision: %d\n", + ioread16(ec_info + INFO_BLOCK_REV)); + + priv->tx_dma_chan = ioread8(ec_info + INFO_BLOCK_TX_CHAN); + dev_dbg(dev, "EtherCAT master tx dma channel: %d\n", + priv->tx_dma_chan); + + priv->rx_dma_chan = ioread8(ec_info + INFO_BLOCK_RX_CHAN); + dev_dbg(dev, "EtherCAT master rx dma channel: %d\n", + priv->rx_dma_chan); + + priv->ec_io = priv->io + ioread32(ec_info + INFO_BLOCK_OFFSET); + priv->mii_io = priv->ec_io + ioread32(priv->ec_io + EC_MII_OFFSET); + priv->fifo_io = priv->ec_io + ioread32(priv->ec_io + EC_FIFO_OFFSET); + priv->mac_io = priv->ec_io + ioread32(priv->ec_io + EC_MAC_OFFSET); + + dev_dbg(dev, + "EtherCAT block address: %p, fifo address: %p, mii address: %p, mac address: %p\n", + priv->ec_io, priv->fifo_io, priv->mii_io, priv->mac_io); + + return 0; +} + +static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb, + struct net_device *net_dev) +{ + struct ec_bhf_priv *priv = netdev_priv(net_dev); + struct tx_desc *desc; + unsigned len; + + dev_dbg(PRIV_TO_DEV(priv), "Starting xmit\n"); + + desc = &priv->tx_descs[priv->tx_dnext]; + + skb_copy_and_csum_dev(skb, desc->data); + len = skb->len; + + memset(&desc->header, 0, sizeof(desc->header)); + desc->header.len = cpu_to_le16(len); + desc->header.port = TX_HDR_PORT_0; + + ec_bhf_send_packet(priv, desc); + + priv->tx_dnext = (priv->tx_dnext + 1) % priv->tx_dcount; + + if (!ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) { + /* Make sure that updates to tx_dnext are perceived + * by the timer routine. + */ + smp_wmb(); + + netif_stop_queue(net_dev); + + dev_dbg(PRIV_TO_DEV(priv), "Stopping netif queue\n"); + ec_bhf_print_status(priv); + } + + priv->stat_tx_bytes += len; + + dev_kfree_skb(skb); + + return NETDEV_TX_OK; +} + +static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv, + struct bhf_dma *buf, + int channel, + int size) +{ + int offset = channel * DMA_CHAN_SIZE + DMA_CHAN_OFFSET; + struct device *dev = PRIV_TO_DEV(priv); + u32 mask; + + iowrite32(0xffffffff, priv->dma_io + offset); + + mask = ioread32(priv->dma_io + offset); + mask &= DMA_WINDOW_SIZE_MASK; + dev_dbg(dev, "Read mask %x for channel %d\n", mask, channel); + + /* We want to allocate a chunk of memory that is: + * - aligned to the mask we just read + * - at most as large as the DMA window, i.e. ~mask + 1 bytes. + * To guarantee the alignment we allocate a buffer + * of twice that size.
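+ * For example (illustrative numbers, not values read from real + * hardware): mask == 0xfffff000 implies a window of ~mask + 1 == + * 0x1000 bytes, so len <= 0x1000 and alloc_len == 0x2000. If the + * allocation lands at alloc_phys == 0x12345678, then buf_phys == + * (0x12345678 + 0x1000) & 0xfffff000 == 0x12346000, which is + * mask-aligned and still leaves the full len bytes inside the + * doubled allocation (offset 0x988 of 0x2000).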
+ */ + buf->len = min_t(int, ~mask + 1, size); + buf->alloc_len = 2 * buf->len; + + dev_dbg(dev, "Allocating %d bytes for channel %d\n", + (int)buf->alloc_len, channel); + buf->alloc = dma_alloc_coherent(dev, buf->alloc_len, &buf->alloc_phys, + GFP_KERNEL); + if (buf->alloc == NULL) { + dev_info(dev, "Failed to allocate buffer\n"); + return -ENOMEM; + } + + buf->buf_phys = (buf->alloc_phys + buf->len) & mask; + buf->buf = buf->alloc + (buf->buf_phys - buf->alloc_phys); + + iowrite32(0, priv->dma_io + offset + 4); + iowrite32(buf->buf_phys, priv->dma_io + offset); + dev_dbg(dev, "Buffer: %x and read from dev: %x\n", + (unsigned)buf->buf_phys, ioread32(priv->dma_io + offset)); + + return 0; +} + +static void ec_bhf_setup_tx_descs(struct ec_bhf_priv *priv) +{ + int i = 0; + + priv->tx_dcount = priv->tx_buf.len / sizeof(struct tx_desc); + priv->tx_descs = (struct tx_desc *) priv->tx_buf.buf; + priv->tx_dnext = 0; + + for (i = 0; i < priv->tx_dcount; i++) + priv->tx_descs[i].header.sent = cpu_to_le32(TX_HDR_SENT); +} + +static void ec_bhf_setup_rx_descs(struct ec_bhf_priv *priv) +{ + int i; + + priv->rx_dcount = priv->rx_buf.len / sizeof(struct rx_desc); + priv->rx_descs = (struct rx_desc *) priv->rx_buf.buf; + priv->rx_dnext = 0; + + for (i = 0; i < priv->rx_dcount; i++) { + struct rx_desc *desc = &priv->rx_descs[i]; + u32 next; + + if (i != priv->rx_dcount - 1) + next = (u8 *)(desc + 1) - priv->rx_buf.buf; + else + next = 0; + next |= RXHDR_NEXT_VALID; + desc->header.next = cpu_to_le32(next); + desc->header.recv = 0; + ec_bhf_add_rx_desc(priv, desc); + } +} + +static int ec_bhf_open(struct net_device *net_dev) +{ + struct ec_bhf_priv *priv = netdev_priv(net_dev); + struct device *dev = PRIV_TO_DEV(priv); + int err = 0; + + dev_info(dev, "Opening device\n"); + + ec_bhf_reset(priv); + + err = ec_bhf_alloc_dma_mem(priv, &priv->rx_buf, priv->rx_dma_chan, + FIFO_SIZE * sizeof(struct rx_desc)); + if (err) { + dev_err(dev, "Failed to allocate rx buffer\n"); + goto out; + } + ec_bhf_setup_rx_descs(priv); + + dev_info(dev, "RX buffer allocated, address: %x\n", + (unsigned)priv->rx_buf.buf_phys); + + err = ec_bhf_alloc_dma_mem(priv, &priv->tx_buf, priv->tx_dma_chan, + FIFO_SIZE * sizeof(struct tx_desc)); + if (err) { + dev_err(dev, "Failed to allocate tx buffer\n"); + goto error_rx_free; + } + dev_dbg(dev, "TX buffer allocated, address: %x\n", + (unsigned)priv->tx_buf.buf_phys); + + iowrite8(0, priv->mii_io + MII_MAC_FILT_FLAG); + + ec_bhf_setup_tx_descs(priv); + + netif_start_queue(net_dev); + + hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + priv->hrtimer.function = ec_bhf_timer_fun; + hrtimer_start(&priv->hrtimer, ktime_set(0, polling_frequency), + HRTIMER_MODE_REL); + + dev_info(PRIV_TO_DEV(priv), "Device open\n"); + + ec_bhf_print_status(priv); + + return 0; + +error_rx_free: + dma_free_coherent(dev, priv->rx_buf.alloc_len, priv->rx_buf.alloc, + priv->rx_buf.alloc_phys); +out: + return err; +} + +static int ec_bhf_stop(struct net_device *net_dev) +{ + struct ec_bhf_priv *priv = netdev_priv(net_dev); + struct device *dev = PRIV_TO_DEV(priv); + + hrtimer_cancel(&priv->hrtimer); + + ec_bhf_reset(priv); + + netif_tx_disable(net_dev); + + dma_free_coherent(dev, priv->tx_buf.alloc_len, + priv->tx_buf.alloc, priv->tx_buf.alloc_phys); + dma_free_coherent(dev, priv->rx_buf.alloc_len, + priv->rx_buf.alloc, priv->rx_buf.alloc_phys); + + return 0; +} + +static struct rtnl_link_stats64 * +ec_bhf_get_stats(struct net_device *net_dev, + struct rtnl_link_stats64 *stats) +{ + struct ec_bhf_priv *priv 
= netdev_priv(net_dev); + + stats->rx_errors = ioread8(priv->mac_io + MAC_RX_ERR_CNT) + + ioread8(priv->mac_io + MAC_CRC_ERR_CNT) + + ioread8(priv->mac_io + MAC_FRAME_ERR_CNT); + stats->rx_packets = ioread32(priv->mac_io + MAC_RX_FRAME_CNT); + stats->tx_packets = ioread32(priv->mac_io + MAC_TX_FRAME_CNT); + stats->rx_dropped = ioread8(priv->mac_io + MAC_DROPPED_FRMS); + + stats->tx_bytes = priv->stat_tx_bytes; + stats->rx_bytes = priv->stat_rx_bytes; + + return stats; +} + +static const struct net_device_ops ec_bhf_netdev_ops = { + .ndo_start_xmit = ec_bhf_start_xmit, + .ndo_open = ec_bhf_open, + .ndo_stop = ec_bhf_stop, + .ndo_get_stats64 = ec_bhf_get_stats, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr +}; + +static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id) +{ + struct net_device *net_dev; + struct ec_bhf_priv *priv; + void * __iomem dma_io; + void * __iomem io; + int err = 0; + + err = pci_enable_device(dev); + if (err) + return err; + + pci_set_master(dev); + + err = pci_set_dma_mask(dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&dev->dev, + "Required dma mask not supported, failed to initialize device\n"); + err = -EIO; + goto err_disable_dev; + } + + err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&dev->dev, + "Required dma mask not supported, failed to initialize device\n"); + goto err_disable_dev; + } + + err = pci_request_regions(dev, "ec_bhf"); + if (err) { + dev_err(&dev->dev, "Failed to request pci memory regions\n"); + goto err_disable_dev; + } + + io = pci_iomap(dev, 0, 0); + if (!io) { + dev_err(&dev->dev, "Failed to map pci card memory bar 0\n"); + err = -EIO; + goto err_release_regions; + } + + dma_io = pci_iomap(dev, 2, 0); + if (!dma_io) { + dev_err(&dev->dev, "Failed to map pci card memory bar 2\n"); + err = -EIO; + goto err_unmap; + } + + net_dev = alloc_etherdev(sizeof(struct ec_bhf_priv)); + if (!net_dev) { + err = -ENOMEM; + goto err_unmap_dma_io; + } + + pci_set_drvdata(dev, net_dev); + SET_NETDEV_DEV(net_dev, &dev->dev); + + net_dev->features = 0; + net_dev->flags |= IFF_NOARP; + + net_dev->netdev_ops = &ec_bhf_netdev_ops; + + priv = netdev_priv(net_dev); + priv->net_dev = net_dev; + priv->io = io; + priv->dma_io = dma_io; + priv->dev = dev; + + err = ec_bhf_setup_offsets(priv); + if (err < 0) + goto err_free_net_dev; + + memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6); + + dev_dbg(&dev->dev, "CX5020 Ethercat master address: %pM\n", + net_dev->dev_addr); + + err = register_netdev(net_dev); + if (err < 0) + goto err_free_net_dev; + + return 0; + +err_free_net_dev: + free_netdev(net_dev); +err_unmap_dma_io: + pci_iounmap(dev, dma_io); +err_unmap: + pci_iounmap(dev, io); +err_release_regions: + pci_release_regions(dev); +err_disable_dev: + pci_clear_master(dev); + pci_disable_device(dev); + + return err; +} + +static void ec_bhf_remove(struct pci_dev *dev) +{ + struct net_device *net_dev = pci_get_drvdata(dev); + struct ec_bhf_priv *priv = netdev_priv(net_dev); + + unregister_netdev(net_dev); + free_netdev(net_dev); + + pci_iounmap(dev, priv->dma_io); + pci_iounmap(dev, priv->io); + pci_release_regions(dev); + pci_clear_master(dev); + pci_disable_device(dev); +} + +static struct pci_driver pci_driver = { + .name = "ec_bhf", + .id_table = ids, + .probe = ec_bhf_probe, + .remove = ec_bhf_remove, +}; + +static int __init ec_bhf_init(void) +{ + return pci_register_driver(&pci_driver); +} + +static void __exit 
ec_bhf_exit(void) +{ + pci_unregister_driver(&pci_driver); +} + +module_init(ec_bhf_init); +module_exit(ec_bhf_exit); + +module_param(polling_frequency, long, S_IRUGO); +MODULE_PARM_DESC(polling_frequency, "Polling timer frequency in ns"); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Dariusz Marcinkiewicz <reksio@newterm.pl>"); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index a18645407d21..dc19bc5dec77 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -4949,6 +4949,12 @@ static void be_eeh_resume(struct pci_dev *pdev) if (status) goto err; + /* On some BE3 FW versions, after a HW reset, + * interrupts will remain disabled for each function. + * So, explicitly enable interrupts + */ + be_intr_set(adapter, true); + /* tell fw we're ready to fire cmds */ status = be_cmd_fw_init(adapter); if (status) diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index b0c6050479eb..b78378cea5e3 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -1988,7 +1988,7 @@ jme_alloc_txdesc(struct jme_adapter *jme, return idx; } -static void +static int jme_fill_tx_map(struct pci_dev *pdev, struct txdesc *txdesc, struct jme_buffer_info *txbi, @@ -2005,6 +2005,9 @@ jme_fill_tx_map(struct pci_dev *pdev, len, PCI_DMA_TODEVICE); + if (unlikely(pci_dma_mapping_error(pdev, dmaaddr))) + return -EINVAL; + pci_dma_sync_single_for_device(pdev, dmaaddr, len, @@ -2021,9 +2024,30 @@ jme_fill_tx_map(struct pci_dev *pdev, txbi->mapping = dmaaddr; txbi->len = len; + return 0; } -static void +static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int count) +{ + struct jme_ring *txring = &(jme->txring[0]); + struct jme_buffer_info *txbi = txring->bufinf, *ctxbi; + int mask = jme->tx_ring_mask; + int j; + + for (j = 0 ; j < count ; j++) { + ctxbi = txbi + ((startidx + j + 2) & (mask)); + pci_unmap_page(jme->pdev, + ctxbi->mapping, + ctxbi->len, + PCI_DMA_TODEVICE); + + ctxbi->mapping = 0; + ctxbi->len = 0; + } + +} + +static int jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx) { struct jme_ring *txring = &(jme->txring[0]); @@ -2034,25 +2058,37 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx) int mask = jme->tx_ring_mask; const struct skb_frag_struct *frag; u32 len; + int ret = 0; for (i = 0 ; i < nr_frags ; ++i) { frag = &skb_shinfo(skb)->frags[i]; ctxdesc = txdesc + ((idx + i + 2) & (mask)); ctxbi = txbi + ((idx + i + 2) & (mask)); - jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, + ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, skb_frag_page(frag), frag->page_offset, skb_frag_size(frag), hidma); + if (ret) { + jme_drop_tx_map(jme, idx, i); + goto out; + } + } len = skb_is_nonlinear(skb) ? 
skb_headlen(skb) : skb->len; ctxdesc = txdesc + ((idx + 1) & (mask)); ctxbi = txbi + ((idx + 1) & (mask)); - jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data), + ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data), offset_in_page(skb->data), len, hidma); + if (ret) + jme_drop_tx_map(jme, idx, i); + +out: + return ret; } + static int jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags) { @@ -2131,6 +2167,7 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx) struct txdesc *txdesc; struct jme_buffer_info *txbi; u8 flags; + int ret = 0; txdesc = (struct txdesc *)txring->desc + idx; txbi = txring->bufinf + idx; @@ -2155,7 +2192,10 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx) if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags)) jme_tx_csum(jme, skb, &flags); jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags); - jme_map_tx_skb(jme, skb, idx); + ret = jme_map_tx_skb(jme, skb, idx); + if (ret) + return ret; + txdesc->desc1.flags = flags; /* * Set tx buffer info after telling NIC to send @@ -2228,7 +2268,8 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_BUSY; } - jme_fill_tx_desc(jme, skb, idx); + if (jme_fill_tx_desc(jme, skb, idx)) + return NETDEV_TX_OK; jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0 | diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 78099eab7673..92d3249f63f1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -1253,12 +1253,12 @@ static struct mlx4_cmd_info cmd_info[] = { }, { .opcode = MLX4_CMD_UPDATE_QP, - .has_inbox = false, + .has_inbox = true, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, - .wrapper = mlx4_CMD_EPERM_wrapper + .wrapper = mlx4_UPDATE_QP_wrapper }, { .opcode = MLX4_CMD_GET_OP_REQ, diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index f9c465101963..212cea440f90 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -1195,6 +1195,12 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd); +int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); + int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 61d64ebffd56..fbd32af89c7c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -389,6 +389,41 @@ err_icm: EXPORT_SYMBOL_GPL(mlx4_qp_alloc); +#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC +int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp, + enum mlx4_update_qp_attr attr, + struct mlx4_update_qp_params *params) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_update_qp_context *cmd; + u64 pri_addr_path_mask = 0; + int err = 0; + + /* Validate attr before allocating, so the mailbox cannot leak */ + if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS)) + return -EINVAL; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + cmd = (struct mlx4_update_qp_context *)mailbox->buf; + + if (attr & MLX4_UPDATE_QP_SMAC) { + pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX; + 
cmd->qp_context.pri_path.grh_mylmc = params->smac_index; + } + + cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask); + + err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0, + MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_update_qp); + void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp) { struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 1c3fdd4a1f7d..8f1254a79832 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -3895,6 +3895,60 @@ static int add_eth_header(struct mlx4_dev *dev, int slave, } +#define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX) +int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd_info) +{ + int err; + u32 qpn = vhcr->in_modifier & 0xffffff; + struct res_qp *rqp; + u64 mac; + unsigned port; + u64 pri_addr_path_mask; + struct mlx4_update_qp_context *cmd; + int smac_index; + + cmd = (struct mlx4_update_qp_context *)inbox->buf; + + pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask); + if (cmd->qp_mask || cmd->secondary_addr_path_mask || + (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED)) + return -EPERM; + + /* Just change the smac for the QP */ + err = get_res(dev, slave, qpn, RES_QP, &rqp); + if (err) { + mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave); + return err; + } + + port = (rqp->sched_queue >> 6 & 1) + 1; + smac_index = cmd->qp_context.pri_path.grh_mylmc; + err = mac_find_smac_ix_in_slave(dev, slave, port, + smac_index, &mac); + if (err) { + mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. 
smac_ix: %d\n", + qpn, smac_index); + goto err_mac; + } + + err = mlx4_cmd(dev, inbox->dma, + vhcr->in_modifier, 0, + MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) { + mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn); + goto err_mac; + } + +err_mac: + put_res(dev, slave, qpn, RES_QP); + return err; +} + int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 7b52a88923ef..f785d01c7d12 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -1719,22 +1719,6 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring) tx_ring->producer; } -static inline int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter, - struct net_device *netdev) -{ - int err; - - netdev->num_tx_queues = adapter->drv_tx_rings; - netdev->real_num_tx_queues = adapter->drv_tx_rings; - - err = netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings); - if (err) - netdev_err(netdev, "failed to set %d Tx queues\n", - adapter->drv_tx_rings); - - return err; -} - struct qlcnic_nic_template { int (*config_bridged_mode) (struct qlcnic_adapter *, u32); int (*config_led) (struct qlcnic_adapter *, u32, u32); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 0bc914859e38..7e55e88a81bf 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -2206,6 +2206,31 @@ static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter) ahw->max_uc_count = count; } +static int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter, + u8 tx_queues, u8 rx_queues) +{ + struct net_device *netdev = adapter->netdev; + int err = 0; + + if (tx_queues) { + err = netif_set_real_num_tx_queues(netdev, tx_queues); + if (err) { + netdev_err(netdev, "failed to set %d Tx queues\n", + tx_queues); + return err; + } + } + + if (rx_queues) { + err = netif_set_real_num_rx_queues(netdev, rx_queues); + if (err) + netdev_err(netdev, "failed to set %d Rx queues\n", + rx_queues); + } + + return err; +} + int qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev, int pci_using_dac) @@ -2269,7 +2294,8 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev, netdev->priv_flags |= IFF_UNICAST_FLT; netdev->irq = adapter->msix_entries[0].vector; - err = qlcnic_set_real_num_queues(adapter, netdev); + err = qlcnic_set_real_num_queues(adapter, adapter->drv_tx_rings, + adapter->drv_sds_rings); if (err) return err; @@ -2943,9 +2969,13 @@ static void qlcnic_dump_tx_rings(struct qlcnic_adapter *adapter) tx_ring->tx_stats.xmit_called, tx_ring->tx_stats.xmit_on, tx_ring->tx_stats.xmit_off); + + if (tx_ring->crb_intr_mask) + netdev_info(netdev, "crb_intr_mask=%d\n", + readl(tx_ring->crb_intr_mask)); + netdev_info(netdev, - "crb_intr_mask=%d, hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n", - readl(tx_ring->crb_intr_mask), + "hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n", readl(tx_ring->crb_cmd_producer), tx_ring->producer, tx_ring->sw_consumer, le32_to_cpu(*(tx_ring->hw_consumer))); @@ -3978,12 +4008,21 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt, int qlcnic_setup_rings(struct qlcnic_adapter *adapter) { struct net_device 
*netdev = adapter->netdev; + u8 tx_rings, rx_rings; int err; if (test_bit(__QLCNIC_RESETTING, &adapter->state)) return -EBUSY; + tx_rings = adapter->drv_tss_rings; + rx_rings = adapter->drv_rss_rings; + netif_device_detach(netdev); + + err = qlcnic_set_real_num_queues(adapter, tx_rings, rx_rings); + if (err) + goto done; + if (netif_running(netdev)) __qlcnic_down(adapter, netdev); @@ -4003,7 +4042,17 @@ int qlcnic_setup_rings(struct qlcnic_adapter *adapter) return err; } - netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings); + /* Check if we need to update real_num_{tx|rx}_queues because + * qlcnic_setup_intr() may change Tx/Rx rings size + */ + if ((tx_rings != adapter->drv_tx_rings) || + (rx_rings != adapter->drv_sds_rings)) { + err = qlcnic_set_real_num_queues(adapter, + adapter->drv_tx_rings, + adapter->drv_sds_rings); + if (err) + goto done; + } if (qlcnic_83xx_check(adapter)) { qlcnic_83xx_initialize_nic(adapter, 1); diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index 32d969e857f7..89b83e59e1dc 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c @@ -156,13 +156,15 @@ void efx_nic_fini_interrupt(struct efx_nic *efx) efx->net_dev->rx_cpu_rmap = NULL; #endif - /* Disable MSI/MSI-X interrupts */ - efx_for_each_channel(channel, efx) - free_irq(channel->irq, &efx->msi_context[channel->channel]); - - /* Disable legacy interrupt */ - if (efx->legacy_irq) + if (EFX_INT_MODE_USE_MSI(efx)) { + /* Disable MSI/MSI-X interrupts */ + efx_for_each_channel(channel, efx) + free_irq(channel->irq, + &efx->msi_context[channel->channel]); + } else { + /* Disable legacy interrupt */ free_irq(efx->legacy_irq, efx); + } } /* Register dump */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index d940034acdd4..0f4841d2e8dc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1704,7 +1704,7 @@ static int stmmac_open(struct net_device *dev) if (ret) { pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret); - goto phy_error; + return ret; } } @@ -1779,8 +1779,6 @@ init_error: dma_desc_error: if (priv->phydev) phy_disconnect(priv->phydev); -phy_error: - clk_disable_unprepare(priv->stmmac_clk); return ret; } diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index df8d383acf48..b9ac20f42651 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -246,7 +246,7 @@ static inline void cas_lock_tx(struct cas *cp) int i; for (i = 0; i < N_TX_RINGS; i++) - spin_lock(&cp->tx_lock[i]); + spin_lock_nested(&cp->tx_lock[i], i); } static inline void cas_lock_all(struct cas *cp) diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 36aa109416c4..c331b7ebc812 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1871,18 +1871,13 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, mdio_node = of_find_node_by_phandle(be32_to_cpup(parp)); phyid = be32_to_cpup(parp+1); mdio = of_find_device_by_node(mdio_node); - - if (strncmp(mdio->name, "gpio", 4) == 0) { - /* GPIO bitbang MDIO driver attached */ - struct mii_bus *bus = dev_get_drvdata(&mdio->dev); - - snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), - PHY_ID_FMT, bus->id, phyid); - } else { - /* davinci MDIO driver attached */ - snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), - PHY_ID_FMT, mdio->name, phyid); + 
of_node_put(mdio_node); + if (!mdio) { + pr_err("Missing mdio platform device\n"); + return -EINVAL; } + snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), + PHY_ID_FMT, mdio->name, phyid); mac_addr = of_get_mac_address(slave_node); if (mac_addr) diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index b0e2865a6810..d53e299ae1d9 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -458,8 +458,10 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change) struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; - if (change & IFF_ALLMULTI) - dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1); + if (dev->flags & IFF_UP) { + if (change & IFF_ALLMULTI) + dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1); + } } static void macvlan_set_mac_lists(struct net_device *dev) @@ -515,6 +517,11 @@ static struct lock_class_key macvlan_netdev_addr_lock_key; #define MACVLAN_STATE_MASK \ ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT)) +static int macvlan_get_nest_level(struct net_device *dev) +{ + return ((struct macvlan_dev *)netdev_priv(dev))->nest_level; +} + static void macvlan_set_lockdep_class_one(struct net_device *dev, struct netdev_queue *txq, void *_unused) @@ -525,8 +532,9 @@ static void macvlan_set_lockdep_class_one(struct net_device *dev, static void macvlan_set_lockdep_class(struct net_device *dev) { - lockdep_set_class(&dev->addr_list_lock, - &macvlan_netdev_addr_lock_key); + lockdep_set_class_and_subclass(&dev->addr_list_lock, + &macvlan_netdev_addr_lock_key, + macvlan_get_nest_level(dev)); netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL); } @@ -721,6 +729,7 @@ static const struct net_device_ops macvlan_netdev_ops = { .ndo_fdb_add = macvlan_fdb_add, .ndo_fdb_del = macvlan_fdb_del, .ndo_fdb_dump = ndo_dflt_fdb_dump, + .ndo_get_lock_subclass = macvlan_get_nest_level, }; void macvlan_common_setup(struct net_device *dev) @@ -849,6 +858,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, vlan->dev = dev; vlan->port = port; vlan->set_features = MACVLAN_FEATURES; + vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1; vlan->mode = MACVLAN_MODE_VEPA; if (data && data[IFLA_MACVLAN_MODE]) diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 9c4defdec67b..5f1a2250018f 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -215,6 +215,10 @@ static int mdio_gpio_probe(struct platform_device *pdev) if (pdev->dev.of_node) { pdata = mdio_gpio_of_get_data(pdev); bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio"); + if (bus_id < 0) { + dev_warn(&pdev->dev, "failed to get alias id\n"); + bus_id = 0; + } } else { pdata = dev_get_platdata(&pdev->dev); bus_id = pdev->id; diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index a972056b2249..3bc079a67a3d 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -715,7 +715,7 @@ void phy_state_machine(struct work_struct *work) struct delayed_work *dwork = to_delayed_work(work); struct phy_device *phydev = container_of(dwork, struct phy_device, state_queue); - int needs_aneg = 0, do_suspend = 0; + bool needs_aneg = false, do_suspend = false, do_resume = false; int err = 0; mutex_lock(&phydev->lock); @@ -727,7 +727,7 @@ void phy_state_machine(struct work_struct *work) case PHY_PENDING: break; case PHY_UP: - needs_aneg = 1; + needs_aneg = true; phydev->link_timeout = PHY_AN_TIMEOUT; @@ -757,7 +757,7 @@ void phy_state_machine(struct 
work_struct *work) phydev->adjust_link(phydev->attached_dev); } else if (0 == phydev->link_timeout--) - needs_aneg = 1; + needs_aneg = true; break; case PHY_NOLINK: err = phy_read_status(phydev); @@ -791,7 +791,7 @@ void phy_state_machine(struct work_struct *work) netif_carrier_on(phydev->attached_dev); } else { if (0 == phydev->link_timeout--) - needs_aneg = 1; + needs_aneg = true; } phydev->adjust_link(phydev->attached_dev); @@ -827,7 +827,7 @@ void phy_state_machine(struct work_struct *work) phydev->link = 0; netif_carrier_off(phydev->attached_dev); phydev->adjust_link(phydev->attached_dev); - do_suspend = 1; + do_suspend = true; } break; case PHY_RESUMING: @@ -876,6 +876,7 @@ void phy_state_machine(struct work_struct *work) } phydev->adjust_link(phydev->attached_dev); } + do_resume = true; break; } @@ -883,9 +884,10 @@ void phy_state_machine(struct work_struct *work) if (needs_aneg) err = phy_start_aneg(phydev); - - if (do_suspend) + else if (do_suspend) phy_suspend(phydev); + else if (do_resume) + phy_resume(phydev); if (err < 0) phy_error(phydev); diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 0ce606624296..4987a1c6dc52 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -614,8 +614,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, err = phy_init_hw(phydev); if (err) phy_detach(phydev); - - phy_resume(phydev); + else + phy_resume(phydev); return err; } diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index c9f3281506af..2e025ddcef21 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c @@ -120,6 +120,16 @@ static void cdc_mbim_unbind(struct usbnet *dev, struct usb_interface *intf) cdc_ncm_unbind(dev, intf); } +/* verify that the ethernet protocol is IPv4 or IPv6 */ +static bool is_ip_proto(__be16 proto) +{ + switch (proto) { + case htons(ETH_P_IP): + case htons(ETH_P_IPV6): + return true; + } + return false; +} static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) { @@ -128,6 +138,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb struct cdc_ncm_ctx *ctx = info->ctx; __le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN); u16 tci = 0; + bool is_ip; u8 *c; if (!ctx) @@ -137,25 +148,32 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb if (skb->len <= ETH_HLEN) goto error; + /* Some applications using e.g. packet sockets will + * bypass the VLAN acceleration and create tagged + * ethernet frames directly. 
We primarily look for + * the accelerated out-of-band tag, but fall back if + * required + */ + skb_reset_mac_header(skb); + if (vlan_get_tag(skb, &tci) < 0 && skb->len > VLAN_ETH_HLEN && + __vlan_get_tag(skb, &tci) == 0) { + is_ip = is_ip_proto(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto); + skb_pull(skb, VLAN_ETH_HLEN); + } else { + is_ip = is_ip_proto(eth_hdr(skb)->h_proto); + skb_pull(skb, ETH_HLEN); + } + /* mapping VLANs to MBIM sessions: * no tag => IPS session <0> * 1 - 255 => IPS session <vlanid> * 256 - 511 => DSS session <vlanid - 256> * 512 - 4095 => unsupported, drop */ - vlan_get_tag(skb, &tci); - switch (tci & 0x0f00) { case 0x0000: /* VLAN ID 0 - 255 */ - /* verify that datagram is IPv4 or IPv6 */ - skb_reset_mac_header(skb); - switch (eth_hdr(skb)->h_proto) { - case htons(ETH_P_IP): - case htons(ETH_P_IPV6): - break; - default: + if (!is_ip) goto error; - } c = (u8 *)&sign; c[3] = tci; break; @@ -169,7 +187,6 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb "unsupported tci=0x%04x\n", tci); goto error; } - skb_pull(skb, ETH_HLEN); } spin_lock_bh(&ctx->mtx); @@ -204,17 +221,23 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci) return; /* need to send the NA on the VLAN dev, if any */ - if (tci) + rcu_read_lock(); + if (tci) { netdev = __vlan_find_dev_deep(dev->net, htons(ETH_P_8021Q), tci); - else + if (!netdev) { + rcu_read_unlock(); + return; + } + } else { netdev = dev->net; - if (!netdev) - return; + } + dev_hold(netdev); + rcu_read_unlock(); in6_dev = in6_dev_get(netdev); if (!in6_dev) - return; + goto out; is_router = !!in6_dev->cnf.forwarding; in6_dev_put(in6_dev); @@ -224,6 +247,8 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci) true /* solicited */, false /* override */, true /* inc_opt */); +out: + dev_put(netdev); } static bool is_neigh_solicit(u8 *buf, size_t len) diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index f46cd0250e48..5627917c5ff7 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -95,8 +95,10 @@ static void ath9k_htc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) if ((vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_MESH_POINT) && - bss_conf->enable_beacon) + bss_conf->enable_beacon) { priv->reconfig_beacon = true; + priv->rearm_ani = true; + } if (bss_conf->assoc) { priv->rearm_ani = true; @@ -257,6 +259,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv, ath9k_htc_ps_wakeup(priv); + ath9k_htc_stop_ani(priv); del_timer_sync(&priv->tx.cleanup_timer); ath9k_htc_tx_drain(priv); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index afb3d15e38ff..be1985296bdc 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c @@ -4948,7 +4948,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_if *ifp) if (!err) { /* only set 2G bandwidth using bw_cap command */ band_bwcap.band = cpu_to_le32(WLC_BAND_2G); - band_bwcap.bw_cap = cpu_to_le32(WLC_BW_40MHZ_BIT); + band_bwcap.bw_cap = cpu_to_le32(WLC_BW_CAP_40MHZ); err = brcmf_fil_iovar_data_set(ifp, "bw_cap", &band_bwcap, sizeof(band_bwcap)); } else { diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c index fa858d548d13..0489314425cb 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex.c +++ 
b/drivers/net/wireless/iwlwifi/mvm/coex.c @@ -611,14 +611,14 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm) bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO); if (IWL_MVM_BT_COEX_CORUNNING) { - bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_CORUN_LUT_20 | - BT_VALID_CORUN_LUT_40); + bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 | + BT_VALID_CORUN_LUT_40); bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING); } if (IWL_MVM_BT_COEX_MPLUT) { bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT); - bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_MULTI_PRIO_LUT); + bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT); } if (mvm->cfg->bt_shared_single_ant) diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h index 9426905de6b2..d73a89ecd78a 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h @@ -183,9 +183,9 @@ enum iwl_scan_type { * this number of packets were received (typically 1) * @passive2active: is auto switching from passive to active during scan allowed * @rxchain_sel_flags: RXON_RX_CHAIN_* - * @max_out_time: in usecs, max out of serving channel time + * @max_out_time: in TUs, max out of serving channel time * @suspend_time: how long to pause scan when returning to service channel: - * bits 0-19: beacon interal in usecs (suspend before executing) + * bits 0-19: beacon interval in TUs (suspend before executing) * bits 20-23: reserved * bits 24-31: number of beacons (suspend between channels) * @rxon_flags: RXON_FLG_* @@ -383,8 +383,8 @@ enum scan_framework_client { * @quiet_plcp_th: quiet channel num of packets threshold * @good_CRC_th: passive to active promotion threshold * @rx_chain: RXON rx chain. - * @max_out_time: max uSec to be out of assoceated channel - * @suspend_time: pause scan this long when returning to service channel + * @max_out_time: max TUs to be out of associated channel + * @suspend_time: pause scan for this many TUs when returning to service channel * @flags: RXON flags * @filter_flags: RXONfilter * @tx_cmd: tx command for active scan; for 2GHz and for 5GHz. diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index f0cebf12c7b8..b41dc84e9431 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -1007,7 +1007,7 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac, memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN); len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4); - ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC, len, cmd); + ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd); if (ret) IWL_ERR(mvm, "mcast filter cmd error. 
ret=%d\n", ret); } @@ -1023,7 +1023,7 @@ static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm) if (WARN_ON_ONCE(!mvm->mcast_filter_cmd)) return; - ieee80211_iterate_active_interfaces( + ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_mc_iface_iterator, &iter_data); } @@ -1807,6 +1807,11 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw, mutex_lock(&mvm->mutex); + if (!iwl_mvm_is_idle(mvm)) { + ret = -EBUSY; + goto out; + } + switch (mvm->scan_status) { case IWL_MVM_SCAN_OS: IWL_DEBUG_SCAN(mvm, "Stopping previous scan for sched_scan\n"); diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index d564233a65da..f1ec0986c3c9 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -1003,6 +1003,9 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif) return mvmvif->low_latency; } +/* Assoc status */ +bool iwl_mvm_is_idle(struct iwl_mvm *mvm); + /* Thermal management and CT-kill */ void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff); void iwl_mvm_tt_handler(struct iwl_mvm *mvm); diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c index 9f52c5b3f0ec..e1c838899363 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c @@ -1010,7 +1010,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband, return; } -#ifdef CPTCFG_MAC80211_DEBUGFS +#ifdef CONFIG_MAC80211_DEBUGFS /* Disable last tx check if we are debugging with fixed rate */ if (lq_sta->dbg_fixed_rate) { IWL_DEBUG_RATE(mvm, "Fixed rate. avoid rate scaling\n"); diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index c91dc8498852..c28de54c75d4 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -277,51 +277,22 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_scan_condition_iterator, &global_bound); - /* - * Under low latency traffic passive scan is fragmented meaning - * that dwell on a particular channel will be fragmented. Each fragment - * dwell time is 20ms and fragments period is 105ms. Skipping to next - * channel will be delayed by the same period - 105ms. So suspend_time - * parameter describing both fragments and channels skipping periods is - * set to 105ms. This value is chosen so that overall passive scan - * duration will not be too long. Max_out_time in this case is set to - * 70ms, so for active scanning operating channel will be left for 70ms - * while for passive still for 20ms (fragment dwell). - */ - if (global_bound) { - if (!iwl_mvm_low_latency(mvm)) { - params->suspend_time = ieee80211_tu_to_usec(100); - params->max_out_time = ieee80211_tu_to_usec(600); - } else { - params->suspend_time = ieee80211_tu_to_usec(105); - /* P2P doesn't support fragmented passive scan, so - * configure max_out_time to be at least longest dwell - * time for passive scan. - */ - if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p) { - params->max_out_time = ieee80211_tu_to_usec(70); - params->passive_fragmented = true; - } else { - u32 passive_dwell; - /* - * Use band G so that passive channel dwell time - * will be assigned with maximum value. 
- */ - band = IEEE80211_BAND_2GHZ; - passive_dwell = iwl_mvm_get_passive_dwell(band); - params->max_out_time = - ieee80211_tu_to_usec(passive_dwell); - } - } + if (!global_bound) + goto not_bound; + + params->suspend_time = 100; + params->max_out_time = 600; + + if (iwl_mvm_low_latency(mvm)) { + params->suspend_time = 250; + params->max_out_time = 250; } +not_bound: + for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) { - if (params->passive_fragmented) - params->dwell[band].passive = 20; - else - params->dwell[band].passive = - iwl_mvm_get_passive_dwell(band); + params->dwell[band].passive = iwl_mvm_get_passive_dwell(band); params->dwell[band].active = iwl_mvm_get_active_dwell(band, n_ssids); } @@ -761,7 +732,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm, int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels; int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels; int head = 0; - int tail = band_2ghz + band_5ghz; + int tail = band_2ghz + band_5ghz - 1; u32 ssid_bitmap; int cmd_len; int ret; diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c index d619851745a1..2180902266ae 100644 --- a/drivers/net/wireless/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/iwlwifi/mvm/utils.c @@ -644,3 +644,22 @@ bool iwl_mvm_low_latency(struct iwl_mvm *mvm) return result; } + +static void iwl_mvm_idle_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) +{ + bool *idle = _data; + + if (!vif->bss_conf.idle) + *idle = false; +} + +bool iwl_mvm_is_idle(struct iwl_mvm *mvm) +{ + bool idle = true; + + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_idle_iter, &idle); + + return idle; +} diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index dcfd6d866d09..2365553f1ef7 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -1749,6 +1749,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, * PCI Tx retries from interfering with C3 CPU state */ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); + trans->dev = &pdev->dev; + trans_pcie->pci_dev = pdev; + iwl_disable_interrupts(trans); + err = pci_enable_msi(pdev); if (err) { dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err); @@ -1760,8 +1764,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, } } - trans->dev = &pdev->dev; - trans_pcie->pci_dev = pdev; trans->hw_rev = iwl_read32(trans, CSR_HW_REV); trans->hw_id = (pdev->device << 16) + pdev->subsystem_device; snprintf(trans->hw_id_str, sizeof(trans->hw_id_str), @@ -1787,8 +1789,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, goto out_pci_disable_msi; } - trans_pcie->inta_mask = CSR_INI_SET_MASK; - if (iwl_pcie_alloc_ict(trans)) goto out_free_cmd_pool; @@ -1800,6 +1800,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, goto out_free_ict; } + trans_pcie->inta_mask = CSR_INI_SET_MASK; + return trans; out_free_ict: diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 630a3fcf65bc..0d4a285cbd7e 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -226,7 +226,7 @@ int xenvif_map_frontend_rings(struct xenvif *vif, grant_ref_t rx_ring_ref); /* Check for SKBs from frontend and schedule backend processing */ -void xenvif_check_rx_xenvif(struct xenvif *vif); +void xenvif_napi_schedule_or_enable_events(struct xenvif *vif); /* Prevent the 
device from generating any further traffic. */ void xenvif_carrier_off(struct xenvif *vif); diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index ef05c5c49d41..20e9defa1060 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -75,32 +75,8 @@ static int xenvif_poll(struct napi_struct *napi, int budget) work_done = xenvif_tx_action(vif, budget); if (work_done < budget) { - int more_to_do = 0; - unsigned long flags; - - /* It is necessary to disable IRQ before calling - * RING_HAS_UNCONSUMED_REQUESTS. Otherwise we might - * lose event from the frontend. - * - * Consider: - * RING_HAS_UNCONSUMED_REQUESTS - * <frontend generates event to trigger napi_schedule> - * __napi_complete - * - * This handler is still in scheduled state so the - * event has no effect at all. After __napi_complete - * this handler is descheduled and cannot get - * scheduled again. We lose event in this case and the ring - * will be completely stalled. - */ - - local_irq_save(flags); - - RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do); - if (!more_to_do) - __napi_complete(napi); - - local_irq_restore(flags); + napi_complete(napi); + xenvif_napi_schedule_or_enable_events(vif); } return work_done; @@ -194,7 +170,7 @@ static void xenvif_up(struct xenvif *vif) enable_irq(vif->tx_irq); if (vif->tx_irq != vif->rx_irq) enable_irq(vif->rx_irq); - xenvif_check_rx_xenvif(vif); + xenvif_napi_schedule_or_enable_events(vif); } static void xenvif_down(struct xenvif *vif) diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 76665405c5aa..7367208ee8cd 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -104,7 +104,7 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif, /* Find the containing VIF's structure from a pointer in pending_tx_info array */ -static inline struct xenvif* ubuf_to_vif(struct ubuf_info *ubuf) +static inline struct xenvif *ubuf_to_vif(const struct ubuf_info *ubuf) { u16 pending_idx = ubuf->desc; struct pending_tx_info *temp = @@ -323,6 +323,35 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, } /* + * Find the grant ref for a given frag in a chain of struct ubuf_info's + * skb: the skb itself + * i: the frag's number + * ubuf: a pointer to an element in the chain. It should not be NULL + * + * Returns a pointer to the element in the chain where the page was found. If + * not found, returns NULL. + * See the definition of callback_struct in common.h for more details about + * the chain. + */ +static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb, + const int i, + const struct ubuf_info *ubuf) +{ + struct xenvif *foreign_vif = ubuf_to_vif(ubuf); + + do { + u16 pending_idx = ubuf->desc; + + if (skb_shinfo(skb)->frags[i].page.p == + foreign_vif->mmap_pages[pending_idx]) + break; + ubuf = (struct ubuf_info *) ubuf->ctx; + } while (ubuf); + + return ubuf; +} + +/* * Prepare an SKB to be transmitted to the frontend. 
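+ * (For frags that were grant-mapped from a foreign domain, the copy + * operations below reference the foreign grant directly; the ubuf + * chain walked by xenvif_find_gref() supplies the matching grant ref.)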
* * This function is responsible for allocating grant operations, meta @@ -346,9 +375,8 @@ static int xenvif_gop_skb(struct sk_buff *skb, int head = 1; int old_meta_prod; int gso_type; - struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg; - grant_ref_t foreign_grefs[MAX_SKB_FRAGS]; - struct xenvif *foreign_vif = NULL; + const struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg; + const struct ubuf_info *const head_ubuf = ubuf; old_meta_prod = npo->meta_prod; @@ -386,19 +414,6 @@ static int xenvif_gop_skb(struct sk_buff *skb, npo->copy_off = 0; npo->copy_gref = req->gref; - if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) && - (ubuf->callback == &xenvif_zerocopy_callback)) { - int i = 0; - foreign_vif = ubuf_to_vif(ubuf); - - do { - u16 pending_idx = ubuf->desc; - foreign_grefs[i++] = - foreign_vif->pending_tx_info[pending_idx].req.gref; - ubuf = (struct ubuf_info *) ubuf->ctx; - } while (ubuf); - } - data = skb->data; while (data < skb_tail_pointer(skb)) { unsigned int offset = offset_in_page(data); @@ -415,13 +430,60 @@ static int xenvif_gop_skb(struct sk_buff *skb, } for (i = 0; i < nr_frags; i++) { + /* This variable also signals whether foreign_gref has a real + * value or not. + */ + struct xenvif *foreign_vif = NULL; + grant_ref_t foreign_gref; + + if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) && + (ubuf->callback == &xenvif_zerocopy_callback)) { + const struct ubuf_info *const startpoint = ubuf; + + /* Ideally ubuf points to the chain element which + * belongs to this frag. Or if frags were removed from + * the beginning, then shortly before it. + */ + ubuf = xenvif_find_gref(skb, i, ubuf); + + /* Try again from the beginning of the list, if we + * haven't tried from there. This only makes sense in + * the unlikely event of reordering the original frags. + * For injected local pages it's an unnecessary second + * run. + */ + if (unlikely(!ubuf) && startpoint != head_ubuf) + ubuf = xenvif_find_gref(skb, i, head_ubuf); + + if (likely(ubuf)) { + u16 pending_idx = ubuf->desc; + + foreign_vif = ubuf_to_vif(ubuf); + foreign_gref = foreign_vif->pending_tx_info[pending_idx].req.gref; + /* Just a safety measure. If this was the last + * element on the list, the for loop will + * iterate again if a local page were added to + * the end. Using head_ubuf here prevents the + * second search on the chain. Or the original + * frags changed order, but that's less likely. + * In any way, ubuf shouldn't be NULL. + */ + ubuf = ubuf->ctx ? + (struct ubuf_info *) ubuf->ctx : + head_ubuf; + } else + /* This frag was a local page, added to the + * array after the skb left netback. + */ + ubuf = head_ubuf; + } xenvif_gop_frag_copy(vif, skb, npo, skb_frag_page(&skb_shinfo(skb)->frags[i]), skb_frag_size(&skb_shinfo(skb)->frags[i]), skb_shinfo(skb)->frags[i].page_offset, &head, foreign_vif, - foreign_grefs[i]); + foreign_vif ? 
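/* UINT_MAX acts as an "invalid gref" sentinel here: as the comment above notes, foreign_vif doubles as the validity flag, so the gref argument is never consulted when foreign_vif is NULL */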
foreign_gref : UINT_MAX); } return npo->meta_prod - old_meta_prod; @@ -654,7 +716,7 @@ done: notify_remote_via_irq(vif->rx_irq); } -void xenvif_check_rx_xenvif(struct xenvif *vif) +void xenvif_napi_schedule_or_enable_events(struct xenvif *vif) { int more_to_do; @@ -688,7 +750,7 @@ static void tx_credit_callback(unsigned long data) { struct xenvif *vif = (struct xenvif *)data; tx_add_credit(vif); - xenvif_check_rx_xenvif(vif); + xenvif_napi_schedule_or_enable_events(vif); } static void xenvif_tx_err(struct xenvif *vif, diff --git a/drivers/of/base.c b/drivers/of/base.c index 6d4ee22708c9..32e969d95319 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -1831,6 +1831,10 @@ int of_update_property(struct device_node *np, struct property *newprop) if (!found) return -ENODEV; + /* At early boot, bail out and defer setup to of_init() */ + if (!of_kset) + return found ? 0 : -ENODEV; + /* Update the sysfs attribute */ sysfs_remove_bin_file(&np->kobj, &oldprop->attr); __of_add_property_sysfs(np, newprop); diff --git a/drivers/pci/access.c b/drivers/pci/access.c index 7f8b78c08879..8c148f39e8d7 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -148,7 +148,7 @@ static noinline void pci_wait_cfg(struct pci_dev *dev) int pci_user_read_config_##size \ (struct pci_dev *dev, int pos, type *val) \ { \ - int ret = 0; \ + int ret = PCIBIOS_SUCCESSFUL; \ u32 data = -1; \ if (PCI_##size##_BAD) \ return -EINVAL; \ @@ -159,9 +159,7 @@ int pci_user_read_config_##size \ pos, sizeof(type), &data); \ raw_spin_unlock_irq(&pci_lock); \ *val = (type)data; \ - if (ret > 0) \ - ret = -EINVAL; \ - return ret; \ + return pcibios_err_to_errno(ret); \ } \ EXPORT_SYMBOL_GPL(pci_user_read_config_##size); @@ -170,7 +168,7 @@ EXPORT_SYMBOL_GPL(pci_user_read_config_##size); int pci_user_write_config_##size \ (struct pci_dev *dev, int pos, type val) \ { \ - int ret = -EIO; \ + int ret = PCIBIOS_SUCCESSFUL; \ if (PCI_##size##_BAD) \ return -EINVAL; \ raw_spin_lock_irq(&pci_lock); \ @@ -179,9 +177,7 @@ int pci_user_write_config_##size \ ret = dev->bus->ops->write(dev->bus, dev->devfn, \ pos, sizeof(type), val); \ raw_spin_unlock_irq(&pci_lock); \ - if (ret > 0) \ - ret = -EINVAL; \ - return ret; \ + return pcibios_err_to_errno(ret); \ } \ EXPORT_SYMBOL_GPL(pci_user_write_config_##size); diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index fb8aed307c28..447d393725e1 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -13,7 +13,6 @@ #include <linux/errno.h> #include <linux/ioport.h> #include <linux/proc_fs.h> -#include <linux/init.h> #include <linux/slab.h> #include "pci.h" @@ -236,7 +235,7 @@ void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { } * * This adds add sysfs entries and start device drivers */ -int pci_bus_add_device(struct pci_dev *dev) +void pci_bus_add_device(struct pci_dev *dev) { int retval; @@ -253,8 +252,6 @@ int pci_bus_add_device(struct pci_dev *dev) WARN_ON(retval < 0); dev->is_added = 1; - - return 0; } /** @@ -267,16 +264,12 @@ void pci_bus_add_devices(const struct pci_bus *bus) { struct pci_dev *dev; struct pci_bus *child; - int retval; list_for_each_entry(dev, &bus->devices, bus_list) { /* Skip already-added devices */ if (dev->is_added) continue; - retval = pci_bus_add_device(dev); - if (retval) - dev_err(&dev->dev, "Error adding device (%d)\n", - retval); + pci_bus_add_device(dev); } list_for_each_entry(dev, &bus->devices, bus_list) { diff --git a/drivers/pci/host-bridge.c b/drivers/pci/host-bridge.c index 47aaf22d814e..0e5f3c95af5b 100644 --- 
a/drivers/pci/host-bridge.c +++ b/drivers/pci/host-bridge.c @@ -3,7 +3,6 @@ */ #include <linux/kernel.h> -#include <linux/init.h> #include <linux/pci.h> #include <linux/module.h> diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig index a6f67ec8882f..21df477be0c8 100644 --- a/drivers/pci/host/Kconfig +++ b/drivers/pci/host/Kconfig @@ -33,4 +33,17 @@ config PCI_RCAR_GEN2 There are 3 internal PCI controllers available with a single built-in EHCI/OHCI host controller present on each one. +config PCI_RCAR_GEN2_PCIE + bool "Renesas R-Car PCIe controller" + depends on ARCH_SHMOBILE || (ARM && COMPILE_TEST) + help + Say Y here if you want PCIe controller support on R-Car Gen2 SoCs. + +config PCI_HOST_GENERIC + bool "Generic PCI host controller" + depends on ARM && OF + help + Say Y here if you want to support a simple generic PCI host + controller, such as the one emulated by kvmtool. + endmenu diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile index 13fb3333aa05..611ba4b48c94 100644 --- a/drivers/pci/host/Makefile +++ b/drivers/pci/host/Makefile @@ -4,3 +4,5 @@ obj-$(CONFIG_PCI_IMX6) += pci-imx6.o obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o +obj-$(CONFIG_PCI_RCAR_GEN2_PCIE) += pcie-rcar.o +obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c index 3de6bfbbe8e9..1632661c5b7f 100644 --- a/drivers/pci/host/pci-exynos.c +++ b/drivers/pci/host/pci-exynos.c @@ -415,9 +415,7 @@ static irqreturn_t exynos_pcie_msi_irq_handler(int irq, void *arg) { struct pcie_port *pp = arg; - dw_handle_msi_irq(pp); - - return IRQ_HANDLED; + return dw_handle_msi_irq(pp); } static void exynos_pcie_msi_init(struct pcie_port *pp) @@ -511,7 +509,8 @@ static struct pcie_host_ops exynos_pcie_host_ops = { .host_init = exynos_pcie_host_init, }; -static int add_pcie_port(struct pcie_port *pp, struct platform_device *pdev) +static int __init add_pcie_port(struct pcie_port *pp, + struct platform_device *pdev) { int ret; @@ -568,10 +567,8 @@ static int __init exynos_pcie_probe(struct platform_device *pdev) exynos_pcie = devm_kzalloc(&pdev->dev, sizeof(*exynos_pcie), GFP_KERNEL); - if (!exynos_pcie) { - dev_err(&pdev->dev, "no memory for exynos pcie\n"); + if (!exynos_pcie) return -ENOMEM; - } pp = &exynos_pcie->pp; diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c new file mode 100644 index 000000000000..44fe6aa6a43f --- /dev/null +++ b/drivers/pci/host/pci-host-generic.c @@ -0,0 +1,388 @@ +/* + * Simple, generic PCI host controller driver targetting firmware-initialised + * systems and virtual machines (e.g. the PCI emulation provided by kvmtool). + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ * + * Copyright (C) 2014 ARM Limited + * + * Author: Will Deacon <will.deacon@arm.com> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_pci.h> +#include <linux/platform_device.h> + +struct gen_pci_cfg_bus_ops { + u32 bus_shift; + void __iomem *(*map_bus)(struct pci_bus *, unsigned int, int); +}; + +struct gen_pci_cfg_windows { + struct resource res; + struct resource bus_range; + void __iomem **win; + + const struct gen_pci_cfg_bus_ops *ops; +}; + +struct gen_pci { + struct pci_host_bridge host; + struct gen_pci_cfg_windows cfg; + struct list_head resources; +}; + +static void __iomem *gen_pci_map_cfg_bus_cam(struct pci_bus *bus, + unsigned int devfn, + int where) +{ + struct pci_sys_data *sys = bus->sysdata; + struct gen_pci *pci = sys->private_data; + resource_size_t idx = bus->number - pci->cfg.bus_range.start; + + return pci->cfg.win[idx] + ((devfn << 8) | where); +} + +static struct gen_pci_cfg_bus_ops gen_pci_cfg_cam_bus_ops = { + .bus_shift = 16, + .map_bus = gen_pci_map_cfg_bus_cam, +}; + +static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus, + unsigned int devfn, + int where) +{ + struct pci_sys_data *sys = bus->sysdata; + struct gen_pci *pci = sys->private_data; + resource_size_t idx = bus->number - pci->cfg.bus_range.start; + + return pci->cfg.win[idx] + ((devfn << 12) | where); +} + +static struct gen_pci_cfg_bus_ops gen_pci_cfg_ecam_bus_ops = { + .bus_shift = 20, + .map_bus = gen_pci_map_cfg_bus_ecam, +}; + +static int gen_pci_config_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + void __iomem *addr; + struct pci_sys_data *sys = bus->sysdata; + struct gen_pci *pci = sys->private_data; + + addr = pci->cfg.ops->map_bus(bus, devfn, where); + + switch (size) { + case 1: + *val = readb(addr); + break; + case 2: + *val = readw(addr); + break; + default: + *val = readl(addr); + } + + return PCIBIOS_SUCCESSFUL; +} + +static int gen_pci_config_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + void __iomem *addr; + struct pci_sys_data *sys = bus->sysdata; + struct gen_pci *pci = sys->private_data; + + addr = pci->cfg.ops->map_bus(bus, devfn, where); + + switch (size) { + case 1: + writeb(val, addr); + break; + case 2: + writew(val, addr); + break; + default: + writel(val, addr); + } + + return PCIBIOS_SUCCESSFUL; +} + +static struct pci_ops gen_pci_ops = { + .read = gen_pci_config_read, + .write = gen_pci_config_write, +}; + +static const struct of_device_id gen_pci_of_match[] = { + { .compatible = "pci-host-cam-generic", + .data = &gen_pci_cfg_cam_bus_ops }, + + { .compatible = "pci-host-ecam-generic", + .data = &gen_pci_cfg_ecam_bus_ops }, + + { }, +}; +MODULE_DEVICE_TABLE(of, gen_pci_of_match); + +static int gen_pci_calc_io_offset(struct device *dev, + struct of_pci_range *range, + struct resource *res, + resource_size_t *offset) +{ + static atomic_t wins = ATOMIC_INIT(0); + int err, idx, max_win; + unsigned int window; + + if (!PAGE_ALIGNED(range->cpu_addr)) + return -EINVAL; + + max_win = (IO_SPACE_LIMIT + 1) / SZ_64K; + idx = atomic_inc_return(&wins); + if (idx > max_win) + return -ENOSPC; + + window = (idx - 1) * SZ_64K; + err = pci_ioremap_io(window, range->cpu_addr); + if (err) + return err; + + of_pci_range_to_resource(range, dev->of_node, res); + res->start = window; + res->end = res->start + range->size - 1; + *offset = window - range->pci_addr; + return 0; +} + +static int gen_pci_calc_mem_offset(struct device *dev, + struct 
of_pci_range *range, + struct resource *res, + resource_size_t *offset) +{ + of_pci_range_to_resource(range, dev->of_node, res); + *offset = range->cpu_addr - range->pci_addr; + return 0; +} + +static void gen_pci_release_of_pci_ranges(struct gen_pci *pci) +{ + struct pci_host_bridge_window *win; + + list_for_each_entry(win, &pci->resources, list) + release_resource(win->res); + + pci_free_resource_list(&pci->resources); +} + +static int gen_pci_parse_request_of_pci_ranges(struct gen_pci *pci) +{ + struct of_pci_range range; + struct of_pci_range_parser parser; + int err, res_valid = 0; + struct device *dev = pci->host.dev.parent; + struct device_node *np = dev->of_node; + + if (of_pci_range_parser_init(&parser, np)) { + dev_err(dev, "missing \"ranges\" property\n"); + return -EINVAL; + } + + for_each_of_pci_range(&parser, &range) { + struct resource *parent, *res; + resource_size_t offset; + u32 restype = range.flags & IORESOURCE_TYPE_BITS; + + res = devm_kmalloc(dev, sizeof(*res), GFP_KERNEL); + if (!res) { + err = -ENOMEM; + goto out_release_res; + } + + switch (restype) { + case IORESOURCE_IO: + parent = &ioport_resource; + err = gen_pci_calc_io_offset(dev, &range, res, &offset); + break; + case IORESOURCE_MEM: + parent = &iomem_resource; + err = gen_pci_calc_mem_offset(dev, &range, res, &offset); + res_valid |= !(res->flags & IORESOURCE_PREFETCH || err); + break; + default: + err = -EINVAL; + continue; + } + + if (err) { + dev_warn(dev, + "error %d: failed to add resource [type 0x%x, %lld bytes]\n", + err, restype, range.size); + continue; + } + + err = request_resource(parent, res); + if (err) + goto out_release_res; + + pci_add_resource_offset(&pci->resources, res, offset); + } + + if (!res_valid) { + dev_err(dev, "non-prefetchable memory resource required\n"); + err = -EINVAL; + goto out_release_res; + } + + return 0; + +out_release_res: + gen_pci_release_of_pci_ranges(pci); + return err; +} + +static int gen_pci_parse_map_cfg_windows(struct gen_pci *pci) +{ + int err; + u8 bus_max; + resource_size_t busn; + struct resource *bus_range; + struct device *dev = pci->host.dev.parent; + struct device_node *np = dev->of_node; + + if (of_pci_parse_bus_range(np, &pci->cfg.bus_range)) + pci->cfg.bus_range = (struct resource) { + .name = np->name, + .start = 0, + .end = 0xff, + .flags = IORESOURCE_BUS, + }; + + err = of_address_to_resource(np, 0, &pci->cfg.res); + if (err) { + dev_err(dev, "missing \"reg\" property\n"); + return err; + } + + pci->cfg.win = devm_kcalloc(dev, resource_size(&pci->cfg.bus_range), + sizeof(*pci->cfg.win), GFP_KERNEL); + if (!pci->cfg.win) + return -ENOMEM; + + /* Limit the bus-range to fit within reg */ + bus_max = pci->cfg.bus_range.start + + (resource_size(&pci->cfg.res) >> pci->cfg.ops->bus_shift) - 1; + pci->cfg.bus_range.end = min_t(resource_size_t, pci->cfg.bus_range.end, + bus_max); + + /* Map our Configuration Space windows */ + if (!devm_request_mem_region(dev, pci->cfg.res.start, + resource_size(&pci->cfg.res), + "Configuration Space")) + return -ENOMEM; + + bus_range = &pci->cfg.bus_range; + for (busn = bus_range->start; busn <= bus_range->end; ++busn) { + u32 idx = busn - bus_range->start; + u32 sz = 1 << pci->cfg.ops->bus_shift; + + pci->cfg.win[idx] = devm_ioremap(dev, + pci->cfg.res.start + busn * sz, + sz); + if (!pci->cfg.win[idx]) + return -ENOMEM; + } + + /* Register bus resource */ + pci_add_resource(&pci->resources, bus_range); + return 0; +} + +static int gen_pci_setup(int nr, struct pci_sys_data *sys) +{ + struct gen_pci *pci = 
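/* private_data is handed to pci_common_init_dev() via struct hw_pci in gen_pci_probe() below */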
sys->private_data; + list_splice_init(&pci->resources, &sys->resources); + return 1; +} + +static int gen_pci_probe(struct platform_device *pdev) +{ + int err; + const char *type; + const struct of_device_id *of_id; + const int *prop; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct gen_pci *pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); + struct hw_pci hw = { + .nr_controllers = 1, + .private_data = (void **)&pci, + .setup = gen_pci_setup, + .map_irq = of_irq_parse_and_map_pci, + .ops = &gen_pci_ops, + }; + + if (!pci) + return -ENOMEM; + + type = of_get_property(np, "device_type", NULL); + if (!type || strcmp(type, "pci")) { + dev_err(dev, "invalid \"device_type\" %s\n", type); + return -EINVAL; + } + + prop = of_get_property(of_chosen, "linux,pci-probe-only", NULL); + if (prop) { + if (*prop) + pci_add_flags(PCI_PROBE_ONLY); + else + pci_clear_flags(PCI_PROBE_ONLY); + } + + of_id = of_match_node(gen_pci_of_match, np); + pci->cfg.ops = of_id->data; + pci->host.dev.parent = dev; + INIT_LIST_HEAD(&pci->host.windows); + INIT_LIST_HEAD(&pci->resources); + + /* Parse our PCI ranges and request their resources */ + err = gen_pci_parse_request_of_pci_ranges(pci); + if (err) + return err; + + /* Parse and map our Configuration Space windows */ + err = gen_pci_parse_map_cfg_windows(pci); + if (err) { + gen_pci_release_of_pci_ranges(pci); + return err; + } + + pci_common_init_dev(dev, &hw); + return 0; +} + +static struct platform_driver gen_pci_driver = { + .driver = { + .name = "pci-host-generic", + .owner = THIS_MODULE, + .of_match_table = gen_pci_of_match, + }, + .probe = gen_pci_probe, +}; +module_platform_driver(gen_pci_driver); + +MODULE_DESCRIPTION("Generic PCI host driver"); +MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>"); +MODULE_LICENSE("GPLv2"); diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c index ee082509b0ba..a5645ae4aef0 100644 --- a/drivers/pci/host/pci-imx6.c +++ b/drivers/pci/host/pci-imx6.c @@ -25,6 +25,7 @@ #include <linux/resource.h> #include <linux/signal.h> #include <linux/types.h> +#include <linux/interrupt.h> #include "pcie-designware.h" @@ -32,13 +33,9 @@ struct imx6_pcie { int reset_gpio; - int power_on_gpio; - int wake_up_gpio; - int disable_gpio; - struct clk *lvds_gate; - struct clk *sata_ref_100m; - struct clk *pcie_ref_125m; - struct clk *pcie_axi; + struct clk *pcie_bus; + struct clk *pcie_phy; + struct clk *pcie; struct pcie_port pp; struct regmap *iomuxc_gpr; void __iomem *mem_base; @@ -231,36 +228,27 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp) struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp); int ret; - if (gpio_is_valid(imx6_pcie->power_on_gpio)) - gpio_set_value(imx6_pcie->power_on_gpio, 1); - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18); regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16); - ret = clk_prepare_enable(imx6_pcie->sata_ref_100m); - if (ret) { - dev_err(pp->dev, "unable to enable sata_ref_100m\n"); - goto err_sata_ref; - } - - ret = clk_prepare_enable(imx6_pcie->pcie_ref_125m); + ret = clk_prepare_enable(imx6_pcie->pcie_phy); if (ret) { - dev_err(pp->dev, "unable to enable pcie_ref_125m\n"); - goto err_pcie_ref; + dev_err(pp->dev, "unable to enable pcie_phy clock\n"); + goto err_pcie_phy; } - ret = clk_prepare_enable(imx6_pcie->lvds_gate); + ret = clk_prepare_enable(imx6_pcie->pcie_bus); if (ret) { - dev_err(pp->dev, "unable to enable lvds_gate\n"); - goto err_lvds_gate; + 
dev_err(pp->dev, "unable to enable pcie_bus clock\n"); + goto err_pcie_bus; } - ret = clk_prepare_enable(imx6_pcie->pcie_axi); + ret = clk_prepare_enable(imx6_pcie->pcie); if (ret) { - dev_err(pp->dev, "unable to enable pcie_axi\n"); - goto err_pcie_axi; + dev_err(pp->dev, "unable to enable pcie clock\n"); + goto err_pcie; } /* allow the clocks to stabilize */ @@ -274,13 +262,11 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp) } return 0; -err_pcie_axi: - clk_disable_unprepare(imx6_pcie->lvds_gate); -err_lvds_gate: - clk_disable_unprepare(imx6_pcie->pcie_ref_125m); -err_pcie_ref: - clk_disable_unprepare(imx6_pcie->sata_ref_100m); -err_sata_ref: +err_pcie: + clk_disable_unprepare(imx6_pcie->pcie_bus); +err_pcie_bus: + clk_disable_unprepare(imx6_pcie->pcie_phy); +err_pcie_phy: return ret; } @@ -329,6 +315,13 @@ static int imx6_pcie_wait_for_link(struct pcie_port *pp) return 0; } +static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg) +{ + struct pcie_port *pp = arg; + + return dw_handle_msi_irq(pp); +} + static int imx6_pcie_start_link(struct pcie_port *pp) { struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp); @@ -403,6 +396,9 @@ static void imx6_pcie_host_init(struct pcie_port *pp) dw_pcie_setup_rc(pp); imx6_pcie_start_link(pp); + + if (IS_ENABLED(CONFIG_PCI_MSI)) + dw_pcie_msi_init(pp); } static void imx6_pcie_reset_phy(struct pcie_port *pp) @@ -487,15 +483,25 @@ static struct pcie_host_ops imx6_pcie_host_ops = { .host_init = imx6_pcie_host_init, }; -static int imx6_add_pcie_port(struct pcie_port *pp, +static int __init imx6_add_pcie_port(struct pcie_port *pp, struct platform_device *pdev) { int ret; - pp->irq = platform_get_irq(pdev, 0); - if (!pp->irq) { - dev_err(&pdev->dev, "failed to get irq\n"); - return -ENODEV; + if (IS_ENABLED(CONFIG_PCI_MSI)) { + pp->msi_irq = platform_get_irq_byname(pdev, "msi"); + if (pp->msi_irq <= 0) { + dev_err(&pdev->dev, "failed to get MSI irq\n"); + return -ENODEV; + } + + ret = devm_request_irq(&pdev->dev, pp->msi_irq, + imx6_pcie_msi_handler, + IRQF_SHARED, "mx6-pcie-msi", pp); + if (ret) { + dev_err(&pdev->dev, "failed to request MSI irq\n"); + return -ENODEV; + } } pp->root_bus_nr = -1; @@ -546,69 +552,26 @@ static int __init imx6_pcie_probe(struct platform_device *pdev) } } - imx6_pcie->power_on_gpio = of_get_named_gpio(np, "power-on-gpio", 0); - if (gpio_is_valid(imx6_pcie->power_on_gpio)) { - ret = devm_gpio_request_one(&pdev->dev, - imx6_pcie->power_on_gpio, - GPIOF_OUT_INIT_LOW, - "PCIe power enable"); - if (ret) { - dev_err(&pdev->dev, "unable to get power-on gpio\n"); - return ret; - } - } - - imx6_pcie->wake_up_gpio = of_get_named_gpio(np, "wake-up-gpio", 0); - if (gpio_is_valid(imx6_pcie->wake_up_gpio)) { - ret = devm_gpio_request_one(&pdev->dev, - imx6_pcie->wake_up_gpio, - GPIOF_IN, - "PCIe wake up"); - if (ret) { - dev_err(&pdev->dev, "unable to get wake-up gpio\n"); - return ret; - } - } - - imx6_pcie->disable_gpio = of_get_named_gpio(np, "disable-gpio", 0); - if (gpio_is_valid(imx6_pcie->disable_gpio)) { - ret = devm_gpio_request_one(&pdev->dev, - imx6_pcie->disable_gpio, - GPIOF_OUT_INIT_HIGH, - "PCIe disable endpoint"); - if (ret) { - dev_err(&pdev->dev, "unable to get disable-ep gpio\n"); - return ret; - } - } - /* Fetch clocks */ - imx6_pcie->lvds_gate = devm_clk_get(&pdev->dev, "lvds_gate"); - if (IS_ERR(imx6_pcie->lvds_gate)) { - dev_err(&pdev->dev, - "lvds_gate clock select missing or invalid\n"); - return PTR_ERR(imx6_pcie->lvds_gate); - } - - imx6_pcie->sata_ref_100m = devm_clk_get(&pdev->dev, 
"sata_ref_100m"); - if (IS_ERR(imx6_pcie->sata_ref_100m)) { + imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy"); + if (IS_ERR(imx6_pcie->pcie_phy)) { dev_err(&pdev->dev, - "sata_ref_100m clock source missing or invalid\n"); - return PTR_ERR(imx6_pcie->sata_ref_100m); + "pcie_phy clock source missing or invalid\n"); + return PTR_ERR(imx6_pcie->pcie_phy); } - imx6_pcie->pcie_ref_125m = devm_clk_get(&pdev->dev, "pcie_ref_125m"); - if (IS_ERR(imx6_pcie->pcie_ref_125m)) { + imx6_pcie->pcie_bus = devm_clk_get(&pdev->dev, "pcie_bus"); + if (IS_ERR(imx6_pcie->pcie_bus)) { dev_err(&pdev->dev, - "pcie_ref_125m clock source missing or invalid\n"); - return PTR_ERR(imx6_pcie->pcie_ref_125m); + "pcie_bus clock source missing or invalid\n"); + return PTR_ERR(imx6_pcie->pcie_bus); } - imx6_pcie->pcie_axi = devm_clk_get(&pdev->dev, "pcie_axi"); - if (IS_ERR(imx6_pcie->pcie_axi)) { + imx6_pcie->pcie = devm_clk_get(&pdev->dev, "pcie"); + if (IS_ERR(imx6_pcie->pcie)) { dev_err(&pdev->dev, - "pcie_axi clock source missing or invalid\n"); - return PTR_ERR(imx6_pcie->pcie_axi); + "pcie clock source missing or invalid\n"); + return PTR_ERR(imx6_pcie->pcie); } /* Grab GPR config register range */ diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c index d3d1cfd51e09..e384e2534594 100644 --- a/drivers/pci/host/pci-mvebu.c +++ b/drivers/pci/host/pci-mvebu.c @@ -293,6 +293,58 @@ static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port, return PCIBIOS_SUCCESSFUL; } +/* + * Remove windows, starting from the largest ones to the smallest + * ones. + */ +static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port, + phys_addr_t base, size_t size) +{ + while (size) { + size_t sz = 1 << (fls(size) - 1); + + mvebu_mbus_del_window(base, sz); + base += sz; + size -= sz; + } +} + +/* + * MBus windows can only have a power of two size, but PCI BARs do not + * have this constraint. Therefore, we have to split the PCI BAR into + * areas each having a power of two size. We start from the largest + * one (i.e highest order bit set in the size). 
+ */ +static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port, + unsigned int target, unsigned int attribute, + phys_addr_t base, size_t size, + phys_addr_t remap) +{ + size_t size_mapped = 0; + + while (size) { + size_t sz = 1 << (fls(size) - 1); + int ret; + + ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base, + sz, remap); + if (ret) { + dev_err(&port->pcie->pdev->dev, + "Could not create MBus window at 0x%x, size 0x%x: %d\n", + base, sz, ret); + mvebu_pcie_del_windows(port, base - size_mapped, + size_mapped); + return; + } + + size -= sz; + size_mapped += sz; + base += sz; + if (remap != MVEBU_MBUS_NO_REMAP) + remap += sz; + } +} + static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) { phys_addr_t iobase; @@ -304,8 +356,8 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) /* If a window was configured, remove it */ if (port->iowin_base) { - mvebu_mbus_del_window(port->iowin_base, - port->iowin_size); + mvebu_pcie_del_windows(port, port->iowin_base, + port->iowin_size); port->iowin_base = 0; port->iowin_size = 0; } @@ -331,11 +383,11 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) port->iowin_base = port->pcie->io.start + iobase; port->iowin_size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) | (port->bridge.iolimitupper << 16)) - - iobase); + iobase) + 1; - mvebu_mbus_add_window_remap_by_id(port->io_target, port->io_attr, - port->iowin_base, port->iowin_size, - iobase); + mvebu_pcie_add_windows(port, port->io_target, port->io_attr, + port->iowin_base, port->iowin_size, + iobase); } static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port) @@ -346,8 +398,8 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port) /* If a window was configured, remove it */ if (port->memwin_base) { - mvebu_mbus_del_window(port->memwin_base, - port->memwin_size); + mvebu_pcie_del_windows(port, port->memwin_base, + port->memwin_size); port->memwin_base = 0; port->memwin_size = 0; } @@ -364,10 +416,11 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port) port->memwin_base = ((port->bridge.membase & 0xFFF0) << 16); port->memwin_size = (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) - - port->memwin_base; + port->memwin_base + 1; - mvebu_mbus_add_window_by_id(port->mem_target, port->mem_attr, - port->memwin_base, port->memwin_size); + mvebu_pcie_add_windows(port, port->mem_target, port->mem_attr, + port->memwin_base, port->memwin_size, + MVEBU_MBUS_NO_REMAP); } /* @@ -743,14 +796,21 @@ static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev, /* * On the PCI-to-PCI bridge side, the I/O windows must have at - * least a 64 KB size and be aligned on their size, and the - * memory windows must have at least a 1 MB size and be - * aligned on their size + * least a 64 KB size and the memory windows must have at + * least a 1 MB size. Moreover, MBus windows need to have a + * base address aligned on their size, and their size must be + * a power of two. This means that if the BAR doesn't have a + * power of two size, several MBus windows will actually be + * created. We need to ensure that the biggest MBus window + * (which will be the first one) is aligned on its size, which + * explains the rounddown_pow_of_two() being done here. 
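+ * E.g. a hypothetical 9 MiB memory BAR is aligned on 8 MiB, + * rounddown_pow_of_two(9 MiB), the size of the largest MBus window + * that will be created for it.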
*/ if (res->flags & IORESOURCE_IO) - return round_up(start, max_t(resource_size_t, SZ_64K, size)); + return round_up(start, max_t(resource_size_t, SZ_64K, + rounddown_pow_of_two(size))); else if (res->flags & IORESOURCE_MEM) - return round_up(start, max_t(resource_size_t, SZ_1M, size)); + return round_up(start, max_t(resource_size_t, SZ_1M, + rounddown_pow_of_two(size))); else return start; } diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c index 4fe349dcaf59..3ef854f5a5b5 100644 --- a/drivers/pci/host/pci-rcar-gen2.c +++ b/drivers/pci/host/pci-rcar-gen2.c @@ -99,6 +99,7 @@ struct rcar_pci_priv { struct resource io_res; struct resource mem_res; struct resource *cfg_res; + unsigned busnr; int irq; unsigned long window_size; }; @@ -318,8 +319,8 @@ static int rcar_pci_setup(int nr, struct pci_sys_data *sys) pci_add_resource(&sys->resources, &priv->io_res); pci_add_resource(&sys->resources, &priv->mem_res); - /* Setup bus number based on platform device id */ - sys->busnr = to_platform_device(priv->dev)->id; + /* Setup bus number based on platform device id / of bus-range */ + sys->busnr = priv->busnr; return 1; } @@ -372,6 +373,23 @@ static int rcar_pci_probe(struct platform_device *pdev) priv->window_size = SZ_1G; + if (pdev->dev.of_node) { + struct resource busnr; + int ret; + + ret = of_pci_parse_bus_range(pdev->dev.of_node, &busnr); + if (ret < 0) { + dev_err(&pdev->dev, "failed to parse bus-range\n"); + return ret; + } + + priv->busnr = busnr.start; + if (busnr.end != busnr.start) + dev_warn(&pdev->dev, "only one bus number supported\n"); + } else { + priv->busnr = pdev->id; + } + hw_private[0] = priv; memset(&hw, 0, sizeof(hw)); hw.nr_controllers = ARRAY_SIZE(hw_private); @@ -383,11 +401,20 @@ static int rcar_pci_probe(struct platform_device *pdev) return 0; } +static struct of_device_id rcar_pci_of_match[] = { + { .compatible = "renesas,pci-r8a7790", }, + { .compatible = "renesas,pci-r8a7791", }, + { }, +}; + +MODULE_DEVICE_TABLE(of, rcar_pci_of_match); + static struct platform_driver rcar_pci_driver = { .driver = { .name = "pci-rcar-gen2", .owner = THIS_MODULE, .suppress_bind_attrs = true, + .of_match_table = rcar_pci_of_match, }, .probe = rcar_pci_probe, }; diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c index c4e373294476..e3bf9e6d5d9a 100644 --- a/drivers/pci/host/pcie-designware.c +++ b/drivers/pci/host/pcie-designware.c @@ -156,15 +156,17 @@ static struct irq_chip dw_msi_irq_chip = { }; /* MSI int handler */ -void dw_handle_msi_irq(struct pcie_port *pp) +irqreturn_t dw_handle_msi_irq(struct pcie_port *pp) { unsigned long val; int i, pos, irq; + irqreturn_t ret = IRQ_NONE; for (i = 0; i < MAX_MSI_CTRLS; i++) { dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4, (u32 *)&val); if (val) { + ret = IRQ_HANDLED; pos = 0; while ((pos = find_next_bit(&val, 32, pos)) != 32) { irq = irq_find_mapping(pp->irq_domain, @@ -177,6 +179,8 @@ void dw_handle_msi_irq(struct pcie_port *pp) } } } + + return ret; } void dw_pcie_msi_init(struct pcie_port *pp) diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h index 3063b3594d88..a169d22d517e 100644 --- a/drivers/pci/host/pcie-designware.h +++ b/drivers/pci/host/pcie-designware.h @@ -68,7 +68,7 @@ struct pcie_host_ops { int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val); int dw_pcie_cfg_write(void __iomem *addr, int where, int size, u32 val); -void dw_handle_msi_irq(struct pcie_port *pp); +irqreturn_t dw_handle_msi_irq(struct 
pcie_port *pp); void dw_pcie_msi_init(struct pcie_port *pp); int dw_pcie_link_up(struct pcie_port *pp); void dw_pcie_setup_rc(struct pcie_port *pp); diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c new file mode 100644 index 000000000000..8e06124aa80f --- /dev/null +++ b/drivers/pci/host/pcie-rcar.c @@ -0,0 +1,1008 @@ +/* + * PCIe driver for Renesas R-Car SoCs + * Copyright (C) 2014 Renesas Electronics Europe Ltd + * + * Based on: + * arch/sh/drivers/pci/pcie-sh7786.c + * arch/sh/drivers/pci/ops-sh7786.c + * Copyright (C) 2009 - 2011 Paul Mundt + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/msi.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_pci.h> +#include <linux/of_platform.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#define DRV_NAME "rcar-pcie" + +#define PCIECAR 0x000010 +#define PCIECCTLR 0x000018 +#define CONFIG_SEND_ENABLE (1 << 31) +#define TYPE0 (0 << 8) +#define TYPE1 (1 << 8) +#define PCIECDR 0x000020 +#define PCIEMSR 0x000028 +#define PCIEINTXR 0x000400 +#define PCIEMSITXR 0x000840 + +/* Transfer control */ +#define PCIETCTLR 0x02000 +#define CFINIT 1 +#define PCIETSTR 0x02004 +#define DATA_LINK_ACTIVE 1 +#define PCIEERRFR 0x02020 +#define UNSUPPORTED_REQUEST (1 << 4) +#define PCIEMSIFR 0x02044 +#define PCIEMSIALR 0x02048 +#define MSIFE 1 +#define PCIEMSIAUR 0x0204c +#define PCIEMSIIER 0x02050 + +/* root port address */ +#define PCIEPRAR(x) (0x02080 + ((x) * 0x4)) + +/* local address reg & mask */ +#define PCIELAR(x) (0x02200 + ((x) * 0x20)) +#define PCIELAMR(x) (0x02208 + ((x) * 0x20)) +#define LAM_PREFETCH (1 << 3) +#define LAM_64BIT (1 << 2) +#define LAR_ENABLE (1 << 1) + +/* PCIe address reg & mask */ +#define PCIEPARL(x) (0x03400 + ((x) * 0x20)) +#define PCIEPARH(x) (0x03404 + ((x) * 0x20)) +#define PCIEPAMR(x) (0x03408 + ((x) * 0x20)) +#define PCIEPTCTLR(x) (0x0340c + ((x) * 0x20)) +#define PAR_ENABLE (1 << 31) +#define IO_SPACE (1 << 8) + +/* Configuration */ +#define PCICONF(x) (0x010000 + ((x) * 0x4)) +#define PMCAP(x) (0x010040 + ((x) * 0x4)) +#define EXPCAP(x) (0x010070 + ((x) * 0x4)) +#define VCCAP(x) (0x010100 + ((x) * 0x4)) + +/* link layer */ +#define IDSETR1 0x011004 +#define TLCTLR 0x011048 +#define MACSR 0x011054 +#define MACCTLR 0x011058 +#define SCRAMBLE_DISABLE (1 << 27) + +/* R-Car H1 PHY */ +#define H1_PCIEPHYADRR 0x04000c +#define WRITE_CMD (1 << 16) +#define PHY_ACK (1 << 24) +#define RATE_POS 12 +#define LANE_POS 8 +#define ADR_POS 0 +#define H1_PCIEPHYDOUTR 0x040014 +#define H1_PCIEPHYSR 0x040018 + +#define INT_PCI_MSI_NR 32 + +#define RCONF(x) (PCICONF(0)+(x)) +#define RPMCAP(x) (PMCAP(0)+(x)) +#define REXPCAP(x) (EXPCAP(0)+(x)) +#define RVCCAP(x) (VCCAP(0)+(x)) + +#define PCIE_CONF_BUS(b) (((b) & 0xff) << 24) +#define PCIE_CONF_DEV(d) (((d) & 0x1f) << 19) +#define PCIE_CONF_FUNC(f) (((f) & 0x7) << 16) + +#define PCI_MAX_RESOURCES 4 +#define MAX_NR_INBOUND_MAPS 6 + +struct rcar_msi { + DECLARE_BITMAP(used, INT_PCI_MSI_NR); + struct irq_domain *domain; + struct msi_chip chip; + unsigned long pages; + struct mutex lock; + int irq1; + int irq2; +}; + +static inline struct rcar_msi 
*to_rcar_msi(struct msi_chip *chip) +{ + return container_of(chip, struct rcar_msi, chip); +} + +/* Structure representing the PCIe interface */ +struct rcar_pcie { + struct device *dev; + void __iomem *base; + struct resource res[PCI_MAX_RESOURCES]; + struct resource busn; + int root_bus_nr; + struct clk *clk; + struct clk *bus_clk; + struct rcar_msi msi; +}; + +static inline struct rcar_pcie *sys_to_pcie(struct pci_sys_data *sys) +{ + return sys->private_data; +} + +static void pci_write_reg(struct rcar_pcie *pcie, unsigned long val, + unsigned long reg) +{ + writel(val, pcie->base + reg); +} + +static unsigned long pci_read_reg(struct rcar_pcie *pcie, unsigned long reg) +{ + return readl(pcie->base + reg); +} + +enum { + PCI_ACCESS_READ, + PCI_ACCESS_WRITE, +}; + +static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data) +{ + int shift = 8 * (where & 3); + u32 val = pci_read_reg(pcie, where & ~3); + + val &= ~(mask << shift); + val |= data << shift; + pci_write_reg(pcie, val, where & ~3); +} + +static u32 rcar_read_conf(struct rcar_pcie *pcie, int where) +{ + int shift = 8 * (where & 3); + u32 val = pci_read_reg(pcie, where & ~3); + + return val >> shift; +} + +/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */ +static int rcar_pcie_config_access(struct rcar_pcie *pcie, + unsigned char access_type, struct pci_bus *bus, + unsigned int devfn, int where, u32 *data) +{ + int dev, func, reg, index; + + dev = PCI_SLOT(devfn); + func = PCI_FUNC(devfn); + reg = where & ~3; + index = reg / 4; + + /* + * While each channel has its own memory-mapped extended config + * space, it's generally only accessible when in endpoint mode. + * When in root complex mode, the controller is unable to target + * itself with either type 0 or type 1 accesses, and indeed, any + * controller initiated target transfer to its own config space + * result in a completer abort. + * + * Each channel effectively only supports a single device, but as + * the same channel <-> device access works for any PCI_SLOT() + * value, we cheat a bit here and bind the controller's config + * space to devfn 0 in order to enable self-enumeration. In this + * case the regular ECAR/ECDR path is sidelined and the mangled + * config access itself is initiated as an internal bus transaction. 
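+ * (A type 0 access is generated when the target bus sits directly + * below the root port, type 1 for buses further downstream; see the + * PCIECCTLR programming below.)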
+ */ + if (pci_is_root_bus(bus)) { + if (dev != 0) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (access_type == PCI_ACCESS_READ) { + *data = pci_read_reg(pcie, PCICONF(index)); + } else { + /* Keep an eye out for changes to the root bus number */ + if (pci_is_root_bus(bus) && (reg == PCI_PRIMARY_BUS)) + pcie->root_bus_nr = *data & 0xff; + + pci_write_reg(pcie, *data, PCICONF(index)); + } + + return PCIBIOS_SUCCESSFUL; + } + + if (pcie->root_bus_nr < 0) + return PCIBIOS_DEVICE_NOT_FOUND; + + /* Clear errors */ + pci_write_reg(pcie, pci_read_reg(pcie, PCIEERRFR), PCIEERRFR); + + /* Set the PIO address */ + pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) | PCIE_CONF_DEV(dev) | + PCIE_CONF_FUNC(func) | reg, PCIECAR); + + /* Enable the configuration access */ + if (bus->parent->number == pcie->root_bus_nr) + pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR); + else + pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR); + + /* Check for errors */ + if (pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST) + return PCIBIOS_DEVICE_NOT_FOUND; + + /* Check for master and target aborts */ + if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) & + (PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT)) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (access_type == PCI_ACCESS_READ) + *data = pci_read_reg(pcie, PCIECDR); + else + pci_write_reg(pcie, *data, PCIECDR); + + /* Disable the configuration access */ + pci_write_reg(pcie, 0, PCIECCTLR); + + return PCIBIOS_SUCCESSFUL; +} + +static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + struct rcar_pcie *pcie = sys_to_pcie(bus->sysdata); + int ret; + + if ((size == 2) && (where & 1)) + return PCIBIOS_BAD_REGISTER_NUMBER; + else if ((size == 4) && (where & 3)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + ret = rcar_pcie_config_access(pcie, PCI_ACCESS_READ, + bus, devfn, where, val); + if (ret != PCIBIOS_SUCCESSFUL) { + *val = 0xffffffff; + return ret; + } + + if (size == 1) + *val = (*val >> (8 * (where & 3))) & 0xff; + else if (size == 2) + *val = (*val >> (8 * (where & 2))) & 0xffff; + + dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x " + "where=0x%04x size=%d val=0x%08lx\n", bus->number, + devfn, where, size, (unsigned long)*val); + + return ret; +} + +/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */ +static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + struct rcar_pcie *pcie = sys_to_pcie(bus->sysdata); + int shift, ret; + u32 data; + + if ((size == 2) && (where & 1)) + return PCIBIOS_BAD_REGISTER_NUMBER; + else if ((size == 4) && (where & 3)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + ret = rcar_pcie_config_access(pcie, PCI_ACCESS_READ, + bus, devfn, where, &data); + if (ret != PCIBIOS_SUCCESSFUL) + return ret; + + dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x " + "where=0x%04x size=%d val=0x%08lx\n", bus->number, + devfn, where, size, (unsigned long)val); + + if (size == 1) { + shift = 8 * (where & 3); + data &= ~(0xff << shift); + data |= ((val & 0xff) << shift); + } else if (size == 2) { + shift = 8 * (where & 2); + data &= ~(0xffff << shift); + data |= ((val & 0xffff) << shift); + } else + data = val; + + ret = rcar_pcie_config_access(pcie, PCI_ACCESS_WRITE, + bus, devfn, where, &data); + + return ret; +} + +static struct pci_ops rcar_pcie_ops = { + .read = rcar_pcie_read_conf, + .write = rcar_pcie_write_conf, +}; + +static void rcar_pcie_setup_window(int win, struct resource *res, + struct 
rcar_pcie *pcie) +{ + /* Setup PCIe address space mappings for each resource */ + resource_size_t size; + u32 mask; + + pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win)); + + /* + * The PAMR mask is calculated in units of 128Bytes, which + * keeps things pretty simple. + */ + size = resource_size(res); + mask = (roundup_pow_of_two(size) / SZ_128) - 1; + pci_write_reg(pcie, mask << 7, PCIEPAMR(win)); + + pci_write_reg(pcie, upper_32_bits(res->start), PCIEPARH(win)); + pci_write_reg(pcie, lower_32_bits(res->start), PCIEPARL(win)); + + /* First resource is for IO */ + mask = PAR_ENABLE; + if (res->flags & IORESOURCE_IO) + mask |= IO_SPACE; + + pci_write_reg(pcie, mask, PCIEPTCTLR(win)); +} + +static int rcar_pcie_setup(int nr, struct pci_sys_data *sys) +{ + struct rcar_pcie *pcie = sys_to_pcie(sys); + struct resource *res; + int i; + + pcie->root_bus_nr = -1; + + /* Setup PCI resources */ + for (i = 0; i < PCI_MAX_RESOURCES; i++) { + + res = &pcie->res[i]; + if (!res->flags) + continue; + + rcar_pcie_setup_window(i, res, pcie); + + if (res->flags & IORESOURCE_IO) + pci_ioremap_io(nr * SZ_64K, res->start); + else + pci_add_resource(&sys->resources, res); + } + pci_add_resource(&sys->resources, &pcie->busn); + + return 1; +} + +static void rcar_pcie_add_bus(struct pci_bus *bus) +{ + if (IS_ENABLED(CONFIG_PCI_MSI)) { + struct rcar_pcie *pcie = sys_to_pcie(bus->sysdata); + + bus->msi = &pcie->msi.chip; + } +} + +struct hw_pci rcar_pci = { + .setup = rcar_pcie_setup, + .map_irq = of_irq_parse_and_map_pci, + .ops = &rcar_pcie_ops, + .add_bus = rcar_pcie_add_bus, +}; + +static void rcar_pcie_enable(struct rcar_pcie *pcie) +{ + struct platform_device *pdev = to_platform_device(pcie->dev); + + rcar_pci.nr_controllers = 1; + rcar_pci.private_data = (void **)&pcie; + + pci_common_init_dev(&pdev->dev, &rcar_pci); +#ifdef CONFIG_PCI_DOMAINS + rcar_pci.domain++; +#endif +} + +static int phy_wait_for_ack(struct rcar_pcie *pcie) +{ + unsigned int timeout = 100; + + while (timeout--) { + if (pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK) + return 0; + + udelay(100); + } + + dev_err(pcie->dev, "Access to PCIe phy timed out\n"); + + return -ETIMEDOUT; +} + +static void phy_write_reg(struct rcar_pcie *pcie, + unsigned int rate, unsigned int addr, + unsigned int lane, unsigned int data) +{ + unsigned long phyaddr; + + phyaddr = WRITE_CMD | + ((rate & 1) << RATE_POS) | + ((lane & 0xf) << LANE_POS) | + ((addr & 0xff) << ADR_POS); + + /* Set write data */ + pci_write_reg(pcie, data, H1_PCIEPHYDOUTR); + pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR); + + /* Ignore errors as they will be dealt with if the data link is down */ + phy_wait_for_ack(pcie); + + /* Clear command */ + pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR); + pci_write_reg(pcie, 0, H1_PCIEPHYADRR); + + /* Ignore errors as they will be dealt with if the data link is down */ + phy_wait_for_ack(pcie); +} + +static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie) +{ + unsigned int timeout = 10; + + while (timeout--) { + if ((pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE)) + return 0; + + msleep(5); + } + + return -ETIMEDOUT; +} + +static int rcar_pcie_hw_init(struct rcar_pcie *pcie) +{ + int err; + + /* Begin initialization */ + pci_write_reg(pcie, 0, PCIETCTLR); + + /* Set mode */ + pci_write_reg(pcie, 1, PCIEMSR); + + /* + * Initial header for port config space is type 1, set the device + * class to match. Hardware takes care of propagating the IDSETR + * settings, so there is no need to bother with a quirk. 
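+ * (PCI_CLASS_BRIDGE_PCI is 0x0604, so the write below sets IDSETR1 + * to 0x06040000.)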
+ */ + pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1); + + /* + * Setup Secondary Bus Number & Subordinate Bus Number, even though + * they aren't used, to avoid bridge being detected as broken. + */ + rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1); + rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1); + + /* Initialize default capabilities. */ + rcar_rmw32(pcie, REXPCAP(0), 0, PCI_CAP_ID_EXP); + rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS), + PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4); + rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f, + PCI_HEADER_TYPE_BRIDGE); + + /* Enable data link layer active state reporting */ + rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), 0, PCI_EXP_LNKCAP_DLLLARC); + + /* Write out the physical slot number = 0 */ + rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0); + + /* Set the completion timer timeout to the maximum 50ms. */ + rcar_rmw32(pcie, TLCTLR+1, 0x3f, 50); + + /* Terminate list of capabilities (Next Capability Offset=0) */ + rcar_rmw32(pcie, RVCCAP(0), 0xfff0, 0); + + /* Enable MAC data scrambling. */ + rcar_rmw32(pcie, MACCTLR, SCRAMBLE_DISABLE, 0); + + /* Enable MSI */ + if (IS_ENABLED(CONFIG_PCI_MSI)) + pci_write_reg(pcie, 0x101f0000, PCIEMSITXR); + + /* Finish initialization - establish a PCI Express link */ + pci_write_reg(pcie, CFINIT, PCIETCTLR); + + /* This will timeout if we don't have a link. */ + err = rcar_pcie_wait_for_dl(pcie); + if (err) + return err; + + /* Enable INTx interrupts */ + rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8); + + /* Enable slave Bus Mastering */ + rcar_rmw32(pcie, RCONF(PCI_STATUS), PCI_STATUS_DEVSEL_MASK, + PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | + PCI_STATUS_CAP_LIST | PCI_STATUS_DEVSEL_FAST); + + wmb(); + + return 0; +} + +static int rcar_pcie_hw_init_h1(struct rcar_pcie *pcie) +{ + unsigned int timeout = 10; + + /* Initialize the phy */ + phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191); + phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180); + phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188); + phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188); + phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014); + phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014); + phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0); + phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB); + phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062); + phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000); + phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000); + phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806); + + phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5); + phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F); + phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000); + + while (timeout--) { + if (pci_read_reg(pcie, H1_PCIEPHYSR)) + return rcar_pcie_hw_init(pcie); + + msleep(5); + } + + return -ETIMEDOUT; +} + +static int rcar_msi_alloc(struct rcar_msi *chip) +{ + int msi; + + mutex_lock(&chip->lock); + + msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR); + if (msi < INT_PCI_MSI_NR) + set_bit(msi, chip->used); + else + msi = -ENOSPC; + + mutex_unlock(&chip->lock); + + return msi; +} + +static void rcar_msi_free(struct rcar_msi *chip, unsigned long irq) +{ + mutex_lock(&chip->lock); + clear_bit(irq, chip->used); + mutex_unlock(&chip->lock); +} + +static irqreturn_t rcar_pcie_msi_irq(int irq, void *data) +{ + struct rcar_pcie *pcie = data; + struct rcar_msi *msi = &pcie->msi; + unsigned long reg; + + reg = pci_read_reg(pcie, PCIEMSIFR); + + /* MSI & INTx share an interrupt - we only handle MSI here */ + if (!reg) + return IRQ_NONE; + + while (reg) { + unsigned int index = 
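/* lowest pending MSI vector */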
find_first_bit(&reg, 32); + unsigned int irq; + + /* clear the interrupt */ + pci_write_reg(pcie, 1 << index, PCIEMSIFR); + + irq = irq_find_mapping(msi->domain, index); + if (irq) { + if (test_bit(index, msi->used)) + generic_handle_irq(irq); + else + dev_info(pcie->dev, "unhandled MSI\n"); + } else { + /* Unknown MSI, just clear it */ + dev_dbg(pcie->dev, "unexpected MSI\n"); + } + + /* see if there's any more pending in this vector */ + reg = pci_read_reg(pcie, PCIEMSIFR); + } + + return IRQ_HANDLED; +} + +static int rcar_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, + struct msi_desc *desc) +{ + struct rcar_msi *msi = to_rcar_msi(chip); + struct rcar_pcie *pcie = container_of(chip, struct rcar_pcie, msi.chip); + struct msi_msg msg; + unsigned int irq; + int hwirq; + + hwirq = rcar_msi_alloc(msi); + if (hwirq < 0) + return hwirq; + + irq = irq_create_mapping(msi->domain, hwirq); + if (!irq) { + rcar_msi_free(msi, hwirq); + return -EINVAL; + } + + irq_set_msi_desc(irq, desc); + + msg.address_lo = pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE; + msg.address_hi = pci_read_reg(pcie, PCIEMSIAUR); + msg.data = hwirq; + + write_msi_msg(irq, &msg); + + return 0; +} + +static void rcar_msi_teardown_irq(struct msi_chip *chip, unsigned int irq) +{ + struct rcar_msi *msi = to_rcar_msi(chip); + struct irq_data *d = irq_get_irq_data(irq); + + rcar_msi_free(msi, d->hwirq); +} + +static struct irq_chip rcar_msi_irq_chip = { + .name = "R-Car PCIe MSI", + .irq_enable = unmask_msi_irq, + .irq_disable = mask_msi_irq, + .irq_mask = mask_msi_irq, + .irq_unmask = unmask_msi_irq, +}; + +static int rcar_msi_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq); + irq_set_chip_data(irq, domain->host_data); + set_irq_flags(irq, IRQF_VALID); + + return 0; +} + +static const struct irq_domain_ops msi_domain_ops = { + .map = rcar_msi_map, +}; + +static int rcar_pcie_enable_msi(struct rcar_pcie *pcie) +{ + struct platform_device *pdev = to_platform_device(pcie->dev); + struct rcar_msi *msi = &pcie->msi; + unsigned long base; + int err; + + mutex_init(&msi->lock); + + msi->chip.dev = pcie->dev; + msi->chip.setup_irq = rcar_msi_setup_irq; + msi->chip.teardown_irq = rcar_msi_teardown_irq; + + msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR, + &msi_domain_ops, &msi->chip); + if (!msi->domain) { + dev_err(&pdev->dev, "failed to create IRQ domain\n"); + return -ENOMEM; + } + + /* Two irqs are for MSI, but they are also used for non-MSI irqs */ + err = devm_request_irq(&pdev->dev, msi->irq1, rcar_pcie_msi_irq, + IRQF_SHARED, rcar_msi_irq_chip.name, pcie); + if (err < 0) { + dev_err(&pdev->dev, "failed to request IRQ: %d\n", err); + goto err; + } + + err = devm_request_irq(&pdev->dev, msi->irq2, rcar_pcie_msi_irq, + IRQF_SHARED, rcar_msi_irq_chip.name, pcie); + if (err < 0) { + dev_err(&pdev->dev, "failed to request IRQ: %d\n", err); + goto err; + } + + /* setup MSI data target */ + msi->pages = __get_free_pages(GFP_KERNEL, 0); + base = virt_to_phys((void *)msi->pages); + + pci_write_reg(pcie, base | MSIFE, PCIEMSIALR); + pci_write_reg(pcie, 0, PCIEMSIAUR); + + /* enable all MSI interrupts */ + pci_write_reg(pcie, 0xffffffff, PCIEMSIIER); + + return 0; + +err: + irq_domain_remove(msi->domain); + return err; +} + +static int rcar_pcie_get_resources(struct platform_device *pdev, + struct rcar_pcie *pcie) +{ + struct resource res; + int err, i; + + err = of_address_to_resource(pdev->dev.of_node, 0, &res); +
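/* res now holds the first "reg" entry, i.e. the controller's register window */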
if (err) + return err; + + pcie->clk = devm_clk_get(&pdev->dev, "pcie"); + if (IS_ERR(pcie->clk)) { + dev_err(pcie->dev, "cannot get platform clock\n"); + return PTR_ERR(pcie->clk); + } + err = clk_prepare_enable(pcie->clk); + if (err) + goto fail_clk; + + pcie->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus"); + if (IS_ERR(pcie->bus_clk)) { + dev_err(pcie->dev, "cannot get pcie bus clock\n"); + err = PTR_ERR(pcie->bus_clk); + goto fail_clk; + } + err = clk_prepare_enable(pcie->bus_clk); + if (err) + goto err_map_reg; + + i = irq_of_parse_and_map(pdev->dev.of_node, 0); + if (i < 0) { + dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n"); + err = -ENOENT; + goto err_map_reg; + } + pcie->msi.irq1 = i; + + i = irq_of_parse_and_map(pdev->dev.of_node, 1); + if (i < 0) { + dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n"); + err = -ENOENT; + goto err_map_reg; + } + pcie->msi.irq2 = i; + + pcie->base = devm_ioremap_resource(&pdev->dev, &res); + if (IS_ERR(pcie->base)) { + err = PTR_ERR(pcie->base); + goto err_map_reg; + } + + return 0; + +err_map_reg: + clk_disable_unprepare(pcie->bus_clk); +fail_clk: + clk_disable_unprepare(pcie->clk); + + return err; +} + +static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie, + struct of_pci_range *range, + int *index) +{ + u64 restype = range->flags; + u64 cpu_addr = range->cpu_addr; + u64 cpu_end = range->cpu_addr + range->size; + u64 pci_addr = range->pci_addr; + u32 flags = LAM_64BIT | LAR_ENABLE; + u64 mask; + u64 size; + int idx = *index; + + if (restype & IORESOURCE_PREFETCH) + flags |= LAM_PREFETCH; + + /* + * If the size of the range is larger than the alignment of the start + * address, we have to use multiple entries to perform the mapping. + */ + if (cpu_addr > 0) { + unsigned long nr_zeros = __ffs64(cpu_addr); + u64 alignment = 1ULL << nr_zeros; + size = min(range->size, alignment); + } else { + size = range->size; + } + /* Hardware supports max 4GiB inbound region */ + size = min(size, 1ULL << 32); + + mask = roundup_pow_of_two(size) - 1; + mask &= ~0xf; + + while (cpu_addr < cpu_end) { + /* + * Set up 64-bit inbound regions as the range parser doesn't + * distinguish between 32 and 64-bit types. 
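+ * Each mapping consumes two PCIEPRAR/PCIELAR/PCIELAMR register sets + * (the second carries the upper 32 bits with a zero mask), hence + * idx advances by two per iteration.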
+ */ + pci_write_reg(pcie, lower_32_bits(pci_addr), PCIEPRAR(idx)); + pci_write_reg(pcie, lower_32_bits(cpu_addr), PCIELAR(idx)); + pci_write_reg(pcie, lower_32_bits(mask) | flags, PCIELAMR(idx)); + + pci_write_reg(pcie, upper_32_bits(pci_addr), PCIEPRAR(idx+1)); + pci_write_reg(pcie, upper_32_bits(cpu_addr), PCIELAR(idx+1)); + pci_write_reg(pcie, 0, PCIELAMR(idx+1)); + + pci_addr += size; + cpu_addr += size; + idx += 2; + + if (idx > MAX_NR_INBOUND_MAPS) { + dev_err(pcie->dev, "Failed to map inbound regions!\n"); + return -EINVAL; + } + } + *index = idx; + + return 0; +} + +static int pci_dma_range_parser_init(struct of_pci_range_parser *parser, + struct device_node *node) +{ + const int na = 3, ns = 2; + int rlen; + + parser->node = node; + parser->pna = of_n_addr_cells(node); + parser->np = parser->pna + na + ns; + + parser->range = of_get_property(node, "dma-ranges", &rlen); + if (!parser->range) + return -ENOENT; + + parser->end = parser->range + rlen / sizeof(__be32); + return 0; +} + +static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie, + struct device_node *np) +{ + struct of_pci_range range; + struct of_pci_range_parser parser; + int index = 0; + int err; + + if (pci_dma_range_parser_init(&parser, np)) + return -EINVAL; + + /* Get the dma-ranges from DT */ + for_each_of_pci_range(&parser, &range) { + u64 end = range.cpu_addr + range.size - 1; + dev_dbg(pcie->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n", + range.flags, range.cpu_addr, end, range.pci_addr); + + err = rcar_pcie_inbound_ranges(pcie, &range, &index); + if (err) + return err; + } + + return 0; +} + +static const struct of_device_id rcar_pcie_of_match[] = { + { .compatible = "renesas,pcie-r8a7779", .data = rcar_pcie_hw_init_h1 }, + { .compatible = "renesas,pcie-r8a7790", .data = rcar_pcie_hw_init }, + { .compatible = "renesas,pcie-r8a7791", .data = rcar_pcie_hw_init }, + {}, +}; +MODULE_DEVICE_TABLE(of, rcar_pcie_of_match); + +static int rcar_pcie_probe(struct platform_device *pdev) +{ + struct rcar_pcie *pcie; + unsigned int data; + struct of_pci_range range; + struct of_pci_range_parser parser; + const struct of_device_id *of_id; + int err, win = 0; + int (*hw_init_fn)(struct rcar_pcie *); + + pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return -ENOMEM; + + pcie->dev = &pdev->dev; + platform_set_drvdata(pdev, pcie); + + /* Get the bus range */ + if (of_pci_parse_bus_range(pdev->dev.of_node, &pcie->busn)) { + dev_err(&pdev->dev, "failed to parse bus-range property\n"); + return -EINVAL; + } + + if (of_pci_range_parser_init(&parser, pdev->dev.of_node)) { + dev_err(&pdev->dev, "missing ranges property\n"); + return -EINVAL; + } + + err = rcar_pcie_get_resources(pdev, pcie); + if (err < 0) { + dev_err(&pdev->dev, "failed to request resources: %d\n", err); + return err; + } + + for_each_of_pci_range(&parser, &range) { + of_pci_range_to_resource(&range, pdev->dev.of_node, + &pcie->res[win++]); + + if (win > PCI_MAX_RESOURCES) + break; + } + + err = rcar_pcie_parse_map_dma_ranges(pcie, pdev->dev.of_node); + if (err) + return err; + + if (IS_ENABLED(CONFIG_PCI_MSI)) { + err = rcar_pcie_enable_msi(pcie); + if (err < 0) { + dev_err(&pdev->dev, + "failed to enable MSI support: %d\n", + err); + return err; + } + } + + of_id = of_match_device(rcar_pcie_of_match, pcie->dev); + if (!of_id || !of_id->data) + return -EINVAL; + hw_init_fn = of_id->data; + + /* Failure to get a link might just be that no cards are inserted */ + err = hw_init_fn(pcie); + if (err) { + dev_info(&pdev->dev, 
"PCIe link down\n"); + return 0; + } + + data = pci_read_reg(pcie, MACSR); + dev_info(&pdev->dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f); + + rcar_pcie_enable(pcie); + + return 0; +} + +static struct platform_driver rcar_pcie_driver = { + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + .of_match_table = rcar_pcie_of_match, + .suppress_bind_attrs = true, + }, + .probe = rcar_pcie_probe, +}; +module_platform_driver(rcar_pcie_driver); + +MODULE_AUTHOR("Phil Edworthy <phil.edworthy@renesas.com>"); +MODULE_DESCRIPTION("Renesas R-Car PCIe driver"); +MODULE_LICENSE("GPLv2"); diff --git a/drivers/pci/hotplug-pci.c b/drivers/pci/hotplug-pci.c index 6258dc260d9f..c68366cee6b7 100644 --- a/drivers/pci/hotplug-pci.c +++ b/drivers/pci/hotplug-pci.c @@ -4,7 +4,7 @@ #include <linux/export.h> #include "pci.h" -int __ref pci_hp_add_bridge(struct pci_dev *dev) +int pci_hp_add_bridge(struct pci_dev *dev) { struct pci_bus *parent = dev->bus; int pass, busnr, start = parent->busn_res.start; diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index bccc27ee1030..75e178330215 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -41,7 +41,6 @@ #define pr_fmt(fmt) "acpiphp_glue: " fmt -#include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> @@ -501,7 +500,7 @@ static int acpiphp_rescan_slot(struct acpiphp_slot *slot) * This function should be called per *physical slot*, * not per each slot object in ACPI namespace. */ -static void __ref enable_slot(struct acpiphp_slot *slot) +static void enable_slot(struct acpiphp_slot *slot) { struct pci_dev *dev; struct pci_bus *bus = slot->bus; @@ -516,8 +515,7 @@ static void __ref enable_slot(struct acpiphp_slot *slot) if (PCI_SLOT(dev->devfn) != slot->device) continue; - if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || - dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) { + if (pci_is_bridge(dev)) { max = pci_scan_bridge(bus, dev, max, pass); if (pass && dev->subordinate) { check_hotplug_bridge(slot, dev); diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c index 8c1464851768..f6ef64c2ccb5 100644 --- a/drivers/pci/hotplug/cpci_hotplug_pci.c +++ b/drivers/pci/hotplug/cpci_hotplug_pci.c @@ -250,7 +250,7 @@ int cpci_led_off(struct slot* slot) * Device configuration functions */ -int __ref cpci_configure_slot(struct slot *slot) +int cpci_configure_slot(struct slot *slot) { struct pci_dev *dev; struct pci_bus *parent; @@ -289,8 +289,7 @@ int __ref cpci_configure_slot(struct slot *slot) list_for_each_entry(dev, &parent->devices, bus_list) if (PCI_SLOT(dev->devfn) != PCI_SLOT(slot->devfn)) continue; - if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) || - (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) + if (pci_is_bridge(dev)) pci_hp_add_bridge(dev); diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c index 11845b796799..f593585f2784 100644 --- a/drivers/pci/hotplug/cpqphp_ctrl.c +++ b/drivers/pci/hotplug/cpqphp_ctrl.c @@ -709,7 +709,8 @@ static struct pci_resource *get_max_resource(struct pci_resource **head, u32 siz temp = temp->next; } - temp->next = max->next; + if (temp) + temp->next = max->next; } max->next = NULL; diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c index 76ba8a1c774d..9600a392eaae 100644 --- a/drivers/pci/hotplug/cpqphp_nvram.c +++ b/drivers/pci/hotplug/cpqphp_nvram.c @@ -34,7 +34,6 @@ #include <linux/workqueue.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> 
-#include <linux/init.h> #include <asm/uaccess.h> #include "cpqphp.h" #include "cpqphp_nvram.h" diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 8a66866b8cf1..8e9012dca450 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h @@ -127,7 +127,7 @@ struct controller { #define HP_SUPR_RM(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_HPS) #define EMI(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_EIP) #define NO_CMD_CMPL(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_NCCS) -#define PSN(ctrl) ((ctrl)->slot_cap >> 19) +#define PSN(ctrl) (((ctrl)->slot_cap & PCI_EXP_SLTCAP_PSN) >> 19) int pciehp_sysfs_enable_slot(struct slot *slot); int pciehp_sysfs_disable_slot(struct slot *slot); diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index d7d058fa19a4..1463412cf7f8 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -159,6 +159,8 @@ static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status); if (slot_status & PCI_EXP_SLTSTA_CC) { + pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, + PCI_EXP_SLTSTA_CC); if (!ctrl->no_cmd_complete) { /* * After 1 sec and CMD_COMPLETED still not set, just diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c index 1b533060ce65..b6cb1df67097 100644 --- a/drivers/pci/hotplug/pciehp_pci.c +++ b/drivers/pci/hotplug/pciehp_pci.c @@ -62,8 +62,7 @@ int pciehp_configure_device(struct slot *p_slot) } list_for_each_entry(dev, &parent->devices, bus_list) - if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) || - (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) + if (pci_is_bridge(dev)) pci_hp_add_bridge(dev); pci_assign_unassigned_bridge_resources(bridge); diff --git a/drivers/pci/hotplug/pcihp_slot.c b/drivers/pci/hotplug/pcihp_slot.c index 16f920352317..e246a10a0d2c 100644 --- a/drivers/pci/hotplug/pcihp_slot.c +++ b/drivers/pci/hotplug/pcihp_slot.c @@ -160,8 +160,7 @@ void pci_configure_slot(struct pci_dev *dev) (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI))) return; - if (dev->bus) - pcie_bus_configure_settings(dev->bus); + pcie_bus_configure_settings(dev->bus); memset(&hpp, 0, sizeof(hpp)); ret = pci_get_hp_params(dev, &hpp); diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c index 4fcdeedda31b..7660232ef460 100644 --- a/drivers/pci/hotplug/rpadlpar_core.c +++ b/drivers/pci/hotplug/rpadlpar_core.c @@ -157,8 +157,7 @@ static void dlpar_pci_add_bus(struct device_node *dn) } /* Scan below the new bridge */ - if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || - dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) + if (pci_is_bridge(dev)) of_scan_pci_bridge(dev); /* Map IO space for child bus, which may or may not succeed */ diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c index 4796c15fba94..984d708552f6 100644 --- a/drivers/pci/hotplug/rpaphp_core.c +++ b/drivers/pci/hotplug/rpaphp_core.c @@ -223,16 +223,16 @@ int rpaphp_get_drc_props(struct device_node *dn, int *drc_index, type_tmp = (char *) &types[1]; /* Iterate through parent properties, looking for my-drc-index */ - for (i = 0; i < indexes[0]; i++) { + for (i = 0; i < be32_to_cpu(indexes[0]); i++) { if ((unsigned int) indexes[i + 1] == *my_index) { if (drc_name) *drc_name = name_tmp; if (drc_type) *drc_type = type_tmp; if (drc_index) - *drc_index = *my_index; + *drc_index = be32_to_cpu(*my_index); if (drc_power_domain) - *drc_power_domain = domains[i+1]; + *drc_power_domain = 
be32_to_cpu(domains[i+1]); return 0; } name_tmp += (strlen(name_tmp) + 1); @@ -321,16 +321,19 @@ int rpaphp_add_slot(struct device_node *dn) /* register PCI devices */ name = (char *) &names[1]; type = (char *) &types[1]; - for (i = 0; i < indexes[0]; i++) { + for (i = 0; i < be32_to_cpu(indexes[0]); i++) { + int index; - slot = alloc_slot_struct(dn, indexes[i + 1], name, power_domains[i + 1]); + index = be32_to_cpu(indexes[i + 1]); + slot = alloc_slot_struct(dn, index, name, + be32_to_cpu(power_domains[i + 1])); if (!slot) return -ENOMEM; slot->type = simple_strtoul(type, NULL, 10); dbg("Found drc-index:0x%x drc-name:%s drc-type:%s\n", - indexes[i + 1], name, type); + index, name, type); retval = rpaphp_enable_slot(slot); if (!retval) diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c index 8d2ce22151eb..d1332d2f8730 100644 --- a/drivers/pci/hotplug/s390_pci_hpc.c +++ b/drivers/pci/hotplug/s390_pci_hpc.c @@ -15,7 +15,6 @@ #include <linux/slab.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> -#include <linux/init.h> #include <asm/pci_debug.h> #include <asm/sclp.h> diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c index 58499277903a..6efc2ec5e4db 100644 --- a/drivers/pci/hotplug/shpchp_ctrl.c +++ b/drivers/pci/hotplug/shpchp_ctrl.c @@ -282,8 +282,8 @@ static int board_added(struct slot *p_slot) return WRONG_BUS_FREQUENCY; } - bsp = ctrl->pci_dev->bus->cur_bus_speed; - msp = ctrl->pci_dev->bus->max_bus_speed; + bsp = ctrl->pci_dev->subordinate->cur_bus_speed; + msp = ctrl->pci_dev->subordinate->max_bus_speed; /* Check if there are other slots or devices on the same bus */ if (!list_empty(&ctrl->pci_dev->subordinate->devices)) diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c index 2bf69fe1926c..9202d133485c 100644 --- a/drivers/pci/hotplug/shpchp_pci.c +++ b/drivers/pci/hotplug/shpchp_pci.c @@ -34,7 +34,7 @@ #include "../pci.h" #include "shpchp.h" -int __ref shpchp_configure_device(struct slot *p_slot) +int shpchp_configure_device(struct slot *p_slot) { struct pci_dev *dev; struct controller *ctrl = p_slot->ctrl; @@ -64,8 +64,7 @@ int __ref shpchp_configure_device(struct slot *p_slot) list_for_each_entry(dev, &parent->devices, bus_list) { if (PCI_SLOT(dev->devfn) != p_slot->device) continue; - if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) || - (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) + if (pci_is_bridge(dev)) pci_hp_add_bridge(dev); } diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index de7a74782f92..cb6f24740ee3 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -106,7 +106,7 @@ static int virtfn_add(struct pci_dev *dev, int id, int reset) pci_device_add(virtfn, virtfn->bus); mutex_unlock(&iov->dev->sriov->lock); - rc = pci_bus_add_device(virtfn); + pci_bus_add_device(virtfn); sprintf(buf, "virtfn%u", id); rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf); if (rc) diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 955ab7990c5b..27a7e67ddfe4 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -10,7 +10,6 @@ #include <linux/mm.h> #include <linux/irq.h> #include <linux/interrupt.h> -#include <linux/init.h> #include <linux/export.h> #include <linux/ioport.h> #include <linux/pci.h> @@ -544,22 +543,18 @@ static int populate_msi_sysfs(struct pci_dev *pdev) if (!msi_attrs) return -ENOMEM; list_for_each_entry(entry, &pdev->msi_list, list) { - char *name = kmalloc(20, GFP_KERNEL); - if (!name) - goto error_attrs; - msi_dev_attr = 
kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
-		if (!msi_dev_attr) {
-			kfree(name);
+		if (!msi_dev_attr)
 			goto error_attrs;
-		}
+
+		msi_attrs[count] = &msi_dev_attr->attr;
 
-		sprintf(name, "%d", entry->irq);
 		sysfs_attr_init(&msi_dev_attr->attr);
-		msi_dev_attr->attr.name = name;
+		msi_dev_attr->attr.name = kasprintf(GFP_KERNEL, "%d",
+						    entry->irq);
+		if (!msi_dev_attr->attr.name)
+			goto error_attrs;
 		msi_dev_attr->attr.mode = S_IRUGO;
 		msi_dev_attr->show = msi_mode_show;
-		msi_attrs[count] = &msi_dev_attr->attr;
 		++count;
 	}
@@ -883,50 +878,6 @@ int pci_msi_vec_count(struct pci_dev *dev)
 }
 EXPORT_SYMBOL(pci_msi_vec_count);
 
-/**
- * pci_enable_msi_block - configure device's MSI capability structure
- * @dev: device to configure
- * @nvec: number of interrupts to configure
- *
- * Allocate IRQs for a device with the MSI capability.
- * This function returns a negative errno if an error occurs.  If it
- * is unable to allocate the number of interrupts requested, it returns
- * the number of interrupts it might be able to allocate.  If it
- * successfully allocates at least the number of interrupts requested,
- * it returns 0 and updates the @dev's irq member to the lowest new
- * interrupt number; the other interrupt numbers allocated to this
- * device are consecutive.
- */
-int pci_enable_msi_block(struct pci_dev *dev, int nvec)
-{
-	int status, maxvec;
-
-	if (dev->current_state != PCI_D0)
-		return -EINVAL;
-
-	maxvec = pci_msi_vec_count(dev);
-	if (maxvec < 0)
-		return maxvec;
-	if (nvec > maxvec)
-		return maxvec;
-
-	status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSI);
-	if (status)
-		return status;
-
-	WARN_ON(!!dev->msi_enabled);
-
-	/* Check whether driver already requested MSI-X irqs */
-	if (dev->msix_enabled) {
-		dev_info(&dev->dev, "can't enable MSI "
-			 "(MSI-X already enabled)\n");
-		return -EINVAL;
-	}
-
-	status = msi_capability_init(dev, nvec);
-	return status;
-}
-EXPORT_SYMBOL(pci_enable_msi_block);
-
 void pci_msi_shutdown(struct pci_dev *dev)
 {
 	struct msi_desc *desc;
@@ -1132,14 +1083,45 @@ void pci_msi_init_pci_dev(struct pci_dev *dev)
  **/
 int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
 {
-	int nvec = maxvec;
+	int nvec;
 	int rc;
 
+	if (dev->current_state != PCI_D0)
+		return -EINVAL;
+
+	WARN_ON(!!dev->msi_enabled);
+
+	/* Check whether driver already requested MSI-X irqs */
+	if (dev->msix_enabled) {
+		dev_info(&dev->dev,
+			 "can't enable MSI (MSI-X already enabled)\n");
+		return -EINVAL;
+	}
+
 	if (maxvec < minvec)
 		return -ERANGE;
 
+	nvec = pci_msi_vec_count(dev);
+	if (nvec < 0)
+		return nvec;
+	else if (nvec < minvec)
+		return -EINVAL;
+	else if (nvec > maxvec)
+		nvec = maxvec;
+
+	do {
+		rc = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSI);
+		if (rc < 0) {
+			return rc;
+		} else if (rc > 0) {
+			if (rc < minvec)
+				return -ENOSPC;
+			nvec = rc;
+		}
+	} while (rc);
+
 	do {
-		rc = pci_enable_msi_block(dev, nvec);
+		rc = msi_capability_init(dev, nvec);
 		if (rc < 0) {
 			return rc;
 		} else if (rc > 0) {
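With pci_enable_msi_block() removed here, callers are expected to move to
pci_enable_msi_range() (or pci_enable_msi_exact()). A minimal migration sketch,
assuming a hypothetical driver (foo_setup_irqs() and foo_init_queues() are
placeholder names, not real API):

    #include <linux/pci.h>

    static int foo_init_queues(struct pci_dev *pdev, int nvec); /* placeholder */

    /* Sketch: request 1..8 MSI vectors, size queues to what was granted. */
    static int foo_setup_irqs(struct pci_dev *pdev)
    {
        int nvec;

        nvec = pci_enable_msi_range(pdev, 1, 8);
        if (nvec < 0)
            return nvec;    /* MSI unavailable; caller may fall back to INTx */

        /* nvec is the number of vectors actually allocated (>= 1 here). */
        return foo_init_queues(pdev, nvec);
    }

Unlike the old pci_enable_msi_block(), the return value on success is the
vector count itself, so no retry loop is needed in the caller.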
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index f49abef88485..ca4927ba8433 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -309,13 +309,7 @@ static struct acpi_device *acpi_pci_find_companion(struct device *dev)
 	bool check_children;
 	u64 addr;
 
-	/*
-	 * pci_is_bridge() is not suitable here, because pci_dev->subordinate
-	 * is set only after acpi_pci_find_device() has been called for the
-	 * given device.
-	 */
-	check_children = pci_dev->hdr_type == PCI_HEADER_TYPE_BRIDGE
-			|| pci_dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
+	check_children = pci_is_bridge(pci_dev);
 	/* Please ref to ACPI spec for the syntax of _ADR */
 	addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
 	return acpi_find_child_device(ACPI_COMPANION(dev->parent), addr,
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index d911e0c1f359..837d71f5390b 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -107,7 +107,7 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count)
 		subdevice=PCI_ANY_ID, class=0, class_mask=0;
 	unsigned long driver_data=0;
 	int fields=0;
-	int retval;
+	int retval = 0;
 
 	fields = sscanf(buf, "%x %x %x %x %x %x %lx",
 			&vendor, &device, &subvendor, &subdevice,
@@ -115,6 +115,26 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count)
 	if (fields < 2)
 		return -EINVAL;
 
+	if (fields != 7) {
+		struct pci_dev *pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
+		if (!pdev)
+			return -ENOMEM;
+
+		pdev->vendor = vendor;
+		pdev->device = device;
+		pdev->subsystem_vendor = subvendor;
+		pdev->subsystem_device = subdevice;
+		pdev->class = class;
+
+		if (pci_match_id(pdrv->id_table, pdev))
+			retval = -EEXIST;
+
+		kfree(pdev);
+
+		if (retval)
+			return retval;
+	}
+
 	/* Only accept driver_data values that match an existing id_table
 	   entry */
 	if (ids) {
@@ -216,6 +236,13 @@ const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
 	return NULL;
 }
 
+static const struct pci_device_id pci_device_id_any = {
+	.vendor = PCI_ANY_ID,
+	.device = PCI_ANY_ID,
+	.subvendor = PCI_ANY_ID,
+	.subdevice = PCI_ANY_ID,
+};
+
 /**
  * pci_match_device - Tell if a PCI device structure has a matching PCI device id structure
  * @drv: the PCI driver to match against
@@ -229,18 +256,30 @@ static const struct pci_device_id *pci_match_device(struct pci_driver *drv,
 						    struct pci_dev *dev)
 {
 	struct pci_dynid *dynid;
+	const struct pci_device_id *found_id = NULL;
+
+	/* When driver_override is set, only bind to the matching driver */
+	if (dev->driver_override && strcmp(dev->driver_override, drv->name))
+		return NULL;
 
 	/* Look at the dynamic ids first, before the static ones */
 	spin_lock(&drv->dynids.lock);
 	list_for_each_entry(dynid, &drv->dynids.list, node) {
 		if (pci_match_one_device(&dynid->id, dev)) {
-			spin_unlock(&drv->dynids.lock);
-			return &dynid->id;
+			found_id = &dynid->id;
+			break;
 		}
 	}
 	spin_unlock(&drv->dynids.lock);
 
-	return pci_match_id(drv->id_table, dev);
+	if (!found_id)
+		found_id = pci_match_id(drv->id_table, dev);
+
+	/* driver_override will always match, send a dummy id */
+	if (!found_id && dev->driver_override)
+		found_id = &pci_device_id_any;
+
+	return found_id;
 }
 
 struct drv_dev_and_id {
@@ -580,14 +619,14 @@ static void pci_pm_default_resume(struct pci_dev *pci_dev)
 {
 	pci_fixup_device(pci_fixup_resume, pci_dev);
 
-	if (!pci_is_bridge(pci_dev))
+	if (!pci_has_subordinate(pci_dev))
 		pci_enable_wake(pci_dev, PCI_D0, false);
 }
 
 static void pci_pm_default_suspend(struct pci_dev *pci_dev)
 {
 	/* Disable non-bridge devices without PM support */
-	if (!pci_is_bridge(pci_dev))
+	if (!pci_has_subordinate(pci_dev))
 		pci_disable_enabled_device(pci_dev);
 }
 
@@ -717,7 +756,7 @@ static int pci_pm_suspend_noirq(struct device *dev)
 
 	if (!pci_dev->state_saved) {
 		pci_save_state(pci_dev);
-		if (!pci_is_bridge(pci_dev))
+		if (!pci_has_subordinate(pci_dev))
 			pci_prepare_to_sleep(pci_dev);
 	}
 
@@ -971,7 +1010,7 @@ static int pci_pm_poweroff_noirq(struct device *dev)
 		return error;
 	}
- if (!pci_dev->state_saved && !pci_is_bridge(pci_dev)) + if (!pci_dev->state_saved && !pci_has_subordinate(pci_dev)) pci_prepare_to_sleep(pci_dev); /* @@ -1325,8 +1364,6 @@ static int pci_uevent(struct device *dev, struct kobj_uevent_env *env) return -ENODEV; pdev = to_pci_dev(dev); - if (!pdev) - return -ENODEV; if (add_uevent_var(env, "PCI_CLASS=%04X", pdev->class)) return -ENOMEM; @@ -1347,6 +1384,7 @@ static int pci_uevent(struct device *dev, struct kobj_uevent_env *env) (u8)(pdev->class >> 16), (u8)(pdev->class >> 8), (u8)(pdev->class))) return -ENOMEM; + return 0; } diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 4e0acefb7565..84c350994b06 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -29,6 +29,7 @@ #include <linux/slab.h> #include <linux/vgaarb.h> #include <linux/pm_runtime.h> +#include <linux/of.h> #include "pci.h" static int sysfs_initialized; /* = 0 */ @@ -416,6 +417,20 @@ static ssize_t d3cold_allowed_show(struct device *dev, static DEVICE_ATTR_RW(d3cold_allowed); #endif +#ifdef CONFIG_OF +static ssize_t devspec_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct device_node *np = pci_device_to_OF_node(pdev); + + if (np == NULL || np->full_name == NULL) + return 0; + return sprintf(buf, "%s", np->full_name); +} +static DEVICE_ATTR_RO(devspec); +#endif + #ifdef CONFIG_PCI_IOV static ssize_t sriov_totalvfs_show(struct device *dev, struct device_attribute *attr, @@ -499,6 +514,45 @@ static struct device_attribute sriov_numvfs_attr = sriov_numvfs_show, sriov_numvfs_store); #endif /* CONFIG_PCI_IOV */ +static ssize_t driver_override_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct pci_dev *pdev = to_pci_dev(dev); + char *driver_override, *old = pdev->driver_override, *cp; + + if (count > PATH_MAX) + return -EINVAL; + + driver_override = kstrndup(buf, count, GFP_KERNEL); + if (!driver_override) + return -ENOMEM; + + cp = strchr(driver_override, '\n'); + if (cp) + *cp = '\0'; + + if (strlen(driver_override)) { + pdev->driver_override = driver_override; + } else { + kfree(driver_override); + pdev->driver_override = NULL; + } + + kfree(old); + + return count; +} + +static ssize_t driver_override_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + return sprintf(buf, "%s\n", pdev->driver_override); +} +static DEVICE_ATTR_RW(driver_override); + static struct attribute *pci_dev_attrs[] = { &dev_attr_resource.attr, &dev_attr_vendor.attr, @@ -521,6 +575,10 @@ static struct attribute *pci_dev_attrs[] = { #if defined(CONFIG_PM_RUNTIME) && defined(CONFIG_ACPI) &dev_attr_d3cold_allowed.attr, #endif +#ifdef CONFIG_OF + &dev_attr_devspec.attr, +#endif + &dev_attr_driver_override.attr, NULL, }; @@ -1255,11 +1313,6 @@ static struct bin_attribute pcie_config_attr = { .write = pci_write_config, }; -int __weak pcibios_add_platform_entries(struct pci_dev *dev) -{ - return 0; -} - static ssize_t reset_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -1375,11 +1428,6 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev) pdev->rom_attr = attr; } - /* add platform-specific attributes */ - retval = pcibios_add_platform_entries(pdev); - if (retval) - goto err_rom_file; - /* add sysfs entries for various capabilities */ retval = pci_create_capabilities_sysfs(pdev); if (retval) diff --git a/drivers/pci/pci.c 
b/drivers/pci/pci.c index 7325d43bf030..7ae7aa0166b6 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1468,6 +1468,17 @@ void __weak pcibios_release_device(struct pci_dev *dev) {} */ void __weak pcibios_disable_device (struct pci_dev *dev) {} +/** + * pcibios_penalize_isa_irq - penalize an ISA IRQ + * @irq: ISA IRQ to penalize + * @active: IRQ active or not + * + * Permits the platform to provide architecture-specific functionality when + * penalizing ISA IRQs. This is the default implementation. Architecture + * implementations can override this. + */ +void __weak pcibios_penalize_isa_irq(int irq, int active) {} + static void do_pci_disable_device(struct pci_dev *dev) { u16 pci_command; @@ -3067,7 +3078,8 @@ int pci_wait_for_pending_transaction(struct pci_dev *dev) if (!pci_is_pcie(dev)) return 1; - return pci_wait_for_pending(dev, PCI_EXP_DEVSTA, PCI_EXP_DEVSTA_TRPND); + return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA, + PCI_EXP_DEVSTA_TRPND); } EXPORT_SYMBOL(pci_wait_for_pending_transaction); @@ -3109,7 +3121,7 @@ static int pci_af_flr(struct pci_dev *dev, int probe) return 0; /* Wait for Transaction Pending bit clean */ - if (pci_wait_for_pending(dev, PCI_AF_STATUS, PCI_AF_STATUS_TP)) + if (pci_wait_for_pending(dev, pos + PCI_AF_STATUS, PCI_AF_STATUS_TP)) goto clear; dev_err(&dev->dev, "transaction is not cleared; " @@ -3305,8 +3317,27 @@ static void pci_dev_unlock(struct pci_dev *dev) pci_cfg_access_unlock(dev); } +/** + * pci_reset_notify - notify device driver of reset + * @dev: device to be notified of reset + * @prepare: 'true' if device is about to be reset; 'false' if reset attempt + * completed + * + * Must be called prior to device access being disabled and after device + * access is restored. + */ +static void pci_reset_notify(struct pci_dev *dev, bool prepare) +{ + const struct pci_error_handlers *err_handler = + dev->driver ? dev->driver->err_handler : NULL; + if (err_handler && err_handler->reset_notify) + err_handler->reset_notify(dev, prepare); +} + static void pci_dev_save_and_disable(struct pci_dev *dev) { + pci_reset_notify(dev, true); + /* * Wake-up device prior to save. 
PM registers default to D0 after * reset and a simple register restore doesn't reliably return @@ -3328,6 +3359,7 @@ static void pci_dev_save_and_disable(struct pci_dev *dev) static void pci_dev_restore(struct pci_dev *dev) { pci_restore_state(dev); + pci_reset_notify(dev, false); } static int pci_dev_reset(struct pci_dev *dev, int probe) @@ -3344,6 +3376,7 @@ static int pci_dev_reset(struct pci_dev *dev, int probe) return rc; } + /** * __pci_reset_function - reset a PCI device function * @dev: PCI device to reset @@ -4125,7 +4158,7 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode, u16 cmd; int rc; - WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) & (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY))); + WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY))); /* ARCH specific VGA enables */ rc = pci_set_vga_state_arch(dev, decode, command_bits, flags); diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 6bd082299e31..0601890db22d 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -77,7 +77,7 @@ static inline void pci_wakeup_event(struct pci_dev *dev) pm_wakeup_event(&dev->dev, 100); } -static inline bool pci_is_bridge(struct pci_dev *pci_dev) +static inline bool pci_has_subordinate(struct pci_dev *pci_dev) { return !!(pci_dev->subordinate); } @@ -201,11 +201,11 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, struct resource *res, unsigned int reg); int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type); void pci_configure_ari(struct pci_dev *dev); -void __ref __pci_bus_size_bridges(struct pci_bus *bus, +void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head); -void __ref __pci_bus_assign_resources(const struct pci_bus *bus, - struct list_head *realloc_head, - struct list_head *fail_head); +void __pci_bus_assign_resources(const struct pci_bus *bus, + struct list_head *realloc_head, + struct list_head *fail_head); /** * pci_ari_enabled - query ARI forwarding status diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index 986f8eadfd39..2f0ce668a775 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c @@ -99,7 +99,7 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask) for (i = 0; i < nr_entries; i++) msix_entries[i].entry = i; - status = pci_enable_msix(dev, msix_entries, nr_entries); + status = pci_enable_msix_exact(dev, msix_entries, nr_entries); if (status) goto Exit; @@ -171,7 +171,7 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask) pci_disable_msix(dev); /* Now allocate the MSI-X vectors for real */ - status = pci_enable_msix(dev, msix_entries, nvec); + status = pci_enable_msix_exact(dev, msix_entries, nvec); if (status) goto Exit; } @@ -379,10 +379,13 @@ int pcie_port_device_register(struct pci_dev *dev) /* * Initialize service irqs. Don't use service devices that * require interrupts if there is no way to generate them. + * However, some drivers may have a polling mode (e.g. pciehp_poll_mode) + * that can be used in the absence of irqs. Allow them to determine + * if that is to be used. 
*/
 	status = init_service_irqs(dev, irqs, capabilities);
 	if (status) {
-		capabilities &= PCIE_PORT_SERVICE_VC;
+		capabilities &= PCIE_PORT_SERVICE_VC | PCIE_PORT_SERVICE_HP;
 		if (!capabilities)
 			goto error_disable;
 	}
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index ef09f5f2fe6c..2bbf5221afb3 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -171,9 +171,10 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
 		    struct resource *res, unsigned int pos)
 {
 	u32 l, sz, mask;
+	u64 l64, sz64, mask64;
 	u16 orig_cmd;
 	struct pci_bus_region region, inverted_region;
-	bool bar_too_big = false, bar_disabled = false;
+	bool bar_too_big = false, bar_too_high = false, bar_invalid = false;
 
 	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
 
@@ -226,9 +227,9 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
 	}
 
 	if (res->flags & IORESOURCE_MEM_64) {
-		u64 l64 = l;
-		u64 sz64 = sz;
-		u64 mask64 = mask | (u64)~0 << 32;
+		l64 = l;
+		sz64 = sz;
+		mask64 = mask | (u64)~0 << 32;
 
 		pci_read_config_dword(dev, pos + 4, &l);
 		pci_write_config_dword(dev, pos + 4, ~0);
@@ -243,19 +244,22 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
 		if (!sz64)
 			goto fail;
 
-		if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) {
+		if ((sizeof(dma_addr_t) < 8 || sizeof(resource_size_t) < 8) &&
+		    sz64 > 0x100000000ULL) {
+			res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
+			res->start = 0;
+			res->end = 0;
 			bar_too_big = true;
-			goto fail;
+			goto out;
 		}
 
-		if ((sizeof(resource_size_t) < 8) && l) {
-			/* Address above 32-bit boundary; disable the BAR */
-			pci_write_config_dword(dev, pos, 0);
-			pci_write_config_dword(dev, pos + 4, 0);
+		if ((sizeof(dma_addr_t) < 8) && l) {
+			/* Above 32-bit boundary; try to reallocate */
 			res->flags |= IORESOURCE_UNSET;
-			region.start = 0;
-			region.end = sz64;
-			bar_disabled = true;
+			res->start = 0;
+			res->end = sz64;
+			bar_too_high = true;
+			goto out;
 		} else {
 			region.start = l64;
 			region.end = l64 + sz64;
@@ -285,11 +289,10 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
 	 * be claimed by the device.
 	 */
 	if (inverted_region.start != region.start) {
-		dev_info(&dev->dev, "reg 0x%x: initial BAR value %pa invalid; forcing reassignment\n",
-			 pos, &region.start);
 		res->flags |= IORESOURCE_UNSET;
-		res->end -= res->start;
 		res->start = 0;
+		res->end = region.end - region.start;
+		bar_invalid = true;
 	}
 
 	goto out;
@@ -303,8 +306,15 @@ out:
 	pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
 
 	if (bar_too_big)
-		dev_err(&dev->dev, "reg 0x%x: can't handle 64-bit BAR\n", pos);
-	if (res->flags && !bar_disabled)
+		dev_err(&dev->dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
+			pos, (unsigned long long) sz64);
+	if (bar_too_high)
+		dev_info(&dev->dev, "reg 0x%x: can't handle BAR above 4G (bus address %#010llx)\n",
+			 pos, (unsigned long long) l64);
+	if (bar_invalid)
+		dev_info(&dev->dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
+			 pos, (unsigned long long) region.start);
+	if (res->flags)
 		dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);
 
 	return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
 }
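The reworked __pci_read_base() above sizes a BAR by writing all-ones and decoding
the read-back value. For readers who want that arithmetic in isolation, here is a
userspace sketch of the memory-BAR case (illustrative only; the example register
values are made up, and the kernel's own decoding lives in pci_size() and handles
more corner cases):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t l  = 0xf0000008;   /* original BAR: base plus flag bits */
        uint32_t sz = 0xfff00008;   /* read-back after writing ~0 */
        uint32_t mask = ~0xfU;      /* low 4 bits are flags on memory BARs */
        uint64_t size;

        /* Size = two's complement of the writable address bits. */
        size = (~((uint64_t)(sz & mask)) + 1) & 0xffffffffULL;
        printf("base 0x%x size 0x%llx\n", l & mask,
               (unsigned long long)size);
        /* prints: base 0xf0000000 size 0x100000 (a 1 MiB BAR) */
        return 0;
    }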
@@ -465,7 +475,7 @@ void pci_read_bridge_bases(struct pci_bus *child)
 
 	if (dev->transparent) {
 		pci_bus_for_each_resource(child->parent, res, i) {
-			if (res) {
+			if (res && res->flags) {
 				pci_bus_add_resource(child, res,
 						     PCI_SUBTRACTIVE_DECODE);
 				dev_printk(KERN_DEBUG, &dev->dev,
@@ -719,7 +729,7 @@ add_dev:
 	return child;
 }
 
-struct pci_bus *__ref pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
+struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
 {
 	struct pci_bus *child;
 
@@ -984,6 +994,43 @@ void set_pcie_hotplug_bridge(struct pci_dev *pdev)
 
 /**
+ * pci_ext_cfg_is_aliased - is ext config space just an alias of std config?
+ * @dev: PCI device
+ *
+ * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that
+ * when forwarding a type1 configuration request the bridge must check that
+ * the extended register address field is zero.  The bridge is not permitted
+ * to forward the transaction and must handle it as an Unsupported Request.
+ * Some bridges do not follow this rule and simply drop the extended register
+ * bits, resulting in the standard config space being aliased, every 256
+ * bytes across the entire configuration space.  Test for this condition by
+ * comparing the first dword of each potential alias to the vendor/device ID.
+ * Known offenders:
+ *   ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03)
+ *   AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40)
+ */
+static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
+{
+#ifdef CONFIG_PCI_QUIRKS
+	int pos;
+	u32 header, tmp;
+
+	pci_read_config_dword(dev, PCI_VENDOR_ID, &header);
+
+	for (pos = PCI_CFG_SPACE_SIZE;
+	     pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
+		if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
+		    || header != tmp)
+			return false;
+	}
+
+	return true;
+#else
+	return false;
+#endif
+}
+
+/**
  * pci_cfg_space_size - get the configuration space size of the PCI device.
  * @dev: PCI device
  *
@@ -1001,7 +1048,7 @@ static int pci_cfg_space_size_ext(struct pci_dev *dev)
 
 	if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
 		goto fail;
-	if (status == 0xffffffff)
+	if (status == 0xffffffff || pci_ext_cfg_is_aliased(dev))
 		goto fail;
 
 	return PCI_CFG_SPACE_EXP_SIZE;
@@ -1215,6 +1262,7 @@ static void pci_release_dev(struct device *dev)
 	pci_release_of_node(pci_dev);
 	pcibios_release_device(pci_dev);
 	pci_bus_put(pci_dev->bus);
+	kfree(pci_dev->driver_override);
 	kfree(pci_dev);
 }
 
@@ -1369,7 +1417,7 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
 	WARN_ON(ret < 0);
 }
 
-struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
+struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
 {
 	struct pci_dev *dev;
 
@@ -1617,7 +1665,7 @@ static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
  */
 void pcie_bus_configure_settings(struct pci_bus *bus)
 {
-	u8 smpss;
+	u8 smpss = 0;
 
 	if (!bus->self)
 		return;
@@ -1670,8 +1718,7 @@ unsigned int pci_scan_child_bus(struct pci_bus *bus)
 	for (pass=0; pass < 2; pass++)
 		list_for_each_entry(dev, &bus->devices, bus_list) {
-			if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
-			    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
+			if (pci_is_bridge(dev))
 				max = pci_scan_bridge(bus, dev, max, pass);
 		}
 
@@ -1958,7 +2005,7 @@ EXPORT_SYMBOL(pci_scan_bus);
 *
 * Returns the max number of subordinate bus discovered.
*/ -unsigned int __ref pci_rescan_bus_bridge_resize(struct pci_dev *bridge) +unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge) { unsigned int max; struct pci_bus *bus = bridge->subordinate; @@ -1981,7 +2028,7 @@ unsigned int __ref pci_rescan_bus_bridge_resize(struct pci_dev *bridge) * * Returns the max number of subordinate bus discovered. */ -unsigned int __ref pci_rescan_bus(struct pci_bus *bus) +unsigned int pci_rescan_bus(struct pci_bus *bus) { unsigned int max; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index e7292065a1b1..92e68c7747f7 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -2954,6 +2954,7 @@ static void disable_igfx_irq(struct pci_dev *dev) } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq); /* * PCI devices which are on Intel chips can skip the 10ms delay @@ -2991,6 +2992,14 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, 0x0030, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_HEADER(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */ quirk_broken_intx_masking); +/* + * Realtek RTL8169 PCI Gigabit Ethernet Controller (rev 10) + * Subsystem: Realtek RTL8169/8110 Family PCI Gigabit Ethernet NIC + * + * RTL8110SC - Fails under PCI device assignment using DisINTx masking. + */ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169, + quirk_broken_intx_masking); static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, struct pci_fixup *end) @@ -3453,6 +3462,8 @@ static const u16 pci_quirk_intel_pch_acs_ids[] = { /* Wildcat PCH */ 0x9c90, 0x9c91, 0x9c92, 0x9c93, 0x9c94, 0x9c95, 0x9c96, 0x9c97, 0x9c98, 0x9c99, 0x9c9a, 0x9c9b, + /* Patsburg (X79) PCH */ + 0x1d10, 0x1d12, 0x1d14, 0x1d16, 0x1d18, 0x1d1a, 0x1d1c, 0x1d1e, }; static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev) diff --git a/drivers/pci/search.c b/drivers/pci/search.c index 4a1b972efe7f..8e495bda678f 100644 --- a/drivers/pci/search.c +++ b/drivers/pci/search.c @@ -7,7 +7,6 @@ * Copyright (C) 2003 -- 2004 Greg Kroah-Hartman <greg@kroah.com> */ -#include <linux/init.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/module.h> diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 138bdd6393be..fd9b545c3cf5 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -713,12 +713,11 @@ static void pci_bridge_check_ranges(struct pci_bus *bus) bus resource of a given type. Note: we intentionally skip the bus resources which have already been assigned (that is, have non-NULL parent resource). 
*/
-static struct resource *find_free_bus_resource(struct pci_bus *bus, unsigned long type)
+static struct resource *find_free_bus_resource(struct pci_bus *bus,
+			unsigned long type_mask, unsigned long type)
 {
 	int i;
 	struct resource *r;
-	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
-				  IORESOURCE_PREFETCH;
 
 	pci_bus_for_each_resource(bus, r, i) {
 		if (r == &ioport_resource || r == &iomem_resource)
@@ -815,7 +814,8 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
 		resource_size_t add_size, struct list_head *realloc_head)
 {
 	struct pci_dev *dev;
-	struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO);
+	struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO,
+							IORESOURCE_IO);
 	resource_size_t size = 0, size0 = 0, size1 = 0;
 	resource_size_t children_add_size = 0;
 	resource_size_t min_align, align;
@@ -907,36 +907,40 @@ static inline resource_size_t calculate_mem_align(resource_size_t *aligns,
 * @bus : the bus
 * @mask: mask the resource flag, then compare it with type
 * @type: the type of free resource from bridge
+ * @type2: second match type
+ * @type3: third match type
 * @min_size : the minimum memory window that must be allocated
 * @add_size : additional optional memory window
 * @realloc_head : track the additional memory window on this list
 *
 * Calculate the size of the bus and minimal alignment which
 * guarantees that all child resources fit in this size.
+ *
+ * Returns -ENOSPC if there's no available bus resource of the desired type.
+ * Otherwise, sets the bus resource start/end to indicate the required
+ * size, adds things to realloc_head (if supplied), and returns 0.
 */
 static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
-			 unsigned long type, resource_size_t min_size,
-			 resource_size_t add_size,
-			 struct list_head *realloc_head)
+			 unsigned long type, unsigned long type2,
+			 unsigned long type3,
+			 resource_size_t min_size, resource_size_t add_size,
+			 struct list_head *realloc_head)
 {
 	struct pci_dev *dev;
 	resource_size_t min_align, align, size, size0, size1;
-	resource_size_t aligns[12];	/* Alignments from 1Mb to 2Gb */
+	resource_size_t aligns[14];	/* Alignments from 1Mb to 8Gb */
 	int order, max_order;
-	struct resource *b_res = find_free_bus_resource(bus, type);
-	unsigned int mem64_mask = 0;
+	struct resource *b_res = find_free_bus_resource(bus,
+					mask | IORESOURCE_PREFETCH, type);
 	resource_size_t children_add_size = 0;
 
 	if (!b_res)
-		return 0;
+		return -ENOSPC;
 
 	memset(aligns, 0, sizeof(aligns));
 	max_order = 0;
 	size = 0;
 
-	mem64_mask = b_res->flags & IORESOURCE_MEM_64;
-	b_res->flags &= ~IORESOURCE_MEM_64;
-
 	list_for_each_entry(dev, &bus->devices, bus_list) {
 		int i;
 
@@ -944,7 +948,9 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
 			struct resource *r = &dev->resource[i];
 			resource_size_t r_size;
 
-			if (r->parent || (r->flags & mask) != type)
+			if (r->parent || ((r->flags & mask) != type &&
+					  (r->flags & mask) != type2 &&
+					  (r->flags & mask) != type3))
 				continue;
 			r_size = resource_size(r);
 #ifdef CONFIG_PCI_IOV
@@ -957,10 +963,17 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
 				continue;
 			}
 #endif
-			/* For bridges size != alignment */
+			/*
+			 * aligns[0] is for 1MB (since bridge memory
+			 * windows are always at least 1MB aligned), so
+			 * keep "order" from being negative for smaller
+			 * resources.
+ */ align = pci_resource_alignment(dev, r); order = __ffs(align) - 20; - if (order > 11) { + if (order < 0) + order = 0; + if (order >= ARRAY_SIZE(aligns)) { dev_warn(&dev->dev, "disabling BAR %d: %pR " "(bad alignment %#llx)\n", i, r, (unsigned long long) align); @@ -968,15 +981,12 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, continue; } size += r_size; - if (order < 0) - order = 0; /* Exclude ranges with size > align from calculation of the alignment. */ if (r_size == align) aligns[order] += align; if (order > max_order) max_order = order; - mem64_mask &= r->flags & IORESOURCE_MEM_64; if (realloc_head) children_add_size += get_res_add_size(realloc_head, r); @@ -997,18 +1007,18 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, "%pR to %pR (unused)\n", b_res, &bus->busn_res); b_res->flags = 0; - return 1; + return 0; } b_res->start = min_align; b_res->end = size0 + min_align - 1; - b_res->flags |= IORESOURCE_STARTALIGN | mem64_mask; + b_res->flags |= IORESOURCE_STARTALIGN; if (size1 > size0 && realloc_head) { add_to_list(realloc_head, bus->self, b_res, size1-size0, min_align); dev_printk(KERN_DEBUG, &bus->self->dev, "bridge window " "%pR to %pR add_size %llx\n", b_res, &bus->busn_res, (unsigned long long)size1-size0); } - return 1; + return 0; } unsigned long pci_cardbus_resource_alignment(struct resource *res) @@ -1113,12 +1123,13 @@ handle_done: ; } -void __ref __pci_bus_size_bridges(struct pci_bus *bus, - struct list_head *realloc_head) +void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head) { struct pci_dev *dev; - unsigned long mask, prefmask; + unsigned long mask, prefmask, type2 = 0, type3 = 0; resource_size_t additional_mem_size = 0, additional_io_size = 0; + struct resource *b_res; + int ret; list_for_each_entry(dev, &bus->devices, bus_list) { struct pci_bus *b = dev->subordinate; @@ -1152,41 +1163,93 @@ void __ref __pci_bus_size_bridges(struct pci_bus *bus, additional_io_size = pci_hotplug_io_size; additional_mem_size = pci_hotplug_mem_size; } - /* - * Follow thru - */ + /* Fall through */ default: pbus_size_io(bus, realloc_head ? 0 : additional_io_size, additional_io_size, realloc_head); - /* If the bridge supports prefetchable range, size it - separately. If it doesn't, or its prefetchable window - has already been allocated by arch code, try - non-prefetchable range for both types of PCI memory - resources. */ + + /* + * If there's a 64-bit prefetchable MMIO window, compute + * the size required to put all 64-bit prefetchable + * resources in it. + */ + b_res = &bus->self->resource[PCI_BRIDGE_RESOURCES]; mask = IORESOURCE_MEM; prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH; - if (pbus_size_mem(bus, prefmask, prefmask, + if (b_res[2].flags & IORESOURCE_MEM_64) { + prefmask |= IORESOURCE_MEM_64; + ret = pbus_size_mem(bus, prefmask, prefmask, + prefmask, prefmask, realloc_head ? 0 : additional_mem_size, - additional_mem_size, realloc_head)) - mask = prefmask; /* Success, size non-prefetch only. */ - else - additional_mem_size += additional_mem_size; - pbus_size_mem(bus, mask, IORESOURCE_MEM, + additional_mem_size, realloc_head); + + /* + * If successful, all non-prefetchable resources + * and any 32-bit prefetchable resources will go in + * the non-prefetchable window. 
+			 */
+			if (ret == 0) {
+				mask = prefmask;
+				type2 = prefmask & ~IORESOURCE_MEM_64;
+				type3 = prefmask & ~IORESOURCE_PREFETCH;
+			}
+		}
+
+		/*
+		 * If there is no 64-bit prefetchable window, compute the
+		 * size required to put all prefetchable resources in the
+		 * 32-bit prefetchable window (if there is one).
+		 */
+		if (!type2) {
+			prefmask &= ~IORESOURCE_MEM_64;
+			ret = pbus_size_mem(bus, prefmask, prefmask,
+					    prefmask, prefmask,
+					    realloc_head ? 0 : additional_mem_size,
+					    additional_mem_size, realloc_head);
+
+			/*
+			 * If successful, only non-prefetchable resources
+			 * will go in the non-prefetchable window.
+			 */
+			if (ret == 0)
+				mask = prefmask;
+			else
+				additional_mem_size += additional_mem_size;
+
+			type2 = type3 = IORESOURCE_MEM;
+		}
+
+		/*
+		 * Compute the size required to put everything else in the
+		 * non-prefetchable window. This includes:
+		 *
+		 *   - all non-prefetchable resources
+		 *   - 32-bit prefetchable resources if there's a 64-bit
+		 *     prefetchable window or no prefetchable window at all
+		 *   - 64-bit prefetchable resources if there's no
+		 *     prefetchable window at all
+		 *
+		 * Note that the strategy in __pci_assign_resource() must
+		 * match that used here.  Specifically, we cannot put a
+		 * 32-bit prefetchable resource in a 64-bit prefetchable
+		 * window.
+		 */
+		pbus_size_mem(bus, mask, IORESOURCE_MEM, type2, type3,
+			      realloc_head ? 0 : additional_mem_size,
+			      additional_mem_size, realloc_head);
 		break;
 	}
 }
 
-void __ref pci_bus_size_bridges(struct pci_bus *bus)
+void pci_bus_size_bridges(struct pci_bus *bus)
 {
 	__pci_bus_size_bridges(bus, NULL);
 }
 EXPORT_SYMBOL(pci_bus_size_bridges);
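The sizing policy spelled out in the comments above can be restated as a small
decision function. The sketch below is illustrative only; pick_window() and the
enum are invented names, not kernel API, and it compiles as plain C for
experimenting with the rules:

    #include <stdbool.h>
    #include <stdio.h>

    enum window { NONPREF, PREF32, PREF64 };

    static enum window pick_window(bool res_pref, bool res_64,
                                   bool bridge_has_pref, bool bridge_pref_64)
    {
        if (res_pref && bridge_has_pref) {
            if (bridge_pref_64)
                /* a 64-bit pref window takes only 64-bit pref BARs */
                return res_64 ? PREF64 : NONPREF;
            /* a 32-bit pref window may take 32- or 64-bit pref BARs */
            return PREF32;
        }
        /* everything else lands in the non-prefetchable window */
        return NONPREF;
    }

    int main(void)
    {
        /* 32-bit pref BAR under a 64-bit pref window -> non-pref (0) */
        printf("%d\n", pick_window(true, false, true, true));
        return 0;
    }

The same table is what __pci_assign_resource() (further below) has to honor at
assignment time, which is why the two functions must stay in sync.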
 
-void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
-				      struct list_head *realloc_head,
-				      struct list_head *fail_head)
+void __pci_bus_assign_resources(const struct pci_bus *bus,
+				struct list_head *realloc_head,
+				struct list_head *fail_head)
 {
 	struct pci_bus *b;
 	struct pci_dev *dev;
@@ -1218,15 +1281,15 @@ void __pci_bus_assign_resources(const struct pci_bus *bus,
 	}
 }
 
-void __ref pci_bus_assign_resources(const struct pci_bus *bus)
+void pci_bus_assign_resources(const struct pci_bus *bus)
 {
 	__pci_bus_assign_resources(bus, NULL, NULL);
 }
 EXPORT_SYMBOL(pci_bus_assign_resources);
 
-static void __ref __pci_bridge_assign_resources(const struct pci_dev *bridge,
-						struct list_head *add_head,
-						struct list_head *fail_head)
+static void __pci_bridge_assign_resources(const struct pci_dev *bridge,
+					  struct list_head *add_head,
+					  struct list_head *fail_head)
 {
 	struct pci_bus *b;
 
@@ -1257,42 +1320,66 @@ static void __pci_bridge_assign_resources(const struct pci_dev *bridge,
 static void pci_bridge_release_resources(struct pci_bus *bus,
 					 unsigned long type)
 {
-	int idx;
-	bool changed = false;
-	struct pci_dev *dev;
+	struct pci_dev *dev = bus->self;
 	struct resource *r;
 	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
-				  IORESOURCE_PREFETCH;
+				  IORESOURCE_PREFETCH | IORESOURCE_MEM_64;
+	unsigned old_flags = 0;
+	struct resource *b_res;
+	int idx = 1;
 
-	dev = bus->self;
-	for (idx = PCI_BRIDGE_RESOURCES; idx <= PCI_BRIDGE_RESOURCE_END;
-	     idx++) {
-		r = &dev->resource[idx];
-		if ((r->flags & type_mask) != type)
-			continue;
-		if (!r->parent)
-			continue;
-		/*
-		 * if there are children under that, we should release them
-		 * all
-		 */
-		release_child_resources(r);
-		if (!release_resource(r)) {
-			dev_printk(KERN_DEBUG, &dev->dev,
-				   "resource %d %pR released\n", idx, r);
-			/* keep the old size */
-			r->end = resource_size(r) - 1;
-			r->start = 0;
-			r->flags = 0;
-			changed = true;
-		}
-	}
+	b_res = &dev->resource[PCI_BRIDGE_RESOURCES];
+
+	/*
+	 * 1. If the I/O port assignment fails, release the bridge I/O
+	 *    port window.
+	 * 2. If the non-prefetchable MMIO assignment fails, release the
+	 *    bridge non-prefetchable MMIO window.
+	 * 3. If the 64-bit prefetchable MMIO assignment fails, and the
+	 *    bridge prefetchable window is 64-bit, release the bridge
+	 *    prefetchable MMIO window.
+	 * 4. If the prefetchable MMIO assignment fails, and the bridge
+	 *    prefetchable window is 32-bit, release the bridge
+	 *    prefetchable MMIO window.
+	 * 5. If the prefetchable MMIO assignment fails, and no bridge
+	 *    prefetchable window is assigned, release the bridge
+	 *    non-prefetchable MMIO window.
+	 */
+	if (type & IORESOURCE_IO)
+		idx = 0;
+	else if (!(type & IORESOURCE_PREFETCH))
+		idx = 1;
+	else if ((type & IORESOURCE_MEM_64) &&
+		 (b_res[2].flags & IORESOURCE_MEM_64))
+		idx = 2;
+	else if (!(b_res[2].flags & IORESOURCE_MEM_64) &&
+		 (b_res[2].flags & IORESOURCE_PREFETCH))
+		idx = 2;
+	else
+		idx = 1;
+
+	r = &b_res[idx];
+
+	if (!r->parent)
+		return;
+
+	/*
+	 * if there are children under that, we should release them
+	 * all
+	 */
+	release_child_resources(r);
+	if (!release_resource(r)) {
+		type = old_flags = r->flags & type_mask;
+		dev_printk(KERN_DEBUG, &dev->dev, "resource %d %pR released\n",
+			   PCI_BRIDGE_RESOURCES + idx, r);
+		/* keep the old size */
+		r->end = resource_size(r) - 1;
+		r->start = 0;
+		r->flags = 0;
 
-	if (changed) {
 		/* avoid touching the one without PREF */
 		if (type & IORESOURCE_PREFETCH)
 			type = IORESOURCE_PREFETCH;
 		__pci_setup_bridge(bus, type);
+		/* for next child res under same bridge */
+		r->flags = old_flags;
 	}
 }
 
 enum release_type {
@@ -1304,9 +1391,9 @@ enum release_type {
  * try to release pci bridge resources that is from leaf bridge,
  * so we can allocate big new one later
  */
-static void __ref pci_bus_release_bridge_resources(struct pci_bus *bus,
-						   unsigned long type,
-						   enum release_type rel_type)
+static void pci_bus_release_bridge_resources(struct pci_bus *bus,
					     unsigned long type,
					     enum release_type rel_type)
 {
 	struct pci_dev *dev;
 	bool is_leaf_bridge = true;
@@ -1471,7 +1558,7 @@ void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
 	LIST_HEAD(fail_head);
 	struct pci_dev_resource *fail_res;
 	unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
-				  IORESOURCE_PREFETCH;
+				  IORESOURCE_PREFETCH | IORESOURCE_MEM_64;
 	int pci_try_num = 1;
 	enum enable_type enable_local;
 
@@ -1629,9 +1716,7 @@ void pci_assign_unassigned_bus_resources(struct pci_bus *bus)
 
 	down_read(&pci_bus_sem);
 	list_for_each_entry(dev, &bus->devices, bus_list)
-		if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
-		    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
-			if (dev->subordinate)
+		if (pci_is_bridge(dev) && pci_has_subordinate(dev))
 			__pci_bus_size_bridges(dev->subordinate,
 						 &add_list);
 	up_read(&pci_bus_sem);
diff --git a/drivers/pci/setup-irq.c b/drivers/pci/setup-irq.c
index 9bd6864ec5d3..dbc4ffcf42de 100644
--- a/drivers/pci/setup-irq.c
+++ b/drivers/pci/setup-irq.c
@@ -10,7 +10,6 @@
  */
 
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/pci.h>
 #include <linux/errno.h>
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 7eed671d5586..33f9e32d94d0 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -16,7 +16,6 @@
  *	     Resource sorting
  */
 
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/export.h>
 #include <linux/pci.h>
@@ -209,21 +208,42 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
 
 	min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
 
-	/* First, try exact prefetching match.. */
+	/*
+	 * First, try exact prefetching match.
Even if a 64-bit + * prefetchable bridge window is below 4GB, we can't put a 32-bit + * prefetchable resource in it because pbus_size_mem() assumes a + * 64-bit window will contain no 32-bit resources. If we assign + * things differently than they were sized, not everything will fit. + */ ret = pci_bus_alloc_resource(bus, res, size, align, min, - IORESOURCE_PREFETCH, + IORESOURCE_PREFETCH | IORESOURCE_MEM_64, pcibios_align_resource, dev); + if (ret == 0) + return 0; - if (ret < 0 && (res->flags & IORESOURCE_PREFETCH)) { - /* - * That failed. - * - * But a prefetching area can handle a non-prefetching - * window (it will just not perform as well). - */ - ret = pci_bus_alloc_resource(bus, res, size, align, min, 0, + /* + * If the prefetchable window is only 32 bits wide, we can put + * 64-bit prefetchable resources in it. + */ + if ((res->flags & (IORESOURCE_PREFETCH | IORESOURCE_MEM_64)) == + (IORESOURCE_PREFETCH | IORESOURCE_MEM_64)) { + ret = pci_bus_alloc_resource(bus, res, size, align, min, + IORESOURCE_PREFETCH, pcibios_align_resource, dev); + if (ret == 0) + return 0; } + + /* + * If we didn't find a better match, we can put any memory resource + * in a non-prefetchable window. If this resource is 32 bits and + * non-prefetchable, the first call already tried the only possibility + * so we don't need to try again. + */ + if (res->flags & (IORESOURCE_PREFETCH | IORESOURCE_MEM_64)) + ret = pci_bus_alloc_resource(bus, res, size, align, min, 0, + pcibios_align_resource, dev); + return ret; } diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c index 8bde61952d20..4fe4cc4ae19a 100644 --- a/drivers/pcmcia/cardbus.c +++ b/drivers/pcmcia/cardbus.c @@ -78,8 +78,7 @@ int __ref cb_alloc(struct pcmcia_socket *s) max = bus->busn_res.start; for (pass = 0; pass < 2; pass++) list_for_each_entry(dev, &bus->devices, bus_list) - if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || - dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) + if (pci_is_bridge(dev)) max = pci_scan_bridge(bus, dev, max, pass); /* diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c index 9802b67040cc..2c61281bebd7 100644 --- a/drivers/pinctrl/vt8500/pinctrl-wmt.c +++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c @@ -523,17 +523,6 @@ static int wmt_gpio_get_direction(struct gpio_chip *chip, unsigned offset) return GPIOF_DIR_IN; } -static int wmt_gpio_direction_input(struct gpio_chip *chip, unsigned offset) -{ - return pinctrl_gpio_direction_input(chip->base + offset); -} - -static int wmt_gpio_direction_output(struct gpio_chip *chip, unsigned offset, - int value) -{ - return pinctrl_gpio_direction_output(chip->base + offset); -} - static int wmt_gpio_get_value(struct gpio_chip *chip, unsigned offset) { struct wmt_pinctrl_data *data = dev_get_drvdata(chip->dev); @@ -568,6 +557,18 @@ static void wmt_gpio_set_value(struct gpio_chip *chip, unsigned offset, wmt_clearbits(data, reg_data_out, BIT(bit)); } +static int wmt_gpio_direction_input(struct gpio_chip *chip, unsigned offset) +{ + return pinctrl_gpio_direction_input(chip->base + offset); +} + +static int wmt_gpio_direction_output(struct gpio_chip *chip, unsigned offset, + int value) +{ + wmt_gpio_set_value(chip, offset, value); + return pinctrl_gpio_direction_output(chip->base + offset); +} + static struct gpio_chip wmt_gpio_chip = { .label = "gpio-wmt", .owner = THIS_MODULE, diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index c5e082fb82fa..91ef69a52263 100644 --- a/drivers/platform/x86/asus-wmi.c +++ 
b/drivers/platform/x86/asus-wmi.c @@ -642,8 +642,7 @@ static void asus_rfkill_hotplug(struct asus_wmi *asus) dev = pci_scan_single_device(bus, 0); if (dev) { pci_bus_assign_resources(bus); - if (pci_bus_add_device(dev)) - pr_err("Unable to hotplug wifi\n"); + pci_bus_add_device(dev); } } else { dev = pci_get_slot(bus, 0); diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c index 399e8c562192..9b0c57cd1d4a 100644 --- a/drivers/platform/x86/eeepc-laptop.c +++ b/drivers/platform/x86/eeepc-laptop.c @@ -633,8 +633,7 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle) dev = pci_scan_single_device(bus, 0); if (dev) { pci_bus_assign_resources(bus); - if (pci_bus_add_device(dev)) - pr_err("Unable to hotplug wifi\n"); + pci_bus_add_device(dev); } } else { dev = pci_get_slot(bus, 0); diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig index 6963bdf54175..6aea373547f6 100644 --- a/drivers/ptp/Kconfig +++ b/drivers/ptp/Kconfig @@ -6,6 +6,7 @@ menu "PTP clock support" config PTP_1588_CLOCK tristate "PTP clock support" + depends on NET select PPS select NET_PTP_CLASSIFY help @@ -74,7 +75,7 @@ config DP83640_PHY config PTP_1588_CLOCK_PCH tristate "Intel PCH EG20T as PTP clock" depends on X86 || COMPILE_TEST - depends on HAS_IOMEM + depends on HAS_IOMEM && NET select PTP_1588_CLOCK help This driver adds support for using the PCH EG20T as a PTP diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c index bd628a6f981d..e5f13c4310fe 100644 --- a/drivers/rtc/rtc-hym8563.c +++ b/drivers/rtc/rtc-hym8563.c @@ -569,6 +569,9 @@ static int hym8563_probe(struct i2c_client *client, if (IS_ERR(hym8563->rtc)) return PTR_ERR(hym8563->rtc); + /* the hym8563 alarm only supports a minute accuracy */ + hym8563->rtc->uie_unsupported = 1; + #ifdef CONFIG_COMMON_CLK hym8563_clkout_register_clk(hym8563); #endif diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c index 4ccb5d869389..a40ee1e37486 100644 --- a/drivers/sbus/char/jsflash.c +++ b/drivers/sbus/char/jsflash.c @@ -207,7 +207,7 @@ static void jsfd_do_request(struct request_queue *q) goto end; } - jsfd_read(req->buffer, jdp->dbase + offset, len); + jsfd_read(bio_data(req->bio), jdp->dbase + offset, len); err = 0; end: if (!__blk_end_request_cur(req, err)) diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c index 296c936cc03c..a8d721ff19eb 100644 --- a/drivers/scsi/atari_scsi.c +++ b/drivers/scsi/atari_scsi.c @@ -639,7 +639,7 @@ static int __init atari_scsi_detect(struct scsi_host_template *host) "double buffer\n"); return 0; } - atari_dma_phys_buffer = virt_to_phys(atari_dma_buffer); + atari_dma_phys_buffer = atari_stram_to_phys(atari_dma_buffer); atari_dma_orig_addr = 0; } #endif diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 9db097a28a74..a0c95cac91f0 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -140,7 +140,7 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy) cmd->result = 0; spin_lock_irqsave(q->queue_lock, flags); blk_requeue_request(q, cmd->request); - kblockd_schedule_work(q, &device->requeue_work); + kblockd_schedule_work(&device->requeue_work); spin_unlock_irqrestore(q->queue_lock, flags); } @@ -1019,8 +1019,6 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, return BLKPREP_DEFER; } - req->buffer = NULL; - /* * Next, walk the list, and fill in the addresses and sizes of * each segment. 
@@ -1158,7 +1156,6 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req) BUG_ON(blk_rq_bytes(req)); memset(&cmd->sdb, 0, sizeof(cmd->sdb)); - req->buffer = NULL; } cmd->cmd_len = req->cmd_len; diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index 1b681427dde0..c341f855fadc 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -1621,8 +1621,6 @@ void sas_rphy_free(struct sas_rphy *rphy) list_del(&rphy->list); mutex_unlock(&sas_host->lock); - sas_bsg_remove(shost, rphy); - transport_destroy_device(dev); put_device(dev); @@ -1681,6 +1679,7 @@ sas_rphy_remove(struct sas_rphy *rphy) } sas_rphy_unlink(rphy); + sas_bsg_remove(NULL, rphy); transport_remove_device(dev); device_del(dev); } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index efcbcd182863..96af195224f2 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -737,16 +737,14 @@ static int sd_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq) goto out; } + rq->completion_data = page; blk_add_request_payload(rq, page, len); ret = scsi_setup_blk_pc_cmnd(sdp, rq); - rq->buffer = page_address(page); rq->__data_len = nr_bytes; out: - if (ret != BLKPREP_OK) { + if (ret != BLKPREP_OK) __free_page(page); - rq->buffer = NULL; - } return ret; } @@ -842,10 +840,9 @@ static void sd_unprep_fn(struct request_queue *q, struct request *rq) { struct scsi_cmnd *SCpnt = rq->special; - if (rq->cmd_flags & REQ_DISCARD) { - free_page((unsigned long)rq->buffer); - rq->buffer = NULL; - } + if (rq->cmd_flags & REQ_DISCARD) + __free_page(rq->completion_data); + if (SCpnt->cmnd != rq->cmd) { mempool_free(SCpnt->cmnd, sd_cdb_pool); SCpnt->cmnd = NULL; diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile index fc67f564f02c..788ed9b59b4e 100644 --- a/drivers/sh/Makefile +++ b/drivers/sh/Makefile @@ -1,10 +1,12 @@ # # Makefile for the SuperH specific drivers. 
# -obj-y := intc/ +obj-$(CONFIG_SUPERH) += intc/ +obj-$(CONFIG_ARCH_SHMOBILE_LEGACY) += intc/ +ifneq ($(CONFIG_COMMON_CLK),y) +obj-$(CONFIG_HAVE_CLK) += clk/ +endif +obj-$(CONFIG_MAPLE) += maple/ +obj-$(CONFIG_SUPERHYWAY) += superhyway/ -obj-$(CONFIG_HAVE_CLK) += clk/ -obj-$(CONFIG_MAPLE) += maple/ -obj-$(CONFIG_SUPERHYWAY) += superhyway/ - -obj-y += pm_runtime.o +obj-y += pm_runtime.o diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c index 8afa5a4589f2..10c65eb51f85 100644 --- a/drivers/sh/pm_runtime.c +++ b/drivers/sh/pm_runtime.c @@ -50,8 +50,25 @@ static struct pm_clk_notifier_block platform_bus_notifier = { .con_ids = { NULL, }, }; +static bool default_pm_on; + static int __init sh_pm_runtime_init(void) { + if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) { + if (!of_machine_is_compatible("renesas,emev2") && + !of_machine_is_compatible("renesas,r7s72100") && + !of_machine_is_compatible("renesas,r8a73a4") && + !of_machine_is_compatible("renesas,r8a7740") && + !of_machine_is_compatible("renesas,r8a7778") && + !of_machine_is_compatible("renesas,r8a7779") && + !of_machine_is_compatible("renesas,r8a7790") && + !of_machine_is_compatible("renesas,r8a7791") && + !of_machine_is_compatible("renesas,sh7372") && + !of_machine_is_compatible("renesas,sh73a0")) + return 0; + } + + default_pm_on = true; pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); return 0; } @@ -59,7 +76,8 @@ core_initcall(sh_pm_runtime_init); static int __init sh_pm_runtime_late_init(void) { - pm_genpd_poweroff_unused(); + if (default_pm_on) + pm_genpd_poweroff_unused(); return 0; } late_initcall(sh_pm_runtime_late_init); diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c index 713af4806f26..f6759dc0153b 100644 --- a/drivers/spi/spi-pxa2xx-dma.c +++ b/drivers/spi/spi-pxa2xx-dma.c @@ -29,18 +29,6 @@ static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data, struct sg_table *sgt; void *buf, *pbuf; - /* - * Some DMA controllers have problems transferring buffers that are - * not multiple of 4 bytes. So we truncate the transfer so that it - * is suitable for such controllers, and handle the trailing bytes - * manually after the DMA completes. - * - * REVISIT: It would be better if this information could be - * retrieved directly from the DMA device in a similar way than - * ->copy_align etc. is done. 
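The sh_pm_runtime_init() change above gates the legacy clock PM notifier behind a whitelist of known Renesas SoCs, checked with of_machine_is_compatible(). The shape of that check, reduced to standard C with a mocked-up compatible string (the real function matches against the "compatible" property of the device tree root node):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Mock stand-in for of_machine_is_compatible(). */
static const char *machine = "renesas,r8a7791";

static bool of_machine_is_compatible_demo(const char *compat)
{
        return strcmp(machine, compat) == 0;
}

static const char *const known_socs[] = {
        "renesas,emev2",
        "renesas,r8a7790",
        "renesas,r8a7791",
        /* ...rest of the whitelist... */
};

static int sh_pm_runtime_init_demo(void)
{
        size_t i;
        bool known = false;

        for (i = 0; i < sizeof(known_socs) / sizeof(known_socs[0]); i++)
                if (of_machine_is_compatible_demo(known_socs[i]))
                        known = true;
        if (!known)
                return 0;       /* foreign machine: leave PM alone */

        printf("registering platform bus clock PM notifier\n");
        return 0;
}

int main(void)
{
        return sh_pm_runtime_init_demo();
}

An explicit whitelist like this is deliberately conservative: on a multiplatform kernel the initcall runs everywhere, so it must do nothing unless it is certain the machine is one it owns.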
- */ - len = ALIGN(drv_data->len, 4); - if (dir == DMA_TO_DEVICE) { dmadev = drv_data->tx_chan->device->dev; sgt = &drv_data->tx_sgt; @@ -144,12 +132,8 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data, if (!error) { pxa2xx_spi_unmap_dma_buffers(drv_data); - /* Handle the last bytes of unaligned transfer */ drv_data->tx += drv_data->tx_map_len; - drv_data->write(drv_data); - drv_data->rx += drv_data->rx_map_len; - drv_data->read(drv_data); msg->actual_length += drv_data->len; msg->state = pxa2xx_spi_next_transfer(drv_data); diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c index b032e8885e24..78c66e3c53ed 100644 --- a/drivers/spi/spi-qup.c +++ b/drivers/spi/spi-qup.c @@ -734,7 +734,7 @@ static int spi_qup_remove(struct platform_device *pdev) int ret; ret = pm_runtime_get_sync(&pdev->dev); - if (ret) + if (ret < 0) return ret; ret = spi_qup_set_state(controller, QUP_STATE_RESET); diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 4eb9bf02996c..939edf473235 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -580,6 +580,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable) spi->master->set_cs(spi, !enable); } +#ifdef CONFIG_HAS_DMA static int spi_map_buf(struct spi_master *master, struct device *dev, struct sg_table *sgt, void *buf, size_t len, enum dma_data_direction dir) @@ -637,55 +638,12 @@ static void spi_unmap_buf(struct spi_master *master, struct device *dev, } } -static int spi_map_msg(struct spi_master *master, struct spi_message *msg) +static int __spi_map_msg(struct spi_master *master, struct spi_message *msg) { struct device *tx_dev, *rx_dev; struct spi_transfer *xfer; - void *tmp; - unsigned int max_tx, max_rx; int ret; - if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) { - max_tx = 0; - max_rx = 0; - - list_for_each_entry(xfer, &msg->transfers, transfer_list) { - if ((master->flags & SPI_MASTER_MUST_TX) && - !xfer->tx_buf) - max_tx = max(xfer->len, max_tx); - if ((master->flags & SPI_MASTER_MUST_RX) && - !xfer->rx_buf) - max_rx = max(xfer->len, max_rx); - } - - if (max_tx) { - tmp = krealloc(master->dummy_tx, max_tx, - GFP_KERNEL | GFP_DMA); - if (!tmp) - return -ENOMEM; - master->dummy_tx = tmp; - memset(tmp, 0, max_tx); - } - - if (max_rx) { - tmp = krealloc(master->dummy_rx, max_rx, - GFP_KERNEL | GFP_DMA); - if (!tmp) - return -ENOMEM; - master->dummy_rx = tmp; - } - - if (max_tx || max_rx) { - list_for_each_entry(xfer, &msg->transfers, - transfer_list) { - if (!xfer->tx_buf) - xfer->tx_buf = master->dummy_tx; - if (!xfer->rx_buf) - xfer->rx_buf = master->dummy_rx; - } - } - } - if (!master->can_dma) return 0; @@ -742,6 +700,69 @@ static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg) return 0; } +#else /* !CONFIG_HAS_DMA */ +static inline int __spi_map_msg(struct spi_master *master, + struct spi_message *msg) +{ + return 0; +} + +static inline int spi_unmap_msg(struct spi_master *master, + struct spi_message *msg) +{ + return 0; +} +#endif /* !CONFIG_HAS_DMA */ + +static int spi_map_msg(struct spi_master *master, struct spi_message *msg) +{ + struct spi_transfer *xfer; + void *tmp; + unsigned int max_tx, max_rx; + + if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) { + max_tx = 0; + max_rx = 0; + + list_for_each_entry(xfer, &msg->transfers, transfer_list) { + if ((master->flags & SPI_MASTER_MUST_TX) && + !xfer->tx_buf) + max_tx = max(xfer->len, max_tx); + if ((master->flags & SPI_MASTER_MUST_RX) && + !xfer->rx_buf) + max_rx = max(xfer->len, max_rx); + } + + if 
(max_tx) { + tmp = krealloc(master->dummy_tx, max_tx, + GFP_KERNEL | GFP_DMA); + if (!tmp) + return -ENOMEM; + master->dummy_tx = tmp; + memset(tmp, 0, max_tx); + } + + if (max_rx) { + tmp = krealloc(master->dummy_rx, max_rx, + GFP_KERNEL | GFP_DMA); + if (!tmp) + return -ENOMEM; + master->dummy_rx = tmp; + } + + if (max_tx || max_rx) { + list_for_each_entry(xfer, &msg->transfers, + transfer_list) { + if (!xfer->tx_buf) + xfer->tx_buf = master->dummy_tx; + if (!xfer->rx_buf) + xfer->rx_buf = master->dummy_rx; + } + } + } + + return __spi_map_msg(master, msg); +} /* * spi_transfer_one_message - Default implementation of transfer_one_message() @@ -1151,7 +1172,6 @@ static int spi_master_initialize_queue(struct spi_master *master) { int ret; - master->queued = true; master->transfer = spi_queued_transfer; if (!master->transfer_one_message) master->transfer_one_message = spi_transfer_one_message; @@ -1162,6 +1182,7 @@ static int spi_master_initialize_queue(struct spi_master *master) dev_err(&master->dev, "problem initializing queue\n"); goto err_init_queue; } + master->queued = true; ret = spi_start_queue(master); if (ret) { dev_err(&master->dev, "problem starting queue\n"); @@ -1171,8 +1192,8 @@ static int spi_master_initialize_queue(struct spi_master *master) return 0; err_start_queue: -err_init_queue: spi_destroy_queue(master); +err_init_queue: return ret; } @@ -1756,7 +1777,7 @@ EXPORT_SYMBOL_GPL(spi_busnum_to_master); */ int spi_setup(struct spi_device *spi) { - unsigned bad_bits; + unsigned bad_bits, ugly_bits; int status = 0; /* check mode to prevent that DUAL and QUAD set at the same time @@ -1776,6 +1797,15 @@ int spi_setup(struct spi_device *spi) * that aren't supported with their current master */ bad_bits = spi->mode & ~spi->master->mode_bits; + ugly_bits = bad_bits & + (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD); + if (ugly_bits) { + dev_warn(&spi->dev, + "setup: ignoring unsupported mode bits %x\n", + ugly_bits); + spi->mode &= ~ugly_bits; + bad_bits &= ~ugly_bits; + } if (bad_bits) { dev_err(&spi->dev, "setup: unsupported mode bits %x\n", bad_bits); diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c index 4144a75e5f71..c270c9ae6d27 100644 --- a/drivers/staging/imx-drm/imx-drm-core.c +++ b/drivers/staging/imx-drm/imx-drm-core.c @@ -517,7 +517,7 @@ int imx_drm_encoder_get_mux_id(struct device_node *node, of_node_put(port); if (port == imx_crtc->port) { ret = of_graph_parse_endpoint(ep, &endpoint); - return ret ? ret : endpoint.id; + return ret ? 
ret : endpoint.port; } } while (ep); @@ -675,6 +675,11 @@ static int imx_drm_platform_probe(struct platform_device *pdev) if (!remote || !of_device_is_available(remote)) { of_node_put(remote); continue; + } else if (!of_device_is_available(remote->parent)) { + dev_warn(&pdev->dev, "parent device of %s is not available\n", + remote->full_name); + of_node_put(remote); + continue; } ret = imx_drm_add_component(&pdev->dev, remote); diff --git a/drivers/staging/imx-drm/imx-tve.c b/drivers/staging/imx-drm/imx-tve.c index 575533f4fd64..a23f4f773146 100644 --- a/drivers/staging/imx-drm/imx-tve.c +++ b/drivers/staging/imx-drm/imx-tve.c @@ -582,7 +582,7 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data) tve->dev = dev; spin_lock_init(&tve->lock); - ddc_node = of_parse_phandle(np, "i2c-ddc-bus", 0); + ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0); if (ddc_node) { tve->ddc = of_find_i2c_adapter_by_node(ddc_node); of_node_put(ddc_node); diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c index 8c101cbbee97..acc8184c46cd 100644 --- a/drivers/staging/media/davinci_vpfe/vpfe_video.c +++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c @@ -1247,9 +1247,18 @@ static int vpfe_stop_streaming(struct vb2_queue *vq) struct vpfe_fh *fh = vb2_get_drv_priv(vq); struct vpfe_video_device *video = fh->video; - if (!vb2_is_streaming(vq)) - return 0; /* release all active buffers */ + if (video->cur_frm == video->next_frm) { + vb2_buffer_done(&video->cur_frm->vb, VB2_BUF_STATE_ERROR); + } else { + if (video->cur_frm != NULL) + vb2_buffer_done(&video->cur_frm->vb, + VB2_BUF_STATE_ERROR); + if (video->next_frm != NULL) + vb2_buffer_done(&video->next_frm->vb, + VB2_BUF_STATE_ERROR); + } + while (!list_empty(&video->dma_queue)) { video->next_frm = list_entry(video->dma_queue.next, struct vpfe_cap_buffer, list); diff --git a/drivers/staging/media/sn9c102/sn9c102_devtable.h b/drivers/staging/media/sn9c102/sn9c102_devtable.h index b3d2cc729657..4ba569258498 100644 --- a/drivers/staging/media/sn9c102/sn9c102_devtable.h +++ b/drivers/staging/media/sn9c102/sn9c102_devtable.h @@ -48,10 +48,8 @@ static const struct usb_device_id sn9c102_id_table[] = { { SN9C102_USB_DEVICE(0x0c45, 0x600d, BRIDGE_SN9C102), }, /* { SN9C102_USB_DEVICE(0x0c45, 0x6011, BRIDGE_SN9C102), }, OV6650 */ { SN9C102_USB_DEVICE(0x0c45, 0x6019, BRIDGE_SN9C102), }, -#endif { SN9C102_USB_DEVICE(0x0c45, 0x6024, BRIDGE_SN9C102), }, { SN9C102_USB_DEVICE(0x0c45, 0x6025, BRIDGE_SN9C102), }, -#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE { SN9C102_USB_DEVICE(0x0c45, 0x6028, BRIDGE_SN9C102), }, { SN9C102_USB_DEVICE(0x0c45, 0x6029, BRIDGE_SN9C102), }, { SN9C102_USB_DEVICE(0x0c45, 0x602a, BRIDGE_SN9C102), }, diff --git a/drivers/staging/rtl8723au/os_dep/os_intfs.c b/drivers/staging/rtl8723au/os_dep/os_intfs.c index 57eca7a45672..4fe751f7c2bf 100644 --- a/drivers/staging/rtl8723au/os_dep/os_intfs.c +++ b/drivers/staging/rtl8723au/os_dep/os_intfs.c @@ -953,8 +953,6 @@ static int netdev_close(struct net_device *pnetdev) #endif /* CONFIG_8723AU_P2P */ rtw_scan_abort23a(padapter); - /* set this at the end */ - padapter->rtw_wdev->iftype = NL80211_IFTYPE_MONITOR; RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-871x_drv - drv_close\n")); DBG_8723A("-871x_drv - drv_close, bup =%d\n", padapter->bup); diff --git a/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c b/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c index c49160e477d8..07e542e5d156 100644 --- 
a/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c +++ b/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c @@ -26,7 +26,7 @@ unsigned int ffaddr2pipehdl23a(struct dvobj_priv *pdvobj, u32 addr) if (addr == RECV_BULK_IN_ADDR) { pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[0]); } else if (addr == RECV_INT_IN_ADDR) { - pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[1]); + pipe = usb_rcvintpipe(pusbd, pdvobj->RtInPipe[1]); } else if (addr < HW_QUEUE_ENTRY) { ep_num = pdvobj->Queue2Pipe[addr]; pipe = usb_sndbulkpipe(pusbd, ep_num); diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 78cab13bbb1b..46588c85d39b 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -1593,7 +1593,9 @@ int iscsit_process_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd, * Initiator is expecting a NopIN ping reply.. */ if (hdr->itt != RESERVED_ITT) { - BUG_ON(!cmd); + if (!cmd) + return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, + (unsigned char *)hdr); spin_lock_bh(&conn->cmd_lock); list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h index 6960f22909ae..302eb3b78715 100644 --- a/drivers/target/iscsi/iscsi_target_core.h +++ b/drivers/target/iscsi/iscsi_target_core.h @@ -775,6 +775,7 @@ struct iscsi_np { int np_ip_proto; int np_sock_type; enum np_thread_state_table np_thread_state; + bool enabled; enum iscsi_timer_flags_table np_login_timer_flags; u32 np_exports; enum np_flags_table np_flags; diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 8739b98f6f93..ca31fa1b8a4b 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -436,7 +436,7 @@ static int iscsi_login_zero_tsih_s2( } off = mrdsl % PAGE_SIZE; if (!off) - return 0; + goto check_prot; if (mrdsl < PAGE_SIZE) mrdsl = PAGE_SIZE; @@ -452,6 +452,31 @@ static int iscsi_login_zero_tsih_s2( ISCSI_LOGIN_STATUS_NO_RESOURCES); return -1; } + /* + * ISER currently requires that ImmediateData + Unsolicited + * Data be disabled when protection / signature MRs are enabled. 
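The comment above states the rule; the check_prot block that follows enforces it by rewriting already-negotiated login parameters as "Key=Value" strings. A toy version of the same mechanic is sketched below; change_param_value() is a stand-in for the kernel's iscsi_change_param_value(), not its real implementation:

#include <stdio.h>
#include <string.h>

struct param { char key[32]; char val[32]; };

/* Parse "Key=Value" and overwrite the matching entry. */
static int change_param_value(char *keyval, struct param *list, int n)
{
        char *eq = strchr(keyval, '=');
        int i;

        if (!eq)
                return -1;
        *eq = '\0';
        for (i = 0; i < n; i++) {
                if (strcmp(list[i].key, keyval) == 0) {
                        snprintf(list[i].val, sizeof(list[i].val), "%s", eq + 1);
                        return 0;
                }
        }
        return -1;
}

int main(void)
{
        struct param list[] = {
                { "ImmediateData", "Yes" },
                { "InitialR2T",    "No"  },
        };
        char buf[64];
        int i;

        /* Same moves as the login hunk: override what was negotiated. */
        sprintf(buf, "ImmediateData=No");
        if (change_param_value(buf, list, 2) < 0)
                return 1;
        sprintf(buf, "InitialR2T=Yes");
        if (change_param_value(buf, list, 2) < 0)
                return 1;

        for (i = 0; i < 2; i++)
                printf("%s=%s\n", list[i].key, list[i].val);
        return 0;
}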
+ */ +check_prot: + if (sess->se_sess->sup_prot_ops & + (TARGET_PROT_DOUT_STRIP | TARGET_PROT_DOUT_PASS | + TARGET_PROT_DOUT_INSERT)) { + + sprintf(buf, "ImmediateData=No"); + if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, + ISCSI_LOGIN_STATUS_NO_RESOURCES); + return -1; + } + + sprintf(buf, "InitialR2T=Yes"); + if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, + ISCSI_LOGIN_STATUS_NO_RESOURCES); + return -1; + } + pr_debug("Forcing ImmediateData=No + InitialR2T=Yes for" + " T10-PI enabled ISER session\n"); + } } return 0; @@ -984,6 +1009,7 @@ int iscsi_target_setup_login_socket( } np->np_transport = t; + np->enabled = true; return 0; } diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c index eb96b20dc09e..ca1811858afd 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.c +++ b/drivers/target/iscsi/iscsi_target_tpg.c @@ -184,6 +184,7 @@ static void iscsit_clear_tpg_np_login_thread( return; } + tpg_np->tpg_np->enabled = false; iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg, shutdown); } diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 65001e133670..26416c15d65c 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -798,10 +798,10 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) pr_err("emulate_write_cache not supported for pSCSI\n"); return -EINVAL; } - if (dev->transport->get_write_cache) { - pr_warn("emulate_write_cache cannot be changed when underlying" - " HW reports WriteCacheEnabled, ignoring request\n"); - return 0; + if (flag && + dev->transport->get_write_cache) { + pr_err("emulate_write_cache not supported for this device\n"); + return -EINVAL; } dev->dev_attrib.emulate_write_cache = flag; @@ -936,6 +936,10 @@ int se_dev_set_pi_prot_type(struct se_device *dev, int flag) return 0; } if (!dev->transport->init_prot || !dev->transport->free_prot) { + /* 0 is only allowed value for non-supporting backends */ + if (flag == 0) + return 0; + pr_err("DIF protection not supported by backend: %s\n", dev->transport->name); return -ENOSYS; diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index d4b98690a736..789aa9eb0a1e 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1113,6 +1113,7 @@ void transport_init_se_cmd( init_completion(&cmd->cmd_wait_comp); init_completion(&cmd->task_stop_comp); spin_lock_init(&cmd->t_state_lock); + kref_init(&cmd->cmd_kref); cmd->transport_state = CMD_T_DEV_ACTIVE; cmd->se_tfo = tfo; @@ -2357,7 +2358,6 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd, unsigned long flags; int ret = 0; - kref_init(&se_cmd->cmd_kref); /* * Add a second kref if the fabric caller is expecting to handle * fabric acknowledgement that requires two target_put_sess_cmd() diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 01cf37f212c3..f5fd515b2bee 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -90,18 +90,18 @@ static void ft_free_cmd(struct ft_cmd *cmd) { struct fc_frame *fp; struct fc_lport *lport; - struct se_session *se_sess; + struct ft_sess *sess; if (!cmd) return; - se_sess = cmd->sess->se_sess; + sess = cmd->sess; fp = cmd->req_frame; lport = fr_dev(fp); if (fr_seq(fp)) 
lport->tt.seq_release(fr_seq(fp)); fc_frame_free(fp); - percpu_ida_free(&se_sess->sess_tag_pool, cmd->se_cmd.map_tag); - ft_sess_put(cmd->sess); /* undo get from lookup at recv */ + percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); + ft_sess_put(sess); /* undo get from lookup at recv */ } void ft_release_cmd(struct se_cmd *se_cmd) diff --git a/drivers/video/fbdev/atafb.c b/drivers/video/fbdev/atafb.c index e21d1f58554c..4953b657635e 100644 --- a/drivers/video/fbdev/atafb.c +++ b/drivers/video/fbdev/atafb.c @@ -191,7 +191,7 @@ static struct fb_info fb_info = { }; static void *screen_base; /* base address of screen */ -static void *real_screen_base; /* (only for Overscan) */ +static unsigned long phys_screen_base; /* (only for Overscan) */ static int screen_len; @@ -213,7 +213,8 @@ static unsigned int external_yres; */ static unsigned int external_depth; static int external_pmode; -static void *external_addr; +static void *external_screen_base; +static unsigned long external_addr; static unsigned long external_len; static unsigned long external_vgaiobase; static unsigned int external_bitspercol = 6; @@ -592,7 +593,7 @@ static int tt_encode_fix(struct fb_fix_screeninfo *fix, struct atafb_par *par) int mode; strcpy(fix->id, "Atari Builtin"); - fix->smem_start = (unsigned long)real_screen_base; + fix->smem_start = phys_screen_base; fix->smem_len = screen_len; fix->type = FB_TYPE_INTERLEAVED_PLANES; fix->type_aux = 2; @@ -790,7 +791,7 @@ static void tt_get_par(struct atafb_par *par) addr = ((shifter.bas_hi & 0xff) << 16) | ((shifter.bas_md & 0xff) << 8) | ((shifter.bas_lo & 0xff)); - par->screen_base = phys_to_virt(addr); + par->screen_base = atari_stram_to_virt(addr); } static void tt_set_par(struct atafb_par *par) @@ -888,7 +889,7 @@ static int falcon_encode_fix(struct fb_fix_screeninfo *fix, struct atafb_par *par) { strcpy(fix->id, "Atari Builtin"); - fix->smem_start = (unsigned long)real_screen_base; + fix->smem_start = phys_screen_base; fix->smem_len = screen_len; fix->type = FB_TYPE_INTERLEAVED_PLANES; fix->type_aux = 2; @@ -1584,7 +1585,7 @@ static void falcon_get_par(struct atafb_par *par) addr = (shifter.bas_hi & 0xff) << 16 | (shifter.bas_md & 0xff) << 8 | (shifter.bas_lo & 0xff); - par->screen_base = phys_to_virt(addr); + par->screen_base = atari_stram_to_virt(addr); /* derived parameters */ hw->ste_mode = (hw->f_shift & 0x510) == 0 && hw->st_shift == 0x100; @@ -1814,7 +1815,7 @@ static int stste_encode_fix(struct fb_fix_screeninfo *fix, int mode; strcpy(fix->id, "Atari Builtin"); - fix->smem_start = (unsigned long)real_screen_base; + fix->smem_start = phys_screen_base; fix->smem_len = screen_len; fix->type = FB_TYPE_INTERLEAVED_PLANES; fix->type_aux = 2; @@ -1980,7 +1981,7 @@ static void stste_get_par(struct atafb_par *par) ((shifter.bas_md & 0xff) << 8); if (ATARIHW_PRESENT(EXTD_SHIFTER)) addr |= (shifter.bas_lo & 0xff); - par->screen_base = phys_to_virt(addr); + par->screen_base = atari_stram_to_virt(addr); } static void stste_set_par(struct atafb_par *par) @@ -2039,7 +2040,7 @@ static int stste_detect(void) static void stste_set_screen_base(void *s_base) { unsigned long addr; - addr = virt_to_phys(s_base); + addr = atari_stram_to_phys(s_base); /* Setup Screen Memory */ shifter.bas_hi = (unsigned char)((addr & 0xff0000) >> 16); shifter.bas_md = (unsigned char)((addr & 0x00ff00) >> 8); @@ -2113,7 +2114,7 @@ static void st_ovsc_switch(void) static int ext_encode_fix(struct fb_fix_screeninfo *fix, struct atafb_par *par) { strcpy(fix->id, "Unknown Extern"); - 
fix->smem_start = (unsigned long)external_addr; + fix->smem_start = external_addr; fix->smem_len = PAGE_ALIGN(external_len); if (external_depth == 1) { fix->type = FB_TYPE_PACKED_PIXELS; @@ -2213,7 +2214,7 @@ static int ext_encode_var(struct fb_var_screeninfo *var, struct atafb_par *par) static void ext_get_par(struct atafb_par *par) { - par->screen_base = external_addr; + par->screen_base = external_screen_base; } static void ext_set_par(struct atafb_par *par) @@ -2286,7 +2287,7 @@ static void set_screen_base(void *s_base) { unsigned long addr; - addr = virt_to_phys(s_base); + addr = atari_stram_to_phys(s_base); /* Setup Screen Memory */ shifter.bas_hi = (unsigned char)((addr & 0xff0000) >> 16); shifter.bas_md = (unsigned char)((addr & 0x00ff00) >> 8); @@ -2433,7 +2434,9 @@ static void atafb_set_disp(struct fb_info *info) atafb_get_var(&info->var, info); atafb_get_fix(&info->fix, info); - info->screen_base = (void *)info->fix.smem_start; + /* Note: smem_start derives from phys_screen_base, not screen_base! */ + info->screen_base = (external_addr ? external_screen_base : + atari_stram_to_virt(info->fix.smem_start)); } static int atafb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, @@ -2904,7 +2907,7 @@ static void __init atafb_setup_ext(char *spec) external_yres = yres; external_depth = depth; external_pmode = planes; - external_addr = (void *)addr; + external_addr = addr; external_len = len; if (external_card_type == IS_MV300) { @@ -3166,30 +3169,30 @@ int __init atafb_init(void) memset(screen_base, 0, mem_req); pad = -(unsigned long)screen_base & (PAGE_SIZE - 1); screen_base += pad; - real_screen_base = screen_base + ovsc_offset; + phys_screen_base = atari_stram_to_phys(screen_base + ovsc_offset); screen_len = (mem_req - pad - ovsc_offset) & PAGE_MASK; st_ovsc_switch(); if (CPU_IS_040_OR_060) { /* On a '040+, the cache mode of video RAM must be set to * write-through also for internal video hardware! */ - cache_push(virt_to_phys(screen_base), screen_len); + cache_push(atari_stram_to_phys(screen_base), screen_len); kernel_set_cachemode(screen_base, screen_len, IOMAP_WRITETHROUGH); } - printk("atafb: screen_base %p real_screen_base %p screen_len %d\n", - screen_base, real_screen_base, screen_len); + printk("atafb: screen_base %p phys_screen_base %lx screen_len %d\n", + screen_base, phys_screen_base, screen_len); #ifdef ATAFB_EXT } else { /* Map the video memory (physical address given) to somewhere * in the kernel address space. 
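The atafb rework above is mostly a type split: physical addresses (fix->smem_start, external_addr) are now kept in unsigned long, virtual ones (screen_base, external_screen_base) in void *, and atari_stram_to_phys()/atari_stram_to_virt() convert between them instead of the generic virt_to_phys(). A compressed illustration of the discipline, with the ST-RAM mapping mocked as a fixed offset (the real helpers consult the actual ST-RAM mapping):

#include <stdio.h>

#define STRAM_VIRT_BASE 0x10000000UL    /* demo assumption only */

static unsigned long atari_stram_to_phys_demo(void *v)
{
        return (unsigned long)v - STRAM_VIRT_BASE;
}

static void *atari_stram_to_virt_demo(unsigned long p)
{
        return (void *)(p + STRAM_VIRT_BASE);
}

int main(void)
{
        void *screen_base = (void *)(STRAM_VIRT_BASE + 0x8000);
        /* What the patch stores in fix->smem_start: a physical address,
         * kept in an unsigned long, never in a pointer. */
        unsigned long phys_screen_base = atari_stram_to_phys_demo(screen_base);

        printf("virt=%p phys=%#lx back=%p\n", screen_base, phys_screen_base,
               atari_stram_to_virt_demo(phys_screen_base));
        return 0;
}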
*/ - external_addr = ioremap_writethrough((unsigned long)external_addr, + external_screen_base = ioremap_writethrough(external_addr, external_len); if (external_vgaiobase) external_vgaiobase = (unsigned long)ioremap(external_vgaiobase, 0x10000); - screen_base = - real_screen_base = external_addr; + screen_base = external_screen_base; + phys_screen_base = external_addr; screen_len = external_len & PAGE_MASK; memset (screen_base, 0, external_len); } @@ -3235,8 +3238,8 @@ int __init atafb_init(void) if (register_framebuffer(&fb_info) < 0) { #ifdef ATAFB_EXT if (external_addr) { - iounmap(external_addr); - external_addr = NULL; + iounmap(external_screen_base); + external_addr = 0; } if (external_vgaiobase) { iounmap((void*)external_vgaiobase); diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c index 96109a9972b6..84b4bfb84344 100644 --- a/drivers/xen/events/events_fifo.c +++ b/drivers/xen/events/events_fifo.c @@ -66,7 +66,22 @@ static DEFINE_PER_CPU(struct evtchn_fifo_queue, cpu_queue); static event_word_t *event_array[MAX_EVENT_ARRAY_PAGES] __read_mostly; static unsigned event_array_pages __read_mostly; +/* + * sync_set_bit() and friends must be unsigned long aligned on non-x86 + * platforms. + */ +#if !defined(CONFIG_X86) && BITS_PER_LONG > 32 + +#define BM(w) (unsigned long *)((unsigned long)w & ~0x7UL) +#define EVTCHN_FIFO_BIT(b, w) \ + (((unsigned long)w & 0x4UL) ? (EVTCHN_FIFO_ ##b + 32) : EVTCHN_FIFO_ ##b) + +#else + #define BM(w) ((unsigned long *)(w)) +#define EVTCHN_FIFO_BIT(b, w) EVTCHN_FIFO_ ##b + +#endif static inline event_word_t *event_word_from_port(unsigned port) { @@ -161,33 +176,38 @@ static void evtchn_fifo_bind_to_cpu(struct irq_info *info, unsigned cpu) static void evtchn_fifo_clear_pending(unsigned port) { event_word_t *word = event_word_from_port(port); - sync_clear_bit(EVTCHN_FIFO_PENDING, BM(word)); + sync_clear_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word)); } static void evtchn_fifo_set_pending(unsigned port) { event_word_t *word = event_word_from_port(port); - sync_set_bit(EVTCHN_FIFO_PENDING, BM(word)); + sync_set_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word)); } static bool evtchn_fifo_is_pending(unsigned port) { event_word_t *word = event_word_from_port(port); - return sync_test_bit(EVTCHN_FIFO_PENDING, BM(word)); + return sync_test_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word)); } static bool evtchn_fifo_test_and_set_mask(unsigned port) { event_word_t *word = event_word_from_port(port); - return sync_test_and_set_bit(EVTCHN_FIFO_MASKED, BM(word)); + return sync_test_and_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word)); } static void evtchn_fifo_mask(unsigned port) { event_word_t *word = event_word_from_port(port); - sync_set_bit(EVTCHN_FIFO_MASKED, BM(word)); + sync_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word)); } +static bool evtchn_fifo_is_masked(unsigned port) +{ + event_word_t *word = event_word_from_port(port); + return sync_test_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word)); +} /* * Clear MASKED, spinning if BUSY is set. 
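The BM()/EVTCHN_FIFO_BIT() pair above exists because sync_set_bit() and friends operate on unsigned long: on 64-bit non-x86 platforms the bitmap base must be 8-byte aligned, so a 32-bit event word that happens to be the upper half of an aligned 64-bit cell has its bit index shifted up by 32. The macros demonstrate standalone (the bit value follows the Xen FIFO ABI, where EVTCHN_FIFO_PENDING is 31):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t event_word_t;

#define EVTCHN_FIFO_PENDING 31  /* per the Xen FIFO event channel ABI */

/* Round the word's address down to an unsigned-long-aligned base... */
#define BM(w) ((unsigned long *)((unsigned long)(w) & ~0x7UL))
/* ...and if the word was the upper 32-bit half, bump the bit index. */
#define EVTCHN_FIFO_BIT(b, w) \
        (((unsigned long)(w) & 0x4UL) ? (EVTCHN_FIFO_##b + 32) : EVTCHN_FIFO_##b)

int main(void)
{
        _Alignas(8) event_word_t words[2] = { 0, 0 };
        int i;

        for (i = 0; i < 2; i++)
                printf("word %d: bitmap base %p, PENDING bit %d\n", i,
                       (void *)BM(&words[i]),
                       EVTCHN_FIFO_BIT(PENDING, &words[i]));
        return 0;
}

Both words report the same bitmap base; the second reports bit 63 instead of 31, which is exactly the adjustment the patch applies before every sync_*_bit() call.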
*/ @@ -211,7 +231,7 @@ static void evtchn_fifo_unmask(unsigned port) BUG_ON(!irqs_disabled()); clear_masked(word); - if (sync_test_bit(EVTCHN_FIFO_PENDING, BM(word))) { + if (evtchn_fifo_is_pending(port)) { struct evtchn_unmask unmask = { .port = port }; (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask); } @@ -243,7 +263,7 @@ static void handle_irq_for_port(unsigned port) static void consume_one_event(unsigned cpu, struct evtchn_fifo_control_block *control_block, - unsigned priority, uint32_t *ready) + unsigned priority, unsigned long *ready) { struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu); uint32_t head; @@ -273,10 +293,9 @@ static void consume_one_event(unsigned cpu, * copy of the ready word. */ if (head == 0) - clear_bit(priority, BM(ready)); + clear_bit(priority, ready); - if (sync_test_bit(EVTCHN_FIFO_PENDING, BM(word)) - && !sync_test_bit(EVTCHN_FIFO_MASKED, BM(word))) + if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) handle_irq_for_port(port); q->head[priority] = head; @@ -285,7 +304,7 @@ static void consume_one_event(unsigned cpu, static void evtchn_fifo_handle_events(unsigned cpu) { struct evtchn_fifo_control_block *control_block; - uint32_t ready; + unsigned long ready; unsigned q; control_block = per_cpu(cpu_control_block, cpu); diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index 32f9236c959f..c3667b202f2f 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c @@ -41,9 +41,6 @@ static enum shutdown_state shutting_down = SHUTDOWN_INVALID; struct suspend_info { int cancelled; - unsigned long arg; /* extra hypercall argument */ - void (*pre)(void); - void (*post)(int cancelled); }; static RAW_NOTIFIER_HEAD(xen_resume_notifier); @@ -61,26 +58,6 @@ void xen_resume_notifier_unregister(struct notifier_block *nb) EXPORT_SYMBOL_GPL(xen_resume_notifier_unregister); #ifdef CONFIG_HIBERNATE_CALLBACKS -static void xen_hvm_post_suspend(int cancelled) -{ - xen_arch_hvm_post_suspend(cancelled); - gnttab_resume(); -} - -static void xen_pre_suspend(void) -{ - xen_mm_pin_all(); - gnttab_suspend(); - xen_arch_pre_suspend(); -} - -static void xen_post_suspend(int cancelled) -{ - xen_arch_post_suspend(cancelled); - gnttab_resume(); - xen_mm_unpin_all(); -} - static int xen_suspend(void *data) { struct suspend_info *si = data; @@ -94,18 +71,20 @@ static int xen_suspend(void *data) return err; } - if (si->pre) - si->pre(); + gnttab_suspend(); + xen_arch_pre_suspend(); /* * This hypercall returns 1 if suspend was cancelled * or the domain was merely checkpointed, and 0 if it * is resuming in a new domain. */ - si->cancelled = HYPERVISOR_suspend(si->arg); + si->cancelled = HYPERVISOR_suspend(xen_pv_domain() + ? 
virt_to_mfn(xen_start_info) : 0); - if (si->post) - si->post(si->cancelled); + xen_arch_post_suspend(si->cancelled); + gnttab_resume(); if (!si->cancelled) { xen_irq_resume(); @@ -154,16 +133,6 @@ static void do_suspend(void) si.cancelled = 1; - if (xen_hvm_domain()) { - si.arg = 0UL; - si.pre = NULL; - si.post = &xen_hvm_post_suspend; - } else { - si.arg = virt_to_mfn(xen_start_info); - si.pre = &xen_pre_suspend; - si.post = &xen_post_suspend; - } - err = stop_machine(xen_suspend, &si, cpumask_of(0)); raw_notifier_call_chain(&xen_resume_notifier, 0, NULL); diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c index 82358d14ecf1..59fc190f1e92 100644 --- a/drivers/xen/xen-acpi-processor.c +++ b/drivers/xen/xen-acpi-processor.c @@ -127,7 +127,7 @@ static int push_cxx_to_hypervisor(struct acpi_processor *_pr) pr_debug(" C%d: %s %d uS\n", cx->type, cx->desc, (u32)cx->latency); } - } else if (ret != -EINVAL) + } else if ((ret != -EINVAL) && (ret != -ENOSYS)) /* EINVAL means the ACPI ID is incorrect - meaning the ACPI * table is referencing a non-existing CPU - which can happen * with broken ACPI tables. */ @@ -259,7 +259,7 @@ static int push_pxx_to_hypervisor(struct acpi_processor *_pr) (u32) perf->states[i].power, (u32) perf->states[i].transition_latency); } - } else if (ret != -EINVAL) + } else if ((ret != -EINVAL) && (ret != -ENOSYS)) /* EINVAL means the ACPI ID is incorrect - meaning the ACPI * table is referencing a non-existing CPU - which can happen * with broken ACPI tables. */ diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c index 62fcd485f0a7..d57a173685f3 100644 --- a/drivers/xen/xen-pciback/pci_stub.c +++ b/drivers/xen/xen-pciback/pci_stub.c @@ -242,6 +242,15 @@ struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev, return found_dev; } +/* + * Called when: + * - XenBus state has been reconfigured (pci unplug). See xen_pcibk_remove_device + * - XenBus state has been disconnected (guest shutdown). See xen_pcibk_xenbus_remove + * - 'echo BDF > unbind' on pciback module with no guest attached. See pcistub_remove + * - 'echo BDF > unbind' with a guest still using it. See pcistub_remove + * + * As such we have to be careful. + */ void pcistub_put_pci_dev(struct pci_dev *dev) { struct pcistub_device *psdev, *found_psdev = NULL; @@ -272,16 +281,16 @@ void pcistub_put_pci_dev(struct pci_dev *dev) * and want to inhibit the user from fiddling with 'reset' */ pci_reset_function(dev); - pci_restore_state(psdev->dev); + pci_restore_state(dev); /* This disables the device. */ - xen_pcibk_reset_device(found_psdev->dev); + xen_pcibk_reset_device(dev); /* And clean up our emulated fields. */ - xen_pcibk_config_free_dyn_fields(found_psdev->dev); - xen_pcibk_config_reset_dev(found_psdev->dev); + xen_pcibk_config_reset_dev(dev); + xen_pcibk_config_free_dyn_fields(dev); - xen_unregister_device_domain_owner(found_psdev->dev); + xen_unregister_device_domain_owner(dev); spin_lock_irqsave(&found_psdev->lock, flags); found_psdev->pdev = NULL; @@ -493,6 +502,8 @@ static int pcistub_seize(struct pci_dev *dev) return err; } +/* Called when 'bind'. This means we must _NOT_ call pci_reset_function or + * other functions that take the sysfs lock. */ static int pcistub_probe(struct pci_dev *dev, const struct pci_device_id *id) { int err = 0; @@ -520,6 +531,8 @@ out: return err; } +/* Called when 'unbind'. This means we must _NOT_ call pci_reset_function or + * other functions that take the sysfs lock.
*/ static void pcistub_remove(struct pci_dev *dev) { struct pcistub_device *psdev, *found_psdev = NULL; @@ -551,6 +564,8 @@ static void pcistub_remove(struct pci_dev *dev) pr_warn("****** shutdown driver domain before binding device\n"); pr_warn("****** to other drivers or domains\n"); + /* N.B. This ends up calling pcistub_put_pci_dev which ends up + * doing the FLR. */ xen_pcibk_release_pci_dev(found_psdev->pdev, found_psdev->dev); } diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c index a9ed867afaba..4a7e6e0a5f4c 100644 --- a/drivers/xen/xen-pciback/xenbus.c +++ b/drivers/xen/xen-pciback/xenbus.c @@ -93,6 +93,8 @@ static void free_pdev(struct xen_pcibk_device *pdev) xen_pcibk_disconnect(pdev); + /* N.B. This calls pcistub_put_pci_dev which does the FLR on all + * of the PCIe devices. */ xen_pcibk_release_devices(pdev); dev_set_drvdata(&pdev->xdev->dev, NULL); @@ -286,6 +288,8 @@ static int xen_pcibk_remove_device(struct xen_pcibk_device *pdev, dev_dbg(&dev->dev, "unregistering for %d\n", pdev->xdev->otherend_id); xen_unregister_device_domain_owner(dev); + /* N.B. This ends up calling pcistub_put_pci_dev which ends up + * doing the FLR. */ xen_pcibk_release_pci_dev(pdev, dev); out: diff --git a/fs/Makefile b/fs/Makefile index f9cb9876e466..4030cbfbc9af 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -14,14 +14,13 @@ obj-y := open.o read_write.o file_table.o super.o \ stack.o fs_struct.o statfs.o ifeq ($(CONFIG_BLOCK),y) -obj-y += buffer.o bio.o block_dev.o direct-io.o mpage.o ioprio.o +obj-y += buffer.o block_dev.o direct-io.o mpage.o else obj-y += no-block.o endif obj-$(CONFIG_PROC_FS) += proc_namespace.o -obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o obj-y += notify/ obj-$(CONFIG_EPOLL) += eventpoll.o obj-$(CONFIG_ANON_INODES) += anon_inodes.o diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c index 1c8c6cc6de30..4b0eff6da674 100644 --- a/fs/afs/cmservice.c +++ b/fs/afs/cmservice.c @@ -130,6 +130,15 @@ static void afs_cm_destructor(struct afs_call *call) { _enter(""); + /* Break the callbacks here so that we do it after the final ACK is + * received. The step number here must match the final number in + * afs_deliver_cb_callback(). + */ + if (call->unmarshall == 6) { + ASSERT(call->server && call->count && call->request); + afs_break_callbacks(call->server, call->count, call->request); + } + afs_put_server(call->server); call->server = NULL; kfree(call->buffer); @@ -272,6 +281,16 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb, _debug("trailer"); if (skb->len != 0) return -EBADMSG; + + /* Record that the message was unmarshalled successfully so + * that the call destructor can know to do the callback breaking + * work, even if the final ACK isn't received. + * + * If the step number changes, then afs_cm_destructor() must be + * updated also.
+ */ + call->unmarshall++; + case 6: break; } diff --git a/fs/afs/internal.h b/fs/afs/internal.h index be75b500005d..590b55f46d61 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -75,7 +75,7 @@ struct afs_call { const struct afs_call_type *type; /* type of call */ const struct afs_wait_mode *wait_mode; /* completion wait mode */ wait_queue_head_t waitq; /* processes awaiting completion */ - work_func_t async_workfn; + void (*async_workfn)(struct afs_call *call); /* asynchronous work function */ struct work_struct async_work; /* asynchronous work processor */ struct work_struct work; /* actual work processor */ struct sk_buff_head rx_queue; /* received packets */ diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index ef943df73b8c..03a3beb17004 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -25,7 +25,7 @@ static void afs_wake_up_call_waiter(struct afs_call *); static int afs_wait_for_call_to_complete(struct afs_call *); static void afs_wake_up_async_call(struct afs_call *); static int afs_dont_wait_for_call_to_complete(struct afs_call *); -static void afs_process_async_call(struct work_struct *); +static void afs_process_async_call(struct afs_call *); static void afs_rx_interceptor(struct sock *, unsigned long, struct sk_buff *); static int afs_deliver_cm_op_id(struct afs_call *, struct sk_buff *, bool); @@ -58,6 +58,13 @@ static void afs_collect_incoming_call(struct work_struct *); static struct sk_buff_head afs_incoming_calls; static DECLARE_WORK(afs_collect_incoming_call_work, afs_collect_incoming_call); +static void afs_async_workfn(struct work_struct *work) +{ + struct afs_call *call = container_of(work, struct afs_call, async_work); + + call->async_workfn(call); +} + /* * open an RxRPC socket and bind it to be a server for callback notifications * - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT @@ -184,6 +191,28 @@ static void afs_free_call(struct afs_call *call) } /* + * End a call but do not free it + */ +static void afs_end_call_nofree(struct afs_call *call) +{ + if (call->rxcall) { + rxrpc_kernel_end_call(call->rxcall); + call->rxcall = NULL; + } + if (call->type->destructor) + call->type->destructor(call); +} + +/* + * End a call and free it + */ +static void afs_end_call(struct afs_call *call) +{ + afs_end_call_nofree(call); + afs_free_call(call); +} + +/* * allocate a call with flat request and reply buffers */ struct afs_call *afs_alloc_flat_call(const struct afs_call_type *type, @@ -326,7 +355,8 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp, atomic_read(&afs_outstanding_calls)); call->wait_mode = wait_mode; - INIT_WORK(&call->async_work, afs_process_async_call); + call->async_workfn = afs_process_async_call; + INIT_WORK(&call->async_work, afs_async_workfn); memset(&srx, 0, sizeof(srx)); srx.srx_family = AF_RXRPC; @@ -383,11 +413,8 @@ error_do_abort: rxrpc_kernel_abort_call(rxcall, RX_USER_ABORT); while ((skb = skb_dequeue(&call->rx_queue))) afs_free_skb(skb); - rxrpc_kernel_end_call(rxcall); - call->rxcall = NULL; error_kill_call: - call->type->destructor(call); - afs_free_call(call); + afs_end_call(call); _leave(" = %d", ret); return ret; } @@ -509,12 +536,8 @@ static void afs_deliver_to_call(struct afs_call *call) if (call->state >= AFS_CALL_COMPLETE) { while ((skb = skb_dequeue(&call->rx_queue))) afs_free_skb(skb); - if (call->incoming) { - rxrpc_kernel_end_call(call->rxcall); - call->rxcall = NULL; - call->type->destructor(call); - afs_free_call(call); - } + if (call->incoming) + afs_end_call(call); } 
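The afs_async_workfn() shuffle above replaces a generic work_func_t field with a typed void (*)(struct afs_call *) callback: the workqueue always invokes one trampoline, which recovers the afs_call with container_of() and dispatches through the typed pointer. The pattern in self-contained form; work_struct here is a one-field mock of the kernel's:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Mock of the workqueue machinery. */
struct work_struct { void (*func)(struct work_struct *); };

struct afs_call_demo {
        int id;
        void (*async_workfn)(struct afs_call_demo *call); /* typed callback */
        struct work_struct async_work;                    /* generic item */
};

/* The single generic trampoline the workqueue sees. */
static void async_workfn(struct work_struct *work)
{
        struct afs_call_demo *call =
                container_of(work, struct afs_call_demo, async_work);
        call->async_workfn(call);
}

static void process_call(struct afs_call_demo *call)
{
        printf("processing call %d\n", call->id);
}

int main(void)
{
        struct afs_call_demo call = { .id = 1, .async_workfn = process_call };

        call.async_work.func = async_workfn;
        call.async_work.func(&call.async_work);  /* what the workqueue does */
        return 0;
}

The typed callback keeps the container_of() boilerplate in exactly one place instead of in every asynchronous handler.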
_leave(""); @@ -564,10 +587,7 @@ static int afs_wait_for_call_to_complete(struct afs_call *call) } _debug("call complete"); - rxrpc_kernel_end_call(call->rxcall); - call->rxcall = NULL; - call->type->destructor(call); - afs_free_call(call); + afs_end_call(call); _leave(" = %d", ret); return ret; } @@ -603,11 +623,8 @@ static int afs_dont_wait_for_call_to_complete(struct afs_call *call) /* * delete an asynchronous call */ -static void afs_delete_async_call(struct work_struct *work) +static void afs_delete_async_call(struct afs_call *call) { - struct afs_call *call = - container_of(work, struct afs_call, async_work); - _enter(""); afs_free_call(call); @@ -620,11 +637,8 @@ static void afs_delete_async_call(struct work_struct *work) * - on a multiple-thread workqueue this work item may try to run on several * CPUs at the same time */ -static void afs_process_async_call(struct work_struct *work) +static void afs_process_async_call(struct afs_call *call) { - struct afs_call *call = - container_of(work, struct afs_call, async_work); - _enter(""); if (!skb_queue_empty(&call->rx_queue)) @@ -637,10 +651,7 @@ static void afs_process_async_call(struct work_struct *work) call->reply = NULL; /* kill the call */ - rxrpc_kernel_end_call(call->rxcall); - call->rxcall = NULL; - if (call->type->destructor) - call->type->destructor(call); + afs_end_call_nofree(call); /* we can't just delete the call because the work item may be * queued */ @@ -663,13 +674,6 @@ void afs_transfer_reply(struct afs_call *call, struct sk_buff *skb) call->reply_size += len; } -static void afs_async_workfn(struct work_struct *work) -{ - struct afs_call *call = container_of(work, struct afs_call, async_work); - - call->async_workfn(work); -} - /* * accept the backlog of incoming calls */ @@ -790,10 +794,7 @@ void afs_send_empty_reply(struct afs_call *call) _debug("oom"); rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT); default: - rxrpc_kernel_end_call(call->rxcall); - call->rxcall = NULL; - call->type->destructor(call); - afs_free_call(call); + afs_end_call(call); _leave(" [error]"); return; } @@ -823,17 +824,16 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len) call->state = AFS_CALL_AWAIT_ACK; n = rxrpc_kernel_send_data(call->rxcall, &msg, len); if (n >= 0) { + /* Success */ _leave(" [replied]"); return; } + if (n == -ENOMEM) { _debug("oom"); rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT); } - rxrpc_kernel_end_call(call->rxcall); - call->rxcall = NULL; - call->type->destructor(call); - afs_free_call(call); + afs_end_call(call); _leave(" [error]"); } diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 2ad7de94efef..2f6d7b13b5bd 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -3120,6 +3120,8 @@ process_slot: } else if (type == BTRFS_FILE_EXTENT_INLINE) { u64 skip = 0; u64 trim = 0; + u64 aligned_end = 0; + if (off > key.offset) { skip = off - key.offset; new_key.offset += skip; @@ -3136,9 +3138,11 @@ process_slot: size -= skip + trim; datal -= skip + trim; + aligned_end = ALIGN(new_key.offset + datal, + root->sectorsize); ret = btrfs_drop_extents(trans, root, inode, new_key.offset, - new_key.offset + datal, + aligned_end, 1); if (ret) { if (ret != -EOPNOTSUPP) diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index eb6537a08c1b..fd38b5053479 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -1668,7 +1668,7 @@ static int get_first_ref(struct btrfs_root *root, u64 ino, goto out; } - if (key.type == BTRFS_INODE_REF_KEY) { + if (found_key.type == BTRFS_INODE_REF_KEY) { struct 
btrfs_inode_ref *iref; iref = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_inode_ref); diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index aadc2b68678b..a22d667f1069 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -1737,6 +1737,9 @@ cifs_inode_needs_reval(struct inode *inode) if (cifs_i->time == 0) return true; + if (!cifs_sb->actimeo) + return true; + if (!time_in_range(jiffies, cifs_i->time, cifs_i->time + cifs_sb->actimeo)) return true; diff --git a/fs/dcache.c b/fs/dcache.c index 42ae01eefc07..be2bea834bf4 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -441,42 +441,12 @@ void d_drop(struct dentry *dentry) } EXPORT_SYMBOL(d_drop); -/* - * Finish off a dentry we've decided to kill. - * dentry->d_lock must be held, returns with it unlocked. - * If ref is non-zero, then decrement the refcount too. - * Returns dentry requiring refcount drop, or NULL if we're done. - */ -static struct dentry * -dentry_kill(struct dentry *dentry, int unlock_on_failure) - __releases(dentry->d_lock) +static void __dentry_kill(struct dentry *dentry) { - struct inode *inode; struct dentry *parent = NULL; bool can_free = true; - - if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) { - can_free = dentry->d_flags & DCACHE_MAY_FREE; - spin_unlock(&dentry->d_lock); - goto out; - } - - inode = dentry->d_inode; - if (inode && !spin_trylock(&inode->i_lock)) { -relock: - if (unlock_on_failure) { - spin_unlock(&dentry->d_lock); - cpu_relax(); - } - return dentry; /* try again with same dentry */ - } if (!IS_ROOT(dentry)) parent = dentry->d_parent; - if (parent && !spin_trylock(&parent->d_lock)) { - if (inode) - spin_unlock(&inode->i_lock); - goto relock; - } /* * The dentry is now unrecoverably dead to the world. @@ -520,9 +490,72 @@ relock: can_free = false; } spin_unlock(&dentry->d_lock); -out: if (likely(can_free)) dentry_free(dentry); +} + +/* + * Finish off a dentry we've decided to kill. + * dentry->d_lock must be held, returns with it unlocked. + * If ref is non-zero, then decrement the refcount too. + * Returns dentry requiring refcount drop, or NULL if we're done. + */ +static struct dentry *dentry_kill(struct dentry *dentry) + __releases(dentry->d_lock) +{ + struct inode *inode = dentry->d_inode; + struct dentry *parent = NULL; + + if (inode && unlikely(!spin_trylock(&inode->i_lock))) + goto failed; + + if (!IS_ROOT(dentry)) { + parent = dentry->d_parent; + if (unlikely(!spin_trylock(&parent->d_lock))) { + if (inode) + spin_unlock(&inode->i_lock); + goto failed; + } + } + + __dentry_kill(dentry); + return parent; + +failed: + spin_unlock(&dentry->d_lock); + cpu_relax(); + return dentry; /* try again with same dentry */ +} + +static inline struct dentry *lock_parent(struct dentry *dentry) +{ + struct dentry *parent = dentry->d_parent; + if (IS_ROOT(dentry)) + return NULL; + if (likely(spin_trylock(&parent->d_lock))) + return parent; + spin_unlock(&dentry->d_lock); + rcu_read_lock(); +again: + parent = ACCESS_ONCE(dentry->d_parent); + spin_lock(&parent->d_lock); + /* + * We can't blindly lock dentry until we are sure + * that we won't violate the locking order. + * Any changes of dentry->d_parent must have + * been done with parent->d_lock held, so + * spin_lock() above is enough of a barrier + * for checking if it's still our child. 
+ */ + if (unlikely(parent != dentry->d_parent)) { + spin_unlock(&parent->d_lock); + goto again; + } + rcu_read_unlock(); + if (parent != dentry) + spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); + else + parent = NULL; return parent; } @@ -579,7 +612,7 @@ repeat: return; kill_it: - dentry = dentry_kill(dentry, 1); + dentry = dentry_kill(dentry); if (dentry) goto repeat; } @@ -797,8 +830,11 @@ static void shrink_dentry_list(struct list_head *list) struct dentry *dentry, *parent; while (!list_empty(list)) { + struct inode *inode; dentry = list_entry(list->prev, struct dentry, d_lru); spin_lock(&dentry->d_lock); + parent = lock_parent(dentry); + /* * The dispose list is isolated and dentries are not accounted * to the LRU here, so we can simply remove it from the list @@ -812,26 +848,33 @@ static void shrink_dentry_list(struct list_head *list) */ if ((int)dentry->d_lockref.count > 0) { spin_unlock(&dentry->d_lock); + if (parent) + spin_unlock(&parent->d_lock); continue; } - parent = dentry_kill(dentry, 0); - /* - * If dentry_kill returns NULL, we have nothing more to do. - */ - if (!parent) + + if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) { + bool can_free = dentry->d_flags & DCACHE_MAY_FREE; + spin_unlock(&dentry->d_lock); + if (parent) + spin_unlock(&parent->d_lock); + if (can_free) + dentry_free(dentry); continue; + } - if (unlikely(parent == dentry)) { - /* - * trylocks have failed and d_lock has been held the - * whole time, so it could not have been added to any - * other lists. Just add it back to the shrink list. - */ + inode = dentry->d_inode; + if (inode && unlikely(!spin_trylock(&inode->i_lock))) { d_shrink_add(dentry, list); spin_unlock(&dentry->d_lock); + if (parent) + spin_unlock(&parent->d_lock); continue; } + + __dentry_kill(dentry); + /* * We need to prune ancestors too. This is necessary to prevent * quadratic behavior of shrink_dcache_parent(), but is also @@ -839,8 +882,26 @@ static void shrink_dentry_list(struct list_head *list) * fragmentation. */ dentry = parent; - while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) - dentry = dentry_kill(dentry, 1); + while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) { + parent = lock_parent(dentry); + if (dentry->d_lockref.count != 1) { + dentry->d_lockref.count--; + spin_unlock(&dentry->d_lock); + if (parent) + spin_unlock(&parent->d_lock); + break; + } + inode = dentry->d_inode; /* can't be NULL */ + if (unlikely(!spin_trylock(&inode->i_lock))) { + spin_unlock(&dentry->d_lock); + if (parent) + spin_unlock(&parent->d_lock); + cpu_relax(); + continue; + } + __dentry_kill(dentry); + dentry = parent; + } } } diff --git a/fs/exec.c b/fs/exec.c index 476f3ebf437e..238b7aa26f68 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -657,10 +657,10 @@ int setup_arg_pages(struct linux_binprm *bprm, unsigned long rlim_stack; #ifdef CONFIG_STACK_GROWSUP - /* Limit stack size to 1GB */ + /* Limit stack size */ stack_base = rlimit_max(RLIMIT_STACK); - if (stack_base > (1 << 30)) - stack_base = 1 << 30; + if (stack_base > STACK_SIZE_MAX) + stack_base = STACK_SIZE_MAX; /* Make sure we didn't let the argument array grow too large. 
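The lock_parent() helper completed above is the heart of the shrink_dentry_list() rework: with the child's lock held it trylocks the parent, and only on contention drops the child lock, takes the parent lock first (the correct order), then re-checks that the child was not reparented in the window. A pthreads sketch of the same dance; there is no RCU here, so unlike the kernel it assumes the child object cannot be freed while unlocked:

#include <pthread.h>
#include <stdio.h>

struct node {
        pthread_mutex_t lock;
        struct node *parent;
};

/* Called with child->lock held; returns with parent->lock also held
 * (child->lock still held), or NULL if child is a root. */
static struct node *lock_parent(struct node *child)
{
        struct node *parent = child->parent;

        if (!parent)
                return NULL;
        if (pthread_mutex_trylock(&parent->lock) == 0)
                return parent;          /* fast path: no contention */

        pthread_mutex_unlock(&child->lock);
again:
        parent = child->parent;         /* the kernel re-reads under RCU */
        pthread_mutex_lock(&parent->lock);
        if (parent != child->parent) {  /* reparented meanwhile: retry */
                pthread_mutex_unlock(&parent->lock);
                goto again;
        }
        pthread_mutex_lock(&child->lock);
        return parent;
}

int main(void)
{
        struct node root = { PTHREAD_MUTEX_INITIALIZER, NULL };
        struct node leaf = { PTHREAD_MUTEX_INITIALIZER, &root };
        struct node *p;

        pthread_mutex_lock(&leaf.lock);
        p = lock_parent(&leaf);
        printf("parent locked: %s\n", p == &root ? "root" : "none");
        return 0;
}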
*/ if (vma->vm_end - vma->vm_start > stack_base) diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c index e01ea4a14a01..5e9a80cfc3d8 100644 --- a/fs/kernfs/file.c +++ b/fs/kernfs/file.c @@ -610,6 +610,7 @@ static void kernfs_put_open_node(struct kernfs_node *kn, static int kernfs_fop_open(struct inode *inode, struct file *file) { struct kernfs_node *kn = file->f_path.dentry->d_fsdata; + struct kernfs_root *root = kernfs_root(kn); const struct kernfs_ops *ops; struct kernfs_open_file *of; bool has_read, has_write, has_mmap; @@ -624,14 +625,16 @@ static int kernfs_fop_open(struct inode *inode, struct file *file) has_write = ops->write || ops->mmap; has_mmap = ops->mmap; - /* check perms and supported operations */ - if ((file->f_mode & FMODE_WRITE) && - (!(inode->i_mode & S_IWUGO) || !has_write)) - goto err_out; + /* see the flag definition for details */ + if (root->flags & KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK) { + if ((file->f_mode & FMODE_WRITE) && + (!(inode->i_mode & S_IWUGO) || !has_write)) + goto err_out; - if ((file->f_mode & FMODE_READ) && - (!(inode->i_mode & S_IRUGO) || !has_read)) - goto err_out; + if ((file->f_mode & FMODE_READ) && + (!(inode->i_mode & S_IRUGO) || !has_read)) + goto err_out; + } /* allocate a kernfs_open_file for the file */ error = -ENOMEM; diff --git a/fs/locks.c b/fs/locks.c index e663aeac579e..e390bd9ae068 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -389,18 +389,6 @@ static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl, fl->fl_ops = NULL; fl->fl_lmops = NULL; - /* Ensure that fl->fl_filp has compatible f_mode */ - switch (l->l_type) { - case F_RDLCK: - if (!(filp->f_mode & FMODE_READ)) - return -EBADF; - break; - case F_WRLCK: - if (!(filp->f_mode & FMODE_WRITE)) - return -EBADF; - break; - } - return assign_type(fl, l->l_type); } @@ -2034,6 +2022,22 @@ static int do_lock_file_wait(struct file *filp, unsigned int cmd, return error; } +/* Ensure that fl->fl_filp has compatible f_mode for F_SETLK calls */ +static int +check_fmode_for_setlk(struct file_lock *fl) +{ + switch (fl->fl_type) { + case F_RDLCK: + if (!(fl->fl_file->f_mode & FMODE_READ)) + return -EBADF; + break; + case F_WRLCK: + if (!(fl->fl_file->f_mode & FMODE_WRITE)) + return -EBADF; + } + return 0; +} + /* Apply the lock described by l to an open file descriptor. * This implements both the F_SETLK and F_SETLKW commands of fcntl(). */ @@ -2071,6 +2075,10 @@ again: if (error) goto out; + error = check_fmode_for_setlk(file_lock); + if (error) + goto out; + /* * If the cmd is requesting file-private locks, then set the * FL_OFDLCK flag and override the owner. @@ -2206,6 +2214,10 @@ again: if (error) goto out; + error = check_fmode_for_setlk(file_lock); + if (error) + goto out; + /* * If the cmd is requesting file-private locks, then set the * FL_OFDLCK flag and override the owner. diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c index 6f3f392d48af..f66c66b9f182 100644 --- a/fs/nfsd/nfs4acl.c +++ b/fs/nfsd/nfs4acl.c @@ -402,8 +402,10 @@ sort_pacl(struct posix_acl *pacl) * by uid/gid. 
*/ int i, j; - if (pacl->a_count <= 4) - return; /* no users or groups */ + /* no users or groups */ + if (!pacl || pacl->a_count <= 4) + return; + i = 1; while (pacl->a_entries[i].e_tag == ACL_USER) i++; @@ -530,13 +532,12 @@ posix_state_to_acl(struct posix_acl_state *state, unsigned int flags) /* * ACLs with no ACEs are treated differently in the inheritable - * and effective cases: when there are no inheritable ACEs, we - * set a zero-length default posix acl: + * and effective cases: when there are no inheritable ACEs, + * calls ->set_acl with a NULL ACL structure. */ - if (state->empty && (flags & NFS4_ACL_TYPE_DEFAULT)) { - pacl = posix_acl_alloc(0, GFP_KERNEL); - return pacl ? pacl : ERR_PTR(-ENOMEM); - } + if (state->empty && (flags & NFS4_ACL_TYPE_DEFAULT)) + return NULL; + /* * When there are no effective ACEs, the following will end * up setting a 3-element effective posix ACL with all @@ -589,7 +590,7 @@ posix_state_to_acl(struct posix_acl_state *state, unsigned int flags) add_to_mask(state, &state->groups->aces[i].perms); } - if (!state->users->n && !state->groups->n) { + if (state->users->n || state->groups->n) { pace++; pace->e_tag = ACL_MASK; low_mode_from_nfs4(state->mask.allow, &pace->e_perm, flags); diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 3ba65979a3cd..9a77a5a21557 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -1078,6 +1078,18 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name) return NULL; } clp->cl_name.len = name.len; + INIT_LIST_HEAD(&clp->cl_sessions); + idr_init(&clp->cl_stateids); + atomic_set(&clp->cl_refcount, 0); + clp->cl_cb_state = NFSD4_CB_UNKNOWN; + INIT_LIST_HEAD(&clp->cl_idhash); + INIT_LIST_HEAD(&clp->cl_openowners); + INIT_LIST_HEAD(&clp->cl_delegations); + INIT_LIST_HEAD(&clp->cl_lru); + INIT_LIST_HEAD(&clp->cl_callbacks); + INIT_LIST_HEAD(&clp->cl_revoked); + spin_lock_init(&clp->cl_lock); + rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table"); return clp; } @@ -1095,6 +1107,7 @@ free_client(struct nfs4_client *clp) WARN_ON_ONCE(atomic_read(&ses->se_ref)); free_session(ses); } + rpc_destroy_wait_queue(&clp->cl_cb_waitq); free_svc_cred(&clp->cl_cred); kfree(clp->cl_name.data); idr_destroy(&clp->cl_stateids); @@ -1347,7 +1360,6 @@ static struct nfs4_client *create_client(struct xdr_netobj name, if (clp == NULL) return NULL; - INIT_LIST_HEAD(&clp->cl_sessions); ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred); if (ret) { spin_lock(&nn->client_lock); @@ -1355,20 +1367,9 @@ static struct nfs4_client *create_client(struct xdr_netobj name, spin_unlock(&nn->client_lock); return NULL; } - idr_init(&clp->cl_stateids); - atomic_set(&clp->cl_refcount, 0); - clp->cl_cb_state = NFSD4_CB_UNKNOWN; - INIT_LIST_HEAD(&clp->cl_idhash); - INIT_LIST_HEAD(&clp->cl_openowners); - INIT_LIST_HEAD(&clp->cl_delegations); - INIT_LIST_HEAD(&clp->cl_lru); - INIT_LIST_HEAD(&clp->cl_callbacks); - INIT_LIST_HEAD(&clp->cl_revoked); - spin_lock_init(&clp->cl_lock); nfsd4_init_callback(&clp->cl_cb_null); clp->cl_time = get_seconds(); clear_bit(0, &clp->cl_cb_slot_busy); - rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table"); copy_verf(clp, verf); rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa); gen_confirm(clp); @@ -3716,9 +3717,16 @@ out: static __be32 nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp) { - if (check_for_locks(stp->st_file, lockowner(stp->st_stateowner))) + struct nfs4_lockowner *lo = lockowner(stp->st_stateowner); + + if (check_for_locks(stp->st_file, lo)) return nfserr_locks_held; - 
release_lock_stateid(stp); + /* + * Currently there's a 1-1 lock stateid<->lockowner + * correspondence, and we have to delete the lockowner when we + * delete the lock stateid: + */ + unhash_lockowner(lo); return nfs_ok; } @@ -4158,6 +4166,10 @@ static bool same_lockowner_ino(struct nfs4_lockowner *lo, struct inode *inode, c if (!same_owner_str(&lo->lo_owner, owner, clid)) return false; + if (list_empty(&lo->lo_owner.so_stateids)) { + WARN_ON_ONCE(1); + return false; + } lst = list_first_entry(&lo->lo_owner.so_stateids, struct nfs4_ol_stateid, st_perstateowner); return lst->st_file->fi_inode == inode; diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index af3f7aa73e13..ee1f88419cb0 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c @@ -472,11 +472,15 @@ bail: void dlm_destroy_master_caches(void) { - if (dlm_lockname_cache) + if (dlm_lockname_cache) { kmem_cache_destroy(dlm_lockname_cache); + dlm_lockname_cache = NULL; + } - if (dlm_lockres_cache) + if (dlm_lockres_cache) { kmem_cache_destroy(dlm_lockres_cache); + dlm_lockres_cache = NULL; + } } static void dlm_lockres_release(struct kref *kref) diff --git a/fs/splice.c b/fs/splice.c index 9bc07d2b53cf..e246954ea48c 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -1537,7 +1537,7 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *uiov, struct iovec iovstack[UIO_FASTIOV]; struct iovec *iov = iovstack; struct iov_iter iter; - ssize_t count = 0; + ssize_t count; pipe = get_pipe_info(file); if (!pipe) @@ -1546,8 +1546,9 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *uiov, ret = rw_copy_check_uvector(READ, uiov, nr_segs, ARRAY_SIZE(iovstack), iovstack, &iov); if (ret <= 0) - return ret; + goto out; + count = ret; iov_iter_init(&iter, iov, nr_segs, count, 0); sd.len = 0; @@ -1560,6 +1561,7 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *uiov, ret = __splice_from_pipe(pipe, &sd, pipe_to_user); pipe_unlock(pipe); +out: if (iov != iovstack) kfree(iov); diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index 28cc1acd5439..e9ef59b3abb1 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c @@ -47,12 +47,13 @@ static int sysfs_kf_seq_show(struct seq_file *sf, void *v) ssize_t count; char *buf; - /* acquire buffer and ensure that it's >= PAGE_SIZE */ + /* acquire buffer and ensure that it's >= PAGE_SIZE and clear */ count = seq_get_buf(sf, &buf); if (count < PAGE_SIZE) { seq_commit(sf, -1); return 0; } + memset(buf, 0, PAGE_SIZE); /* * Invoke show().
Control may reach here via seq file lseek even diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c index a66ad6196f59..8794423f7efb 100644 --- a/fs/sysfs/mount.c +++ b/fs/sysfs/mount.c @@ -63,7 +63,8 @@ int __init sysfs_init(void) { int err; - sysfs_root = kernfs_create_root(NULL, 0, NULL); + sysfs_root = kernfs_create_root(NULL, KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK, + NULL); if (IS_ERR(sysfs_root)) return PTR_ERR(sysfs_root); diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c index 1399e187d425..753e467aa1a5 100644 --- a/fs/xfs/xfs_export.c +++ b/fs/xfs/xfs_export.c @@ -237,7 +237,7 @@ xfs_fs_nfs_commit_metadata( if (!lsn) return 0; - return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL); + return -_xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL); } const struct export_operations xfs_export_operations = { diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 951a2321ee01..830c1c937b88 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -155,7 +155,7 @@ xfs_dir_fsync( if (!lsn) return 0; - return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL); + return -_xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL); } STATIC int @@ -295,7 +295,7 @@ xfs_file_aio_read( xfs_rw_ilock(ip, XFS_IOLOCK_EXCL); if (inode->i_mapping->nrpages) { - ret = -filemap_write_and_wait_range( + ret = filemap_write_and_wait_range( VFS_I(ip)->i_mapping, pos, -1); if (ret) { @@ -837,7 +837,7 @@ xfs_file_fallocate( unsigned blksize_mask = (1 << inode->i_blkbits) - 1; if (offset & blksize_mask || len & blksize_mask) { - error = -EINVAL; + error = EINVAL; goto out_unlock; } @@ -846,7 +846,7 @@ xfs_file_fallocate( * in which case it is effectively a truncate operation */ if (offset + len >= i_size_read(inode)) { - error = -EINVAL; + error = EINVAL; goto out_unlock; } diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index 301ecbfcc0be..36d630319a27 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -72,8 +72,8 @@ xfs_initxattrs( int error = 0; for (xattr = xattr_array; xattr->name != NULL; xattr++) { - error = xfs_attr_set(ip, xattr->name, xattr->value, - xattr->value_len, ATTR_SECURE); + error = -xfs_attr_set(ip, xattr->name, xattr->value, + xattr->value_len, ATTR_SECURE); if (error < 0) break; } @@ -93,8 +93,8 @@ xfs_init_security( struct inode *dir, const struct qstr *qstr) { - return security_inode_init_security(inode, dir, qstr, - &xfs_initxattrs, NULL); + return -security_inode_init_security(inode, dir, qstr, + &xfs_initxattrs, NULL); } static void @@ -173,12 +173,12 @@ xfs_generic_create( #ifdef CONFIG_XFS_POSIX_ACL if (default_acl) { - error = xfs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT); + error = -xfs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT); if (error) goto out_cleanup_inode; } if (acl) { - error = xfs_set_acl(inode, acl, ACL_TYPE_ACCESS); + error = -xfs_set_acl(inode, acl, ACL_TYPE_ACCESS); if (error) goto out_cleanup_inode; } diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c index 348e4d2ed6e6..dc977b6e6a36 100644 --- a/fs/xfs/xfs_qm.c +++ b/fs/xfs/xfs_qm.c @@ -843,22 +843,17 @@ xfs_qm_init_quotainfo( qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP); - if ((error = list_lru_init(&qinf->qi_lru))) { - kmem_free(qinf); - mp->m_quotainfo = NULL; - return error; - } + error = -list_lru_init(&qinf->qi_lru); + if (error) + goto out_free_qinf; /* * See if quotainodes are setup, and if not, allocate them, * and change the superblock accordingly. 
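 * (If this or the LRU setup above fails, the out_free_lru and
 * out_free_qinf labels added at the end of this function unwind
 * only what was already initialized.)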
*/ - if ((error = xfs_qm_init_quotainos(mp))) { - list_lru_destroy(&qinf->qi_lru); - kmem_free(qinf); - mp->m_quotainfo = NULL; - return error; - } + error = xfs_qm_init_quotainos(mp); + if (error) + goto out_free_lru; INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS); INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS); @@ -918,7 +913,7 @@ xfs_qm_init_quotainfo( qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit); qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit); qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit); - + xfs_qm_dqdestroy(dqp); } else { qinf->qi_btimelimit = XFS_QM_BTIMELIMIT; @@ -935,6 +930,13 @@ xfs_qm_init_quotainfo( qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE; register_shrinker(&qinf->qi_shrinker); return 0; + +out_free_lru: + list_lru_destroy(&qinf->qi_lru); +out_free_qinf: + kmem_free(qinf); + mp->m_quotainfo = NULL; + return error; } diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 205376776377..3494eff8e4eb 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -1433,11 +1433,11 @@ xfs_fs_fill_super( if (error) goto out_free_fsname; - error = xfs_init_mount_workqueues(mp); + error = -xfs_init_mount_workqueues(mp); if (error) goto out_close_devices; - error = xfs_icsb_init_counters(mp); + error = -xfs_icsb_init_counters(mp); if (error) goto out_destroy_workqueues; diff --git a/include/asm-generic/dma-coherent.h b/include/asm-generic/dma-coherent.h index 2be8a2dbc868..0297e5875798 100644 --- a/include/asm-generic/dma-coherent.h +++ b/include/asm-generic/dma-coherent.h @@ -16,16 +16,13 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, * Standard interface */ #define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY -extern int -dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, - dma_addr_t device_addr, size_t size, int flags); +int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, + dma_addr_t device_addr, size_t size, int flags); -extern void -dma_release_declared_memory(struct device *dev); +void dma_release_declared_memory(struct device *dev); -extern void * -dma_mark_declared_memory_occupied(struct device *dev, - dma_addr_t device_addr, size_t size); +void *dma_mark_declared_memory_occupied(struct device *dev, + dma_addr_t device_addr, size_t size); #else #define dma_alloc_from_coherent(dev, size, handle, ret) (0) #define dma_release_from_coherent(dev, order, vaddr) (0) diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h index b4ea8f50fc65..5e752b959054 100644 --- a/include/asm-generic/resource.h +++ b/include/asm-generic/resource.h @@ -12,7 +12,7 @@ [RLIMIT_CPU] = { RLIM_INFINITY, RLIM_INFINITY }, \ [RLIMIT_FSIZE] = { RLIM_INFINITY, RLIM_INFINITY }, \ [RLIMIT_DATA] = { RLIM_INFINITY, RLIM_INFINITY }, \ - [RLIMIT_STACK] = { _STK_LIM, _STK_LIM_MAX }, \ + [RLIMIT_STACK] = { _STK_LIM, RLIM_INFINITY }, \ [RLIMIT_CORE] = { 0, RLIM_INFINITY }, \ [RLIMIT_RSS] = { RLIM_INFINITY, RLIM_INFINITY }, \ [RLIMIT_NPROC] = { 0, 0 }, \ diff --git a/include/dt-bindings/clk/at91.h b/include/dt-bindings/clock/at91.h index 0b4cb999a3f7..0b4cb999a3f7 100644 --- a/include/dt-bindings/clk/at91.h +++ b/include/dt-bindings/clock/at91.h diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index 63b5eff0a80f..fdd7e1b61f60 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h @@ -47,6 +47,7 @@ struct amba_driver { enum amba_vendor { AMBA_VENDOR_ARM = 0x41, AMBA_VENDOR_ST = 0x80, + AMBA_VENDOR_QCOM = 0x51, }; extern struct bus_type amba_bustype; diff --git 
a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h index 32a89cf5ec45..8c98113069ce 100644 --- a/include/linux/amba/mmci.h +++ b/include/linux/amba/mmci.h @@ -6,28 +6,9 @@ #include <linux/mmc/host.h> - -/* - * These defines is places here due to access is needed from machine - * configuration files. The ST Micro version does not have ROD and - * reuse the voltage registers for direction settings. - */ -#define MCI_ST_DATA2DIREN (1 << 2) -#define MCI_ST_CMDDIREN (1 << 3) -#define MCI_ST_DATA0DIREN (1 << 4) -#define MCI_ST_DATA31DIREN (1 << 5) -#define MCI_ST_FBCLKEN (1 << 7) -#define MCI_ST_DATA74DIREN (1 << 8) - -/* Just some dummy forwarding */ -struct dma_chan; - /** * struct mmci_platform_data - platform configuration for the MMCI * (also known as PL180) block. - * @f_max: the maximum operational frequency for this host in this - * platform configuration. When this is specified it takes precedence - * over the module parameter for the same frequency. * @ocr_mask: available voltages on the 4 pins from the block, this * is ignored if a regulator is used, see the MMC_VDD_* masks in * mmc/host.h @@ -42,37 +23,14 @@ struct dma_chan; * @gpio_wp: read this GPIO pin to see if the card is write protected * @gpio_cd: read this GPIO pin to detect card insertion * @cd_invert: true if the gpio_cd pin value is active low - * @capabilities: the capabilities of the block as implemented in - * this platform, signify anything MMC_CAP_* from mmc/host.h - * @capabilities2: more capabilities, MMC_CAP2_* from mmc/host.h - * @sigdir: a bit field indicating for what bits in the MMC bus the host - * should enable signal direction indication. - * @dma_filter: function used to select an appropriate RX and TX - * DMA channel to be used for DMA, if and only if you're deploying the - * generic DMA engine - * @dma_rx_param: parameter passed to the DMA allocation - * filter in order to select an appropriate RX channel. If - * there is a bidirectional RX+TX channel, then just specify - * this and leave dma_tx_param set to NULL - * @dma_tx_param: parameter passed to the DMA allocation - * filter in order to select an appropriate TX channel. 
If this - * is NULL the driver will attempt to use the RX channel as a - * bidirectional channel */ struct mmci_platform_data { - unsigned int f_max; unsigned int ocr_mask; int (*ios_handler)(struct device *, struct mmc_ios *); unsigned int (*status)(struct device *); int gpio_wp; int gpio_cd; bool cd_invert; - unsigned long capabilities; - unsigned long capabilities2; - u32 sigdir; - bool (*dma_filter)(struct dma_chan *chan, void *filter_param); - void *dma_rx_param; - void *dma_tx_param; }; #endif diff --git a/include/linux/bio.h b/include/linux/bio.h index bba550826921..5a645769f020 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -333,7 +333,7 @@ static inline struct bio *bio_next_split(struct bio *bio, int sectors, extern struct bio_set *bioset_create(unsigned int, unsigned int); extern void bioset_free(struct bio_set *); -extern mempool_t *biovec_create_pool(struct bio_set *bs, int pool_entries); +extern mempool_t *biovec_create_pool(int pool_entries); extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *); extern void bio_put(struct bio *); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 0120451545d8..c15128833100 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -8,7 +8,13 @@ struct blk_mq_tags; struct blk_mq_cpu_notifier { struct list_head list; void *data; - void (*notify)(void *data, unsigned long action, unsigned int cpu); + int (*notify)(void *data, unsigned long action, unsigned int cpu); +}; + +struct blk_mq_ctxmap { + unsigned int map_size; + unsigned int bits_per_word; + struct blk_align_bitmap *map; }; struct blk_mq_hw_ctx { @@ -18,7 +24,11 @@ struct blk_mq_hw_ctx { } ____cacheline_aligned_in_smp; unsigned long state; /* BLK_MQ_S_* flags */ - struct delayed_work delayed_work; + struct delayed_work run_work; + struct delayed_work delay_work; + cpumask_var_t cpumask; + int next_cpu; + int next_cpu_batch; unsigned long flags; /* BLK_MQ_F_* flags */ @@ -27,13 +37,13 @@ struct blk_mq_hw_ctx { void *driver_data; + struct blk_mq_ctxmap ctx_map; + unsigned int nr_ctx; struct blk_mq_ctx **ctxs; - unsigned int nr_ctx_map; - unsigned long *ctx_map; - struct request **rqs; - struct list_head page_list; + unsigned int wait_index; + struct blk_mq_tags *tags; unsigned long queued; @@ -41,31 +51,40 @@ struct blk_mq_hw_ctx { #define BLK_MQ_MAX_DISPATCH_ORDER 10 unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER]; - unsigned int queue_depth; unsigned int numa_node; unsigned int cmd_size; /* per-request extra data */ + atomic_t nr_active; + struct blk_mq_cpu_notifier cpu_notifier; struct kobject kobj; }; -struct blk_mq_reg { +struct blk_mq_tag_set { struct blk_mq_ops *ops; unsigned int nr_hw_queues; - unsigned int queue_depth; + unsigned int queue_depth; /* max hw supported */ unsigned int reserved_tags; unsigned int cmd_size; /* per-request extra data */ int numa_node; unsigned int timeout; unsigned int flags; /* BLK_MQ_F_* */ + void *driver_data; + + struct blk_mq_tags **tags; + + struct mutex tag_list_lock; + struct list_head tag_list; }; typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *); typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int); -typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_reg *,unsigned int); -typedef void (free_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int); typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int); typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int); +typedef int (init_request_fn)(void *, struct request *, 
unsigned int, + unsigned int, unsigned int); +typedef void (exit_request_fn)(void *, struct request *, unsigned int, + unsigned int); struct blk_mq_ops { /* @@ -86,18 +105,20 @@ struct blk_mq_ops { softirq_done_fn *complete; /* - * Override for hctx allocations (should probably go) - */ - alloc_hctx_fn *alloc_hctx; - free_hctx_fn *free_hctx; - - /* * Called when the block layer side of a hardware queue has been * set up, allowing the driver to allocate/init matching structures. * Ditto for exit/teardown. */ init_hctx_fn *init_hctx; exit_hctx_fn *exit_hctx; + + /* + * Called for every command allocated by the block layer to allow + * the driver to set up driver specific data. + * Ditto for exit/teardown. + */ + init_request_fn *init_request; + exit_request_fn *exit_request; }; enum { @@ -107,18 +128,24 @@ enum { BLK_MQ_F_SHOULD_MERGE = 1 << 0, BLK_MQ_F_SHOULD_SORT = 1 << 1, - BLK_MQ_F_SHOULD_IPI = 1 << 2, + BLK_MQ_F_TAG_SHARED = 1 << 2, + BLK_MQ_F_SG_MERGE = 1 << 3, + BLK_MQ_F_SYSFS_UP = 1 << 4, BLK_MQ_S_STOPPED = 0, + BLK_MQ_S_TAG_ACTIVE = 1, BLK_MQ_MAX_DEPTH = 2048, + + BLK_MQ_CPU_WORK_BATCH = 8, }; -struct request_queue *blk_mq_init_queue(struct blk_mq_reg *, void *); +struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *); int blk_mq_register_disk(struct gendisk *); void blk_mq_unregister_disk(struct gendisk *); -int blk_mq_init_commands(struct request_queue *, int (*init)(void *data, struct blk_mq_hw_ctx *, struct request *, unsigned int), void *data); -void blk_mq_free_commands(struct request_queue *, void (*free)(void *data, struct blk_mq_hw_ctx *, struct request *, unsigned int), void *data); + +int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set); +void blk_mq_free_tag_set(struct blk_mq_tag_set *set); void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule); @@ -126,28 +153,28 @@ void blk_mq_insert_request(struct request *, bool, bool, bool); void blk_mq_run_queues(struct request_queue *q, bool async); void blk_mq_free_request(struct request *rq); bool blk_mq_can_queue(struct blk_mq_hw_ctx *); -struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp); -struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp); -struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag); +struct request *blk_mq_alloc_request(struct request_queue *q, int rw, + gfp_t gfp, bool reserved); +struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx, unsigned int tag); struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); -struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_reg *, unsigned int); -void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int); +struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int); -bool blk_mq_end_io_partial(struct request *rq, int error, - unsigned int nr_bytes); -static inline void blk_mq_end_io(struct request *rq, int error) -{ - bool done = !blk_mq_end_io_partial(rq, error, blk_rq_bytes(rq)); - BUG_ON(!done); -} +void blk_mq_end_io(struct request *rq, int error); +void __blk_mq_end_io(struct request *rq, int error); +void blk_mq_requeue_request(struct request *rq); +void blk_mq_add_to_requeue_list(struct request *rq, bool at_head); +void blk_mq_kick_requeue_list(struct request_queue *q); void blk_mq_complete_request(struct request *rq); void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); void 
blk_mq_stop_hw_queues(struct request_queue *q); -void blk_mq_start_stopped_hw_queues(struct request_queue *q); +void blk_mq_start_hw_queues(struct request_queue *q); +void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); +void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); +void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, void (*fn)(void *data, unsigned long *), void *data); /* * Driver command data is immediately after the request. So subtract request @@ -162,12 +189,6 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq) return (void *) rq + sizeof(*rq); } -static inline struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx, - unsigned int tag) -{ - return hctx->rqs[tag]; -} - #define queue_for_each_hw_ctx(q, hctx, i) \ for ((i) = 0; (i) < (q)->nr_hw_queues && \ ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++) diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index aa0eaa2d0bd8..d8e4cea23a25 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -190,6 +190,7 @@ enum rq_flag_bits { __REQ_PM, /* runtime pm request */ __REQ_END, /* last of chain of requests */ __REQ_HASHED, /* on IO scheduler merge hash */ + __REQ_MQ_INFLIGHT, /* track inflight for MQ */ __REQ_NR_BITS, /* stops here */ }; @@ -243,5 +244,6 @@ enum rq_flag_bits { #define REQ_PM (1ULL << __REQ_PM) #define REQ_END (1ULL << __REQ_END) #define REQ_HASHED (1ULL << __REQ_HASHED) +#define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT) #endif /* __LINUX_BLK_TYPES_H */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 0d84981ee03f..8aba35f46f87 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -90,15 +90,15 @@ enum rq_cmd_type_bits { #define BLK_MAX_CDB 16 /* - * try to put the fields that are referenced together in the same cacheline. - * if you modify this structure, be sure to check block/blk-core.c:blk_rq_init() - * as well! + * Try to put the fields that are referenced together in the same cacheline. + * + * If you modify this structure, make sure to update blk_rq_init() and + * especially blk_mq_rq_ctx_init() to take care of the added fields. 
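+ *
+ * (blk-mq preallocates requests and recycles them across I/Os, so any
+ * field that blk_mq_rq_ctx_init() does not reinitialize will carry
+ * stale state into the next request that reuses the slot.)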
*/ struct request { struct list_head queuelist; union { struct call_single_data csd; - struct work_struct mq_flush_work; unsigned long fifo_time; }; @@ -178,7 +178,6 @@ struct request { unsigned short ioprio; void *special; /* opaque pointer available for LLD use */ - char *buffer; /* kaddr of the current segment if available */ int tag; int errors; @@ -463,6 +462,10 @@ struct request_queue { struct request *flush_rq; spinlock_t mq_flush_lock; + struct list_head requeue_list; + spinlock_t requeue_lock; + struct work_struct requeue_work; + struct mutex sysfs_lock; int bypass_depth; @@ -481,6 +484,9 @@ struct request_queue { wait_queue_head_t mq_freeze_wq; struct percpu_counter mq_usage_counter; struct list_head all_q_node; + + struct blk_mq_tag_set *tag_set; + struct list_head tag_set_list; }; #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ @@ -504,6 +510,7 @@ struct request_queue { #define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */ #define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */ #define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */ +#define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/ #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ (1 << QUEUE_FLAG_STACKABLE) | \ @@ -613,6 +620,15 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) #define rq_data_dir(rq) (((rq)->cmd_flags & 1) != 0) +/* + * Driver can handle struct request, if it either has an old style + * request_fn defined, or is blk-mq based. + */ +static inline bool queue_is_rq_based(struct request_queue *q) +{ + return q->request_fn || q->mq_ops; +} + static inline unsigned int blk_queue_cluster(struct request_queue *q) { return q->limits.cluster; @@ -937,6 +953,7 @@ extern struct request *blk_fetch_request(struct request_queue *q); */ extern bool blk_update_request(struct request *rq, int error, unsigned int nr_bytes); +extern void blk_finish_request(struct request *rq, int error); extern bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes); extern void blk_end_request_all(struct request *rq, int error); @@ -1053,7 +1070,6 @@ static inline void blk_post_runtime_resume(struct request_queue *q, int err) {} * schedule() where blk_schedule_flush_plug() is called. 
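 *
 * A minimal usage sketch (hedged; rw and bio setup omitted):
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	submit_bio(rw, bio);	(repeat for a batch of bios)
 *	blk_finish_plug(&plug);	(hand the whole batch to the driver)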
*/ struct blk_plug { - unsigned long magic; /* detect uninitialized use-cases */ struct list_head list; /* requests */ struct list_head mq_list; /* blk-mq requests */ struct list_head cb_list; /* md requires an unplug callback */ @@ -1102,7 +1118,8 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk) /* * tag stuff */ -#define blk_rq_tagged(rq) ((rq)->cmd_flags & REQ_QUEUED) +#define blk_rq_tagged(rq) \ + ((rq)->mq_ctx || ((rq)->cmd_flags & REQ_QUEUED)) extern int blk_queue_start_tag(struct request_queue *, struct request *); extern struct request *blk_queue_find_tag(struct request_queue *, int); extern void blk_queue_end_tag(struct request_queue *, struct request *); @@ -1370,8 +1387,9 @@ static inline void put_dev_sector(Sector p) } struct work_struct; -int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); -int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay); +int kblockd_schedule_work(struct work_struct *work); +int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay); +int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay); #ifdef CONFIG_BLK_CGROUP /* diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index c2515851c1aa..d60904b9e505 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -473,6 +473,7 @@ struct cftype { }; extern struct cgroup_root cgrp_dfl_root; +extern struct css_set init_css_set; static inline bool cgroup_on_dfl(const struct cgroup *cgrp) { @@ -700,6 +701,20 @@ static inline struct cgroup_subsys_state *task_css(struct task_struct *task, return task_css_check(task, subsys_id, false); } +/** + * task_css_is_root - test whether a task belongs to the root css + * @task: the target task + * @subsys_id: the target subsystem ID + * + * Test whether @task belongs to the root css on the specified subsystem. + * May be invoked in any context. + */ +static inline bool task_css_is_root(struct task_struct *task, int subsys_id) +{ + return task_css_check(task, subsys_id, true) == + init_css_set.subsys[subsys_id]; +} + static inline struct cgroup *task_cgroup(struct task_struct *task, int subsys_id) { diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index fd4aee29ad10..0c3eab1e39ac 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -8,6 +8,12 @@ #include <linux/dma-direction.h> #include <linux/scatterlist.h> +/* + * A dma_addr_t can hold any valid DMA or bus address for the platform. + * It can be given to a device to use as a DMA source or target. A CPU cannot + * reference a dma_addr_t directly because there may be translation between + * its physical address space and the bus address space. 
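+ *
+ * A minimal sketch of the distinction (assuming a valid struct device
+ * *dev and a size_t size):
+ *
+ *	void *cpu_addr = kmalloc(size, GFP_KERNEL);	(CPU uses this)
+ *	dma_addr_t handle = dma_map_single(dev, cpu_addr, size,
+ *					   DMA_TO_DEVICE);	(device uses this)
+ *	if (dma_mapping_error(dev, handle))
+ *		(unwind and fail)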
+ */ struct dma_map_ops { void* (*alloc)(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, @@ -186,7 +192,7 @@ static inline int dma_get_cache_alignment(void) #ifndef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY static inline int -dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, +dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags) { return 0; @@ -217,13 +223,14 @@ extern void *dmam_alloc_noncoherent(struct device *dev, size_t size, extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle); #ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY -extern int dmam_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, +extern int dmam_declare_coherent_memory(struct device *dev, + phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags); extern void dmam_release_declared_memory(struct device *dev); #else /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */ static inline int dmam_declare_coherent_memory(struct device *dev, - dma_addr_t bus_addr, dma_addr_t device_addr, + phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, gfp_t gfp) { return 0; diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 8300fb87b84a..72cb0ddb9678 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -429,6 +429,7 @@ typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param); typedef void (*dma_async_tx_callback)(void *dma_async_param); struct dmaengine_unmap_data { + u8 map_cnt; u8 to_cnt; u8 from_cnt; u8 bidi_cnt; diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index bed128e8f4b1..05e53ccb708b 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -1,6 +1,7 @@ #ifndef __LINUX_GPIO_CONSUMER_H #define __LINUX_GPIO_CONSUMER_H +#include <linux/bug.h> #include <linux/err.h> #include <linux/kernel.h> @@ -23,6 +24,12 @@ struct gpio_desc *__must_check gpiod_get(struct device *dev, struct gpio_desc *__must_check gpiod_get_index(struct device *dev, const char *con_id, unsigned int idx); +struct gpio_desc *__must_check gpiod_get_optional(struct device *dev, + const char *con_id); +struct gpio_desc *__must_check gpiod_get_index_optional(struct device *dev, + const char *con_id, + unsigned int index); + void gpiod_put(struct gpio_desc *desc); struct gpio_desc *__must_check devm_gpiod_get(struct device *dev, @@ -30,6 +37,12 @@ struct gpio_desc *__must_check devm_gpiod_get(struct device *dev, struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev, const char *con_id, unsigned int idx); +struct gpio_desc *__must_check devm_gpiod_get_optional(struct device *dev, + const char *con_id); +struct gpio_desc *__must_check +devm_gpiod_get_index_optional(struct device *dev, const char *con_id, + unsigned int index); + void devm_gpiod_put(struct device *dev, struct gpio_desc *desc); int gpiod_get_direction(const struct gpio_desc *desc); @@ -73,6 +86,20 @@ static inline struct gpio_desc *__must_check gpiod_get_index(struct device *dev, { return ERR_PTR(-ENOSYS); } + +static inline struct gpio_desc *__must_check +gpiod_get_optional(struct device *dev, const char *con_id) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct gpio_desc *__must_check +gpiod_get_index_optional(struct device *dev, const char *con_id, + unsigned int index) +{ + return ERR_PTR(-ENOSYS); +} + static inline void gpiod_put(struct gpio_desc *desc) { might_sleep(); @@ -93,6 +120,20 @@ struct 
gpio_desc *__must_check devm_gpiod_get_index(struct device *dev, { return ERR_PTR(-ENOSYS); } + +static inline struct gpio_desc *__must_check +devm_gpiod_get_optional(struct device *dev, const char *con_id) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct gpio_desc *__must_check +devm_gpiod_get_index_optional(struct device *dev, const char *con_id, + unsigned int index) +{ + return ERR_PTR(-ENOSYS); +} + static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc) { might_sleep(); diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 1827b43966d9..573e4f3243d0 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -51,7 +51,10 @@ struct seq_file; * format specifier for an unsigned int. It is substituted by the actual * number of the gpio. * @can_sleep: flag must be set iff get()/set() methods sleep, as they - * must while accessing GPIO expander chips over I2C or SPI + * must while accessing GPIO expander chips over I2C or SPI. This + * implies that if the chip supports IRQs, these IRQs need to be threaded + * as the chip access may sleep when e.g. reading out the IRQ status + * registers. * @exported: flags if the gpiochip is exported for use from sysfs. Private. * * A gpio_chip can help platforms abstract various sources of GPIOs so diff --git a/include/linux/hsi/hsi.h b/include/linux/hsi/hsi.h index 39bfd5b89077..3ec06300d535 100644 --- a/include/linux/hsi/hsi.h +++ b/include/linux/hsi/hsi.h @@ -68,17 +68,31 @@ enum { }; /** + * struct hsi_channel - channel resource used by the hsi clients + * @id: Channel number + * @name: Channel name + */ +struct hsi_channel { + unsigned int id; + const char *name; +}; + +/** * struct hsi_config - Configuration for RX/TX HSI modules * @mode: Bit transmission mode (STREAM or FRAME) - * @channels: Number of channels to use [1..16] + * @channels: Channel resources used by the client + * @num_channels: Number of channel resources + * @num_hw_channels: Number of channels the transceiver is configured for [1..16] * @speed: Max bit transmission speed (Kbit/s) * @flow: RX flow type (SYNCHRONIZED or PIPELINE) * @arb_mode: Arbitration mode for TX frame (Round robin, priority) */ struct hsi_config { - unsigned int mode; - unsigned int channels; - unsigned int speed; + unsigned int mode; + struct hsi_channel *channels; + unsigned int num_channels; + unsigned int num_hw_channels; + unsigned int speed; union { unsigned int flow; /* RX only */ unsigned int arb_mode; /* TX only */ @@ -282,6 +296,21 @@ struct hsi_controller *hsi_alloc_controller(unsigned int n_ports, gfp_t flags); void hsi_put_controller(struct hsi_controller *hsi); int hsi_register_controller(struct hsi_controller *hsi); void hsi_unregister_controller(struct hsi_controller *hsi); +struct hsi_client *hsi_new_client(struct hsi_port *port, + struct hsi_board_info *info); +int hsi_remove_client(struct device *dev, void *data); +void hsi_port_unregister_clients(struct hsi_port *port); + +#ifdef CONFIG_OF +void hsi_add_clients_from_dt(struct hsi_port *port, + struct device_node *clients); +#else +static inline void hsi_add_clients_from_dt(struct hsi_port *port, + struct device_node *clients) +{ + return; +} +#endif static inline void hsi_controller_set_drvdata(struct hsi_controller *hsi, void *data) @@ -305,6 +334,8 @@ static inline struct hsi_port *hsi_find_port_num(struct hsi_controller *hsi, */ int hsi_async(struct hsi_client *cl, struct hsi_msg *msg); +int hsi_get_channel_id_by_name(struct hsi_client *cl, char *name); + /** * 
hsi_id - Get HSI controller ID associated to a client * @cl: Pointer to a HSI client diff --git a/include/linux/hsi/ssi_protocol.h b/include/linux/hsi/ssi_protocol.h new file mode 100644 index 000000000000..1433651be0dc --- /dev/null +++ b/include/linux/hsi/ssi_protocol.h @@ -0,0 +1,42 @@ +/* + * ssip_slave.h + * + * SSIP slave support header file + * + * Copyright (C) 2010 Nokia Corporation. All rights reserved. + * + * Contact: Carlos Chinea <carlos.chinea@nokia.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef __LINUX_SSIP_SLAVE_H__ +#define __LINUX_SSIP_SLAVE_H__ + +#include <linux/hsi/hsi.h> + +static inline void ssip_slave_put_master(struct hsi_client *master) +{ +} + +struct hsi_client *ssip_slave_get_master(struct hsi_client *slave); +int ssip_slave_start_tx(struct hsi_client *master); +int ssip_slave_stop_tx(struct hsi_client *master); +void ssip_reset_event(struct hsi_client *master); + +int ssip_slave_running(struct hsi_client *master); + +#endif /* __LINUX_SSIP_SLAVE_H__ */ + diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index 7c8b20b120ea..a9a53b12397b 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h @@ -56,6 +56,7 @@ struct macvlan_dev { int numqueues; netdev_features_t tap_features; int minor; + int nest_level; }; static inline void macvlan_count_rx(const struct macvlan_dev *vlan, diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 13bbbde00e68..b2acc4a1b13c 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -73,7 +73,7 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) /* found in socket.c */ extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *)); -static inline int is_vlan_dev(struct net_device *dev) +static inline bool is_vlan_dev(struct net_device *dev) { return dev->priv_flags & IFF_802_1Q_VLAN; } @@ -159,6 +159,7 @@ struct vlan_dev_priv { #ifdef CONFIG_NET_POLL_CONTROLLER struct netpoll *netpoll; #endif + unsigned int nest_level; }; static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev) @@ -197,6 +198,12 @@ extern void vlan_vids_del_by_dev(struct net_device *dev, const struct net_device *by_dev); extern bool vlan_uses_dev(const struct net_device *dev); + +static inline int vlan_get_encap_level(struct net_device *dev) +{ + BUG_ON(!is_vlan_dev(dev)); + return vlan_dev_priv(dev)->nest_level; +} #else static inline struct net_device * __vlan_find_dev_deep(struct net_device *real_dev, @@ -263,6 +270,11 @@ static inline bool vlan_uses_dev(const struct net_device *dev) { return false; } +static inline int vlan_get_encap_level(struct net_device *dev) +{ + BUG(); + return 0; +} #endif static inline bool vlan_hw_offload_capable(netdev_features_t features, @@ -483,4 +495,5 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb, */ skb->protocol = htons(ETH_P_802_2); } + #endif /* !(_LINUX_IF_VLAN_H_) 
*/ diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 97ac926c78a7..051c85032f48 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -272,6 +272,11 @@ static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m) return -EINVAL; } +static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask) +{ + return 0; +} + static inline int irq_can_set_affinity(unsigned int irq) { return 0; diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index b0122dc6f96a..ca1be5c9136c 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -50,7 +50,24 @@ enum kernfs_node_flag { /* @flags for kernfs_create_root() */ enum kernfs_root_flag { - KERNFS_ROOT_CREATE_DEACTIVATED = 0x0001, + /* + * kernfs_nodes are created in the deactivated state and invisible. + * They require explicit kernfs_activate() to become visible. This + * can be used to make related nodes become visible atomically + * after all nodes are created successfully. + */ + KERNFS_ROOT_CREATE_DEACTIVATED = 0x0001, + + /* + * For regular files, if the opener has CAP_DAC_OVERRIDE, open(2) + * succeeds regardless of the RW permissions. sysfs had an extra + * layer of enforcement where open(2) fails with -EACCES regardless + * of CAP_DAC_OVERRIDE if the permission doesn't have the + * respective read or write access at all (none of S_IRUGO or + * S_IWUGO) or the respective operation isn't implemented. The + * following flag enables that behavior. + */ + KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK = 0x0002, }; /* type-specific structures for kernfs_node union members */ diff --git a/include/linux/mfd/tc3589x.h b/include/linux/mfd/tc3589x.h index 6b8e1ff4672b..e6088c2e2092 100644 --- a/include/linux/mfd/tc3589x.h +++ b/include/linux/mfd/tc3589x.h @@ -111,7 +111,6 @@ enum tx3589x_block { #define TC3589x_INT_PORIRQ 7 #define TC3589x_NR_INTERNAL_IRQS 8 -#define TC3589x_INT_GPIO(x) (TC3589x_NR_INTERNAL_IRQS + (x)) struct tc3589x { struct mutex lock; diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index b66e7610d4ee..7040dc98ff8b 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -421,6 +421,17 @@ struct mlx4_wqe_inline_seg { __be32 byte_count; }; +enum mlx4_update_qp_attr { + MLX4_UPDATE_QP_SMAC = 1 << 0, +}; + +struct mlx4_update_qp_params { + u8 smac_index; +}; + +int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp, + enum mlx4_update_qp_attr attr, + struct mlx4_update_qp_params *params); int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state, struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar, diff --git a/include/linux/net.h b/include/linux/net.h index 94734a6259a4..17d83393afcc 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -248,24 +248,17 @@ do { \ bool __net_get_random_once(void *buf, int nbytes, bool *done, struct static_key *done_key); -#ifdef HAVE_JUMP_LABEL -#define ___NET_RANDOM_STATIC_KEY_INIT ((struct static_key) \ - { .enabled = ATOMIC_INIT(0), .entries = (void *)1 }) -#else /* !HAVE_JUMP_LABEL */ -#define ___NET_RANDOM_STATIC_KEY_INIT STATIC_KEY_INIT_FALSE -#endif /* HAVE_JUMP_LABEL */ - #define net_get_random_once(buf, nbytes) \ ({ \ bool ___ret = false; \ static bool ___done = false; \ - static struct static_key ___done_key = \ - ___NET_RANDOM_STATIC_KEY_INIT; \ - if (!static_key_true(&___done_key)) \ + static struct static_key ___once_key = \ + STATIC_KEY_INIT_TRUE; \ + if (static_key_true(&___once_key)) \ ___ret =
__net_get_random_once(buf, \ nbytes, \ &___done, \ - &___done_key); \ + &___once_key); \ ___ret; \ }) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 7ed3a3aa6604..b42d07b0390b 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1144,6 +1144,7 @@ struct net_device_ops { netdev_tx_t (*ndo_dfwd_start_xmit) (struct sk_buff *skb, struct net_device *dev, void *priv); + int (*ndo_get_lock_subclass)(struct net_device *dev); }; /** @@ -2950,7 +2951,12 @@ static inline void netif_addr_lock(struct net_device *dev) static inline void netif_addr_lock_nested(struct net_device *dev) { - spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING); + int subclass = SINGLE_DEPTH_NESTING; + + if (dev->netdev_ops->ndo_get_lock_subclass) + subclass = dev->netdev_ops->ndo_get_lock_subclass(dev); + + spin_lock_nested(&dev->addr_list_lock, subclass); } static inline void netif_addr_lock_bh(struct net_device *dev) @@ -3050,10 +3056,19 @@ extern int weight_p; extern int bpf_jit_enable; bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev); +struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, + struct list_head **iter); struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev, struct list_head **iter); /* iterate through upper list, must be called under RCU read lock */ +#define netdev_for_each_upper_dev_rcu(dev, updev, iter) \ + for (iter = &(dev)->adj_list.upper, \ + updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \ + updev; \ + updev = netdev_upper_get_next_dev_rcu(dev, &(iter))) + +/* iterate through upper list, must be called under RCU read lock */ #define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \ for (iter = &(dev)->all_adj_list.upper, \ updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \ @@ -3077,6 +3092,14 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev, priv; \ priv = netdev_lower_get_next_private_rcu(dev, &(iter))) +void *netdev_lower_get_next(struct net_device *dev, + struct list_head **iter); +#define netdev_for_each_lower_dev(dev, ldev, iter) \ + for (iter = &(dev)->adj_list.lower, \ + ldev = netdev_lower_get_next(dev, &(iter)); \ + ldev; \ + ldev = netdev_lower_get_next(dev, &(iter))) + void *netdev_adjacent_get_private(struct list_head *adj_list); void *netdev_lower_get_first_private_rcu(struct net_device *dev); struct net_device *netdev_master_upper_dev_get(struct net_device *dev); @@ -3092,6 +3115,8 @@ void netdev_upper_dev_unlink(struct net_device *dev, void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); void *netdev_lower_dev_get_private(struct net_device *dev, struct net_device *lower_dev); +int dev_get_nest_level(struct net_device *dev, + bool (*type_check)(struct net_device *dev)); int skb_checksum_help(struct sk_buff *skb); struct sk_buff *__skb_gso_segment(struct sk_buff *skb, netdev_features_t features, bool tx_path); @@ -3180,12 +3205,7 @@ void netdev_change_features(struct net_device *dev); void netif_stacked_transfer_operstate(const struct net_device *rootdev, struct net_device *dev); -netdev_features_t netif_skb_dev_features(struct sk_buff *skb, - const struct net_device *dev); -static inline netdev_features_t netif_skb_features(struct sk_buff *skb) -{ - return netif_skb_dev_features(skb, skb->dev); -} +netdev_features_t netif_skb_features(struct sk_buff *skb); static inline bool net_gso_ok(netdev_features_t features, int gso_type) { diff --git a/include/linux/of.h b/include/linux/of.h index 
4c50d0b78b89..fa362867b453 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -356,7 +356,7 @@ int of_device_is_stdout_path(struct device_node *dn); #else /* CONFIG_OF */ -static inline const char* of_node_full_name(struct device_node *np) +static inline const char* of_node_full_name(const struct device_node *np) { return "<no-node>"; } diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h index f14123a5a9df..38fc05036015 100644 --- a/include/linux/of_gpio.h +++ b/include/linux/of_gpio.h @@ -19,7 +19,6 @@ #include <linux/errno.h> #include <linux/gpio.h> #include <linux/of.h> -#include <linux/gpio/consumer.h> struct device_node; @@ -48,7 +47,7 @@ static inline struct of_mm_gpio_chip *to_of_mm_gpio_chip(struct gpio_chip *gc) return container_of(gc, struct of_mm_gpio_chip, gc); } -extern struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np, +extern int of_get_named_gpio_flags(struct device_node *np, const char *list_name, int index, enum of_gpio_flags *flags); extern int of_mm_gpiochip_add(struct device_node *np, @@ -63,10 +62,10 @@ extern int of_gpio_simple_xlate(struct gpio_chip *gc, #else /* CONFIG_OF_GPIO */ /* Drivers may not strictly depend on the GPIO support, so let them link. */ -static inline struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np, +static inline int of_get_named_gpio_flags(struct device_node *np, const char *list_name, int index, enum of_gpio_flags *flags) { - return ERR_PTR(-ENOSYS); + return -ENOSYS; } static inline int of_gpio_simple_xlate(struct gpio_chip *gc, @@ -81,18 +80,6 @@ static inline void of_gpiochip_remove(struct gpio_chip *gc) { } #endif /* CONFIG_OF_GPIO */ -static inline int of_get_named_gpio_flags(struct device_node *np, - const char *list_name, int index, enum of_gpio_flags *flags) -{ - struct gpio_desc *desc; - desc = of_get_named_gpiod_flags(np, list_name, index, flags); - - if (IS_ERR(desc)) - return PTR_ERR(desc); - else - return desc_to_gpio(desc); -} - /** * of_gpio_named_count() - Count GPIOs for a device * @np: device node to count GPIOs for @@ -129,22 +116,6 @@ static inline int of_gpio_count(struct device_node *np) return of_gpio_named_count(np, "gpios"); } -/** - * of_get_gpiod_flags() - Get a GPIO descriptor and flags to use with GPIO API - * @np: device node to get GPIO from - * @index: index of the GPIO - * @flags: a flags pointer to fill in - * - * Returns GPIO descriptor to use with Linux generic GPIO API, or a errno - * value on the error condition. If @flags is not NULL the function also fills - * in flags for the GPIO. - */ -static inline struct gpio_desc *of_get_gpiod_flags(struct device_node *np, - int index, enum of_gpio_flags *flags) -{ - return of_get_named_gpiod_flags(np, "gpios", index, flags); -} - static inline int of_get_gpio_flags(struct device_node *np, int index, enum of_gpio_flags *flags) { diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h index 6fe8464ed767..881a7c3571f4 100644 --- a/include/linux/of_mdio.h +++ b/include/linux/of_mdio.h @@ -31,7 +31,12 @@ extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np); #else /* CONFIG_OF */ static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) { - return -ENOSYS; + /* + * Fall back to the non-DT function to register a bus. + * This way, we don't have to keep compat bits around in drivers. 
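+ *
+ * Callers may therefore use of_mdiobus_register() unconditionally and
+ * still get plain mdiobus_register() behaviour on !CONFIG_OF kernels.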
+ */ + + return mdiobus_register(mdio); } static inline struct phy_device *of_phy_find_device(struct device_node *phy_np) diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h index 41a13e70f41f..7944cdc27bed 100644 --- a/include/linux/omap-dma.h +++ b/include/linux/omap-dma.h @@ -10,7 +10,7 @@ struct dma_chan; -#if defined(CONFIG_DMA_OMAP) || defined(CONFIG_DMA_OMAP_MODULE) +#if defined(CONFIG_DMA_OMAP) || (defined(CONFIG_DMA_OMAP_MODULE) && defined(MODULE)) bool omap_dma_filter_fn(struct dma_chan *, void *); #else static inline bool omap_dma_filter_fn(struct dma_chan *c, void *d) diff --git a/include/linux/pci.h b/include/linux/pci.h index aab57b4abe7f..71d9673c1b2c 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -365,6 +365,7 @@ struct pci_dev { #endif phys_addr_t rom; /* Physical address of ROM if it's not from the BAR */ size_t romlen; /* Length of ROM if it's not from the BAR */ + char *driver_override; /* Driver name to force a match */ }; static inline struct pci_dev *pci_physfn(struct pci_dev *dev) @@ -477,6 +478,19 @@ static inline bool pci_is_root_bus(struct pci_bus *pbus) return !(pbus->parent); } +/** + * pci_is_bridge - check if the PCI device is a bridge + * @dev: PCI device + * + * Return true if the PCI device is a bridge, whether or not it has + * a subordinate bus. + */ +static inline bool pci_is_bridge(struct pci_dev *dev) +{ + return dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || + dev->hdr_type == PCI_HEADER_TYPE_CARDBUS; +} + static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev) { dev = pci_physfn(dev); @@ -518,7 +532,7 @@ static inline int pcibios_err_to_errno(int err) case PCIBIOS_FUNC_NOT_SUPPORTED: return -ENOENT; case PCIBIOS_BAD_VENDOR_ID: - return -EINVAL; + return -ENOTTY; case PCIBIOS_DEVICE_NOT_FOUND: return -ENODEV; case PCIBIOS_BAD_REGISTER_NUMBER: @@ -529,7 +543,7 @@ static inline int pcibios_err_to_errno(int err) return -ENOSPC; } - return -ENOTTY; + return -ERANGE; } /* Low-level architecture-dependent routines */ @@ -603,6 +617,9 @@ struct pci_error_handlers { /* PCI slot has been reset */ pci_ers_result_t (*slot_reset)(struct pci_dev *dev); + /* PCI function reset prepare or completed */ + void (*reset_notify)(struct pci_dev *dev, bool prepare); + /* Device driver may resume normal operations */ void (*resume)(struct pci_dev *dev); }; @@ -680,8 +697,8 @@ struct pci_driver { /** * PCI_VDEVICE - macro used to describe a specific pci device in short form - * @vendor: the vendor name - * @device: the 16 bit PCI Device ID + * @vend: the vendor name + * @dev: the 16 bit PCI Device ID * * This macro is used to create a struct pci_device_id that matches a * specific PCI device. The subvendor and subdevice fields will be set @@ -689,9 +706,9 @@ struct pci_driver { * private data.
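 *
 * A hypothetical match table entry (the 0x1234 device ID is made up):
 *
 *	static const struct pci_device_id foo_pci_ids[] = {
 *		{ PCI_VDEVICE(INTEL, 0x1234) },
 *		{ 0, }
 *	};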
*/ -#define PCI_VDEVICE(vendor, device) \ - PCI_VENDOR_ID_##vendor, (device), \ - PCI_ANY_ID, PCI_ANY_ID, 0, 0 +#define PCI_VDEVICE(vend, dev) \ + .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \ + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0 /* these external functions are only available when PCI support is enabled */ #ifdef CONFIG_PCI @@ -764,7 +781,7 @@ int pci_scan_slot(struct pci_bus *bus, int devfn); struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn); void pci_device_add(struct pci_dev *dev, struct pci_bus *bus); unsigned int pci_scan_child_bus(struct pci_bus *bus); -int __must_check pci_bus_add_device(struct pci_dev *dev); +void pci_bus_add_device(struct pci_dev *dev); void pci_read_bridge_bases(struct pci_bus *child); struct resource *pci_find_parent_resource(const struct pci_dev *dev, struct resource *res); @@ -1158,7 +1175,6 @@ struct msix_entry { #ifdef CONFIG_PCI_MSI int pci_msi_vec_count(struct pci_dev *dev); -int pci_enable_msi_block(struct pci_dev *dev, int nvec); void pci_msi_shutdown(struct pci_dev *dev); void pci_disable_msi(struct pci_dev *dev); int pci_msix_vec_count(struct pci_dev *dev); @@ -1188,8 +1204,6 @@ static inline int pci_enable_msix_exact(struct pci_dev *dev, } #else static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; } -static inline int pci_enable_msi_block(struct pci_dev *dev, int nvec) -{ return -ENOSYS; } static inline void pci_msi_shutdown(struct pci_dev *dev) { } static inline void pci_disable_msi(struct pci_dev *dev) { } static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; } @@ -1244,7 +1258,7 @@ static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { } static inline void pcie_ecrc_get_policy(char *str) { } #endif -#define pci_enable_msi(pdev) pci_enable_msi_block(pdev, 1) +#define pci_enable_msi(pdev) pci_enable_msi_exact(pdev, 1) #ifdef CONFIG_HT_IRQ /* The functions a driver should call */ @@ -1572,13 +1586,13 @@ extern unsigned long pci_hotplug_io_size; extern unsigned long pci_hotplug_mem_size; /* Architecture-specific versions may override these (weak) */ -int pcibios_add_platform_entries(struct pci_dev *dev); void pcibios_disable_device(struct pci_dev *dev); void pcibios_set_master(struct pci_dev *dev); int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state); int pcibios_add_device(struct pci_dev *dev); void pcibios_release_device(struct pci_dev *dev); +void pcibios_penalize_isa_irq(int irq, int active); #ifdef CONFIG_HIBERNATE_CALLBACKS extern struct dev_pm_ops pcibios_pm_ops; diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index d4de24b4d4c6..7fa31731c854 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -1631,8 +1631,6 @@ #define PCI_DEVICE_ID_ATT_VENUS_MODEM 0x480 #define PCI_VENDOR_ID_SPECIALIX 0x11cb -#define PCI_DEVICE_ID_SPECIALIX_IO8 0x2000 -#define PCI_DEVICE_ID_SPECIALIX_RIO 0x8000 #define PCI_SUBDEVICE_ID_SPECIALIX_SPEED4 0xa004 #define PCI_VENDOR_ID_ANALOG_DEVICES 0x11d4 @@ -2874,7 +2872,6 @@ #define PCI_DEVICE_ID_SCALEMP_VSMP_CTL 0x1010 #define PCI_VENDOR_ID_COMPUTONE 0x8e0e -#define PCI_DEVICE_ID_COMPUTONE_IP2EX 0x0291 #define PCI_DEVICE_ID_COMPUTONE_PG 0x0302 #define PCI_SUBVENDOR_ID_COMPUTONE 0x8e0e #define PCI_SUBDEVICE_ID_COMPUTONE_PG4 0x0001 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 3356abcfff18..3ef6ea12806a 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -402,6 +402,8 @@ struct perf_event { struct ring_buffer *rb; 
struct list_head rb_entry; + unsigned long rcu_batches; + int rcu_pending; /* poll related */ wait_queue_head_t waitq; diff --git a/include/linux/reset.h b/include/linux/reset.h index c0eda5023d74..349f150ae12c 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -2,6 +2,7 @@ #define _LINUX_RESET_H_ struct device; +struct device_node; struct reset_control; #ifdef CONFIG_RESET_CONTROLLER @@ -33,6 +34,9 @@ static inline struct reset_control *devm_reset_control_get_optional( return devm_reset_control_get(dev, id); } +struct reset_control *of_reset_control_get(struct device_node *node, + const char *id); + #else static inline int reset_control_reset(struct reset_control *rstc) @@ -75,6 +79,12 @@ static inline struct reset_control *devm_reset_control_get_optional( return ERR_PTR(-ENOSYS); } +static inline struct reset_control *of_reset_control_get( + struct device_node *node, const char *id) +{ + return ERR_PTR(-ENOSYS); +} + #endif /* CONFIG_RESET_CONTROLLER */ #endif diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 8e3e66ac0a52..953937ea5233 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -4,6 +4,7 @@ #include <linux/mutex.h> #include <linux/netdevice.h> +#include <linux/wait.h> #include <uapi/linux/rtnetlink.h> extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo); @@ -22,6 +23,10 @@ extern void rtnl_lock(void); extern void rtnl_unlock(void); extern int rtnl_trylock(void); extern int rtnl_is_locked(void); + +extern wait_queue_head_t netdev_unregistering_wq; +extern struct mutex net_mutex; + #ifdef CONFIG_PROVE_LOCKING extern int lockdep_rtnl_is_held(void); #else diff --git a/include/linux/sched.h b/include/linux/sched.h index 25f54c79f757..221b2bde3723 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -220,7 +220,7 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq); #define TASK_PARKED 512 #define TASK_STATE_MAX 1024 -#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP" +#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWP" extern char ___assert_task_state[1 - 2*!!( sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)]; @@ -1153,9 +1153,12 @@ struct sched_dl_entity { * * @dl_boosted tells if we are boosted due to DI. If so we are * outside bandwidth enforcement mechanism (but only until we - * exit the critical section). + * exit the critical section); + * + * @dl_yielded tells if task gave up the cpu before consuming + * all its available runtime during the last job. */ - int dl_throttled, dl_new, dl_boosted; + int dl_throttled, dl_new, dl_boosted, dl_yielded; /* * Bandwidth enforcement timer. 
Each -deadline task has its diff --git a/include/linux/types.h b/include/linux/types.h index 4d118ba11349..a0bb7048687f 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -142,6 +142,7 @@ typedef unsigned long blkcnt_t; #define pgoff_t unsigned long #endif +/* A dma_addr_t can hold any valid DMA or bus address for the platform */ #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT typedef u64 dma_addr_t; #else diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index f3539a15c411..f856e5a746fa 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -3669,6 +3669,18 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy); void cfg80211_sched_scan_stopped(struct wiphy *wiphy); /** + * cfg80211_sched_scan_stopped_rtnl - notify that the scheduled scan has stopped + * + * @wiphy: the wiphy on which the scheduled scan stopped + * + * The driver can call this function to inform cfg80211 that the + * scheduled scan had to be stopped, for whatever reason. The driver + * is then called back via the sched_scan_stop operation when done. + * This function should be called with rtnl locked. + */ +void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy); + +/** * cfg80211_inform_bss_width_frame - inform cfg80211 of a received BSS frame * * @wiphy: the wiphy reporting the BSS diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index 6c4f5eac98e7..216cecce65e9 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -127,6 +127,7 @@ int rt6_dump_route(struct rt6_info *rt, void *p_arg); void rt6_ifdown(struct net *net, struct net_device *dev); void rt6_mtu_change(struct net_device *dev, unsigned int mtu); void rt6_remove_prefsrc(struct inet6_ifaddr *ifp); +void rt6_clean_tohost(struct net *net, struct in6_addr *gateway); /* diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 80f500a29498..b2704fd0ec80 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -20,6 +20,11 @@ struct local_ports { int range[2]; }; +struct ping_group_range { + seqlock_t lock; + kgid_t range[2]; +}; + struct netns_ipv4 { #ifdef CONFIG_SYSCTL struct ctl_table_header *forw_hdr; @@ -66,13 +71,13 @@ struct netns_ipv4 { int sysctl_icmp_ratemask; int sysctl_icmp_errors_use_inbound_ifaddr; - struct local_ports sysctl_local_ports; + struct local_ports ip_local_ports; int sysctl_tcp_ecn; int sysctl_ip_no_pmtu_disc; int sysctl_ip_fwd_use_pmtu; - kgid_t sysctl_ping_group_range[2]; + struct ping_group_range ping_group_range; atomic_t dev_addr_genid; diff --git a/include/uapi/asm-generic/resource.h b/include/uapi/asm-generic/resource.h index f863428796d5..c6d10af50123 100644 --- a/include/uapi/asm-generic/resource.h +++ b/include/uapi/asm-generic/resource.h @@ -57,12 +57,5 @@ # define RLIM_INFINITY (~0UL) #endif -/* - * RLIMIT_STACK default maximum - some architectures override it: - */ -#ifndef _STK_LIM_MAX -# define _STK_LIM_MAX RLIM_INFINITY -#endif - #endif /* _UAPI_ASM_GENERIC_RESOURCE_H */ diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index 6db66783d268..333640608087 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -697,9 +697,11 @@ __SYSCALL(__NR_finit_module, sys_finit_module) __SYSCALL(__NR_sched_setattr, sys_sched_setattr) #define __NR_sched_getattr 275 __SYSCALL(__NR_sched_getattr, sys_sched_getattr) +#define __NR_renameat2 276 +__SYSCALL(__NR_renameat2, sys_renameat2) #undef __NR_syscalls -#define __NR_syscalls 276 +#define __NR_syscalls 277 /* * All syscalls below here 
should go away really, diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h index 11917f747cb4..4c31a366be16 100644 --- a/include/uapi/linux/audit.h +++ b/include/uapi/linux/audit.h @@ -331,9 +331,17 @@ enum { #define AUDIT_FAIL_PRINTK 1 #define AUDIT_FAIL_PANIC 2 +/* + * These bits disambiguate different calling conventions that share an + * ELF machine type, bitness, and endianness + */ +#define __AUDIT_ARCH_CONVENTION_MASK 0x30000000 +#define __AUDIT_ARCH_CONVENTION_MIPS64_N32 0x20000000 + /* distinguish syscall tables */ #define __AUDIT_ARCH_64BIT 0x80000000 #define __AUDIT_ARCH_LE 0x40000000 + #define AUDIT_ARCH_ALPHA (EM_ALPHA|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) #define AUDIT_ARCH_ARM (EM_ARM|__AUDIT_ARCH_LE) #define AUDIT_ARCH_ARMEB (EM_ARM) @@ -346,7 +354,11 @@ enum { #define AUDIT_ARCH_MIPS (EM_MIPS) #define AUDIT_ARCH_MIPSEL (EM_MIPS|__AUDIT_ARCH_LE) #define AUDIT_ARCH_MIPS64 (EM_MIPS|__AUDIT_ARCH_64BIT) +#define AUDIT_ARCH_MIPS64N32 (EM_MIPS|__AUDIT_ARCH_64BIT|\ + __AUDIT_ARCH_CONVENTION_MIPS64_N32) #define AUDIT_ARCH_MIPSEL64 (EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) +#define AUDIT_ARCH_MIPSEL64N32 (EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE|\ + __AUDIT_ARCH_CONVENTION_MIPS64_N32) #define AUDIT_ARCH_OPENRISC (EM_OPENRISC) #define AUDIT_ARCH_PARISC (EM_PARISC) #define AUDIT_ARCH_PARISC64 (EM_PARISC|__AUDIT_ARCH_64BIT) diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 1ba9d626aa83..194c1eab04d8 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -3856,6 +3856,8 @@ enum nl80211_ap_sme_features { * @NL80211_FEATURE_CELL_BASE_REG_HINTS: This driver has been tested * to work properly to suppport receiving regulatory hints from * cellular base stations. + * @NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL: (no longer available, only + * here to reserve the value for API/ABI compatibility) * @NL80211_FEATURE_SAE: This driver supports simultaneous authentication of * equals (SAE) with user space SME (NL80211_CMD_AUTHENTICATE) in station * mode @@ -3897,7 +3899,7 @@ enum nl80211_feature_flags { NL80211_FEATURE_HT_IBSS = 1 << 1, NL80211_FEATURE_INACTIVITY_TIMER = 1 << 2, NL80211_FEATURE_CELL_BASE_REG_HINTS = 1 << 3, - /* bit 4 is reserved - don't use */ + NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL = 1 << 4, NL80211_FEATURE_SAE = 1 << 5, NL80211_FEATURE_LOW_PRIORITY_SCAN = 1 << 6, NL80211_FEATURE_SCAN_FLUSH = 1 << 7, diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h index 32ec05a6572f..c33e1c489eb2 100644 --- a/include/xen/interface/io/blkif.h +++ b/include/xen/interface/io/blkif.h @@ -86,7 +86,7 @@ typedef uint64_t blkif_sector_t; * Interface%20manuals/100293068c.pdf * The backend can optionally provide three extra XenBus attributes to * further optimize the discard functionality: - * 'discard-aligment' - Devices that support discard functionality may + * 'discard-alignment' - Devices that support discard functionality may * internally allocate space in units that are bigger than the exported * logical block size. The discard-alignment parameter indicates how many bytes * the beginning of the partition is offset from the internal allocation unit's diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h index 0cd5ca333fac..de082130ba4b 100644 --- a/include/xen/interface/xen.h +++ b/include/xen/interface/xen.h @@ -275,9 +275,9 @@ DEFINE_GUEST_HANDLE_STRUCT(mmu_update); * NB. The fields are natural register size for this architecture. 
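The renameat2 wiring above (asm-generic syscall 276) adds a flags argument that plain rename(2) never had. A hedged userspace sketch of the RENAME_NOREPLACE use case; the fallback syscall number below is the asm-generic slot from this patch and per-architecture tables differ, so treat it as an assumption:

        #define _GNU_SOURCE
        #include <fcntl.h>              /* AT_FDCWD */
        #include <stdio.h>
        #include <sys/syscall.h>
        #include <unistd.h>

        #ifndef SYS_renameat2
        #define SYS_renameat2 276       /* asm-generic number; arch tables differ */
        #endif
        #ifndef RENAME_NOREPLACE
        #define RENAME_NOREPLACE (1 << 0)   /* fail instead of replacing the target */
        #endif

        int main(void)
        {
                /* Unlike rename(2), this fails with EEXIST if "new" already exists. */
                if (syscall(SYS_renameat2, AT_FDCWD, "old", AT_FDCWD, "new",
                            RENAME_NOREPLACE) < 0)
                        perror("renameat2");
                return 0;
        }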
*/ struct multicall_entry { - unsigned long op; - long result; - unsigned long args[6]; + xen_ulong_t op; + xen_long_t result; + xen_ulong_t args[6]; }; DEFINE_GUEST_HANDLE_STRUCT(multicall_entry); diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h index 2cf47175b12b..0b3149ed7eaa 100644 --- a/include/xen/xen-ops.h +++ b/include/xen/xen-ops.h @@ -9,10 +9,6 @@ DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu); void xen_arch_pre_suspend(void); void xen_arch_post_suspend(int suspend_cancelled); -void xen_arch_hvm_post_suspend(int suspend_cancelled); - -void xen_mm_pin_all(void); -void xen_mm_unpin_all(void); void xen_timer_resume(void); void xen_arch_resume(void); diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 9fcdaa705b6c..3f1ca934a237 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -348,7 +348,7 @@ struct cgrp_cset_link { * reference-counted, to improve performance when child cgroups * haven't been created. */ -static struct css_set init_css_set = { +struct css_set init_css_set = { .refcount = ATOMIC_INIT(1), .cgrp_links = LIST_HEAD_INIT(init_css_set.cgrp_links), .tasks = LIST_HEAD_INIT(init_css_set.tasks), @@ -1495,7 +1495,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type, */ if (!use_task_css_set_links) cgroup_enable_task_cg_lists(); -retry: + mutex_lock(&cgroup_tree_mutex); mutex_lock(&cgroup_mutex); @@ -1503,7 +1503,7 @@ retry: ret = parse_cgroupfs_options(data, &opts); if (ret) goto out_unlock; - +retry: /* look for a matching existing root */ if (!opts.subsys_mask && !opts.none && !opts.name) { cgrp_dfl_root_visible = true; @@ -1562,9 +1562,9 @@ retry: if (!atomic_inc_not_zero(&root->cgrp.refcnt)) { mutex_unlock(&cgroup_mutex); mutex_unlock(&cgroup_tree_mutex); - kfree(opts.release_agent); - kfree(opts.name); msleep(10); + mutex_lock(&cgroup_tree_mutex); + mutex_lock(&cgroup_mutex); goto retry; } diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c index 2bc4a2256444..345628c78b5b 100644 --- a/kernel/cgroup_freezer.c +++ b/kernel/cgroup_freezer.c @@ -21,6 +21,7 @@ #include <linux/uaccess.h> #include <linux/freezer.h> #include <linux/seq_file.h> +#include <linux/mutex.h> /* * A cgroup is freezing if any FREEZING flags are set. FREEZING_SELF is @@ -42,9 +43,10 @@ enum freezer_state_flags { struct freezer { struct cgroup_subsys_state css; unsigned int state; - spinlock_t lock; }; +static DEFINE_MUTEX(freezer_mutex); + static inline struct freezer *css_freezer(struct cgroup_subsys_state *css) { return css ? container_of(css, struct freezer, css) : NULL; @@ -93,7 +95,6 @@ freezer_css_alloc(struct cgroup_subsys_state *parent_css) if (!freezer) return ERR_PTR(-ENOMEM); - spin_lock_init(&freezer->lock); return &freezer->css; } @@ -110,14 +111,7 @@ static int freezer_css_online(struct cgroup_subsys_state *css) struct freezer *freezer = css_freezer(css); struct freezer *parent = parent_freezer(freezer); - /* - * The following double locking and freezing state inheritance - * guarantee that @cgroup can never escape ancestors' freezing - * states. See css_for_each_descendant_pre() for details. 
- */ - if (parent) - spin_lock_irq(&parent->lock); - spin_lock_nested(&freezer->lock, SINGLE_DEPTH_NESTING); + mutex_lock(&freezer_mutex); freezer->state |= CGROUP_FREEZER_ONLINE; @@ -126,10 +120,7 @@ static int freezer_css_online(struct cgroup_subsys_state *css) atomic_inc(&system_freezing_cnt); } - spin_unlock(&freezer->lock); - if (parent) - spin_unlock_irq(&parent->lock); - + mutex_unlock(&freezer_mutex); return 0; } @@ -144,14 +135,14 @@ static void freezer_css_offline(struct cgroup_subsys_state *css) { struct freezer *freezer = css_freezer(css); - spin_lock_irq(&freezer->lock); + mutex_lock(&freezer_mutex); if (freezer->state & CGROUP_FREEZING) atomic_dec(&system_freezing_cnt); freezer->state = 0; - spin_unlock_irq(&freezer->lock); + mutex_unlock(&freezer_mutex); } static void freezer_css_free(struct cgroup_subsys_state *css) @@ -175,7 +166,7 @@ static void freezer_attach(struct cgroup_subsys_state *new_css, struct task_struct *task; bool clear_frozen = false; - spin_lock_irq(&freezer->lock); + mutex_lock(&freezer_mutex); /* * Make the new tasks conform to the current state of @new_css. @@ -197,21 +188,13 @@ static void freezer_attach(struct cgroup_subsys_state *new_css, } } - spin_unlock_irq(&freezer->lock); - - /* - * Propagate FROZEN clearing upwards. We may race with - * update_if_frozen(), but as long as both work bottom-up, either - * update_if_frozen() sees child's FROZEN cleared or we clear the - * parent's FROZEN later. No parent w/ !FROZEN children can be - * left FROZEN. - */ + /* propagate FROZEN clearing upwards */ while (clear_frozen && (freezer = parent_freezer(freezer))) { - spin_lock_irq(&freezer->lock); freezer->state &= ~CGROUP_FROZEN; clear_frozen = freezer->state & CGROUP_FREEZING; - spin_unlock_irq(&freezer->lock); } + + mutex_unlock(&freezer_mutex); } /** @@ -228,9 +211,6 @@ static void freezer_fork(struct task_struct *task) { struct freezer *freezer; - rcu_read_lock(); - freezer = task_freezer(task); - /* * The root cgroup is non-freezable, so we can skip locking the * freezer. This is safe regardless of race with task migration. @@ -238,24 +218,18 @@ static void freezer_fork(struct task_struct *task) * to do. If we lost and root is the new cgroup, noop is still the * right thing to do. */ - if (!parent_freezer(freezer)) - goto out; + if (task_css_is_root(task, freezer_cgrp_id)) + return; - /* - * Grab @freezer->lock and freeze @task after verifying @task still - * belongs to @freezer and it's freezing. The former is for the - * case where we have raced against task migration and lost and - * @task is already in a different cgroup which may not be frozen. - * This isn't strictly necessary as freeze_task() is allowed to be - * called spuriously but let's do it anyway for, if nothing else, - * documentation. - */ - spin_lock_irq(&freezer->lock); - if (freezer == task_freezer(task) && (freezer->state & CGROUP_FREEZING)) + mutex_lock(&freezer_mutex); + rcu_read_lock(); + + freezer = task_freezer(task); + if (freezer->state & CGROUP_FREEZING) freeze_task(task); - spin_unlock_irq(&freezer->lock); -out: + rcu_read_unlock(); + mutex_unlock(&freezer_mutex); } /** @@ -281,22 +255,24 @@ static void update_if_frozen(struct cgroup_subsys_state *css) struct css_task_iter it; struct task_struct *task; - WARN_ON_ONCE(!rcu_read_lock_held()); - - spin_lock_irq(&freezer->lock); + lockdep_assert_held(&freezer_mutex); if (!(freezer->state & CGROUP_FREEZING) || (freezer->state & CGROUP_FROZEN)) - goto out_unlock; + return; /* are all (live) children frozen? 
*/ + rcu_read_lock(); css_for_each_child(pos, css) { struct freezer *child = css_freezer(pos); if ((child->state & CGROUP_FREEZER_ONLINE) && - !(child->state & CGROUP_FROZEN)) - goto out_unlock; + !(child->state & CGROUP_FROZEN)) { + rcu_read_unlock(); + return; + } } + rcu_read_unlock(); /* are all tasks frozen? */ css_task_iter_start(css, &it); @@ -317,21 +293,29 @@ static void update_if_frozen(struct cgroup_subsys_state *css) freezer->state |= CGROUP_FROZEN; out_iter_end: css_task_iter_end(&it); -out_unlock: - spin_unlock_irq(&freezer->lock); } static int freezer_read(struct seq_file *m, void *v) { struct cgroup_subsys_state *css = seq_css(m), *pos; + mutex_lock(&freezer_mutex); rcu_read_lock(); /* update states bottom-up */ - css_for_each_descendant_post(pos, css) + css_for_each_descendant_post(pos, css) { + if (!css_tryget(pos)) + continue; + rcu_read_unlock(); + update_if_frozen(pos); + rcu_read_lock(); + css_put(pos); + } + rcu_read_unlock(); + mutex_unlock(&freezer_mutex); seq_puts(m, freezer_state_strs(css_freezer(css)->state)); seq_putc(m, '\n'); @@ -373,7 +357,7 @@ static void freezer_apply_state(struct freezer *freezer, bool freeze, unsigned int state) { /* also synchronizes against task migration, see freezer_attach() */ - lockdep_assert_held(&freezer->lock); + lockdep_assert_held(&freezer_mutex); if (!(freezer->state & CGROUP_FREEZER_ONLINE)) return; @@ -414,31 +398,29 @@ static void freezer_change_state(struct freezer *freezer, bool freeze) * descendant will try to inherit its parent's FREEZING state as * CGROUP_FREEZING_PARENT. */ + mutex_lock(&freezer_mutex); rcu_read_lock(); css_for_each_descendant_pre(pos, &freezer->css) { struct freezer *pos_f = css_freezer(pos); struct freezer *parent = parent_freezer(pos_f); - spin_lock_irq(&pos_f->lock); + if (!css_tryget(pos)) + continue; + rcu_read_unlock(); - if (pos_f == freezer) { + if (pos_f == freezer) freezer_apply_state(pos_f, freeze, CGROUP_FREEZING_SELF); - } else { - /* - * Our update to @parent->state is already visible - * which is all we need. No need to lock @parent. - * For more info on synchronization, see - * freezer_post_create(). 
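The freezer conversion above replaces the per-cgroup spinlocks with the single freezer_mutex, so the descendant walks may now sleep and can no longer sit entirely under rcu_read_lock(). The resulting iteration idiom is worth calling out: pin the current position with css_tryget(), drop the RCU read lock for the blocking work, then retake it to continue the walk. A kernel-style sketch of the pattern as used in freezer_read() above (an illustrative fragment, not a standalone program; do_blocking_work() is a stand-in):

        rcu_read_lock();
        css_for_each_descendant_post(pos, css) {
                if (!css_tryget(pos))           /* css already dying, skip it */
                        continue;
                rcu_read_unlock();

                do_blocking_work(pos);          /* may sleep, e.g. take freezer_mutex */

                rcu_read_lock();
                css_put(pos);
        }
        rcu_read_unlock();

The css_tryget() reference is what keeps the iteration position valid across the window where RCU protection is dropped.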
- */ + else freezer_apply_state(pos_f, parent->state & CGROUP_FREEZING, CGROUP_FREEZING_PARENT); - } - spin_unlock_irq(&pos_f->lock); + rcu_read_lock(); + css_put(pos); } rcu_read_unlock(); + mutex_unlock(&freezer_mutex); } static int freezer_write(struct cgroup_subsys_state *css, struct cftype *cft, diff --git a/kernel/cpu.c b/kernel/cpu.c index a9e710eef0e2..247979a1b815 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -726,10 +726,12 @@ void set_cpu_present(unsigned int cpu, bool present) void set_cpu_online(unsigned int cpu, bool online) { - if (online) + if (online) { cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits)); - else + cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits)); + } else { cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits)); + } } void set_cpu_active(unsigned int cpu, bool active) diff --git a/kernel/events/core.c b/kernel/events/core.c index f83a71a3e46d..440eefc67397 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -1443,6 +1443,11 @@ group_sched_out(struct perf_event *group_event, cpuctx->exclusive = 0; } +struct remove_event { + struct perf_event *event; + bool detach_group; +}; + /* * Cross CPU call to remove a performance event * @@ -1451,12 +1456,15 @@ group_sched_out(struct perf_event *group_event, */ static int __perf_remove_from_context(void *info) { - struct perf_event *event = info; + struct remove_event *re = info; + struct perf_event *event = re->event; struct perf_event_context *ctx = event->ctx; struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); raw_spin_lock(&ctx->lock); event_sched_out(event, cpuctx, ctx); + if (re->detach_group) + perf_group_detach(event); list_del_event(event, ctx); if (!ctx->nr_events && cpuctx->task_ctx == ctx) { ctx->is_active = 0; @@ -1481,10 +1489,14 @@ static int __perf_remove_from_context(void *info) * When called from perf_event_exit_task, it's OK because the * context has been detached from its task. */ -static void perf_remove_from_context(struct perf_event *event) +static void perf_remove_from_context(struct perf_event *event, bool detach_group) { struct perf_event_context *ctx = event->ctx; struct task_struct *task = ctx->task; + struct remove_event re = { + .event = event, + .detach_group = detach_group, + }; lockdep_assert_held(&ctx->mutex); @@ -1493,12 +1505,12 @@ static void perf_remove_from_context(struct perf_event *event) * Per cpu events are removed via an smp call and * the removal is always successful. */ - cpu_function_call(event->cpu, __perf_remove_from_context, event); + cpu_function_call(event->cpu, __perf_remove_from_context, &re); return; } retry: - if (!task_function_call(task, __perf_remove_from_context, event)) + if (!task_function_call(task, __perf_remove_from_context, &re)) return; raw_spin_lock_irq(&ctx->lock); @@ -1515,6 +1527,8 @@ retry: * Since the task isn't running, its safe to remove the event, us * holding the ctx->lock ensures the task won't get scheduled in. 
*/ + if (detach_group) + perf_group_detach(event); list_del_event(event, ctx); raw_spin_unlock_irq(&ctx->lock); } @@ -3178,7 +3192,8 @@ static void free_event_rcu(struct rcu_head *head) } static void ring_buffer_put(struct ring_buffer *rb); -static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb); +static void ring_buffer_attach(struct perf_event *event, + struct ring_buffer *rb); static void unaccount_event_cpu(struct perf_event *event, int cpu) { @@ -3238,8 +3253,6 @@ static void free_event(struct perf_event *event) unaccount_event(event); if (event->rb) { - struct ring_buffer *rb; - /* * Can happen when we close an event with re-directed output. * @@ -3247,12 +3260,7 @@ static void free_event(struct perf_event *event) * over us; possibly making our ring_buffer_put() the last. */ mutex_lock(&event->mmap_mutex); - rb = event->rb; - if (rb) { - rcu_assign_pointer(event->rb, NULL); - ring_buffer_detach(event, rb); - ring_buffer_put(rb); /* could be last */ - } + ring_buffer_attach(event, NULL); mutex_unlock(&event->mmap_mutex); } @@ -3281,10 +3289,7 @@ int perf_event_release_kernel(struct perf_event *event) * to trigger the AB-BA case. */ mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING); - raw_spin_lock_irq(&ctx->lock); - perf_group_detach(event); - raw_spin_unlock_irq(&ctx->lock); - perf_remove_from_context(event); + perf_remove_from_context(event, true); mutex_unlock(&ctx->mutex); free_event(event); @@ -3839,28 +3844,47 @@ unlock: static void ring_buffer_attach(struct perf_event *event, struct ring_buffer *rb) { + struct ring_buffer *old_rb = NULL; unsigned long flags; - if (!list_empty(&event->rb_entry)) - return; + if (event->rb) { + /* + * Should be impossible, we set this when removing + * event->rb_entry and wait/clear when adding event->rb_entry. + */ + WARN_ON_ONCE(event->rcu_pending); - spin_lock_irqsave(&rb->event_lock, flags); - if (list_empty(&event->rb_entry)) - list_add(&event->rb_entry, &rb->event_list); - spin_unlock_irqrestore(&rb->event_lock, flags); -} + old_rb = event->rb; + event->rcu_batches = get_state_synchronize_rcu(); + event->rcu_pending = 1; -static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb) -{ - unsigned long flags; + spin_lock_irqsave(&old_rb->event_lock, flags); + list_del_rcu(&event->rb_entry); + spin_unlock_irqrestore(&old_rb->event_lock, flags); + } - if (list_empty(&event->rb_entry)) - return; + if (event->rcu_pending && rb) { + cond_synchronize_rcu(event->rcu_batches); + event->rcu_pending = 0; + } + + if (rb) { + spin_lock_irqsave(&rb->event_lock, flags); + list_add_rcu(&event->rb_entry, &rb->event_list); + spin_unlock_irqrestore(&rb->event_lock, flags); + } + + rcu_assign_pointer(event->rb, rb); - spin_lock_irqsave(&rb->event_lock, flags); - list_del_init(&event->rb_entry); - wake_up_all(&event->waitq); - spin_unlock_irqrestore(&rb->event_lock, flags); + if (old_rb) { + ring_buffer_put(old_rb); + /* + * Since we detached before setting the new rb, so that we + * could attach the new rb, we could have missed a wakeup. + * Provide it now. 
+ */ + wake_up_all(&event->waitq); + } } static void ring_buffer_wakeup(struct perf_event *event) @@ -3929,7 +3953,7 @@ static void perf_mmap_close(struct vm_area_struct *vma) { struct perf_event *event = vma->vm_file->private_data; - struct ring_buffer *rb = event->rb; + struct ring_buffer *rb = ring_buffer_get(event); struct user_struct *mmap_user = rb->mmap_user; int mmap_locked = rb->mmap_locked; unsigned long size = perf_data_size(rb); @@ -3937,18 +3961,14 @@ static void perf_mmap_close(struct vm_area_struct *vma) atomic_dec(&rb->mmap_count); if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) - return; + goto out_put; - /* Detach current event from the buffer. */ - rcu_assign_pointer(event->rb, NULL); - ring_buffer_detach(event, rb); + ring_buffer_attach(event, NULL); mutex_unlock(&event->mmap_mutex); /* If there's still other mmap()s of this buffer, we're done. */ - if (atomic_read(&rb->mmap_count)) { - ring_buffer_put(rb); /* can't be last */ - return; - } + if (atomic_read(&rb->mmap_count)) + goto out_put; /* * No other mmap()s, detach from all other events that might redirect @@ -3978,11 +3998,9 @@ again: * still restart the iteration to make sure we're not now * iterating the wrong list. */ - if (event->rb == rb) { - rcu_assign_pointer(event->rb, NULL); - ring_buffer_detach(event, rb); - ring_buffer_put(rb); /* can't be last, we still have one */ - } + if (event->rb == rb) + ring_buffer_attach(event, NULL); + mutex_unlock(&event->mmap_mutex); put_event(event); @@ -4007,6 +4025,7 @@ again: vma->vm_mm->pinned_vm -= mmap_locked; free_uid(mmap_user); +out_put: ring_buffer_put(rb); /* could be last */ } @@ -4124,7 +4143,6 @@ again: vma->vm_mm->pinned_vm += extra; ring_buffer_attach(event, rb); - rcu_assign_pointer(event->rb, rb); perf_event_init_userpage(event); perf_event_update_userpage(event); @@ -5408,6 +5426,9 @@ struct swevent_htable { /* Recursion avoidance in each contexts */ int recursion[PERF_NR_CONTEXTS]; + + /* Keeps track of cpu being initialized/exited */ + bool online; }; static DEFINE_PER_CPU(struct swevent_htable, swevent_htable); @@ -5654,8 +5675,14 @@ static int perf_swevent_add(struct perf_event *event, int flags) hwc->state = !(flags & PERF_EF_START); head = find_swevent_head(swhash, event); - if (WARN_ON_ONCE(!head)) + if (!head) { + /* + * We can race with cpu hotplug code. Do not + * WARN if the cpu just got unplugged. + */ + WARN_ON_ONCE(swhash->online); return -EINVAL; + } hlist_add_head_rcu(&event->hlist_entry, head); @@ -6914,7 +6941,7 @@ err_size: static int perf_event_set_output(struct perf_event *event, struct perf_event *output_event) { - struct ring_buffer *rb = NULL, *old_rb = NULL; + struct ring_buffer *rb = NULL; int ret = -EINVAL; if (!output_event) @@ -6942,8 +6969,6 @@ set: if (atomic_read(&event->mmap_count)) goto unlock; - old_rb = event->rb; - if (output_event) { /* get the rb we want to redirect to */ rb = ring_buffer_get(output_event); @@ -6951,23 +6976,7 @@ set: goto unlock; } - if (old_rb) - ring_buffer_detach(event, old_rb); - - if (rb) - ring_buffer_attach(event, rb); - - rcu_assign_pointer(event->rb, rb); - - if (old_rb) { - ring_buffer_put(old_rb); - /* - * Since we detached before setting the new rb, so that we - * could attach the new rb, we could have missed a wakeup. - * Provide it now. 
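ring_buffer_attach() above now serves as both attach and detach, and it avoids paying for a full grace period on every buffer swap: get_state_synchronize_rcu() snapshots a cookie when the entry is unlinked, and cond_synchronize_rcu() later blocks only if no grace period has elapsed since that snapshot. A kernel-style sketch of the cookie pattern (the two RCU helpers are the real API; the struct and functions around them are invented for illustration):

        struct obj {
                struct list_head node;
                unsigned long rcu_batches;      /* cookie from get_state_synchronize_rcu() */
                int rcu_pending;
        };

        static void obj_unlink(struct obj *o)
        {
                o->rcu_batches = get_state_synchronize_rcu();
                o->rcu_pending = 1;
                list_del_rcu(&o->node);
        }

        static void obj_relink(struct obj *o, struct list_head *head)
        {
                if (o->rcu_pending) {
                        /* no-op if a grace period already elapsed since unlink */
                        cond_synchronize_rcu(o->rcu_batches);
                        o->rcu_pending = 0;
                }
                list_add_rcu(&o->node, head);
        }

In the common case enough time passes between detach and the next attach that the cond_synchronize_rcu() call returns immediately.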
- */ - wake_up_all(&event->waitq); - } + ring_buffer_attach(event, rb); ret = 0; unlock: @@ -7018,6 +7027,9 @@ SYSCALL_DEFINE5(perf_event_open, if (attr.freq) { if (attr.sample_freq > sysctl_perf_event_sample_rate) return -EINVAL; + } else { + if (attr.sample_period & (1ULL << 63)) + return -EINVAL; } /* @@ -7165,7 +7177,7 @@ SYSCALL_DEFINE5(perf_event_open, struct perf_event_context *gctx = group_leader->ctx; mutex_lock(&gctx->mutex); - perf_remove_from_context(group_leader); + perf_remove_from_context(group_leader, false); /* * Removing from the context ends up with disabled @@ -7175,7 +7187,7 @@ SYSCALL_DEFINE5(perf_event_open, perf_event__state_init(group_leader); list_for_each_entry(sibling, &group_leader->sibling_list, group_entry) { - perf_remove_from_context(sibling); + perf_remove_from_context(sibling, false); perf_event__state_init(sibling); put_ctx(gctx); } @@ -7305,7 +7317,7 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu) mutex_lock(&src_ctx->mutex); list_for_each_entry_safe(event, tmp, &src_ctx->event_list, event_entry) { - perf_remove_from_context(event); + perf_remove_from_context(event, false); unaccount_event_cpu(event, src_cpu); put_ctx(src_ctx); list_add(&event->migrate_entry, &events); @@ -7367,13 +7379,7 @@ __perf_event_exit_task(struct perf_event *child_event, struct perf_event_context *child_ctx, struct task_struct *child) { - if (child_event->parent) { - raw_spin_lock_irq(&child_ctx->lock); - perf_group_detach(child_event); - raw_spin_unlock_irq(&child_ctx->lock); - } - - perf_remove_from_context(child_event); + perf_remove_from_context(child_event, !!child_event->parent); /* * It can happen that the parent exits first, and has events @@ -7724,6 +7730,8 @@ int perf_event_init_context(struct task_struct *child, int ctxn) * swapped under us. 
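The perf_event_open() hunk above rejects a sample_period with bit 63 set, since the period is handled as a signed 64-bit quantity internally and a "negative" period would corrupt the sampling arithmetic. A small userspace probe of the new check (the software clock event is an arbitrary choice; on a patched kernel the call fails with EINVAL):

        #define _GNU_SOURCE
        #include <linux/perf_event.h>
        #include <stdio.h>
        #include <string.h>
        #include <sys/syscall.h>
        #include <unistd.h>

        int main(void)
        {
                struct perf_event_attr attr;

                memset(&attr, 0, sizeof(attr));
                attr.type = PERF_TYPE_SOFTWARE;
                attr.size = sizeof(attr);
                attr.config = PERF_COUNT_SW_CPU_CLOCK;
                attr.sample_period = 1ULL << 63;    /* negative when treated as s64 */

                /* attr.freq is 0, so the new else-branch check applies */
                if (syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0) < 0)
                        perror("perf_event_open");  /* EINVAL on a patched kernel */
                return 0;
        }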
*/ parent_ctx = perf_pin_task_context(parent, ctxn); + if (!parent_ctx) + return 0; /* * No need to check if parent_ctx != NULL here; since we saw @@ -7835,6 +7843,7 @@ static void perf_event_init_cpu(int cpu) struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); mutex_lock(&swhash->hlist_mutex); + swhash->online = true; if (swhash->hlist_refcount > 0) { struct swevent_hlist *hlist; @@ -7857,14 +7866,14 @@ static void perf_pmu_rotate_stop(struct pmu *pmu) static void __perf_event_exit_context(void *__info) { + struct remove_event re = { .detach_group = false }; struct perf_event_context *ctx = __info; - struct perf_event *event; perf_pmu_rotate_stop(ctx->pmu); rcu_read_lock(); - list_for_each_entry_rcu(event, &ctx->event_list, event_entry) - __perf_remove_from_context(event); + list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry) + __perf_remove_from_context(&re); rcu_read_unlock(); } @@ -7892,6 +7901,7 @@ static void perf_event_exit_cpu(int cpu) perf_event_exit_cpu_context(cpu); mutex_lock(&swhash->hlist_mutex); + swhash->online = false; swevent_hlist_release(swhash); mutex_unlock(&swhash->hlist_mutex); } diff --git a/kernel/futex.c b/kernel/futex.c index 5f589279e462..81dbe773ce4c 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -745,7 +745,8 @@ void exit_pi_state_list(struct task_struct *curr) static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, - union futex_key *key, struct futex_pi_state **ps) + union futex_key *key, struct futex_pi_state **ps, + struct task_struct *task) { struct futex_pi_state *pi_state = NULL; struct futex_q *this, *next; @@ -786,6 +787,16 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, return -EINVAL; } + /* + * Protect against a corrupted uval. If uval + * is 0x80000000 then pid is 0 and the waiter + * bit is set. So the deadlock check in the + * calling code has failed and we did not fall + * into the check above due to !pid. + */ + if (task && pi_state->owner == task) + return -EDEADLK; + atomic_inc(&pi_state->refcount); *ps = pi_state; @@ -803,6 +814,11 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, if (!p) return -ESRCH; + if (!p->mm) { + put_task_struct(p); + return -EPERM; + } + /* * We need to look at the task state flags to figure out, * whether the task is exiting. To protect against the do_exit @@ -935,7 +951,7 @@ retry: * We dont have the lock. Look up the PI state (or create it if * we are the first waiter): */ - ret = lookup_pi_state(uval, hb, key, ps); + ret = lookup_pi_state(uval, hb, key, ps, task); if (unlikely(ret)) { switch (ret) { @@ -1347,7 +1363,7 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, * * Return: * 0 - failed to acquire the lock atomically; - * 1 - acquired the lock; + * >0 - acquired the lock, return value is vpid of the top_waiter * <0 - error */ static int futex_proxy_trylock_atomic(u32 __user *pifutex, @@ -1358,7 +1374,7 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex, { struct futex_q *top_waiter = NULL; u32 curval; - int ret; + int ret, vpid; if (get_futex_value_locked(&curval, pifutex)) return -EFAULT; @@ -1386,11 +1402,13 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex, * the contended case or if set_waiters is 1. The pi_state is returned * in ps in contended cases. 
*/ + vpid = task_pid_vnr(top_waiter->task); ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task, set_waiters); - if (ret == 1) + if (ret == 1) { requeue_pi_wake_futex(top_waiter, key2, hb2); - + return vpid; + } return ret; } @@ -1421,7 +1439,6 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags, struct futex_pi_state *pi_state = NULL; struct futex_hash_bucket *hb1, *hb2; struct futex_q *this, *next; - u32 curval2; if (requeue_pi) { /* @@ -1509,16 +1526,25 @@ retry_private: * At this point the top_waiter has either taken uaddr2 or is * waiting on it. If the former, then the pi_state will not * exist yet, look it up one more time to ensure we have a - * reference to it. + * reference to it. If the lock was taken, ret contains the + * vpid of the top waiter task. */ - if (ret == 1) { + if (ret > 0) { WARN_ON(pi_state); drop_count++; task_count++; - ret = get_futex_value_locked(&curval2, uaddr2); - if (!ret) - ret = lookup_pi_state(curval2, hb2, &key2, - &pi_state); + /* + * If we acquired the lock, then the user + * space value of uaddr2 should be vpid. It + * cannot be changed by the top waiter as it + * is blocked on hb2 lock if it tries to do + * so. If something fiddled with it behind our + * back the pi state lookup might unearth + * it. So we rather use the known value than + * rereading and handing potential crap to + * lookup_pi_state. + */ + ret = lookup_pi_state(ret, hb2, &key2, &pi_state, NULL); } switch (ret) { diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 6b715c0af1b1..e0501fe7140d 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -990,11 +990,8 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, /* Remove an active timer from the queue: */ ret = remove_hrtimer(timer, base); - /* Switch the timer base, if necessary: */ - new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED); - if (mode & HRTIMER_MODE_REL) { - tim = ktime_add_safe(tim, new_base->get_time()); + tim = ktime_add_safe(tim, base->get_time()); /* * CONFIG_TIME_LOW_RES is a temporary way for architectures * to signal that they simply return xtime in @@ -1009,6 +1006,9 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, hrtimer_set_expires_range_ns(timer, tim, delta_ns); + /* Switch the timer base, if necessary: */ + new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED); + timer_stats_hrtimer_set_start_info(timer); leftmost = enqueue_hrtimer(timer, new_base); diff --git a/kernel/kexec.c b/kernel/kexec.c index c8380ad203bc..28c57069ef68 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -1683,6 +1683,14 @@ int kernel_kexec(void) kexec_in_progress = true; kernel_restart_prepare(NULL); migrate_to_reboot_cpu(); + + /* + * migrate_to_reboot_cpu() disables CPU hotplug assuming that + * no further code needs to use CPU hotplug (which is true in + * the reboot case). However, the kexec path depends on using + * CPU hotplug again; so re-enable it here. + */ + cpu_hotplug_enable(); printk(KERN_EMERG "Starting new kernel\n"); machine_shutdown(); } diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index aa4dff04b594..a620d4d08ca6 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -343,9 +343,16 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, * top_waiter can be NULL, when we are in the deboosting * mode! 
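The futex changes above harden the PI paths: lookup_pi_state() now returns -EDEADLK when the pi_state it finds is already owned by the requesting task, and -EPERM when the alleged owner is a task without an mm (a corrupted or hostile user value naming a kernel thread). The kernel-side self-deadlock detection can be observed directly with a raw FUTEX_LOCK_PI call; a hedged userspace sketch, where storing our own TID in the futex word simulates "we already hold it":

        #define _GNU_SOURCE
        #include <linux/futex.h>
        #include <stdint.h>
        #include <stdio.h>
        #include <sys/syscall.h>
        #include <unistd.h>

        int main(void)
        {
                /* A PI futex word holds the owner's TID; claim it ourselves. */
                uint32_t futex_word = (uint32_t)syscall(SYS_gettid);

                /* The kernel sees owner == caller and fails fast with EDEADLK
                 * rather than queueing the task behind itself. */
                if (syscall(SYS_futex, &futex_word, FUTEX_LOCK_PI, 0,
                            NULL, NULL, 0) < 0)
                        perror("FUTEX_LOCK_PI");
                return 0;
        }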
*/ - if (top_waiter && (!task_has_pi_waiters(task) || - top_waiter != task_top_pi_waiter(task))) - goto out_unlock_pi; + if (top_waiter) { + if (!task_has_pi_waiters(task)) + goto out_unlock_pi; + /* + * If deadlock detection is off, we stop here if we + * are not the top pi waiter of the task. + */ + if (!detect_deadlock && top_waiter != task_top_pi_waiter(task)) + goto out_unlock_pi; + } /* * When deadlock detection is off then we check, if further @@ -361,7 +368,12 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, goto retry; } - /* Deadlock detection */ + /* + * Deadlock detection. If the lock is the same as the original + * lock which caused us to walk the lock chain or if the + * current lock is owned by the task which initiated the chain + * walk, we detected a deadlock. + */ if (lock == orig_lock || rt_mutex_owner(lock) == top_task) { debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock); raw_spin_unlock(&lock->wait_lock); @@ -527,6 +539,18 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, unsigned long flags; int chain_walk = 0, res; + /* + * Early deadlock detection. We really don't want the task to + * enqueue on itself just to untangle the mess later. It's not + * only an optimization. We drop the locks, so another waiter + * can come in before the chain walk detects the deadlock. So + * the other will detect the deadlock and return -EDEADLOCK, + * which is wrong, as the other waiter is not in a deadlock + * situation. + */ + if (detect_deadlock && owner == task) + return -EDEADLK; + raw_spin_lock_irqsave(&task->pi_lock, flags); __rt_mutex_adjust_prio(task); waiter->task = task; diff --git a/kernel/resource.c b/kernel/resource.c index 8957d686e29b..3c2237ac32db 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -1288,13 +1288,10 @@ int iomem_map_sanity_check(resource_size_t addr, unsigned long size) if (p->flags & IORESOURCE_BUSY) continue; - printk(KERN_WARNING "resource map sanity check conflict: " - "0x%llx 0x%llx 0x%llx 0x%llx %s\n", + printk(KERN_WARNING "resource sanity check: requesting [mem %#010llx-%#010llx], which spans more than %s %pR\n", (unsigned long long)addr, (unsigned long long)(addr + size - 1), - (unsigned long long)p->start, - (unsigned long long)p->end, - p->name); + p->name, p); err = -1; break; } diff --git a/kernel/sched/core.c b/kernel/sched/core.c index d9d8ece46a15..0a7251678982 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2592,8 +2592,14 @@ pick_next_task(struct rq *rq, struct task_struct *prev) if (likely(prev->sched_class == class && rq->nr_running == rq->cfs.h_nr_running)) { p = fair_sched_class.pick_next_task(rq, prev); - if (likely(p && p != RETRY_TASK)) - return p; + if (unlikely(p == RETRY_TASK)) + goto again; + + /* assumes fair_sched_class->next == idle_sched_class */ + if (unlikely(!p)) + p = idle_sched_class.pick_next_task(rq, prev); + + return p; } again: @@ -3124,6 +3130,7 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr) dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime); dl_se->dl_throttled = 0; dl_se->dl_new = 1; + dl_se->dl_yielded = 0; } static void __setscheduler_params(struct task_struct *p, @@ -3188,17 +3195,40 @@ __getparam_dl(struct task_struct *p, struct sched_attr *attr) * We ask for the deadline not being zero, and greater or equal * than the runtime, as well as the period of being zero or * greater than deadline. 
Furthermore, we have to be sure that - * user parameters are above the internal resolution (1us); we - * check sched_runtime only since it is always the smaller one. + * user parameters are above the internal resolution of 1us (we + * check sched_runtime only since it is always the smaller one) and + * below 2^63 ns (we have to check both sched_deadline and + * sched_period, as the latter can be zero). */ static bool __checkparam_dl(const struct sched_attr *attr) { - return attr && attr->sched_deadline != 0 && - (attr->sched_period == 0 || - (s64)(attr->sched_period - attr->sched_deadline) >= 0) && - (s64)(attr->sched_deadline - attr->sched_runtime ) >= 0 && - attr->sched_runtime >= (2 << (DL_SCALE - 1)); + /* deadline != 0 */ + if (attr->sched_deadline == 0) + return false; + + /* + * Since we truncate DL_SCALE bits, make sure we're at least + * that big. + */ + if (attr->sched_runtime < (1ULL << DL_SCALE)) + return false; + + /* + * Since we use the MSB for wrap-around and sign issues, make + * sure it's not set (mind that period can be equal to zero). + */ + if (attr->sched_deadline & (1ULL << 63) || + attr->sched_period & (1ULL << 63)) + return false; + + /* runtime <= deadline <= period (if period != 0) */ + if ((attr->sched_period != 0 && + attr->sched_period < attr->sched_deadline) || + attr->sched_deadline < attr->sched_runtime) + return false; + + return true; } /* @@ -3639,6 +3669,7 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) * sys_sched_setattr - same as above, but with extended sched_attr * @pid: the pid in question. * @uattr: structure containing the extended parameters. + * @flags: for future extension. */ SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr, unsigned int, flags) @@ -3650,8 +3681,12 @@ SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr, if (!uattr || pid < 0 || flags) return -EINVAL; - if (sched_copy_attr(uattr, &attr)) - return -EFAULT; + retval = sched_copy_attr(uattr, &attr); + if (retval) + return retval; + + if (attr.sched_policy < 0) + return -EINVAL; rcu_read_lock(); retval = -ESRCH; @@ -3701,7 +3736,7 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) */ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) { - struct sched_param lp; + struct sched_param lp = { .sched_priority = 0 }; struct task_struct *p; int retval; @@ -3718,11 +3753,8 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) if (retval) goto out_unlock; - if (task_has_dl_policy(p)) { - retval = -EINVAL; - goto out_unlock; - } - lp.sched_priority = p->rt_priority; + if (task_has_rt_policy(p)) + lp.sched_priority = p->rt_priority; rcu_read_unlock(); /* @@ -3783,6 +3815,7 @@ err_size: * @pid: the pid in question. * @uattr: structure containing the extended parameters. * @size: sizeof(attr) for fwd/bwd comp. + * @flags: for future extension. 
*/ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, unsigned int, size, unsigned int, flags) @@ -5043,7 +5076,6 @@ static int sched_cpu_active(struct notifier_block *nfb, unsigned long action, void *hcpu) { switch (action & ~CPU_TASKS_FROZEN) { - case CPU_STARTING: case CPU_DOWN_FAILED: set_cpu_active((long)hcpu, true); return NOTIFY_OK; @@ -6017,6 +6049,8 @@ sd_numa_init(struct sched_domain_topology_level *tl, int cpu) , .last_balance = jiffies, .balance_interval = sd_weight, + .max_newidle_lb_cost = 0, + .next_decay_max_lb_cost = jiffies, }; SD_INIT_NAME(sd, NUMA); sd->private = &tl->data; diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c index 5b9bb42b2d47..bd95963dae80 100644 --- a/kernel/sched/cpudeadline.c +++ b/kernel/sched/cpudeadline.c @@ -13,6 +13,7 @@ #include <linux/gfp.h> #include <linux/kernel.h> +#include <linux/slab.h> #include "cpudeadline.h" static inline int parent(int i) @@ -39,8 +40,10 @@ static void cpudl_exchange(struct cpudl *cp, int a, int b) { int cpu_a = cp->elements[a].cpu, cpu_b = cp->elements[b].cpu; - swap(cp->elements[a], cp->elements[b]); - swap(cp->cpu_to_idx[cpu_a], cp->cpu_to_idx[cpu_b]); + swap(cp->elements[a].cpu, cp->elements[b].cpu); + swap(cp->elements[a].dl , cp->elements[b].dl ); + + swap(cp->elements[cpu_a].idx, cp->elements[cpu_b].idx); } static void cpudl_heapify(struct cpudl *cp, int idx) @@ -140,7 +143,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid) WARN_ON(!cpu_present(cpu)); raw_spin_lock_irqsave(&cp->lock, flags); - old_idx = cp->cpu_to_idx[cpu]; + old_idx = cp->elements[cpu].idx; if (!is_valid) { /* remove item */ if (old_idx == IDX_INVALID) { @@ -155,8 +158,8 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid) cp->elements[old_idx].dl = cp->elements[cp->size - 1].dl; cp->elements[old_idx].cpu = new_cpu; cp->size--; - cp->cpu_to_idx[new_cpu] = old_idx; - cp->cpu_to_idx[cpu] = IDX_INVALID; + cp->elements[new_cpu].idx = old_idx; + cp->elements[cpu].idx = IDX_INVALID; while (old_idx > 0 && dl_time_before( cp->elements[parent(old_idx)].dl, cp->elements[old_idx].dl)) { @@ -173,7 +176,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid) cp->size++; cp->elements[cp->size - 1].dl = 0; cp->elements[cp->size - 1].cpu = cpu; - cp->cpu_to_idx[cpu] = cp->size - 1; + cp->elements[cpu].idx = cp->size - 1; cpudl_change_key(cp, cp->size - 1, dl); cpumask_clear_cpu(cpu, cp->free_cpus); } else { @@ -195,10 +198,21 @@ int cpudl_init(struct cpudl *cp) memset(cp, 0, sizeof(*cp)); raw_spin_lock_init(&cp->lock); cp->size = 0; - for (i = 0; i < NR_CPUS; i++) - cp->cpu_to_idx[i] = IDX_INVALID; - if (!alloc_cpumask_var(&cp->free_cpus, GFP_KERNEL)) + + cp->elements = kcalloc(nr_cpu_ids, + sizeof(struct cpudl_item), + GFP_KERNEL); + if (!cp->elements) + return -ENOMEM; + + if (!alloc_cpumask_var(&cp->free_cpus, GFP_KERNEL)) { + kfree(cp->elements); return -ENOMEM; + } + + for_each_possible_cpu(i) + cp->elements[i].idx = IDX_INVALID; + cpumask_setall(cp->free_cpus); return 0; @@ -210,7 +224,6 @@ int cpudl_init(struct cpudl *cp) */ void cpudl_cleanup(struct cpudl *cp) { - /* - * nothing to do for the moment - */ + free_cpumask_var(cp->free_cpus); + kfree(cp->elements); } diff --git a/kernel/sched/cpudeadline.h b/kernel/sched/cpudeadline.h index a202789a412c..538c9796ad4a 100644 --- a/kernel/sched/cpudeadline.h +++ b/kernel/sched/cpudeadline.h @@ -5,17 +5,17 @@ #define IDX_INVALID -1 -struct array_item { +struct cpudl_item { u64 dl; int cpu; + int idx; }; struct cpudl { 
raw_spinlock_t lock; int size; - int cpu_to_idx[NR_CPUS]; - struct array_item elements[NR_CPUS]; cpumask_var_t free_cpus; + struct cpudl_item *elements; }; diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c index 8b836b376d91..8834243abee2 100644 --- a/kernel/sched/cpupri.c +++ b/kernel/sched/cpupri.c @@ -30,6 +30,7 @@ #include <linux/gfp.h> #include <linux/sched.h> #include <linux/sched/rt.h> +#include <linux/slab.h> #include "cpupri.h" /* Convert between a 140 based task->prio, and our 102 based cpupri */ @@ -70,8 +71,7 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p, int idx = 0; int task_pri = convert_prio(p->prio); - if (task_pri >= MAX_RT_PRIO) - return 0; + BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES); for (idx = 0; idx < task_pri; idx++) { struct cpupri_vec *vec = &cp->pri_to_cpu[idx]; @@ -219,8 +219,13 @@ int cpupri_init(struct cpupri *cp) goto cleanup; } + cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL); + if (!cp->cpu_to_pri) + goto cleanup; + for_each_possible_cpu(i) cp->cpu_to_pri[i] = CPUPRI_INVALID; + return 0; cleanup: @@ -237,6 +242,7 @@ void cpupri_cleanup(struct cpupri *cp) { int i; + kfree(cp->cpu_to_pri); for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) free_cpumask_var(cp->pri_to_cpu[i].mask); } diff --git a/kernel/sched/cpupri.h b/kernel/sched/cpupri.h index f6d756173491..6b033347fdfd 100644 --- a/kernel/sched/cpupri.h +++ b/kernel/sched/cpupri.h @@ -17,7 +17,7 @@ struct cpupri_vec { struct cpupri { struct cpupri_vec pri_to_cpu[CPUPRI_NR_PRIORITIES]; - int cpu_to_pri[NR_CPUS]; + int *cpu_to_pri; }; #ifdef CONFIG_SMP diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index a95097cb4591..72fdf06ef865 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -332,50 +332,50 @@ out: * softirq as those do not count in task exec_runtime any more. */ static void irqtime_account_process_tick(struct task_struct *p, int user_tick, - struct rq *rq) + struct rq *rq, int ticks) { - cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy); + cputime_t scaled = cputime_to_scaled(cputime_one_jiffy); + u64 cputime = (__force u64) cputime_one_jiffy; u64 *cpustat = kcpustat_this_cpu->cpustat; if (steal_account_process_tick()) return; + cputime *= ticks; + scaled *= ticks; + if (irqtime_account_hi_update()) { - cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy; + cpustat[CPUTIME_IRQ] += cputime; } else if (irqtime_account_si_update()) { - cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy; + cpustat[CPUTIME_SOFTIRQ] += cputime; } else if (this_cpu_ksoftirqd() == p) { /* * ksoftirqd time do not get accounted in cpu_softirq_time. * So, we have to handle it separately here. * Also, p->stime needs to be updated for ksoftirqd. 
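The cpudl changes above merge the heap payload and the cpu-to-slot reverse map into one elements[] array, which is why cpudl_exchange() may no longer swap whole elements: entry i carries both the heap record for slot i and the reverse index for cpu i, two unrelated roles. A swap must therefore move only the (dl, cpu) payload and then patch the idx fields keyed by cpu id. A small self-contained demo of that bookkeeping (names are invented; the layout mirrors struct cpudl_item):

        #include <stdio.h>

        struct item {
                unsigned long long dl;  /* heap key   (role 1: indexed by heap slot) */
                int cpu;                /* heap value (role 1: indexed by heap slot) */
                int idx;                /* reverse map (role 2: indexed by cpu id!)  */
        };

        static void heap_swap(struct item *e, int a, int b)
        {
                int cpu_a = e[a].cpu, cpu_b = e[b].cpu;
                unsigned long long tdl = e[a].dl;
                int tcpu = e[a].cpu;

                /* swap only the heap payload of slots a and b ... */
                e[a].dl = e[b].dl;   e[b].dl = tdl;
                e[a].cpu = e[b].cpu; e[b].cpu = tcpu;
                /* ... then fix the reverse map, which lives at index == cpu id */
                e[cpu_a].idx = b;
                e[cpu_b].idx = a;
        }

        int main(void)
        {
                struct item e[2] = {
                        { .dl = 100, .cpu = 0, .idx = 0 },
                        { .dl = 200, .cpu = 1, .idx = 1 },
                };

                heap_swap(e, 0, 1);
                printf("slot 0 now holds cpu%d (dl=%llu); cpu0 is at slot %d\n",
                       e[0].cpu, e[0].dl, e[0].idx);    /* cpu1, dl=200, slot 1 */
                return 0;
        }

Swapping whole elements, as the old code effectively did once idx moved into the array, would have dragged the reverse-map fields along with the payload and corrupted the cpu-to-slot mapping.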
*/ - __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled, - CPUTIME_SOFTIRQ); + __account_system_time(p, cputime, scaled, CPUTIME_SOFTIRQ); } else if (user_tick) { - account_user_time(p, cputime_one_jiffy, one_jiffy_scaled); + account_user_time(p, cputime, scaled); } else if (p == rq->idle) { - account_idle_time(cputime_one_jiffy); + account_idle_time(cputime); } else if (p->flags & PF_VCPU) { /* System time or guest time */ - account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled); + account_guest_time(p, cputime, scaled); } else { - __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled, - CPUTIME_SYSTEM); + __account_system_time(p, cputime, scaled, CPUTIME_SYSTEM); } } static void irqtime_account_idle_ticks(int ticks) { - int i; struct rq *rq = this_rq(); - for (i = 0; i < ticks; i++) - irqtime_account_process_tick(current, 0, rq); + irqtime_account_process_tick(current, 0, rq, ticks); } #else /* CONFIG_IRQ_TIME_ACCOUNTING */ static inline void irqtime_account_idle_ticks(int ticks) {} static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick, - struct rq *rq) {} + struct rq *rq, int nr_ticks) {} #endif /* CONFIG_IRQ_TIME_ACCOUNTING */ /* @@ -464,7 +464,7 @@ void account_process_tick(struct task_struct *p, int user_tick) return; if (sched_clock_irqtime) { - irqtime_account_process_tick(p, user_tick, rq); + irqtime_account_process_tick(p, user_tick, rq, 1); return; } diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index b08095786cb8..800e99b99075 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -528,6 +528,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer) sched_clock_tick(); update_rq_clock(rq); dl_se->dl_throttled = 0; + dl_se->dl_yielded = 0; if (p->on_rq) { enqueue_task_dl(rq, p, ENQUEUE_REPLENISH); if (task_has_dl_policy(rq->curr)) @@ -893,10 +894,10 @@ static void yield_task_dl(struct rq *rq) * We make the task go to sleep until its current deadline by * forcing its runtime to zero. This way, update_curr_dl() stops * it and the bandwidth timer will wake it up and will give it - * new scheduling parameters (thanks to dl_new=1). + * new scheduling parameters (thanks to dl_yielded=1). */ if (p->dl.runtime > 0) { - rq->curr->dl.dl_new = 1; + rq->curr->dl.dl_yielded = 1; p->dl.runtime = 0; } update_curr_dl(rq); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 7570dd969c28..0fdb96de81a5 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -6653,6 +6653,7 @@ static int idle_balance(struct rq *this_rq) int this_cpu = this_rq->cpu; idle_enter_fair(this_rq); + /* * We must set idle_stamp _before_ calling idle_balance(), such that we * measure the duration of idle_balance() as idle time. @@ -6705,14 +6706,16 @@ static int idle_balance(struct rq *this_rq) raw_spin_lock(&this_rq->lock); + if (curr_cost > this_rq->max_idle_balance_cost) + this_rq->max_idle_balance_cost = curr_cost; + /* - * While browsing the domains, we released the rq lock. - * A task could have be enqueued in the meantime + * While browsing the domains, we released the rq lock, a task could + * have been enqueued in the meantime. Since we're not going idle, + * pretend we pulled a task. 
*/ - if (this_rq->cfs.h_nr_running && !pulled_task) { + if (this_rq->cfs.h_nr_running && !pulled_task) pulled_task = 1; - goto out; - } if (pulled_task || time_after(jiffies, this_rq->next_balance)) { /* @@ -6722,9 +6725,6 @@ static int idle_balance(struct rq *this_rq) this_rq->next_balance = next_balance; } - if (curr_cost > this_rq->max_idle_balance_cost) - this_rq->max_idle_balance_cost = curr_cost; - out: /* Is there a task of a high priority class? */ if (this_rq->nr_running != this_rq->cfs.h_nr_running && diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 0ee63af30bd1..8edc87185427 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1916,6 +1916,12 @@ static void send_mayday(struct work_struct *work) /* mayday mayday mayday */ if (list_empty(&pwq->mayday_node)) { + /* + * If @pwq is for an unbound wq, its base ref may be put at + * any time due to an attribute change. Pin @pwq until the + * rescuer is done with it. + */ + get_pwq(pwq); list_add_tail(&pwq->mayday_node, &wq->maydays); wake_up_process(wq->rescuer->task); } @@ -2398,6 +2404,7 @@ static int rescuer_thread(void *__rescuer) struct worker *rescuer = __rescuer; struct workqueue_struct *wq = rescuer->rescue_wq; struct list_head *scheduled = &rescuer->scheduled; + bool should_stop; set_user_nice(current, RESCUER_NICE_LEVEL); @@ -2409,11 +2416,15 @@ static int rescuer_thread(void *__rescuer) repeat: set_current_state(TASK_INTERRUPTIBLE); - if (kthread_should_stop()) { - __set_current_state(TASK_RUNNING); - rescuer->task->flags &= ~PF_WQ_WORKER; - return 0; - } + /* + * By the time the rescuer is requested to stop, the workqueue + * shouldn't have any work pending, but @wq->maydays may still have + * pwq(s) queued. This can happen by non-rescuer workers consuming + * all the work items before the rescuer got to them. Go through + * @wq->maydays processing before acting on should_stop so that the + * list is always empty on exit. + */ + should_stop = kthread_should_stop(); /* see whether any pwq is asking for help */ spin_lock_irq(&wq_mayday_lock); @@ -2445,6 +2456,12 @@ repeat: process_scheduled_works(rescuer); /* + * Put the reference grabbed by send_mayday(). @pool won't + * go away while we're holding its lock. + */ + put_pwq(pwq); + + /* * Leave this pool. If keep_working() is %true, notify a * regular worker; otherwise, we end up with 0 concurrency * and stalling the execution. @@ -2459,6 +2476,12 @@ repeat: spin_unlock_irq(&wq_mayday_lock); + if (should_stop) { + __set_current_state(TASK_RUNNING); + rescuer->task->flags &= ~PF_WQ_WORKER; + return 0; + } + /* rescuers should never participate in concurrency management */ WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING)); schedule(); @@ -4100,7 +4123,8 @@ static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu, if (!pwq) { pr_warning("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n", wq->name); - goto out_unlock; + mutex_lock(&wq->mutex); + goto use_dfl_pwq; } /* diff --git a/mm/Kconfig b/mm/Kconfig index ebe5880c29d6..1b5a95f0fa01 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -581,3 +581,18 @@ config PGTABLE_MAPPING config GENERIC_EARLY_IOREMAP bool + +config MAX_STACK_SIZE_MB + int "Maximum user stack size for 32-bit processes (MB)" + default 80 + range 8 256 if METAG + range 8 2048 + depends on STACK_GROWSUP && (!64BIT || COMPAT) + help + This is the maximum stack size in Megabytes in the VM layout of 32-bit + user processes when the stack grows upwards (currently only on parisc + and metag arch). 
The stack will be located at the highest memory + address minus the given value, unless the RLIMIT_STACK hard limit is + changed to a smaller value in which case that is used. + + A sane initial value is 80 MB. diff --git a/mm/Makefile b/mm/Makefile index b484452dac57..0173940407f6 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -30,7 +30,6 @@ endif obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o -obj-$(CONFIG_BOUNCE) += bounce.o obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o obj-$(CONFIG_FRONTSWAP) += frontswap.o obj-$(CONFIG_ZSWAP) += zswap.o diff --git a/mm/filemap.c b/mm/filemap.c index 000a220e2a41..088358c8006b 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -257,9 +257,11 @@ static int filemap_check_errors(struct address_space *mapping) { int ret = 0; /* Check for outstanding write errors */ - if (test_and_clear_bit(AS_ENOSPC, &mapping->flags)) + if (test_bit(AS_ENOSPC, &mapping->flags) && + test_and_clear_bit(AS_ENOSPC, &mapping->flags)) ret = -ENOSPC; - if (test_and_clear_bit(AS_EIO, &mapping->flags)) + if (test_bit(AS_EIO, &mapping->flags) && + test_and_clear_bit(AS_EIO, &mapping->flags)) ret = -EIO; return ret; } diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 91d67eaee050..8d2fcdfeff7f 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -1775,10 +1775,9 @@ void __init kmemleak_init(void) int i; unsigned long flags; - kmemleak_early_log = 0; - #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF if (!kmemleak_skip_disable) { + kmemleak_early_log = 0; kmemleak_disable(); return; } @@ -1796,6 +1795,7 @@ void __init kmemleak_init(void) /* the kernel is still in UP mode, so disabling the IRQs is enough */ local_irq_save(flags); + kmemleak_early_log = 0; if (kmemleak_error) { local_irq_restore(flags); return; } diff --git a/mm/madvise.c b/mm/madvise.c index 539eeb96b323..a402f8fdc68e 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -195,7 +195,7 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma, for (; start < end; start += PAGE_SIZE) { index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; - page = find_get_page(mapping, index); + page = find_get_entry(mapping, index); if (!radix_tree_exceptional_entry(page)) { if (page) page_cache_release(page); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index c47dffdcb246..5177c6d4a2dd 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1077,9 +1077,18 @@ static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm) rcu_read_lock(); do { - memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); - if (unlikely(!memcg)) + /* + * Page cache insertions can happen without an + * actual mm context, e.g. during disk probing + * on boot, loopback IO, acct() writes etc. + */ + if (unlikely(!mm)) memcg = root_mem_cgroup; + else { + memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); + if (unlikely(!memcg)) + memcg = root_mem_cgroup; + } } while (!css_tryget(&memcg->css)); rcu_read_unlock(); return memcg; } @@ -3958,17 +3967,9 @@ int mem_cgroup_charge_file(struct page *page, struct mm_struct *mm, return 0; } - /* - * Page cache insertions can happen without an actual mm - * context, e.g. during disk probing on boot.
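The filemap_check_errors() change above is a contended-cacheline optimization rather than a behavior change: test_and_clear_bit() is an atomic read-modify-write that takes the cacheline exclusive even when the bit is already clear, so a cheap test_bit() read now filters out the common no-error case first. The same idiom in portable C11 atomics, as a self-contained sketch with invented names:

        #include <stdatomic.h>
        #include <stdio.h>

        /* Clear-and-claim a flag bit, but only pay for the atomic RMW when a
         * plain shared read says the bit might actually be set. */
        static int claim_bit(atomic_ulong *flags, int bit)
        {
                unsigned long mask = 1UL << bit;

                if (!(atomic_load_explicit(flags, memory_order_relaxed) & mask))
                        return 0;   /* common case: read-only, no cacheline dirtying */
                return !!(atomic_fetch_and(flags, ~mask) & mask);
        }

        int main(void)
        {
                atomic_ulong flags = 1UL << 3;
                int first = claim_bit(&flags, 3);
                int second = claim_bit(&flags, 3);

                printf("first claim: %d, second: %d\n", first, second); /* 1, 0 */
                return 0;
        }

As in the kernel version, the racy read is benign: a bit set by another writer between the read and the RMW is still claimed atomically, and a bit set after a negative read is simply observed on the next call.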
- */ - if (unlikely(!mm)) - memcg = root_mem_cgroup; - else { - memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true); - if (!memcg) - return -ENOMEM; - } + memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true); + if (!memcg) + return -ENOMEM; __mem_cgroup_commit_charge(memcg, page, 1, type, false); return 0; } diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 35ef28acf137..9ccef39a9de2 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1081,15 +1081,16 @@ int memory_failure(unsigned long pfn, int trapno, int flags) return 0; } else if (PageHuge(hpage)) { /* - * Check "just unpoisoned", "filter hit", and - * "race with other subpage." + * Check "filter hit" and "race with other subpage." */ lock_page(hpage); - if (!PageHWPoison(hpage) - || (hwpoison_filter(p) && TestClearPageHWPoison(p)) - || (p != hpage && TestSetPageHWPoison(hpage))) { - atomic_long_sub(nr_pages, &num_poisoned_pages); - return 0; + if (PageHWPoison(hpage)) { + if ((hwpoison_filter(p) && TestClearPageHWPoison(p)) + || (p != hpage && TestSetPageHWPoison(hpage))) { + atomic_long_sub(nr_pages, &num_poisoned_pages); + unlock_page(hpage); + return 0; + } } set_page_hwpoison_huge_page(hpage); res = dequeue_hwpoisoned_huge_page(hpage); @@ -1152,6 +1153,8 @@ int memory_failure(unsigned long pfn, int trapno, int flags) */ if (!PageHWPoison(p)) { printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn); + atomic_long_sub(nr_pages, &num_poisoned_pages); + put_page(hpage); res = 0; goto out; } diff --git a/mm/mremap.c b/mm/mremap.c index 0843feb66f3d..05f1180e9f21 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -194,10 +194,17 @@ unsigned long move_page_tables(struct vm_area_struct *vma, break; if (pmd_trans_huge(*old_pmd)) { int err = 0; - if (extent == HPAGE_PMD_SIZE) + if (extent == HPAGE_PMD_SIZE) { + VM_BUG_ON(vma->vm_file || !vma->anon_vma); + /* See comment in move_ptes() */ + if (need_rmap_locks) + anon_vma_lock_write(vma->anon_vma); err = move_huge_pmd(vma, new_vma, old_addr, new_addr, old_end, old_pmd, new_pmd); + if (need_rmap_locks) + anon_vma_unlock_write(vma->anon_vma); + } if (err > 0) { need_flush = true; continue; diff --git a/mm/percpu.c b/mm/percpu.c index 63e24fb4387b..2ddf9a990dbd 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -610,7 +610,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void) chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0])); if (!chunk->map) { - kfree(chunk); + pcpu_mem_free(chunk, pcpu_chunk_struct_size); return NULL; } diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 175273f38cb1..44ebd5c2cd4a 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -169,6 +169,7 @@ int register_vlan_dev(struct net_device *dev) if (err < 0) goto out_uninit_mvrp; + vlan->nest_level = dev_get_nest_level(real_dev, is_vlan_dev) + 1; err = register_netdevice(dev); if (err < 0) goto out_uninit_mvrp; diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 733ec283ed1b..019efb79708f 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -493,48 +493,10 @@ static void vlan_dev_change_rx_flags(struct net_device *dev, int change) } } -static int vlan_calculate_locking_subclass(struct net_device *real_dev) -{ - int subclass = 0; - - while (is_vlan_dev(real_dev)) { - subclass++; - real_dev = vlan_dev_priv(real_dev)->real_dev; - } - - return subclass; -} - -static void vlan_dev_mc_sync(struct net_device *to, struct net_device *from) -{ - int err = 0, subclass; - - subclass = vlan_calculate_locking_subclass(to); - - spin_lock_nested(&to->addr_list_lock, subclass); 
- err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len); - if (!err) - __dev_set_rx_mode(to); - spin_unlock(&to->addr_list_lock); -} - -static void vlan_dev_uc_sync(struct net_device *to, struct net_device *from) -{ - int err = 0, subclass; - - subclass = vlan_calculate_locking_subclass(to); - - spin_lock_nested(&to->addr_list_lock, subclass); - err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len); - if (!err) - __dev_set_rx_mode(to); - spin_unlock(&to->addr_list_lock); -} - static void vlan_dev_set_rx_mode(struct net_device *vlan_dev) { - vlan_dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev); - vlan_dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev); + dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev); + dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev); } /* @@ -562,6 +524,11 @@ static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass) netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass); } +static int vlan_dev_get_lock_subclass(struct net_device *dev) +{ + return vlan_dev_priv(dev)->nest_level; +} + static const struct header_ops vlan_header_ops = { .create = vlan_dev_hard_header, .rebuild = vlan_dev_rebuild_header, @@ -597,7 +564,6 @@ static const struct net_device_ops vlan_netdev_ops; static int vlan_dev_init(struct net_device *dev) { struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; - int subclass = 0; netif_carrier_off(dev); @@ -646,8 +612,7 @@ static int vlan_dev_init(struct net_device *dev) SET_NETDEV_DEVTYPE(dev, &vlan_type); - subclass = vlan_calculate_locking_subclass(dev); - vlan_dev_set_lockdep_class(dev, subclass); + vlan_dev_set_lockdep_class(dev, vlan_dev_get_lock_subclass(dev)); vlan_dev_priv(dev)->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats); if (!vlan_dev_priv(dev)->vlan_pcpu_stats) @@ -819,6 +784,7 @@ static const struct net_device_ops vlan_netdev_ops = { .ndo_netpoll_cleanup = vlan_dev_netpoll_cleanup, #endif .ndo_fix_features = vlan_dev_fix_features, + .ndo_get_lock_subclass = vlan_dev_get_lock_subclass, }; void vlan_setup(struct net_device *dev) diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index b3bd4ec3fd94..f04224c32005 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -1545,6 +1545,8 @@ out_neigh: if ((orig_neigh_node) && (!is_single_hop_neigh)) batadv_orig_node_free_ref(orig_neigh_node); out: + if (router_ifinfo) + batadv_neigh_ifinfo_free_ref(router_ifinfo); if (router) batadv_neigh_node_free_ref(router); if (router_router) diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index b25fd64d727b..aa5d4946d0d7 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c @@ -940,8 +940,7 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv, * additional DAT answer may trigger kernel warnings about * a packet coming from the wrong port. 
*/ - if (batadv_is_my_client(bat_priv, dat_entry->mac_addr, - BATADV_NO_FLAGS)) { + if (batadv_is_my_client(bat_priv, dat_entry->mac_addr, vid)) { ret = true; goto out; } diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c index bcc4bea632fa..f14e54a05691 100644 --- a/net/batman-adv/fragmentation.c +++ b/net/batman-adv/fragmentation.c @@ -418,12 +418,13 @@ bool batadv_frag_send_packet(struct sk_buff *skb, struct batadv_neigh_node *neigh_node) { struct batadv_priv *bat_priv; - struct batadv_hard_iface *primary_if; + struct batadv_hard_iface *primary_if = NULL; struct batadv_frag_packet frag_header; struct sk_buff *skb_fragment; unsigned mtu = neigh_node->if_incoming->net_dev->mtu; unsigned header_size = sizeof(frag_header); unsigned max_fragment_size, max_packet_size; + bool ret = false; /* To avoid merge and refragmentation at next-hops we never send * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE @@ -483,7 +484,11 @@ bool batadv_frag_send_packet(struct sk_buff *skb, skb->len + ETH_HLEN); batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); - return true; + ret = true; + out_err: - return false; + if (primary_if) + batadv_hardif_free_ref(primary_if); + + return ret; } diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index c835e137423b..90cff585b37d 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c @@ -42,8 +42,10 @@ static void batadv_gw_node_free_ref(struct batadv_gw_node *gw_node) { - if (atomic_dec_and_test(&gw_node->refcount)) + if (atomic_dec_and_test(&gw_node->refcount)) { + batadv_orig_node_free_ref(gw_node->orig_node); kfree_rcu(gw_node, rcu); + } } static struct batadv_gw_node * @@ -406,9 +408,14 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv, if (gateway->bandwidth_down == 0) return; + if (!atomic_inc_not_zero(&orig_node->refcount)) + return; + gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC); - if (!gw_node) + if (!gw_node) { + batadv_orig_node_free_ref(orig_node); return; + } INIT_HLIST_NODE(&gw_node->list); gw_node->orig_node = orig_node; diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index b851cc580853..fbda6b54baff 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -83,7 +83,7 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev) return true; /* no more parents..stop recursion */ - if (net_dev->iflink == net_dev->ifindex) + if (net_dev->iflink == 0 || net_dev->iflink == net_dev->ifindex) return false; /* recurse over the parent device */ diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index ffd9dfbd9b0e..6a484514cd3e 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@ -501,12 +501,17 @@ batadv_neigh_node_get(const struct batadv_orig_node *orig_node, static void batadv_orig_ifinfo_free_rcu(struct rcu_head *rcu) { struct batadv_orig_ifinfo *orig_ifinfo; + struct batadv_neigh_node *router; orig_ifinfo = container_of(rcu, struct batadv_orig_ifinfo, rcu); if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT) batadv_hardif_free_ref_now(orig_ifinfo->if_outgoing); + /* this is the last reference to this object */ + router = rcu_dereference_protected(orig_ifinfo->router, true); + if (router) + batadv_neigh_node_free_ref_now(router); kfree(orig_ifinfo); } @@ -702,6 +707,47 @@ free_orig_node: } /** + * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor + * @bat_priv: the bat priv with all the soft interface 
information + * @neigh: neighbor node which is to be checked + */ +static void +batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv, + struct batadv_neigh_node *neigh) +{ + struct batadv_neigh_ifinfo *neigh_ifinfo; + struct batadv_hard_iface *if_outgoing; + struct hlist_node *node_tmp; + + spin_lock_bh(&neigh->ifinfo_lock); + + /* for all ifinfo objects for this neighbor */ + hlist_for_each_entry_safe(neigh_ifinfo, node_tmp, + &neigh->ifinfo_list, list) { + if_outgoing = neigh_ifinfo->if_outgoing; + + /* always keep the default interface */ + if (if_outgoing == BATADV_IF_DEFAULT) + continue; + + /* don't purge if the interface is not (going) down */ + if ((if_outgoing->if_status != BATADV_IF_INACTIVE) && + (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) && + (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED)) + continue; + + batadv_dbg(BATADV_DBG_BATMAN, bat_priv, + "neighbor/ifinfo purge: neighbor %pM, iface: %s\n", + neigh->addr, if_outgoing->net_dev->name); + + hlist_del_rcu(&neigh_ifinfo->list); + batadv_neigh_ifinfo_free_ref(neigh_ifinfo); + } + + spin_unlock_bh(&neigh->ifinfo_lock); +} + +/** * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator * @bat_priv: the bat priv with all the soft interface information * @orig_node: orig node which is to be checked @@ -800,6 +846,11 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv, hlist_del_rcu(&neigh_node->list); batadv_neigh_node_free_ref(neigh_node); + } else { + /* only necessary if the whole neighbor is not being + * deleted, but some interface has been removed. + */ + batadv_purge_neigh_ifinfo(bat_priv, neigh_node); } } @@ -857,7 +908,7 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv, { struct batadv_neigh_node *best_neigh_node; struct batadv_hard_iface *hard_iface; - bool changed; + bool changed_ifinfo, changed_neigh; if (batadv_has_timed_out(orig_node->last_seen, 2 * BATADV_PURGE_TIMEOUT)) { @@ -867,10 +918,10 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv, jiffies_to_msecs(orig_node->last_seen)); return true; } - changed = batadv_purge_orig_ifinfo(bat_priv, orig_node); - changed = changed || batadv_purge_orig_neighbors(bat_priv, orig_node); + changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node); + changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node); - if (!changed) + if (!changed_ifinfo && !changed_neigh) return false; /* first for NULL ...
*/ @@ -1028,7 +1079,8 @@ int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset) bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface); out: - batadv_hardif_free_ref(hard_iface); + if (hard_iface) + batadv_hardif_free_ref(hard_iface); return 0; } diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index 80e1b0f60a30..2acf7fa1fec6 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -859,12 +859,12 @@ static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops, return NF_STOLEN; } -#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV4) +#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) static int br_nf_dev_queue_xmit(struct sk_buff *skb) { int ret; - if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) && + if (skb->protocol == htons(ETH_P_IP) && skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu && !skb_is_gso(skb)) { if (br_parse_ip_options(skb)) diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index dac7f9b98687..1948d592aa54 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -557,7 +557,7 @@ static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov, return r; } -static int ceph_tcp_sendpage(struct socket *sock, struct page *page, +static int __ceph_tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, bool more) { int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR); @@ -570,6 +570,24 @@ static int ceph_tcp_sendpage(struct socket *sock, struct page *page, return ret; } +static int ceph_tcp_sendpage(struct socket *sock, struct page *page, + int offset, size_t size, bool more) +{ + int ret; + struct kvec iov; + + /* sendpage cannot properly handle pages with page_count == 0, + * we need to fall back to sendmsg if that's the case */ + if (page_count(page) >= 1) + return __ceph_tcp_sendpage(sock, page, offset, size, more); + + iov.iov_base = kmap(page) + offset; + iov.iov_len = size; + ret = ceph_tcp_sendmsg(sock, &iov, 1, size, more); + kunmap(page); + + return ret; +} /* * Shutdown/close the socket for the given connection. diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c index 8b8a5a24b223..c547e46084d3 100644 --- a/net/ceph/osdmap.c +++ b/net/ceph/osdmap.c @@ -329,6 +329,11 @@ static struct crush_map *crush_decode(void *pbyval, void *end) dout("crush decode tunable chooseleaf_descend_once = %d", c->chooseleaf_descend_once); + ceph_decode_need(p, end, sizeof(u8), done); + c->chooseleaf_vary_r = ceph_decode_8(p); + dout("crush decode tunable chooseleaf_vary_r = %d", + c->chooseleaf_vary_r); + done: dout("crush_decode success\n"); return c; diff --git a/net/core/dev.c b/net/core/dev.c index d2c8a06b3a98..9abc503b19b7 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2418,7 +2418,7 @@ EXPORT_SYMBOL(netdev_rx_csum_fault); * 2. No high memory really exists on this machine.
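 * (note: on 64-bit builds CONFIG_HIGHMEM is not set, so the highmem
 * frag walk in illegal_highdma() compiles away and the check is a no-op)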
*/ -static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb) +static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) { #ifdef CONFIG_HIGHMEM int i; @@ -2493,38 +2493,36 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features) } static netdev_features_t harmonize_features(struct sk_buff *skb, - const struct net_device *dev, - netdev_features_t features) + netdev_features_t features) { int tmp; if (skb->ip_summed != CHECKSUM_NONE && !can_checksum_protocol(features, skb_network_protocol(skb, &tmp))) { features &= ~NETIF_F_ALL_CSUM; - } else if (illegal_highdma(dev, skb)) { + } else if (illegal_highdma(skb->dev, skb)) { features &= ~NETIF_F_SG; } return features; } -netdev_features_t netif_skb_dev_features(struct sk_buff *skb, - const struct net_device *dev) +netdev_features_t netif_skb_features(struct sk_buff *skb) { __be16 protocol = skb->protocol; - netdev_features_t features = dev->features; + netdev_features_t features = skb->dev->features; - if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs) + if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs) features &= ~NETIF_F_GSO_MASK; if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) { struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; protocol = veh->h_vlan_encapsulated_proto; } else if (!vlan_tx_tag_present(skb)) { - return harmonize_features(skb, dev, features); + return harmonize_features(skb, features); } - features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX | + features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX); if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) @@ -2532,9 +2530,9 @@ netdev_features_t netif_skb_dev_features(struct sk_buff *skb, NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; - return harmonize_features(skb, dev, features); + return harmonize_features(skb, features); } -EXPORT_SYMBOL(netif_skb_dev_features); +EXPORT_SYMBOL(netif_skb_features); int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq) @@ -3953,6 +3951,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff } NAPI_GRO_CB(skb)->count = 1; NAPI_GRO_CB(skb)->age = jiffies; + NAPI_GRO_CB(skb)->last = skb; skb_shinfo(skb)->gso_size = skb_gro_len(skb); skb->next = napi->gro_list; napi->gro_list = skb; @@ -4543,6 +4542,32 @@ void *netdev_adjacent_get_private(struct list_head *adj_list) EXPORT_SYMBOL(netdev_adjacent_get_private); /** + * netdev_upper_get_next_dev_rcu - Get the next dev from upper list + * @dev: device + * @iter: list_head ** of the current position + * + * Gets the next device from the dev's upper list, starting from iter + * position. The caller must hold RCU read lock. 
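+ *
+ * A minimal usage sketch (hypothetical caller; iteration must run
+ * under rcu_read_lock() or RTNL, matching the WARN_ON_ONCE below):
+ *
+ *	struct list_head *iter = &dev->adj_list.upper;
+ *	struct net_device *upper;
+ *
+ *	while ((upper = netdev_upper_get_next_dev_rcu(dev, &iter)))
+ *		pr_info("upper: %s\n", upper->name);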
+ */ +struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, + struct list_head **iter) +{ + struct netdev_adjacent *upper; + + WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); + + upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); + + if (&upper->list == &dev->adj_list.upper) + return NULL; + + *iter = &upper->list; + + return upper->dev; +} +EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu); + +/** * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list * @dev: device * @iter: list_head ** of the current position @@ -4624,6 +4649,32 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev, EXPORT_SYMBOL(netdev_lower_get_next_private_rcu); /** + * netdev_lower_get_next - Get the next device from the lower neighbour + * list + * @dev: device + * @iter: list_head ** of the current position + * + * Gets the next netdev_adjacent from the dev's lower neighbour + * list, starting from iter position. The caller must hold RTNL lock or + * its own locking that guarantees that the neighbour lower + * list will remain unchanged. + */ +void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter) +{ + struct netdev_adjacent *lower; + + lower = list_entry((*iter)->next, struct netdev_adjacent, list); + + if (&lower->list == &dev->adj_list.lower) + return NULL; + + *iter = &lower->list; + + return lower->dev; +} +EXPORT_SYMBOL(netdev_lower_get_next); + +/** * netdev_lower_get_first_private_rcu - Get the first ->private from the * lower neighbour list, RCU * variant @@ -5073,6 +5124,30 @@ void *netdev_lower_dev_get_private(struct net_device *dev, } EXPORT_SYMBOL(netdev_lower_dev_get_private); + +int dev_get_nest_level(struct net_device *dev, + bool (*type_check)(struct net_device *dev)) +{ + struct net_device *lower = NULL; + struct list_head *iter; + int max_nest = -1; + int nest; + + ASSERT_RTNL(); + + netdev_for_each_lower_dev(dev, lower, iter) { + nest = dev_get_nest_level(lower, type_check); + if (max_nest < nest) + max_nest = nest; + } + + if (type_check(dev)) + max_nest++; + + return max_nest; +} +EXPORT_SYMBOL(dev_get_nest_level); + static void dev_change_rx_flags(struct net_device *dev, int flags) { const struct net_device_ops *ops = dev->netdev_ops; @@ -5238,7 +5313,6 @@ void __dev_set_rx_mode(struct net_device *dev) if (ops->ndo_set_rx_mode) ops->ndo_set_rx_mode(dev); } -EXPORT_SYMBOL(__dev_set_rx_mode); void dev_set_rx_mode(struct net_device *dev) { @@ -5543,7 +5617,7 @@ static int dev_new_index(struct net *net) /* Delayed registration/unregistration */ static LIST_HEAD(net_todo_list); -static DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq); +DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq); static void net_set_todo(struct net_device *dev) { diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 8f8a96ef9f3f..32d872eec7f5 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -1248,8 +1248,8 @@ void __neigh_set_probe_once(struct neighbour *neigh) neigh->updated = jiffies; if (!(neigh->nud_state & NUD_FAILED)) return; - neigh->nud_state = NUD_PROBE; - atomic_set(&neigh->probes, NEIGH_VAR(neigh->parms, UCAST_PROBES)); + neigh->nud_state = NUD_INCOMPLETE; + atomic_set(&neigh->probes, neigh_max_probes(neigh)); neigh_add_timer(neigh, jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME)); } diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 81d3a9a08453..7c8ffd974961 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -24,7 +24,7 @@ static
LIST_HEAD(pernet_list); static struct list_head *first_device = &pernet_list; -static DEFINE_MUTEX(net_mutex); +DEFINE_MUTEX(net_mutex); LIST_HEAD(net_namespace_list); EXPORT_SYMBOL_GPL(net_namespace_list); diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 9837bebf93ce..2d8d8fcfa060 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -353,15 +353,46 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops) } EXPORT_SYMBOL_GPL(__rtnl_link_unregister); +/* Return with the rtnl_lock held when there are no network + * devices unregistering in any network namespace. + */ +static void rtnl_lock_unregistering_all(void) +{ + struct net *net; + bool unregistering; + DEFINE_WAIT(wait); + + for (;;) { + prepare_to_wait(&netdev_unregistering_wq, &wait, + TASK_UNINTERRUPTIBLE); + unregistering = false; + rtnl_lock(); + for_each_net(net) { + if (net->dev_unreg_count > 0) { + unregistering = true; + break; + } + } + if (!unregistering) + break; + __rtnl_unlock(); + schedule(); + } + finish_wait(&netdev_unregistering_wq, &wait); +} + /** * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink. * @ops: struct rtnl_link_ops * to unregister */ void rtnl_link_unregister(struct rtnl_link_ops *ops) { - rtnl_lock(); + /* Close the race with cleanup_net() */ + mutex_lock(&net_mutex); + rtnl_lock_unregistering_all(); __rtnl_link_unregister(ops); rtnl_unlock(); + mutex_unlock(&net_mutex); } EXPORT_SYMBOL_GPL(rtnl_link_unregister); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 1b62343f5837..8383b2bddeb9 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -3076,7 +3076,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) if (unlikely(p->len + len >= 65536)) return -E2BIG; - lp = NAPI_GRO_CB(p)->last ?: p; + lp = NAPI_GRO_CB(p)->last; pinfo = skb_shinfo(lp); if (headlen <= offset) { @@ -3192,7 +3192,7 @@ merge: __skb_pull(skb, offset); - if (!NAPI_GRO_CB(p)->last) + if (NAPI_GRO_CB(p)->last == p) skb_shinfo(p)->frag_list = skb; else NAPI_GRO_CB(p)->last->next = skb; diff --git a/net/core/utils.c b/net/core/utils.c index 2f737bf90b3f..eed34338736c 100644 --- a/net/core/utils.c +++ b/net/core/utils.c @@ -348,8 +348,8 @@ static void __net_random_once_deferred(struct work_struct *w) { struct __net_random_once_work *work = container_of(w, struct __net_random_once_work, work); - if (!static_key_enabled(work->key)) - static_key_slow_inc(work->key); + BUG_ON(!static_key_enabled(work->key)); + static_key_slow_dec(work->key); kfree(work); } @@ -367,7 +367,7 @@ static void __net_random_once_disable_jump(struct static_key *key) } bool __net_get_random_once(void *buf, int nbytes, bool *done, - struct static_key *done_key) + struct static_key *once_key) { static DEFINE_SPINLOCK(lock); unsigned long flags; @@ -382,7 +382,7 @@ bool __net_get_random_once(void *buf, int nbytes, bool *done, *done = true; spin_unlock_irqrestore(&lock, flags); - __net_random_once_disable_jump(done_key); + __net_random_once_disable_jump(once_key); return true; } diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 0eb5d5e76dfb..5db37cef50a9 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -406,8 +406,9 @@ static int dsa_of_probe(struct platform_device *pdev) goto out_free; } - chip_index = 0; + chip_index = -1; for_each_available_child_of_node(np, child) { + chip_index++; cd = &pd->chip[chip_index]; cd->mii_bus = &mdio_bus->dev; diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 8c54870db792..6d6dd345bc4d 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1650,6 +1650,39 @@ 
static int __init init_ipv4_mibs(void) return register_pernet_subsys(&ipv4_mib_ops); } +static __net_init int inet_init_net(struct net *net) +{ + /* + * Set defaults for local port range + */ + seqlock_init(&net->ipv4.ip_local_ports.lock); + net->ipv4.ip_local_ports.range[0] = 32768; + net->ipv4.ip_local_ports.range[1] = 61000; + + seqlock_init(&net->ipv4.ping_group_range.lock); + /* + * Sane defaults - nobody may create ping sockets. + * Boot scripts should set this to distro-specific group. + */ + net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1); + net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0); + return 0; +} + +static __net_exit void inet_exit_net(struct net *net) +{ +} + +static __net_initdata struct pernet_operations af_inet_ops = { + .init = inet_init_net, + .exit = inet_exit_net, +}; + +static int __init init_inet_pernet_ops(void) +{ + return register_pernet_subsys(&af_inet_ops); +} + static int ipv4_proc_init(void); /* @@ -1794,6 +1827,9 @@ static int __init inet_init(void) if (ip_mr_init()) pr_crit("%s: Cannot init ipv4 mroute\n", __func__); #endif + + if (init_inet_pernet_ops()) + pr_crit("%s: Cannot init ipv4 inet pernet ops\n", __func__); /* * Initialise per-cpu ipv4 mibs */ diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 8a043f03c88e..b10cd43a4722 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -821,13 +821,13 @@ struct fib_info *fib_create_info(struct fib_config *cfg) fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL); if (fi == NULL) goto failure; + fib_info_cnt++; if (cfg->fc_mx) { fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL); if (!fi->fib_metrics) goto failure; } else fi->fib_metrics = (u32 *) dst_default_metrics; - fib_info_cnt++; fi->fib_net = hold_net(net); fi->fib_protocol = cfg->fc_protocol; diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 0d1e2cb877ec..a56b8e6e866a 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -37,11 +37,11 @@ void inet_get_local_port_range(struct net *net, int *low, int *high) unsigned int seq; do { - seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock); + seq = read_seqbegin(&net->ipv4.ip_local_ports.lock); - *low = net->ipv4.sysctl_local_ports.range[0]; - *high = net->ipv4.sysctl_local_ports.range[1]; - } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq)); + *low = net->ipv4.ip_local_ports.range[0]; + *high = net->ipv4.ip_local_ports.range[1]; + } while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq)); } EXPORT_SYMBOL(inet_get_local_port_range); diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index be8abe73bb9f..6f111e48e11c 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c @@ -42,12 +42,12 @@ static bool ip_may_fragment(const struct sk_buff *skb) { return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) || - !skb->local_df; + skb->local_df; } static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu) { - if (skb->len <= mtu || skb->local_df) + if (skb->len <= mtu) return false; if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu) @@ -56,53 +56,6 @@ static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu) return true; } -static bool ip_gso_exceeds_dst_mtu(const struct sk_buff *skb) -{ - unsigned int mtu; - - if (skb->local_df || !skb_is_gso(skb)) - return false; - - mtu = ip_dst_mtu_maybe_forward(skb_dst(skb), true); - - /* if seglen > mtu, do software segmentation for IP 
fragmentation on - * output. DF bit cannot be set since ip_forward would have sent - * icmp error. - */ - return skb_gso_network_seglen(skb) > mtu; -} - -/* called if GSO skb needs to be fragmented on forward */ -static int ip_forward_finish_gso(struct sk_buff *skb) -{ - struct dst_entry *dst = skb_dst(skb); - netdev_features_t features; - struct sk_buff *segs; - int ret = 0; - - features = netif_skb_dev_features(skb, dst->dev); - segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); - if (IS_ERR(segs)) { - kfree_skb(skb); - return -ENOMEM; - } - - consume_skb(skb); - - do { - struct sk_buff *nskb = segs->next; - int err; - - segs->next = NULL; - err = dst_output(segs); - - if (err && ret == 0) - ret = err; - segs = nskb; - } while (segs); - - return ret; -} static int ip_forward_finish(struct sk_buff *skb) { @@ -114,9 +67,6 @@ static int ip_forward_finish(struct sk_buff *skb) if (unlikely(opt->optlen)) ip_forward_options(skb); - if (ip_gso_exceeds_dst_mtu(skb)) - return ip_forward_finish_gso(skb); - return dst_output(skb); } diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index c10a3ce5cbff..ed32313e307c 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -232,8 +232,9 @@ static void ip_expire(unsigned long arg) * "Fragment Reassembly Timeout" message, per RFC792. */ if (qp->user == IP_DEFRAG_AF_PACKET || - (qp->user == IP_DEFRAG_CONNTRACK_IN && - skb_rtable(head)->rt_type != RTN_LOCAL)) + ((qp->user >= IP_DEFRAG_CONNTRACK_IN) && + (qp->user <= __IP_DEFRAG_CONNTRACK_IN_END) && + (skb_rtable(head)->rt_type != RTN_LOCAL))) goto out_rcu_unlock; diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 1cbeba5edff9..a52f50187b54 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -211,6 +211,48 @@ static inline int ip_finish_output2(struct sk_buff *skb) return -EINVAL; } +static int ip_finish_output_gso(struct sk_buff *skb) +{ + netdev_features_t features; + struct sk_buff *segs; + int ret = 0; + + /* common case: locally created skb or seglen is <= mtu */ + if (((IPCB(skb)->flags & IPSKB_FORWARDED) == 0) || + skb_gso_network_seglen(skb) <= ip_skb_dst_mtu(skb)) + return ip_finish_output2(skb); + + /* Slowpath - GSO segment length is exceeding the dst MTU. + * + * This can happen in two cases: + * 1) TCP GRO packet, DF bit not set + * 2) skb arrived via virtio-net, we thus get TSO/GSO skbs directly + * from host network stack. 
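+ *
+ * Worked example (illustrative numbers): a GRO'd TCP skb from a
+ * 9000-MTU ingress can carry gso_size 8960, giving a network seglen
+ * of 40 + 8960 = 9000, so forwarding it out a 1500-MTU route lands
+ * here; a locally built TSO skb with gso_size 1448 (seglen 1488)
+ * stays on the fast path above.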
+ */ + features = netif_skb_features(skb); + segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); + if (IS_ERR(segs)) { + kfree_skb(skb); + return -ENOMEM; + } + + consume_skb(skb); + + do { + struct sk_buff *nskb = segs->next; + int err; + + segs->next = NULL; + err = ip_fragment(segs, ip_finish_output2); + + if (err && ret == 0) + ret = err; + segs = nskb; + } while (segs); + + return ret; +} + static int ip_finish_output(struct sk_buff *skb) { #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM) @@ -220,10 +262,13 @@ static int ip_finish_output(struct sk_buff *skb) return dst_output(skb); } #endif - if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb)) + if (skb_is_gso(skb)) + return ip_finish_output_gso(skb); + + if (skb->len > ip_skb_dst_mtu(skb)) return ip_fragment(skb, ip_finish_output2); - else - return ip_finish_output2(skb); + + return ip_finish_output2(skb); } int ip_mc_output(struct sock *sk, struct sk_buff *skb) diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index b3f859731c60..2acc2337d38b 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -540,9 +540,10 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, unsigned int max_headroom; /* The extra header space needed */ __be32 dst; int err; - bool connected = true; + bool connected; inner_iph = (const struct iphdr *)skb_inner_network_header(skb); + connected = (tunnel->parms.iph.daddr != 0); dst = tnl_params->daddr; if (dst == 0) { @@ -882,6 +883,7 @@ int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id, */ if (!IS_ERR(itn->fb_tunnel_dev)) { itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL; + itn->fb_tunnel_dev->mtu = ip_tunnel_bind_dev(itn->fb_tunnel_dev); ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev)); } rtnl_unlock(); diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index afcee51b90ed..13ef00f1e17b 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -239,6 +239,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) static int vti4_err(struct sk_buff *skb, u32 info) { __be32 spi; + __u32 mark; struct xfrm_state *x; struct ip_tunnel *tunnel; struct ip_esp_hdr *esph; @@ -254,6 +255,8 @@ static int vti4_err(struct sk_buff *skb, u32 info) if (!tunnel) return -1; + mark = be32_to_cpu(tunnel->parms.o_key); + switch (protocol) { case IPPROTO_ESP: esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2)); @@ -281,7 +284,7 @@ static int vti4_err(struct sk_buff *skb, u32 info) return 0; } - x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr, + x = xfrm_state_lookup(net, mark, (const xfrm_address_t *)&iph->daddr, spi, protocol, AF_INET); if (!x) return 0; diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c index 12e13bd82b5b..f40f321b41fc 100644 --- a/net/ipv4/netfilter/nf_defrag_ipv4.c +++ b/net/ipv4/netfilter/nf_defrag_ipv4.c @@ -22,7 +22,6 @@ #endif #include <net/netfilter/nf_conntrack_zones.h> -/* Returns new sk_buff, or NULL */ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user) { int err; @@ -33,8 +32,10 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user) err = ip_defrag(skb, user); local_bh_enable(); - if (!err) + if (!err) { ip_send_check(ip_hdr(skb)); + skb->local_df = 1; + } return err; } diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 8210964a9f19..044a0ddf6a79 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -236,15 +236,15 @@ exit: static void inet_get_ping_group_range_net(struct net *net, kgid_t *low, kgid_t *high) { - 
kgid_t *data = net->ipv4.sysctl_ping_group_range; + kgid_t *data = net->ipv4.ping_group_range.range; unsigned int seq; do { - seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock); + seq = read_seqbegin(&net->ipv4.ping_group_range.lock); *low = data[0]; *high = data[1]; - } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq)); + } while (read_seqretry(&net->ipv4.ping_group_range.lock, seq)); } diff --git a/net/ipv4/route.c b/net/ipv4/route.c index db1e0da871f4..5e676be3daeb 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1519,7 +1519,7 @@ static int __mkroute_input(struct sk_buff *skb, struct in_device *out_dev; unsigned int flags = 0; bool do_cache; - u32 itag; + u32 itag = 0; /* get a working reference to the output device */ out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res)); diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 44eba052b43d..5cde8f263d40 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -45,10 +45,10 @@ static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX }; /* Update system visible IP port range */ static void set_local_port_range(struct net *net, int range[2]) { - write_seqlock(&net->ipv4.sysctl_local_ports.lock); - net->ipv4.sysctl_local_ports.range[0] = range[0]; - net->ipv4.sysctl_local_ports.range[1] = range[1]; - write_sequnlock(&net->ipv4.sysctl_local_ports.lock); + write_seqlock(&net->ipv4.ip_local_ports.lock); + net->ipv4.ip_local_ports.range[0] = range[0]; + net->ipv4.ip_local_ports.range[1] = range[1]; + write_sequnlock(&net->ipv4.ip_local_ports.lock); } /* Validate changes from /proc interface. */ @@ -57,7 +57,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write, size_t *lenp, loff_t *ppos) { struct net *net = - container_of(table->data, struct net, ipv4.sysctl_local_ports.range); + container_of(table->data, struct net, ipv4.ip_local_ports.range); int ret; int range[2]; struct ctl_table tmp = { @@ -87,14 +87,14 @@ static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low { kgid_t *data = table->data; struct net *net = - container_of(table->data, struct net, ipv4.sysctl_ping_group_range); + container_of(table->data, struct net, ipv4.ping_group_range.range); unsigned int seq; do { - seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock); + seq = read_seqbegin(&net->ipv4.ip_local_ports.lock); *low = data[0]; *high = data[1]; - } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq)); + } while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq)); } /* Update system visible IP port range */ @@ -102,11 +102,11 @@ static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t hig { kgid_t *data = table->data; struct net *net = - container_of(table->data, struct net, ipv4.sysctl_ping_group_range); - write_seqlock(&net->ipv4.sysctl_local_ports.lock); + container_of(table->data, struct net, ipv4.ping_group_range.range); + write_seqlock(&net->ipv4.ip_local_ports.lock); data[0] = low; data[1] = high; - write_sequnlock(&net->ipv4.sysctl_local_ports.lock); + write_sequnlock(&net->ipv4.ip_local_ports.lock); } /* Validate changes from /proc interface. 
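 *
 * A typical write (hypothetical values):
 *
 *	echo 100 100 > /proc/sys/net/ipv4/ping_group_range
 *
 * which, if it passes the checks below, lets gid 100 create
 * unprivileged ICMP ping sockets.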
*/ @@ -805,7 +805,7 @@ static struct ctl_table ipv4_net_table[] = { }, { .procname = "ping_group_range", - .data = &init_net.ipv4.sysctl_ping_group_range, + .data = &init_net.ipv4.ping_group_range.range, .maxlen = sizeof(gid_t)*2, .mode = 0644, .proc_handler = ipv4_ping_group_range, @@ -819,8 +819,8 @@ static struct ctl_table ipv4_net_table[] = { }, { .procname = "ip_local_port_range", - .maxlen = sizeof(init_net.ipv4.sysctl_local_ports.range), - .data = &init_net.ipv4.sysctl_local_ports.range, + .maxlen = sizeof(init_net.ipv4.ip_local_ports.range), + .data = &init_net.ipv4.ip_local_ports.range, .mode = 0644, .proc_handler = ipv4_local_port_range, }, @@ -858,20 +858,6 @@ static __net_init int ipv4_sysctl_init_net(struct net *net) table[i].data += (void *)net - (void *)&init_net; } - /* - * Sane defaults - nobody may create ping sockets. - * Boot scripts should set this to distro-specific group. - */ - net->ipv4.sysctl_ping_group_range[0] = make_kgid(&init_user_ns, 1); - net->ipv4.sysctl_ping_group_range[1] = make_kgid(&init_user_ns, 0); - - /* - * Set defaults for local port range - */ - seqlock_init(&net->ipv4.sysctl_local_ports.lock); - net->ipv4.sysctl_local_ports.range[0] = 32768; - net->ipv4.sysctl_local_ports.range[1] = 61000; - net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table); if (net->ipv4.ipv4_hdr == NULL) goto err_reg; diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c index 40e701f2e1e0..186a8ecf92fa 100644 --- a/net/ipv4/xfrm4_output.c +++ b/net/ipv4/xfrm4_output.c @@ -62,10 +62,7 @@ int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb) if (err) return err; - memset(IPCB(skb), 0, sizeof(*IPCB(skb))); - IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED; - - skb->protocol = htons(ETH_P_IP); + IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE; return x->outer_mode->output2(x, skb); } @@ -73,27 +70,34 @@ EXPORT_SYMBOL(xfrm4_prepare_output); int xfrm4_output_finish(struct sk_buff *skb) { + memset(IPCB(skb), 0, sizeof(*IPCB(skb))); + skb->protocol = htons(ETH_P_IP); + +#ifdef CONFIG_NETFILTER + IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED; +#endif + + return xfrm_output(skb); +} + +static int __xfrm4_output(struct sk_buff *skb) +{ + struct xfrm_state *x = skb_dst(skb)->xfrm; + #ifdef CONFIG_NETFILTER - if (!skb_dst(skb)->xfrm) { + if (!x) { IPCB(skb)->flags |= IPSKB_REROUTED; return dst_output(skb); } - - IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED; #endif - skb->protocol = htons(ETH_P_IP); - return xfrm_output(skb); + return x->outer_mode->afinfo->output_finish(skb); } int xfrm4_output(struct sock *sk, struct sk_buff *skb) { - struct dst_entry *dst = skb_dst(skb); - struct xfrm_state *x = dst->xfrm; - return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, - NULL, dst->dev, - x->outer_mode->afinfo->output_finish, + NULL, skb_dst(skb)->dev, __xfrm4_output, !(IPCB(skb)->flags & IPSKB_REROUTED)); } diff --git a/net/ipv4/xfrm4_protocol.c b/net/ipv4/xfrm4_protocol.c index 7f7b243e8139..a2ce0101eaac 100644 --- a/net/ipv4/xfrm4_protocol.c +++ b/net/ipv4/xfrm4_protocol.c @@ -50,8 +50,12 @@ int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err) { int ret; struct xfrm4_protocol *handler; + struct xfrm4_protocol __rcu **head = proto_handlers(protocol); - for_each_protocol_rcu(*proto_handlers(protocol), handler) + if (!head) + return 0; + + for_each_protocol_rcu(*head, handler) if ((ret = handler->cb_handler(skb, err)) <= 0) return ret; @@ -64,15 +68,20 @@ int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi, { int ret; 
struct xfrm4_protocol *handler; + struct xfrm4_protocol __rcu **head = proto_handlers(nexthdr); XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL; XFRM_SPI_SKB_CB(skb)->family = AF_INET; XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr); - for_each_protocol_rcu(*proto_handlers(nexthdr), handler) + if (!head) + goto out; + + for_each_protocol_rcu(*head, handler) if ((ret = handler->input_handler(skb, nexthdr, spi, encap_type)) != -EINVAL) return ret; +out: icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); kfree_skb(skb); @@ -208,6 +217,9 @@ int xfrm4_protocol_register(struct xfrm4_protocol *handler, int ret = -EEXIST; int priority = handler->priority; + if (!proto_handlers(protocol) || !netproto(protocol)) + return -EINVAL; + mutex_lock(&xfrm4_protocol_mutex); if (!rcu_dereference_protected(*proto_handlers(protocol), @@ -250,6 +262,9 @@ int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, struct xfrm4_protocol *t; int ret = -ENOENT; + if (!proto_handlers(protocol) || !netproto(protocol)) + return -EINVAL; + mutex_lock(&xfrm4_protocol_mutex); for (pprev = proto_handlers(protocol); diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 59f95affceb0..b2f091566f88 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -196,7 +196,6 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, unsigned int off; u16 flush = 1; int proto; - __wsum csum; off = skb_gro_offset(skb); hlen = off + sizeof(*iph); @@ -264,13 +263,10 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, NAPI_GRO_CB(skb)->flush |= flush; - csum = skb->csum; - skb_postpull_rcsum(skb, iph, skb_network_header_len(skb)); + skb_gro_postpull_rcsum(skb, iph, nlen); pp = ops->callbacks.gro_receive(head, skb); - skb->csum = csum; - out_unlock: rcu_read_unlock(); diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 40e7581374f7..fbf11562b54c 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -344,12 +344,16 @@ static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst) static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu) { - if (skb->len <= mtu || skb->local_df) + if (skb->len <= mtu) return false; + /* ipv6 conntrack defrag sets max_frag_size + local_df */ if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu) return true; + if (skb->local_df) + return false; + if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu) return false; @@ -1225,7 +1229,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, unsigned int maxnonfragsize, headersize; headersize = sizeof(struct ipv6hdr) + - (opt ? opt->tot_len : 0) + + (opt ? opt->opt_flen + opt->opt_nflen : 0) + (dst_allfrag(&rt->dst) ? 
sizeof(struct frag_hdr) : 0) + rt->rt6i_nfheader_len; diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index b05b609f69d1..f6a66bb4114d 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1557,7 +1557,7 @@ static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[]) { u8 proto; - if (!data) + if (!data || !data[IFLA_IPTUN_PROTO]) return 0; proto = nla_get_u8(data[IFLA_IPTUN_PROTO]); diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index b7c0f827140b..6cc9f9371cc5 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -511,6 +511,7 @@ static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info) { __be32 spi; + __u32 mark; struct xfrm_state *x; struct ip6_tnl *t; struct ip_esp_hdr *esph; @@ -524,6 +525,8 @@ static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, if (!t) return -1; + mark = be32_to_cpu(t->parms.o_key); + switch (protocol) { case IPPROTO_ESP: esph = (struct ip_esp_hdr *)(skb->data + offset); @@ -545,7 +548,7 @@ static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, type != NDISC_REDIRECT) return 0; - x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr, + x = xfrm_state_lookup(net, mark, (const xfrm_address_t *)&iph->daddr, spi, protocol, AF_INET6); if (!x) return 0; @@ -1097,7 +1100,6 @@ static int __init vti6_tunnel_init(void) err = xfrm6_protocol_register(&vti_esp6_protocol, IPPROTO_ESP); if (err < 0) { - unregister_pernet_device(&vti6_net_ops); pr_err("%s: can't register vti6 protocol\n", __func__); goto out; @@ -1106,7 +1108,6 @@ static int __init vti6_tunnel_init(void) err = xfrm6_protocol_register(&vti_ah6_protocol, IPPROTO_AH); if (err < 0) { xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP); - unregister_pernet_device(&vti6_net_ops); pr_err("%s: can't register vti6 protocol\n", __func__); goto out; @@ -1116,7 +1117,6 @@ static int __init vti6_tunnel_init(void) if (err < 0) { xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH); xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP); - unregister_pernet_device(&vti6_net_ops); pr_err("%s: can't register vti6 protocol\n", __func__); goto out; diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 09a22f4f36c9..ca8d4ea48a5d 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -851,7 +851,7 @@ out: static void ndisc_recv_na(struct sk_buff *skb) { struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb); - const struct in6_addr *saddr = &ipv6_hdr(skb)->saddr; + struct in6_addr *saddr = &ipv6_hdr(skb)->saddr; const struct in6_addr *daddr = &ipv6_hdr(skb)->daddr; u8 *lladdr = NULL; u32 ndoptlen = skb_tail_pointer(skb) - (skb_transport_header(skb) + @@ -944,10 +944,7 @@ static void ndisc_recv_na(struct sk_buff *skb) /* * Change: router to host */ - struct rt6_info *rt; - rt = rt6_get_dflt_router(saddr, dev); - if (rt) - ip6_del_rt(rt); + rt6_clean_tohost(dev_net(dev), saddr); } out: diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c index 95f3f1da0d7f..d38e6a8d8b9f 100644 --- a/net/ipv6/netfilter.c +++ b/net/ipv6/netfilter.c @@ -30,13 +30,15 @@ int ip6_route_me_harder(struct sk_buff *skb) .daddr = iph->daddr, .saddr = iph->saddr, }; + int err; dst = ip6_route_output(net, skb->sk, &fl6); - if (dst->error) { + err = dst->error; + if (err) { IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); LIMIT_NETDEBUG(KERN_DEBUG "ip6_route_me_harder: No more route.\n"); dst_release(dst); - return dst->error; + return err; } /* Drop old route. 
*/ diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 004fffb6c221..6ebdb7b6744c 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -2234,6 +2234,27 @@ void rt6_remove_prefsrc(struct inet6_ifaddr *ifp) fib6_clean_all(net, fib6_remove_prefsrc, &adni); } +#define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY) +#define RTF_CACHE_GATEWAY (RTF_GATEWAY | RTF_CACHE) + +/* Remove routers and update dst entries when a gateway turns into a host. */ +static int fib6_clean_tohost(struct rt6_info *rt, void *arg) +{ + struct in6_addr *gateway = (struct in6_addr *)arg; + + if ((((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) || + ((rt->rt6i_flags & RTF_CACHE_GATEWAY) == RTF_CACHE_GATEWAY)) && + ipv6_addr_equal(gateway, &rt->rt6i_gateway)) { + return -1; + } + return 0; +} + +void rt6_clean_tohost(struct net *net, struct in6_addr *gateway) +{ + fib6_clean_all(net, fib6_clean_tohost, gateway); +} + struct arg_dev_net { struct net_device *dev; struct net *net; @@ -2709,6 +2730,9 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh) if (tb[RTA_OIF]) oif = nla_get_u32(tb[RTA_OIF]); + if (tb[RTA_MARK]) + fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]); + if (iif) { struct net_device *dev; int flags = 0; diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c index 0d78132ff18a..8517d3cd1aed 100644 --- a/net/ipv6/tcpv6_offload.c +++ b/net/ipv6/tcpv6_offload.c @@ -42,7 +42,7 @@ static struct sk_buff **tcp6_gro_receive(struct sk_buff **head, if (NAPI_GRO_CB(skb)->flush) goto skip_csum; - wsum = skb->csum; + wsum = NAPI_GRO_CB(skb)->csum; switch (skb->ip_summed) { case CHECKSUM_NONE: diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index 19ef329bdbf8..b930d080c66f 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c @@ -114,12 +114,6 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb) if (err) return err; - memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); -#ifdef CONFIG_NETFILTER - IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED; -#endif - - skb->protocol = htons(ETH_P_IPV6); skb->local_df = 1; return x->outer_mode->output2(x, skb); @@ -128,11 +122,13 @@ EXPORT_SYMBOL(xfrm6_prepare_output); int xfrm6_output_finish(struct sk_buff *skb) { + memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); + skb->protocol = htons(ETH_P_IPV6); + #ifdef CONFIG_NETFILTER IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED; #endif - skb->protocol = htons(ETH_P_IPV6); return xfrm_output(skb); } @@ -142,6 +138,13 @@ static int __xfrm6_output(struct sk_buff *skb) struct xfrm_state *x = dst->xfrm; int mtu; +#ifdef CONFIG_NETFILTER + if (!x) { + IP6CB(skb)->flags |= IP6SKB_REROUTED; + return dst_output(skb); + } +#endif + if (skb->protocol == htons(ETH_P_IPV6)) mtu = ip6_skb_dst_mtu(skb); else @@ -165,6 +168,7 @@ int xfrm6_output(struct sock *sk, struct sk_buff *skb) { - return NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, - skb_dst(skb)->dev, __xfrm6_output); + return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, + NULL, skb_dst(skb)->dev, __xfrm6_output, + !(IP6CB(skb)->flags & IP6SKB_REROUTED)); } diff --git a/net/ipv6/xfrm6_protocol.c b/net/ipv6/xfrm6_protocol.c index 6ab989c486f7..54d13f8dbbae 100644 --- a/net/ipv6/xfrm6_protocol.c +++ b/net/ipv6/xfrm6_protocol.c @@ -50,6 +50,10 @@ int xfrm6_rcv_cb(struct sk_buff *skb, u8 protocol, int err) { int ret; struct xfrm6_protocol *handler; + struct xfrm6_protocol __rcu **head = proto_handlers(protocol); + + if (!head) + return 0;
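+
+	/* a non-NULL head means @protocol is ESP, AH or IPCOMP, the only
+	 * protocols for which there is a handler list to walk below
+	 */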
for_each_protocol_rcu(*proto_handlers(protocol), handler) if ((ret = handler->cb_handler(skb, err)) <= 0) @@ -184,10 +188,12 @@ int xfrm6_protocol_register(struct xfrm6_protocol *handler, struct xfrm6_protocol __rcu **pprev; struct xfrm6_protocol *t; bool add_netproto = false; - int ret = -EEXIST; int priority = handler->priority; + if (!proto_handlers(protocol) || !netproto(protocol)) + return -EINVAL; + mutex_lock(&xfrm6_protocol_mutex); if (!rcu_dereference_protected(*proto_handlers(protocol), @@ -230,6 +236,9 @@ int xfrm6_protocol_deregister(struct xfrm6_protocol *handler, struct xfrm6_protocol *t; int ret = -ENOENT; + if (!proto_handlers(protocol) || !netproto(protocol)) + return -EINVAL; + mutex_lock(&xfrm6_protocol_mutex); for (pprev = proto_handlers(protocol); diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 01e77b0ae075..8c9d7302c846 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -1830,7 +1830,7 @@ static void iucv_callback_txdone(struct iucv_path *path, spin_lock_irqsave(&list->lock, flags); while (list_skb != (struct sk_buff *)list) { - if (msg->tag != IUCV_SKB_CB(list_skb)->tag) { + if (msg->tag == IUCV_SKB_CB(list_skb)->tag) { this = list_skb; break; } diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 222c28b75315..f169b6ee94ee 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -317,6 +317,7 @@ struct ieee80211_roc_work { bool started, abort, hw_begun, notified; bool to_be_freed; + bool on_channel; unsigned long hw_start_time; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index dee50aefd6e8..27600a9808ba 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -3598,18 +3598,24 @@ void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata) sdata_lock(sdata); - if (ifmgd->auth_data) { + if (ifmgd->auth_data || ifmgd->assoc_data) { + const u8 *bssid = ifmgd->auth_data ? + ifmgd->auth_data->bss->bssid : + ifmgd->assoc_data->bss->bssid; + /* - * If we are trying to authenticate while suspending, cfg80211 - * won't know and won't actually abort those attempts, thus we - * need to do that ourselves. + * If we are trying to authenticate / associate while suspending, + * cfg80211 won't know and won't actually abort those attempts, + * thus we need to do that ourselves. 
*/ - ieee80211_send_deauth_disassoc(sdata, - ifmgd->auth_data->bss->bssid, + ieee80211_send_deauth_disassoc(sdata, bssid, IEEE80211_STYPE_DEAUTH, WLAN_REASON_DEAUTH_LEAVING, false, frame_buf); - ieee80211_destroy_auth_data(sdata, false); + if (ifmgd->assoc_data) + ieee80211_destroy_assoc_data(sdata, false); + if (ifmgd->auth_data) + ieee80211_destroy_auth_data(sdata, false); cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf, IEEE80211_DEAUTH_FRAME_LEN); } diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c index 6fb38558a5e6..7a17decd27f9 100644 --- a/net/mac80211/offchannel.c +++ b/net/mac80211/offchannel.c @@ -333,7 +333,7 @@ void ieee80211_sw_roc_work(struct work_struct *work) container_of(work, struct ieee80211_roc_work, work.work); struct ieee80211_sub_if_data *sdata = roc->sdata; struct ieee80211_local *local = sdata->local; - bool started; + bool started, on_channel; mutex_lock(&local->mtx); @@ -354,14 +354,26 @@ void ieee80211_sw_roc_work(struct work_struct *work) if (!roc->started) { struct ieee80211_roc_work *dep; - /* start this ROC */ - ieee80211_offchannel_stop_vifs(local); + WARN_ON(local->use_chanctx); + + /* If actually operating on the desired channel (with at least + * 20 MHz channel width) don't stop all the operations but still + * treat it as though the ROC operation started properly, so + * other ROC operations won't interfere with this one. + */ + roc->on_channel = roc->chan == local->_oper_chandef.chan && + local->_oper_chandef.width != NL80211_CHAN_WIDTH_5 && + local->_oper_chandef.width != NL80211_CHAN_WIDTH_10; - /* switch channel etc */ + /* start this ROC */ ieee80211_recalc_idle(local); - local->tmp_channel = roc->chan; - ieee80211_hw_config(local, 0); + if (!roc->on_channel) { + ieee80211_offchannel_stop_vifs(local); + + local->tmp_channel = roc->chan; + ieee80211_hw_config(local, 0); + } /* tell userspace or send frame */ ieee80211_handle_roc_started(roc); @@ -380,9 +392,10 @@ void ieee80211_sw_roc_work(struct work_struct *work) finish: list_del(&roc->list); started = roc->started; + on_channel = roc->on_channel; ieee80211_roc_notify_destroy(roc, !roc->abort); - if (started) { + if (started && !on_channel) { ieee80211_flush_queues(local, NULL); local->tmp_channel = NULL; diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 216c45b949e5..2b608b2b70ec 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1231,7 +1231,8 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) && test_sta_flag(sta, WLAN_STA_AUTHORIZED)) { sta->last_rx = jiffies; - if (ieee80211_is_data(hdr->frame_control)) { + if (ieee80211_is_data(hdr->frame_control) && + !is_multicast_ether_addr(hdr->addr1)) { sta->last_rx_rate_idx = status->rate_idx; sta->last_rx_rate_flag = status->flag; sta->last_rx_rate_vht_flag = status->vht_flag; diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 137a192e64bc..847d92f6bef6 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -1148,7 +1148,8 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) atomic_dec(&ps->num_sta_ps); /* This station just woke up and isn't aware of our SMPS state */ - if (!ieee80211_smps_is_restrictive(sta->known_smps_mode, + if (!ieee80211_vif_is_mesh(&sdata->vif) && + !ieee80211_smps_is_restrictive(sta->known_smps_mode, sdata->smps_mode) && sta->known_smps_mode != sdata->bss->req_smps && sta_info_tx_streams(sta) != 1) { diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 00ba90b02ab2..60cb7a665976 100644 
--- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -314,10 +314,9 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local, !is_multicast_ether_addr(hdr->addr1)) txflags |= IEEE80211_RADIOTAP_F_TX_FAIL; - if ((info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) || - (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)) + if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) txflags |= IEEE80211_RADIOTAP_F_TX_CTS; - else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) + if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) txflags |= IEEE80211_RADIOTAP_F_TX_RTS; put_unaligned_le16(txflags, pos); diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h index a0b0aea76525..cec5b60487a4 100644 --- a/net/mac80211/trace.h +++ b/net/mac80211/trace.h @@ -21,10 +21,10 @@ #define VIF_ENTRY __field(enum nl80211_iftype, vif_type) __field(void *, sdata) \ __field(bool, p2p) \ - __string(vif_name, sdata->dev ? sdata->dev->name : "<nodev>") + __string(vif_name, sdata->name) #define VIF_ASSIGN __entry->vif_type = sdata->vif.type; __entry->sdata = sdata; \ __entry->p2p = sdata->vif.p2p; \ - __assign_str(vif_name, sdata->dev ? sdata->dev->name : sdata->name) + __assign_str(vif_name, sdata->name) #define VIF_PR_FMT " vif:%s(%d%s)" #define VIF_PR_ARG __get_str(vif_name), __entry->vif_type, __entry->p2p ? "/p2p" : "" diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 275c94f995f7..3c365837e910 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1780,7 +1780,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) mutex_unlock(&local->mtx); if (sched_scan_stopped) - cfg80211_sched_scan_stopped(local->hw.wiphy); + cfg80211_sched_scan_stopped_rtnl(local->hw.wiphy); /* * If this is for hw restart things are still running. diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c index e9e36a256165..9265adfdabfc 100644 --- a/net/mac80211/vht.c +++ b/net/mac80211/vht.c @@ -129,9 +129,12 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, if (!vht_cap_ie || !sband->vht_cap.vht_supported) return; - /* A VHT STA must support 40 MHz */ - if (!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)) - return; + /* + * A VHT STA must support 40 MHz, but if we verify that here + * then we break a few things - some APs (e.g. Netgear R6300v2 + * and others based on the BCM4360 chipset) will unset this + * capability bit when operating in 20 MHz. + */ vht_cap->vht_supported = true; diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index ccc46fa5edbc..58579634427d 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -1336,6 +1336,9 @@ ctnetlink_setup_nat(struct nf_conn *ct, const struct nlattr * const cda[]) #ifdef CONFIG_NF_NAT_NEEDED int ret; + if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC]) + return 0; + ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_DST, cda[CTA_NAT_DST]); if (ret < 0) diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c index 804105391b9a..345acfb1720b 100644 --- a/net/netfilter/nf_tables_core.c +++ b/net/netfilter/nf_tables_core.c @@ -66,20 +66,6 @@ struct nft_jumpstack { int rulenum; }; -static inline void -nft_chain_stats(const struct nft_chain *this, const struct nft_pktinfo *pkt, - struct nft_jumpstack *jumpstack, unsigned int stackptr) -{ - struct nft_stats __percpu *stats; - const struct nft_chain *chain = stackptr ? 
jumpstack[0].chain : this; - - rcu_read_lock_bh(); - stats = rcu_dereference(nft_base_chain(chain)->stats); - __this_cpu_inc(stats->pkts); - __this_cpu_add(stats->bytes, pkt->skb->len); - rcu_read_unlock_bh(); -} - enum nft_trace { NFT_TRACE_RULE, NFT_TRACE_RETURN, @@ -117,13 +103,14 @@ static void nft_trace_packet(const struct nft_pktinfo *pkt, unsigned int nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops) { - const struct nft_chain *chain = ops->priv; + const struct nft_chain *chain = ops->priv, *basechain = chain; const struct nft_rule *rule; const struct nft_expr *expr, *last; struct nft_data data[NFT_REG_MAX + 1]; unsigned int stackptr = 0; struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE]; - int rulenum = 0; + struct nft_stats __percpu *stats; + int rulenum; /* * Cache cursor to avoid problems in case that the cursor is updated * while traversing the ruleset. @@ -131,6 +118,7 @@ nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops) unsigned int gencursor = ACCESS_ONCE(chain->net->nft.gencursor); do_chain: + rulenum = 0; rule = list_entry(&chain->rules, struct nft_rule, list); next_rule: data[NFT_REG_VERDICT].verdict = NFT_CONTINUE; @@ -156,8 +144,10 @@ next_rule: switch (data[NFT_REG_VERDICT].verdict) { case NFT_BREAK: data[NFT_REG_VERDICT].verdict = NFT_CONTINUE; - /* fall through */ + continue; case NFT_CONTINUE: + if (unlikely(pkt->skb->nf_trace)) + nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE); continue; } break; @@ -183,37 +173,44 @@ next_rule: jumpstack[stackptr].rule = rule; jumpstack[stackptr].rulenum = rulenum; stackptr++; - /* fall through */ + chain = data[NFT_REG_VERDICT].chain; + goto do_chain; case NFT_GOTO: + if (unlikely(pkt->skb->nf_trace)) + nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE); + chain = data[NFT_REG_VERDICT].chain; goto do_chain; case NFT_RETURN: if (unlikely(pkt->skb->nf_trace)) nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RETURN); - - /* fall through */ + break; case NFT_CONTINUE: + if (unlikely(pkt->skb->nf_trace && !(chain->flags & NFT_BASE_CHAIN))) + nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_RETURN); break; default: WARN_ON(1); } if (stackptr > 0) { - if (unlikely(pkt->skb->nf_trace)) - nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_RETURN); - stackptr--; chain = jumpstack[stackptr].chain; rule = jumpstack[stackptr].rule; rulenum = jumpstack[stackptr].rulenum; goto next_rule; } - nft_chain_stats(chain, pkt, jumpstack, stackptr); if (unlikely(pkt->skb->nf_trace)) - nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_POLICY); + nft_trace_packet(pkt, basechain, -1, NFT_TRACE_POLICY); + + rcu_read_lock_bh(); + stats = rcu_dereference(nft_base_chain(basechain)->stats); + __this_cpu_inc(stats->pkts); + __this_cpu_add(stats->bytes, pkt->skb->len); + rcu_read_unlock_bh(); - return nft_base_chain(chain)->policy; + return nft_base_chain(basechain)->policy; } EXPORT_SYMBOL_GPL(nft_do_chain); diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index e009087620e3..23ef77c60fff 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -256,15 +256,15 @@ replay: #endif { nfnl_unlock(subsys_id); - kfree_skb(nskb); - return netlink_ack(skb, nlh, -EOPNOTSUPP); + netlink_ack(skb, nlh, -EOPNOTSUPP); + return kfree_skb(nskb); } } if (!ss->commit || !ss->abort) { nfnl_unlock(subsys_id); - kfree_skb(nskb); - return netlink_ack(skb, nlh, -EOPNOTSUPP); + netlink_ack(skb, nlh, -EOPNOTSUPP); + return kfree_skb(skb); } while (skb->len >= nlmsg_total_size(0)) { diff --git 
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c index 7633a752c65e..0ad080790a32 100644 --- a/net/rxrpc/ar-key.c +++ b/net/rxrpc/ar-key.c @@ -99,7 +99,7 @@ static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr, _debug("tktlen: %x", tktlen); if (tktlen > AFSTOKEN_RK_TIX_MAX) return -EKEYREJECTED; - if (8 * 4 + tktlen != toklen) + if (toklen < 8 * 4 + tktlen) return -EKEYREJECTED; plen = sizeof(*token) + sizeof(*token->kad) + tktlen; diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index eed8404443d8..f435a88d899a 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c @@ -188,6 +188,12 @@ static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = { [TCA_TCINDEX_CLASSID] = { .type = NLA_U32 }, }; +static void tcindex_filter_result_init(struct tcindex_filter_result *r) +{ + memset(r, 0, sizeof(*r)); + tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); +} + static int tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, u32 handle, struct tcindex_data *p, @@ -207,15 +213,11 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, return err; memcpy(&cp, p, sizeof(cp)); - memset(&new_filter_result, 0, sizeof(new_filter_result)); - tcf_exts_init(&new_filter_result.exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); + tcindex_filter_result_init(&new_filter_result); + tcindex_filter_result_init(&cr); if (old_r) - memcpy(&cr, r, sizeof(cr)); - else { - memset(&cr, 0, sizeof(cr)); - tcf_exts_init(&cr.exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); - } + cr.res = r->res; if (tb[TCA_TCINDEX_HASH]) cp.hash = nla_get_u32(tb[TCA_TCINDEX_HASH]); @@ -267,9 +269,14 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, err = -ENOMEM; if (!cp.perfect && !cp.h) { if (valid_perfect_hash(&cp)) { + int i; + cp.perfect = kcalloc(cp.hash, sizeof(*r), GFP_KERNEL); if (!cp.perfect) goto errout; + for (i = 0; i < cp.hash; i++) + tcf_exts_init(&cp.perfect[i].exts, TCA_TCINDEX_ACT, + TCA_TCINDEX_POLICE); balloc = 1; } else { cp.h = kcalloc(cp.hash, sizeof(f), GFP_KERNEL); @@ -295,14 +302,17 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, tcf_bind_filter(tp, &cr.res, base); } - tcf_exts_change(tp, &cr.exts, &e); + if (old_r) + tcf_exts_change(tp, &r->exts, &e); + else + tcf_exts_change(tp, &cr.exts, &e); tcf_tree_lock(tp); if (old_r && old_r != r) - memset(old_r, 0, sizeof(*old_r)); + tcindex_filter_result_init(old_r); memcpy(p, &cp, sizeof(cp)); - memcpy(r, &cr, sizeof(cr)); + r->res = cr.res; if (r == &new_filter_result) { struct tcindex_filter **fp; diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 7d09a712cb1f..88f108edfb58 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -284,14 +284,22 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy) } EXPORT_SYMBOL(cfg80211_sched_scan_results); -void cfg80211_sched_scan_stopped(struct wiphy *wiphy) +void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy) { struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + ASSERT_RTNL(); + trace_cfg80211_sched_scan_stopped(wiphy); - rtnl_lock(); __cfg80211_stop_sched_scan(rdev, true); +} +EXPORT_SYMBOL(cfg80211_sched_scan_stopped_rtnl); + +void cfg80211_sched_scan_stopped(struct wiphy *wiphy) +{ + rtnl_lock(); + cfg80211_sched_scan_stopped_rtnl(wiphy); rtnl_unlock(); } EXPORT_SYMBOL(cfg80211_sched_scan_stopped); diff --git a/net/wireless/sme.c b/net/wireless/sme.c index acdcb4a81817..3546a77033de 100644 --- a/net/wireless/sme.c +++
b/net/wireless/sme.c @@ -234,7 +234,6 @@ void cfg80211_conn_work(struct work_struct *work) NULL, 0, NULL, 0, WLAN_STATUS_UNSPECIFIED_FAILURE, false, NULL); - cfg80211_sme_free(wdev); } wdev_unlock(wdev); } @@ -648,6 +647,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, cfg80211_unhold_bss(bss_from_pub(bss)); cfg80211_put_bss(wdev->wiphy, bss); } + cfg80211_sme_free(wdev); return; } diff --git a/scripts/checksyscalls.sh b/scripts/checksyscalls.sh index fd8fa9aa7c4e..5b3add31f9f1 100755 --- a/scripts/checksyscalls.sh +++ b/scripts/checksyscalls.sh @@ -25,7 +25,7 @@ cat << EOF #define __IGNORE_rmdir /* unlinkat */ #define __IGNORE_lchown /* fchownat */ #define __IGNORE_access /* faccessat */ -#define __IGNORE_rename /* renameat */ +#define __IGNORE_rename /* renameat2 */ #define __IGNORE_readlink /* readlinkat */ #define __IGNORE_symlink /* symlinkat */ #define __IGNORE_utimes /* futimesat */ @@ -37,6 +37,9 @@ cat << EOF #define __IGNORE_lstat64 /* fstatat64 */ #endif +/* Missing flags argument */ +#define __IGNORE_renameat /* renameat2 */ + /* CLOEXEC flag */ #define __IGNORE_pipe /* pipe2 */ #define __IGNORE_dup2 /* dup3 */ diff --git a/security/device_cgroup.c b/security/device_cgroup.c index 8365909f5f8c..9134dbf70d3e 100644 --- a/security/device_cgroup.c +++ b/security/device_cgroup.c @@ -306,57 +306,138 @@ static int devcgroup_seq_show(struct seq_file *m, void *v) } /** - * may_access - verifies if a new exception is part of what is allowed - * by a dev cgroup based on the default policy + - * exceptions. This is used to make sure a child cgroup - * won't have more privileges than its parent or to - * verify if a certain access is allowed. - * @dev_cgroup: dev cgroup to be tested against - * @refex: new exception - * @behavior: behavior of the exception + * match_exception - iterates the exception list trying to find a complete match + * @exceptions: list of exceptions + * @type: device type (DEV_BLOCK or DEV_CHAR) + * @major: device file major number, ~0 to match all + * @minor: device file minor number, ~0 to match all + * @access: permission mask (ACC_READ, ACC_WRITE, ACC_MKNOD) + * + * It is considered a complete match if an exception is found that will + * contain the entire range of provided parameters. 
+ * + * Return: true in case it matches an exception completely */ -static bool may_access(struct dev_cgroup *dev_cgroup, - struct dev_exception_item *refex, - enum devcg_behavior behavior) +static bool match_exception(struct list_head *exceptions, short type, + u32 major, u32 minor, short access) { struct dev_exception_item *ex; - bool match = false; - rcu_lockdep_assert(rcu_read_lock_held() || - lockdep_is_held(&devcgroup_mutex), - "device_cgroup::may_access() called without proper synchronization"); + list_for_each_entry_rcu(ex, exceptions, list) { + if ((type & DEV_BLOCK) && !(ex->type & DEV_BLOCK)) + continue; + if ((type & DEV_CHAR) && !(ex->type & DEV_CHAR)) + continue; + if (ex->major != ~0 && ex->major != major) + continue; + if (ex->minor != ~0 && ex->minor != minor) + continue; + /* provided access cannot have more than the exception rule */ + if (access & (~ex->access)) + continue; + return true; + } + return false; +} + +/** + * match_exception_partial - iterates the exception list trying to find a partial match + * @exceptions: list of exceptions + * @type: device type (DEV_BLOCK or DEV_CHAR) + * @major: device file major number, ~0 to match all + * @minor: device file minor number, ~0 to match all + * @access: permission mask (ACC_READ, ACC_WRITE, ACC_MKNOD) + * + * It is considered a partial match if an exception's range is found to + * contain *any* of the devices specified by provided parameters. This is + * used to make sure no extra access is being granted that is forbidden by + * any exception in the list. + * + * Return: true in case the provided range partially matches an exception + */ +static bool match_exception_partial(struct list_head *exceptions, short type, + u32 major, u32 minor, short access) +{ + struct dev_exception_item *ex; - list_for_each_entry_rcu(ex, &dev_cgroup->exceptions, list) { - if ((refex->type & DEV_BLOCK) && !(ex->type & DEV_BLOCK)) + list_for_each_entry_rcu(ex, exceptions, list) { + if ((type & DEV_BLOCK) && !(ex->type & DEV_BLOCK)) continue; - if ((refex->type & DEV_CHAR) && !(ex->type & DEV_CHAR)) + if ((type & DEV_CHAR) && !(ex->type & DEV_CHAR)) continue; - if (ex->major != ~0 && ex->major != refex->major) + /* + * We must be sure that both the exception and the provided + * range aren't masking all devices + */ + if (ex->major != ~0 && major != ~0 && ex->major != major) continue; - if (ex->minor != ~0 && ex->minor != refex->minor) + if (ex->minor != ~0 && minor != ~0 && ex->minor != minor) continue; - if (refex->access & (~ex->access)) + /* + * In order to make sure the provided range isn't matching + * an exception, all its access bits shouldn't match the + * exception's access bits + */ + if (!(access & ex->access)) continue; - match = true; - break; + return true; } + return false; +} + +/** + * verify_new_ex - verifies if a new exception is allowed by the parent cgroup's permissions + * @dev_cgroup: dev cgroup to be tested against + * @refex: new exception + * @behavior: behavior of the exception's dev_cgroup + * + * This is used to make sure a child cgroup won't have more privileges + * than its parent + */ +static bool verify_new_ex(struct dev_cgroup *dev_cgroup, + struct dev_exception_item *refex, + enum devcg_behavior behavior) +{ + bool match = false; + + rcu_lockdep_assert(rcu_read_lock_held() || + lockdep_is_held(&devcgroup_mutex), + "device_cgroup:verify_new_ex called without proper synchronization"); if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) { if (behavior == DEVCG_DEFAULT_ALLOW) {
- /* the exception will deny access to certain devices */ + /* + * A new exception in the child doesn't matter; it only + * adds extra restrictions + */ return true; } else { - /* the exception will allow access to certain devices */ + /* + * A new exception in the child will add more devices + * that can be accessed, so it can't match any of the + * parent's exceptions, even slightly + */ + match = match_exception_partial(&dev_cgroup->exceptions, + refex->type, + refex->major, + refex->minor, + refex->access); + if (match) - /* - * a new exception allowing access shouldn't - * match an parent's exception - */ return false; return true; } } else { - /* only behavior == DEVCG_DEFAULT_DENY allowed here */ + /* + * Only behavior == DEVCG_DEFAULT_DENY allowed here, therefore + * the new exception will add access to more devices and must + * be contained completely in a parent's exception to be + * allowed + */ + match = match_exception(&dev_cgroup->exceptions, refex->type, + refex->major, refex->minor, + refex->access); + if (match) /* parent has an exception that matches the proposed */ return true; @@ -378,7 +459,38 @@ static int parent_has_perm(struct dev_cgroup *childcg, if (!parent) return 1; - return may_access(parent, ex, childcg->behavior); + return verify_new_ex(parent, ex, childcg->behavior); +} + +/** + * parent_allows_removal - verify if it's ok to remove an exception + * @childcg: child cgroup from where the exception will be removed + * @ex: exception being removed + * + * When removing an exception in cgroups with default ALLOW policy, it must + * be checked if removing it will give the child cgroup more access than the + * parent. + * + * Return: true if it's ok to remove exception, false otherwise + */ +static bool parent_allows_removal(struct dev_cgroup *childcg, + struct dev_exception_item *ex) +{ + struct dev_cgroup *parent = css_to_devcgroup(css_parent(&childcg->css)); + + if (!parent) + return true; + + /* It's always allowed to remove access to devices */ + if (childcg->behavior == DEVCG_DEFAULT_DENY) + return true; + + /* + * Make sure you're not removing part or all of an exception existing + * in the parent cgroup + */ + return !match_exception_partial(&parent->exceptions, ex->type, + ex->major, ex->minor, ex->access); } /** @@ -616,17 +728,21 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup, switch (filetype) { case DEVCG_ALLOW: - if (!parent_has_perm(devcgroup, &ex)) - return -EPERM; /* * If the default policy is to allow by default, try to remove * a matching exception instead.
And be silent about it: we * don't want to break compatibility */ if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) { + /* Check if the parent allows removing it first */ + if (!parent_allows_removal(devcgroup, &ex)) + return -EPERM; dev_exception_rm(devcgroup, &ex); - return 0; + break; } + + if (!parent_has_perm(devcgroup, &ex)) + return -EPERM; rc = dev_exception_add(devcgroup, &ex); break; case DEVCG_DENY: @@ -704,18 +820,18 @@ static int __devcgroup_check_permission(short type, u32 major, u32 minor, short access) { struct dev_cgroup *dev_cgroup; - struct dev_exception_item ex; - int rc; - - memset(&ex, 0, sizeof(ex)); - ex.type = type; - ex.major = major; - ex.minor = minor; - ex.access = access; + bool rc; rcu_read_lock(); dev_cgroup = task_devcgroup(current); - rc = may_access(dev_cgroup, &ex, dev_cgroup->behavior); + if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) + /* Can't match any of the exceptions, even partially */ + rc = !match_exception_partial(&dev_cgroup->exceptions, + type, major, minor, access); + else + /* Need to match completely one exception to be allowed */ + rc = match_exception(&dev_cgroup->exceptions, type, major, + minor, access); rcu_read_unlock(); if (!rc) diff --git a/sound/core/pcm_dmaengine.c b/sound/core/pcm_dmaengine.c index 94d08733cb38..76cbb9ec953a 100644 --- a/sound/core/pcm_dmaengine.c +++ b/sound/core/pcm_dmaengine.c @@ -182,6 +182,7 @@ static int dmaengine_pcm_prepare_and_submit(struct snd_pcm_substream *substream) int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream); + struct snd_pcm_runtime *runtime = substream->runtime; int ret; switch (cmd) { @@ -196,6 +197,11 @@ int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd) dmaengine_resume(prtd->dma_chan); break; case SNDRV_PCM_TRIGGER_SUSPEND: + if (runtime->info & SNDRV_PCM_INFO_PAUSE) + dmaengine_pause(prtd->dma_chan); + else + dmaengine_terminate_all(prtd->dma_chan); + break; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: dmaengine_pause(prtd->dma_chan); break; diff --git a/sound/isa/sb/sb_mixer.c b/sound/isa/sb/sb_mixer.c index 6496822c1808..1ff78ec9f0ac 100644 --- a/sound/isa/sb/sb_mixer.c +++ b/sound/isa/sb/sb_mixer.c @@ -818,12 +818,14 @@ int snd_sbmixer_new(struct snd_sb *chip) return err; break; case SB_HW_DT019X: - if ((err = snd_sbmixer_init(chip, - snd_dt019x_controls, - ARRAY_SIZE(snd_dt019x_controls), - snd_dt019x_init_values, - ARRAY_SIZE(snd_dt019x_init_values), - "DT019X")) < 0) + err = snd_sbmixer_init(chip, + snd_dt019x_controls, + ARRAY_SIZE(snd_dt019x_controls), + snd_dt019x_init_values, + ARRAY_SIZE(snd_dt019x_init_values), + "DT019X"); + if (err < 0) + return err; break; default: strcpy(card->mixername, "???"); diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index b540ad71eb0d..6cc3cf285558 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -1367,6 +1367,12 @@ static int azx_first_init(struct azx *chip) /* initialize streams */ azx_init_stream(chip); + /* workaround for Broadwell HDMI: the first stream is broken, + * so mask it by keeping it as if opened + */ + if (pci->vendor == 0x8086 && pci->device == 0x160c) + chip->azx_dev[0].opened = 1; + /* initialize chip */ azx_init_pci(chip); azx_init_chip(chip, (probe_only[dev] & 2) == 0); @@ -1737,6 +1743,9 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = { /* Lynx Point */ { PCI_DEVICE(0x8086, 0x8c20), .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, + /* 9 Series */ + { 
PCI_DEVICE(0x8086, 0x8ca0), + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, /* Wellsburg */ { PCI_DEVICE(0x8086, 0x8d20), .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 1edbb9c47c2d..b4218a19df22 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -3332,6 +3332,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = { { .id = 0x10de0051, .name = "GPU 51 HDMI/DP", .patch = patch_nvhdmi }, { .id = 0x10de0060, .name = "GPU 60 HDMI/DP", .patch = patch_nvhdmi }, { .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch }, +{ .id = 0x10de0071, .name = "GPU 71 HDMI/DP", .patch = patch_nvhdmi }, { .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch }, { .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi }, { .id = 0x11069f81, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi }, @@ -3387,6 +3388,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0044"); MODULE_ALIAS("snd-hda-codec-id:10de0051"); MODULE_ALIAS("snd-hda-codec-id:10de0060"); MODULE_ALIAS("snd-hda-codec-id:10de0067"); +MODULE_ALIAS("snd-hda-codec-id:10de0071"); MODULE_ALIAS("snd-hda-codec-id:10de8001"); MODULE_ALIAS("snd-hda-codec-id:11069f80"); MODULE_ALIAS("snd-hda-codec-id:11069f81"); diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 5f7c765391f1..49e884fb3e5d 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -4616,6 +4616,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x0653, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0657, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0658, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1028, 0x065c, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x065f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0662, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0667, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), @@ -4624,6 +4625,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x0674, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x067e, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x067f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1028, 0x0680, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1028, 0x0684, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c index fa158cfe9b32..d1929de641e2 100644 --- a/sound/soc/codecs/tlv320aic31xx.c +++ b/sound/soc/codecs/tlv320aic31xx.c @@ -376,7 +376,7 @@ static int aic31xx_dapm_power_event(struct snd_soc_dapm_widget *w, reg = AIC31XX_ADCFLAG; break; default: - dev_err(w->codec->dev, "Unknown widget '%s' calling %s/n", + dev_err(w->codec->dev, "Unknown widget '%s' calling %s\n", w->name, __func__); return -EINVAL; } diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c index 5522d2566c67..ecd26dd2e442 100644 --- a/sound/soc/codecs/wm8962.c +++ b/sound/soc/codecs/wm8962.c @@ -154,6 +154,7 @@ static struct reg_default wm8962_reg[] = { { 40, 0x0000 }, /* R40 - 
SPKOUTL volume */ { 41, 0x0000 }, /* R41 - SPKOUTR volume */ + { 49, 0x0010 }, /* R49 - Class D Control 1 */ { 51, 0x0003 }, /* R51 - Class D Control 2 */ { 56, 0x0506 }, /* R56 - Clocking 4 */ @@ -795,7 +796,6 @@ static bool wm8962_volatile_register(struct device *dev, unsigned int reg) case WM8962_ALC2: case WM8962_THERMAL_SHUTDOWN_STATUS: case WM8962_ADDITIONAL_CONTROL_4: - case WM8962_CLASS_D_CONTROL_1: case WM8962_DC_SERVO_6: case WM8962_INTERRUPT_STATUS_1: case WM8962_INTERRUPT_STATUS_2: @@ -2929,13 +2929,22 @@ static int wm8962_set_fll(struct snd_soc_codec *codec, int fll_id, int source, static int wm8962_mute(struct snd_soc_dai *dai, int mute) { struct snd_soc_codec *codec = dai->codec; - int val; + int val, ret; if (mute) - val = WM8962_DAC_MUTE; + val = WM8962_DAC_MUTE | WM8962_DAC_MUTE_ALT; else val = 0; + /* + * The DAC mute bit is mirrored in two registers; update both to keep + * the register cache consistent. + */ + ret = snd_soc_update_bits(codec, WM8962_CLASS_D_CONTROL_1, + WM8962_DAC_MUTE_ALT, val); + if (ret < 0) + return ret; + return snd_soc_update_bits(codec, WM8962_ADC_DAC_CONTROL_1, WM8962_DAC_MUTE, val); } diff --git a/sound/soc/codecs/wm8962.h b/sound/soc/codecs/wm8962.h index a1a5d5294c19..910aafd09d21 100644 --- a/sound/soc/codecs/wm8962.h +++ b/sound/soc/codecs/wm8962.h @@ -1954,6 +1954,10 @@ #define WM8962_SPKOUTL_ENA_MASK 0x0040 /* SPKOUTL_ENA */ #define WM8962_SPKOUTL_ENA_SHIFT 6 /* SPKOUTL_ENA */ #define WM8962_SPKOUTL_ENA_WIDTH 1 /* SPKOUTL_ENA */ +#define WM8962_DAC_MUTE_ALT 0x0010 /* DAC_MUTE */ +#define WM8962_DAC_MUTE_ALT_MASK 0x0010 /* DAC_MUTE */ +#define WM8962_DAC_MUTE_ALT_SHIFT 4 /* DAC_MUTE */ +#define WM8962_DAC_MUTE_ALT_WIDTH 1 /* DAC_MUTE */ #define WM8962_SPKOUTL_PGA_MUTE 0x0002 /* SPKOUTL_PGA_MUTE */ #define WM8962_SPKOUTL_PGA_MUTE_MASK 0x0002 /* SPKOUTL_PGA_MUTE */ #define WM8962_SPKOUTL_PGA_MUTE_SHIFT 1 /* SPKOUTL_PGA_MUTE */ diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c index c8e5db1414d7..496ce2eb2f1f 100644 --- a/sound/soc/fsl/fsl_esai.c +++ b/sound/soc/fsl/fsl_esai.c @@ -258,10 +258,16 @@ static int fsl_esai_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id, return -EINVAL; } - if (ratio == 1) { + /* Only EXTAL source can be output directly without using PSR and PM */ + if (ratio == 1 && clksrc == esai_priv->extalclk) { /* Bypass all the dividers if not being needed */ ecr |= tx ? ESAI_ECR_ETO : ESAI_ECR_ERO; goto out; + } else if (ratio < 2) { + /* The ratio should be no less than 2 if using other sources */ + dev_err(dai->dev, "failed to derive required HCK%c rate\n", + tx ? 'T' : 'R'); + return -EINVAL; } ret = fsl_esai_divisor_cal(dai, tx, ratio, false, 0); @@ -307,7 +313,8 @@ static int fsl_esai_set_bclk(struct snd_soc_dai *dai, bool tx, u32 freq) return -EINVAL; } - if (esai_priv->sck_div[tx] && (ratio > 16 || ratio == 0)) { + /* The ratio should be satisfied by FP alone if bypassing PM and PSR */ + if (!esai_priv->sck_div[tx] && (ratio > 16 || ratio == 0)) { dev_err(dai->dev, "the ratio is out of range (1 ~ 16)\n"); return -EINVAL; } @@ -454,12 +461,6 @@ static int fsl_esai_startup(struct snd_pcm_substream *substream, } if (!dai->active) { - /* Reset Port C */ - regmap_update_bits(esai_priv->regmap, REG_ESAI_PRRC, - ESAI_PRRC_PDC_MASK, ESAI_PRRC_PDC(ESAI_GPIO)); - regmap_update_bits(esai_priv->regmap, REG_ESAI_PCRC, - ESAI_PCRC_PC_MASK, ESAI_PCRC_PC(ESAI_GPIO)); - /* Set synchronous mode */ regmap_update_bits(esai_priv->regmap, REG_ESAI_SAICR, ESAI_SAICR_SYNC, esai_priv->synchronous ?
@@ -519,6 +520,11 @@ static int fsl_esai_hw_params(struct snd_pcm_substream *substream, regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx), mask, val); + /* Remove ESAI personal reset by configuring ESAI_PCRC and ESAI_PRRC */ + regmap_update_bits(esai_priv->regmap, REG_ESAI_PRRC, + ESAI_PRRC_PDC_MASK, ESAI_PRRC_PDC(ESAI_GPIO)); + regmap_update_bits(esai_priv->regmap, REG_ESAI_PCRC, + ESAI_PCRC_PC_MASK, ESAI_PCRC_PC(ESAI_GPIO)); return 0; } diff --git a/sound/soc/fsl/imx-audmux.c b/sound/soc/fsl/imx-audmux.c index ac869931d7f1..267717aa96c1 100644 --- a/sound/soc/fsl/imx-audmux.c +++ b/sound/soc/fsl/imx-audmux.c @@ -145,7 +145,7 @@ static const struct file_operations audmux_debugfs_fops = { .llseek = default_llseek, }; -static void __init audmux_debugfs_init(void) +static void audmux_debugfs_init(void) { int i; char buf[20]; diff --git a/sound/soc/intel/sst-acpi.c b/sound/soc/intel/sst-acpi.c index 5d06eecb6198..18aee77f8d4a 100644 --- a/sound/soc/intel/sst-acpi.c +++ b/sound/soc/intel/sst-acpi.c @@ -138,6 +138,7 @@ static int sst_acpi_probe(struct platform_device *pdev) sst_pdata = &sst_acpi->sst_pdata; sst_pdata->id = desc->sst_id; + sst_pdata->dma_dev = dev; sst_acpi->desc = desc; sst_acpi->mach = mach; diff --git a/sound/soc/intel/sst-baytrail-dsp.c b/sound/soc/intel/sst-baytrail-dsp.c index a50bf7fc0e3a..adf0aca5aca6 100644 --- a/sound/soc/intel/sst-baytrail-dsp.c +++ b/sound/soc/intel/sst-baytrail-dsp.c @@ -324,7 +324,7 @@ static int sst_byt_init(struct sst_dsp *sst, struct sst_pdata *pdata) memcpy_toio(sst->addr.lpe + SST_BYT_MAILBOX_OFFSET, &pdata->fw_base, sizeof(u32)); - ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32)); + ret = dma_coerce_mask_and_coherent(sst->dma_dev, DMA_BIT_MASK(32)); if (ret) return ret; diff --git a/sound/soc/intel/sst-baytrail-ipc.c b/sound/soc/intel/sst-baytrail-ipc.c index d0eaeee21be4..0d31dbbf4806 100644 --- a/sound/soc/intel/sst-baytrail-ipc.c +++ b/sound/soc/intel/sst-baytrail-ipc.c @@ -542,16 +542,20 @@ struct sst_byt_stream *sst_byt_stream_new(struct sst_byt *byt, int id, void *data) { struct sst_byt_stream *stream; + struct sst_dsp *sst = byt->dsp; + unsigned long flags; stream = kzalloc(sizeof(*stream), GFP_KERNEL); if (stream == NULL) return NULL; + spin_lock_irqsave(&sst->spinlock, flags); list_add(&stream->node, &byt->stream_list); stream->notify_position = notify_position; stream->pdata = data; stream->byt = byt; stream->str_id = id; + spin_unlock_irqrestore(&sst->spinlock, flags); return stream; } @@ -630,6 +634,8 @@ int sst_byt_stream_free(struct sst_byt *byt, struct sst_byt_stream *stream) { u64 header; int ret = 0; + struct sst_dsp *sst = byt->dsp; + unsigned long flags; if (!stream->commited) goto out; @@ -644,8 +650,10 @@ int sst_byt_stream_free(struct sst_byt *byt, struct sst_byt_stream *stream) stream->commited = false; out: + spin_lock_irqsave(&sst->spinlock, flags); list_del(&stream->node); kfree(stream); + spin_unlock_irqrestore(&sst->spinlock, flags); return ret; } diff --git a/sound/soc/intel/sst-dsp-priv.h b/sound/soc/intel/sst-dsp-priv.h index 30ca14a6a835..401213455497 100644 --- a/sound/soc/intel/sst-dsp-priv.h +++ b/sound/soc/intel/sst-dsp-priv.h @@ -228,6 +228,7 @@ struct sst_dsp { spinlock_t spinlock; /* IPC locking */ struct mutex mutex; /* DSP FW lock */ struct device *dev; + struct device *dma_dev; void *thread_context; int irq; u32 id; diff --git a/sound/soc/intel/sst-dsp.c b/sound/soc/intel/sst-dsp.c index 0c129fd85ecf..0b715b20a2d7 100644 --- a/sound/soc/intel/sst-dsp.c +++ b/sound/soc/intel/sst-dsp.c @@ 
-337,6 +337,7 @@ struct sst_dsp *sst_dsp_new(struct device *dev, spin_lock_init(&sst->spinlock); mutex_init(&sst->mutex); sst->dev = dev; + sst->dma_dev = pdata->dma_dev; sst->thread_context = sst_dev->thread_context; sst->sst_dev = sst_dev; sst->id = pdata->id; diff --git a/sound/soc/intel/sst-dsp.h b/sound/soc/intel/sst-dsp.h index 74052b59485c..e44423be66c4 100644 --- a/sound/soc/intel/sst-dsp.h +++ b/sound/soc/intel/sst-dsp.h @@ -169,6 +169,7 @@ struct sst_pdata { u32 dma_base; u32 dma_size; int dma_engine; + struct device *dma_dev; /* DSP */ u32 id; diff --git a/sound/soc/intel/sst-firmware.c b/sound/soc/intel/sst-firmware.c index f7687107cf7f..928f228c38e7 100644 --- a/sound/soc/intel/sst-firmware.c +++ b/sound/soc/intel/sst-firmware.c @@ -57,14 +57,8 @@ struct sst_fw *sst_fw_new(struct sst_dsp *dsp, sst_fw->private = private; sst_fw->size = fw->size; - err = dma_coerce_mask_and_coherent(dsp->dev, DMA_BIT_MASK(32)); - if (err < 0) { - kfree(sst_fw); - return NULL; - } - /* allocate DMA buffer to store FW data */ - sst_fw->dma_buf = dma_alloc_coherent(dsp->dev, sst_fw->size, + sst_fw->dma_buf = dma_alloc_coherent(dsp->dma_dev, sst_fw->size, &sst_fw->dmable_fw_paddr, GFP_DMA | GFP_KERNEL); if (!sst_fw->dma_buf) { dev_err(dsp->dev, "error: DMA alloc failed\n"); @@ -106,7 +100,7 @@ void sst_fw_free(struct sst_fw *sst_fw) list_del(&sst_fw->list); mutex_unlock(&dsp->mutex); - dma_free_coherent(dsp->dev, sst_fw->size, sst_fw->dma_buf, + dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf, sst_fw->dmable_fw_paddr); kfree(sst_fw); } @@ -202,6 +196,9 @@ static int block_alloc_contiguous(struct sst_module *module, size -= block->size; } + list_for_each_entry(block, &tmp, list) + list_add(&block->module_list, &module->block_list); + list_splice(&tmp, &dsp->used_block_list); return 0; } @@ -247,8 +244,7 @@ static int block_alloc(struct sst_module *module, /* do we span > 1 blocks */ if (data->size > block->size) { ret = block_alloc_contiguous(module, data, - block->offset + block->size, - data->size - block->size); + block->offset, data->size); if (ret == 0) return ret; } @@ -344,7 +340,7 @@ static int block_alloc_fixed(struct sst_module *module, err = block_alloc_contiguous(module, data, block->offset + block->size, - data->size - block->size + data->offset - block->offset); + data->size - block->size); if (err < 0) return -ENOMEM; @@ -371,15 +367,10 @@ static int block_alloc_fixed(struct sst_module *module, if (data->offset >= block->offset && data->offset < block_end) { err = block_alloc_contiguous(module, data, - block->offset + block->size, - data->size - block->size); + block->offset, data->size); if (err < 0) return -ENOMEM; - /* add block */ - block->data_type = data->data_type; - list_move(&block->list, &dsp->used_block_list); - list_add(&block->module_list, &module->block_list); return 0; } diff --git a/sound/soc/intel/sst-haswell-dsp.c b/sound/soc/intel/sst-haswell-dsp.c index f5ebf36af889..535f517629fd 100644 --- a/sound/soc/intel/sst-haswell-dsp.c +++ b/sound/soc/intel/sst-haswell-dsp.c @@ -433,7 +433,7 @@ static int hsw_init(struct sst_dsp *sst, struct sst_pdata *pdata) int ret = -ENODEV, i, j, region_count; u32 offset, size; - dev = sst->dev; + dev = sst->dma_dev; switch (sst->id) { case SST_DEV_ID_LYNX_POINT: @@ -466,7 +466,7 @@ static int hsw_init(struct sst_dsp *sst, struct sst_pdata *pdata) return ret; } - ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32)); + ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(31)); if (ret) return ret;
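
The sst-firmware.c hunks above change block_alloc_contiguous() so that it claims the whole requested range (callers now pass the run's real start and full size rather than block->offset + block->size and the remainder) and links every claimed block to the requesting module. A rough self-contained model of that allocation step follows; the array and types are invented for the example, and the driver actually walks linked lists of struct sst_mem_block:

#include <stdbool.h>
#include <stdio.h>

struct block {
        unsigned int offset;    /* start of the block in DSP memory */
        unsigned int size;      /* block length in bytes */
        bool used;              /* claimed by some module */
};

static bool alloc_contiguous(struct block *blocks, int nblocks,
                             unsigned int offset, unsigned int size)
{
        unsigned int end = offset + size;

        while (offset < end) {
                bool found = false;
                int i;

                for (i = 0; i < nblocks; i++) {
                        struct block *b = &blocks[i];

                        if (b->used || b->offset != offset)
                                continue;
                        b->used = true; /* the driver moves it to a used list */
                        offset += b->size;
                        found = true;
                        break;
                }
                if (!found)
                        return false;   /* hole in the range: cannot satisfy */
        }
        return true;
}

int main(void)
{
        struct block pool[] = {
                { 0x0000, 0x1000, false },
                { 0x1000, 0x1000, false },
                { 0x2000, 0x1000, true },       /* already owned elsewhere */
        };

        /* Like the fixed callers: pass the run's real start and full size,
         * not (offset + block->size, size - block->size). */
        printf("0x0000+0x2000: %s\n",
               alloc_contiguous(pool, 3, 0x0000, 0x2000) ? "ok" : "failed");
        printf("0x2000+0x1000: %s\n",
               alloc_contiguous(pool, 3, 0x2000, 0x1000) ? "ok" : "failed");
        return 0;
}

The first request succeeds by claiming the two free blocks that cover it; the second fails because the covering block is already in use, which is the case the old off-by-one-block arithmetic mishandled.
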
diff --git a/sound/soc/intel/sst-haswell-ipc.c b/sound/soc/intel/sst-haswell-ipc.c index 50e4246d4b57..e7996b39a484 100644 --- a/sound/soc/intel/sst-haswell-ipc.c +++ b/sound/soc/intel/sst-haswell-ipc.c @@ -1159,11 +1159,14 @@ struct sst_hsw_stream *sst_hsw_stream_new(struct sst_hsw *hsw, int id, void *data) { struct sst_hsw_stream *stream; + struct sst_dsp *sst = hsw->dsp; + unsigned long flags; stream = kzalloc(sizeof(*stream), GFP_KERNEL); if (stream == NULL) return NULL; + spin_lock_irqsave(&sst->spinlock, flags); list_add(&stream->node, &hsw->stream_list); stream->notify_position = notify_position; stream->pdata = data; @@ -1172,6 +1175,7 @@ struct sst_hsw_stream *sst_hsw_stream_new(struct sst_hsw *hsw, int id, /* work to process notification messages */ INIT_WORK(&stream->notify_work, hsw_notification_work); + spin_unlock_irqrestore(&sst->spinlock, flags); return stream; } @@ -1180,6 +1184,8 @@ int sst_hsw_stream_free(struct sst_hsw *hsw, struct sst_hsw_stream *stream) { u32 header; int ret = 0; + struct sst_dsp *sst = hsw->dsp; + unsigned long flags; /* don't free DSP streams that are not commited */ if (!stream->commited) @@ -1201,8 +1207,11 @@ int sst_hsw_stream_free(struct sst_hsw *hsw, struct sst_hsw_stream *stream) trace_hsw_stream_free_req(stream, &stream->free_req); out: + cancel_work_sync(&stream->notify_work); + spin_lock_irqsave(&sst->spinlock, flags); list_del(&stream->node); kfree(stream); + spin_unlock_irqrestore(&sst->spinlock, flags); return ret; } @@ -1538,10 +1547,28 @@ int sst_hsw_stream_reset(struct sst_hsw *hsw, struct sst_hsw_stream *stream) } /* Stream pointer positions */ -int sst_hsw_get_dsp_position(struct sst_hsw *hsw, +u32 sst_hsw_get_dsp_position(struct sst_hsw *hsw, struct sst_hsw_stream *stream) { - return stream->rpos.position; + u32 rpos; + + sst_dsp_read(hsw->dsp, &rpos, + stream->reply.read_position_register_address, sizeof(rpos)); + + return rpos; +} + +/* Stream presentation (monotonic) positions */ +u64 sst_hsw_get_dsp_presentation_position(struct sst_hsw *hsw, + struct sst_hsw_stream *stream) +{ + u64 ppos; + + sst_dsp_read(hsw->dsp, &ppos, + stream->reply.presentation_position_register_address, + sizeof(ppos)); + + return ppos; } int sst_hsw_stream_set_write_position(struct sst_hsw *hsw, diff --git a/sound/soc/intel/sst-haswell-ipc.h b/sound/soc/intel/sst-haswell-ipc.h index d517929ccc38..2ac194a6d04b 100644 --- a/sound/soc/intel/sst-haswell-ipc.h +++ b/sound/soc/intel/sst-haswell-ipc.h @@ -464,7 +464,9 @@ int sst_hsw_stream_get_write_pos(struct sst_hsw *hsw, struct sst_hsw_stream *stream, u32 *position); int sst_hsw_stream_set_write_position(struct sst_hsw *hsw, struct sst_hsw_stream *stream, u32 stage_id, u32 position); -int sst_hsw_get_dsp_position(struct sst_hsw *hsw, +u32 sst_hsw_get_dsp_position(struct sst_hsw *hsw, + struct sst_hsw_stream *stream); +u64 sst_hsw_get_dsp_presentation_position(struct sst_hsw *hsw, + struct sst_hsw_stream *stream); /* HW port config */ diff --git a/sound/soc/intel/sst-haswell-pcm.c b/sound/soc/intel/sst-haswell-pcm.c index 0a32dd13a23d..9d5f64a583a3 100644 --- a/sound/soc/intel/sst-haswell-pcm.c +++ b/sound/soc/intel/sst-haswell-pcm.c @@ -99,6 +99,7 @@ struct hsw_pcm_data { struct snd_compr_stream *cstream; unsigned int wpos; struct mutex mutex; + bool allocated; }; /* private data for the driver */ @@ -107,12 +108,14 @@ struct hsw_priv_data { struct sst_hsw *hsw; /* page tables */ - unsigned char *pcm_pg[HSW_PCM_COUNT][2]; + struct snd_dma_buffer dmab[HSW_PCM_COUNT][2]; /* DAI data */ struct hsw_pcm_data pcm[HSW_PCM_COUNT]; };
+static u32 hsw_notify_pointer(struct sst_hsw_stream *stream, void *data); + static inline u32 hsw_mixer_to_ipc(unsigned int value) { if (value >= ARRAY_SIZE(volume_map)) @@ -273,28 +276,26 @@ static const struct snd_kcontrol_new hsw_volume_controls[] = { }; /* Create DMA buffer page table for DSP */ -static int create_adsp_page_table(struct hsw_priv_data *pdata, - struct snd_soc_pcm_runtime *rtd, - unsigned char *dma_area, size_t size, int pcm, int stream) +static int create_adsp_page_table(struct snd_pcm_substream *substream, + struct hsw_priv_data *pdata, struct snd_soc_pcm_runtime *rtd, + unsigned char *dma_area, size_t size, int pcm) { - int i, pages; + struct snd_dma_buffer *dmab = snd_pcm_get_dma_buf(substream); + int i, pages, stream = substream->stream; - if (size % PAGE_SIZE) - pages = (size / PAGE_SIZE) + 1; - else - pages = size / PAGE_SIZE; + pages = snd_sgbuf_aligned_pages(size); dev_dbg(rtd->dev, "generating page table for %p size 0x%zu pages %d\n", dma_area, size, pages); for (i = 0; i < pages; i++) { u32 idx = (((i << 2) + i)) >> 1; - u32 pfn = (virt_to_phys(dma_area + i * PAGE_SIZE)) >> PAGE_SHIFT; + u32 pfn = snd_sgbuf_get_addr(dmab, i * PAGE_SIZE) >> PAGE_SHIFT; u32 *pg_table; dev_dbg(rtd->dev, "pfn i %i idx %d pfn %x\n", i, idx, pfn); - pg_table = (u32*)(pdata->pcm_pg[pcm][stream] + idx); + pg_table = (u32 *)(pdata->dmab[pcm][stream].area + idx); if (i & 1) *pg_table |= (pfn << 4); @@ -317,12 +318,36 @@ static int hsw_pcm_hw_params(struct snd_pcm_substream *substream, struct sst_hsw *hsw = pdata->hsw; struct sst_module *module_data; struct sst_dsp *dsp; + struct snd_dma_buffer *dmab; enum sst_hsw_stream_type stream_type; enum sst_hsw_stream_path_id path_id; u32 rate, bits, map, pages, module_id; u8 channels; int ret; + /* check if we are being called for a subsequent time */ + if (pcm_data->allocated) { + ret = sst_hsw_stream_reset(hsw, pcm_data->stream); + if (ret < 0) + dev_dbg(rtd->dev, "error: reset stream failed %d\n", + ret); + + ret = sst_hsw_stream_free(hsw, pcm_data->stream); + if (ret < 0) { + dev_dbg(rtd->dev, "error: free stream failed %d\n", + ret); + return ret; + } + pcm_data->allocated = false; + + pcm_data->stream = sst_hsw_stream_new(hsw, rtd->cpu_dai->id, + hsw_notify_pointer, pcm_data); + if (pcm_data->stream == NULL) { + dev_err(rtd->dev, "error: failed to create stream\n"); + return -EINVAL; + } + } + /* stream direction */ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) path_id = SST_HSW_STREAM_PATH_SSP0_OUT; @@ -416,8 +441,10 @@ static int hsw_pcm_hw_params(struct snd_pcm_substream *substream, return ret; } - ret = create_adsp_page_table(pdata, rtd, runtime->dma_area, - runtime->dma_bytes, rtd->cpu_dai->id, substream->stream); + dmab = snd_pcm_get_dma_buf(substream); + + ret = create_adsp_page_table(substream, pdata, rtd, runtime->dma_area, + runtime->dma_bytes, rtd->cpu_dai->id); if (ret < 0) return ret; @@ -430,9 +457,9 @@ static int hsw_pcm_hw_params(struct snd_pcm_substream *substream, pages = runtime->dma_bytes / PAGE_SIZE; ret = sst_hsw_stream_buffer(hsw, pcm_data->stream, - virt_to_phys(pdata->pcm_pg[rtd->cpu_dai->id][substream->stream]), + pdata->dmab[rtd->cpu_dai->id][substream->stream].addr, pages, runtime->dma_bytes, 0, - (u32)(virt_to_phys(runtime->dma_area) >> PAGE_SHIFT)); + snd_sgbuf_get_addr(dmab, 0) >> PAGE_SHIFT); if (ret < 0) { dev_err(rtd->dev, "error: failed to set DMA buffer %d\n", ret); return ret; @@ -474,6 +501,7 @@ static int hsw_pcm_hw_params(struct snd_pcm_substream *substream, dev_err(rtd->dev, "error: failed to commit stream %d\n", ret); return ret; } + pcm_data->allocated = true; ret = sst_hsw_stream_pause(hsw, pcm_data->stream, 1); if (ret < 0) @@ -541,12 +569,14 @@ static snd_pcm_uframes_t hsw_pcm_pointer(struct snd_pcm_substream *substream) struct hsw_pcm_data *pcm_data = snd_soc_pcm_get_drvdata(rtd); struct sst_hsw *hsw = pdata->hsw; snd_pcm_uframes_t offset; + uint64_t ppos; + u32 position = sst_hsw_get_dsp_position(hsw, pcm_data->stream); - offset = bytes_to_frames(runtime, - sst_hsw_get_dsp_position(hsw, pcm_data->stream)); + offset = bytes_to_frames(runtime, position); + ppos = sst_hsw_get_dsp_presentation_position(hsw, pcm_data->stream); - dev_dbg(rtd->dev, "PCM: DMA pointer %zu bytes\n", - frames_to_bytes(runtime, (u32)offset)); + dev_dbg(rtd->dev, "PCM: DMA pointer %u bytes, pos %llu\n", + position, ppos); return offset; } @@ -606,6 +636,7 @@ static int hsw_pcm_close(struct snd_pcm_substream *substream) dev_dbg(rtd->dev, "error: free stream failed %d\n", ret); goto out; } + pcm_data->allocated = false; pcm_data->stream = NULL; out: @@ -621,7 +652,7 @@ static struct snd_pcm_ops hsw_pcm_ops = { .hw_free = hsw_pcm_hw_free, .trigger = hsw_pcm_trigger, .pointer = hsw_pcm_pointer, - .mmap = snd_pcm_lib_default_mmap, + .page = snd_pcm_sgbuf_ops_page, }; static void hsw_pcm_free(struct snd_pcm *pcm) @@ -632,17 +663,16 @@ static int hsw_pcm_new(struct snd_soc_pcm_runtime *rtd) { struct snd_pcm *pcm = rtd->pcm; + struct snd_soc_platform *platform = rtd->platform; + struct sst_pdata *pdata = dev_get_platdata(platform->dev); + struct device *dev = pdata->dma_dev; int ret = 0; - ret = dma_coerce_mask_and_coherent(rtd->card->dev, DMA_BIT_MASK(32)); - if (ret) - return ret; - if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream || pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) { ret = snd_pcm_lib_preallocate_pages_for_all(pcm, - SNDRV_DMA_TYPE_DEV, - rtd->card->dev, + SNDRV_DMA_TYPE_DEV_SG, + dev, hsw_pcm_hardware.buffer_bytes_max, hsw_pcm_hardware.buffer_bytes_max); if (ret) { @@ -742,11 +772,14 @@ static int hsw_pcm_probe(struct snd_soc_platform *platform) { struct sst_pdata *pdata = dev_get_platdata(platform->dev); struct hsw_priv_data *priv_data; - int i; + struct device *dma_dev; + int i, ret = 0; if (!pdata) return -ENODEV; + dma_dev = pdata->dma_dev; + priv_data = devm_kzalloc(platform->dev, sizeof(*priv_data), GFP_KERNEL); priv_data->hsw = pdata->dsp; snd_soc_platform_set_drvdata(platform, priv_data); @@ -758,15 +791,17 @@ static int hsw_pcm_probe(struct snd_soc_platform *platform) /* playback */ if (hsw_dais[i].playback.channels_min) { - priv_data->pcm_pg[i][0] = kzalloc(PAGE_SIZE, GFP_DMA); - if (priv_data->pcm_pg[i][0] == NULL) + ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dma_dev, + PAGE_SIZE, &priv_data->dmab[i][0]); + if (ret < 0) goto err; } /* capture */ if (hsw_dais[i].capture.channels_min) { - priv_data->pcm_pg[i][1] = kzalloc(PAGE_SIZE, GFP_DMA); - if (priv_data->pcm_pg[i][1] == NULL) + ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dma_dev, + PAGE_SIZE, &priv_data->dmab[i][1]); + if (ret < 0) goto err; } } @@ -776,11 +811,11 @@ static int hsw_pcm_probe(struct snd_soc_platform *platform) err: for (;i >= 0; i--) { if (hsw_dais[i].playback.channels_min) - kfree(priv_data->pcm_pg[i][0]); + snd_dma_free_pages(&priv_data->dmab[i][0]); if (hsw_dais[i].capture.channels_min) - kfree(priv_data->pcm_pg[i][1]); + snd_dma_free_pages(&priv_data->dmab[i][1]); } - return -ENOMEM; + return ret; } static int hsw_pcm_remove(struct snd_soc_platform *platform)
@@ -791,9 +826,9 @@ static int hsw_pcm_remove(struct snd_soc_platform *platform) for (i = 0; i < ARRAY_SIZE(hsw_dais); i++) { if (hsw_dais[i].playback.channels_min) - kfree(priv_data->pcm_pg[i][0]); + snd_dma_free_pages(&priv_data->dmab[i][0]); if (hsw_dais[i].capture.channels_min) - kfree(priv_data->pcm_pg[i][1]); + snd_dma_free_pages(&priv_data->dmab[i][1]); } return 0; diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c index 215b668166be..89424470a1f3 100644 --- a/sound/soc/sh/rcar/core.c +++ b/sound/soc/sh/rcar/core.c @@ -197,13 +197,12 @@ static void rsnd_dma_complete(void *data) * rsnd_dai_pointer_update() will be called twice, * and it will break io->byte_pos */ - - rsnd_dai_pointer_update(io, io->byte_per_period); - if (dma->submit_loop) rsnd_dma_continue(dma); rsnd_unlock(priv, flags); + + rsnd_dai_pointer_update(io, io->byte_per_period); } static void __rsnd_dma_start(struct rsnd_dma *dma) diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 7769b0a2bc5a..6d6ceee447d5 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -1612,8 +1612,11 @@ static void dapm_pre_sequence_async(void *data, async_cookie_t cookie) "ASoC: Failed to turn on bias: %d\n", ret); } - /* Prepare for a STADDBY->ON or ON->STANDBY transition */ - if (d->bias_level != d->target_bias_level) { + /* Prepare for a transition to ON or away from ON */ + if ((d->target_bias_level == SND_SOC_BIAS_ON && + d->bias_level != SND_SOC_BIAS_ON) || + (d->target_bias_level != SND_SOC_BIAS_ON && + d->bias_level == SND_SOC_BIAS_ON)) { ret = snd_soc_dapm_set_bias_level(d, SND_SOC_BIAS_PREPARE); if (ret != 0) dev_err(d->dev, @@ -3475,8 +3478,11 @@ void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card) cpu_dai = rtd->cpu_dai; codec_dai = rtd->codec_dai; - /* dynamic FE links have no fixed DAI mapping */ - if (rtd->dai_link->dynamic) + /* + * dynamic FE links have no fixed DAI mapping. + * CODEC<->CODEC links have no direct connection. + */ + if (rtd->dai_link->dynamic || rtd->dai_link->params) continue; /* there is no point in connecting BE DAI links with dummies */ diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index 2cedf09f6d96..a391de058037 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -1675,7 +1675,7 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream, be->dpcm[stream].state = SND_SOC_DPCM_STATE_STOP; break; case SNDRV_PCM_TRIGGER_SUSPEND: - if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) + if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START) continue; if (!snd_soc_dpcm_can_be_free_stop(fe, be, stream)) diff --git a/tools/Makefile b/tools/Makefile index bcae806b0c39..9a617adc6675 100644 --- a/tools/Makefile +++ b/tools/Makefile @@ -44,6 +44,9 @@ cpupower: FORCE cgroup firewire hv guest usb virtio vm net: FORCE $(call descend,$@) +liblockdep: FORCE + $(call descend,lib/lockdep) + libapikfs: FORCE $(call descend,lib/api) @@ -91,6 +94,9 @@ cpupower_clean: cgroup_clean hv_clean firewire_clean lguest_clean usb_clean virtio_clean vm_clean net_clean: $(call descend,$(@:_clean=),clean) +liblockdep_clean: + $(call descend,lib/lockdep,clean) + libapikfs_clean: $(call descend,lib/api,clean) diff --git a/tools/lib/lockdep/Makefile b/tools/lib/lockdep/Makefile index cb09d3ff8f58..bba2f5253b6e 100644 --- a/tools/lib/lockdep/Makefile +++ b/tools/lib/lockdep/Makefile @@ -1,8 +1,7 @@ # file format version FILE_VERSION = 1 -MAKEFLAGS += --no-print-directory -LIBLOCKDEP_VERSION=$(shell make -sC ../../..
kernelversion) +LIBLOCKDEP_VERSION=$(shell make --no-print-directory -sC ../../.. kernelversion) # Makefiles suck: This macro sets a default value of $(2) for the # variable named by $(1), unless the variable has been set by @@ -231,7 +230,7 @@ install_lib: all_cmd install: install_lib clean: - $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d + $(RM) *.o *~ $(TARGETS) *.a *liblockdep*.so* $(VERSION_FILES) .*.d $(RM) tags TAGS endif # skip-makefile |
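
Finally, returning to the security/device_cgroup.c hunks earlier in this series: the split of may_access() into match_exception() and match_exception_partial() is easiest to see outside the kernel. A minimal plain-C sketch of the two predicates follows; the ~0 "match anything" convention and the access-mask checks mirror the hunks above, while the types and the example in main() are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

#define ACC_READ  1
#define ACC_WRITE 2
#define ACC_MKNOD 4

struct exception {
        unsigned int major, minor;      /* ~0u means "any" */
        int access;                     /* ACC_* mask */
};

/* Complete match: the exception covers every device/access requested. */
static bool match_complete(const struct exception *ex,
                           unsigned int major, unsigned int minor, int access)
{
        if (ex->major != ~0u && ex->major != major)
                return false;
        if (ex->minor != ~0u && ex->minor != minor)
                return false;
        /* the request must not ask for access bits the exception lacks */
        return !(access & ~ex->access);
}

/* Partial match: the exception and the request overlap in at least
 * one device and at least one access bit. */
static bool match_partial(const struct exception *ex,
                          unsigned int major, unsigned int minor, int access)
{
        /* ranges only fail to overlap if both sides pin different numbers */
        if (ex->major != ~0u && major != ~0u && ex->major != major)
                return false;
        if (ex->minor != ~0u && minor != ~0u && ex->minor != minor)
                return false;
        return access & ex->access;
}

int main(void)
{
        struct exception ex = { 8, ~0u, ACC_READ };     /* e.g. "b 8:* r" */

        /* read of 8:1 is fully covered; read+write of 8:1 only overlaps */
        printf("complete(8:1, r)  = %d\n", match_complete(&ex, 8, 1, ACC_READ));
        printf("complete(8:1, rw) = %d\n",
               match_complete(&ex, 8, 1, ACC_READ | ACC_WRITE));
        printf("partial(8:1, rw)  = %d\n",
               match_partial(&ex, 8, 1, ACC_READ | ACC_WRITE));
        return 0;
}

With a default-allow cgroup, a new child exception must not even partially overlap a parent exception, since it would widen access; with default-deny, it must be completely contained in one. That is exactly how verify_new_ex() in the hunks above uses the two helpers.
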